| author | Mike Pagano <mpagano@gentoo.org> | 2020-09-17 10:57:11 -0400 |
|---|---|---|
| committer | Mike Pagano <mpagano@gentoo.org> | 2020-09-17 10:57:11 -0400 |
| commit | aec56589ca906076f1dc545d65c1cebcafaa9775 (patch) | |
| tree | eb2ab462de5a689d92b29871003fd520aec6a700 | |
| parent | Update cpu opt patch for v9.1 >= gcc < v10.X. (diff) | |
| download | linux-patches-aec56589ca906076f1dc545d65c1cebcafaa9775.tar.gz, linux-patches-aec56589ca906076f1dc545d65c1cebcafaa9775.tar.bz2, linux-patches-aec56589ca906076f1dc545d65c1cebcafaa9775.zip | |
Linux patch 5.8.10 (5.8-13)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1009_linux-5.8.10.patch | 6953 |
2 files changed, 6957 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 8c023836..f2e8a67c 100644 --- a/0000_README +++ b/0000_README @@ -79,6 +79,10 @@ Patch: 1008_linux-5.8.9.patch From: http://www.kernel.org Desc: Linux 5.8.9 +Patch: 1009_linux-5.8.10.patch +From: http://www.kernel.org +Desc: Linux 5.8.10 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1009_linux-5.8.10.patch b/1009_linux-5.8.10.patch new file mode 100644 index 00000000..bc12dd87 --- /dev/null +++ b/1009_linux-5.8.10.patch @@ -0,0 +1,6953 @@ +diff --git a/Documentation/sound/designs/timestamping.rst b/Documentation/sound/designs/timestamping.rst +index 2b0fff5034151..7c7ecf5dbc4bd 100644 +--- a/Documentation/sound/designs/timestamping.rst ++++ b/Documentation/sound/designs/timestamping.rst +@@ -143,7 +143,7 @@ timestamp shows when the information is put together by the driver + before returning from the ``STATUS`` and ``STATUS_EXT`` ioctl. in most cases + this driver_timestamp will be identical to the regular system tstamp. + +-Examples of typestamping with HDaudio: ++Examples of timestamping with HDAudio: + + 1. DMA timestamp, no compensation for DMA+analog delay + :: +diff --git a/Makefile b/Makefile +index 36eab48d1d4a6..d937530d33427 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 8 +-SUBLEVEL = 9 ++SUBLEVEL = 10 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +@@ -876,10 +876,6 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections + LDFLAGS_vmlinux += --gc-sections + endif + +-ifdef CONFIG_LIVEPATCH +-KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone) +-endif +- + ifdef CONFIG_SHADOW_CALL_STACK + CC_FLAGS_SCS := -fsanitize=shadow-call-stack + KBUILD_CFLAGS += $(CC_FLAGS_SCS) +diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts +index 9acbeba832c0b..dcaa44e408ace 100644 +--- a/arch/arc/boot/dts/hsdk.dts ++++ b/arch/arc/boot/dts/hsdk.dts +@@ -88,6 +88,8 @@ + + arcpct: pct { + compatible = "snps,archs-pct"; ++ interrupt-parent = <&cpu_intc>; ++ interrupts = <20>; + }; + + /* TIMER0 with interrupt for clockevent */ +@@ -208,7 +210,7 @@ + reg = <0x8000 0x2000>; + interrupts = <10>; + interrupt-names = "macirq"; +- phy-mode = "rgmii"; ++ phy-mode = "rgmii-id"; + snps,pbl = <32>; + snps,multicast-filter-bins = <256>; + clocks = <&gmacclk>; +@@ -226,7 +228,7 @@ + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; +- phy0: ethernet-phy@0 { ++ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */ + reg = <0>; + }; + }; +diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c +index 28e8bf04b253f..a331bb5d8319f 100644 +--- a/arch/arc/kernel/troubleshoot.c ++++ b/arch/arc/kernel/troubleshoot.c +@@ -18,44 +18,37 @@ + + #define ARC_PATH_MAX 256 + +-/* +- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) +- * -Prints 3 regs per line and a CR. 
+- * -To continue, callee regs right after scratch, special handling of CR +- */ +-static noinline void print_reg_file(long *reg_rev, int start_num) ++static noinline void print_regs_scratch(struct pt_regs *regs) + { +- unsigned int i; +- char buf[512]; +- int n = 0, len = sizeof(buf); +- +- for (i = start_num; i < start_num + 13; i++) { +- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t", +- i, (unsigned long)*reg_rev); +- +- if (((i + 1) % 3) == 0) +- n += scnprintf(buf + n, len - n, "\n"); +- +- /* because pt_regs has regs reversed: r12..r0, r25..r13 */ +- if (is_isa_arcv2() && start_num == 0) +- reg_rev++; +- else +- reg_rev--; +- } +- +- if (start_num != 0) +- n += scnprintf(buf + n, len - n, "\n\n"); ++ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n", ++ regs->bta, regs->sp, regs->fp, (void *)regs->blink); ++ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", ++ regs->lp_start, regs->lp_end, regs->lp_count); + +- /* To continue printing callee regs on same line as scratch regs */ +- if (start_num == 0) +- pr_info("%s", buf); +- else +- pr_cont("%s\n", buf); ++ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \ ++ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \ ++ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \ ++ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \ ++ "r12: 0x%08lx\t", ++ regs->r0, regs->r1, regs->r2, ++ regs->r3, regs->r4, regs->r5, ++ regs->r6, regs->r7, regs->r8, ++ regs->r9, regs->r10, regs->r11, ++ regs->r12); + } + +-static void show_callee_regs(struct callee_regs *cregs) ++static void print_regs_callee(struct callee_regs *regs) + { +- print_reg_file(&(cregs->r13), 13); ++ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \ ++ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \ ++ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \ ++ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \ ++ "r24: 0x%08lx\tr25: 0x%08lx\n", ++ regs->r13, regs->r14, ++ regs->r15, regs->r16, regs->r17, ++ regs->r18, regs->r19, regs->r20, ++ regs->r21, regs->r22, regs->r23, ++ regs->r24, regs->r25); + } + + static void print_task_path_n_nm(struct task_struct *tsk) +@@ -175,7 +168,7 @@ static void show_ecr_verbose(struct pt_regs *regs) + void show_regs(struct pt_regs *regs) + { + struct task_struct *tsk = current; +- struct callee_regs *cregs; ++ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg; + + /* + * generic code calls us with preemption disabled, but some calls +@@ -204,25 +197,15 @@ void show_regs(struct pt_regs *regs) + STS_BIT(regs, A2), STS_BIT(regs, A1), + STS_BIT(regs, E2), STS_BIT(regs, E1)); + #else +- pr_cont(" [%2s%2s%2s%2s]", ++ pr_cont(" [%2s%2s%2s%2s] ", + STS_BIT(regs, IE), + (regs->status32 & STATUS_U_MASK) ? 
"U " : "K ", + STS_BIT(regs, DE), STS_BIT(regs, AE)); + #endif +- pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n", +- regs->bta, regs->sp, regs->fp, (void *)regs->blink); +- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", +- regs->lp_start, regs->lp_end, regs->lp_count); +- +- /* print regs->r0 thru regs->r12 +- * Sequential printing was generating horrible code +- */ +- print_reg_file(&(regs->r0), 0); + +- /* If Callee regs were saved, display them too */ +- cregs = (struct callee_regs *)current->thread.callee_reg; ++ print_regs_scratch(regs); + if (cregs) +- show_callee_regs(cregs); ++ print_regs_callee(cregs); + + preempt_disable(); + } +diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h +index a4a61531c7fb9..77712c5ffe848 100644 +--- a/arch/arc/plat-eznps/include/plat/ctop.h ++++ b/arch/arc/plat-eznps/include/plat/ctop.h +@@ -33,7 +33,6 @@ + #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) + #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) + #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) +-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) + #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) + #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) + +diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi +index 5e5f5ca3c86f1..bba0e8cd2acbd 100644 +--- a/arch/arm/boot/dts/bcm-hr2.dtsi ++++ b/arch/arm/boot/dts/bcm-hr2.dtsi +@@ -217,7 +217,7 @@ + }; + + qspi: spi@27200 { +- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; ++ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; + reg = <0x027200 0x184>, + <0x027000 0x124>, + <0x11c408 0x004>, +diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi +index 3175266ede646..465937b79c8e4 100644 +--- a/arch/arm/boot/dts/bcm-nsp.dtsi ++++ b/arch/arm/boot/dts/bcm-nsp.dtsi +@@ -284,7 +284,7 @@ + }; + + qspi: spi@27200 { +- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; ++ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; + reg = <0x027200 0x184>, + <0x027000 0x124>, + <0x11c408 0x004>, +diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi +index 2d9b4dd058307..0016720ce5300 100644 +--- a/arch/arm/boot/dts/bcm5301x.dtsi ++++ b/arch/arm/boot/dts/bcm5301x.dtsi +@@ -488,7 +488,7 @@ + }; + + spi@18029200 { +- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; ++ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; + reg = <0x18029200 0x184>, + <0x18029000 0x124>, + <0x1811b408 0x004>, +diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h +index 0b02c7e60c174..f4dc46207954c 100644 +--- a/arch/arm/boot/dts/imx6sx-pinfunc.h ++++ b/arch/arm/boot/dts/imx6sx-pinfunc.h +@@ -1026,7 +1026,7 @@ + #define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0 + #define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0 + #define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4 +-#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0 ++#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0 + #define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1 + #define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2 + #define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1 +diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts +index e5e20b07f184b..7cb6153fc650b 100644 +--- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts ++++ 
b/arch/arm/boot/dts/imx7d-zii-rmu2.dts +@@ -58,7 +58,7 @@ + <&clks IMX7D_ENET1_TIME_ROOT_CLK>; + assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>; + assigned-clock-rates = <0>, <100000000>; +- phy-mode = "rgmii"; ++ phy-mode = "rgmii-id"; + phy-handle = <&fec1_phy>; + status = "okay"; + +diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi +index f7c4878534c8e..1bff3efe8aafe 100644 +--- a/arch/arm/boot/dts/imx7ulp.dtsi ++++ b/arch/arm/boot/dts/imx7ulp.dtsi +@@ -394,7 +394,7 @@ + clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, + <&pcc3 IMX7ULP_CLK_PCTLC>; + clock-names = "gpio", "port"; +- gpio-ranges = <&iomuxc1 0 0 32>; ++ gpio-ranges = <&iomuxc1 0 0 20>; + }; + + gpio_ptd: gpio@40af0000 { +@@ -408,7 +408,7 @@ + clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, + <&pcc3 IMX7ULP_CLK_PCTLD>; + clock-names = "gpio", "port"; +- gpio-ranges = <&iomuxc1 0 32 32>; ++ gpio-ranges = <&iomuxc1 0 32 12>; + }; + + gpio_pte: gpio@40b00000 { +@@ -422,7 +422,7 @@ + clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, + <&pcc3 IMX7ULP_CLK_PCTLE>; + clock-names = "gpio", "port"; +- gpio-ranges = <&iomuxc1 0 64 32>; ++ gpio-ranges = <&iomuxc1 0 64 16>; + }; + + gpio_ptf: gpio@40b10000 { +@@ -436,7 +436,7 @@ + clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>, + <&pcc3 IMX7ULP_CLK_PCTLF>; + clock-names = "gpio", "port"; +- gpio-ranges = <&iomuxc1 0 96 32>; ++ gpio-ranges = <&iomuxc1 0 96 20>; + }; + }; + +diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +index 100396f6c2feb..395e05f10d36c 100644 +--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi ++++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +@@ -51,6 +51,8 @@ + + &mcbsp2 { + status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&mcbsp2_pins>; + }; + + &charger { +@@ -102,35 +104,18 @@ + regulator-max-microvolt = <3300000>; + }; + +- lcd0: display@0 { +- compatible = "panel-dpi"; +- label = "28"; +- status = "okay"; +- /* default-on; */ ++ lcd0: display { ++ /* This isn't the exact LCD, but the timings meet spec */ ++ compatible = "logicpd,type28"; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_enable_pin>; +- enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */ ++ backlight = <&bl>; ++ enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; + port { + lcd_in: endpoint { + remote-endpoint = <&dpi_out>; + }; + }; +- +- panel-timing { +- clock-frequency = <9000000>; +- hactive = <480>; +- vactive = <272>; +- hfront-porch = <3>; +- hback-porch = <2>; +- hsync-len = <42>; +- vback-porch = <3>; +- vfront-porch = <2>; +- vsync-len = <11>; +- hsync-active = <1>; +- vsync-active = <1>; +- de-active = <1>; +- pixelclk-active = <0>; +- }; + }; + + bl: backlight { +diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +index 381f0e82bb706..b0f6613e6d549 100644 +--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi ++++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +@@ -81,6 +81,8 @@ + }; + + &mcbsp2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&mcbsp2_pins>; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi +index b2ff27af090ec..9435ce527e855 100644 +--- a/arch/arm/boot/dts/ls1021a.dtsi ++++ b/arch/arm/boot/dts/ls1021a.dtsi +@@ -181,7 +181,7 @@ + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0 0x1550000 0x0 0x10000>, +- <0x0 0x40000000 0x0 0x40000000>; ++ <0x0 0x40000000 0x0 0x20000000>; + reg-names = "QuadSPI", "QuadSPI-memory"; + interrupts = <GIC_SPI 131 
IRQ_TYPE_LEVEL_HIGH>; + clock-names = "qspi_en", "qspi"; +diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi +index fb889c5b00c9d..de55ac5e60f39 100644 +--- a/arch/arm/boot/dts/omap5.dtsi ++++ b/arch/arm/boot/dts/omap5.dtsi +@@ -463,11 +463,11 @@ + }; + }; + +- target-module@5000 { ++ target-module@4000 { + compatible = "ti,sysc-omap2", "ti,sysc"; +- reg = <0x5000 0x4>, +- <0x5010 0x4>, +- <0x5014 0x4>; ++ reg = <0x4000 0x4>, ++ <0x4010 0x4>, ++ <0x4014 0x4>; + reg-names = "rev", "sysc", "syss"; + ti,sysc-sidle = <SYSC_IDLE_FORCE>, + <SYSC_IDLE_NO>, +@@ -479,7 +479,7 @@ + ti,syss-mask = <1>; + #address-cells = <1>; + #size-cells = <1>; +- ranges = <0 0x5000 0x1000>; ++ ranges = <0 0x4000 0x1000>; + + dsi1: encoder@0 { + compatible = "ti,omap5-dsi"; +@@ -489,8 +489,9 @@ + reg-names = "proto", "phy", "pll"; + interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; +- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>; +- clock-names = "fck"; ++ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, ++ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>; ++ clock-names = "fck", "sys_clk"; + }; + }; + +@@ -520,8 +521,9 @@ + reg-names = "proto", "phy", "pll"; + interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; +- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>; +- clock-names = "fck"; ++ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>, ++ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>; ++ clock-names = "fck", "sys_clk"; + }; + }; + +diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi +index 8f614c4b0e3eb..9c71472c237bd 100644 +--- a/arch/arm/boot/dts/socfpga_arria10.dtsi ++++ b/arch/arm/boot/dts/socfpga_arria10.dtsi +@@ -819,7 +819,7 @@ + timer3: timer3@ffd00100 { + compatible = "snps,dw-apb-timer"; + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; +- reg = <0xffd01000 0x100>; ++ reg = <0xffd00100 0x100>; + clocks = <&l4_sys_free_clk>; + clock-names = "timer"; + resets = <&rst L4SYSTIMER1_RESET>; +diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi +index 2d547e7b21ad5..8a14ac34d1313 100644 +--- a/arch/arm/boot/dts/vfxxx.dtsi ++++ b/arch/arm/boot/dts/vfxxx.dtsi +@@ -495,7 +495,7 @@ + }; + + ocotp: ocotp@400a5000 { +- compatible = "fsl,vf610-ocotp"; ++ compatible = "fsl,vf610-ocotp", "syscon"; + reg = <0x400a5000 0x1000>; + clocks = <&clks VF610_CLK_OCOTP>; + }; +diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c +index 54aff33e55e6e..bfa5e1b8dba7f 100644 +--- a/arch/arm/mach-omap2/omap-iommu.c ++++ b/arch/arm/mach-omap2/omap-iommu.c +@@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev) + return pwrdm; + + clk = of_clk_get(dev->of_node->parent, 0); +- if (!clk) { ++ if (IS_ERR(clk)) { + dev_err(dev, "no fck found\n"); + return NULL; + } +diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +index 15f7b0ed38369..39802066232e1 100644 +--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi ++++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +@@ -745,7 +745,7 @@ + }; + + qspi: spi@66470200 { +- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; ++ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; + reg = <0x66470200 0x184>, + <0x66470000 0x124>, + <0x67017408 0x004>, +diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile +index a39f0a1723e02..903c0eb61290d 100644 +--- a/arch/arm64/boot/dts/freescale/Makefile ++++ 
b/arch/arm64/boot/dts/freescale/Makefile +@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb + dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb + ++dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb + dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb + dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb + dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb +diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi +index 45e2c0a4e8896..437e2ccf8f866 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi ++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi +@@ -688,7 +688,7 @@ + reg = <0x30bd0000 0x10000>; + interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>, +- <&clk IMX8MP_CLK_SDMA1_ROOT>; ++ <&clk IMX8MP_CLK_AHB>; + clock-names = "ipg", "ahb"; + #dma-cells = <3>; + fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; +diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi +index 978f8122c0d2c..66ac66856e7e8 100644 +--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi ++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi +@@ -420,7 +420,7 @@ + tmu: tmu@30260000 { + compatible = "fsl,imx8mq-tmu"; + reg = <0x30260000 0x10000>; +- interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; ++ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk IMX8MQ_CLK_TMU_ROOT>; + little-endian; + fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>; +diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c +index 65b08a74aec65..37c0b51a7b7b5 100644 +--- a/arch/arm64/kernel/module-plts.c ++++ b/arch/arm64/kernel/module-plts.c +@@ -271,8 +271,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + mod->arch.core.plt_shndx = i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) + mod->arch.init.plt_shndx = i; +- else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && +- !strcmp(secstrings + sechdrs[i].sh_name, ++ else if (!strcmp(secstrings + sechdrs[i].sh_name, + ".text.ftrace_trampoline")) + tramp = sechdrs + i; + else if (sechdrs[i].sh_type == SHT_SYMTAB) +diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c +index bd47f06739d6c..d906350d543dd 100644 +--- a/arch/arm64/kvm/mmu.c ++++ b/arch/arm64/kvm/mmu.c +@@ -1873,6 +1873,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, + !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { + force_pte = true; + vma_pagesize = PAGE_SIZE; ++ vma_shift = PAGE_SHIFT; + } + + /* +@@ -1967,7 +1968,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, + (fault_status == FSC_PERM && + stage2_is_exec(kvm, fault_ipa, vma_pagesize)); + +- if (vma_pagesize == PUD_SIZE) { ++ /* ++ * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and ++ * all we have is a 2-level page table. Trying to map a PUD in ++ * this case would be fatally wrong. 
++ */ ++ if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) { + pud_t new_pud = kvm_pfn_pud(pfn, mem_type); + + new_pud = kvm_pud_mkhuge(new_pud); +diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h +index b6e9c99b85a52..eb181224eb4c4 100644 +--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h ++++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h +@@ -26,7 +26,6 @@ + #define cpu_has_counter 1 + #define cpu_has_dc_aliases (PAGE_SIZE < 0x4000) + #define cpu_has_divec 0 +-#define cpu_has_ejtag 0 + #define cpu_has_inclusive_pcaches 1 + #define cpu_has_llsc 1 + #define cpu_has_mcheck 0 +@@ -42,7 +41,6 @@ + #define cpu_has_veic 0 + #define cpu_has_vint 0 + #define cpu_has_vtag_icache 0 +-#define cpu_has_watch 1 + #define cpu_has_wsbh 1 + #define cpu_has_ic_fills_f_dc 1 + #define cpu_hwrena_impl_bits 0xc0000000 +diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig +index 08b7f4cef2434..ddf5e97877e2b 100644 +--- a/arch/powerpc/configs/pasemi_defconfig ++++ b/arch/powerpc/configs/pasemi_defconfig +@@ -109,7 +109,6 @@ CONFIG_FB_NVIDIA=y + CONFIG_FB_NVIDIA_I2C=y + CONFIG_FB_RADEON=y + # CONFIG_LCD_CLASS_DEVICE is not set +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_LOGO=y + CONFIG_SOUND=y + CONFIG_SND=y +diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig +index feb5d47d8d1e0..4cc9039e4deb9 100644 +--- a/arch/powerpc/configs/ppc6xx_defconfig ++++ b/arch/powerpc/configs/ppc6xx_defconfig +@@ -772,7 +772,6 @@ CONFIG_FB_TRIDENT=m + CONFIG_FB_SM501=m + CONFIG_FB_IBM_GXT4500=y + CONFIG_LCD_PLATFORM=m +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_FRAMEBUFFER_CONSOLE=y + CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y + CONFIG_LOGO=y +diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig +index 550904591e94d..62e834793af05 100644 +--- a/arch/x86/configs/i386_defconfig ++++ b/arch/x86/configs/i386_defconfig +@@ -202,7 +202,6 @@ CONFIG_FB_MODE_HELPERS=y + CONFIG_FB_TILEBLITTING=y + CONFIG_FB_EFI=y + # CONFIG_LCD_CLASS_DEVICE is not set +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_LOGO=y + # CONFIG_LOGO_LINUX_MONO is not set + # CONFIG_LOGO_LINUX_VGA16 is not set +diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig +index 6149610090751..075d97ec20d56 100644 +--- a/arch/x86/configs/x86_64_defconfig ++++ b/arch/x86/configs/x86_64_defconfig +@@ -197,7 +197,6 @@ CONFIG_FB_MODE_HELPERS=y + CONFIG_FB_TILEBLITTING=y + CONFIG_FB_EFI=y + # CONFIG_LCD_CLASS_DEVICE is not set +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_LOGO=y + # CONFIG_LOGO_LINUX_MONO is not set + # CONFIG_LOGO_LINUX_VGA16 is not set +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 9516a958e7801..1e6724c30cc05 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -2521,7 +2521,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, + } + + if (sp->unsync_children) +- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); ++ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); + + __clear_sp_write_flooding_count(sp); + trace_kvm_mmu_get_page(sp, false); +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 11e4df5600183..a5810928b011f 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -4620,7 +4620,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) + vmx->nested.msrs.entry_ctls_high &= + 
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + vmx->nested.msrs.exit_ctls_high &= +- ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; ++ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; + } + } + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index eb33c764d1593..b934e99649436 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -6028,6 +6028,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) + (exit_reason != EXIT_REASON_EXCEPTION_NMI && + exit_reason != EXIT_REASON_EPT_VIOLATION && + exit_reason != EXIT_REASON_PML_FULL && ++ exit_reason != EXIT_REASON_APIC_ACCESS && + exit_reason != EXIT_REASON_TASK_SWITCH)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index f7304132d5907..f5481ae588aff 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -2696,7 +2696,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) + return 1; + + if (!lapic_in_kernel(vcpu)) +- return 1; ++ return data ? 1 : 0; + + vcpu->arch.apf.msr_en_val = data; + +diff --git a/block/bio.c b/block/bio.c +index b1883adc8f154..eac129f21d2df 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -877,8 +877,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, + struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; + + if (page_is_mergeable(bv, page, len, off, same_page)) { +- if (bio->bi_iter.bi_size > UINT_MAX - len) ++ if (bio->bi_iter.bi_size > UINT_MAX - len) { ++ *same_page = false; + return false; ++ } + bv->bv_len += len; + bio->bi_iter.bi_size += len; + return true; +diff --git a/block/partitions/core.c b/block/partitions/core.c +index 534e11285a8d4..b45539764c994 100644 +--- a/block/partitions/core.c ++++ b/block/partitions/core.c +@@ -529,7 +529,7 @@ int bdev_del_partition(struct block_device *bdev, int partno) + + bdevp = bdget_disk(bdev->bd_disk, partno); + if (!bdevp) +- return -ENOMEM; ++ return -ENXIO; + + mutex_lock(&bdevp->bd_mutex); + mutex_lock_nested(&bdev->bd_mutex, 1); +diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c +index cc87004d5e2d6..5f22555933df1 100644 +--- a/drivers/atm/firestream.c ++++ b/drivers/atm/firestream.c +@@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc) + error = make_rate (pcr, r, &tmc0, NULL); + if (error) { + kfree(tc); ++ kfree(vcc); + return error; + } + } +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 4f61e92094614..ae6d7dbbf5d1e 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -5120,6 +5120,9 @@ static ssize_t rbd_config_info_show(struct device *dev, + { + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + return sprintf(buf, "%s\n", rbd_dev->config_info); + } + +@@ -5231,6 +5234,9 @@ static ssize_t rbd_image_refresh(struct device *dev, + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + int ret; + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + ret = rbd_dev_refresh(rbd_dev); + if (ret) + return ret; +@@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, + struct rbd_client *rbdc; + int rc; + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + +@@ -7209,6 +7218,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, + bool force = false; + int ret; + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + dev_id = -1; + opt_buf[0] = '\0'; + sscanf(buf, "%d %5s", &dev_id, opt_buf); +diff 
--git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 8c730a47e0537..36a469150ff9c 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -762,7 +762,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, + + rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); + WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); +- if (global.no_turbo) ++ if (global.no_turbo || global.turbo_disabled) + *current_max = HWP_GUARANTEED_PERF(cap); + else + *current_max = HWP_HIGHEST_PERF(cap); +@@ -2534,9 +2534,15 @@ static int intel_pstate_update_status(const char *buf, size_t size) + { + int ret; + +- if (size == 3 && !strncmp(buf, "off", size)) +- return intel_pstate_driver ? +- intel_pstate_unregister_driver() : -EINVAL; ++ if (size == 3 && !strncmp(buf, "off", size)) { ++ if (!intel_pstate_driver) ++ return -EINVAL; ++ ++ if (hwp_active) ++ return -EBUSY; ++ ++ return intel_pstate_unregister_driver(); ++ } + + if (size == 6 && !strncmp(buf, "active", size)) { + if (intel_pstate_driver) { +diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c +index 8a05db3343d39..dcbcb712de6e8 100644 +--- a/drivers/dma/acpi-dma.c ++++ b/drivers/dma/acpi-dma.c +@@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) + if (ret < 0) { + dev_warn(&adev->dev, + "error in parsing resource group\n"); +- return; ++ break; + } + + grp = (struct acpi_csrt_group *)((void *)grp + grp->length); + } ++ ++ acpi_put_table((struct acpi_table_header *)csrt); + } + + /** +diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c +index 448f663da89c6..8beed91428bd6 100644 +--- a/drivers/dma/dma-jz4780.c ++++ b/drivers/dma/dma-jz4780.c +@@ -879,24 +879,11 @@ static int jz4780_dma_probe(struct platform_device *pdev) + return -EINVAL; + } + +- ret = platform_get_irq(pdev, 0); +- if (ret < 0) +- return ret; +- +- jzdma->irq = ret; +- +- ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), +- jzdma); +- if (ret) { +- dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); +- return ret; +- } +- + jzdma->clk = devm_clk_get(dev, NULL); + if (IS_ERR(jzdma->clk)) { + dev_err(dev, "failed to get clock\n"); + ret = PTR_ERR(jzdma->clk); +- goto err_free_irq; ++ return ret; + } + + clk_prepare_enable(jzdma->clk); +@@ -949,10 +936,23 @@ static int jz4780_dma_probe(struct platform_device *pdev) + jzchan->vchan.desc_free = jz4780_dma_desc_free; + } + ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ goto err_disable_clk; ++ ++ jzdma->irq = ret; ++ ++ ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev), ++ jzdma); ++ if (ret) { ++ dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); ++ goto err_disable_clk; ++ } ++ + ret = dmaenginem_async_device_register(dd); + if (ret) { + dev_err(dev, "failed to register device\n"); +- goto err_disable_clk; ++ goto err_free_irq; + } + + /* Register with OF DMA helpers. 
*/ +@@ -960,17 +960,17 @@ static int jz4780_dma_probe(struct platform_device *pdev) + jzdma); + if (ret) { + dev_err(dev, "failed to register OF DMA controller\n"); +- goto err_disable_clk; ++ goto err_free_irq; + } + + dev_info(dev, "JZ4780 DMA controller initialised\n"); + return 0; + +-err_disable_clk: +- clk_disable_unprepare(jzdma->clk); +- + err_free_irq: + free_irq(jzdma->irq, jzdma); ++ ++err_disable_clk: ++ clk_disable_unprepare(jzdma->clk); + return ret; + } + +diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c +index a1b199de9006e..84e32634ed6cd 100644 +--- a/drivers/firmware/efi/embedded-firmware.c ++++ b/drivers/firmware/efi/embedded-firmware.c +@@ -16,9 +16,9 @@ + + /* Exported for use by lib/test_firmware.c only */ + LIST_HEAD(efi_embedded_fw_list); +-EXPORT_SYMBOL_GPL(efi_embedded_fw_list); +- +-static bool checked_for_fw; ++EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE); ++bool efi_embedded_fw_checked; ++EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE); + + static const struct dmi_system_id * const embedded_fw_table[] = { + #ifdef CONFIG_TOUCHSCREEN_DMI +@@ -119,14 +119,14 @@ void __init efi_check_for_embedded_firmwares(void) + } + } + +- checked_for_fw = true; ++ efi_embedded_fw_checked = true; + } + + int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size) + { + struct efi_embedded_fw *iter, *fw = NULL; + +- if (!checked_for_fw) { ++ if (!efi_embedded_fw_checked) { + pr_warn("Warning %s called while we did not check for embedded fw\n", + __func__); + return -ENOENT; +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +index 753cb2cf6b77e..3adf9c1dfdbb0 100644 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +@@ -3587,7 +3587,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, + case AMDGPU_PP_SENSOR_GPU_POWER: + return smu7_get_gpu_power(hwmgr, (uint32_t *)value); + case AMDGPU_PP_SENSOR_VDDGFX: +- if ((data->vr_config & 0xff) == 0x2) ++ if ((data->vr_config & VRCONF_VDDGFX_MASK) == ++ (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) + val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); + else +diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +index 6021f8d9efd1f..48fa49f69d6d0 100644 +--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu) + if (ret) + return ret; + ++ gpu_write(gpu, REG_AXXX_CP_RB_CNTL, ++ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); ++ ++ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); ++ + /* NOTE: PM4/micro-engine firmware registers look to be the same + * for a2xx and a3xx.. we could possibly push that part down to + * adreno_gpu base class. 
Or push both PM4 and PFP but +diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +index 0a5ea9f56cb88..f6471145a7a60 100644 +--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu) + if (ret) + return ret; + ++ /* ++ * Use the default ringbuffer size and block size but disable the RPTR ++ * shadow ++ */ ++ gpu_write(gpu, REG_AXXX_CP_RB_CNTL, ++ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); ++ ++ /* Set the ringbuffer address */ ++ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); ++ + /* setup access protection: */ + gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); + +diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +index b9b26b2bf9c54..9547536006254 100644 +--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu) + if (ret) + return ret; + ++ /* ++ * Use the default ringbuffer size and block size but disable the RPTR ++ * shadow ++ */ ++ gpu_write(gpu, REG_A4XX_CP_RB_CNTL, ++ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); ++ ++ /* Set the ringbuffer address */ ++ gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); ++ + /* Load PM4: */ + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +index d95970a73fb4b..1bf0969ce725f 100644 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +@@ -702,8 +702,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu) + if (ret) + return ret; + +- a5xx_preempt_hw_init(gpu); +- + if (!adreno_is_a510(adreno_gpu)) + a5xx_gpmu_ucode_init(gpu); + +@@ -711,6 +709,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu) + if (ret) + return ret; + ++ /* Set the ringbuffer address */ ++ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI, ++ gpu->rb[0]->iova); ++ ++ gpu_write(gpu, REG_A5XX_CP_RB_CNTL, ++ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); ++ ++ a5xx_preempt_hw_init(gpu); ++ + /* Disable the interrupts through the initial bringup stage */ + gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); + +@@ -1510,7 +1517,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) + + check_speed_bin(&pdev->dev); + +- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); ++ /* Restricting nr_rings to 1 to temporarily disable preemption */ ++ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + if (ret) { + a5xx_destroy(&(a5xx_gpu->base.base)); + return ERR_PTR(ret); +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +index 54868d4e3958f..1e5b1a15a70f0 100644 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +@@ -31,6 +31,7 @@ struct a5xx_gpu { + struct msm_ringbuffer *next_ring; + + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; ++ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS]; + struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; + uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +index 9cf9353a7ff11..9f3fe177b00e9 100644 +--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c ++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +@@ -226,19 
+226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, + struct adreno_gpu *adreno_gpu = &a5xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a5xx_preempt_record *ptr; +- struct drm_gem_object *bo = NULL; +- u64 iova = 0; ++ void *counters; ++ struct drm_gem_object *bo = NULL, *counters_bo = NULL; ++ u64 iova = 0, counters_iova = 0; + + ptr = msm_gem_kernel_new(gpu->dev, + A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE, +- MSM_BO_UNCACHED, gpu->aspace, &bo, &iova); ++ MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + ++ /* The buffer to store counters needs to be unprivileged */ ++ counters = msm_gem_kernel_new(gpu->dev, ++ A5XX_PREEMPT_COUNTER_SIZE, ++ MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova); ++ if (IS_ERR(counters)) { ++ msm_gem_kernel_put(bo, gpu->aspace, true); ++ return PTR_ERR(counters); ++ } ++ + msm_gem_object_set_name(bo, "preempt"); ++ msm_gem_object_set_name(counters_bo, "preempt_counters"); + + a5xx_gpu->preempt_bo[ring->id] = bo; ++ a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo; + a5xx_gpu->preempt_iova[ring->id] = iova; + a5xx_gpu->preempt[ring->id] = ptr; + +@@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, + ptr->data = 0; + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; + ptr->rptr_addr = rbmemptr(ring, rptr); +- ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE; ++ ptr->counter = counters_iova; + + return 0; + } +@@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu) + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + +- for (i = 0; i < gpu->nr_rings; i++) ++ for (i = 0; i < gpu->nr_rings; i++) { + msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true); ++ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], ++ gpu->aspace, true); ++ } + } + + void a5xx_preempt_init(struct msm_gpu *gpu) +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 7768557cdfb28..b7dc350d96fc8 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -557,6 +557,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu) + if (ret) + goto out; + ++ /* Set the ringbuffer address */ ++ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI, ++ gpu->rb[0]->iova); ++ ++ gpu_write(gpu, REG_A6XX_CP_RB_CNTL, ++ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); ++ + /* Always come up on rb 0 */ + a6xx_gpu->cur_ring = gpu->rb[0]; + +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +index e7b39f3ca33dc..a74ccc5b8220d 100644 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu) + ring->memptrs->rptr = 0; + } + +- /* +- * Setup REG_CP_RB_CNTL. The same value is used across targets (with +- * the excpetion of A430 that disables the RPTR shadow) - the cacluation +- * for the ringbuffer size and block size is moved to msm_gpu.h for the +- * pre-processor to deal with and the A430 variant is ORed in here +- */ +- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, +- MSM_GPU_RB_CNTL_DEFAULT | +- (adreno_is_a430(adreno_gpu) ? 
AXXX_CP_RB_CNTL_NO_UPDATE : 0)); +- +- /* Setup ringbuffer address - use ringbuffer[0] for GPU init */ +- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE, +- REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); +- +- if (!adreno_is_a430(adreno_gpu)) { +- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, +- REG_ADRENO_CP_RB_RPTR_ADDR_HI, +- rbmemptr(gpu->rb[0], rptr)); +- } +- + return 0; + } + +@@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu) + static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, + struct msm_ringbuffer *ring) + { +- if (adreno_is_a430(adreno_gpu)) +- return ring->memptrs->rptr = adreno_gpu_read( +- adreno_gpu, REG_ADRENO_CP_RB_RPTR); +- else +- return ring->memptrs->rptr; ++ return ring->memptrs->rptr = adreno_gpu_read( ++ adreno_gpu, REG_ADRENO_CP_RB_RPTR); + } + + struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) +diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c +index e397c44cc0112..39ecb5a18431e 100644 +--- a/drivers/gpu/drm/msm/msm_ringbuffer.c ++++ b/drivers/gpu/drm/msm/msm_ringbuffer.c +@@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, + ring->id = id; + + ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, +- MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova); ++ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo, ++ &ring->iova); + + if (IS_ERR(ring->start)) { + ret = PTR_ERR(ring->start); +diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c +index 072ea113e6be5..ed5d866178028 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_backend.c ++++ b/drivers/gpu/drm/sun4i/sun4i_backend.c +@@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine, + + /* We can't have an alpha plane at the lowest position */ + if (!backend->quirks->supports_lowest_plane_alpha && +- (plane_states[0]->fb->format->has_alpha || +- (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))) ++ (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)) + return -EINVAL; + + for (i = 1; i < num_planes; i++) { +@@ -995,7 +994,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = { + + static const struct sun4i_backend_quirks sun7i_backend_quirks = { + .needs_output_muxing = true, +- .supports_lowest_plane_alpha = true, + }; + + static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { +diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c +index 359b56e43b83c..24d95f058918c 100644 +--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c ++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c +@@ -1433,14 +1433,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon, + if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) && + encoder->encoder_type == DRM_MODE_ENCODER_TMDS) { + ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id); +- if (ret) ++ if (ret) { ++ put_device(&pdev->dev); + return ret; ++ } + } + + if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) { + ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id); +- if (ret) ++ if (ret) { ++ put_device(&pdev->dev); + return ret; ++ } + } + + return 0; +diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +index aa67cb037e9d1..32d4c3f7fc4eb 100644 +--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c ++++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +@@ -889,7 +889,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, + regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0), + sun6i_dsi_dcs_build_pkt_hdr(dsi, msg)); + +- 
bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL); ++ bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL); + if (!bounce) + return -ENOMEM; + +@@ -900,7 +900,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, + memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc)); + len += sizeof(crc); + +- regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len); ++ regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4)); + regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1); + kfree(bounce); + +diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +index 22c8c5375d0db..c0147af6a8406 100644 +--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c ++++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +@@ -211,7 +211,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, + return 0; + } + +-static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) ++static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format) + { + if (!format->is_yuv) + return SUN8I_CSC_MODE_OFF; +diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c +index d733bbc4ac0e5..17ff24d999d18 100644 +--- a/drivers/gpu/drm/tve200/tve200_display.c ++++ b/drivers/gpu/drm/tve200/tve200_display.c +@@ -14,6 +14,7 @@ + #include <linux/version.h> + #include <linux/dma-buf.h> + #include <linux/of_graph.h> ++#include <linux/delay.h> + + #include <drm/drm_fb_cma_helper.h> + #include <drm/drm_fourcc.h> +@@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, + struct drm_connector *connector = priv->connector; + u32 format = fb->format->format; + u32 ctrl1 = 0; ++ int retries; + + clk_prepare_enable(priv->clk); + ++ /* Reset the TVE200 and wait for it to come back online */ ++ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); ++ for (retries = 0; retries < 5; retries++) { ++ usleep_range(30000, 50000); ++ if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) ++ continue; ++ else ++ break; ++ } ++ if (retries == 5 && ++ readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) { ++ dev_err(drm->dev, "can't get hardware out of reset\n"); ++ return; ++ } ++ + /* Function 1 */ + ctrl1 |= TVE200_CTRL_CSMODE; + /* Interlace mode for CCIR656: parameterize? 
*/ +@@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe) + + drm_crtc_vblank_off(crtc); + +- /* Disable and Power Down */ ++ /* Disable put into reset and Power Down */ + writel(0, priv->regs + TVE200_CTRL); ++ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); + + clk_disable_unprepare(priv->clk); + } +@@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe) + struct drm_device *drm = crtc->dev; + struct tve200_drm_dev_private *priv = drm->dev_private; + ++ /* Clear any IRQs and enable */ ++ writel(0xFF, priv->regs + TVE200_INT_CLR); + writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN); + return 0; + } +diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c +index cc7fd957a3072..2b8421a35ab94 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_display.c ++++ b/drivers/gpu/drm/virtio/virtgpu_display.c +@@ -123,6 +123,17 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, + static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) + { ++ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); ++ ++ /* ++ * virtio-gpu can't do modeset and plane update operations ++ * independent from each other. So the actual modeset happens ++ * in the plane update callback, and here we just check ++ * whenever we must force the modeset. ++ */ ++ if (drm_atomic_crtc_needs_modeset(crtc->state)) { ++ output->needs_modeset = true; ++ } + } + + static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { +diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h +index 9ff9f4ac0522a..4ab1b0ba29253 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_drv.h ++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h +@@ -138,6 +138,7 @@ struct virtio_gpu_output { + int cur_x; + int cur_y; + bool enabled; ++ bool needs_modeset; + }; + #define drm_crtc_to_virtio_gpu_output(x) \ + container_of(x, struct virtio_gpu_output, crtc) +diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c +index 52d24179bcecc..65757409d9ed1 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_plane.c ++++ b/drivers/gpu/drm/virtio/virtgpu_plane.c +@@ -163,7 +163,9 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, + plane->state->src_w != old_state->src_w || + plane->state->src_h != old_state->src_h || + plane->state->src_x != old_state->src_x || +- plane->state->src_y != old_state->src_y) { ++ plane->state->src_y != old_state->src_y || ++ output->needs_modeset) { ++ output->needs_modeset = false; + DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", + bo->hw_res_handle, + plane->state->crtc_w, plane->state->crtc_h, +diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c +index 45c4f888b7c4e..dae193749d443 100644 +--- a/drivers/hid/hid-elan.c ++++ b/drivers/hid/hid-elan.c +@@ -188,6 +188,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) + ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); + if (ret) { + hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); ++ input_free_device(input); + return ret; + } + +@@ -198,6 +199,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) + if (ret) { + hid_err(hdev, "Failed to register elan input device: %d\n", + ret); ++ input_mt_destroy_slots(input); + input_free_device(input); + return ret; + } +diff --git a/drivers/hid/hid-ids.h 
b/drivers/hid/hid-ids.h +index 6ea3619842d8d..b49ec7dde6457 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -849,6 +849,7 @@ + #define USB_DEVICE_ID_MS_POWER_COVER 0x07da + #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd + #define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb ++#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0 + + #define USB_VENDOR_ID_MOJO 0x8282 + #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 +@@ -1014,6 +1015,8 @@ + #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa + #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 + #define USB_DEVICE_ID_SAITEK_X52 0x075c ++#define USB_DEVICE_ID_SAITEK_X52_2 0x0255 ++#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 + + #define USB_VENDOR_ID_SAMSUNG 0x0419 + #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 +diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c +index 2d8b589201a4e..8cb1ca1936e42 100644 +--- a/drivers/hid/hid-microsoft.c ++++ b/drivers/hid/hid-microsoft.c +@@ -451,6 +451,8 @@ static const struct hid_device_id ms_devices[] = { + .driver_data = MS_SURFACE_DIAL }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER), + .driver_data = MS_QUIRK_FF }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS), ++ .driver_data = MS_QUIRK_FF }, + { } + }; + MODULE_DEVICE_TABLE(hid, ms_devices); +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c +index a65aef6a322fb..7a2be0205dfd1 100644 +--- a/drivers/hid/hid-quirks.c ++++ b/drivers/hid/hid-quirks.c +@@ -150,6 +150,8 @@ static const struct hid_device_id hid_quirks[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, +diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c +index 75f07138a6fa2..dfcf04e1967f1 100644 +--- a/drivers/i2c/busses/i2c-npcm7xx.c ++++ b/drivers/i2c/busses/i2c-npcm7xx.c +@@ -2093,8 +2093,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + } + } + +- /* Adaptive TimeOut: astimated time in usec + 100% margin */ +- timeout_usec = (2 * 10000 / bus->bus_freq) * (2 + nread + nwrite); ++ /* ++ * Adaptive TimeOut: estimated time in usec + 100% margin: ++ * 2: double the timeout for clock stretching case ++ * 9: bits per transaction (including the ack/nack) ++ */ ++ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite); + timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec)); + if (nwrite >= 32 * 1024 || nread >= 32 * 1024) { + dev_err(bus->dev, "i2c%d buffer too big\n", bus->num); +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c +index 121b4e89f038c..bcdf25f32e220 100644 +--- a/drivers/iio/accel/bmc150-accel-core.c ++++ 
b/drivers/iio/accel/bmc150-accel-core.c +@@ -189,6 +189,14 @@ struct bmc150_accel_data { + struct mutex mutex; + u8 fifo_mode, watermark; + s16 buffer[8]; ++ /* ++ * Ensure there is sufficient space and correct alignment for ++ * the timestamp if enabled ++ */ ++ struct { ++ __le16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + u8 bw_bits; + u32 slope_dur; + u32 slope_thres; +@@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, + * now. + */ + for (i = 0; i < count; i++) { +- u16 sample[8]; + int j, bit; + + j = 0; + for_each_set_bit(bit, indio_dev->active_scan_mask, + indio_dev->masklength) +- memcpy(&sample[j++], &buffer[i * 3 + bit], 2); ++ memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], ++ sizeof(data->scan.channels[0])); + +- iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, ++ tstamp); + + tstamp += sample_period; + } +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c +index 0b876b2dc5bd4..76429e2a6fb8f 100644 +--- a/drivers/iio/accel/kxsd9.c ++++ b/drivers/iio/accel/kxsd9.c +@@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) + const struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct kxsd9_state *st = iio_priv(indio_dev); ++ /* ++ * Ensure correct positioning and alignment of timestamp. ++ * No need to zero initialize as all elements written. ++ */ ++ struct { ++ __be16 chan[4]; ++ s64 ts __aligned(8); ++ } hw_values; + int ret; +- /* 4 * 16bit values AND timestamp */ +- __be16 hw_values[8]; + + ret = regmap_bulk_read(st->map, + KXSD9_REG_X, +- &hw_values, +- 8); ++ hw_values.chan, ++ sizeof(hw_values.chan)); + if (ret) { + dev_err(st->dev, + "error reading data\n"); +@@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) + } + + iio_push_to_buffers_with_timestamp(indio_dev, +- hw_values, ++ &hw_values, + iio_get_time_ns(indio_dev)); + iio_trigger_notify_done(indio_dev->trig); + +diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c +index 8b5a6aff9bf4b..70ec3490bdb85 100644 +--- a/drivers/iio/accel/mma7455_core.c ++++ b/drivers/iio/accel/mma7455_core.c +@@ -52,6 +52,14 @@ + + struct mma7455_data { + struct regmap *regmap; ++ /* ++ * Used to reorganize data. 
Will ensure correct alignment of ++ * the timestamp if present ++ */ ++ struct { ++ __le16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + }; + + static int mma7455_drdy(struct mma7455_data *mma7455) +@@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mma7455_data *mma7455 = iio_priv(indio_dev); +- u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ + int ret; + + ret = mma7455_drdy(mma7455); + if (ret) + goto done; + +- ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, +- sizeof(__le16) * 3); ++ ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, ++ mma7455->scan.channels, ++ sizeof(mma7455->scan.channels)); + if (ret) + goto done; + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c +index 813bca7cfc3ed..85d453b3f5ec1 100644 +--- a/drivers/iio/accel/mma8452.c ++++ b/drivers/iio/accel/mma8452.c +@@ -110,6 +110,12 @@ struct mma8452_data { + int sleep_val; + struct regulator *vdd_reg; + struct regulator *vddio_reg; ++ ++ /* Ensure correct alignment of time stamp when present */ ++ struct { ++ __be16 channels[3]; ++ s64 ts __aligned(8); ++ } buffer; + }; + + /** +@@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mma8452_data *data = iio_priv(indio_dev); +- u8 buffer[16]; /* 3 16-bit channels + padding + ts */ + int ret; + +- ret = mma8452_read(data, (__be16 *)buffer); ++ ret = mma8452_read(data, data->buffer.channels); + if (ret < 0) + goto done; + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c +index bdd7cba6f6b0b..d3e9ec00ef959 100644 +--- a/drivers/iio/adc/ina2xx-adc.c ++++ b/drivers/iio/adc/ina2xx-adc.c +@@ -146,6 +146,11 @@ struct ina2xx_chip_info { + int range_vbus; /* Bus voltage maximum in V */ + int pga_gain_vshunt; /* Shunt voltage PGA gain */ + bool allow_async_readout; ++ /* data buffer needs space for channel data and timestamp */ ++ struct { ++ u16 chan[4]; ++ u64 ts __aligned(8); ++ } scan; + }; + + static const struct ina2xx_config ina2xx_config[] = { +@@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev) + static int ina2xx_work_buffer(struct iio_dev *indio_dev) + { + struct ina2xx_chip_info *chip = iio_priv(indio_dev); +- /* data buffer needs space for channel data and timestap */ +- unsigned short data[4 + sizeof(s64)/sizeof(short)]; + int bit, ret, i = 0; + s64 time; + +@@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) + if (ret < 0) + return ret; + +- data[i++] = val; ++ chip->scan.chan[i++] = val; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, data, time); ++ iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time); + + return 0; + }; +diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c +index 0c5d7aaf68262..b3d8cba6ce698 100644 +--- a/drivers/iio/adc/max1118.c ++++ b/drivers/iio/adc/max1118.c +@@ -35,6 +35,11 @@ struct max1118 { + struct spi_device *spi; + struct mutex lock; + struct regulator *reg; ++ /* Ensure natural alignment of buffer elements */ ++ struct { 
++ u8 channels[2]; ++ s64 ts __aligned(8); ++ } scan; + + u8 data ____cacheline_aligned; + }; +@@ -165,7 +170,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct max1118 *adc = iio_priv(indio_dev); +- u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */ + int scan_index; + int i = 0; + +@@ -183,10 +187,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) + goto out; + } + +- data[i] = ret; ++ adc->scan.channels[i] = ret; + i++; + } +- iio_push_to_buffers_with_timestamp(indio_dev, data, ++ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, + iio_get_time_ns(indio_dev)); + out: + mutex_unlock(&adc->lock); +diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c +index d86c0b5d80a3d..f96f0cecbcdef 100644 +--- a/drivers/iio/adc/mcp3422.c ++++ b/drivers/iio/adc/mcp3422.c +@@ -96,16 +96,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) + { + int ret; + +- mutex_lock(&adc->lock); +- + ret = i2c_master_send(adc->i2c, &newconfig, 1); + if (ret > 0) { + adc->config = newconfig; + ret = 0; + } + +- mutex_unlock(&adc->lock); +- + return ret; + } + +@@ -138,6 +134,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, + u8 config; + u8 req_channel = channel->channel; + ++ mutex_lock(&adc->lock); ++ + if (req_channel != MCP3422_CHANNEL(adc->config)) { + config = adc->config; + config &= ~MCP3422_CHANNEL_MASK; +@@ -145,12 +143,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc, + config &= ~MCP3422_PGA_MASK; + config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); + ret = mcp3422_update_config(adc, config); +- if (ret < 0) ++ if (ret < 0) { ++ mutex_unlock(&adc->lock); + return ret; ++ } + msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); + } + +- return mcp3422_read(adc, value, &config); ++ ret = mcp3422_read(adc, value, &config); ++ ++ mutex_unlock(&adc->lock); ++ ++ return ret; + } + + static int mcp3422_read_raw(struct iio_dev *iio, +diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c +index 0235863ff77b0..cc8cbffe2b7b5 100644 +--- a/drivers/iio/adc/ti-adc081c.c ++++ b/drivers/iio/adc/ti-adc081c.c +@@ -33,6 +33,12 @@ struct adc081c { + + /* 8, 10 or 12 */ + int bits; ++ ++ /* Ensure natural alignment of buffer elements */ ++ struct { ++ u16 channel; ++ s64 ts __aligned(8); ++ } scan; + }; + + #define REG_CONV_RES 0x00 +@@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct adc081c *data = iio_priv(indio_dev); +- u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */ + int ret; + + ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); + if (ret < 0) + goto out; +- buf[0] = ret; +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ data->scan.channel = ret; ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + out: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c +index bdedf456ee05d..fc053216d282c 100644 +--- a/drivers/iio/adc/ti-adc084s021.c ++++ b/drivers/iio/adc/ti-adc084s021.c +@@ -25,6 +25,11 @@ struct adc084s021 { + struct spi_transfer spi_trans; + struct regulator *reg; + struct mutex lock; ++ /* Buffer used to align data */ ++ struct { ++ __be16 channels[4]; ++ s64 ts __aligned(8); ++ } scan; + /* + * DMA (thus cache coherency 
maintenance) requires the + * transfer buffers to live in their own cache line. +@@ -140,14 +145,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc) + struct iio_poll_func *pf = pollfunc; + struct iio_dev *indio_dev = pf->indio_dev; + struct adc084s021 *adc = iio_priv(indio_dev); +- __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */ + + mutex_lock(&adc->lock); + +- if (adc084s021_adc_conversion(adc, &data) < 0) ++ if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0) + dev_err(&adc->spi->dev, "Failed to read data\n"); + +- iio_push_to_buffers_with_timestamp(indio_dev, data, ++ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, + iio_get_time_ns(indio_dev)); + mutex_unlock(&adc->lock); + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c +index 5ea4f45d6bade..64fe3b2a6ec6d 100644 +--- a/drivers/iio/adc/ti-ads1015.c ++++ b/drivers/iio/adc/ti-ads1015.c +@@ -316,6 +316,7 @@ static const struct iio_chan_spec ads1115_channels[] = { + IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), + }; + ++#ifdef CONFIG_PM + static int ads1015_set_power_state(struct ads1015_data *data, bool on) + { + int ret; +@@ -333,6 +334,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) + return ret < 0 ? ret : 0; + } + ++#else /* !CONFIG_PM */ ++ ++static int ads1015_set_power_state(struct ads1015_data *data, bool on) ++{ ++ return 0; ++} ++ ++#endif /* !CONFIG_PM */ ++ + static + int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) + { +diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c +index 3ecd633f9ed32..b2b6009078e10 100644 +--- a/drivers/iio/chemical/ccs811.c ++++ b/drivers/iio/chemical/ccs811.c +@@ -78,6 +78,11 @@ struct ccs811_data { + struct iio_trigger *drdy_trig; + struct gpio_desc *wakeup_gpio; + bool drdy_trig_on; ++ /* Ensures correct alignment of timestamp if present */ ++ struct { ++ s16 channels[2]; ++ s64 ts __aligned(8); ++ } scan; + }; + + static const struct iio_chan_spec ccs811_channels[] = { +@@ -327,17 +332,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p) + struct iio_dev *indio_dev = pf->indio_dev; + struct ccs811_data *data = iio_priv(indio_dev); + struct i2c_client *client = data->client; +- s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */ + int ret; + +- ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4, +- (u8 *)&buf); ++ ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, ++ sizeof(data->scan.channels), ++ (u8 *)data->scan.channels); + if (ret != 4) { + dev_err(&client->dev, "cannot read sensor data\n"); + goto err; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + + err: +diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +index c831915ca7e56..6b6b5987ac753 100644 +--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c ++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +@@ -72,10 +72,13 @@ static void get_default_min_max_freq(enum motionsensor_type type, + + switch (type) { + case MOTIONSENSE_TYPE_ACCEL: +- case MOTIONSENSE_TYPE_GYRO: + *min_freq = 12500; + *max_freq = 100000; + break; ++ case MOTIONSENSE_TYPE_GYRO: ++ *min_freq = 25000; ++ *max_freq = 100000; ++ break; + case MOTIONSENSE_TYPE_MAG: + *min_freq = 5000; + 
*max_freq = 25000; +diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c +index 5a3fcb127cd20..d10ed80566567 100644 +--- a/drivers/iio/light/ltr501.c ++++ b/drivers/iio/light/ltr501.c +@@ -1243,13 +1243,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ltr501_data *data = iio_priv(indio_dev); +- u16 buf[8]; ++ struct { ++ u16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + __le16 als_buf[2]; + u8 mask = 0; + int j = 0; + int ret, psdata; + +- memset(buf, 0, sizeof(buf)); ++ memset(&scan, 0, sizeof(scan)); + + /* figure out which data needs to be ready */ + if (test_bit(0, indio_dev->active_scan_mask) || +@@ -1268,9 +1271,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) + if (ret < 0) + return ret; + if (test_bit(0, indio_dev->active_scan_mask)) +- buf[j++] = le16_to_cpu(als_buf[1]); ++ scan.channels[j++] = le16_to_cpu(als_buf[1]); + if (test_bit(1, indio_dev->active_scan_mask)) +- buf[j++] = le16_to_cpu(als_buf[0]); ++ scan.channels[j++] = le16_to_cpu(als_buf[0]); + } + + if (mask & LTR501_STATUS_PS_RDY) { +@@ -1278,10 +1281,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) + &psdata, 2); + if (ret < 0) + goto done; +- buf[j++] = psdata & LTR501_PS_DATA_MASK; ++ scan.channels[j++] = psdata & LTR501_PS_DATA_MASK; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c +index d6d8007ba430a..8cc619de2c3ae 100644 +--- a/drivers/iio/light/max44000.c ++++ b/drivers/iio/light/max44000.c +@@ -75,6 +75,11 @@ + struct max44000_data { + struct mutex lock; + struct regmap *regmap; ++ /* Ensure naturally aligned timestamp */ ++ struct { ++ u16 channels[2]; ++ s64 ts __aligned(8); ++ } scan; + }; + + /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ +@@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct max44000_data *data = iio_priv(indio_dev); +- u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ + int index = 0; + unsigned int regval; + int ret; +@@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) + ret = max44000_read_alsval(data); + if (ret < 0) + goto out_unlock; +- buf[index++] = ret; ++ data->scan.channels[index++] = ret; + } + if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { + ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, ®val); + if (ret < 0) + goto out_unlock; +- buf[index] = regval; ++ data->scan.channels[index] = regval; + } + mutex_unlock(&data->lock); + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + iio_trigger_notify_done(indio_dev->trig); + return IRQ_HANDLED; +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c +index 3c881541ae72f..3fc44ec45f763 100644 +--- a/drivers/iio/magnetometer/ak8975.c ++++ b/drivers/iio/magnetometer/ak8975.c +@@ -365,6 +365,12 @@ struct ak8975_data { + struct iio_mount_matrix orientation; + struct regulator *vdd; + struct regulator *vid; ++ ++ /* Ensure natural alignment of timestamp */ ++ struct { ++ s16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + }; + + /* Enable attached power regulator if any. 
*/ +@@ -787,7 +793,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) + const struct i2c_client *client = data->client; + const struct ak_def *def = data->def; + int ret; +- s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ + __le16 fval[3]; + + mutex_lock(&data->lock); +@@ -810,12 +815,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) + mutex_unlock(&data->lock); + + /* Clamp to valid range. */ +- buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); +- buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); +- buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); ++ data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); ++ data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); ++ data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + +- iio_push_to_buffers_with_timestamp(indio_dev, buff, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); ++ + return; + + unlock: +diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c +index 166b3e6d7db89..5254b1fbccfdc 100644 +--- a/drivers/iio/proximity/mb1232.c ++++ b/drivers/iio/proximity/mb1232.c +@@ -40,6 +40,11 @@ struct mb1232_data { + */ + struct completion ranging; + int irqnr; ++ /* Ensure correct alignment of data to push to IIO buffer */ ++ struct { ++ s16 distance; ++ s64 ts __aligned(8); ++ } scan; + }; + + static irqreturn_t mb1232_handle_irq(int irq, void *dev_id) +@@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mb1232_data *data = iio_priv(indio_dev); +- /* +- * triggered buffer +- * 16-bit channel + 48-bit padding + 64-bit timestamp +- */ +- s16 buffer[8] = { 0 }; + +- buffer[0] = mb1232_read_distance(data); +- if (buffer[0] < 0) ++ data->scan.distance = mb1232_read_distance(data); ++ if (data->scan.distance < 0) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, ++ pf->timestamp); + + err: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c +index 513825e424bff..a92fc3f90bb5b 100644 +--- a/drivers/infiniband/core/cq.c ++++ b/drivers/infiniband/core/cq.c +@@ -379,7 +379,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes, + { + LIST_HEAD(tmp_list); + unsigned int nr_cqs, i; +- struct ib_cq *cq; ++ struct ib_cq *cq, *n; + int ret; + + if (poll_ctx > IB_POLL_LAST_POOL_TYPE) { +@@ -412,7 +412,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes, + return 0; + + out_free_cqs: +- list_for_each_entry(cq, &tmp_list, pool_entry) { ++ list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) { + cq->shared = false; + ib_free_cq(cq); + } +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index f369f0a19e851..1b0ea945756f0 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -1803,7 +1803,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) + + dev_put(netdev); + +- if (!rc) { ++ if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { + netdev_speed = lksettings.base.speed; + } else { + netdev_speed = SPEED_1000; +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c 
b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +index 8b6ad5cddfce9..cb6e873039df5 100644 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c +@@ -752,12 +752,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) + gsi_sqp = rdev->gsi_ctx.gsi_sqp; + gsi_sah = rdev->gsi_ctx.gsi_sah; + +- /* remove from active qp list */ +- mutex_lock(&rdev->qp_lock); +- list_del(&gsi_sqp->list); +- mutex_unlock(&rdev->qp_lock); +- atomic_dec(&rdev->qp_count); +- + ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n"); + bnxt_qplib_destroy_ah(&rdev->qplib_res, + &gsi_sah->qplib_ah, +@@ -772,6 +766,12 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) + } + bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); + ++ /* remove from active qp list */ ++ mutex_lock(&rdev->qp_lock); ++ list_del(&gsi_sqp->list); ++ mutex_unlock(&rdev->qp_lock); ++ atomic_dec(&rdev->qp_count); ++ + kfree(rdev->gsi_ctx.sqp_tbl); + kfree(gsi_sah); + kfree(gsi_sqp); +@@ -792,11 +792,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) + unsigned int flags; + int rc; + +- mutex_lock(&rdev->qp_lock); +- list_del(&qp->list); +- mutex_unlock(&rdev->qp_lock); +- atomic_dec(&rdev->qp_count); +- + bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); + + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); +@@ -819,6 +814,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) + goto sh_fail; + } + ++ mutex_lock(&rdev->qp_lock); ++ list_del(&qp->list); ++ mutex_unlock(&rdev->qp_lock); ++ atomic_dec(&rdev->qp_count); ++ + ib_umem_release(qp->rumem); + ib_umem_release(qp->sumem); + +@@ -3178,6 +3178,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, + wc->wc_flags |= IB_WC_GRH; + } + ++static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, ++ u16 vlan_id) ++{ ++ /* ++ * Check if the vlan is configured in the host. If not configured, it ++ * can be a transparent VLAN. So dont report the vlan id. 
++ */ ++ if (!__vlan_find_dev_deep_rcu(rdev->netdev, ++ htons(ETH_P_8021Q), vlan_id)) ++ return false; ++ return true; ++} ++ + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, + u16 *vid, u8 *sl) + { +@@ -3246,9 +3259,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, + wc->src_qp = orig_cqe->src_qp; + memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); + if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { +- wc->vlan_id = vlan_id; +- wc->sl = sl; +- wc->wc_flags |= IB_WC_WITH_VLAN; ++ if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { ++ wc->vlan_id = vlan_id; ++ wc->sl = sl; ++ wc->wc_flags |= IB_WC_WITH_VLAN; ++ } + } + wc->port_num = 1; + wc->vendor_err = orig_cqe->status; +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c +index 5c41e13496a02..882c4f49d3a87 100644 +--- a/drivers/infiniband/hw/bnxt_re/main.c ++++ b/drivers/infiniband/hw/bnxt_re/main.c +@@ -1027,8 +1027,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev) + struct bnxt_qplib_nq *nq; + + nq = &rdev->nq[i]; +- nq->hwq.max_elements = (qplib_ctx->cq_count + +- qplib_ctx->srqc_count + 2); ++ nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT; + rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]); + if (rc) { + ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x", +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c +index c5e29577cd434..4b53f79b91d1d 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c +@@ -796,6 +796,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) + u16 cmd_flags = 0; + u32 qp_flags = 0; + u8 pg_sz_lvl; ++ u32 tbl_indx; + int rc; + + RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags); +@@ -891,8 +892,9 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) + rq->dbinfo.xid = qp->id; + rq->dbinfo.db = qp->dpi->dbr; + } +- rcfw->qp_tbl[qp->id].qp_id = qp->id; +- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; ++ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); ++ rcfw->qp_tbl[tbl_indx].qp_id = qp->id; ++ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; + + return 0; + +@@ -920,10 +922,10 @@ static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size) + sq = &qp->sq; + hwq = &sq->hwq; + ++ /* First psn entry */ + fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->max_elements, &psn_pg); + if (!IS_ALIGNED(fpsne, PAGE_SIZE)) +- indx_pad = ALIGN(fpsne, PAGE_SIZE) / size; +- ++ indx_pad = (fpsne & ~PAGE_MASK) / size; + page = (u64 *)psn_pg; + for (indx = 0; indx < hwq->max_elements; indx++) { + pg_num = (indx + indx_pad) / (PAGE_SIZE / size); +@@ -950,6 +952,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) + u32 qp_flags = 0; + u8 pg_sz_lvl; + u16 max_rsge; ++ u32 tbl_indx; + + RCFW_CMD_PREP(req, CREATE_QP, cmd_flags); + +@@ -1118,8 +1121,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) + rq->dbinfo.xid = qp->id; + rq->dbinfo.db = qp->dpi->dbr; + } +- rcfw->qp_tbl[qp->id].qp_id = qp->id; +- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp; ++ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); ++ rcfw->qp_tbl[tbl_indx].qp_id = qp->id; ++ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; + + return 0; + +@@ -1467,10 +1471,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, + struct cmdq_destroy_qp req; + struct creq_destroy_qp_resp resp; + u16 cmd_flags = 0; ++ u32 tbl_indx; + int rc; + +- rcfw->qp_tbl[qp->id].qp_id = 
BNXT_QPLIB_QP_ID_INVALID; +- rcfw->qp_tbl[qp->id].qp_handle = NULL; ++ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); ++ rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID; ++ rcfw->qp_tbl[tbl_indx].qp_handle = NULL; + + RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); + +@@ -1478,8 +1484,8 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) { +- rcfw->qp_tbl[qp->id].qp_id = qp->id; +- rcfw->qp_tbl[qp->id].qp_handle = qp; ++ rcfw->qp_tbl[tbl_indx].qp_id = qp->id; ++ rcfw->qp_tbl[tbl_indx].qp_handle = qp; + return rc; + } + +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +index 4e211162acee2..f7736e34ac64c 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +@@ -307,14 +307,15 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, + __le16 mcookie; + u16 cookie; + int rc = 0; +- u32 qp_id; ++ u32 qp_id, tbl_indx; + + pdev = rcfw->pdev; + switch (qp_event->event) { + case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: + err_event = (struct creq_qp_error_notification *)qp_event; + qp_id = le32_to_cpu(err_event->xid); +- qp = rcfw->qp_tbl[qp_id].qp_handle; ++ tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw); ++ qp = rcfw->qp_tbl[tbl_indx].qp_handle; + dev_dbg(&pdev->dev, "Received QP error notification\n"); + dev_dbg(&pdev->dev, + "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", +@@ -615,8 +616,9 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, + + cmdq->bmap_size = bmap_size; + +- rcfw->qp_tbl_size = qp_tbl_sz; +- rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node), ++ /* Allocate one extra to hold the QP1 entries */ ++ rcfw->qp_tbl_size = qp_tbl_sz + 1; ++ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), + GFP_KERNEL); + if (!rcfw->qp_tbl) + goto fail; +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +index 157387636d004..5f2f0a5a3560f 100644 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +@@ -216,4 +216,9 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); + int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_ctx *ctx, int is_virtfn); + void bnxt_qplib_mark_qp_error(void *qp_handle); ++static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw) ++{ ++ /* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/ ++ return (qid == 1) ? 
rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2; ++} + #endif /* __BNXT_QPLIB_RCFW_H__ */ +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index 816d28854a8e1..5bcf481a9c3c2 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -784,7 +784,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, + props->ip_gids = true; + props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; + props->max_msg_sz = mdev->dev->caps.max_msg_sz; +- props->pkey_tbl_len = 1; ++ if (mdev->dev->caps.pkey_table_len[port]) ++ props->pkey_tbl_len = 1; + props->max_mtu = IB_MTU_4096; + props->max_vl_num = 2; + props->state = IB_PORT_DOWN; +diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c +index 5642eefb4ba1c..d6b1236b114ab 100644 +--- a/drivers/infiniband/sw/rxe/rxe.c ++++ b/drivers/infiniband/sw/rxe/rxe.c +@@ -48,6 +48,8 @@ static void rxe_cleanup_ports(struct rxe_dev *rxe) + + } + ++bool rxe_initialized; ++ + /* free resources for a rxe device all objects created for this device must + * have been destroyed + */ +@@ -147,9 +149,6 @@ static int rxe_init_ports(struct rxe_dev *rxe) + + rxe_init_port_param(port); + +- if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len) +- return -EINVAL; +- + port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len, + sizeof(*port->pkey_tbl), GFP_KERNEL); + +@@ -348,6 +347,7 @@ static int __init rxe_module_init(void) + return err; + + rdma_link_register(&rxe_link_ops); ++ rxe_initialized = true; + pr_info("loaded\n"); + return 0; + } +@@ -359,6 +359,7 @@ static void __exit rxe_module_exit(void) + rxe_net_exit(); + rxe_cache_exit(); + ++ rxe_initialized = false; + pr_info("unloaded\n"); + } + +diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h +index fb07eed9e4028..cae1b0a24c850 100644 +--- a/drivers/infiniband/sw/rxe/rxe.h ++++ b/drivers/infiniband/sw/rxe/rxe.h +@@ -67,6 +67,8 @@ + + #define RXE_ROCE_V2_SPORT (0xc000) + ++extern bool rxe_initialized; ++ + static inline u32 rxe_crc32(struct rxe_dev *rxe, + u32 crc, void *next, size_t len) + { +diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c +index e83c7b518bfa2..bfb96a0d071bb 100644 +--- a/drivers/infiniband/sw/rxe/rxe_mr.c ++++ b/drivers/infiniband/sw/rxe/rxe_mr.c +@@ -207,6 +207,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, + vaddr = page_address(sg_page_iter_page(&sg_iter)); + if (!vaddr) { + pr_warn("null vaddr\n"); ++ ib_umem_release(umem); + err = -ENOMEM; + goto err1; + } +diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c +index ccda5f5a3bc0a..2af31d421bfc3 100644 +--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c ++++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c +@@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp) + struct net_device *ndev; + struct rxe_dev *exists; + ++ if (!rxe_initialized) { ++ pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); ++ return -EAGAIN; ++ } ++ + len = sanitize_arg(val, intf, sizeof(intf)); + if (!len) { + pr_err("add: invalid interface name\n"); +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c +index 84fec5fd798d5..00ba6fb1e6763 100644 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c +@@ -1083,7 +1083,7 @@ static ssize_t parent_show(struct device *device, + struct rxe_dev *rxe = + rdma_device_to_drv_device(device, struct 
rxe_dev, ib_dev); + +- return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1)); ++ return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); + } + + static DEVICE_ATTR_RO(parent); +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c +index b7df38ee8ae05..49ca8727e3fa3 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.c ++++ b/drivers/infiniband/ulp/isert/ib_isert.c +@@ -183,15 +183,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) + rx_desc = isert_conn->rx_descs; + + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { +- dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf, ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ib_dev, dma_addr)) + goto dma_map_fail; + + rx_desc->dma_addr = dma_addr; + + rx_sg = &rx_desc->rx_sg; +- rx_sg->addr = rx_desc->dma_addr; ++ rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc); + rx_sg->length = ISER_RX_PAYLOAD_SIZE; + rx_sg->lkey = device->pd->local_dma_lkey; + rx_desc->rx_cqe.done = isert_recv_done; +@@ -203,7 +203,7 @@ dma_map_fail: + rx_desc = isert_conn->rx_descs; + for (j = 0; j < i; j++, rx_desc++) { + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + } + kfree(isert_conn->rx_descs); + isert_conn->rx_descs = NULL; +@@ -224,7 +224,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) + rx_desc = isert_conn->rx_descs; + for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) { + ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + } + + kfree(isert_conn->rx_descs); +@@ -409,10 +409,9 @@ isert_free_login_buf(struct isert_conn *isert_conn) + ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE); + kfree(isert_conn->login_rsp_buf); + +- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, +- ISER_RX_PAYLOAD_SIZE, +- DMA_FROM_DEVICE); +- kfree(isert_conn->login_req_buf); ++ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, ++ ISER_RX_SIZE, DMA_FROM_DEVICE); ++ kfree(isert_conn->login_desc); + } + + static int +@@ -421,25 +420,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, + { + int ret; + +- isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf), ++ isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc), + GFP_KERNEL); +- if (!isert_conn->login_req_buf) ++ if (!isert_conn->login_desc) + return -ENOMEM; + +- isert_conn->login_req_dma = ib_dma_map_single(ib_dev, +- isert_conn->login_req_buf, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); +- ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); ++ isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev, ++ isert_conn->login_desc->buf, ++ ISER_RX_SIZE, DMA_FROM_DEVICE); ++ ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr); + if (ret) { +- isert_err("login_req_dma mapping error: %d\n", ret); +- isert_conn->login_req_dma = 0; +- goto out_free_login_req_buf; ++ isert_err("login_desc dma mapping error: %d\n", ret); ++ isert_conn->login_desc->dma_addr = 0; ++ goto out_free_login_desc; + } + + isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); + if (!isert_conn->login_rsp_buf) { + ret = -ENOMEM; +- goto out_unmap_login_req_buf; ++ goto out_unmap_login_desc; + } + + isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev, +@@ -456,11 +455,11 @@ isert_alloc_login_buf(struct isert_conn 
*isert_conn, + + out_free_login_rsp_buf: + kfree(isert_conn->login_rsp_buf); +-out_unmap_login_req_buf: +- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); +-out_free_login_req_buf: +- kfree(isert_conn->login_req_buf); ++out_unmap_login_desc: ++ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr, ++ ISER_RX_SIZE, DMA_FROM_DEVICE); ++out_free_login_desc: ++ kfree(isert_conn->login_desc); + return ret; + } + +@@ -579,7 +578,7 @@ isert_connect_release(struct isert_conn *isert_conn) + ib_destroy_qp(isert_conn->qp); + } + +- if (isert_conn->login_req_buf) ++ if (isert_conn->login_desc) + isert_free_login_buf(isert_conn); + + isert_device_put(device); +@@ -965,17 +964,18 @@ isert_login_post_recv(struct isert_conn *isert_conn) + int ret; + + memset(&sge, 0, sizeof(struct ib_sge)); +- sge.addr = isert_conn->login_req_dma; ++ sge.addr = isert_conn->login_desc->dma_addr + ++ isert_get_hdr_offset(isert_conn->login_desc); + sge.length = ISER_RX_PAYLOAD_SIZE; + sge.lkey = isert_conn->device->pd->local_dma_lkey; + + isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", + sge.addr, sge.length, sge.lkey); + +- isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done; ++ isert_conn->login_desc->rx_cqe.done = isert_login_recv_done; + + memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); +- rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; ++ rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; + rx_wr.sg_list = &sge; + rx_wr.num_sge = 1; + +@@ -1052,7 +1052,7 @@ post_send: + static void + isert_rx_login_req(struct isert_conn *isert_conn) + { +- struct iser_rx_desc *rx_desc = isert_conn->login_req_buf; ++ struct iser_rx_desc *rx_desc = isert_conn->login_desc; + int rx_buflen = isert_conn->login_req_len; + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_login *login = conn->conn_login; +@@ -1064,7 +1064,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) + + if (login->first_request) { + struct iscsi_login_req *login_req = +- (struct iscsi_login_req *)&rx_desc->iscsi_header; ++ (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc); + /* + * Setup the initial iscsi_login values from the leading + * login request PDU. 
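[Editorial aside, not part of the patch: the ib_isert hunks in this patch replace direct access to the packed iser_rx_desc fields with isert_get_iser_hdr()/isert_get_iscsi_hdr()/isert_get_data() accessors over a flat receive buffer, so the data payload always starts on a 512-byte boundary. A standalone sketch of that alignment arithmetic follows; HDRS_LEN and RX_SIZE are illustrative stand-ins, not the kernel's ISER_HEADERS_LEN/ISER_RX_SIZE values, and the code is not the kernel implementation.]

/*
 * Standalone sketch of the trick used by the new isert_get_*() helpers:
 * round (buf + headers) up to a 512-byte boundary, then step back over the
 * headers, so the payload that follows the headers is 512-byte aligned.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HDRS_LEN 28              /* assumed combined iSER + iSCSI header size */
#define RX_SIZE  (8192 + 1024)   /* payload plus slack for 512-byte alignment */

struct rx_desc {
	char buf[RX_SIZE];
};

static void *get_hdr(struct rx_desc *d)
{
	uintptr_t p = (uintptr_t)d->buf + HDRS_LEN;

	p = (p + 511) & ~(uintptr_t)511;   /* equivalent of PTR_ALIGN(.., 512) */
	return (void *)(p - HDRS_LEN);
}

static void *get_data(struct rx_desc *d)
{
	return (char *)get_hdr(d) + HDRS_LEN;
}

int main(void)
{
	struct rx_desc d;

	/* Data area is 512-byte aligned wherever buf happens to land. */
	assert(((uintptr_t)get_data(&d) & 511) == 0);
	printf("hdr offset %zu, data %p\n",
	       (size_t)((char *)get_hdr(&d) - d.buf), get_data(&d));
	return 0;
}

[Aligning the header block downward from a 512-byte boundary is what lets the payload satisfy the "data needs to align to 512 boundary" requirement stated in the ib_isert.h comment later in this patch.]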
+@@ -1083,13 +1083,13 @@ isert_rx_login_req(struct isert_conn *isert_conn) + login->tsih = be16_to_cpu(login_req->tsih); + } + +- memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); ++ memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN); + + size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); + isert_dbg("Using login payload size: %d, rx_buflen: %d " + "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, + MAX_KEY_VALUE_PAIRS); +- memcpy(login->req_buf, &rx_desc->data[0], size); ++ memcpy(login->req_buf, isert_get_data(rx_desc), size); + + if (login->first_request) { + complete(&isert_conn->login_comp); +@@ -1154,14 +1154,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, + if (imm_data_len != data_len) { + sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); + sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents, +- &rx_desc->data[0], imm_data_len); ++ isert_get_data(rx_desc), imm_data_len); + isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n", + sg_nents, imm_data_len); + } else { + sg_init_table(&isert_cmd->sg, 1); + cmd->se_cmd.t_data_sg = &isert_cmd->sg; + cmd->se_cmd.t_data_nents = 1; +- sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len); ++ sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc), ++ imm_data_len); + isert_dbg("Transfer Immediate imm_data_len: %d\n", + imm_data_len); + } +@@ -1230,9 +1231,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, + } + isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " + "sg_nents: %u from %p %u\n", sg_start, sg_off, +- sg_nents, &rx_desc->data[0], unsol_data_len); ++ sg_nents, isert_get_data(rx_desc), unsol_data_len); + +- sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], ++ sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc), + unsol_data_len); + + rc = iscsit_check_dataout_payload(cmd, hdr, false); +@@ -1291,7 +1292,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd + } + cmd->text_in_ptr = text_in; + +- memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length); ++ memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length); + + return iscsit_process_text_cmd(conn, cmd, hdr); + } +@@ -1301,7 +1302,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, + uint32_t read_stag, uint64_t read_va, + uint32_t write_stag, uint64_t write_va) + { +- struct iscsi_hdr *hdr = &rx_desc->iscsi_header; ++ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); + struct iscsi_conn *conn = isert_conn->conn; + struct iscsi_cmd *cmd; + struct isert_cmd *isert_cmd; +@@ -1399,8 +1400,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) + struct isert_conn *isert_conn = wc->qp->qp_context; + struct ib_device *ib_dev = isert_conn->cm_id->device; + struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); +- struct iscsi_hdr *hdr = &rx_desc->iscsi_header; +- struct iser_ctrl *iser_ctrl = &rx_desc->iser_header; ++ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc); ++ struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc); + uint64_t read_va = 0, write_va = 0; + uint32_t read_stag = 0, write_stag = 0; + +@@ -1414,7 +1415,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) + rx_desc->in_use = true; + + ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + + isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", + rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags, +@@ -1449,7 +1450,7 @@ 
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) + read_stag, read_va, write_stag, write_va); + + ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + } + + static void +@@ -1463,8 +1464,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) + return; + } + +- ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr, ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + + isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN; + +@@ -1479,8 +1480,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) + complete(&isert_conn->login_req_comp); + mutex_unlock(&isert_conn->mutex); + +- ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma, +- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ++ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr, ++ ISER_RX_SIZE, DMA_FROM_DEVICE); + } + + static void +diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h +index 3b296bac4f603..d267a6d60d87d 100644 +--- a/drivers/infiniband/ulp/isert/ib_isert.h ++++ b/drivers/infiniband/ulp/isert/ib_isert.h +@@ -59,9 +59,11 @@ + ISERT_MAX_TX_MISC_PDUS + \ + ISERT_MAX_RX_MISC_PDUS) + +-#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ +- (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ +- sizeof(struct ib_cqe) + sizeof(bool))) ++/* ++ * RX size is default of 8k plus headers, but data needs to align to ++ * 512 boundary, so use 1024 to have the extra space for alignment. ++ */ ++#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024) + + #define ISCSI_ISER_SG_TABLESIZE 256 + +@@ -80,21 +82,41 @@ enum iser_conn_state { + }; + + struct iser_rx_desc { +- struct iser_ctrl iser_header; +- struct iscsi_hdr iscsi_header; +- char data[ISCSI_DEF_MAX_RECV_SEG_LEN]; ++ char buf[ISER_RX_SIZE]; + u64 dma_addr; + struct ib_sge rx_sg; + struct ib_cqe rx_cqe; + bool in_use; +- char pad[ISER_RX_PAD_SIZE]; +-} __packed; ++}; + + static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe) + { + return container_of(cqe, struct iser_rx_desc, rx_cqe); + } + ++static void *isert_get_iser_hdr(struct iser_rx_desc *desc) ++{ ++ return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN; ++} ++ ++static size_t isert_get_hdr_offset(struct iser_rx_desc *desc) ++{ ++ return isert_get_iser_hdr(desc) - (void *)desc->buf; ++} ++ ++static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc) ++{ ++ return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl); ++} ++ ++static void *isert_get_data(struct iser_rx_desc *desc) ++{ ++ void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN; ++ ++ WARN_ON((uintptr_t)data & 511); ++ return data; ++} ++ + struct iser_tx_desc { + struct iser_ctrl iser_header; + struct iscsi_hdr iscsi_header; +@@ -141,9 +163,8 @@ struct isert_conn { + u32 responder_resources; + u32 initiator_depth; + bool pi_support; +- struct iser_rx_desc *login_req_buf; ++ struct iser_rx_desc *login_desc; + char *login_rsp_buf; +- u64 login_req_dma; + int login_req_len; + u64 login_rsp_dma; + struct iser_rx_desc *rx_descs; +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c +index 3d7877534bcc9..cf6a2be61695d 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c +@@ -152,13 +152,6 @@ static struct attribute_group 
rtrs_srv_stats_attr_group = { + .attrs = rtrs_srv_stats_attrs, + }; + +-static void rtrs_srv_dev_release(struct device *dev) +-{ +- struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); +- +- kfree(srv); +-} +- + static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) + { + struct rtrs_srv *srv = sess->srv; +@@ -172,7 +165,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) + goto unlock; + } + srv->dev.class = rtrs_dev_class; +- srv->dev.release = rtrs_srv_dev_release; + err = dev_set_name(&srv->dev, "%s", sess->s.sessname); + if (err) + goto unlock; +@@ -182,16 +174,16 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) + * sysfs files are created + */ + dev_set_uevent_suppress(&srv->dev, true); +- err = device_register(&srv->dev); ++ err = device_add(&srv->dev); + if (err) { +- pr_err("device_register(): %d\n", err); ++ pr_err("device_add(): %d\n", err); + goto put; + } + srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj); + if (!srv->kobj_paths) { + err = -ENOMEM; + pr_err("kobject_create_and_add(): %d\n", err); +- device_unregister(&srv->dev); ++ device_del(&srv->dev); + goto unlock; + } + dev_set_uevent_suppress(&srv->dev, false); +@@ -216,7 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) + kobject_del(srv->kobj_paths); + kobject_put(srv->kobj_paths); + mutex_unlock(&srv->paths_mutex); +- device_unregister(&srv->dev); ++ device_del(&srv->dev); + } else { + mutex_unlock(&srv->paths_mutex); + } +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c +index a219bd1bdbc26..28f6414dfa3dc 100644 +--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c ++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c +@@ -1319,6 +1319,13 @@ static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess) + return sess->cur_cq_vector; + } + ++static void rtrs_srv_dev_release(struct device *dev) ++{ ++ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); ++ ++ kfree(srv); ++} ++ + static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, + const uuid_t *paths_uuid) + { +@@ -1336,6 +1343,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx, + uuid_copy(&srv->paths_uuid, paths_uuid); + srv->queue_depth = sess_queue_depth; + srv->ctx = ctx; ++ device_initialize(&srv->dev); ++ srv->dev.release = rtrs_srv_dev_release; + + srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks), + GFP_KERNEL); +diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c +index 2a11a63e7217a..b360dc34c90c7 100644 +--- a/drivers/interconnect/qcom/bcm-voter.c ++++ b/drivers/interconnect/qcom/bcm-voter.c +@@ -52,8 +52,20 @@ static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b) + return 1; + } + ++static u64 bcm_div(u64 num, u32 base) ++{ ++ /* Ensure that small votes aren't lost. 
*/ ++ if (num && num < base) ++ return 1; ++ ++ do_div(num, base); ++ ++ return num; ++} ++ + static void bcm_aggregate(struct qcom_icc_bcm *bcm) + { ++ struct qcom_icc_node *node; + size_t i, bucket; + u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0}; + u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0}; +@@ -61,22 +73,21 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm) + + for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { + for (i = 0; i < bcm->num_nodes; i++) { +- temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width; +- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels); ++ node = bcm->nodes[i]; ++ temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, ++ node->buswidth * node->channels); + agg_avg[bucket] = max(agg_avg[bucket], temp); + +- temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width; +- do_div(temp, bcm->nodes[i]->buswidth); ++ temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width, ++ node->buswidth); + agg_peak[bucket] = max(agg_peak[bucket], temp); + } + + temp = agg_avg[bucket] * 1000ULL; +- do_div(temp, bcm->aux_data.unit); +- bcm->vote_x[bucket] = temp; ++ bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit); + + temp = agg_peak[bucket] * 1000ULL; +- do_div(temp, bcm->aux_data.unit); +- bcm->vote_y[bucket] = temp; ++ bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit); + } + + if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 && +diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c +index 200ee948f6ec1..37c74c842f3a3 100644 +--- a/drivers/iommu/amd/iommu.c ++++ b/drivers/iommu/amd/iommu.c +@@ -2650,7 +2650,12 @@ static int amd_iommu_def_domain_type(struct device *dev) + if (!dev_data) + return 0; + +- if (dev_data->iommu_v2) ++ /* ++ * Do not identity map IOMMUv2 capable devices when memory encryption is ++ * active, because some of those devices (AMD GPUs) don't have the ++ * encryption bit in their DMA-mask and require remapping. ++ */ ++ if (!mem_encrypt_active() && dev_data->iommu_v2) + return IOMMU_DOMAIN_IDENTITY; + + return 0; +diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c +index e4b025c5637c4..5a188cac7a0f1 100644 +--- a/drivers/iommu/amd/iommu_v2.c ++++ b/drivers/iommu/amd/iommu_v2.c +@@ -737,6 +737,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) + + might_sleep(); + ++ /* ++ * When memory encryption is active the device is likely not in a ++ * direct-mapped domain. Forbid using IOMMUv2 functionality for now. 
++ */ ++ if (mem_encrypt_active()) ++ return -ENODEV; ++ + if (!amd_iommu_v2_supported()) + return -ENODEV; + +diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c +index f33b443bfa47b..c6cd2e6d8e654 100644 +--- a/drivers/media/rc/gpio-ir-tx.c ++++ b/drivers/media/rc/gpio-ir-tx.c +@@ -19,8 +19,6 @@ struct gpio_ir { + struct gpio_desc *gpio; + unsigned int carrier; + unsigned int duty_cycle; +- /* we need a spinlock to hold the cpu while transmitting */ +- spinlock_t lock; + }; + + static const struct of_device_id gpio_ir_tx_of_match[] = { +@@ -53,12 +51,11 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier) + static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, + uint count) + { +- unsigned long flags; + ktime_t edge; + s32 delta; + int i; + +- spin_lock_irqsave(&gpio_ir->lock, flags); ++ local_irq_disable(); + + edge = ktime_get(); + +@@ -72,14 +69,11 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, + } + + gpiod_set_value(gpio_ir->gpio, 0); +- +- spin_unlock_irqrestore(&gpio_ir->lock, flags); + } + + static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, + uint count) + { +- unsigned long flags; + ktime_t edge; + /* + * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on +@@ -95,7 +89,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, + space = DIV_ROUND_CLOSEST((100 - gpio_ir->duty_cycle) * + (NSEC_PER_SEC / 100), gpio_ir->carrier); + +- spin_lock_irqsave(&gpio_ir->lock, flags); ++ local_irq_disable(); + + edge = ktime_get(); + +@@ -128,19 +122,20 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, + edge = last; + } + } +- +- spin_unlock_irqrestore(&gpio_ir->lock, flags); + } + + static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, + unsigned int count) + { + struct gpio_ir *gpio_ir = dev->priv; ++ unsigned long flags; + ++ local_irq_save(flags); + if (gpio_ir->carrier) + gpio_ir_tx_modulated(gpio_ir, txbuf, count); + else + gpio_ir_tx_unmodulated(gpio_ir, txbuf, count); ++ local_irq_restore(flags); + + return count; + } +@@ -176,7 +171,6 @@ static int gpio_ir_tx_probe(struct platform_device *pdev) + + gpio_ir->carrier = 38000; + gpio_ir->duty_cycle = 50; +- spin_lock_init(&gpio_ir->lock); + + rc = devm_rc_register_device(&pdev->dev, rcdev); + if (rc < 0) +diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c +index 9ff18d4961ceb..5561075f7a1bb 100644 +--- a/drivers/misc/eeprom/at24.c ++++ b/drivers/misc/eeprom/at24.c +@@ -692,10 +692,6 @@ static int at24_probe(struct i2c_client *client) + nvmem_config.word_size = 1; + nvmem_config.size = byte_len; + +- at24->nvmem = devm_nvmem_register(dev, &nvmem_config); +- if (IS_ERR(at24->nvmem)) +- return PTR_ERR(at24->nvmem); +- + i2c_set_clientdata(client, at24); + + err = regulator_enable(at24->vcc_reg); +@@ -708,6 +704,13 @@ static int at24_probe(struct i2c_client *client) + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + ++ at24->nvmem = devm_nvmem_register(dev, &nvmem_config); ++ if (IS_ERR(at24->nvmem)) { ++ pm_runtime_disable(dev); ++ regulator_disable(at24->vcc_reg); ++ return PTR_ERR(at24->nvmem); ++ } ++ + /* + * Perform a one-byte test read to verify that the + * chip is functional. 
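[Editorial aside, not part of the patch: the at24 hunk just above moves devm_nvmem_register() after the regulator and runtime-PM setup, so the user-visible NVMEM interface only appears once the EEPROM can be powered, and the failure path tears both back down. A minimal standalone sketch of that probe-ordering/unwind pattern follows; enable_supply(), enable_runtime_pm() and register_nvmem() are stand-in stubs, not the kernel APIs.]

/*
 * Sketch of the ordering used in the at24 fix: bring up the resources the
 * device needs first, expose the user-visible interface last, and unwind
 * in reverse order on failure.
 */
#include <stdio.h>

static int  enable_supply(void)      { return 0; }  /* stands in for regulator_enable() */
static void disable_supply(void)     { }
static int  enable_runtime_pm(void)  { return 0; }
static void disable_runtime_pm(void) { }
static int  register_nvmem(void)     { return 0; }  /* last step: makes device visible */

static int probe(void)
{
	int err;

	err = enable_supply();
	if (err)
		return err;

	err = enable_runtime_pm();
	if (err)
		goto err_supply;

	/* Only now expose the interface users can reach. */
	err = register_nvmem();
	if (err)
		goto err_pm;

	return 0;

err_pm:
	disable_runtime_pm();
err_supply:
	disable_supply();
	return err;
}

int main(void)
{
	printf("probe() -> %d\n", probe());
	return 0;
}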
+diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c +index 93d346c01110d..4c229dd2b6e54 100644 +--- a/drivers/mmc/core/sdio_ops.c ++++ b/drivers/mmc/core/sdio_ops.c +@@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, + struct sg_table sgtable; + unsigned int nents, left_size, i; + unsigned int seg_size = card->host->max_seg_size; ++ int err; + + WARN_ON(blksz == 0); + +@@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, + + mmc_set_data_timeout(&data, card); + +- mmc_wait_for_req(card->host, &mrq); ++ mmc_pre_req(card->host, &mrq); + +- if (nents > 1) +- sg_free_table(&sgtable); ++ mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) +- return cmd.error; +- if (data.error) +- return data.error; +- +- if (mmc_host_is_spi(card->host)) { ++ err = cmd.error; ++ else if (data.error) ++ err = data.error; ++ else if (mmc_host_is_spi(card->host)) + /* host driver already reported errors */ +- } else { +- if (cmd.resp[0] & R5_ERROR) +- return -EIO; +- if (cmd.resp[0] & R5_FUNCTION_NUMBER) +- return -EINVAL; +- if (cmd.resp[0] & R5_OUT_OF_RANGE) +- return -ERANGE; +- } ++ err = 0; ++ else if (cmd.resp[0] & R5_ERROR) ++ err = -EIO; ++ else if (cmd.resp[0] & R5_FUNCTION_NUMBER) ++ err = -EINVAL; ++ else if (cmd.resp[0] & R5_OUT_OF_RANGE) ++ err = -ERANGE; ++ else ++ err = 0; + +- return 0; ++ mmc_post_req(card->host, &mrq, err); ++ ++ if (nents > 1) ++ sg_free_table(&sgtable); ++ ++ return err; + } + + int sdio_reset(struct mmc_host *host) +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c +index 2d9f79b50a7fa..841e34aa7caae 100644 +--- a/drivers/mmc/host/sdhci-acpi.c ++++ b/drivers/mmc/host/sdhci-acpi.c +@@ -550,12 +550,18 @@ static int amd_select_drive_strength(struct mmc_card *card, + return MMC_SET_DRIVER_TYPE_A; + } + +-static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host) ++static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable) + { ++ struct sdhci_acpi_host *acpi_host = sdhci_priv(host); ++ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); ++ + /* AMD Platform requires dll setting */ + sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); + usleep_range(10, 20); +- sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); ++ if (enable) ++ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); ++ ++ amd_host->dll_enabled = enable; + } + + /* +@@ -595,10 +601,8 @@ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) + + /* DLL is only required for HS400 */ + if (host->timing == MMC_TIMING_MMC_HS400 && +- !amd_host->dll_enabled) { +- sdhci_acpi_amd_hs400_dll(host); +- amd_host->dll_enabled = true; +- } ++ !amd_host->dll_enabled) ++ sdhci_acpi_amd_hs400_dll(host, true); + } + } + +@@ -619,10 +623,23 @@ static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) + return err; + } + ++static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) ++{ ++ struct sdhci_acpi_host *acpi_host = sdhci_priv(host); ++ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); ++ ++ if (mask & SDHCI_RESET_ALL) { ++ amd_host->tuned_clock = false; ++ sdhci_acpi_amd_hs400_dll(host, false); ++ } ++ ++ sdhci_reset(host, mask); ++} ++ + static const struct sdhci_ops sdhci_acpi_ops_amd = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, +- .reset = sdhci_reset, ++ .reset = amd_sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + }; + +diff --git a/drivers/mmc/host/sdhci-msm.c 
b/drivers/mmc/host/sdhci-msm.c +index c0d58e9fcc333..0450f521c6f9a 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -1158,7 +1158,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) + static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) + { + struct sdhci_host *host = mmc_priv(mmc); +- int tuning_seq_cnt = 3; ++ int tuning_seq_cnt = 10; + u8 phase, tuned_phases[16], tuned_phase_cnt = 0; + int rc; + struct mmc_ios ios = host->mmc->ios; +@@ -1214,6 +1214,22 @@ retry: + } while (++phase < ARRAY_SIZE(tuned_phases)); + + if (tuned_phase_cnt) { ++ if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { ++ /* ++ * All phases valid is _almost_ as bad as no phases ++ * valid. Probably all phases are not really reliable ++ * but we didn't detect where the unreliable place is. ++ * That means we'll essentially be guessing and hoping ++ * we get a good phase. Better to try a few times. ++ */ ++ dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", ++ mmc_hostname(mmc)); ++ if (--tuning_seq_cnt) { ++ tuned_phase_cnt = 0; ++ goto retry; ++ } ++ } ++ + rc = msm_find_most_appropriate_phase(host, tuned_phases, + tuned_phase_cnt); + if (rc < 0) +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c +index 7c73d243dc6ce..45881b3099567 100644 +--- a/drivers/mmc/host/sdhci-of-esdhc.c ++++ b/drivers/mmc/host/sdhci-of-esdhc.c +@@ -81,6 +81,7 @@ struct sdhci_esdhc { + bool quirk_tuning_erratum_type2; + bool quirk_ignore_data_inhibit; + bool quirk_delay_before_data_reset; ++ bool quirk_trans_complete_erratum; + bool in_sw_tuning; + unsigned int peripheral_clock; + const struct esdhc_clk_fixup *clk_fixup; +@@ -1177,10 +1178,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, + + static u32 esdhc_irq(struct sdhci_host *host, u32 intmask) + { ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); ++ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u32 command; + +- if (of_find_compatible_node(NULL, NULL, +- "fsl,p2020-esdhc")) { ++ if (esdhc->quirk_trans_complete_erratum) { + command = SDHCI_GET_CMD(sdhci_readw(host, + SDHCI_COMMAND)); + if (command == MMC_WRITE_MULTIPLE_BLOCK && +@@ -1334,8 +1336,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) + esdhc->clk_fixup = match->data; + np = pdev->dev.of_node; + +- if (of_device_is_compatible(np, "fsl,p2020-esdhc")) ++ if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { + esdhc->quirk_delay_before_data_reset = true; ++ esdhc->quirk_trans_complete_erratum = true; ++ } + + clk = of_clk_get(np, 0); + if (!IS_ERR(clk)) { +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +index 71ed4c54f6d5d..eaadcc7043349 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +@@ -20,6 +20,7 @@ + #include <net/pkt_cls.h> + #include <net/tcp.h> + #include <net/vxlan.h> ++#include <net/geneve.h> + + #include "hnae3.h" + #include "hns3_enet.h" +@@ -780,7 +781,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + * and it is udp packet, which has a dest port as the IANA assigned. + * the hardware is expected to do the checksum offload, but the + * hardware will not do the checksum offload when udp dest port is +- * 4789. ++ * 4789 or 6081. 
+ */ + static bool hns3_tunnel_csum_bug(struct sk_buff *skb) + { +@@ -789,7 +790,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) + l4.hdr = skb_transport_header(skb); + + if (!(!skb->encapsulation && +- l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) ++ (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || ++ l4.udp->dest == htons(GENEVE_UDP_PORT)))) + return false; + + skb_checksum_help(skb); +diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c +index 386ed2aa31fd9..9b00708676cf7 100644 +--- a/drivers/net/wan/hdlc.c ++++ b/drivers/net/wan/hdlc.c +@@ -229,7 +229,7 @@ static void hdlc_setup_dev(struct net_device *dev) + dev->min_mtu = 68; + dev->max_mtu = HDLC_MAX_MTU; + dev->type = ARPHRD_RAWHDLC; +- dev->hard_header_len = 16; ++ dev->hard_header_len = 0; + dev->needed_headroom = 0; + dev->addr_len = 0; + dev->header_ops = &hdlc_null_ops; +diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c +index d8cba3625c185..444130655d8ea 100644 +--- a/drivers/net/wan/hdlc_cisco.c ++++ b/drivers/net/wan/hdlc_cisco.c +@@ -370,6 +370,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) + memcpy(&state(hdlc)->settings, &new_settings, size); + spin_lock_init(&state(hdlc)->lock); + dev->header_ops = &cisco_header_ops; ++ dev->hard_header_len = sizeof(struct hdlc_header); + dev->type = ARPHRD_CISCO; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_on(dev); +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c +index 1ea15f2123ed5..e61616b0b91c7 100644 +--- a/drivers/net/wan/lapbether.c ++++ b/drivers/net/wan/lapbether.c +@@ -210,6 +210,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) + + skb->dev = dev = lapbeth->ethdev; + ++ skb_reset_network_header(skb); ++ + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); + + dev_queue_xmit(skb); +@@ -340,6 +342,7 @@ static int lapbeth_new_device(struct net_device *dev) + */ + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; ++ ndev->needed_tailroom = dev->needed_tailroom; + + lapbeth = netdev_priv(ndev); + lapbeth->axdev = ndev; +diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c +index 9642971e89cea..4578547659839 100644 +--- a/drivers/nfc/st95hf/core.c ++++ b/drivers/nfc/st95hf/core.c +@@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, + rc = down_killable(&stcontext->exchange_lock); + if (rc) { + WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); +- return rc; ++ goto free_skb_resp; + } + + rc = st95hf_spi_send(&stcontext->spicontext, skb->data, +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index fa0039dcacc66..f2556f0ea20dc 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -3324,10 +3324,6 @@ static ssize_t nvme_sysfs_delete(struct device *dev, + { + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + +- /* Can't delete non-created controllers */ +- if (!ctrl->created) +- return -EBUSY; +- + if (device_remove_file_self(dev, attr)) + nvme_delete_ctrl_sync(ctrl); + return count; +@@ -4129,7 +4125,6 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) + nvme_queue_scan(ctrl); + nvme_start_queues(ctrl); + } +- ctrl->created = true; + } + EXPORT_SYMBOL_GPL(nvme_start_ctrl); + +@@ -4287,7 +4282,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) + } + EXPORT_SYMBOL_GPL(nvme_unfreeze); + +-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) ++int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long 
timeout) + { + struct nvme_ns *ns; + +@@ -4298,6 +4293,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) + break; + } + up_read(&ctrl->namespaces_rwsem); ++ return timeout; + } + EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); + +diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c +index 4ec4829d62334..8575724734e02 100644 +--- a/drivers/nvme/host/fabrics.c ++++ b/drivers/nvme/host/fabrics.c +@@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, + struct nvme_request *req = nvme_req(rq); + + /* +- * If we are in some state of setup or teardown only allow +- * internally generated commands. ++ * currently we have a problem sending passthru commands ++ * on the admin_q if the controller is not LIVE because we can't ++ * make sure that they are going out after the admin connect, ++ * controller enable and/or other commands in the initialization ++ * sequence. until the controller will be LIVE, fail with ++ * BLK_STS_RESOURCE so that they will be rescheduled. + */ +- if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD)) ++ if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) + return false; + + /* +@@ -576,9 +580,8 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, + * which is require to set the queue live in the appropinquate states. + */ + switch (ctrl->state) { +- case NVME_CTRL_NEW: + case NVME_CTRL_CONNECTING: +- if (nvme_is_fabrics(req->cmd) && ++ if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && + req->cmd->fabrics.fctype == nvme_fabrics_type_connect) + return true; + break; +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index e268f1d7e1a0f..1db144eb74ff1 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -271,7 +271,6 @@ struct nvme_ctrl { + struct nvme_command ka_cmd; + struct work_struct fw_act_work; + unsigned long events; +- bool created; + + #ifdef CONFIG_NVME_MULTIPATH + /* asymmetric namespace access: */ +@@ -538,7 +537,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl); + void nvme_sync_queues(struct nvme_ctrl *ctrl); + void nvme_unfreeze(struct nvme_ctrl *ctrl); + void nvme_wait_freeze(struct nvme_ctrl *ctrl); +-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); ++int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); + void nvme_start_freeze(struct nvme_ctrl *ctrl); + + #define NVME_QID_ANY -1 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index d4b1ff7471231..69a19fe241063 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -1250,8 +1250,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) + dev_warn_ratelimited(dev->ctrl.device, + "I/O %d QID %d timeout, disable controller\n", + req->tag, nvmeq->qid); +- nvme_dev_disable(dev, true); + nvme_req(req)->flags |= NVME_REQ_CANCELLED; ++ nvme_dev_disable(dev, true); + return BLK_EH_DONE; + case NVME_CTRL_RESETTING: + return BLK_EH_RESET_TIMER; +@@ -1268,10 +1268,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) + dev_warn(dev->ctrl.device, + "I/O %d QID %d timeout, reset controller\n", + req->tag, nvmeq->qid); ++ nvme_req(req)->flags |= NVME_REQ_CANCELLED; + nvme_dev_disable(dev, false); + nvme_reset_ctrl(&dev->ctrl); + +- nvme_req(req)->flags |= NVME_REQ_CANCELLED; + return BLK_EH_DONE; + } + +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c +index 876859cd14e86..6c07bb55b0f83 100644 +--- 
a/drivers/nvme/host/rdma.c ++++ b/drivers/nvme/host/rdma.c +@@ -121,6 +121,7 @@ struct nvme_rdma_ctrl { + struct sockaddr_storage src_addr; + + struct nvme_ctrl ctrl; ++ struct mutex teardown_lock; + bool use_inline_data; + u32 io_queues[HCTX_MAX_TYPES]; + }; +@@ -949,7 +950,15 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) + + if (!new) { + nvme_start_queues(&ctrl->ctrl); +- nvme_wait_freeze(&ctrl->ctrl); ++ if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { ++ /* ++ * If we timed out waiting for freeze we are likely to ++ * be stuck. Fail the controller initialization just ++ * to be safe. ++ */ ++ ret = -ENODEV; ++ goto out_wait_freeze_timed_out; ++ } + blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, + ctrl->ctrl.queue_count - 1); + nvme_unfreeze(&ctrl->ctrl); +@@ -957,6 +966,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) + + return 0; + ++out_wait_freeze_timed_out: ++ nvme_stop_queues(&ctrl->ctrl); ++ nvme_rdma_stop_io_queues(ctrl); + out_cleanup_connect_q: + if (new) + blk_cleanup_queue(ctrl->ctrl.connect_q); +@@ -971,6 +983,7 @@ out_free_io_queues: + static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, + bool remove) + { ++ mutex_lock(&ctrl->teardown_lock); + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); + if (ctrl->ctrl.admin_tagset) { +@@ -981,11 +994,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, + if (remove) + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_destroy_admin_queue(ctrl, remove); ++ mutex_unlock(&ctrl->teardown_lock); + } + + static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, + bool remove) + { ++ mutex_lock(&ctrl->teardown_lock); + if (ctrl->ctrl.queue_count > 1) { + nvme_start_freeze(&ctrl->ctrl); + nvme_stop_queues(&ctrl->ctrl); +@@ -999,6 +1014,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, + nvme_start_queues(&ctrl->ctrl); + nvme_rdma_destroy_io_queues(ctrl, remove); + } ++ mutex_unlock(&ctrl->teardown_lock); + } + + static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) +@@ -1154,6 +1170,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) + return; + ++ dev_warn(ctrl->ctrl.device, "starting error recovery\n"); + queue_work(nvme_reset_wq, &ctrl->err_work); + } + +@@ -1920,6 +1937,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, + return 0; + } + ++static void nvme_rdma_complete_timed_out(struct request *rq) ++{ ++ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); ++ struct nvme_rdma_queue *queue = req->queue; ++ struct nvme_rdma_ctrl *ctrl = queue->ctrl; ++ ++ /* fence other contexts that may complete the command */ ++ mutex_lock(&ctrl->teardown_lock); ++ nvme_rdma_stop_queue(queue); ++ if (!blk_mq_request_completed(rq)) { ++ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; ++ blk_mq_complete_request(rq); ++ } ++ mutex_unlock(&ctrl->teardown_lock); ++} ++ + static enum blk_eh_timer_return + nvme_rdma_timeout(struct request *rq, bool reserved) + { +@@ -1930,29 +1963,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved) + dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", + rq->tag, nvme_rdma_queue_idx(queue)); + +- /* +- * Restart the timer if a controller reset is already scheduled. Any +- * timed out commands would be handled before entering the connecting +- * state. 
+- */ +- if (ctrl->ctrl.state == NVME_CTRL_RESETTING) +- return BLK_EH_RESET_TIMER; +- + if (ctrl->ctrl.state != NVME_CTRL_LIVE) { + /* +- * Teardown immediately if controller times out while starting +- * or we are already started error recovery. all outstanding +- * requests are completed on shutdown, so we return BLK_EH_DONE. ++ * If we are resetting, connecting or deleting we should ++ * complete immediately because we may block controller ++ * teardown or setup sequence ++ * - ctrl disable/shutdown fabrics requests ++ * - connect requests ++ * - initialization admin requests ++ * - I/O requests that entered after unquiescing and ++ * the controller stopped responding ++ * ++ * All other requests should be cancelled by the error ++ * recovery work, so it's fine that we fail it here. + */ +- flush_work(&ctrl->err_work); +- nvme_rdma_teardown_io_queues(ctrl, false); +- nvme_rdma_teardown_admin_queue(ctrl, false); ++ nvme_rdma_complete_timed_out(rq); + return BLK_EH_DONE; + } + +- dev_warn(ctrl->ctrl.device, "starting error recovery\n"); ++ /* ++ * LIVE state should trigger the normal error recovery which will ++ * handle completing this request. ++ */ + nvme_rdma_error_recovery(ctrl); +- + return BLK_EH_RESET_TIMER; + } + +@@ -2252,6 +2285,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, + return ERR_PTR(-ENOMEM); + ctrl->ctrl.opts = opts; + INIT_LIST_HEAD(&ctrl->list); ++ mutex_init(&ctrl->teardown_lock); + + if (!(opts->mask & NVMF_OPT_TRSVCID)) { + opts->trsvcid = +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c +index a6d2e3330a584..f1f66bf96cbb9 100644 +--- a/drivers/nvme/host/tcp.c ++++ b/drivers/nvme/host/tcp.c +@@ -122,6 +122,7 @@ struct nvme_tcp_ctrl { + struct sockaddr_storage src_addr; + struct nvme_ctrl ctrl; + ++ struct mutex teardown_lock; + struct work_struct err_work; + struct delayed_work connect_work; + struct nvme_tcp_request async_req; +@@ -447,6 +448,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) + return; + ++ dev_warn(ctrl->device, "starting error recovery\n"); + queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); + } + +@@ -1497,7 +1499,6 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) + + if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) + return; +- + __nvme_tcp_stop_queue(queue); + } + +@@ -1752,7 +1753,15 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) + + if (!new) { + nvme_start_queues(ctrl); +- nvme_wait_freeze(ctrl); ++ if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { ++ /* ++ * If we timed out waiting for freeze we are likely to ++ * be stuck. Fail the controller initialization just ++ * to be safe. 
++ */ ++ ret = -ENODEV; ++ goto out_wait_freeze_timed_out; ++ } + blk_mq_update_nr_hw_queues(ctrl->tagset, + ctrl->queue_count - 1); + nvme_unfreeze(ctrl); +@@ -1760,6 +1769,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) + + return 0; + ++out_wait_freeze_timed_out: ++ nvme_stop_queues(ctrl); ++ nvme_tcp_stop_io_queues(ctrl); + out_cleanup_connect_q: + if (new) + blk_cleanup_queue(ctrl->connect_q); +@@ -1845,6 +1857,7 @@ out_free_queue: + static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, + bool remove) + { ++ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); + blk_mq_quiesce_queue(ctrl->admin_q); + nvme_tcp_stop_queue(ctrl, 0); + if (ctrl->admin_tagset) { +@@ -1855,13 +1868,16 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, + if (remove) + blk_mq_unquiesce_queue(ctrl->admin_q); + nvme_tcp_destroy_admin_queue(ctrl, remove); ++ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); + } + + static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, + bool remove) + { ++ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); + if (ctrl->queue_count <= 1) +- return; ++ goto out; ++ blk_mq_quiesce_queue(ctrl->admin_q); + nvme_start_freeze(ctrl); + nvme_stop_queues(ctrl); + nvme_tcp_stop_io_queues(ctrl); +@@ -1873,6 +1889,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, + if (remove) + nvme_start_queues(ctrl); + nvme_tcp_destroy_io_queues(ctrl, remove); ++out: ++ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); + } + + static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) +@@ -2119,40 +2137,55 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) + nvme_tcp_queue_request(&ctrl->async_req, true); + } + ++static void nvme_tcp_complete_timed_out(struct request *rq) ++{ ++ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); ++ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; ++ ++ /* fence other contexts that may complete the command */ ++ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); ++ nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); ++ if (!blk_mq_request_completed(rq)) { ++ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; ++ blk_mq_complete_request(rq); ++ } ++ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); ++} ++ + static enum blk_eh_timer_return + nvme_tcp_timeout(struct request *rq, bool reserved) + { + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); +- struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; ++ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; + struct nvme_tcp_cmd_pdu *pdu = req->pdu; + +- /* +- * Restart the timer if a controller reset is already scheduled. Any +- * timed out commands would be handled before entering the connecting +- * state. +- */ +- if (ctrl->ctrl.state == NVME_CTRL_RESETTING) +- return BLK_EH_RESET_TIMER; +- +- dev_warn(ctrl->ctrl.device, ++ dev_warn(ctrl->device, + "queue %d: timeout request %#x type %d\n", + nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); + +- if (ctrl->ctrl.state != NVME_CTRL_LIVE) { ++ if (ctrl->state != NVME_CTRL_LIVE) { + /* +- * Teardown immediately if controller times out while starting +- * or we are already started error recovery. all outstanding +- * requests are completed on shutdown, so we return BLK_EH_DONE. 
++ * If we are resetting, connecting or deleting we should ++ * complete immediately because we may block controller ++ * teardown or setup sequence ++ * - ctrl disable/shutdown fabrics requests ++ * - connect requests ++ * - initialization admin requests ++ * - I/O requests that entered after unquiescing and ++ * the controller stopped responding ++ * ++ * All other requests should be cancelled by the error ++ * recovery work, so it's fine that we fail it here. + */ +- flush_work(&ctrl->err_work); +- nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); +- nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); ++ nvme_tcp_complete_timed_out(rq); + return BLK_EH_DONE; + } + +- dev_warn(ctrl->ctrl.device, "starting error recovery\n"); +- nvme_tcp_error_recovery(&ctrl->ctrl); +- ++ /* ++ * LIVE state should trigger the normal error recovery which will ++ * handle completing this request. ++ */ ++ nvme_tcp_error_recovery(ctrl); + return BLK_EH_RESET_TIMER; + } + +@@ -2384,6 +2417,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, + nvme_tcp_reconnect_ctrl_work); + INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); + INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); ++ mutex_init(&ctrl->teardown_lock); + + if (!(opts->mask & NVMF_OPT_TRSVCID)) { + opts->trsvcid = +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c +index de9217cfd22d7..3d29b773ced27 100644 +--- a/drivers/nvme/target/tcp.c ++++ b/drivers/nvme/target/tcp.c +@@ -160,6 +160,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd); + static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, + struct nvmet_tcp_cmd *cmd) + { ++ if (unlikely(!queue->nr_cmds)) { ++ /* We didn't allocate cmds yet, send 0xffff */ ++ return USHRT_MAX; ++ } ++ + return cmd - queue->cmds; + } + +@@ -872,7 +877,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) + struct nvme_tcp_data_pdu *data = &queue->pdu.data; + struct nvmet_tcp_cmd *cmd; + +- cmd = &queue->cmds[data->ttag]; ++ if (likely(queue->nr_cmds)) ++ cmd = &queue->cmds[data->ttag]; ++ else ++ cmd = &queue->connect; + + if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { + pr_err("ttag %u unexpected data offset %u (expected %u)\n", +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c +index e91040af33945..ba277136f52b1 100644 +--- a/drivers/phy/qualcomm/phy-qcom-qmp.c ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c +@@ -504,8 +504,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf), + QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0), +- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f), +- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), ++ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), ++ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6), + QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf), + QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0), +@@ -531,7 +531,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0), + QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1), +- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1), +@@ -540,7 
+539,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), + QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19), +- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7), + }; + + static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { +@@ -548,6 +546,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6), + QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2), + QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), ++ QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36), ++ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a), + }; + + static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { +@@ -558,7 +558,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), + QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4), +- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4), + }; + + static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { +@@ -1673,6 +1672,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { + .pwrdn_ctrl = SW_PWRDN, + }; + ++static const char * const ipq8074_pciephy_clk_l[] = { ++ "aux", "cfg_ahb", ++}; + /* list of resets */ + static const char * const ipq8074_pciephy_reset_l[] = { + "phy", "common", +@@ -1690,8 +1692,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { + .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), + .pcs_tbl = ipq8074_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), +- .clk_list = NULL, +- .num_clks = 0, ++ .clk_list = ipq8074_pciephy_clk_l, ++ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), + .reset_list = ipq8074_pciephy_reset_l, + .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), + .vreg_list = NULL, +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h +index 6d017a0c0c8d9..832b3d0984033 100644 +--- a/drivers/phy/qualcomm/phy-qcom-qmp.h ++++ b/drivers/phy/qualcomm/phy-qcom-qmp.h +@@ -77,6 +77,8 @@ + #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc + + /* Only for QMP V2 PHY - TX registers */ ++#define QSERDES_TX_EMP_POST1_LVL 0x018 ++#define QSERDES_TX_SLEW_CNTL 0x040 + #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054 + #define QSERDES_TX_DEBUG_BUS_SEL 0x064 + #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 720f28844795b..be8c709a74883 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -235,8 +235,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev) + static void regulator_unlock_recursive(struct regulator_dev *rdev, + unsigned int n_coupled) + { +- struct regulator_dev *c_rdev; +- int i; ++ struct regulator_dev *c_rdev, *supply_rdev; ++ int i, supply_n_coupled; + + for (i = n_coupled; i > 0; i--) { + c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1]; +@@ -244,10 +244,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev, + if (!c_rdev) + continue; + +- if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) +- regulator_unlock_recursive( +- c_rdev->supply->rdev, +- c_rdev->coupling_desc.n_coupled); ++ if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) { ++ supply_rdev = c_rdev->supply->rdev; ++ supply_n_coupled = supply_rdev->coupling_desc.n_coupled; ++ ++ regulator_unlock_recursive(supply_rdev, ++ supply_n_coupled); ++ } + + 
regulator_unlock(c_rdev); + } +@@ -1460,7 +1463,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, + const char *consumer_dev_name, + const char *supply) + { +- struct regulator_map *node; ++ struct regulator_map *node, *new_node; + int has_dev; + + if (supply == NULL) +@@ -1471,6 +1474,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, + else + has_dev = 0; + ++ new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); ++ if (new_node == NULL) ++ return -ENOMEM; ++ ++ new_node->regulator = rdev; ++ new_node->supply = supply; ++ ++ if (has_dev) { ++ new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); ++ if (new_node->dev_name == NULL) { ++ kfree(new_node); ++ return -ENOMEM; ++ } ++ } ++ ++ mutex_lock(®ulator_list_mutex); + list_for_each_entry(node, ®ulator_map_list, list) { + if (node->dev_name && consumer_dev_name) { + if (strcmp(node->dev_name, consumer_dev_name) != 0) +@@ -1488,26 +1507,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, + node->regulator->desc->name, + supply, + dev_name(&rdev->dev), rdev_get_name(rdev)); +- return -EBUSY; ++ goto fail; + } + +- node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); +- if (node == NULL) +- return -ENOMEM; +- +- node->regulator = rdev; +- node->supply = supply; +- +- if (has_dev) { +- node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); +- if (node->dev_name == NULL) { +- kfree(node); +- return -ENOMEM; +- } +- } ++ list_add(&new_node->list, ®ulator_map_list); ++ mutex_unlock(®ulator_list_mutex); + +- list_add(&node->list, ®ulator_map_list); + return 0; ++ ++fail: ++ mutex_unlock(®ulator_list_mutex); ++ kfree(new_node->dev_name); ++ kfree(new_node); ++ return -EBUSY; + } + + static void unset_regulator_supplies(struct regulator_dev *rdev) +@@ -1579,44 +1591,53 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, + const char *supply_name) + { + struct regulator *regulator; +- char buf[REG_STR_SIZE]; +- int err, size; ++ int err; ++ ++ if (dev) { ++ char buf[REG_STR_SIZE]; ++ int size; ++ ++ size = snprintf(buf, REG_STR_SIZE, "%s-%s", ++ dev->kobj.name, supply_name); ++ if (size >= REG_STR_SIZE) ++ return NULL; ++ ++ supply_name = kstrdup(buf, GFP_KERNEL); ++ if (supply_name == NULL) ++ return NULL; ++ } else { ++ supply_name = kstrdup_const(supply_name, GFP_KERNEL); ++ if (supply_name == NULL) ++ return NULL; ++ } + + regulator = kzalloc(sizeof(*regulator), GFP_KERNEL); +- if (regulator == NULL) ++ if (regulator == NULL) { ++ kfree(supply_name); + return NULL; ++ } + +- regulator_lock(rdev); + regulator->rdev = rdev; ++ regulator->supply_name = supply_name; ++ ++ regulator_lock(rdev); + list_add(®ulator->list, &rdev->consumer_list); ++ regulator_unlock(rdev); + + if (dev) { + regulator->dev = dev; + + /* Add a link to the device sysfs entry */ +- size = snprintf(buf, REG_STR_SIZE, "%s-%s", +- dev->kobj.name, supply_name); +- if (size >= REG_STR_SIZE) +- goto overflow_err; +- +- regulator->supply_name = kstrdup(buf, GFP_KERNEL); +- if (regulator->supply_name == NULL) +- goto overflow_err; +- + err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj, +- buf); ++ supply_name); + if (err) { + rdev_dbg(rdev, "could not add device link %s err %d\n", + dev->kobj.name, err); + /* non-fatal */ + } +- } else { +- regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL); +- if (regulator->supply_name == NULL) +- goto overflow_err; + } + +- regulator->debugfs = debugfs_create_dir(regulator->supply_name, ++ regulator->debugfs = 
debugfs_create_dir(supply_name, + rdev->debugfs); + if (!regulator->debugfs) { + rdev_dbg(rdev, "Failed to create debugfs directory\n"); +@@ -1641,13 +1662,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, + _regulator_is_enabled(rdev)) + regulator->always_on = true; + +- regulator_unlock(rdev); + return regulator; +-overflow_err: +- list_del(®ulator->list); +- kfree(regulator); +- regulator_unlock(rdev); +- return NULL; + } + + static int _regulator_get_enable_time(struct regulator_dev *rdev) +@@ -2222,10 +2237,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias); + static int regulator_ena_gpio_request(struct regulator_dev *rdev, + const struct regulator_config *config) + { +- struct regulator_enable_gpio *pin; ++ struct regulator_enable_gpio *pin, *new_pin; + struct gpio_desc *gpiod; + + gpiod = config->ena_gpiod; ++ new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL); ++ ++ mutex_lock(®ulator_list_mutex); + + list_for_each_entry(pin, ®ulator_ena_gpio_list, list) { + if (pin->gpiod == gpiod) { +@@ -2234,9 +2252,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, + } + } + +- pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); +- if (pin == NULL) ++ if (new_pin == NULL) { ++ mutex_unlock(®ulator_list_mutex); + return -ENOMEM; ++ } ++ ++ pin = new_pin; ++ new_pin = NULL; + + pin->gpiod = gpiod; + list_add(&pin->list, ®ulator_ena_gpio_list); +@@ -2244,6 +2266,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, + update_ena_gpio_to_rdev: + pin->request_count++; + rdev->ena_pin = pin; ++ ++ mutex_unlock(®ulator_list_mutex); ++ kfree(new_pin); ++ + return 0; + } + +@@ -4880,13 +4906,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev) + return; + } + +- regulator_lock(c_rdev); +- + c_desc->coupled_rdevs[i] = c_rdev; + c_desc->n_resolved++; + +- regulator_unlock(c_rdev); +- + regulator_resolve_coupling(c_rdev); + } + } +@@ -4971,7 +4993,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev) + if (!of_check_coupling_data(rdev)) + return -EPERM; + ++ mutex_lock(®ulator_list_mutex); + rdev->coupling_desc.coupler = regulator_find_coupler(rdev); ++ mutex_unlock(®ulator_list_mutex); ++ + if (IS_ERR(rdev->coupling_desc.coupler)) { + err = PTR_ERR(rdev->coupling_desc.coupler); + rdev_err(rdev, "failed to get coupler: %d\n", err); +@@ -5072,6 +5097,7 @@ regulator_register(const struct regulator_desc *regulator_desc, + ret = -ENOMEM; + goto rinse; + } ++ device_initialize(&rdev->dev); + + /* + * Duplicate the config so the driver could override it after +@@ -5079,9 +5105,8 @@ regulator_register(const struct regulator_desc *regulator_desc, + */ + config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); + if (config == NULL) { +- kfree(rdev); + ret = -ENOMEM; +- goto rinse; ++ goto clean; + } + + init_data = regulator_of_get_init_data(dev, regulator_desc, config, +@@ -5093,10 +5118,8 @@ regulator_register(const struct regulator_desc *regulator_desc, + * from a gpio extender or something else. 
+ */ + if (PTR_ERR(init_data) == -EPROBE_DEFER) { +- kfree(config); +- kfree(rdev); + ret = -EPROBE_DEFER; +- goto rinse; ++ goto clean; + } + + /* +@@ -5137,9 +5160,7 @@ regulator_register(const struct regulator_desc *regulator_desc, + } + + if (config->ena_gpiod) { +- mutex_lock(®ulator_list_mutex); + ret = regulator_ena_gpio_request(rdev, config); +- mutex_unlock(®ulator_list_mutex); + if (ret != 0) { + rdev_err(rdev, "Failed to request enable GPIO: %d\n", + ret); +@@ -5151,7 +5172,6 @@ regulator_register(const struct regulator_desc *regulator_desc, + } + + /* register with sysfs */ +- device_initialize(&rdev->dev); + rdev->dev.class = ®ulator_class; + rdev->dev.parent = dev; + dev_set_name(&rdev->dev, "regulator.%lu", +@@ -5179,27 +5199,22 @@ regulator_register(const struct regulator_desc *regulator_desc, + if (ret < 0) + goto wash; + +- mutex_lock(®ulator_list_mutex); + ret = regulator_init_coupling(rdev); +- mutex_unlock(®ulator_list_mutex); + if (ret < 0) + goto wash; + + /* add consumers devices */ + if (init_data) { +- mutex_lock(®ulator_list_mutex); + for (i = 0; i < init_data->num_consumer_supplies; i++) { + ret = set_consumer_device_supply(rdev, + init_data->consumer_supplies[i].dev_name, + init_data->consumer_supplies[i].supply); + if (ret < 0) { +- mutex_unlock(®ulator_list_mutex); + dev_err(dev, "Failed to set supply %s\n", + init_data->consumer_supplies[i].supply); + goto unset_supplies; + } + } +- mutex_unlock(®ulator_list_mutex); + } + + if (!rdev->desc->ops->get_voltage && +@@ -5234,13 +5249,11 @@ wash: + mutex_lock(®ulator_list_mutex); + regulator_ena_gpio_free(rdev); + mutex_unlock(®ulator_list_mutex); +- put_device(&rdev->dev); +- rdev = NULL; + clean: + if (dangling_of_gpiod) + gpiod_put(config->ena_gpiod); +- kfree(rdev); + kfree(config); ++ put_device(&rdev->dev); + rinse: + if (dangling_cfg_gpiod) + gpiod_put(cfg->ena_gpiod); +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index 5d716d3887071..6de4bc77fd55c 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -209,7 +209,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) + task->num_scatter = si; + } + +- task->data_dir = qc->dma_dir; ++ if (qc->tf.protocol == ATA_PROT_NODATA) ++ task->data_dir = DMA_NONE; ++ else ++ task->data_dir = qc->dma_dir; + task->scatter = qc->sg; + task->ata_task.retry_count = 1; + task->task_state_flags = SAS_TASK_STATE_PENDING; +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 6637f84a3d1bc..0dd6cc0ccdf2d 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -11257,7 +11257,6 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) + { + cpumask_clear(&eqhdl->aff_mask); + irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); +- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); + } + + /** +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index fcf03f733e417..1a0e2e4342ad8 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -3690,7 +3690,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget) + instance = irq_ctx->instance; + + if (irq_ctx->irq_line_enable) { +- disable_irq(irq_ctx->os_irq); ++ disable_irq_nosync(irq_ctx->os_irq); + irq_ctx->irq_line_enable = false; + } + +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 96b78fdc6b8a9..a85c9672c6ea3 100644 +--- 
a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -1732,7 +1732,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget) + reply_q = container_of(irqpoll, struct adapter_reply_queue, + irqpoll); + if (reply_q->irq_line_enable) { +- disable_irq(reply_q->os_irq); ++ disable_irq_nosync(reply_q->os_irq); + reply_q->irq_line_enable = false; + } + num_entries = _base_process_reply_queue(reply_q); +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c +index 36b1ca2dadbb5..51cfab9d1afdc 100644 +--- a/drivers/scsi/qedf/qedf_main.c ++++ b/drivers/scsi/qedf/qedf_main.c +@@ -3843,7 +3843,7 @@ void qedf_stag_change_work(struct work_struct *work) + container_of(work, struct qedf_ctx, stag_work.work); + + if (!qedf) { +- QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL"); ++ QEDF_ERR(NULL, "qedf is NULL"); + return; + } + QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n"); +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h +index 42dbf90d46510..392312333746f 100644 +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -1605,7 +1605,7 @@ typedef struct { + */ + uint8_t firmware_options[2]; + +- uint16_t frame_payload_size; ++ __le16 frame_payload_size; + __le16 max_iocb_allocation; + __le16 execution_throttle; + uint8_t retry_count; +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c +index 2436a17f5cd91..2861c636dd651 100644 +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -4603,18 +4603,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) + nv->firmware_options[1] = BIT_7 | BIT_5; + nv->add_firmware_options[0] = BIT_5; + nv->add_firmware_options[1] = BIT_5 | BIT_4; +- nv->frame_payload_size = 2048; ++ nv->frame_payload_size = cpu_to_le16(2048); + nv->special_options[1] = BIT_7; + } else if (IS_QLA2200(ha)) { + nv->firmware_options[0] = BIT_2 | BIT_1; + nv->firmware_options[1] = BIT_7 | BIT_5; + nv->add_firmware_options[0] = BIT_5; + nv->add_firmware_options[1] = BIT_5 | BIT_4; +- nv->frame_payload_size = 1024; ++ nv->frame_payload_size = cpu_to_le16(1024); + } else if (IS_QLA2100(ha)) { + nv->firmware_options[0] = BIT_3 | BIT_1; + nv->firmware_options[1] = BIT_5; +- nv->frame_payload_size = 1024; ++ nv->frame_payload_size = cpu_to_le16(1024); + } + + nv->max_iocb_allocation = cpu_to_le16(256); +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c +index a9a72574b34ab..684761e86d4fc 100644 +--- a/drivers/soundwire/stream.c ++++ b/drivers/soundwire/stream.c +@@ -716,6 +716,7 @@ error: + kfree(wbuf); + error_1: + kfree(wr_msg); ++ bus->defer_msg.msg = NULL; + return ret; + } + +@@ -839,9 +840,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream) + error: + list_for_each_entry(m_rt, &stream->master_list, stream_node) { + bus = m_rt->bus; +- +- kfree(bus->defer_msg.msg->buf); +- kfree(bus->defer_msg.msg); ++ if (bus->defer_msg.msg) { ++ kfree(bus->defer_msg.msg->buf); ++ kfree(bus->defer_msg.msg); ++ } + } + + msg_unlock: +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c +index d4b33b358a31e..3056428b09f31 100644 +--- a/drivers/spi/spi-stm32.c ++++ b/drivers/spi/spi-stm32.c +@@ -936,7 +936,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) + } + + if (sr & STM32H7_SPI_SR_SUSP) { +- dev_warn(spi->dev, "Communication suspended\n"); ++ static DEFINE_RATELIMIT_STATE(rs, ++ DEFAULT_RATELIMIT_INTERVAL * 10, ++ 1); ++ if (__ratelimit(&rs)) ++ dev_dbg_ratelimited(spi->dev, 
"Communication suspended\n"); + if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) + stm32h7_spi_read_rxfifo(spi, false); + /* +@@ -2060,7 +2064,7 @@ static int stm32_spi_resume(struct device *dev) + } + + ret = pm_runtime_get_sync(dev); +- if (ret) { ++ if (ret < 0) { + dev_err(dev, "Unable to power device:%d\n", ret); + return ret; + } +diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c +index 4ac30accf226a..cc329b990e165 100644 +--- a/drivers/staging/greybus/audio_topology.c ++++ b/drivers/staging/greybus/audio_topology.c +@@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, + val = ucontrol->value.integer.value[0] & mask; + connect = !!val; + ++ ret = gb_pm_runtime_get_sync(bundle); ++ if (ret) ++ return ret; ++ ++ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, ++ GB_AUDIO_INVALID_INDEX, &gbvalue); ++ if (ret) ++ goto exit; ++ + /* update ucontrol */ + if (gbvalue.value.integer_value[0] != val) { + for (wi = 0; wi < wlist->num_widgets; wi++) { +@@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, + gbvalue.value.integer_value[0] = + cpu_to_le32(ucontrol->value.integer.value[0]); + +- ret = gb_pm_runtime_get_sync(bundle); +- if (ret) +- return ret; +- + ret = gb_audio_gb_set_control(module->mgmt_connection, + data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); +- +- gb_pm_runtime_put_autosuspend(bundle); +- +- if (ret) { +- dev_err_ratelimited(codec->dev, +- "%d:Error in %s for %s\n", ret, +- __func__, kcontrol->id.name); +- return ret; +- } + } + +- return 0; ++exit: ++ gb_pm_runtime_put_autosuspend(bundle); ++ if (ret) ++ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, ++ __func__, kcontrol->id.name); ++ return ret; + } + + #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ +diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c +index fa1bf8b069fda..2720f7319a3d0 100644 +--- a/drivers/staging/wlan-ng/hfa384x_usb.c ++++ b/drivers/staging/wlan-ng/hfa384x_usb.c +@@ -524,13 +524,8 @@ static void hfa384x_usb_defer(struct work_struct *data) + */ + void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) + { +- memset(hw, 0, sizeof(*hw)); + hw->usb = usb; + +- /* set up the endpoints */ +- hw->endp_in = usb_rcvbulkpipe(usb, 1); +- hw->endp_out = usb_sndbulkpipe(usb, 2); +- + /* Set up the waitq */ + init_waitqueue_head(&hw->cmdq); + +diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c +index 456603fd26c0b..4b08dc1da4f97 100644 +--- a/drivers/staging/wlan-ng/prism2usb.c ++++ b/drivers/staging/wlan-ng/prism2usb.c +@@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface, + const struct usb_device_id *id) + { + struct usb_device *dev; +- const struct usb_endpoint_descriptor *epd; +- const struct usb_host_interface *iface_desc = interface->cur_altsetting; ++ struct usb_endpoint_descriptor *bulk_in, *bulk_out; ++ struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct wlandevice *wlandev = NULL; + struct hfa384x *hw = NULL; + int result = 0; + +- if (iface_desc->desc.bNumEndpoints != 2) { +- result = -ENODEV; +- goto failed; +- } +- +- result = -EINVAL; +- epd = &iface_desc->endpoint[1].desc; +- if (!usb_endpoint_is_bulk_in(epd)) +- goto failed; +- epd = &iface_desc->endpoint[2].desc; +- if (!usb_endpoint_is_bulk_out(epd)) ++ result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); ++ if 
(result) + goto failed; + + dev = interface_to_usbdev(interface); +@@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface, + } + + /* Initialize the hw data */ ++ hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); ++ hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); + hfa384x_create(hw, dev); + hw->wlandev = wlandev; + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index c9689610e186d..2ec778e97b1be 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -1389,14 +1389,27 @@ static u32 iscsit_do_crypto_hash_sg( + sg = cmd->first_data_sg; + page_off = cmd->first_data_sg_off; + ++ if (data_length && page_off) { ++ struct scatterlist first_sg; ++ u32 len = min_t(u32, data_length, sg->length - page_off); ++ ++ sg_init_table(&first_sg, 1); ++ sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); ++ ++ ahash_request_set_crypt(hash, &first_sg, NULL, len); ++ crypto_ahash_update(hash); ++ ++ data_length -= len; ++ sg = sg_next(sg); ++ } ++ + while (data_length) { +- u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); ++ u32 cur_len = min_t(u32, data_length, sg->length); + + ahash_request_set_crypt(hash, sg, NULL, cur_len); + crypto_ahash_update(hash); + + data_length -= cur_len; +- page_off = 0; + /* iscsit_map_iovec has already checked for invalid sg pointers */ + sg = sg_next(sg); + } +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 85748e3388582..893d1b406c292 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -1149,7 +1149,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) + } + + void iscsi_target_login_sess_out(struct iscsi_conn *conn, +- struct iscsi_np *np, bool zero_tsih, bool new_sess) ++ bool zero_tsih, bool new_sess) + { + if (!new_sess) + goto old_sess_out; +@@ -1167,7 +1167,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, + conn->sess = NULL; + + old_sess_out: +- iscsi_stop_login_thread_timer(np); + /* + * If login negotiation fails check if the Time2Retain timer + * needs to be restarted. 
+@@ -1407,8 +1406,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) + new_sess_out: + new_sess = true; + old_sess_out: ++ iscsi_stop_login_thread_timer(np); + tpg_np = conn->tpg_np; +- iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); ++ iscsi_target_login_sess_out(conn, zero_tsih, new_sess); + new_sess = false; + + if (tpg) { +diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h +index 3b8e3639ff5d0..fc95e6150253f 100644 +--- a/drivers/target/iscsi/iscsi_target_login.h ++++ b/drivers/target/iscsi/iscsi_target_login.h +@@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); + extern void iscsit_free_conn(struct iscsi_conn *); + extern int iscsit_start_kthreads(struct iscsi_conn *); + extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); +-extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, +- bool, bool); ++extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); + extern int iscsi_target_login_thread(void *); + extern void iscsi_handle_login_thread_timeout(struct timer_list *t); + +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index 685d771b51d41..e32d93b927428 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in + + static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) + { +- struct iscsi_np *np = login->np; + bool zero_tsih = login->zero_tsih; + + iscsi_remove_failed_auth_entry(conn); + iscsi_target_nego_release(conn); +- iscsi_target_login_sess_out(conn, np, zero_tsih, true); ++ iscsi_target_login_sess_out(conn, zero_tsih, true); + } + + struct conn_timeout { +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c +index d7d60cd9226f7..43df502a36d6b 100644 +--- a/drivers/thunderbolt/switch.c ++++ b/drivers/thunderbolt/switch.c +@@ -739,6 +739,7 @@ static int tb_init_port(struct tb_port *port) + if (res == -ENODEV) { + tb_dbg(port->sw->tb, " Port %d: not implemented\n", + port->port); ++ port->disabled = true; + return 0; + } + return res; +diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h +index 2eb2bcd3cca35..72e48e86c476e 100644 +--- a/drivers/thunderbolt/tb.h ++++ b/drivers/thunderbolt/tb.h +@@ -167,7 +167,7 @@ struct tb_switch { + * @cap_adap: Offset of the adapter specific capability (%0 if not present) + * @cap_usb4: Offset to the USB4 port capability (%0 if not present) + * @port: Port number on switch +- * @disabled: Disabled by eeprom ++ * @disabled: Disabled by eeprom or enabled but not implemented + * @bonded: true if the port is bonded (two lanes combined as one) + * @dual_link_port: If the switch is connected using two ports, points + * to the other port. +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index 6197938dcc2d8..ae1de9cc4b094 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -1205,6 +1205,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, + } + } + ++/* ++ * usb_disable_device_endpoints -- Disable all endpoints for a device ++ * @dev: the device whose endpoints are being disabled ++ * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. 
++ */ ++static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) ++{ ++ struct usb_hcd *hcd = bus_to_hcd(dev->bus); ++ int i; ++ ++ if (hcd->driver->check_bandwidth) { ++ /* First pass: Cancel URBs, leave endpoint pointers intact. */ ++ for (i = skip_ep0; i < 16; ++i) { ++ usb_disable_endpoint(dev, i, false); ++ usb_disable_endpoint(dev, i + USB_DIR_IN, false); ++ } ++ /* Remove endpoints from the host controller internal state */ ++ mutex_lock(hcd->bandwidth_mutex); ++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); ++ mutex_unlock(hcd->bandwidth_mutex); ++ } ++ /* Second pass: remove endpoint pointers */ ++ for (i = skip_ep0; i < 16; ++i) { ++ usb_disable_endpoint(dev, i, true); ++ usb_disable_endpoint(dev, i + USB_DIR_IN, true); ++ } ++} ++ + /** + * usb_disable_device - Disable all the endpoints for a USB device + * @dev: the device whose endpoints are being disabled +@@ -1218,7 +1246,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, + void usb_disable_device(struct usb_device *dev, int skip_ep0) + { + int i; +- struct usb_hcd *hcd = bus_to_hcd(dev->bus); + + /* getting rid of interfaces will disconnect + * any drivers bound to them (a key side effect) +@@ -1264,22 +1291,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) + + dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, + skip_ep0 ? "non-ep0" : "all"); +- if (hcd->driver->check_bandwidth) { +- /* First pass: Cancel URBs, leave endpoint pointers intact. */ +- for (i = skip_ep0; i < 16; ++i) { +- usb_disable_endpoint(dev, i, false); +- usb_disable_endpoint(dev, i + USB_DIR_IN, false); +- } +- /* Remove endpoints from the host controller internal state */ +- mutex_lock(hcd->bandwidth_mutex); +- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); +- mutex_unlock(hcd->bandwidth_mutex); +- /* Second pass: remove endpoint pointers */ +- } +- for (i = skip_ep0; i < 16; ++i) { +- usb_disable_endpoint(dev, i, true); +- usb_disable_endpoint(dev, i + USB_DIR_IN, true); +- } ++ ++ usb_disable_device_endpoints(dev, skip_ep0); + } + + /** +@@ -1522,6 +1535,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface); + * The caller must own the device lock. + * + * Return: Zero on success, else a negative error code. ++ * ++ * If this routine fails the device will probably be in an unusable state ++ * with endpoints disabled, and interfaces only partially enabled. 
+ */ + int usb_reset_configuration(struct usb_device *dev) + { +@@ -1537,10 +1553,7 @@ int usb_reset_configuration(struct usb_device *dev) + * calls during probe() are fine + */ + +- for (i = 1; i < 16; ++i) { +- usb_disable_endpoint(dev, i, true); +- usb_disable_endpoint(dev, i + USB_DIR_IN, true); +- } ++ usb_disable_device_endpoints(dev, 1); /* skip ep0*/ + + config = dev->actconfig; + retval = 0; +@@ -1553,34 +1566,10 @@ int usb_reset_configuration(struct usb_device *dev) + mutex_unlock(hcd->bandwidth_mutex); + return -ENOMEM; + } +- /* Make sure we have enough bandwidth for each alternate setting 0 */ +- for (i = 0; i < config->desc.bNumInterfaces; i++) { +- struct usb_interface *intf = config->interface[i]; +- struct usb_host_interface *alt; + +- alt = usb_altnum_to_altsetting(intf, 0); +- if (!alt) +- alt = &intf->altsetting[0]; +- if (alt != intf->cur_altsetting) +- retval = usb_hcd_alloc_bandwidth(dev, NULL, +- intf->cur_altsetting, alt); +- if (retval < 0) +- break; +- } +- /* If not, reinstate the old alternate settings */ ++ /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ ++ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); + if (retval < 0) { +-reset_old_alts: +- for (i--; i >= 0; i--) { +- struct usb_interface *intf = config->interface[i]; +- struct usb_host_interface *alt; +- +- alt = usb_altnum_to_altsetting(intf, 0); +- if (!alt) +- alt = &intf->altsetting[0]; +- if (alt != intf->cur_altsetting) +- usb_hcd_alloc_bandwidth(dev, NULL, +- alt, intf->cur_altsetting); +- } + usb_enable_lpm(dev); + mutex_unlock(hcd->bandwidth_mutex); + return retval; +@@ -1589,8 +1578,12 @@ reset_old_alts: + USB_REQ_SET_CONFIGURATION, 0, + config->desc.bConfigurationValue, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); +- if (retval < 0) +- goto reset_old_alts; ++ if (retval < 0) { ++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); ++ usb_enable_lpm(dev); ++ mutex_unlock(hcd->bandwidth_mutex); ++ return retval; ++ } + mutex_unlock(hcd->bandwidth_mutex); + + /* re-init hc/hcd interface/endpoint state */ +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c +index a2ca38e25e0c3..8d134193fa0cf 100644 +--- a/drivers/usb/core/sysfs.c ++++ b/drivers/usb/core/sysfs.c +@@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, + size_t srclen, n; + int cfgno; + void *src; ++ int retval; + ++ retval = usb_lock_device_interruptible(udev); ++ if (retval < 0) ++ return -EINTR; + /* The binary attribute begins with the device descriptor. + * Following that are the raw descriptor entries for all the + * configurations (config plus subsidiary descriptors). 
+@@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, + off -= srclen; + } + } ++ usb_unlock_device(udev); + return count - nleft; + } + +diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c +index 88b75b5a039c9..1f7f4d88ed9d8 100644 +--- a/drivers/usb/dwc3/dwc3-meson-g12a.c ++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c +@@ -737,13 +737,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) + goto err_disable_clks; + } + +- ret = reset_control_deassert(priv->reset); ++ ret = reset_control_reset(priv->reset); + if (ret) +- goto err_assert_reset; ++ goto err_disable_clks; + + ret = dwc3_meson_g12a_get_phys(priv); + if (ret) +- goto err_assert_reset; ++ goto err_disable_clks; + + ret = priv->drvdata->setup_regmaps(priv, base); + if (ret) +@@ -752,7 +752,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) + if (priv->vbus) { + ret = regulator_enable(priv->vbus); + if (ret) +- goto err_assert_reset; ++ goto err_disable_clks; + } + + /* Get dr_mode */ +@@ -765,13 +765,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev) + + ret = priv->drvdata->usb_init(priv); + if (ret) +- goto err_assert_reset; ++ goto err_disable_clks; + + /* Init PHYs */ + for (i = 0 ; i < PHY_COUNT ; ++i) { + ret = phy_init(priv->phys[i]); + if (ret) +- goto err_assert_reset; ++ goto err_disable_clks; + } + + /* Set PHY Power */ +@@ -809,9 +809,6 @@ err_phys_exit: + for (i = 0 ; i < PHY_COUNT ; ++i) + phy_exit(priv->phys[i]); + +-err_assert_reset: +- reset_control_assert(priv->reset); +- + err_disable_clks: + clk_bulk_disable_unprepare(priv->drvdata->num_clks, + priv->drvdata->clks); +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 33f1cca7eaa61..ae98fe94fe91e 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, ++ { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index e8373528264c3..b5ca17a5967a0 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -160,6 +160,7 @@ + #define XSENS_AWINDA_DONGLE_PID 0x0102 + #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ + #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ ++#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ + #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ + + /* Xsens devices using FTDI VID */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 9b7cee98ea607..f7a6ac05ac57a 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1094,14 +1094,18 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), + .driver_info = RSVD(1) | RSVD(3) }, + /* Quectel products using Quectel vendor ID */ +- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), +- .driver_info = RSVD(4) }, +- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), +- .driver_info = RSVD(4) }, +- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), +- .driver_info = RSVD(4) }, +- { 
USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), +- .driver_info = RSVD(4) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), ++ .driver_info = NUMEP2 }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), ++ .driver_info = NUMEP2 }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), ++ .driver_info = NUMEP2 }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), ++ .driver_info = NUMEP2 }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, +@@ -1819,6 +1823,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ + .driver_info = RSVD(7) }, ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), +diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c +index 70ddc9d6d49e4..6a17789208779 100644 +--- a/drivers/usb/typec/mux/intel_pmc_mux.c ++++ b/drivers/usb/typec/mux/intel_pmc_mux.c +@@ -56,14 +56,11 @@ enum { + + #define PMC_USB_ALTMODE_ORI_SHIFT 1 + #define PMC_USB_ALTMODE_UFP_SHIFT 3 +-#define PMC_USB_ALTMODE_ORI_AUX_SHIFT 4 +-#define PMC_USB_ALTMODE_ORI_HSL_SHIFT 5 + + /* DP specific Mode Data bits */ + #define PMC_USB_ALTMODE_DP_MODE_SHIFT 8 + + /* TBT specific Mode Data bits */ +-#define PMC_USB_ALTMODE_HPD_HIGH BIT(14) + #define PMC_USB_ALTMODE_TBT_TYPE BIT(17) + #define PMC_USB_ALTMODE_CABLE_TYPE BIT(18) + #define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20) +@@ -174,15 +171,9 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) + req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT; + req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT; + +- req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT; +- req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT; +- + req.mode_data |= (state->mode - TYPEC_STATE_MODAL) << + PMC_USB_ALTMODE_DP_MODE_SHIFT; + +- if (data->status & DP_STATUS_HPD_STATE) +- req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH; +- + ret = pmc_usb_command(port, (void *)&req, sizeof(req)); + if (ret) + return ret; +@@ -207,9 +198,6 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state) + req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT; + req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT; + +- req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT; +- 
req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT; +- + if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3) + req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE; + +@@ -441,6 +429,7 @@ err_remove_ports: + for (i = 0; i < pmc->num_ports; i++) { + typec_switch_unregister(pmc->port[i].typec_sw); + typec_mux_unregister(pmc->port[i].typec_mux); ++ usb_role_switch_unregister(pmc->port[i].usb_sw); + } + + return ret; +@@ -454,6 +443,7 @@ static int pmc_usb_remove(struct platform_device *pdev) + for (i = 0; i < pmc->num_ports; i++) { + typec_switch_unregister(pmc->port[i].typec_sw); + typec_mux_unregister(pmc->port[i].typec_mux); ++ usb_role_switch_unregister(pmc->port[i].usb_sw); + } + + return 0; +diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c +index 9fc4f338e8700..c0aca2f0f23f0 100644 +--- a/drivers/usb/typec/ucsi/ucsi_acpi.c ++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c +@@ -112,11 +112,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) + + static int ucsi_acpi_probe(struct platform_device *pdev) + { ++ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); + struct ucsi_acpi *ua; + struct resource *res; + acpi_status status; + int ret; + ++ if (adev->dep_unmet) ++ return -EPROBE_DEFER; ++ + ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL); + if (!ua) + return -ENOMEM; +diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig +index 5e850cc9f891d..39deb22a41807 100644 +--- a/drivers/video/console/Kconfig ++++ b/drivers/video/console/Kconfig +@@ -22,52 +22,6 @@ config VGA_CONSOLE + + Say Y. + +-config VGACON_SOFT_SCROLLBACK +- bool "Enable Scrollback Buffer in System RAM" +- depends on VGA_CONSOLE +- default n +- help +- The scrollback buffer of the standard VGA console is located in +- the VGA RAM. The size of this RAM is fixed and is quite small. +- If you require a larger scrollback buffer, this can be placed in +- System RAM which is dynamically allocated during initialization. +- Placing the scrollback buffer in System RAM will slightly slow +- down the console. +- +- If you want this feature, say 'Y' here and enter the amount of +- RAM to allocate for this buffer. If unsure, say 'N'. +- +-config VGACON_SOFT_SCROLLBACK_SIZE +- int "Scrollback Buffer Size (in KB)" +- depends on VGACON_SOFT_SCROLLBACK +- range 1 1024 +- default "64" +- help +- Enter the amount of System RAM to allocate for scrollback +- buffers of VGA consoles. Each 64KB will give you approximately +- 16 80x25 screenfuls of scrollback buffer. +- +-config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT +- bool "Persistent Scrollback History for each console by default" +- depends on VGACON_SOFT_SCROLLBACK +- default n +- help +- Say Y here if the scrollback history should persist by default when +- switching between consoles. Otherwise, the scrollback history will be +- flushed each time the console is switched. This feature can also be +- enabled using the boot command line parameter +- 'vgacon.scrollback_persistent=1'. +- +- This feature might break your tool of choice to flush the scrollback +- buffer, e.g. clear(1) will work fine but Debian's clear_console(1) +- will be broken, which might cause security issues. +- You can use the escape sequence \e[3J instead if this feature is +- activated. +- +- Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each +- created tty device. +- So if you use a RAM-constrained system, say N here. 
+- + config MDA_CONSOLE + depends on !M68K && !PARISC && ISA + tristate "MDA text console (dual-headed)" +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index e9254b3085a3e..6d0418e88ad71 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -165,214 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c) + write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); + } + +-#ifdef CONFIG_VGACON_SOFT_SCROLLBACK +-/* software scrollback */ +-struct vgacon_scrollback_info { +- void *data; +- int tail; +- int size; +- int rows; +- int cnt; +- int cur; +- int save; +- int restore; +-}; +- +-static struct vgacon_scrollback_info *vgacon_scrollback_cur; +-static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; +-static bool scrollback_persistent = \ +- IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); +-module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); +-MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); +- +-static void vgacon_scrollback_reset(int vc_num, size_t reset_size) +-{ +- struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; +- +- if (scrollback->data && reset_size > 0) +- memset(scrollback->data, 0, reset_size); +- +- scrollback->cnt = 0; +- scrollback->tail = 0; +- scrollback->cur = 0; +-} +- +-static void vgacon_scrollback_init(int vc_num) +-{ +- int pitch = vga_video_num_columns * 2; +- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; +- int rows = size / pitch; +- void *data; +- +- data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, +- GFP_NOWAIT); +- +- vgacon_scrollbacks[vc_num].data = data; +- vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; +- +- vgacon_scrollback_cur->rows = rows - 1; +- vgacon_scrollback_cur->size = rows * pitch; +- +- vgacon_scrollback_reset(vc_num, size); +-} +- +-static void vgacon_scrollback_switch(int vc_num) +-{ +- if (!scrollback_persistent) +- vc_num = 0; +- +- if (!vgacon_scrollbacks[vc_num].data) { +- vgacon_scrollback_init(vc_num); +- } else { +- if (scrollback_persistent) { +- vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; +- } else { +- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; +- +- vgacon_scrollback_reset(vc_num, size); +- } +- } +-} +- +-static void vgacon_scrollback_startup(void) +-{ +- vgacon_scrollback_cur = &vgacon_scrollbacks[0]; +- vgacon_scrollback_init(0); +-} +- +-static void vgacon_scrollback_update(struct vc_data *c, int t, int count) +-{ +- void *p; +- +- if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || +- c->vc_num != fg_console) +- return; +- +- p = (void *) (c->vc_origin + t * c->vc_size_row); +- +- while (count--) { +- if ((vgacon_scrollback_cur->tail + c->vc_size_row) > +- vgacon_scrollback_cur->size) +- vgacon_scrollback_cur->tail = 0; +- +- scr_memcpyw(vgacon_scrollback_cur->data + +- vgacon_scrollback_cur->tail, +- p, c->vc_size_row); +- +- vgacon_scrollback_cur->cnt++; +- p += c->vc_size_row; +- vgacon_scrollback_cur->tail += c->vc_size_row; +- +- if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) +- vgacon_scrollback_cur->tail = 0; +- +- if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) +- vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; +- +- vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; +- } +-} +- +-static void vgacon_restore_screen(struct vc_data *c) +-{ +- c->vc_origin = c->vc_visible_origin; +- 
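[Editorial illustration, not part of the upstream patch] The vgacon_scrollback_update() code being deleted above is a classic ring buffer of whole screen rows: wrap the tail before writing, copy one row, advance, wrap again, and clamp the stored-row count at capacity. A self-contained analogue of that idiom (row and buffer sizes are made up for the example):

#include <stdio.h>
#include <string.h>

#define ROW_SIZE  8			/* bytes per row, illustrative */
#define RING_ROWS 4			/* capacity in rows, illustrative */

static char ring[RING_ROWS * ROW_SIZE];
static int tail;			/* byte offset of the next write */
static int cnt;				/* rows currently stored */

static void scrollback_push(const char *row)
{
	if (tail + ROW_SIZE > (int)sizeof(ring))
		tail = 0;		/* wrap before writing */
	memcpy(ring + tail, row, ROW_SIZE);
	tail += ROW_SIZE;
	if (tail >= (int)sizeof(ring))
		tail = 0;
	if (++cnt > RING_ROWS)
		cnt = RING_ROWS;	/* oldest rows get overwritten */
}

int main(void)
{
	char row[ROW_SIZE] = "line0";

	for (int i = 0; i < 6; i++) {
		row[4] = (char)('0' + i);
		scrollback_push(row);
	}
	printf("stored %d rows, tail at byte %d\n", cnt, tail);
	return 0;
}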
vgacon_scrollback_cur->save = 0; +- +- if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { +- scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, +- c->vc_screenbuf_size > vga_vram_size ? +- vga_vram_size : c->vc_screenbuf_size); +- vgacon_scrollback_cur->restore = 1; +- vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; +- } +-} +- +-static void vgacon_scrolldelta(struct vc_data *c, int lines) +-{ +- int start, end, count, soff; +- +- if (!lines) { +- vgacon_restore_screen(c); +- return; +- } +- +- if (!vgacon_scrollback_cur->data) +- return; +- +- if (!vgacon_scrollback_cur->save) { +- vgacon_cursor(c, CM_ERASE); +- vgacon_save_screen(c); +- c->vc_origin = (unsigned long)c->vc_screenbuf; +- vgacon_scrollback_cur->save = 1; +- } +- +- vgacon_scrollback_cur->restore = 0; +- start = vgacon_scrollback_cur->cur + lines; +- end = start + abs(lines); +- +- if (start < 0) +- start = 0; +- +- if (start > vgacon_scrollback_cur->cnt) +- start = vgacon_scrollback_cur->cnt; +- +- if (end < 0) +- end = 0; +- +- if (end > vgacon_scrollback_cur->cnt) +- end = vgacon_scrollback_cur->cnt; +- +- vgacon_scrollback_cur->cur = start; +- count = end - start; +- soff = vgacon_scrollback_cur->tail - +- ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); +- soff -= count * c->vc_size_row; +- +- if (soff < 0) +- soff += vgacon_scrollback_cur->size; +- +- count = vgacon_scrollback_cur->cnt - start; +- +- if (count > c->vc_rows) +- count = c->vc_rows; +- +- if (count) { +- int copysize; +- +- int diff = c->vc_rows - count; +- void *d = (void *) c->vc_visible_origin; +- void *s = (void *) c->vc_screenbuf; +- +- count *= c->vc_size_row; +- /* how much memory to end of buffer left? */ +- copysize = min(count, vgacon_scrollback_cur->size - soff); +- scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); +- d += copysize; +- count -= copysize; +- +- if (count) { +- scr_memcpyw(d, vgacon_scrollback_cur->data, count); +- d += count; +- } +- +- if (diff) +- scr_memcpyw(d, s, diff * c->vc_size_row); +- } else +- vgacon_cursor(c, CM_MOVE); +-} +- +-static void vgacon_flush_scrollback(struct vc_data *c) +-{ +- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; +- +- vgacon_scrollback_reset(c->vc_num, size); +-} +-#else +-#define vgacon_scrollback_startup(...) do { } while (0) +-#define vgacon_scrollback_init(...) do { } while (0) +-#define vgacon_scrollback_update(...) do { } while (0) +-#define vgacon_scrollback_switch(...) 
do { } while (0) +- + static void vgacon_restore_screen(struct vc_data *c) + { + if (c->vc_origin != c->vc_visible_origin) +@@ -386,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) + vga_set_mem_top(c); + } + +-static void vgacon_flush_scrollback(struct vc_data *c) +-{ +-} +-#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ +- + static const char *vgacon_startup(void) + { + const char *display_desc = NULL; +@@ -573,10 +360,7 @@ static const char *vgacon_startup(void) + vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; + vgacon_yres = vga_scan_lines; + +- if (!vga_init_done) { +- vgacon_scrollback_startup(); +- vga_init_done = true; +- } ++ vga_init_done = true; + + return display_desc; + } +@@ -867,7 +651,6 @@ static int vgacon_switch(struct vc_data *c) + vgacon_doresize(c, c->vc_cols, c->vc_rows); + } + +- vgacon_scrollback_switch(c->vc_num); + return 0; /* Redrawing not needed */ + } + +@@ -1384,7 +1167,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, + oldo = c->vc_origin; + delta = lines * c->vc_size_row; + if (dir == SM_UP) { +- vgacon_scrollback_update(c, t, lines); + if (c->vc_scr_end + delta >= vga_vram_end) { + scr_memcpyw((u16 *) vga_vram_base, + (u16 *) (oldo + delta), +@@ -1448,7 +1230,6 @@ const struct consw vga_con = { + .con_save_screen = vgacon_save_screen, + .con_build_attr = vgacon_build_attr, + .con_invert_region = vgacon_invert_region, +- .con_flush_scrollback = vgacon_flush_scrollback, + }; + EXPORT_SYMBOL(vga_con); + +diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c +index 35ebeeccde4df..436365efae731 100644 +--- a/drivers/video/fbdev/core/bitblit.c ++++ b/drivers/video/fbdev/core/bitblit.c +@@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index fbf10e62bcde9..b36bfe10c712c 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -122,12 +122,6 @@ static int logo_lines; + /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO + enums. 
*/ + static int logo_shown = FBCON_LOGO_CANSHOW; +-/* Software scrollback */ +-static int fbcon_softback_size = 32768; +-static unsigned long softback_buf, softback_curr; +-static unsigned long softback_in; +-static unsigned long softback_top, softback_end; +-static int softback_lines; + /* console mappings */ + static int first_fb_vc; + static int last_fb_vc = MAX_NR_CONSOLES - 1; +@@ -167,8 +161,6 @@ static int margin_color; + + static const struct consw fb_con; + +-#define CM_SOFTBACK (8) +- + #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) + + static int fbcon_set_origin(struct vc_data *); +@@ -373,18 +365,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info, + return color; + } + +-static void fbcon_update_softback(struct vc_data *vc) +-{ +- int l = fbcon_softback_size / vc->vc_size_row; +- +- if (l > 5) +- softback_end = softback_buf + l * vc->vc_size_row; +- else +- /* Smaller scrollback makes no sense, and 0 would screw +- the operation totally */ +- softback_top = 0; +-} +- + static void fb_flashcursor(struct work_struct *work) + { + struct fb_info *info = container_of(work, struct fb_info, queue); +@@ -414,7 +394,7 @@ static void fb_flashcursor(struct work_struct *work) + c = scr_readw((u16 *) vc->vc_pos); + mode = (!ops->cursor_flash || ops->cursor_state.enable) ? + CM_ERASE : CM_DRAW; +- ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), ++ ops->cursor(vc, info, mode, get_color(vc, info, c, 1), + get_color(vc, info, c, 0)); + console_unlock(); + } +@@ -471,13 +451,7 @@ static int __init fb_console_setup(char *this_opt) + } + + if (!strncmp(options, "scrollback:", 11)) { +- options += 11; +- if (*options) { +- fbcon_softback_size = simple_strtoul(options, &options, 0); +- if (*options == 'k' || *options == 'K') { +- fbcon_softback_size *= 1024; +- } +- } ++ pr_warn("Ignoring scrollback size option\n"); + continue; + } + +@@ -1022,31 +996,6 @@ static const char *fbcon_startup(void) + + set_blitting_type(vc, info); + +- if (info->fix.type != FB_TYPE_TEXT) { +- if (fbcon_softback_size) { +- if (!softback_buf) { +- softback_buf = +- (unsigned long) +- kvmalloc(fbcon_softback_size, +- GFP_KERNEL); +- if (!softback_buf) { +- fbcon_softback_size = 0; +- softback_top = 0; +- } +- } +- } else { +- if (softback_buf) { +- kvfree((void *) softback_buf); +- softback_buf = 0; +- softback_top = 0; +- } +- } +- if (softback_buf) +- softback_in = softback_top = softback_curr = +- softback_buf; +- softback_lines = 0; +- } +- + /* Setup default font */ + if (!p->fontdata && !vc->vc_font.data) { + if (!fontname[0] || !(font = find_font(fontname))) +@@ -1220,9 +1169,6 @@ static void fbcon_init(struct vc_data *vc, int init) + if (logo) + fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); + +- if (vc == svc && softback_buf) +- fbcon_update_softback(vc); +- + if (ops->rotate_font && ops->rotate_font(info, vc)) { + ops->rotate = FB_ROTATE_UR; + set_blitting_type(vc, info); +@@ -1385,7 +1331,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode) + { + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; +- int y; + int c = scr_readw((u16 *) vc->vc_pos); + + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); +@@ -1399,16 +1344,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode) + fbcon_add_cursor_timer(info); + + ops->cursor_flash = (mode == CM_ERASE) ? 
0 : 1; +- if (mode & CM_SOFTBACK) { +- mode &= ~CM_SOFTBACK; +- y = softback_lines; +- } else { +- if (softback_lines) +- fbcon_set_origin(vc); +- y = 0; +- } + +- ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), ++ ops->cursor(vc, info, mode, get_color(vc, info, c, 1), + get_color(vc, info, c, 0)); + } + +@@ -1479,8 +1416,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, + + if (con_is_visible(vc)) { + update_screen(vc); +- if (softback_buf) +- fbcon_update_softback(vc); + } + } + +@@ -1618,99 +1553,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) + scrollback_current = 0; + } + +-static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p, +- long delta) +-{ +- int count = vc->vc_rows; +- unsigned short *d, *s; +- unsigned long n; +- int line = 0; +- +- d = (u16 *) softback_curr; +- if (d == (u16 *) softback_in) +- d = (u16 *) vc->vc_origin; +- n = softback_curr + delta * vc->vc_size_row; +- softback_lines -= delta; +- if (delta < 0) { +- if (softback_curr < softback_top && n < softback_buf) { +- n += softback_end - softback_buf; +- if (n < softback_top) { +- softback_lines -= +- (softback_top - n) / vc->vc_size_row; +- n = softback_top; +- } +- } else if (softback_curr >= softback_top +- && n < softback_top) { +- softback_lines -= +- (softback_top - n) / vc->vc_size_row; +- n = softback_top; +- } +- } else { +- if (softback_curr > softback_in && n >= softback_end) { +- n += softback_buf - softback_end; +- if (n > softback_in) { +- n = softback_in; +- softback_lines = 0; +- } +- } else if (softback_curr <= softback_in && n > softback_in) { +- n = softback_in; +- softback_lines = 0; +- } +- } +- if (n == softback_curr) +- return; +- softback_curr = n; +- s = (u16 *) softback_curr; +- if (s == (u16 *) softback_in) +- s = (u16 *) vc->vc_origin; +- while (count--) { +- unsigned short *start; +- unsigned short *le; +- unsigned short c; +- int x = 0; +- unsigned short attr = 1; +- +- start = s; +- le = advance_row(s, 1); +- do { +- c = scr_readw(s); +- if (attr != (c & 0xff00)) { +- attr = c & 0xff00; +- if (s > start) { +- fbcon_putcs(vc, start, s - start, +- line, x); +- x += s - start; +- start = s; +- } +- } +- if (c == scr_readw(d)) { +- if (s > start) { +- fbcon_putcs(vc, start, s - start, +- line, x); +- x += s - start + 1; +- start = s + 1; +- } else { +- x++; +- start++; +- } +- } +- s++; +- d++; +- } while (s < le); +- if (s > start) +- fbcon_putcs(vc, start, s - start, line, x); +- line++; +- if (d == (u16 *) softback_end) +- d = (u16 *) softback_buf; +- if (d == (u16 *) softback_in) +- d = (u16 *) vc->vc_origin; +- if (s == (u16 *) softback_end) +- s = (u16 *) softback_buf; +- if (s == (u16 *) softback_in) +- s = (u16 *) vc->vc_origin; +- } +-} +- + static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy) + { +@@ -1850,31 +1692,6 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, + } + } + +-static inline void fbcon_softback_note(struct vc_data *vc, int t, +- int count) +-{ +- unsigned short *p; +- +- if (vc->vc_num != fg_console) +- return; +- p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row); +- +- while (count) { +- scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row); +- count--; +- p = advance_row(p, 1); +- softback_in += vc->vc_size_row; +- if (softback_in == softback_end) +- softback_in = softback_buf; +- if (softback_in == softback_top) { +- softback_top += vc->vc_size_row; +- if (softback_top == 
softback_end) +- softback_top = softback_buf; +- } +- } +- softback_curr = softback_in; +-} +- + static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + enum con_scroll dir, unsigned int count) + { +@@ -1897,8 +1714,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, + case SM_UP: + if (count > vc->vc_rows) /* Maximum realistic size */ + count = vc->vc_rows; +- if (softback_top) +- fbcon_softback_note(vc, t, count); + if (logo_shown >= 0) + goto redraw_up; + switch (p->scrollmode) { +@@ -2269,14 +2084,6 @@ static int fbcon_switch(struct vc_data *vc) + info = registered_fb[con2fb_map[vc->vc_num]]; + ops = info->fbcon_par; + +- if (softback_top) { +- if (softback_lines) +- fbcon_set_origin(vc); +- softback_top = softback_curr = softback_in = softback_buf; +- softback_lines = 0; +- fbcon_update_softback(vc); +- } +- + if (logo_shown >= 0) { + struct vc_data *conp2 = vc_cons[logo_shown].d; + +@@ -2600,9 +2407,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + int cnt; + char *old_data = NULL; + +- if (con_is_visible(vc) && softback_lines) +- fbcon_set_origin(vc); +- + resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); + if (p->userfont) + old_data = vc->vc_font.data; +@@ -2628,8 +2432,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + cols /= w; + rows /= h; + vc_resize(vc, cols, rows); +- if (con_is_visible(vc) && softback_buf) +- fbcon_update_softback(vc); + } else if (con_is_visible(vc) + && vc->vc_mode == KD_TEXT) { + fbcon_clear_margins(vc, 0); +@@ -2788,19 +2590,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) + + static u16 *fbcon_screen_pos(struct vc_data *vc, int offset) + { +- unsigned long p; +- int line; +- +- if (vc->vc_num != fg_console || !softback_lines) +- return (u16 *) (vc->vc_origin + offset); +- line = offset / vc->vc_size_row; +- if (line >= softback_lines) +- return (u16 *) (vc->vc_origin + offset - +- softback_lines * vc->vc_size_row); +- p = softback_curr + offset; +- if (p >= softback_end) +- p += softback_buf - softback_end; +- return (u16 *) p; ++ return (u16 *) (vc->vc_origin + offset); + } + + static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, +@@ -2814,22 +2604,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, + + x = offset % vc->vc_cols; + y = offset / vc->vc_cols; +- if (vc->vc_num == fg_console) +- y += softback_lines; + ret = pos + (vc->vc_cols - x) * 2; +- } else if (vc->vc_num == fg_console && softback_lines) { +- unsigned long offset = pos - softback_curr; +- +- if (pos < softback_curr) +- offset += softback_end - softback_buf; +- offset /= 2; +- x = offset % vc->vc_cols; +- y = offset / vc->vc_cols; +- ret = pos + (vc->vc_cols - x) * 2; +- if (ret == softback_end) +- ret = softback_buf; +- if (ret == softback_in) +- ret = vc->vc_origin; + } else { + /* Should not happen */ + x = y = 0; +@@ -2857,106 +2632,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) + a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | + (((a) & 0x0700) << 4); + scr_writew(a, p++); +- if (p == (u16 *) softback_end) +- p = (u16 *) softback_buf; +- if (p == (u16 *) softback_in) +- p = (u16 *) vc->vc_origin; +- } +-} +- +-static void fbcon_scrolldelta(struct vc_data *vc, int lines) +-{ +- struct fb_info *info = registered_fb[con2fb_map[fg_console]]; +- struct fbcon_ops *ops = info->fbcon_par; +- struct fbcon_display *disp = &fb_display[fg_console]; +- int offset, limit, 
scrollback_old; +- +- if (softback_top) { +- if (vc->vc_num != fg_console) +- return; +- if (vc->vc_mode != KD_TEXT || !lines) +- return; +- if (logo_shown >= 0) { +- struct vc_data *conp2 = vc_cons[logo_shown].d; +- +- if (conp2->vc_top == logo_lines +- && conp2->vc_bottom == conp2->vc_rows) +- conp2->vc_top = 0; +- if (logo_shown == vc->vc_num) { +- unsigned long p, q; +- int i; +- +- p = softback_in; +- q = vc->vc_origin + +- logo_lines * vc->vc_size_row; +- for (i = 0; i < logo_lines; i++) { +- if (p == softback_top) +- break; +- if (p == softback_buf) +- p = softback_end; +- p -= vc->vc_size_row; +- q -= vc->vc_size_row; +- scr_memcpyw((u16 *) q, (u16 *) p, +- vc->vc_size_row); +- } +- softback_in = softback_curr = p; +- update_region(vc, vc->vc_origin, +- logo_lines * vc->vc_cols); +- } +- logo_shown = FBCON_LOGO_CANSHOW; +- } +- fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK); +- fbcon_redraw_softback(vc, disp, lines); +- fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK); +- return; + } +- +- if (!scrollback_phys_max) +- return; +- +- scrollback_old = scrollback_current; +- scrollback_current -= lines; +- if (scrollback_current < 0) +- scrollback_current = 0; +- else if (scrollback_current > scrollback_max) +- scrollback_current = scrollback_max; +- if (scrollback_current == scrollback_old) +- return; +- +- if (fbcon_is_inactive(vc, info)) +- return; +- +- fbcon_cursor(vc, CM_ERASE); +- +- offset = disp->yscroll - scrollback_current; +- limit = disp->vrows; +- switch (disp->scrollmode) { +- case SCROLL_WRAP_MOVE: +- info->var.vmode |= FB_VMODE_YWRAP; +- break; +- case SCROLL_PAN_MOVE: +- case SCROLL_PAN_REDRAW: +- limit -= vc->vc_rows; +- info->var.vmode &= ~FB_VMODE_YWRAP; +- break; +- } +- if (offset < 0) +- offset += limit; +- else if (offset >= limit) +- offset -= limit; +- +- ops->var.xoffset = 0; +- ops->var.yoffset = offset * vc->vc_font.height; +- ops->update_start(info); +- +- if (!scrollback_current) +- fbcon_cursor(vc, CM_DRAW); + } + + static int fbcon_set_origin(struct vc_data *vc) + { +- if (softback_lines) +- fbcon_scrolldelta(vc, softback_lines); + return 0; + } + +@@ -3020,8 +2700,6 @@ static void fbcon_modechanged(struct fb_info *info) + + fbcon_set_palette(vc, color_table); + update_screen(vc); +- if (softback_buf) +- fbcon_update_softback(vc); + } + } + +@@ -3432,7 +3110,6 @@ static const struct consw fb_con = { + .con_font_default = fbcon_set_def_font, + .con_font_copy = fbcon_copy_font, + .con_set_palette = fbcon_set_palette, +- .con_scrolldelta = fbcon_scrolldelta, + .con_set_origin = fbcon_set_origin, + .con_invert_region = fbcon_invert_region, + .con_screen_pos = fbcon_screen_pos, +@@ -3667,9 +3344,6 @@ static void fbcon_exit(void) + } + #endif + +- kvfree((void *)softback_buf); +- softback_buf = 0UL; +- + for_each_registered_fb(i) { + int pending = 0; + +diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h +index 20dea853765f5..78bb14c03643e 100644 +--- a/drivers/video/fbdev/core/fbcon.h ++++ b/drivers/video/fbdev/core/fbcon.h +@@ -62,7 +62,7 @@ struct fbcon_ops { + void (*clear_margins)(struct vc_data *vc, struct fb_info *info, + int color, int bottom_only); + void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg); ++ int fg, int bg); + int (*update_start)(struct fb_info *info); + int (*rotate_font)(struct fb_info *info, struct vc_data *vc); + struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ +diff --git a/drivers/video/fbdev/core/fbcon_ccw.c 
b/drivers/video/fbdev/core/fbcon_ccw.c +index 78f3a56214782..71ad6967a70ee 100644 +--- a/drivers/video/fbdev/core/fbcon_ccw.c ++++ b/drivers/video/fbdev/core/fbcon_ccw.c +@@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); +diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c +index fd098ff17574b..31fe5dd651d44 100644 +--- a/drivers/video/fbdev/core/fbcon_cw.c ++++ b/drivers/video/fbdev/core/fbcon_cw.c +@@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); +diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c +index e165a3fad29ad..b2dd1370e39b2 100644 +--- a/drivers/video/fbdev/core/fbcon_ud.c ++++ b/drivers/video/fbdev/core/fbcon_ud.c +@@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); +diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c +index 93390312957ff..eb664dbf96f66 100644 +--- a/drivers/video/fbdev/core/tileblit.c ++++ b/drivers/video/fbdev/core/tileblit.c +@@ -80,7 +80,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_tilecursor cursor; + int use_sw = (vc->vc_cursor_type & 0x10); +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c +index a20eeb8308ffd..578d3541e3d6f 100644 +--- a/drivers/video/fbdev/vga16fb.c ++++ b/drivers/video/fbdev/vga16fb.c +@@ -1121,7 
+1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i + char oldop = setop(0); + char oldsr = setsr(0); + char oldmask = selectmask(); +- const char *cdat = image->data; ++ const unsigned char *cdat = image->data; + u32 dx = image->dx; + char __iomem *where; + int y; +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 983f4d58ae59b..6d3ed9542b6c1 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -3446,6 +3446,8 @@ fail_block_groups: + btrfs_put_block_group_cache(fs_info); + + fail_tree_roots: ++ if (fs_info->data_reloc_root) ++ btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root); + free_root_pointers(fs_info, true); + invalidate_inode_pages2(fs_info->btree_inode->i_mapping); + +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index e9eedc053fc52..780b9c9a98fe3 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -400,12 +400,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, + if (type == BTRFS_SHARED_BLOCK_REF_KEY) { + ASSERT(eb->fs_info); + /* +- * Every shared one has parent tree +- * block, which must be aligned to +- * nodesize. ++ * Every shared one has parent tree block, ++ * which must be aligned to sector size. + */ + if (offset && +- IS_ALIGNED(offset, eb->fs_info->nodesize)) ++ IS_ALIGNED(offset, eb->fs_info->sectorsize)) + return type; + } + } else if (is_data == BTRFS_REF_TYPE_DATA) { +@@ -414,12 +413,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, + if (type == BTRFS_SHARED_DATA_REF_KEY) { + ASSERT(eb->fs_info); + /* +- * Every shared one has parent tree +- * block, which must be aligned to +- * nodesize. ++ * Every shared one has parent tree block, ++ * which must be aligned to sector size. + */ + if (offset && +- IS_ALIGNED(offset, eb->fs_info->nodesize)) ++ IS_ALIGNED(offset, eb->fs_info->sectorsize)) + return type; + } + } else { +@@ -429,8 +427,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, + } + + btrfs_print_leaf((struct extent_buffer *)eb); +- btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d", +- eb->start, type); ++ btrfs_err(eb->fs_info, ++ "eb %llu iref 0x%lx invalid extent inline ref type %d", ++ eb->start, (unsigned long)iref, type); + WARN_ON(1); + + return BTRFS_REF_TYPE_INVALID; +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 5cbebf32082ab..2dc7707d4e600 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -2193,7 +2193,8 @@ static noinline int search_ioctl(struct inode *inode, + key.offset = sk->min_offset; + + while (1) { +- ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset); ++ ret = fault_in_pages_writeable(ubuf + sk_offset, ++ *buf_size - sk_offset); + if (ret) + break; + +diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c +index 61f44e78e3c9e..80567c11ec122 100644 +--- a/fs/btrfs/print-tree.c ++++ b/fs/btrfs/print-tree.c +@@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) + * offset is supposed to be a tree block which + * must be aligned to nodesize. 
+ */ +- if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) +- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", +- offset, (unsigned long long)eb->fs_info->nodesize); ++ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) ++ pr_info( ++ "\t\t\t(parent %llu not aligned to sectorsize %u)\n", ++ offset, eb->fs_info->sectorsize); + break; + case BTRFS_EXTENT_DATA_REF_KEY: + dref = (struct btrfs_extent_data_ref *)(&iref->offset); +@@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) + * must be aligned to nodesize. + */ + if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) +- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", +- offset, (unsigned long long)eb->fs_info->nodesize); ++ pr_info( ++ "\t\t\t(parent %llu not aligned to sectorsize %u)\n", ++ offset, eb->fs_info->sectorsize); + break; + default: + pr_cont("(extent %llu has INVALID ref type %d)\n", +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 2710f8ddb95fb..b43ebf55b93e1 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -1636,6 +1636,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, + pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev); + if (IS_ERR(pending->snap)) { + ret = PTR_ERR(pending->snap); ++ pending->snap = NULL; + btrfs_abort_transaction(trans, ret); + goto fail; + } +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 0e50b885d3fd6..956eb0d6bc584 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -4,6 +4,7 @@ + */ + + #include <linux/sched.h> ++#include <linux/sched/mm.h> + #include <linux/bio.h> + #include <linux/slab.h> + #include <linux/blkdev.h> +@@ -6500,8 +6501,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, + u64 devid, u8 *dev_uuid) + { + struct btrfs_device *device; ++ unsigned int nofs_flag; + ++ /* ++ * We call this under the chunk_mutex, so we want to use NOFS for this ++ * allocation, however we don't want to change btrfs_alloc_device() to ++ * always do NOFS because we use it in a lot of other GFP_KERNEL safe ++ * places. 
++ */ ++ nofs_flag = memalloc_nofs_save(); + device = btrfs_alloc_device(NULL, &devid, dev_uuid); ++ memalloc_nofs_restore(nofs_flag); + if (IS_ERR(device)) + return device; + +diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c +index ae49a55bda001..1862331f1b48d 100644 +--- a/fs/debugfs/file.c ++++ b/fs/debugfs/file.c +@@ -177,7 +177,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp) + goto out; + + if (!fops_get(real_fops)) { +-#ifdef MODULE ++#ifdef CONFIG_MODULES + if (real_fops->owner && + real_fops->owner->state == MODULE_STATE_GOING) + goto out; +@@ -312,7 +312,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp) + goto out; + + if (!fops_get(real_fops)) { +-#ifdef MODULE ++#ifdef CONFIG_MODULES + if (real_fops->owner && + real_fops->owner->state == MODULE_STATE_GOING) + goto out; +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c +index 4eb2ecd31b0d2..9bafe50a21240 100644 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c +@@ -653,8 +653,8 @@ xfs_attr_shortform_create( + ASSERT(ifp->if_flags & XFS_IFINLINE); + } + xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); +- hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; +- hdr->count = 0; ++ hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; ++ memset(hdr, 0, sizeof(*hdr)); + hdr->totsize = cpu_to_be16(sizeof(*hdr)); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + } +diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c +index 7fcf62b324b0d..8c1a7cc484b65 100644 +--- a/fs/xfs/libxfs/xfs_ialloc.c ++++ b/fs/xfs/libxfs/xfs_ialloc.c +@@ -688,7 +688,7 @@ xfs_ialloc_ag_alloc( + args.minalignslop = igeo->cluster_align - 1; + + /* Allow space for the inode btree to split. */ +- args.minleft = igeo->inobt_maxlevels - 1; ++ args.minleft = igeo->inobt_maxlevels; + if ((error = xfs_alloc_vextent(&args))) + return error; + +@@ -736,7 +736,7 @@ xfs_ialloc_ag_alloc( + /* + * Allow space for the inode btree to split. + */ +- args.minleft = igeo->inobt_maxlevels - 1; ++ args.minleft = igeo->inobt_maxlevels; + if ((error = xfs_alloc_vextent(&args))) + return error; + } +diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h +index c6df01a2a1585..7ad3659c5d2a9 100644 +--- a/fs/xfs/libxfs/xfs_trans_space.h ++++ b/fs/xfs/libxfs/xfs_trans_space.h +@@ -58,7 +58,7 @@ + #define XFS_IALLOC_SPACE_RES(mp) \ + (M_IGEO(mp)->ialloc_blks + \ + ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \ +- (M_IGEO(mp)->inobt_maxlevels - 1))) ++ M_IGEO(mp)->inobt_maxlevels)) + + /* + * Space reservation values for various transactions. +diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h +index 57eac5241303a..a97a12bb2c9ef 100644 +--- a/include/linux/efi_embedded_fw.h ++++ b/include/linux/efi_embedded_fw.h +@@ -8,8 +8,8 @@ + #define EFI_EMBEDDED_FW_PREFIX_LEN 8 + + /* +- * This struct and efi_embedded_fw_list are private to the efi-embedded fw +- * implementation they are in this header for use by lib/test_firmware.c only! ++ * This struct is private to the efi-embedded fw implementation. ++ * They are in this header for use by lib/test_firmware.c only! + */ + struct efi_embedded_fw { + struct list_head list; +@@ -18,8 +18,6 @@ struct efi_embedded_fw { + size_t length; + }; + +-extern struct list_head efi_embedded_fw_list; +- + /** + * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw + * code to search for embedded firmwares. 
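[Editorial illustration, not part of the upstream patch] The volumes.c hunk above wraps btrfs_alloc_device() in memalloc_nofs_save()/memalloc_nofs_restore() so that GFP_KERNEL allocations made anywhere inside that scope implicitly behave as GFP_NOFS, without threading a gfp flag through every callee. A minimal kernel-side sketch of the same pattern; the helper name and size parameter are invented for illustration:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Allocate while a filesystem lock is held: enter an NOFS scope so the
 * allocator will not recurse into filesystem reclaim underneath us. */
static void *alloc_under_fs_lock(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* allocations below drop __GFP_FS */
	p = kmalloc(size, GFP_KERNEL);		/* behaves like GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);
	return p;
}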
+diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h +index 9a33f171aa822..625f491b95de8 100644 +--- a/include/linux/netfilter/nf_conntrack_sctp.h ++++ b/include/linux/netfilter/nf_conntrack_sctp.h +@@ -9,6 +9,8 @@ struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; ++ u8 last_dir; ++ u8 flags; + }; + + #endif /* _NF_CONNTRACK_SCTP_H */ +diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h +index 9b1d43d671a3f..8c18dc6d3fde5 100644 +--- a/include/soc/nps/common.h ++++ b/include/soc/nps/common.h +@@ -45,6 +45,12 @@ + #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 + #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 + ++#ifndef AUX_IENABLE ++#define AUX_IENABLE 0x40c ++#endif ++ ++#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) ++ + #ifndef __ASSEMBLY__ + + /* In order to increase compilation test coverage */ +diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c +index 908fdf5098c32..53c67c87f141b 100644 +--- a/kernel/gcov/gcc_4_7.c ++++ b/kernel/gcov/gcc_4_7.c +@@ -19,7 +19,9 @@ + #include <linux/vmalloc.h> + #include "gcov.h" + +-#if (__GNUC__ >= 7) ++#if (__GNUC__ >= 10) ++#define GCOV_COUNTERS 8 ++#elif (__GNUC__ >= 7) + #define GCOV_COUNTERS 9 + #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) + #define GCOV_COUNTERS 10 +diff --git a/kernel/padata.c b/kernel/padata.c +index 4373f7adaa40a..3bc90fec0904c 100644 +--- a/kernel/padata.c ++++ b/kernel/padata.c +@@ -215,12 +215,13 @@ int padata_do_parallel(struct padata_shell *ps, + padata->pd = pd; + padata->cb_cpu = *cb_cpu; + +- rcu_read_unlock_bh(); +- + spin_lock(&padata_works_lock); + padata->seq_nr = ++pd->seq_nr; + pw = padata_work_alloc(); + spin_unlock(&padata_works_lock); ++ ++ rcu_read_unlock_bh(); ++ + if (pw) { + padata_work_init(pw, padata_parallel_worker, padata, 0); + queue_work(pinst->parallel_wq, &pw->pw_work); +diff --git a/kernel/seccomp.c b/kernel/seccomp.c +index c461ba9925136..54cf84bac3c9b 100644 +--- a/kernel/seccomp.c ++++ b/kernel/seccomp.c +@@ -997,13 +997,12 @@ out: + } + + #ifdef CONFIG_SECCOMP_FILTER +-static int seccomp_notify_release(struct inode *inode, struct file *file) ++static void seccomp_notify_detach(struct seccomp_filter *filter) + { +- struct seccomp_filter *filter = file->private_data; + struct seccomp_knotif *knotif; + + if (!filter) +- return 0; ++ return; + + mutex_lock(&filter->notify_lock); + +@@ -1025,6 +1024,13 @@ static int seccomp_notify_release(struct inode *inode, struct file *file) + kfree(filter->notif); + filter->notif = NULL; + mutex_unlock(&filter->notify_lock); ++} ++ ++static int seccomp_notify_release(struct inode *inode, struct file *file) ++{ ++ struct seccomp_filter *filter = file->private_data; ++ ++ seccomp_notify_detach(filter); + __put_seccomp_filter(filter); + return 0; + } +@@ -1358,6 +1364,7 @@ out_put_fd: + listener_f->private_data = NULL; + fput(listener_f); + put_unused_fd(listener); ++ seccomp_notify_detach(prepared); + } else { + fd_install(listener, listener_f); + ret = listener; +diff --git a/lib/kobject.c b/lib/kobject.c +index 3afb939f2a1cc..9dce68c378e61 100644 +--- a/lib/kobject.c ++++ b/lib/kobject.c +@@ -637,8 +637,12 @@ static void __kobject_del(struct kobject *kobj) + */ + void kobject_del(struct kobject *kobj) + { +- struct kobject *parent = kobj->parent; ++ struct kobject *parent; ++ ++ if (!kobj) ++ return; + ++ parent = kobj->parent; + __kobject_del(kobj); + kobject_put(parent); + } +diff --git a/lib/test_firmware.c b/lib/test_firmware.c 
+index 9fee2b93a8d18..06c9550577564 100644 +--- a/lib/test_firmware.c ++++ b/lib/test_firmware.c +@@ -26,6 +26,8 @@ + #include <linux/vmalloc.h> + #include <linux/efi_embedded_fw.h> + ++MODULE_IMPORT_NS(TEST_FIRMWARE); ++ + #define TEST_FIRMWARE_NAME "test-firmware.bin" + #define TEST_FIRMWARE_NUM_REQS 4 + #define TEST_FIRMWARE_BUF_SIZE SZ_1K +@@ -489,6 +491,9 @@ out: + static DEVICE_ATTR_WO(trigger_request); + + #ifdef CONFIG_EFI_EMBEDDED_FIRMWARE ++extern struct list_head efi_embedded_fw_list; ++extern bool efi_embedded_fw_checked; ++ + static ssize_t trigger_request_platform_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +@@ -501,6 +506,7 @@ static ssize_t trigger_request_platform_store(struct device *dev, + }; + struct efi_embedded_fw efi_embedded_fw; + const struct firmware *firmware = NULL; ++ bool saved_efi_embedded_fw_checked; + char *name; + int rc; + +@@ -513,6 +519,8 @@ static ssize_t trigger_request_platform_store(struct device *dev, + efi_embedded_fw.data = (void *)test_data; + efi_embedded_fw.length = sizeof(test_data); + list_add(&efi_embedded_fw.list, &efi_embedded_fw_list); ++ saved_efi_embedded_fw_checked = efi_embedded_fw_checked; ++ efi_embedded_fw_checked = true; + + pr_info("loading '%s'\n", name); + rc = firmware_request_platform(&firmware, name, dev); +@@ -530,6 +538,7 @@ static ssize_t trigger_request_platform_store(struct device *dev, + rc = count; + + out: ++ efi_embedded_fw_checked = saved_efi_embedded_fw_checked; + release_firmware(firmware); + list_del(&efi_embedded_fw.list); + kfree(name); +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h +index 49728047dfad6..f66fcce8e6a45 100644 +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -522,7 +522,7 @@ struct ieee80211_sta_rx_stats { + * @status_stats.retry_failed: # of frames that failed after retry + * @status_stats.retry_count: # of retries attempted + * @status_stats.lost_packets: # of lost packets +- * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet ++ * @status_stats.last_pkt_time: timestamp of last ACKed packet + * @status_stats.msdu_retries: # of MSDU retries + * @status_stats.msdu_failed: # of failed MSDUs + * @status_stats.last_ack: last ack timestamp (jiffies) +@@ -595,7 +595,7 @@ struct sta_info { + unsigned long filtered; + unsigned long retry_failed, retry_count; + unsigned int lost_packets; +- unsigned long last_tdls_pkt_time; ++ unsigned long last_pkt_time; + u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; + u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; + unsigned long last_ack; +diff --git a/net/mac80211/status.c b/net/mac80211/status.c +index cbc40b358ba26..819c4221c284e 100644 +--- a/net/mac80211/status.c ++++ b/net/mac80211/status.c +@@ -755,12 +755,16 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, + * - current throughput (higher value for higher tpt)? + */ + #define STA_LOST_PKT_THRESHOLD 50 ++#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */ + #define STA_LOST_TDLS_PKT_THRESHOLD 10 + #define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */ + + static void ieee80211_lost_packet(struct sta_info *sta, + struct ieee80211_tx_info *info) + { ++ unsigned long pkt_time = STA_LOST_PKT_TIME; ++ unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD; ++ + /* If driver relies on its own algorithm for station kickout, skip + * mac80211 packet loss mechanism. 
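[Editorial illustration, not part of the upstream patch] The status.c hunk that continues just below folds the TDLS and non-TDLS cases of ieee80211_lost_packet() into one gate: report a loss event only once the consecutive-loss counter reaches a threshold and no ACK has been seen within a time window. A standalone sketch of that gating, with both limits passed in rather than hard-coded (the function name is invented):

#include <stdbool.h>
#include <time.h>

/* Report only if we lost at least pkt_thr packets in a row and the last
 * ACK is older than pkt_time seconds. */
static bool should_report_loss(unsigned int lost_packets, unsigned int pkt_thr,
			       time_t last_ack, time_t pkt_time, time_t now)
{
	return lost_packets >= pkt_thr && (now - last_ack) > pkt_time;
}

(The kernel version expresses the same condition with jiffies and time_after() rather than wall-clock seconds.)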
+ */ +@@ -773,21 +777,20 @@ static void ieee80211_lost_packet(struct sta_info *sta, + return; + + sta->status_stats.lost_packets++; +- if (!sta->sta.tdls && +- sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD) +- return; ++ if (sta->sta.tdls) { ++ pkt_time = STA_LOST_TDLS_PKT_TIME; ++ pkt_thr = STA_LOST_PKT_THRESHOLD; ++ } + + /* + * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD + * of the last packets were lost, and that no ACK was received in the + * last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss + * mechanism. ++ * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME + */ +- if (sta->sta.tdls && +- (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD || +- time_before(jiffies, +- sta->status_stats.last_tdls_pkt_time + +- STA_LOST_TDLS_PKT_TIME))) ++ if (sta->status_stats.lost_packets < pkt_thr || ++ !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time)) + return; + + cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, +@@ -1035,9 +1038,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, + sta->status_stats.lost_packets = 0; + + /* Track when last TDLS packet was ACKed */ +- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) +- sta->status_stats.last_tdls_pkt_time = +- jiffies; ++ sta->status_stats.last_pkt_time = jiffies; + } else if (noack_success) { + /* nothing to do here, do not account as lost */ + } else { +@@ -1170,9 +1171,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, + if (sta->status_stats.lost_packets) + sta->status_stats.lost_packets = 0; + +- /* Track when last TDLS packet was ACKed */ +- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) +- sta->status_stats.last_tdls_pkt_time = jiffies; ++ /* Track when last packet was ACKed */ ++ sta->status_stats.last_pkt_time = jiffies; + } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { + return; + } else if (noack_success) { +@@ -1261,8 +1261,7 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw, + if (sta->status_stats.lost_packets) + sta->status_stats.lost_packets = 0; + +- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) +- sta->status_stats.last_tdls_pkt_time = jiffies; ++ sta->status_stats.last_pkt_time = jiffies; + } else { + ieee80211_lost_packet(sta, info); + } +diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c +index 4f897b14b6069..810cca24b3990 100644 +--- a/net/netfilter/nf_conntrack_proto_sctp.c ++++ b/net/netfilter/nf_conntrack_proto_sctp.c +@@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { + [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, + }; + ++#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 ++ + #define sNO SCTP_CONNTRACK_NONE + #define sCL SCTP_CONNTRACK_CLOSED + #define sCW SCTP_CONNTRACK_COOKIE_WAIT +@@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, + u_int32_t offset, count; + unsigned int *timeouts; + unsigned long map[256 / sizeof(unsigned long)] = { 0 }; ++ bool ignore = false; + + if (sctp_error(skb, dataoff, state)) + return -NF_ACCEPT; +@@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, + /* Sec 8.5.1 (D) */ + if (sh->vtag != ct->proto.sctp.vtag[dir]) + goto out_unlock; +- } else if (sch->type == SCTP_CID_HEARTBEAT || +- sch->type == SCTP_CID_HEARTBEAT_ACK) { ++ } else if (sch->type == SCTP_CID_HEARTBEAT) { ++ if (ct->proto.sctp.vtag[dir] == 0) { ++ pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); ++ ct->proto.sctp.vtag[dir] = sh->vtag; ++ } else if (sh->vtag != 
ct->proto.sctp.vtag[dir]) { ++ if (test_bit(SCTP_CID_DATA, map) || ignore) ++ goto out_unlock; ++ ++ ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; ++ ct->proto.sctp.last_dir = dir; ++ ignore = true; ++ continue; ++ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { ++ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; ++ } ++ } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting vtag %x for dir %d\n", + sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { +- pr_debug("Verification tag check failed\n"); +- goto out_unlock; ++ if (test_bit(SCTP_CID_DATA, map) || ignore) ++ goto out_unlock; ++ ++ if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || ++ ct->proto.sctp.last_dir == dir) ++ goto out_unlock; ++ ++ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; ++ ct->proto.sctp.vtag[dir] = sh->vtag; ++ ct->proto.sctp.vtag[!dir] = 0; ++ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { ++ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } + +@@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, + } + spin_unlock_bh(&ct->lock); + ++ /* allow but do not refresh timeout */ ++ if (ignore) ++ return NF_ACCEPT; ++ + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) + timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts; +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c +index b85ce6f0c0a6f..f317ad80cd6bc 100644 +--- a/net/netfilter/nft_set_rbtree.c ++++ b/net/netfilter/nft_set_rbtree.c +@@ -218,11 +218,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + struct nft_rbtree_elem *new, + struct nft_set_ext **ext) + { ++ bool overlap = false, dup_end_left = false, dup_end_right = false; + struct nft_rbtree *priv = nft_set_priv(set); + u8 genmask = nft_genmask_next(net); + struct nft_rbtree_elem *rbe; + struct rb_node *parent, **p; +- bool overlap = false; + int d; + + /* Detect overlaps as we descend the tree. Set the flag in these cases: +@@ -262,6 +262,20 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + * + * which always happen as last step and imply that no further + * overlapping is possible. ++ * ++ * Another special case comes from the fact that start elements matching ++ * an already existing start element are allowed: insertion is not ++ * performed but we return -EEXIST in that case, and the error will be ++ * cleared by the caller if NLM_F_EXCL is not present in the request. ++ * This way, request for insertion of an exact overlap isn't reported as ++ * error to userspace if not desired. ++ * ++ * However, if the existing start matches a pre-existing start, but the ++ * end element doesn't match the corresponding pre-existing end element, ++ * we need to report a partial overlap. This is a local condition that ++ * can be noticed without need for a tracking flag, by checking for a ++ * local duplicated end for a corresponding start, from left and right, ++ * separately. 
+ */ + + parent = NULL; +@@ -281,19 +295,35 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + !nft_set_elem_expired(&rbe->ext) && !*p) + overlap = false; + } else { ++ if (dup_end_left && !*p) ++ return -ENOTEMPTY; ++ + overlap = nft_rbtree_interval_end(rbe) && + nft_set_elem_active(&rbe->ext, + genmask) && + !nft_set_elem_expired(&rbe->ext); ++ ++ if (overlap) { ++ dup_end_right = true; ++ continue; ++ } + } + } else if (d > 0) { + p = &parent->rb_right; + + if (nft_rbtree_interval_end(new)) { ++ if (dup_end_right && !*p) ++ return -ENOTEMPTY; ++ + overlap = nft_rbtree_interval_end(rbe) && + nft_set_elem_active(&rbe->ext, + genmask) && + !nft_set_elem_expired(&rbe->ext); ++ ++ if (overlap) { ++ dup_end_left = true; ++ continue; ++ } + } else if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) { + overlap = nft_rbtree_interval_end(rbe); +@@ -321,6 +351,8 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, + p = &parent->rb_left; + } + } ++ ++ dup_end_left = dup_end_right = false; + } + + if (overlap) +diff --git a/net/wireless/chan.c b/net/wireless/chan.c +index cddf92c5d09ef..7a7cc4ade2b36 100644 +--- a/net/wireless/chan.c ++++ b/net/wireless/chan.c +@@ -10,6 +10,7 @@ + */ + + #include <linux/export.h> ++#include <linux/bitfield.h> + #include <net/cfg80211.h> + #include "core.h" + #include "rdev-ops.h" +@@ -892,6 +893,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, + struct ieee80211_sta_vht_cap *vht_cap; + struct ieee80211_edmg *edmg_cap; + u32 width, control_freq, cap; ++ bool support_80_80 = false; + + if (WARN_ON(!cfg80211_chandef_valid(chandef))) + return false; +@@ -944,9 +946,13 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, + return false; + break; + case NL80211_CHAN_WIDTH_80P80: +- cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; +- if (chandef->chan->band != NL80211_BAND_6GHZ && +- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ++ cap = vht_cap->cap; ++ support_80_80 = ++ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) || ++ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && ++ cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) || ++ u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1; ++ if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80) + return false; + /* fall through */ + case NL80211_CHAN_WIDTH_80: +@@ -966,7 +972,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, + return false; + cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && +- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ++ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ && ++ !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)) + return false; + break; + default: +diff --git a/net/wireless/util.c b/net/wireless/util.c +index 4d3b76f94f55e..a72d2ad6ade8b 100644 +--- a/net/wireless/util.c ++++ b/net/wireless/util.c +@@ -121,11 +121,13 @@ int ieee80211_freq_khz_to_channel(u32 freq) + return (freq - 2407) / 5; + else if (freq >= 4910 && freq <= 4980) + return (freq - 4000) / 5; +- else if (freq < 5945) ++ else if (freq < 5925) + return (freq - 5000) / 5; ++ else if (freq == 5935) ++ return 2; + else if (freq <= 45000) /* DMG band lower limit */ +- /* see 802.11ax D4.1 27.3.22.2 */ +- return (freq - 5940) / 5; ++ /* see 802.11ax D6.1 27.3.22.2 */ ++ return (freq - 5950) / 5; + else if (freq >= 58320 && freq <= 70200) + return (freq - 56160) / 2160; + else +diff --git 
a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c +index 333220f0f8afc..3e9e9ac804f62 100644 +--- a/sound/hda/hdac_device.c ++++ b/sound/hda/hdac_device.c +@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); + void snd_hdac_device_exit(struct hdac_device *codec) + { + pm_runtime_put_noidle(&codec->dev); ++ /* keep balance of runtime PM child_count in parent device */ ++ pm_runtime_set_suspended(&codec->dev); + snd_hdac_bus_remove_device(codec->bus, codec); + kfree(codec->vendor_name); + kfree(codec->chip_name); +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c +index 99aec73491676..1c5114dedda92 100644 +--- a/sound/hda/intel-dsp-config.c ++++ b/sound/hda/intel-dsp-config.c +@@ -54,7 +54,7 @@ static const struct config_entry config_table[] = { + #endif + /* + * Apollolake (Broxton-P) +- * the legacy HDaudio driver is used except on Up Squared (SOF) and ++ * the legacy HDAudio driver is used except on Up Squared (SOF) and + * Chromebooks (SST) + */ + #if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE) +@@ -89,7 +89,7 @@ static const struct config_entry config_table[] = { + }, + #endif + /* +- * Skylake and Kabylake use legacy HDaudio driver except for Google ++ * Skylake and Kabylake use legacy HDAudio driver except for Google + * Chromebooks (SST) + */ + +@@ -135,7 +135,7 @@ static const struct config_entry config_table[] = { + #endif + + /* +- * Geminilake uses legacy HDaudio driver except for Google ++ * Geminilake uses legacy HDAudio driver except for Google + * Chromebooks + */ + /* Geminilake */ +@@ -157,7 +157,7 @@ static const struct config_entry config_table[] = { + + /* + * CoffeeLake, CannonLake, CometLake, IceLake, TigerLake use legacy +- * HDaudio driver except for Google Chromebooks and when DMICs are ++ * HDAudio driver except for Google Chromebooks and when DMICs are + * present. Two cases are required since Coreboot does not expose NHLT + * tables. 
+ * +@@ -391,7 +391,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci) + if (pci->class == 0x040300) + return SND_INTEL_DSP_DRIVER_LEGACY; + if (pci->class != 0x040100 && pci->class != 0x040380) { +- dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDA legacy driver\n", pci->class); ++ dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDAudio legacy driver\n", pci->class); + return SND_INTEL_DSP_DRIVER_LEGACY; + } + +diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c +index 0cc5fad1af8a9..ae40ca3f29837 100644 +--- a/sound/pci/hda/hda_tegra.c ++++ b/sound/pci/hda/hda_tegra.c +@@ -179,6 +179,10 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev) + struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip); + + if (chip && chip->running) { ++ /* enable controller wake up event */ ++ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | ++ STATESTS_INT_MASK); ++ + azx_stop_chip(chip); + azx_enter_link_reset(chip); + } +@@ -200,6 +204,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev) + if (chip && chip->running) { + hda_tegra_init(hda); + azx_init_chip(chip, 1); ++ /* disable controller wake up event*/ ++ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ++ ~STATESTS_INT_MASK); + } + + return 0; +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index fc22bdc30da3e..419f012b9853c 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -3671,6 +3671,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) + + static int patch_tegra_hdmi(struct hda_codec *codec) + { ++ struct hdmi_spec *spec; + int err; + + err = patch_generic_hdmi(codec); +@@ -3678,6 +3679,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) + return err; + + codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; ++ spec = codec->spec; ++ spec->chmap.ops.chmap_cea_alloc_validate_get_type = ++ nvhdmi_chmap_cea_alloc_validate_get_type; ++ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + + return 0; + } +@@ -4200,6 +4205,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), + HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), + HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), + HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi), ++HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi), + HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), + HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), + HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), +diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig +index 77777192f6508..4ffcc5e623c22 100644 +--- a/sound/x86/Kconfig ++++ b/sound/x86/Kconfig +@@ -9,7 +9,7 @@ menuconfig SND_X86 + if SND_X86 + + config HDMI_LPE_AUDIO +- tristate "HDMI audio without HDaudio on Intel Atom platforms" ++ tristate "HDMI audio without HDAudio on Intel Atom platforms" + depends on DRM_I915 + select SND_PCM + help +diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile +index 7656c7ce79d90..0e73a16874c4c 100644 +--- a/tools/testing/selftests/timers/Makefile ++++ b/tools/testing/selftests/timers/Makefile +@@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \ + + TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS) + ++TEST_FILES := settings + + include ../lib.mk + +diff --git 
a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings +new file mode 100644 +index 0000000000000..e7b9417537fbc +--- /dev/null ++++ b/tools/testing/selftests/timers/settings +@@ -0,0 +1 @@ ++timeout=0 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 9e925675a8868..49a877918e2f1 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -4269,7 +4269,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) + { +- int i; ++ int i, j; + struct kvm_io_bus *new_bus, *bus; + + bus = kvm_get_bus(kvm, bus_idx); +@@ -4286,17 +4286,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + + new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1), + GFP_KERNEL_ACCOUNT); +- if (!new_bus) { ++ if (new_bus) { ++ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); ++ new_bus->dev_count--; ++ memcpy(new_bus->range + i, bus->range + i + 1, ++ (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); ++ } else { + pr_err("kvm: failed to shrink bus, removing it completely\n"); +- goto broken; ++ for (j = 0; j < bus->dev_count; j++) { ++ if (j == i) ++ continue; ++ kvm_iodevice_destructor(bus->range[j].dev); ++ } + } + +- memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); +- new_bus->dev_count--; +- memcpy(new_bus->range + i, bus->range + i + 1, +- (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); +- +-broken: + rcu_assign_pointer(kvm->buses[bus_idx], new_bus); + synchronize_srcu_expedited(&kvm->srcu); + kfree(bus); |
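[Editorial illustration, not part of the upstream patch] The kvm_main.c hunk above shrinks the I/O bus by allocating a smaller array and copying the slots before and after the removed device in two memcpy() calls, falling back to destroying the devices only when that allocation fails. The same remove-one-slot idiom in standalone form (struct and names are made up for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct range { int dev; };

/* Return a newly allocated copy of src with element i removed, or NULL. */
static struct range *remove_slot(const struct range *src, size_t count, size_t i)
{
	struct range *dst;

	if (i >= count)
		return NULL;
	dst = malloc((count - 1) * sizeof(*dst));
	if (!dst)
		return NULL;
	memcpy(dst, src, i * sizeof(*src));				/* slots [0, i)      */
	memcpy(dst + i, src + i + 1, (count - 1 - i) * sizeof(*src));	/* slots (i, count)  */
	return dst;
}

int main(void)
{
	struct range bus[] = { {10}, {11}, {12}, {13} };
	struct range *smaller = remove_slot(bus, 4, 1);

	if (smaller) {
		for (size_t j = 0; j < 3; j++)
			printf("%d ", smaller[j].dev);
		printf("\n");		/* prints: 10 12 13 */
		free(smaller);
	}
	return 0;
}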