author | Mike Pagano <mpagano@gentoo.org> | 2020-04-23 07:43:55 -0400
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2020-04-23 07:43:55 -0400
commit | 4904bc11bea920e7d4562f916355fd44ed6fa1b3 (patch) |
tree | f89d789ef9b91d9c759ef9acb0986bb54a1afabd |
parent | Linux patch 4.19.117 (diff) |
download | linux-patches-4904bc11bea920e7d4562f916355fd44ed6fa1b3.tar.gz linux-patches-4904bc11bea920e7d4562f916355fd44ed6fa1b3.tar.bz2 linux-patches-4904bc11bea920e7d4562f916355fd44ed6fa1b3.zip |
Linux patch 4.19.118
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1117_linux-4.19.118.patch | 2272 |
2 files changed, 2276 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 8db7823c..5c6dcb87 100644 --- a/0000_README +++ b/0000_README @@ -507,6 +507,10 @@ Patch: 1116_linux-4.19.117.patch From: https://www.kernel.org Desc: Linux 4.19.117 +Patch: 1117_linux-4.19.118.patch +From: https://www.kernel.org +Desc: Linux 4.19.118 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1117_linux-4.19.118.patch b/1117_linux-4.19.118.patch new file mode 100644 index 00000000..53446f12 --- /dev/null +++ b/1117_linux-4.19.118.patch @@ -0,0 +1,2272 @@ +diff --git a/Makefile b/Makefile +index 555dbaab7bad..72ae7e879077 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 19 +-SUBLEVEL = 117 ++SUBLEVEL = 118 + EXTRAVERSION = + NAME = "People's Front" + +diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi +index 00d44a60972f..e64ff80c83c5 100644 +--- a/arch/arm/boot/dts/imx6qdl.dtsi ++++ b/arch/arm/boot/dts/imx6qdl.dtsi +@@ -1013,9 +1013,8 @@ + compatible = "fsl,imx6q-fec"; + reg = <0x02188000 0x4000>; + interrupt-names = "int0", "pps"; +- interrupts-extended = +- <&intc 0 118 IRQ_TYPE_LEVEL_HIGH>, +- <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>; ++ interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, ++ <0 119 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks IMX6QDL_CLK_ENET>, + <&clks IMX6QDL_CLK_ENET>, + <&clks IMX6QDL_CLK_ENET_REF>; +diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi +index 5f51f8e5c1fa..d91f92f944c5 100644 +--- a/arch/arm/boot/dts/imx6qp.dtsi ++++ b/arch/arm/boot/dts/imx6qp.dtsi +@@ -77,7 +77,6 @@ + }; + + &fec { +- /delete-property/interrupts-extended; + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>, + <0 119 IRQ_TYPE_LEVEL_HIGH>; + }; +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c +index 25b3ee85066e..328ced7bfaf2 100644 +--- a/arch/arm/net/bpf_jit_32.c ++++ b/arch/arm/net/bpf_jit_32.c +@@ -930,7 +930,11 @@ static inline void emit_a32_rsh_i64(const s8 dst[], + rd = arm_bpf_get_reg64(dst, tmp, ctx); + + /* Do LSR operation */ +- if (val < 32) { ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for LSR. To shift by 0, don't do anything. ++ */ ++ } else if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); +@@ -956,7 +960,11 @@ static inline void emit_a32_arsh_i64(const s8 dst[], + rd = arm_bpf_get_reg64(dst, tmp, ctx); + + /* Do ARSH operation */ +- if (val < 32) { ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for ASR. To shift by 0, don't do anything. ++ */ ++ } else if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); +@@ -993,21 +1001,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], + arm_bpf_put_reg32(dst_hi, rd[0], ctx); + } + ++static bool is_ldst_imm(s16 off, const u8 size) ++{ ++ s16 off_max = 0; ++ ++ switch (size) { ++ case BPF_B: ++ case BPF_W: ++ off_max = 0xfff; ++ break; ++ case BPF_H: ++ off_max = 0xff; ++ break; ++ case BPF_DW: ++ /* Need to make sure off+4 does not overflow. 
*/ ++ off_max = 0xfff - 4; ++ break; ++ } ++ return -off_max <= off && off <= off_max; ++} ++ + /* *(size *)(dst + off) = src */ + static inline void emit_str_r(const s8 dst, const s8 src[], +- s32 off, struct jit_ctx *ctx, const u8 sz){ ++ s16 off, struct jit_ctx *ctx, const u8 sz){ + const s8 *tmp = bpf2a32[TMP_REG_1]; +- s32 off_max; + s8 rd; + + rd = arm_bpf_get_reg32(dst, tmp[1], ctx); + +- if (sz == BPF_H) +- off_max = 0xff; +- else +- off_max = 0xfff; +- +- if (off < 0 || off > off_max) { ++ if (!is_ldst_imm(off, sz)) { + emit_a32_mov_i(tmp[0], off, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); + rd = tmp[0]; +@@ -1036,18 +1058,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[], + + /* dst = *(size*)(src + off) */ + static inline void emit_ldx_r(const s8 dst[], const s8 src, +- s32 off, struct jit_ctx *ctx, const u8 sz){ ++ s16 off, struct jit_ctx *ctx, const u8 sz){ + const s8 *tmp = bpf2a32[TMP_REG_1]; + const s8 *rd = is_stacked(dst_lo) ? tmp : dst; + s8 rm = src; +- s32 off_max; +- +- if (sz == BPF_H) +- off_max = 0xff; +- else +- off_max = 0xfff; + +- if (off < 0 || off > off_max) { ++ if (!is_ldst_imm(off, sz)) { + emit_a32_mov_i(tmp[0], off, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); + rm = tmp[0]; +diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c +index b7f937563827..d1fee2d35b49 100644 +--- a/arch/powerpc/platforms/maple/setup.c ++++ b/arch/powerpc/platforms/maple/setup.c +@@ -299,23 +299,6 @@ static int __init maple_probe(void) + return 1; + } + +-define_machine(maple) { +- .name = "Maple", +- .probe = maple_probe, +- .setup_arch = maple_setup_arch, +- .init_IRQ = maple_init_IRQ, +- .pci_irq_fixup = maple_pci_irq_fixup, +- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, +- .restart = maple_restart, +- .halt = maple_halt, +- .get_boot_time = maple_get_boot_time, +- .set_rtc_time = maple_set_rtc_time, +- .get_rtc_time = maple_get_rtc_time, +- .calibrate_decr = generic_calibrate_decr, +- .progress = maple_progress, +- .power_save = power4_idle, +-}; +- + #ifdef CONFIG_EDAC + /* + * Register a platform device for CPC925 memory controller on +@@ -372,3 +355,20 @@ static int __init maple_cpc925_edac_setup(void) + } + machine_device_initcall(maple, maple_cpc925_edac_setup); + #endif ++ ++define_machine(maple) { ++ .name = "Maple", ++ .probe = maple_probe, ++ .setup_arch = maple_setup_arch, ++ .init_IRQ = maple_init_IRQ, ++ .pci_irq_fixup = maple_pci_irq_fixup, ++ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq, ++ .restart = maple_restart, ++ .halt = maple_halt, ++ .get_boot_time = maple_get_boot_time, ++ .set_rtc_time = maple_set_rtc_time, ++ .get_rtc_time = maple_get_rtc_time, ++ .calibrate_decr = generic_calibrate_decr, ++ .progress = maple_progress, ++ .power_save = power4_idle, ++}; +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c +index 5bfb1ce129f4..74a296cea21c 100644 +--- a/arch/s390/kernel/perf_cpum_sf.c ++++ b/arch/s390/kernel/perf_cpum_sf.c +@@ -1537,6 +1537,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw) + perf_aux_output_end(handle, size); + num_sdb = aux->sfb.num_sdb; + ++ num_sdb = aux->sfb.num_sdb; + while (!done) { + /* Get an output handle */ + aux = perf_aux_output_begin(handle, cpuhw->event); +diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c +index 6fe2e1875058..675d4be0c2b7 100644 +--- a/arch/s390/kernel/processor.c ++++ b/arch/s390/kernel/processor.c +@@ -157,8 +157,9 @@ static void show_cpu_mhz(struct seq_file *m, 
unsigned long n) + static int show_cpuinfo(struct seq_file *m, void *v) + { + unsigned long n = (unsigned long) v - 1; ++ unsigned long first = cpumask_first(cpu_online_mask); + +- if (!n) ++ if (n == first) + show_cpu_summary(m, v); + if (!machine_has_cpu_mhz) + return 0; +@@ -171,6 +172,8 @@ static inline void *c_update(loff_t *pos) + { + if (*pos) + *pos = cpumask_next(*pos - 1, cpu_online_mask); ++ else ++ *pos = cpumask_first(cpu_online_mask); + return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL; + } + +diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c +index b56c4fdb1517..7cde0f2f52e1 100644 +--- a/arch/s390/mm/gmap.c ++++ b/arch/s390/mm/gmap.c +@@ -1838,6 +1838,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, + goto out_free; + } else if (*table & _REGION_ENTRY_ORIGIN) { + rc = -EAGAIN; /* Race with shadow */ ++ goto out_free; + } + crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY); + /* mark as invalid as long as the parent table is not protected */ +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c +index 83c470364dfb..748bd0921dff 100644 +--- a/arch/um/drivers/ubd_kern.c ++++ b/arch/um/drivers/ubd_kern.c +@@ -1574,7 +1574,9 @@ int io_thread(void *arg) + written = 0; + + do { +- res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n); ++ res = os_write_file(kernel_fd, ++ ((char *) io_req_buffer) + written, ++ n - written); + if (res >= 0) { + written += res; + } else { +diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c +index 8a9cff1f129d..1663ad84778b 100644 +--- a/arch/x86/hyperv/hv_init.c ++++ b/arch/x86/hyperv/hv_init.c +@@ -30,6 +30,7 @@ + #include <linux/clockchips.h> + #include <linux/hyperv.h> + #include <linux/slab.h> ++#include <linux/kernel.h> + #include <linux/cpuhotplug.h> + + #ifdef CONFIG_HYPERV_TSCPAGE +@@ -427,11 +428,14 @@ void hyperv_cleanup(void) + } + EXPORT_SYMBOL_GPL(hyperv_cleanup); + +-void hyperv_report_panic(struct pt_regs *regs, long err) ++void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die) + { + static bool panic_reported; + u64 guest_id; + ++ if (in_die && !panic_on_oops) ++ return; ++ + /* + * We prefer to report panic on 'die' chain as we have proper + * registers to report, but if we miss it (e.g. 
on BUG()) we need +diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h +index f37704497d8f..5b58a6cf487f 100644 +--- a/arch/x86/include/asm/mshyperv.h ++++ b/arch/x86/include/asm/mshyperv.h +@@ -338,7 +338,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, + + void __init hyperv_init(void); + void hyperv_setup_mmu_ops(void); +-void hyperv_report_panic(struct pt_regs *regs, long err); ++void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); + void hyperv_report_panic_msg(phys_addr_t pa, size_t size); + bool hv_is_hyperv_initialized(void); + void hyperv_cleanup(void); +diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c +index 158ad1483c43..92539a1c3e31 100644 +--- a/arch/x86/kernel/acpi/cstate.c ++++ b/arch/x86/kernel/acpi/cstate.c +@@ -133,7 +133,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, + + /* Make sure we are running on right CPU */ + +- retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); ++ retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx, ++ false); + if (retval == 0) { + /* Use the hint in CST */ + percpu_entry->states[cx->index].eax = cx->address; +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c +index 852e74e48890..fc93ae325515 100644 +--- a/arch/x86/kernel/cpu/mshyperv.c ++++ b/arch/x86/kernel/cpu/mshyperv.c +@@ -250,6 +250,16 @@ static void __init ms_hyperv_init_platform(void) + cpuid_eax(HYPERV_CPUID_NESTED_FEATURES); + } + ++ /* ++ * Hyper-V expects to get crash register data or kmsg when ++ * crash enlightment is available and system crashes. Set ++ * crash_kexec_post_notifiers to be true to make sure that ++ * calling crash enlightment interface before running kdump ++ * kernel. ++ */ ++ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) ++ crash_kexec_post_notifiers = true; ++ + #ifdef CONFIG_X86_LOCAL_APIC + if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && + ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) { +diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c +index fbc936cf2025..62c0fe9ef412 100644 +--- a/drivers/acpi/processor_throttling.c ++++ b/drivers/acpi/processor_throttling.c +@@ -910,13 +910,6 @@ static long __acpi_processor_get_throttling(void *data) + return pr->throttling.acpi_processor_get_throttling(pr); + } + +-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) +-{ +- if (direct || (is_percpu_thread() && cpu == smp_processor_id())) +- return fn(arg); +- return work_on_cpu(cpu, fn, arg); +-} +- + static int acpi_processor_get_throttling(struct acpi_processor *pr) + { + if (!pr) +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index d3ad1b8c133e..110129097169 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -3427,6 +3427,10 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) + cancel_work_sync(&rbd_dev->unlock_work); + } + ++/* ++ * header_rwsem must not be held to avoid a deadlock with ++ * rbd_dev_refresh() when flushing notifies. 
++ */ + static void rbd_unregister_watch(struct rbd_device *rbd_dev) + { + WARN_ON(waitqueue_active(&rbd_dev->lock_waitq)); +@@ -5719,9 +5723,10 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) + + static void rbd_dev_image_release(struct rbd_device *rbd_dev) + { +- rbd_dev_unprobe(rbd_dev); + if (rbd_dev->opts) + rbd_unregister_watch(rbd_dev); ++ ++ rbd_dev_unprobe(rbd_dev); + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); + rbd_dev->spec->image_id = NULL; +@@ -5732,6 +5737,9 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) + * device. If this image is the one being mapped (i.e., not a + * parent), initiate a watch on its header object before using that + * object to get detailed information about the rbd image. ++ * ++ * On success, returns with header_rwsem held for write if called ++ * with @depth == 0. + */ + static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + { +@@ -5764,9 +5772,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + } + } + ++ if (!depth) ++ down_write(&rbd_dev->header_rwsem); ++ + ret = rbd_dev_header_info(rbd_dev); + if (ret) +- goto err_out_watch; ++ goto err_out_probe; + + /* + * If this image is the one being mapped, we have pool name and +@@ -5812,10 +5823,11 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) + return 0; + + err_out_probe: +- rbd_dev_unprobe(rbd_dev); +-err_out_watch: ++ if (!depth) ++ up_write(&rbd_dev->header_rwsem); + if (!depth) + rbd_unregister_watch(rbd_dev); ++ rbd_dev_unprobe(rbd_dev); + err_out_format: + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); +@@ -5872,12 +5884,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, + goto err_out_rbd_dev; + } + +- down_write(&rbd_dev->header_rwsem); + rc = rbd_dev_image_probe(rbd_dev, 0); +- if (rc < 0) { +- up_write(&rbd_dev->header_rwsem); ++ if (rc < 0) + goto err_out_rbd_dev; +- } + + /* If we are mapping a snapshot it must be marked read-only */ + if (rbd_dev->spec->snap_id != CEPH_NOSNAP) +diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c +index 791770a563fc..6fac6383d024 100644 +--- a/drivers/clk/at91/clk-usb.c ++++ b/drivers/clk/at91/clk-usb.c +@@ -78,6 +78,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, + tmp_parent_rate = req->rate * div; + tmp_parent_rate = clk_hw_round_rate(parent, + tmp_parent_rate); ++ if (!tmp_parent_rate) ++ continue; ++ + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); + if (tmp_rate < req->rate) + tmp_diff = req->rate - tmp_rate; +diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c +index a35579a3f884..476dab494c44 100644 +--- a/drivers/clk/tegra/clk-tegra-pmc.c ++++ b/drivers/clk/tegra/clk-tegra-pmc.c +@@ -60,16 +60,16 @@ struct pmc_clk_init_data { + + static DEFINE_SPINLOCK(clk_out_lock); + +-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern1", ++static const char *clk_out1_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern1", + }; + +-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern2", ++static const char *clk_out2_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern2", + }; + +-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2", +- "clk_m_div4", "extern3", ++static const char *clk_out3_parents[] = { "osc", "osc_div2", ++ "osc_div4", "extern3", + }; + + static struct pmc_clk_init_data pmc_clks[] = { +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 938d0053a820..28022d1cb0f0 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -921,9 +921,9 @@ kfd_gtt_out: + return 0; + + kfd_gtt_no_free_chunk: +- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); ++ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); + mutex_unlock(&kfd->gtt_sa_lock); +- kfree(mem_obj); ++ kfree(*mem_obj); + return -ENOMEM; + } + +diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c +index fd5522fd179e..86b98856756d 100644 +--- a/drivers/gpu/drm/vc4/vc4_hdmi.c ++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c +@@ -698,11 +698,23 @@ static enum drm_mode_status + vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, + const struct drm_display_mode *mode) + { +- /* HSM clock must be 108% of the pixel clock. Additionally, +- * the AXI clock needs to be at least 25% of pixel clock, but +- * HSM ends up being the limiting factor. ++ /* ++ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must ++ * be faster than pixel clock, infinitesimally faster, tested in ++ * simulation. Otherwise, exact value is unimportant for HDMI ++ * operation." This conflicts with bcm2835's vc4 documentation, which ++ * states HSM's clock has to be at least 108% of the pixel clock. ++ * ++ * Real life tests reveal that vc4's firmware statement holds up, and ++ * users are able to use pixel clocks closer to HSM's, namely for ++ * 1920x1200@60Hz. So it was decided to have leave a 1% margin between ++ * both clocks. Which, for RPi0-3 implies a maximum pixel clock of ++ * 162MHz. ++ * ++ * Additionally, the AXI clock needs to be at least 25% of ++ * pixel clock, but HSM ends up being the limiting factor. + */ +- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100)) ++ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index 16eb9b3f1cb1..3bf1f9ef8ea2 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -849,6 +849,9 @@ void vmbus_initiate_unload(bool crash) + { + struct vmbus_channel_message_header hdr; + ++ if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED) ++ return; ++ + /* Pre-Win2012R2 hosts don't support reconnect */ + if (vmbus_proto_version < VERSION_WIN8_1) + return; +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c +index 9aa18f387a34..fb22b72fd535 100644 +--- a/drivers/hv/vmbus_drv.c ++++ b/drivers/hv/vmbus_drv.c +@@ -43,6 +43,7 @@ + #include <linux/kdebug.h> + #include <linux/efi.h> + #include <linux/random.h> ++#include <linux/kernel.h> + #include "hyperv_vmbus.h" + + struct vmbus_dynid { +@@ -58,14 +59,35 @@ static int hyperv_cpuhp_online; + + static void *hv_panic_page; + ++/* ++ * Boolean to control whether to report panic messages over Hyper-V. ++ * ++ * It can be set via /proc/sys/kernel/hyperv/record_panic_msg ++ */ ++static int sysctl_record_panic_msg = 1; ++ ++static int hyperv_report_reg(void) ++{ ++ return !sysctl_record_panic_msg || !hv_panic_page; ++} ++ + static int hyperv_panic_event(struct notifier_block *nb, unsigned long val, + void *args) + { + struct pt_regs *regs; + +- regs = current_pt_regs(); ++ vmbus_initiate_unload(true); + +- hyperv_report_panic(regs, val); ++ /* ++ * Hyper-V should be notified only once about a panic. If we will be ++ * doing hyperv_report_panic_msg() later with kmsg data, don't do ++ * the notification here. 
++ */ ++ if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE ++ && hyperv_report_reg()) { ++ regs = current_pt_regs(); ++ hyperv_report_panic(regs, val, false); ++ } + return NOTIFY_DONE; + } + +@@ -75,7 +97,13 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, + struct die_args *die = (struct die_args *)args; + struct pt_regs *regs = die->regs; + +- hyperv_report_panic(regs, val); ++ /* ++ * Hyper-V should be notified only once about a panic. If we will be ++ * doing hyperv_report_panic_msg() later with kmsg data, don't do ++ * the notification here. ++ */ ++ if (hyperv_report_reg()) ++ hyperv_report_panic(regs, val, true); + return NOTIFY_DONE; + } + +@@ -1088,13 +1116,6 @@ static void vmbus_isr(void) + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); + } + +-/* +- * Boolean to control whether to report panic messages over Hyper-V. +- * +- * It can be set via /proc/sys/kernel/hyperv/record_panic_msg +- */ +-static int sysctl_record_panic_msg = 1; +- + /* + * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg + * buffer and call into Hyper-V to transfer the data. +@@ -1219,19 +1240,29 @@ static int vmbus_bus_init(void) + hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL); + if (hv_panic_page) { + ret = kmsg_dump_register(&hv_kmsg_dumper); +- if (ret) ++ if (ret) { + pr_err("Hyper-V: kmsg dump register " + "error 0x%x\n", ret); ++ free_page( ++ (unsigned long)hv_panic_page); ++ hv_panic_page = NULL; ++ } + } else + pr_err("Hyper-V: panic message page memory " + "allocation failed"); + } + + register_die_notifier(&hyperv_die_block); +- atomic_notifier_chain_register(&panic_notifier_list, +- &hyperv_panic_block); + } + ++ /* ++ * Always register the panic notifier because we need to unload ++ * the VMbus channel connection to prevent any VMbus ++ * activity after the VM panics. ++ */ ++ atomic_notifier_chain_register(&panic_notifier_list, ++ &hyperv_panic_block); ++ + vmbus_request_offers(); + + return 0; +@@ -1243,7 +1274,6 @@ err_alloc: + hv_remove_vmbus_irq(); + + bus_unregister(&hv_bus); +- free_page((unsigned long)hv_panic_page); + unregister_sysctl_table(hv_ctl_table_hdr); + hv_ctl_table_hdr = NULL; + return ret; +@@ -1875,7 +1905,6 @@ static void hv_kexec_handler(void) + { + hv_synic_clockevents_cleanup(); + vmbus_initiate_unload(false); +- vmbus_connection.conn_state = DISCONNECTED; + /* Make sure conn_state is set as hv_synic_cleanup checks for it */ + mb(); + cpuhp_remove_state(hyperv_cpuhp_online); +@@ -1890,7 +1919,6 @@ static void hv_crash_handler(struct pt_regs *regs) + * doing the cleanup for current CPU only. This should be sufficient + * for kdump. 
+ */ +- vmbus_connection.conn_state = DISCONNECTED; + hv_synic_cleanup(smp_processor_id()); + hyperv_cleanup(); + }; +diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c +index 015a21f0c2ef..9174ab928880 100644 +--- a/drivers/iio/light/si1133.c ++++ b/drivers/iio/light/si1133.c +@@ -102,6 +102,9 @@ + #define SI1133_INPUT_FRACTION_LOW 15 + #define SI1133_LUX_OUTPUT_FRACTION 12 + #define SI1133_LUX_BUFFER_SIZE 9 ++#define SI1133_MEASURE_BUFFER_SIZE 3 ++ ++#define SI1133_SIGN_BIT_INDEX 23 + + static const int si1133_scale_available[] = { + 1, 2, 4, 8, 16, 32, 64, 128}; +@@ -234,13 +237,13 @@ static const struct si1133_lux_coeff lux_coeff = { + } + }; + +-static int si1133_calculate_polynomial_inner(u32 input, u8 fraction, u16 mag, ++static int si1133_calculate_polynomial_inner(s32 input, u8 fraction, u16 mag, + s8 shift) + { + return ((input << fraction) / mag) << shift; + } + +-static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, ++static int si1133_calculate_output(s32 x, s32 y, u8 x_order, u8 y_order, + u8 input_fraction, s8 sign, + const struct si1133_coeff *coeffs) + { +@@ -276,7 +279,7 @@ static int si1133_calculate_output(u32 x, u32 y, u8 x_order, u8 y_order, + * The algorithm is from: + * https://siliconlabs.github.io/Gecko_SDK_Doc/efm32zg/html/si1133_8c_source.html#l00716 + */ +-static int si1133_calc_polynomial(u32 x, u32 y, u8 input_fraction, u8 num_coeff, ++static int si1133_calc_polynomial(s32 x, s32 y, u8 input_fraction, u8 num_coeff, + const struct si1133_coeff *coeffs) + { + u8 x_order, y_order; +@@ -614,7 +617,7 @@ static int si1133_measure(struct si1133_data *data, + { + int err; + +- __be16 resp; ++ u8 buffer[SI1133_MEASURE_BUFFER_SIZE]; + + err = si1133_set_adcmux(data, 0, chan->channel); + if (err) +@@ -625,12 +628,13 @@ static int si1133_measure(struct si1133_data *data, + if (err) + return err; + +- err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(resp), +- (u8 *)&resp); ++ err = si1133_bulk_read(data, SI1133_REG_HOSTOUT(0), sizeof(buffer), ++ buffer); + if (err) + return err; + +- *val = be16_to_cpu(resp); ++ *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], ++ SI1133_SIGN_BIT_INDEX); + + return err; + } +@@ -704,9 +708,9 @@ static int si1133_get_lux(struct si1133_data *data, int *val) + { + int err; + int lux; +- u32 high_vis; +- u32 low_vis; +- u32 ir; ++ s32 high_vis; ++ s32 low_vis; ++ s32 ir; + u8 buffer[SI1133_LUX_BUFFER_SIZE]; + + /* Activate lux channels */ +@@ -719,9 +723,16 @@ static int si1133_get_lux(struct si1133_data *data, int *val) + if (err) + return err; + +- high_vis = (buffer[0] << 16) | (buffer[1] << 8) | buffer[2]; +- low_vis = (buffer[3] << 16) | (buffer[4] << 8) | buffer[5]; +- ir = (buffer[6] << 16) | (buffer[7] << 8) | buffer[8]; ++ high_vis = ++ sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2], ++ SI1133_SIGN_BIT_INDEX); ++ ++ low_vis = ++ sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5], ++ SI1133_SIGN_BIT_INDEX); ++ ++ ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8], ++ SI1133_SIGN_BIT_INDEX); + + if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD) + lux = si1133_calc_polynomial(high_vis, ir, +diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h +index 69f3d4c95b53..859b06424e5c 100644 +--- a/drivers/iommu/amd_iommu_types.h ++++ b/drivers/iommu/amd_iommu_types.h +@@ -352,7 +352,7 @@ + + #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) + #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 
0x0ffffULL) +-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) ++#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) + + #define DTE_GCR3_INDEX_A 0 + #define DTE_GCR3_INDEX_B 1 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index 5944d3b4dca3..ef3aadec980e 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -620,14 +620,15 @@ static irqreturn_t prq_event_thread(int irq, void *d) + * any faults on kernel addresses. */ + if (!svm->mm) + goto bad_req; +- /* If the mm is already defunct, don't handle faults. */ +- if (!mmget_not_zero(svm->mm)) +- goto bad_req; + + /* If address is not canonical, return invalid response */ + if (!is_canonical_address(address)) + goto bad_req; + ++ /* If the mm is already defunct, don't handle faults. */ ++ if (!mmget_not_zero(svm->mm)) ++ goto bad_req; ++ + down_read(&svm->mm->mmap_sem); + vma = find_extend_vma(svm->mm, address); + if (!vma || address < vma->vm_start) +diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c +index f7fdbf5d183b..c98358be0bc8 100644 +--- a/drivers/irqchip/irq-mbigen.c ++++ b/drivers/irqchip/irq-mbigen.c +@@ -231,10 +231,16 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain, + return 0; + } + ++static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ platform_msi_domain_free(domain, virq, nr_irqs); ++} ++ + static const struct irq_domain_ops mbigen_domain_ops = { + .translate = mbigen_domain_translate, + .alloc = mbigen_irq_domain_alloc, +- .free = irq_domain_free_irqs_common, ++ .free = mbigen_irq_domain_free, + }; + + static int mbigen_of_create_domain(struct platform_device *pdev, +diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c +index 9ee04b5f9311..5a04ff638688 100644 +--- a/drivers/mtd/devices/phram.c ++++ b/drivers/mtd/devices/phram.c +@@ -240,22 +240,25 @@ static int phram_setup(const char *val) + + ret = parse_num64(&start, token[1]); + if (ret) { +- kfree(name); + parse_err("illegal start address\n"); ++ goto error; + } + + ret = parse_num64(&len, token[2]); + if (ret) { +- kfree(name); + parse_err("illegal device length\n"); ++ goto error; + } + + ret = register_device(name, start, len); +- if (!ret) +- pr_info("%s device: %#llx at %#llx\n", name, len, start); +- else +- kfree(name); ++ if (ret) ++ goto error; ++ ++ pr_info("%s device: %#llx at %#llx\n", name, len, start); ++ return 0; + ++error: ++ kfree(name); + return ret; + } + +diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c +index b13557fe52bd..947bb710bf16 100644 +--- a/drivers/mtd/lpddr/lpddr_cmds.c ++++ b/drivers/mtd/lpddr/lpddr_cmds.c +@@ -81,7 +81,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) + shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared), + GFP_KERNEL); + if (!shared) { +- kfree(lpddr); + kfree(mtd); + return NULL; + } +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c +index ee0c74b02220..a2f38b3b9776 100644 +--- a/drivers/mtd/nand/spi/core.c ++++ b/drivers/mtd/nand/spi/core.c +@@ -670,6 +670,7 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos) + .ooboffs = 0, + .ooblen = sizeof(marker), + .oobbuf.out = marker, ++ .mode = MTD_OPS_RAW, + }; + int ret; + +diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c +index 21db1804e85d..12156ab186a1 100644 +--- a/drivers/net/dsa/bcm_sf2_cfp.c ++++ b/drivers/net/dsa/bcm_sf2_cfp.c +@@ -742,17 +742,14 @@ static int 
bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + fs->m_ext.data[1])) + return -EINVAL; + +- if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES) ++ if (fs->location != RX_CLS_LOC_ANY && ++ fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + +- if (fs->location != RX_CLS_LOC_ANY && +- fs->location > bcm_sf2_cfp_rule_size(priv)) +- return -EINVAL; +- + /* This rule is a Wake-on-LAN filter and we must specifically + * target the CPU port in order for it to be working. + */ +@@ -839,7 +836,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, + u32 next_loc = 0; + int ret; + +- if (loc >= CFP_NUM_RULES) ++ if (loc > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + /* Refuse deleting unused rules, and those that are not unique since +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c +index 54a633e8cb5d..48a070a37ea9 100644 +--- a/drivers/nvdimm/bus.c ++++ b/drivers/nvdimm/bus.c +@@ -984,8 +984,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, + return -EFAULT; + } + +- if (!desc || (desc->out_num + desc->in_num == 0) || +- !test_bit(cmd, &cmd_mask)) ++ if (!desc || ++ (desc->out_num + desc->in_num == 0) || ++ cmd > ND_CMD_CALL || ++ !test_bit(cmd, &cmd_mask)) + return -ENOTTY; + + /* fail write commands (when read-only) */ +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c +index 514528b3566f..a77bfeac867d 100644 +--- a/drivers/of/overlay.c ++++ b/drivers/of/overlay.c +@@ -261,6 +261,8 @@ static struct property *dup_and_fixup_symbol_prop( + + of_property_set_flag(new_prop, OF_DYNAMIC); + ++ kfree(target_path); ++ + return new_prop; + + err_free_new_prop: +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c +index 808571f7f6ef..29f17c3449aa 100644 +--- a/drivers/of/unittest.c ++++ b/drivers/of/unittest.c +@@ -772,6 +772,10 @@ static void __init of_unittest_changeset(void) + unittest(!of_changeset_revert(&chgset), "revert failed\n"); + + of_changeset_destroy(&chgset); ++ ++ of_node_put(n1); ++ of_node_put(n2); ++ of_node_put(n21); + #endif + } + +@@ -1055,10 +1059,13 @@ static void __init of_unittest_platform_populate(void) + + of_platform_populate(np, match, NULL, &test_bus->dev); + for_each_child_of_node(np, child) { +- for_each_child_of_node(child, grandchild) +- unittest(of_find_device_by_node(grandchild), ++ for_each_child_of_node(child, grandchild) { ++ pdev = of_find_device_by_node(grandchild); ++ unittest(pdev, + "Could not create device for node '%pOFn'\n", + grandchild); ++ of_dev_put(pdev); ++ } + } + + of_platform_depopulate(&test_bus->dev); +@@ -2441,8 +2448,11 @@ static __init void of_unittest_overlay_high_level(void) + goto err_unlock; + } + if (__of_add_property(of_symbols, new_prop)) { ++ kfree(new_prop->name); ++ kfree(new_prop->value); ++ kfree(new_prop); + /* "name" auto-generated by unflatten */ +- if (!strcmp(new_prop->name, "name")) ++ if (!strcmp(prop->name, "name")) + continue; + unittest(0, "duplicate property '%s' in overlay_base node __symbols__", + prop->name); +diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c +index ab0b6e78ca02..157cf5ec6b02 100644 +--- a/drivers/power/supply/axp288_fuel_gauge.c ++++ b/drivers/power/supply/axp288_fuel_gauge.c +@@ -718,14 +718,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { + { + /* Intel Cherry Trail Compute Stick, Windows version */ + .matches = { +- 
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"), + }, + }, + { + /* Intel Cherry Trail Compute Stick, version without an OS */ + .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), + }, + }, +diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c +index f022e1b550df..ff02a917556a 100644 +--- a/drivers/power/supply/bq27xxx_battery.c ++++ b/drivers/power/supply/bq27xxx_battery.c +@@ -1887,7 +1887,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) + + di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); + if (IS_ERR(di->bat)) { +- dev_err(di->dev, "failed to register battery\n"); ++ if (PTR_ERR(di->bat) == -EPROBE_DEFER) ++ dev_dbg(di->dev, "failed to register battery, deferring probe\n"); ++ else ++ dev_err(di->dev, "failed to register battery\n"); + return PTR_ERR(di->bat); + } + +diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c +index 73697e4b18a9..9d4a59aa29a1 100644 +--- a/drivers/rtc/rtc-88pm860x.c ++++ b/drivers/rtc/rtc-88pm860x.c +@@ -341,6 +341,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev) + info->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, info); + ++ info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); ++ if (IS_ERR(info->rtc_dev)) ++ return PTR_ERR(info->rtc_dev); ++ + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, + rtc_update_handler, IRQF_ONESHOT, "rtc", + info); +@@ -382,13 +386,11 @@ static int pm860x_rtc_probe(struct platform_device *pdev) + } + } + +- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", +- &pm860x_rtc_ops, THIS_MODULE); +- ret = PTR_ERR(info->rtc_dev); +- if (IS_ERR(info->rtc_dev)) { +- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); ++ info->rtc_dev->ops = &pm860x_rtc_ops; ++ ++ ret = rtc_register_device(info->rtc_dev); ++ if (ret) + return ret; +- } + + /* + * enable internal XO instead of internal 3.25MHz clock since it can +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 8a254bb46a9b..ac8535d2b41a 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -808,8 +808,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, + "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", + (int) cmnd[0], (int) hp->cmd_len)); + +- if (hp->dxfer_len >= SZ_256M) ++ if (hp->dxfer_len >= SZ_256M) { ++ sg_remove_request(sfp, srp); + return -EINVAL; ++ } + + k = sg_start_req(srp, cmnd); + if (k) { +diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c +index d160fc2a7b7a..56c019ec7f14 100644 +--- a/drivers/soc/imx/gpc.c ++++ b/drivers/soc/imx/gpc.c +@@ -93,8 +93,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) + static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) + { + struct imx_pm_domain *pd = to_imx_pm_domain(genpd); +- int i, ret, sw, sw2iso; +- u32 val; ++ int i, ret; ++ u32 val, req; + + if (pd->supply) { + ret = regulator_enable(pd->supply); +@@ -113,17 +113,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) + regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, + 0x1, 0x1); + +- /* Read ISO and ISO2SW power up delays */ +- regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); +- sw = val & 0x3f; +- sw2iso = (val >> 8) & 0x3f; +- + /* Request GPC to power up domain */ +- val = BIT(pd->cntr_pdn_bit + 1); +- 
regmap_update_bits(pd->regmap, GPC_CNTR, val, val); ++ req = BIT(pd->cntr_pdn_bit + 1); ++ regmap_update_bits(pd->regmap, GPC_CNTR, req, req); + +- /* Wait ISO + ISO2SW IPG clock cycles */ +- udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); ++ /* Wait for the PGC to handle the request */ ++ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req), ++ 1, 50); ++ if (ret) ++ pr_err("powerup request on domain %s timed out\n", genpd->name); ++ ++ /* Wait for reset to propagate through peripherals */ ++ usleep_range(5, 10); + + /* Disable reset clocks for all devices in the domain */ + for (i = 0; i < pd->num_clks; i++) +@@ -345,6 +346,7 @@ static const struct regmap_config imx_gpc_regmap_config = { + .rd_table = &access_table, + .wr_table = &access_table, + .max_register = 0x2ac, ++ .fast_io = true, + }; + + static struct generic_pm_domain *imx_gpc_onecell_domains[] = { +diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c +index eea4049b5dcc..ca5004ae3024 100644 +--- a/drivers/tty/ehv_bytechan.c ++++ b/drivers/tty/ehv_bytechan.c +@@ -136,6 +136,21 @@ static int find_console_handle(void) + return 1; + } + ++static unsigned int local_ev_byte_channel_send(unsigned int handle, ++ unsigned int *count, ++ const char *p) ++{ ++ char buffer[EV_BYTE_CHANNEL_MAX_BYTES]; ++ unsigned int c = *count; ++ ++ if (c < sizeof(buffer)) { ++ memcpy(buffer, p, c); ++ memset(&buffer[c], 0, sizeof(buffer) - c); ++ p = buffer; ++ } ++ return ev_byte_channel_send(handle, count, p); ++} ++ + /*************************** EARLY CONSOLE DRIVER ***************************/ + + #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC +@@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data) + + do { + count = 1; +- ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, ++ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, + &count, &data); + } while (ret == EV_EAGAIN); + } +@@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s, + while (count) { + len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES); + do { +- ret = ev_byte_channel_send(handle, &len, s); ++ ret = local_ev_byte_channel_send(handle, &len, s); + } while (ret == EV_EAGAIN); + count -= len; + s += len; +@@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc) + CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE), + EV_BYTE_CHANNEL_MAX_BYTES); + +- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); ++ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); + + /* 'len' is valid only if the return code is 0 or EV_EAGAIN */ + if (!ret || (ret == EV_EAGAIN)) +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index c48f083d522a..84845275dbef 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1122,7 +1122,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, + case FBIOGET_FSCREENINFO: + if (!lock_fb_info(info)) + return -ENODEV; +- fix = info->fix; ++ memcpy(&fix, &info->fix, sizeof(fix)); + unlock_fb_info(info); + + ret = copy_to_user(argp, &fix, sizeof(fix)) ? 
-EFAULT : 0; +diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c +index 27a2b72e50e8..a8fb41f1a258 100644 +--- a/drivers/video/fbdev/sis/init301.c ++++ b/drivers/video/fbdev/sis/init301.c +@@ -848,9 +848,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) + SiS_DDC2Delay(SiS_Pr, 0x4000); + } + +- } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* || +- (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || +- (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */ ++ } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */ + + if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) { + PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); +diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c +index 072986d461b7..d8876fba686d 100644 +--- a/drivers/watchdog/sp805_wdt.c ++++ b/drivers/watchdog/sp805_wdt.c +@@ -137,10 +137,14 @@ wdt_restart(struct watchdog_device *wdd, unsigned long mode, void *cmd) + { + struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); + ++ writel_relaxed(UNLOCK, wdt->base + WDTLOCK); + writel_relaxed(0, wdt->base + WDTCONTROL); + writel_relaxed(0, wdt->base + WDTLOAD); + writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); + ++ /* Flush posted writes. */ ++ readl_relaxed(wdt->base + WDTLOCK); ++ + return 0; + } + +diff --git a/fs/buffer.c b/fs/buffer.c +index a550e0d8e965..c49fdab5cb36 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -1336,6 +1336,17 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size) + } + EXPORT_SYMBOL(__breadahead); + ++void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, ++ gfp_t gfp) ++{ ++ struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); ++ if (likely(bh)) { ++ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh); ++ brelse(bh); ++ } ++} ++EXPORT_SYMBOL(__breadahead_gfp); ++ + /** + * __bread_gfp() - reads a specified block and returns the bh + * @bdev: the block_device to read from +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 0c4df56c825a..70412944b267 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -392,7 +392,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, + struct smb_rqst *rqst, int flags) + { + struct kvec iov; +- struct smb2_transform_hdr tr_hdr; ++ struct smb2_transform_hdr *tr_hdr; + struct smb_rqst cur_rqst[MAX_COMPOUND]; + int rc; + +@@ -402,28 +402,34 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, + if (num_rqst > MAX_COMPOUND - 1) + return -ENOMEM; + +- memset(&cur_rqst[0], 0, sizeof(cur_rqst)); +- memset(&iov, 0, sizeof(iov)); +- memset(&tr_hdr, 0, sizeof(tr_hdr)); +- +- iov.iov_base = &tr_hdr; +- iov.iov_len = sizeof(tr_hdr); +- cur_rqst[0].rq_iov = &iov; +- cur_rqst[0].rq_nvec = 1; +- + if (!server->ops->init_transform_rq) { + cifs_dbg(VFS, "Encryption requested but transform callback " + "is missing\n"); + return -EIO; + } + ++ tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS); ++ if (!tr_hdr) ++ return -ENOMEM; ++ ++ memset(&cur_rqst[0], 0, sizeof(cur_rqst)); ++ memset(&iov, 0, sizeof(iov)); ++ memset(tr_hdr, 0, sizeof(*tr_hdr)); ++ ++ iov.iov_base = tr_hdr; ++ iov.iov_len = sizeof(*tr_hdr); ++ cur_rqst[0].rq_iov = &iov; ++ cur_rqst[0].rq_nvec = 1; ++ + rc = server->ops->init_transform_rq(server, num_rqst + 1, + &cur_rqst[0], rqst); + if (rc) +- return rc; ++ goto out; + + rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]); + smb3_free_compound_rqst(num_rqst, &cur_rqst[1]); ++out: ++ kfree(tr_hdr); + return rc; + } + +diff --git 
a/fs/ext2/xattr.c b/fs/ext2/xattr.c +index dd8f10db82e9..bd1d68ff3a9f 100644 +--- a/fs/ext2/xattr.c ++++ b/fs/ext2/xattr.c +@@ -56,6 +56,7 @@ + + #include <linux/buffer_head.h> + #include <linux/init.h> ++#include <linux/printk.h> + #include <linux/slab.h> + #include <linux/mbcache.h> + #include <linux/quotaops.h> +@@ -84,8 +85,8 @@ + printk("\n"); \ + } while (0) + #else +-# define ea_idebug(f...) +-# define ea_bdebug(f...) ++# define ea_idebug(inode, f...) no_printk(f) ++# define ea_bdebug(bh, f...) no_printk(f) + #endif + + static int ext2_xattr_set2(struct inode *, struct buffer_head *, +@@ -838,8 +839,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh) + error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1); + if (error) { + if (error == -EBUSY) { +- ea_bdebug(bh, "already in cache (%d cache entries)", +- atomic_read(&ext2_xattr_cache->c_entry_count)); ++ ea_bdebug(bh, "already in cache"); + error = 0; + } + } else +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 56218c79a856..000fa0e39278 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -4690,7 +4690,7 @@ make_io: + if (end > table) + end = table; + while (b <= end) +- sb_breadahead(sb, b++); ++ sb_breadahead_unmovable(sb, b++); + } + + /* +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index c76962eba5dd..bf949fcc970a 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -388,7 +388,8 @@ static void save_error_info(struct super_block *sb, const char *func, + unsigned int line) + { + __save_error_info(sb, func, line); +- ext4_commit_super(sb, 1); ++ if (!bdev_read_only(sb->s_bdev)) ++ ext4_commit_super(sb, 1); + } + + /* +@@ -4207,7 +4208,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + /* Pre-read the descriptors into the buffer cache */ + for (i = 0; i < db_count; i++) { + block = descriptor_loc(sb, logical_sb_block, i); +- sb_breadahead(sb, block); ++ sb_breadahead_unmovable(sb, block); + } + + for (i = 0; i < db_count; i++) { +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c +index e5d474681471..f0714c1258c7 100644 +--- a/fs/f2fs/node.c ++++ b/fs/f2fs/node.c +@@ -1559,15 +1559,16 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, + if (atomic && !test_opt(sbi, NOBARRIER)) + fio.op_flags |= REQ_PREFLUSH | REQ_FUA; + +- set_page_writeback(page); +- ClearPageError(page); +- ++ /* should add to global list before clearing PAGECACHE status */ + if (f2fs_in_warm_node_list(sbi, page)) { + seq = f2fs_add_fsync_node_entry(sbi, page); + if (seq_id) + *seq_id = seq; + } + ++ set_page_writeback(page); ++ ClearPageError(page); ++ + fio.old_blkaddr = ni.blk_addr; + f2fs_do_write_node_page(nid, &fio); + set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index da348cf4ff56..45f8f6ec22a5 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -1648,6 +1648,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, + int offset = off & (sb->s_blocksize - 1); + size_t towrite = len; + struct page *page; ++ void *fsdata = NULL; + char *kaddr; + int err = 0; + int tocopy; +@@ -1657,7 +1658,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, + towrite); + retry: + err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, +- &page, NULL); ++ &page, &fsdata); + if (unlikely(err)) { + if (err == -ENOMEM) { + congestion_wait(BLK_RW_ASYNC, HZ/50); +@@ -1672,7 +1673,7 @@ retry: + flush_dcache_page(page); + + a_ops->write_end(NULL, mapping, off, 
tocopy, tocopy, +- page, NULL); ++ page, fsdata); + offset = 0; + towrite -= tocopy; + off += tocopy; +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c +index 315967354954..bcc51f131a49 100644 +--- a/fs/nfs/callback_proc.c ++++ b/fs/nfs/callback_proc.c +@@ -130,6 +130,8 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, + + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + list_for_each_entry(lo, &server->layouts, plh_layouts) { ++ if (!pnfs_layout_is_valid(lo)) ++ continue; + if (stateid != NULL && + !nfs4_stateid_match_other(stateid, &lo->plh_stateid)) + continue; +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index c61bd3fc723e..e5da9d7fb69e 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -600,6 +600,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) + l_ctx = nfs_get_lock_context(dreq->ctx); + if (IS_ERR(l_ctx)) { + result = PTR_ERR(l_ctx); ++ nfs_direct_req_release(dreq); + goto out_release; + } + dreq->l_ctx = l_ctx; +@@ -1023,6 +1024,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) + l_ctx = nfs_get_lock_context(dreq->ctx); + if (IS_ERR(l_ctx)) { + result = PTR_ERR(l_ctx); ++ nfs_direct_req_release(dreq); + goto out_release; + } + dreq->l_ctx = l_ctx; +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 9cf59e2622f8..5dae7c85d9b6 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -865,15 +865,6 @@ static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, + pgio->pg_mirror_count = mirror_count; + } + +-/* +- * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) +- */ +-void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) +-{ +- pgio->pg_mirror_count = 1; +- pgio->pg_mirror_idx = 0; +-} +- + static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) + { + pgio->pg_mirror_count = 1; +@@ -1302,6 +1293,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) + } + } + ++/* ++ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) ++ */ ++void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) ++{ ++ nfs_pageio_complete(pgio); ++} ++ + int __init nfs_init_nfspagecache(void) + { + nfs_page_cachep = kmem_cache_create("nfs_page", +diff --git a/include/acpi/processor.h b/include/acpi/processor.h +index 1194a4c78d55..5b9eab15a1e6 100644 +--- a/include/acpi/processor.h ++++ b/include/acpi/processor.h +@@ -293,6 +293,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx + } + #endif + ++static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, ++ bool direct) ++{ ++ if (direct || (is_percpu_thread() && cpu == smp_processor_id())) ++ return fn(arg); ++ return work_on_cpu(cpu, fn, arg); ++} ++ + /* in processor_perflib.c */ + + #ifdef CONFIG_CPU_FREQ +diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h +index e0970a578188..a7207a965466 100644 +--- a/include/keys/big_key-type.h ++++ b/include/keys/big_key-type.h +@@ -21,6 +21,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); + extern void big_key_revoke(struct key *key); + extern void big_key_destroy(struct key *key); + extern void big_key_describe(const struct key *big_key, struct seq_file *m); +-extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); ++extern long big_key_read(const struct key *key, char *buffer, size_t buflen); + + #endif /* _KEYS_BIG_KEY_TYPE_H 
*/ +diff --git a/include/keys/user-type.h b/include/keys/user-type.h +index 12babe991594..0d8f3cd3056f 100644 +--- a/include/keys/user-type.h ++++ b/include/keys/user-type.h +@@ -45,8 +45,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); + extern void user_revoke(struct key *key); + extern void user_destroy(struct key *key); + extern void user_describe(const struct key *user, struct seq_file *m); +-extern long user_read(const struct key *key, +- char __user *buffer, size_t buflen); ++extern long user_read(const struct key *key, char *buffer, size_t buflen); + + static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) + { +diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h +index 96225a77c112..9168fc33a4f7 100644 +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h +@@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, + void __brelse(struct buffer_head *); + void __bforget(struct buffer_head *); + void __breadahead(struct block_device *, sector_t block, unsigned int size); ++void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, ++ gfp_t gfp); + struct buffer_head *__bread_gfp(struct block_device *, + sector_t block, unsigned size, gfp_t gfp); + void invalidate_bh_lrus(void); +@@ -319,6 +321,12 @@ sb_breadahead(struct super_block *sb, sector_t block) + __breadahead(sb->s_bdev, block, sb->s_blocksize); + } + ++static inline void ++sb_breadahead_unmovable(struct super_block *sb, sector_t block) ++{ ++ __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); ++} ++ + static inline struct buffer_head * + sb_getblk(struct super_block *sb, sector_t block) + { +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index bb22908c79e8..75112aa8064e 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -345,7 +345,7 @@ static inline void *offset_to_ptr(const int *off) + * compiler has support to do so. 
+ */ + #define compiletime_assert(condition, msg) \ +- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ++ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + + #define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ +diff --git a/include/linux/key-type.h b/include/linux/key-type.h +index d3c5ae8ad498..3341ddac2348 100644 +--- a/include/linux/key-type.h ++++ b/include/linux/key-type.h +@@ -125,7 +125,7 @@ struct key_type { + * much is copied into the buffer + * - shouldn't do the copy if the buffer is NULL + */ +- long (*read)(const struct key *key, char __user *buffer, size_t buflen); ++ long (*read)(const struct key *key, char *buffer, size_t buflen); + + /* handle request_key() for this type instead of invoking + * /sbin/request-key (optional) +diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h +index 4f052496cdfd..0a4f54dd4737 100644 +--- a/include/linux/percpu_counter.h ++++ b/include/linux/percpu_counter.h +@@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) + */ + static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) + { +- s64 ret = fbc->count; ++ /* Prevent reloads of fbc->count */ ++ s64 ret = READ_ONCE(fbc->count); + +- barrier(); /* Prevent reloads of fbc->count */ + if (ret >= 0) + return ret; + return 0; +diff --git a/include/linux/swapops.h b/include/linux/swapops.h +index 22af9d8a84ae..28d572b7ea73 100644 +--- a/include/linux/swapops.h ++++ b/include/linux/swapops.h +@@ -368,7 +368,8 @@ static inline void num_poisoned_pages_inc(void) + } + #endif + +-#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) ++#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ ++ defined(CONFIG_DEVICE_PRIVATE) + static inline int non_swap_entry(swp_entry_t entry) + { + return swp_type(entry) >= MAX_SWAPFILES; +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index e85636fb81b9..daf0a9637d73 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -188,8 +188,7 @@ struct bpf_call_arg_meta { + bool pkt_access; + int regno; + int access_size; +- s64 msize_smax_value; +- u64 msize_umax_value; ++ u64 msize_max_value; + }; + + static DEFINE_MUTEX(bpf_verifier_lock); +@@ -2076,8 +2075,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, + /* remember the mem_size which may be used later + * to refine return values. + */ +- meta->msize_smax_value = reg->smax_value; +- meta->msize_umax_value = reg->umax_value; ++ meta->msize_max_value = reg->umax_value; + + /* The register is SCALAR_VALUE; the access check + * happens using its boundaries. +@@ -2448,21 +2446,44 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) + return 0; + } + +-static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, +- int func_id, +- struct bpf_call_arg_meta *meta) ++static int do_refine_retval_range(struct bpf_verifier_env *env, ++ struct bpf_reg_state *regs, int ret_type, ++ int func_id, struct bpf_call_arg_meta *meta) + { + struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; ++ struct bpf_reg_state tmp_reg = *ret_reg; ++ bool ret; + + if (ret_type != RET_INTEGER || + (func_id != BPF_FUNC_get_stack && + func_id != BPF_FUNC_probe_read_str)) +- return; ++ return 0; ++ ++ /* Error case where ret is in interval [S32MIN, -1]. 
*/ ++ ret_reg->smin_value = S32_MIN; ++ ret_reg->smax_value = -1; ++ ++ __reg_deduce_bounds(ret_reg); ++ __reg_bound_offset(ret_reg); ++ __update_reg_bounds(ret_reg); ++ ++ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false); ++ if (!ret) ++ return -EFAULT; ++ ++ *ret_reg = tmp_reg; ++ ++ /* Success case where ret is in range [0, msize_max_value]. */ ++ ret_reg->smin_value = 0; ++ ret_reg->smax_value = meta->msize_max_value; ++ ret_reg->umin_value = ret_reg->smin_value; ++ ret_reg->umax_value = ret_reg->smax_value; + +- ret_reg->smax_value = meta->msize_smax_value; +- ret_reg->umax_value = meta->msize_umax_value; + __reg_deduce_bounds(ret_reg); + __reg_bound_offset(ret_reg); ++ __update_reg_bounds(ret_reg); ++ ++ return 0; + } + + static int +@@ -2617,7 +2638,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn + return -EINVAL; + } + +- do_refine_retval_range(regs, fn->ret_type, func_id, &meta); ++ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); ++ if (err) ++ return err; + + err = check_map_func_compatibility(env, meta.map_ptr, func_id); + if (err) +diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c +index 7d0b0ed74404..95395ef5922a 100644 +--- a/kernel/locking/locktorture.c ++++ b/kernel/locking/locktorture.c +@@ -710,10 +710,10 @@ static void __torture_print_stats(char *page, + if (statp[i].n_lock_fail) + fail = true; + sum += statp[i].n_lock_acquired; +- if (max < statp[i].n_lock_fail) +- max = statp[i].n_lock_fail; +- if (min > statp[i].n_lock_fail) +- min = statp[i].n_lock_fail; ++ if (max < statp[i].n_lock_acquired) ++ max = statp[i].n_lock_acquired; ++ if (min > statp[i].n_lock_acquired) ++ min = statp[i].n_lock_acquired; + } + page += sprintf(page, + "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", +diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc +index d5242f544551..b7c68030da4f 100644 +--- a/lib/raid6/neon.uc ++++ b/lib/raid6/neon.uc +@@ -28,7 +28,6 @@ + + typedef uint8x16_t unative_t; + +-#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) + #define NSIZE sizeof(unative_t) + + /* +@@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) + int d, z, z0; + + register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; +- const unative_t x1d = NBYTES(0x1d); ++ const unative_t x1d = vdupq_n_u8(0x1d); + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ +@@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop, + int d, z, z0; + + register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; +- const unative_t x1d = NBYTES(0x1d); ++ const unative_t x1d = vdupq_n_u8(0x1d); + + z0 = stop; /* P/Q right side optimization */ + p = dptr[disks-2]; /* XOR parity */ +diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c +index 8cd20c9f834a..7d00c31a6547 100644 +--- a/lib/raid6/recov_neon_inner.c ++++ b/lib/raid6/recov_neon_inner.c +@@ -10,11 +10,6 @@ + + #include <arm_neon.h> + +-static const uint8x16_t x0f = { +- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, +- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, +-}; +- + #ifdef CONFIG_ARM + /* + * AArch32 does not provide this intrinsic natively because it does not +@@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, + uint8x16_t pm1 = vld1q_u8(pbmul + 16); + uint8x16_t qm0 = vld1q_u8(qmul); + uint8x16_t qm1 = vld1q_u8(qmul + 16); ++ uint8x16_t x0f = vdupq_n_u8(0x0f); + + /* + * while ( bytes-- ) { 
+@@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, + { + uint8x16_t qm0 = vld1q_u8(qmul); + uint8x16_t qm1 = vld1q_u8(qmul + 16); ++ uint8x16_t x0f = vdupq_n_u8(0x0f); + + /* + * while (bytes--) { +diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c +index 7f4534828f6c..a0494206cfda 100644 +--- a/net/dns_resolver/dns_key.c ++++ b/net/dns_resolver/dns_key.c +@@ -241,7 +241,7 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) + * - the key's semaphore is read-locked + */ + static long dns_resolver_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + int err = PTR_ERR(key->payload.data[dns_key_error]); + +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 5881f6668817..1b8a53081632 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3450,7 +3450,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, + NFT_SET_INTERVAL | NFT_SET_TIMEOUT | + NFT_SET_MAP | NFT_SET_EVAL | + NFT_SET_OBJECT)) +- return -EINVAL; ++ return -EOPNOTSUPP; + /* Only one of these operations is supported */ + if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) == + (NFT_SET_MAP | NFT_SET_OBJECT)) +@@ -3488,7 +3488,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, + objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE])); + if (objtype == NFT_OBJECT_UNSPEC || + objtype > NFT_OBJECT_MAX) +- return -EINVAL; ++ return -EOPNOTSUPP; + } else if (flags & NFT_SET_OBJECT) + return -EINVAL; + else +diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c +index e7f6b8823eb6..ad9d1b21cb0b 100644 +--- a/net/rxrpc/key.c ++++ b/net/rxrpc/key.c +@@ -35,7 +35,7 @@ static void rxrpc_free_preparse_s(struct key_preparsed_payload *); + static void rxrpc_destroy(struct key *); + static void rxrpc_destroy_s(struct key *); + static void rxrpc_describe(const struct key *, struct seq_file *); +-static long rxrpc_read(const struct key *, char __user *, size_t); ++static long rxrpc_read(const struct key *, char *, size_t); + + /* + * rxrpc defined keys take an arbitrary string as the description and an +@@ -1044,12 +1044,12 @@ EXPORT_SYMBOL(rxrpc_get_null_key); + * - this returns the result in XDR form + */ + static long rxrpc_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + const struct rxrpc_key_token *token; + const struct krb5_principal *princ; + size_t size; +- __be32 __user *xdr, *oldxdr; ++ __be32 *xdr, *oldxdr; + u32 cnlen, toksize, ntoks, tok, zero; + u16 toksizes[AFSTOKEN_MAX]; + int loop; +@@ -1126,30 +1126,25 @@ static long rxrpc_read(const struct key *key, + if (!buffer || buflen < size) + return size; + +- xdr = (__be32 __user *) buffer; ++ xdr = (__be32 *)buffer; + zero = 0; + #define ENCODE(x) \ + do { \ +- __be32 y = htonl(x); \ +- if (put_user(y, xdr++) < 0) \ +- goto fault; \ ++ *xdr++ = htonl(x); \ + } while(0) + #define ENCODE_DATA(l, s) \ + do { \ + u32 _l = (l); \ + ENCODE(l); \ +- if (copy_to_user(xdr, (s), _l) != 0) \ +- goto fault; \ +- if (_l & 3 && \ +- copy_to_user((u8 __user *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \ +- goto fault; \ ++ memcpy(xdr, (s), _l); \ ++ if (_l & 3) \ ++ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ + xdr += (_l + 3) >> 2; \ + } while(0) + #define ENCODE64(x) \ + do { \ + __be64 y = cpu_to_be64(x); \ +- if (copy_to_user(xdr, &y, 8) != 0) \ +- goto fault; \ ++ memcpy(xdr, &y, 8); \ + xdr += 8 >> 2; \ + } while(0) + #define 
ENCODE_STR(s) \ +@@ -1240,8 +1235,4 @@ static long rxrpc_read(const struct key *key, + ASSERTCMP((char __user *) xdr - buffer, ==, size); + _leave(" = %zu", size); + return size; +- +-fault: +- _leave(" = -EFAULT"); +- return -EFAULT; + } +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c +index 556a649512b6..706fad12f22c 100644 +--- a/net/xdp/xdp_umem.c ++++ b/net/xdp/xdp_umem.c +@@ -260,7 +260,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + u32 chunk_size = mr->chunk_size, headroom = mr->headroom; + unsigned int chunks, chunks_per_page; + u64 addr = mr->addr, size = mr->len; +- int size_chk, err, i; ++ int err, i; + + if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { + /* Strictly speaking we could support this, if: +@@ -295,8 +295,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + + headroom = ALIGN(headroom, 64); + +- size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM; +- if (size_chk < 0) ++ if (headroom >= chunk_size - XDP_PACKET_HEADROOM) + return -EINVAL; + + umem->address = (unsigned long)addr; +diff --git a/security/keys/big_key.c b/security/keys/big_key.c +index 2806e70d7f8f..630594a5b46e 100644 +--- a/security/keys/big_key.c ++++ b/security/keys/big_key.c +@@ -356,7 +356,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) + * read the key data + * - the key's semaphore is read-locked + */ +-long big_key_read(const struct key *key, char __user *buffer, size_t buflen) ++long big_key_read(const struct key *key, char *buffer, size_t buflen) + { + size_t datalen = (size_t)key->payload.data[big_key_len]; + long ret; +@@ -395,9 +395,8 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) + + ret = datalen; + +- /* copy decrypted data to user */ +- if (copy_to_user(buffer, buf->virt, datalen) != 0) +- ret = -EFAULT; ++ /* copy out decrypted data */ ++ memcpy(buffer, buf->virt, datalen); + + err_fput: + fput(file); +@@ -405,9 +404,7 @@ error: + big_key_free_buffer(buf); + } else { + ret = datalen; +- if (copy_to_user(buffer, key->payload.data[big_key_data], +- datalen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, key->payload.data[big_key_data], datalen); + } + + return ret; +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c +index d92cbf9687c3..571f6d486838 100644 +--- a/security/keys/encrypted-keys/encrypted.c ++++ b/security/keys/encrypted-keys/encrypted.c +@@ -895,14 +895,14 @@ out: + } + + /* +- * encrypted_read - format and copy the encrypted data to userspace ++ * encrypted_read - format and copy out the encrypted data + * + * The resulting datablob format is: + * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> + * + * On success, return to userspace the encrypted key datablob size. 
+ */ +-static long encrypted_read(const struct key *key, char __user *buffer, ++static long encrypted_read(const struct key *key, char *buffer, + size_t buflen) + { + struct encrypted_key_payload *epayload; +@@ -950,8 +950,7 @@ static long encrypted_read(const struct key *key, char __user *buffer, + key_put(mkey); + memzero_explicit(derived_key, sizeof(derived_key)); + +- if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) +- ret = -EFAULT; ++ memcpy(buffer, ascii_buf, asciiblob_len); + kzfree(ascii_buf); + + return asciiblob_len; +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c +index e00e20204de0..4b6a084e323b 100644 +--- a/security/keys/keyctl.c ++++ b/security/keys/keyctl.c +@@ -742,6 +742,21 @@ error: + return ret; + } + ++/* ++ * Call the read method ++ */ ++static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen) ++{ ++ long ret; ++ ++ down_read(&key->sem); ++ ret = key_validate(key); ++ if (ret == 0) ++ ret = key->type->read(key, buffer, buflen); ++ up_read(&key->sem); ++ return ret; ++} ++ + /* + * Read a key's payload. + * +@@ -757,26 +772,27 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) + struct key *key; + key_ref_t key_ref; + long ret; ++ char *key_data; + + /* find the key first */ + key_ref = lookup_user_key(keyid, 0, 0); + if (IS_ERR(key_ref)) { + ret = -ENOKEY; +- goto error; ++ goto out; + } + + key = key_ref_to_ptr(key_ref); + + ret = key_read_state(key); + if (ret < 0) +- goto error2; /* Negatively instantiated */ ++ goto key_put_out; /* Negatively instantiated */ + + /* see if we can read it directly */ + ret = key_permission(key_ref, KEY_NEED_READ); + if (ret == 0) + goto can_read_key; + if (ret != -EACCES) +- goto error2; ++ goto key_put_out; + + /* we can't; see if it's searchable from this process's keyrings + * - we automatically take account of the fact that it may be +@@ -784,26 +800,51 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) + */ + if (!is_key_possessed(key_ref)) { + ret = -EACCES; +- goto error2; ++ goto key_put_out; + } + + /* the key is probably readable - now try to read it */ + can_read_key: +- ret = -EOPNOTSUPP; +- if (key->type->read) { +- /* Read the data with the semaphore held (since we might sleep) +- * to protect against the key being updated or revoked. +- */ +- down_read(&key->sem); +- ret = key_validate(key); +- if (ret == 0) +- ret = key->type->read(key, buffer, buflen); +- up_read(&key->sem); ++ if (!key->type->read) { ++ ret = -EOPNOTSUPP; ++ goto key_put_out; + } + +-error2: ++ if (!buffer || !buflen) { ++ /* Get the key length from the read method */ ++ ret = __keyctl_read_key(key, NULL, 0); ++ goto key_put_out; ++ } ++ ++ /* ++ * Read the data with the semaphore held (since we might sleep) ++ * to protect against the key being updated or revoked. ++ * ++ * Allocating a temporary buffer to hold the keys before ++ * transferring them to user buffer to avoid potential ++ * deadlock involving page fault and mmap_sem. ++ */ ++ key_data = kmalloc(buflen, GFP_KERNEL); ++ ++ if (!key_data) { ++ ret = -ENOMEM; ++ goto key_put_out; ++ } ++ ret = __keyctl_read_key(key, key_data, buflen); ++ ++ /* ++ * Read methods will just return the required length without ++ * any copying if the provided length isn't large enough. 
++ */ ++ if (ret > 0 && ret <= buflen) { ++ if (copy_to_user(buffer, key_data, ret)) ++ ret = -EFAULT; ++ } ++ kzfree(key_data); ++ ++key_put_out: + key_put(key); +-error: ++out: + return ret; + } + +diff --git a/security/keys/keyring.c b/security/keys/keyring.c +index 99a55145ddcd..e8f2366021ea 100644 +--- a/security/keys/keyring.c ++++ b/security/keys/keyring.c +@@ -432,7 +432,6 @@ static int keyring_read_iterator(const void *object, void *data) + { + struct keyring_read_iterator_context *ctx = data; + const struct key *key = keyring_ptr_to_key(object); +- int ret; + + kenter("{%s,%d},,{%zu/%zu}", + key->type->name, key->serial, ctx->count, ctx->buflen); +@@ -440,10 +439,7 @@ static int keyring_read_iterator(const void *object, void *data) + if (ctx->count >= ctx->buflen) + return 1; + +- ret = put_user(key->serial, ctx->buffer); +- if (ret < 0) +- return ret; +- ctx->buffer++; ++ *ctx->buffer++ = key->serial; + ctx->count += sizeof(key->serial); + return 0; + } +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c +index 1d34b2a5f485..13ac3b1e57da 100644 +--- a/security/keys/request_key_auth.c ++++ b/security/keys/request_key_auth.c +@@ -27,7 +27,7 @@ static int request_key_auth_instantiate(struct key *, + static void request_key_auth_describe(const struct key *, struct seq_file *); + static void request_key_auth_revoke(struct key *); + static void request_key_auth_destroy(struct key *); +-static long request_key_auth_read(const struct key *, char __user *, size_t); ++static long request_key_auth_read(const struct key *, char *, size_t); + + /* + * The request-key authorisation key type definition. +@@ -85,7 +85,7 @@ static void request_key_auth_describe(const struct key *key, + * - the key's semaphore is read-locked + */ + static long request_key_auth_read(const struct key *key, +- char __user *buffer, size_t buflen) ++ char *buffer, size_t buflen) + { + struct request_key_auth *rka = get_request_key_auth(key); + size_t datalen; +@@ -102,8 +102,7 @@ static long request_key_auth_read(const struct key *key, + if (buflen > datalen) + buflen = datalen; + +- if (copy_to_user(buffer, rka->callout_info, buflen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, rka->callout_info, buflen); + } + + return ret; +diff --git a/security/keys/trusted.c b/security/keys/trusted.c +index b69d3b1777c2..09545c42977e 100644 +--- a/security/keys/trusted.c ++++ b/security/keys/trusted.c +@@ -1135,11 +1135,10 @@ out: + * trusted_read - copy the sealed blob data to userspace in hex. + * On success, return to userspace the trusted key datablob size. 
+ */ +-static long trusted_read(const struct key *key, char __user *buffer, ++static long trusted_read(const struct key *key, char *buffer, + size_t buflen) + { + const struct trusted_key_payload *p; +- char *ascii_buf; + char *bufp; + int i; + +@@ -1148,18 +1147,9 @@ static long trusted_read(const struct key *key, char __user *buffer, + return -EINVAL; + + if (buffer && buflen >= 2 * p->blob_len) { +- ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL); +- if (!ascii_buf) +- return -ENOMEM; +- +- bufp = ascii_buf; ++ bufp = buffer; + for (i = 0; i < p->blob_len; i++) + bufp = hex_byte_pack(bufp, p->blob[i]); +- if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) { +- kzfree(ascii_buf); +- return -EFAULT; +- } +- kzfree(ascii_buf); + } + return 2 * p->blob_len; + } +diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c +index 9f558bedba23..0e723b676aef 100644 +--- a/security/keys/user_defined.c ++++ b/security/keys/user_defined.c +@@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(user_describe); + * read the key data + * - the key's semaphore is read-locked + */ +-long user_read(const struct key *key, char __user *buffer, size_t buflen) ++long user_read(const struct key *key, char *buffer, size_t buflen) + { + const struct user_key_payload *upayload; + long ret; +@@ -185,8 +185,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) + if (buflen > upayload->datalen) + buflen = upayload->datalen; + +- if (copy_to_user(buffer, upayload->data, buflen) != 0) +- ret = -EFAULT; ++ memcpy(buffer, upayload->data, buflen); + } + + return ret; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index a2eeb08fa61d..54a9b391ecce 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2076,24 +2076,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context) + { + struct snd_card *card = context; + struct azx *chip = card->private_data; +- struct pci_dev *pci = chip->pci; +- +- if (!fw) { +- dev_err(card->dev, "Cannot load firmware, aborting\n"); +- goto error; +- } + +- chip->fw = fw; ++ if (fw) ++ chip->fw = fw; ++ else ++ dev_err(card->dev, "Cannot load firmware, continue without patching\n"); + if (!chip->disabled) { + /* continue probing */ +- if (azx_probe_continue(chip)) +- goto error; ++ azx_probe_continue(chip); + } +- return; /* OK */ +- +- error: +- snd_card_free(card); +- pci_set_drvdata(pci, NULL); + } + #endif + +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index ecf5fc77f50b..9479c74af9ba 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -938,10 +938,7 @@ static struct rela *find_switch_table(struct objtool_file *file, + * it. + */ + for (; +- &insn->list != &file->insn_list && +- insn->sec == func->sec && +- insn->offset >= func->offset; +- ++ &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func; + insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { + + if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) |