diff options
author | Mike Pagano <mpagano@gentoo.org> | 2022-04-20 08:07:48 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2022-04-20 08:07:48 -0400 |
commit | 760cba0b4f0006dcfc2311e65b17747a161e6dcd (patch) | |
tree | 12ac77d784ed6eb4101e61dd2dae9d6ac3e459db | |
parent | Remove deprecated select AUTOFS4_FS (diff) | |
download | linux-patches-760cba0b4f0006dcfc2311e65b17747a161e6dcd.tar.gz linux-patches-760cba0b4f0006dcfc2311e65b17747a161e6dcd.tar.bz2 linux-patches-760cba0b4f0006dcfc2311e65b17747a161e6dcd.zip |
Linux patch 5.10.112 (tag: 5.10-120)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1111_linux-5.10.112.patch | 4003 |
2 files changed, 4007 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 9076962f..a85d4035 100644 --- a/0000_README +++ b/0000_README @@ -487,6 +487,10 @@ Patch: 1110_linux-5.10.111.patch From: http://www.kernel.org Desc: Linux 5.10.111 +Patch: 1111_linux-5.10.112.patch +From: http://www.kernel.org +Desc: Linux 5.10.112 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1111_linux-5.10.112.patch b/1111_linux-5.10.112.patch new file mode 100644 index 00000000..fd0346bc --- /dev/null +++ b/1111_linux-5.10.112.patch @@ -0,0 +1,4003 @@ +diff --git a/Makefile b/Makefile +index 8695a13fe7cd6..05013bf5a469b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 10 +-SUBLEVEL = 111 ++SUBLEVEL = 112 + EXTRAVERSION = + NAME = Dare mighty things + +diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c +index 428012687a802..7f7f6bae21c2d 100644 +--- a/arch/arm/mach-davinci/board-da850-evm.c ++++ b/arch/arm/mach-davinci/board-da850-evm.c +@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void) + int ret; + u32 val; + struct davinci_soc_info *soc_info = &davinci_soc_info; +- u8 rmii_en = soc_info->emac_pdata->rmii_en; ++ u8 rmii_en; + + if (!machine_is_davinci_da850_evm()) + return 0; + ++ rmii_en = soc_info->emac_pdata->rmii_en; ++ + cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG); + + val = __raw_readl(cfg_chip3_base); +diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c +index 73039949b5ce2..5f8e4c2df53cc 100644 +--- a/arch/arm64/kernel/alternative.c ++++ b/arch/arm64/kernel/alternative.c +@@ -41,7 +41,7 @@ bool alternative_is_applied(u16 cpufeature) + /* + * Check if the target PC is within an alternative block. 
+ */ +-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) ++static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) + { + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); +@@ -49,7 +49,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) + + #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) + +-static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) ++static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) + { + u32 insn; + +@@ -94,7 +94,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp + return insn; + } + +-static void patch_alternative(struct alt_instr *alt, ++static noinstr void patch_alternative(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) + { + __le32 *replptr; +diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c +index b512b5503f6e6..d4ff9ae673fa4 100644 +--- a/arch/arm64/kernel/cpuidle.c ++++ b/arch/arm64/kernel/cpuidle.c +@@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + ++ if (unlikely(!pr || !pr->flags.has_lpi)) ++ return -EINVAL; ++ + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out +@@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + +- if (unlikely(!pr || !pr->flags.has_lpi)) +- return -EINVAL; +- + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 0eb41dce55da3..b0e4001efb50f 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ 
b/arch/x86/include/asm/kvm_host.h +@@ -1340,8 +1340,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) + return -ENOTSUPP; + } + +-int kvm_mmu_module_init(void); +-void kvm_mmu_module_exit(void); ++void kvm_mmu_x86_module_init(void); ++int kvm_mmu_vendor_module_init(void); ++void kvm_mmu_vendor_module_exit(void); + + void kvm_mmu_destroy(struct kvm_vcpu *vcpu); + int kvm_mmu_create(struct kvm_vcpu *vcpu); +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 20d29ae8ed702..99ea1ec12ffe0 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -5876,12 +5876,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) + return 0; + } + +-int kvm_mmu_module_init(void) ++/* ++ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as ++ * its default value of -1 is technically undefined behavior for a boolean. ++ */ ++void kvm_mmu_x86_module_init(void) + { +- int ret = -ENOMEM; +- + if (nx_huge_pages == -1) + __set_nx_huge_pages(get_nx_auto_mode()); ++} ++ ++/* ++ * The bulk of the MMU initialization is deferred until the vendor module is ++ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need ++ * to be reset when a potentially different vendor module is loaded. 
++ */ ++int kvm_mmu_vendor_module_init(void) ++{ ++ int ret = -ENOMEM; + + /* + * MMU roles use union aliasing which is, generally speaking, an +@@ -5955,7 +5967,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) + mmu_free_memory_caches(vcpu); + } + +-void kvm_mmu_module_exit(void) ++void kvm_mmu_vendor_module_exit(void) + { + mmu_destroy_caches(); + percpu_counter_destroy(&kvm_total_used_mmu_pages); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 70d23bec09f5c..4588f73bf59a4 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -8005,7 +8005,7 @@ int kvm_arch_init(void *opaque) + goto out_free_x86_emulator_cache; + } + +- r = kvm_mmu_module_init(); ++ r = kvm_mmu_vendor_module_init(); + if (r) + goto out_free_percpu; + +@@ -8065,7 +8065,7 @@ void kvm_arch_exit(void) + cancel_work_sync(&pvclock_gtod_work); + #endif + kvm_x86_ops.hardware_enable = NULL; +- kvm_mmu_module_exit(); ++ kvm_mmu_vendor_module_exit(); + free_percpu(user_return_msrs); + kmem_cache_destroy(x86_emulator_cache); + kmem_cache_destroy(x86_fpu_cache); +@@ -11426,3 +11426,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request); ++ ++static int __init kvm_x86_init(void) ++{ ++ kvm_mmu_x86_module_init(); ++ return 0; ++} ++module_init(kvm_x86_init); ++ ++static void __exit kvm_x86_exit(void) ++{ ++ /* ++ * If module_init() is implemented, module_exit() must also be ++ * implemented to allow module unload. 
++ */ ++} ++module_exit(kvm_x86_exit); +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c +index 8377c3ed10ffa..9921b481c7ee1 100644 +--- a/drivers/acpi/processor_idle.c ++++ b/drivers/acpi/processor_idle.c +@@ -1080,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr, + return 0; + } + ++int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) ++{ ++ return -EOPNOTSUPP; ++} ++ + static int acpi_processor_get_lpi_info(struct acpi_processor *pr) + { + int ret, i; +@@ -1088,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) + struct acpi_device *d = NULL; + struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + ++ /* make sure our architecture has support */ ++ ret = acpi_processor_ffh_lpi_probe(pr->id); ++ if (ret == -EOPNOTSUPP) ++ return ret; ++ + if (!osc_pc_lpi_support_confirmed) + return -EOPNOTSUPP; + +@@ -1139,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) + return 0; + } + +-int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +-{ +- return -ENODEV; +-} +- + int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) + { + return -ENODEV; +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index d2b544bdc7b5e..f963a0a7da46a 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -3974,6 +3974,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, ++ { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ++ ATA_HORKAGE_NO_DMA_LOG | ++ ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | +diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c +index 4645677d86f1b..a45678cd9b740 100644 +--- 
a/drivers/firmware/arm_scmi/clock.c ++++ b/drivers/firmware/arm_scmi/clock.c +@@ -202,7 +202,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, + + if (rate_discrete && rate) { + clk->list.num_rates = tot_rate_cnt; +- sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); ++ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate), ++ rate_cmp_func, NULL); + } + + clk->rate_discrete = rate_discrete; +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c +index 55e4f402ec8b6..44ee319da1b35 100644 +--- a/drivers/gpio/gpiolib-acpi.c ++++ b/drivers/gpio/gpiolib-acpi.c +@@ -276,8 +276,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, + pin = agpio->pin_table[0]; + + if (pin <= 255) { +- char ev_name[5]; +- sprintf(ev_name, "_%c%02hhX", ++ char ev_name[8]; ++ sprintf(ev_name, "_%c%02X", + agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', + pin); + if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) +diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h +index 5b393622f5920..a0f0a17e224fe 100644 +--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h ++++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h +@@ -119,6 +119,7 @@ + #define CONNECTOR_OBJECT_ID_eDP 0x14 + #define CONNECTOR_OBJECT_ID_MXM 0x15 + #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 ++#define CONNECTOR_OBJECT_ID_USBC 0x17 + + /* deleted */ + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +index 26f8a21383774..1b4c7ced8b92c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +@@ -1024,11 +1024,15 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, + struct dma_fence **ef) + { + struct amdgpu_device *adev = get_amdgpu_device(kgd); +- struct drm_file *drm_priv = filp->private_data; +- struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; +- struct amdgpu_vm 
*avm = &drv_priv->vm; ++ struct amdgpu_fpriv *drv_priv; ++ struct amdgpu_vm *avm; + int ret; + ++ ret = amdgpu_file_to_fpriv(filp, &drv_priv); ++ if (ret) ++ return ret; ++ avm = &drv_priv->vm; ++ + /* Already a compute VM? */ + if (avm->process_info) + return -EINVAL; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index ed13a2f76884c..30659c1776e81 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -632,7 +632,7 @@ MODULE_PARM_DESC(sched_policy, + * Maximum number of processes that HWS can schedule concurrently. The maximum is the + * number of VMIDs assigned to the HWS, which is also the default. + */ +-int hws_max_conc_proc = 8; ++int hws_max_conc_proc = -1; + module_param(hws_max_conc_proc, int, 0444); + MODULE_PARM_DESC(hws_max_conc_proc, + "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index b19f7bd37781f..405bb3efa2a96 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -1248,6 +1248,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { + { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, + /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, ++ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ ++ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, + { 0, 0, 0, 0, 0 }, + }; + +diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +index 2099f6ebd8338..bdb8e596bda6a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +@@ -1429,8 +1429,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) + + static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) + { ++ 
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; + uint32_t tmp; + ++ vcn_v3_0_pause_dpg_mode(adev, 0, &state); ++ + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 84313135c2eae..148e43dee657a 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -664,15 +664,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, + - kfd->vm_info.first_vmid_kfd + 1; + + /* Verify module parameters regarding mapped process number*/ +- if ((hws_max_conc_proc < 0) +- || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { +- dev_err(kfd_device, +- "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", +- hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, +- kfd->vm_info.vmid_num_kfd); ++ if (hws_max_conc_proc >= 0) ++ kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); ++ else + kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; +- } else +- kfd->max_proc_per_quantum = hws_max_conc_proc; + + /* calculate max size of mqds needed for queues */ + size = max_num_of_queues_per_device * +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c +index ba2c2ce0c55af..159be13ef20bb 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c +@@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) + event_waiters = kmalloc_array(num_events, + sizeof(struct kfd_event_waiter), + GFP_KERNEL); ++ if (!event_waiters) ++ return NULL; + + for (i = 0; (event_waiters) && (i < num_events) ; i++) { + init_wait(&event_waiters[i].wait); +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index e828f9414ba2c..7bb151283f44b 100644 
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -2022,7 +2022,8 @@ static int dm_resume(void *handle) + * this is the case when traversing through already created + * MST connectors, should be skipped + */ +- if (aconnector->mst_port) ++ if (aconnector->dc_link && ++ aconnector->dc_link->type == dc_connection_mst_branch) + continue; + + mutex_lock(&aconnector->hpd_lock); +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +index 5c5ccbad96588..1e47afc4ccc1d 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +@@ -1701,8 +1701,8 @@ bool dc_is_stream_unchanged( + if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) + return false; + +- // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks +- if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) ++ /*compare audio info*/ ++ if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) + return false; + + return true; +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +index 532f6a1145b55..31a13daf4289c 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +@@ -2387,14 +2387,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) + &blnd_cfg.black_color); + } + +- if (per_pixel_alpha) +- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; +- else +- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; +- + blnd_cfg.overlap_only = false; + blnd_cfg.global_gain = 0xff; + ++ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { ++ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; ++ 
blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; ++ } else if (per_pixel_alpha) { ++ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; ++ } else { ++ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; ++ } ++ + if (pipe_ctx->plane_state->global_alpha) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +index 79a2b9c785f05..3d778760a3b55 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +@@ -2270,14 +2270,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) + pipe_ctx, &blnd_cfg.black_color); + } + +- if (per_pixel_alpha) +- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; +- else +- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; +- + blnd_cfg.overlap_only = false; + blnd_cfg.global_gain = 0xff; + ++ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { ++ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; ++ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; ++ } else if (per_pixel_alpha) { ++ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; ++ } else { ++ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; ++ } ++ + if (pipe_ctx->plane_state->global_alpha) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else +diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +index 0fdf7a3e96dea..96e18050a6175 100644 +--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c ++++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +@@ -100,7 +100,8 @@ enum vsc_packet_revision { + //PB7 = MD0 + #define MASK_VTEM_MD0__VRR_EN 0x01 + #define MASK_VTEM_MD0__M_CONST 0x02 +-#define 
MASK_VTEM_MD0__RESERVED2 0x0C ++#define MASK_VTEM_MD0__QMS_EN 0x04 ++#define MASK_VTEM_MD0__RESERVED2 0x08 + #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 + + //MD1 +@@ -109,7 +110,7 @@ enum vsc_packet_revision { + //MD2 + #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 + #define MASK_VTEM_MD2__RB 0x04 +-#define MASK_VTEM_MD2__RESERVED3 0xF8 ++#define MASK_VTEM_MD2__NEXT_TFR 0xF8 + + //MD3 + #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index 9e09805575db4..39563daff4a0b 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1222,7 +1222,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) + return ERR_CAST(mmu); + + return msm_gem_address_space_create(mmu, +- "gpu", 0x100000000ULL, 0x1ffffffffULL); ++ "gpu", 0x100000000ULL, SZ_4G); + } + + static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c +index 1d28dfba2c9bb..fb421ca56b3da 100644 +--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c ++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c +@@ -644,7 +644,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) + return connector; + + fail: +- connector->funcs->destroy(msm_dsi->connector); ++ connector->funcs->destroy(connector); + return ERR_PTR(ret); + } + +diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c +index 819567e40565c..9c05bf6c45510 100644 +--- a/drivers/gpu/drm/msm/msm_gem.c ++++ b/drivers/gpu/drm/msm/msm_gem.c +@@ -849,6 +849,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) + get_pid_task(aspace->pid, PIDTYPE_PID); + if (task) { + comm = kstrdup(task->comm, GFP_KERNEL); ++ put_task_struct(task); + } else { + comm = NULL; + } +diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c +index b4a31d506fccf..74eca68891add 100644 +--- 
a/drivers/gpu/ipu-v3/ipu-di.c ++++ b/drivers/gpu/ipu-v3/ipu-di.c +@@ -451,8 +451,9 @@ static void ipu_di_config_clock(struct ipu_di *di, + + error = rate / (sig->mode.pixelclock / 1000); + +- dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n", +- rate, div, (signed)(error - 1000) / 10, error % 10); ++ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n", ++ rate, div, error < 1000 ? '-' : '+', ++ abs(error - 1000) / 10, abs(error - 1000) % 10); + + /* Allow a 1% error */ + if (error < 1010 && error >= 990) { +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c +index 356e22159e834..769851b6e74c5 100644 +--- a/drivers/hv/ring_buffer.c ++++ b/drivers/hv/ring_buffer.c +@@ -378,7 +378,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, + static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) + { + u32 priv_read_loc = rbi->priv_read_index; +- u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index); ++ u32 write_loc; ++ ++ /* ++ * The Hyper-V host writes the packet data, then uses ++ * store_release() to update the write_index. Use load_acquire() ++ * here to prevent loads of the packet data from being re-ordered ++ * before the read of the write_index and potentially getting ++ * stale data. ++ */ ++ write_loc = virt_load_acquire(&rbi->ring_buffer->write_index); + + if (write_loc >= priv_read_loc) + return write_loc - priv_read_loc; +diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c +index 20f2772c0e79b..2c909522f0f38 100644 +--- a/drivers/i2c/busses/i2c-pasemi.c ++++ b/drivers/i2c/busses/i2c-pasemi.c +@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, + + TXFIFO_WR(smbus, msg->buf[msg->len-1] | + (stop ? 
MTXFIFO_STOP : 0)); ++ ++ if (stop) { ++ err = pasemi_smb_waitready(smbus); ++ if (err) ++ goto reset_out; ++ } + } + + return 0; +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c +index 3690e28cc7ea2..a16e066989fa5 100644 +--- a/drivers/infiniband/ulp/iser/iscsi_iser.c ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c +@@ -499,6 +499,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, + iser_conn->iscsi_conn = conn; + + out: ++ iscsi_put_endpoint(ep); + mutex_unlock(&iser_conn->state_mutex); + return error; + } +@@ -988,6 +989,7 @@ static struct iscsi_transport iscsi_iser_transport = { + /* connection management */ + .create_conn = iscsi_iser_conn_create, + .bind_conn = iscsi_iser_conn_bind, ++ .unbind_conn = iscsi_conn_unbind, + .destroy_conn = iscsi_conn_teardown, + .attr_is_visible = iser_attr_is_visible, + .set_param = iscsi_iser_set_param, +diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c +index 186f91e2752c1..06fe43c13ba38 100644 +--- a/drivers/md/dm-historical-service-time.c ++++ b/drivers/md/dm-historical-service-time.c +@@ -429,7 +429,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps, + { + struct selector *s = ps->context; + struct path_info *pi = NULL, *best = NULL; +- u64 time_now = sched_clock(); ++ u64 time_now = ktime_get_ns(); + struct dm_path *ret = NULL; + unsigned long flags; + +@@ -470,7 +470,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path, + + static u64 path_service_time(struct path_info *pi, u64 start_time) + { +- u64 sched_now = ktime_get_ns(); ++ u64 now = ktime_get_ns(); + + /* if a previous disk request has finished after this IO was + * sent to the hardware, pretend the submission happened +@@ -479,11 +479,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time) + if (time_after64(pi->last_finish, start_time)) + start_time = pi->last_finish; + +- pi->last_finish = 
sched_now; +- if (time_before64(sched_now, start_time)) ++ pi->last_finish = now; ++ if (time_before64(now, start_time)) + return 0; + +- return sched_now - start_time; ++ return now - start_time; + } + + static int hst_end_io(struct path_selector *ps, struct dm_path *path, +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c +index f7471a2642dd4..6f085e96c3f33 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -4232,6 +4232,7 @@ try_smaller_buffer: + } + + if (ic->internal_hash) { ++ size_t recalc_tags_size; + ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); + if (!ic->recalc_wq ) { + ti->error = "Cannot allocate workqueue"; +@@ -4245,8 +4246,10 @@ try_smaller_buffer: + r = -ENOMEM; + goto bad; + } +- ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, +- ic->tag_size, GFP_KERNEL); ++ recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size; ++ if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) ++ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; ++ ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL); + if (!ic->recalc_tags) { + ti->error = "Cannot allocate tags for recalculating"; + r = -ENOMEM; +diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c +index 6759091b15e09..d99ea8973b678 100644 +--- a/drivers/media/platform/rockchip/rga/rga.c ++++ b/drivers/media/platform/rockchip/rga/rga.c +@@ -895,7 +895,7 @@ static int rga_probe(struct platform_device *pdev) + } + rga->dst_mmu_pages = + (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3); +- if (rga->dst_mmu_pages) { ++ if (!rga->dst_mmu_pages) { + ret = -ENOMEM; + goto free_src_pages; + } +diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c +index c267283b01fda..e749dcb3ddea9 100644 +--- a/drivers/memory/atmel-ebi.c ++++ b/drivers/memory/atmel-ebi.c +@@ -544,20 +544,27 @@ 
static int atmel_ebi_probe(struct platform_device *pdev) + smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0); + + ebi->smc.regmap = syscon_node_to_regmap(smc_np); +- if (IS_ERR(ebi->smc.regmap)) +- return PTR_ERR(ebi->smc.regmap); ++ if (IS_ERR(ebi->smc.regmap)) { ++ ret = PTR_ERR(ebi->smc.regmap); ++ goto put_node; ++ } + + ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); +- if (IS_ERR(ebi->smc.layout)) +- return PTR_ERR(ebi->smc.layout); ++ if (IS_ERR(ebi->smc.layout)) { ++ ret = PTR_ERR(ebi->smc.layout); ++ goto put_node; ++ } + + ebi->smc.clk = of_clk_get(smc_np, 0); + if (IS_ERR(ebi->smc.clk)) { +- if (PTR_ERR(ebi->smc.clk) != -ENOENT) +- return PTR_ERR(ebi->smc.clk); ++ if (PTR_ERR(ebi->smc.clk) != -ENOENT) { ++ ret = PTR_ERR(ebi->smc.clk); ++ goto put_node; ++ } + + ebi->smc.clk = NULL; + } ++ of_node_put(smc_np); + ret = clk_prepare_enable(ebi->smc.clk); + if (ret) + return ret; +@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) + } + + return of_platform_populate(np, NULL, NULL, dev); ++ ++put_node: ++ of_node_put(smc_np); ++ return ret; + } + + static __maybe_unused int atmel_ebi_resume(struct device *dev) +diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c +index 9019121a80f53..781af51e3f793 100644 +--- a/drivers/memory/renesas-rpc-if.c ++++ b/drivers/memory/renesas-rpc-if.c +@@ -592,6 +592,7 @@ static int rpcif_probe(struct platform_device *pdev) + struct platform_device *vdev; + struct device_node *flash; + const char *name; ++ int ret; + + flash = of_get_next_child(pdev->dev.of_node, NULL); + if (!flash) { +@@ -615,7 +616,14 @@ static int rpcif_probe(struct platform_device *pdev) + return -ENOMEM; + vdev->dev.parent = &pdev->dev; + platform_set_drvdata(pdev, vdev); +- return platform_device_add(vdev); ++ ++ ret = platform_device_add(vdev); ++ if (ret) { ++ platform_device_put(vdev); ++ return ret; ++ } ++ ++ return 0; + } + + static int rpcif_remove(struct platform_device *pdev) +diff 
--git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c +index cd8d9b0e0edb3..c96dfc11aa6fc 100644 +--- a/drivers/net/dsa/ocelot/felix_vsc9959.c ++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c +@@ -1466,7 +1466,7 @@ static int felix_pci_probe(struct pci_dev *pdev, + + err = dsa_register_switch(ds); + if (err) { +- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); ++ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); + goto err_register_ds; + } + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 7dcd5613ee56f..a2062144d7ca1 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset) + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + __raw_writel(value, offset); + else +- writel(value, offset); ++ writel_relaxed(value, offset); + } + + static inline u32 bcmgenet_readl(void __iomem *offset) +@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset) + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return __raw_readl(offset); + else +- return readl(offset); ++ return readl_relaxed(offset); + } + + static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, +diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c +index 939b692ffc335..ce843ea914646 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c +@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, + return 0; + + errout: ++ mutex_destroy(&mlxsw_i2c->cmd.lock); + i2c_set_clientdata(client, NULL); + + return err; +diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig +index 42bc014136fe3..9ceb7e1fb1696 100644 +--- 
a/drivers/net/ethernet/micrel/Kconfig ++++ b/drivers/net/ethernet/micrel/Kconfig +@@ -37,6 +37,7 @@ config KS8851 + config KS8851_MLL + tristate "Micrel KS8851 MLL" + depends on HAS_IOMEM ++ depends on PTP_1588_CLOCK_OPTIONAL + select MII + select CRC32 + select EEPROM_93CX6 +diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +index fc99ad8e4a388..1664e9184c9ca 100644 +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +@@ -2895,11 +2895,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, + status = myri10ge_xmit(curr, dev); + if (status != 0) { + dev_kfree_skb_any(curr); +- if (segs != NULL) { +- curr = segs; +- segs = next; ++ skb_list_walk_safe(next, curr, next) { + curr->next = NULL; +- dev_kfree_skb_any(segs); ++ dev_kfree_skb_any(curr); + } + goto drop; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +index cd478d2cd871a..00f6d347eaf75 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c ++++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +@@ -57,10 +57,6 @@ + #define TSE_PCS_USE_SGMII_ENA BIT(0) + #define TSE_PCS_IF_USE_SGMII 0x03 + +-#define SGMII_ADAPTER_CTRL_REG 0x00 +-#define SGMII_ADAPTER_DISABLE 0x0001 +-#define SGMII_ADAPTER_ENABLE 0x0000 +- + #define AUTONEGO_LINK_TIMER 20 + + static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) +@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed) + { + void __iomem *tse_pcs_base = pcs->tse_pcs_base; +- void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + u32 val; + +- writew(SGMII_ADAPTER_ENABLE, +- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); +- + pcs->autoneg = phy_dev->autoneg; + + if (phy_dev->autoneg == AUTONEG_ENABLE) { +diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h 
b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +index 442812c0a4bdc..694ac25ef426b 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h ++++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +@@ -10,6 +10,10 @@ + #include <linux/phy.h> + #include <linux/timer.h> + ++#define SGMII_ADAPTER_CTRL_REG 0x00 ++#define SGMII_ADAPTER_ENABLE 0x0000 ++#define SGMII_ADAPTER_DISABLE 0x0001 ++ + struct tse_pcs { + struct device *dev; + void __iomem *tse_pcs_base; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +index f37b6d57b2fe2..8bb0106cb7ea3 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +@@ -18,9 +18,6 @@ + + #include "altr_tse_pcs.h" + +-#define SGMII_ADAPTER_CTRL_REG 0x00 +-#define SGMII_ADAPTER_DISABLE 0x0001 +- + #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 + #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 + #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 +@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) + { + struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; + void __iomem *splitter_base = dwmac->splitter_base; +- void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; + void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; + struct device *dev = dwmac->dev; + struct net_device *ndev = dev_get_drvdata(dev); + struct phy_device *phy_dev = ndev->phydev; + u32 val; + +- if ((tse_pcs_base) && (sgmii_adapter_base)) +- writew(SGMII_ADAPTER_DISABLE, +- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); ++ writew(SGMII_ADAPTER_DISABLE, ++ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + if (splitter_base) { + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); +@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); + } + +- if (tse_pcs_base && 
sgmii_adapter_base) ++ writew(SGMII_ADAPTER_ENABLE, ++ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); ++ if (phy_dev) + tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); + } + +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index bbdcba88c021e..3d91baf2e55aa 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -2060,15 +2060,14 @@ static int axienet_probe(struct platform_device *pdev) + if (ret) + goto cleanup_clk; + +- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); +- if (lp->phy_node) { +- ret = axienet_mdio_setup(lp); +- if (ret) +- dev_warn(&pdev->dev, +- "error registering MDIO bus: %d\n", ret); +- } ++ ret = axienet_mdio_setup(lp); ++ if (ret) ++ dev_warn(&pdev->dev, ++ "error registering MDIO bus: %d\n", ret); ++ + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || + lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { ++ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!lp->phy_node) { + dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); + ret = -EINVAL; +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c +index 02d6f3ad9aca8..83dc1c2c3b84b 100644 +--- a/drivers/net/hamradio/6pack.c ++++ b/drivers/net/hamradio/6pack.c +@@ -311,7 +311,6 @@ static void sp_setup(struct net_device *dev) + { + /* Finish setting up the DEVICE info. */ + dev->netdev_ops = &sp_netdev_ops; +- dev->needs_free_netdev = true; + dev->mtu = SIXP_MTU; + dev->hard_header_len = AX25_MAX_HEADER_LEN; + dev->header_ops = &ax25_header_ops; +@@ -679,9 +678,11 @@ static void sixpack_close(struct tty_struct *tty) + del_timer_sync(&sp->tx_t); + del_timer_sync(&sp->resync_t); + +- /* Free all 6pack frame buffers. */ ++ /* Free all 6pack frame buffers after unreg. 
*/ + kfree(sp->rbuff); + kfree(sp->xbuff); ++ ++ free_netdev(sp->dev); + } + + /* Perform I/O control on an active 6pack channel. */ +diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c +index fbd36891ee643..5d171e7f118df 100644 +--- a/drivers/net/mdio/mdio-bcm-unimac.c ++++ b/drivers/net/mdio/mdio-bcm-unimac.c +@@ -5,20 +5,18 @@ + * Copyright (C) 2014-2017 Broadcom + */ + ++#include <linux/clk.h> ++#include <linux/delay.h> ++#include <linux/io.h> + #include <linux/kernel.h> +-#include <linux/phy.h> +-#include <linux/platform_device.h> +-#include <linux/sched.h> + #include <linux/module.h> +-#include <linux/io.h> +-#include <linux/delay.h> +-#include <linux/clk.h> +- + #include <linux/of.h> +-#include <linux/of_platform.h> + #include <linux/of_mdio.h> +- ++#include <linux/of_platform.h> ++#include <linux/phy.h> + #include <linux/platform_data/mdio-bcm-unimac.h> ++#include <linux/platform_device.h> ++#include <linux/sched.h> + + #define MDIO_CMD 0x00 + #define MDIO_START_BUSY (1 << 29) +diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c +index 5136275c8e739..99588192cc78f 100644 +--- a/drivers/net/mdio/mdio-bitbang.c ++++ b/drivers/net/mdio/mdio-bitbang.c +@@ -14,10 +14,10 @@ + * Vitaly Bordug <vbordug@ru.mvista.com> + */ + +-#include <linux/module.h> ++#include <linux/delay.h> + #include <linux/mdio-bitbang.h> ++#include <linux/module.h> + #include <linux/types.h> +-#include <linux/delay.h> + + #define MDIO_READ 2 + #define MDIO_WRITE 1 +diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c +index 1afd6fc1a3517..95ce274c1be14 100644 +--- a/drivers/net/mdio/mdio-cavium.c ++++ b/drivers/net/mdio/mdio-cavium.c +@@ -4,9 +4,9 @@ + */ + + #include <linux/delay.h> ++#include <linux/io.h> + #include <linux/module.h> + #include <linux/phy.h> +-#include <linux/io.h> + + #include "mdio-cavium.h" + +diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c +index 
1b00235d7dc5b..56c8f914f8930 100644 +--- a/drivers/net/mdio/mdio-gpio.c ++++ b/drivers/net/mdio/mdio-gpio.c +@@ -17,15 +17,15 @@ + * Vitaly Bordug <vbordug@ru.mvista.com> + */ + +-#include <linux/module.h> +-#include <linux/slab.h> ++#include <linux/gpio/consumer.h> + #include <linux/interrupt.h> +-#include <linux/platform_device.h> +-#include <linux/platform_data/mdio-gpio.h> + #include <linux/mdio-bitbang.h> + #include <linux/mdio-gpio.h> +-#include <linux/gpio/consumer.h> ++#include <linux/module.h> + #include <linux/of_mdio.h> ++#include <linux/platform_data/mdio-gpio.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> + + struct mdio_gpio_info { + struct mdiobb_ctrl ctrl; +diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c +index 25c25ea6da66f..9cd71d896963d 100644 +--- a/drivers/net/mdio/mdio-ipq4019.c ++++ b/drivers/net/mdio/mdio-ipq4019.c +@@ -3,10 +3,10 @@ + /* Copyright (c) 2020 Sartura Ltd. */ + + #include <linux/delay.h> +-#include <linux/kernel.h> +-#include <linux/module.h> + #include <linux/io.h> + #include <linux/iopoll.h> ++#include <linux/kernel.h> ++#include <linux/module.h> + #include <linux/of_address.h> + #include <linux/of_mdio.h> + #include <linux/phy.h> +diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c +index f0a6bfa61645e..49d4e9aa30bbf 100644 +--- a/drivers/net/mdio/mdio-ipq8064.c ++++ b/drivers/net/mdio/mdio-ipq8064.c +@@ -7,12 +7,12 @@ + + #include <linux/delay.h> + #include <linux/kernel.h> ++#include <linux/mfd/syscon.h> + #include <linux/module.h> +-#include <linux/regmap.h> + #include <linux/of_mdio.h> + #include <linux/of_address.h> + #include <linux/platform_device.h> +-#include <linux/mfd/syscon.h> ++#include <linux/regmap.h> + + /* MII address register definitions */ + #define MII_ADDR_REG_ADDR 0x10 +diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c +index 1c9232fca1e2f..037649bef92ea 100644 +--- 
a/drivers/net/mdio/mdio-mscc-miim.c ++++ b/drivers/net/mdio/mdio-mscc-miim.c +@@ -6,14 +6,14 @@ + * Copyright (c) 2017 Microsemi Corporation + */ + +-#include <linux/kernel.h> +-#include <linux/module.h> +-#include <linux/phy.h> +-#include <linux/platform_device.h> + #include <linux/bitops.h> + #include <linux/io.h> + #include <linux/iopoll.h> ++#include <linux/kernel.h> ++#include <linux/module.h> + #include <linux/of_mdio.h> ++#include <linux/phy.h> ++#include <linux/platform_device.h> + + #define MSCC_MIIM_REG_STATUS 0x0 + #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) +diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c +index 42fb5f166136b..641cfa41f492a 100644 +--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c ++++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c +@@ -3,14 +3,14 @@ + * Copyright 2016 Broadcom + */ + #include <linux/clk.h> +-#include <linux/platform_device.h> ++#include <linux/delay.h> + #include <linux/device.h> +-#include <linux/of_mdio.h> ++#include <linux/iopoll.h> ++#include <linux/mdio-mux.h> + #include <linux/module.h> ++#include <linux/of_mdio.h> + #include <linux/phy.h> +-#include <linux/mdio-mux.h> +-#include <linux/delay.h> +-#include <linux/iopoll.h> ++#include <linux/platform_device.h> + + #define MDIO_RATE_ADJ_EXT_OFFSET 0x000 + #define MDIO_RATE_ADJ_INT_OFFSET 0x004 +diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c +index 10a758fdc9e63..3c7f16f06b452 100644 +--- a/drivers/net/mdio/mdio-mux-gpio.c ++++ b/drivers/net/mdio/mdio-mux-gpio.c +@@ -3,13 +3,13 @@ + * Copyright (C) 2011, 2012 Cavium, Inc. 
+ */ + +-#include <linux/platform_device.h> + #include <linux/device.h> +-#include <linux/of_mdio.h> ++#include <linux/gpio/consumer.h> ++#include <linux/mdio-mux.h> + #include <linux/module.h> ++#include <linux/of_mdio.h> + #include <linux/phy.h> +-#include <linux/mdio-mux.h> +-#include <linux/gpio/consumer.h> ++#include <linux/platform_device.h> + + #define DRV_VERSION "1.1" + #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" +diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c +index d1a8780e24d88..c02fb2a067eef 100644 +--- a/drivers/net/mdio/mdio-mux-mmioreg.c ++++ b/drivers/net/mdio/mdio-mux-mmioreg.c +@@ -7,13 +7,13 @@ + * Copyright 2012 Freescale Semiconductor, Inc. + */ + +-#include <linux/platform_device.h> + #include <linux/device.h> ++#include <linux/mdio-mux.h> ++#include <linux/module.h> + #include <linux/of_address.h> + #include <linux/of_mdio.h> +-#include <linux/module.h> + #include <linux/phy.h> +-#include <linux/mdio-mux.h> ++#include <linux/platform_device.h> + + struct mdio_mux_mmioreg_state { + void *mux_handle; +diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c +index d6564381aa3e4..527acfc3c045a 100644 +--- a/drivers/net/mdio/mdio-mux-multiplexer.c ++++ b/drivers/net/mdio/mdio-mux-multiplexer.c +@@ -4,10 +4,10 @@ + * Copyright 2019 NXP + */ + +-#include <linux/platform_device.h> + #include <linux/mdio-mux.h> + #include <linux/module.h> + #include <linux/mux/consumer.h> ++#include <linux/platform_device.h> + + struct mdio_mux_multiplexer_state { + struct mux_control *muxc; +diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c +index ccb3ee704eb1c..3dde0c2b3e097 100644 +--- a/drivers/net/mdio/mdio-mux.c ++++ b/drivers/net/mdio/mdio-mux.c +@@ -3,12 +3,12 @@ + * Copyright (C) 2011, 2012 Cavium, Inc. 
+ */ + +-#include <linux/platform_device.h> +-#include <linux/mdio-mux.h> +-#include <linux/of_mdio.h> + #include <linux/device.h> ++#include <linux/mdio-mux.h> + #include <linux/module.h> ++#include <linux/of_mdio.h> + #include <linux/phy.h> ++#include <linux/platform_device.h> + + #define DRV_DESCRIPTION "MDIO bus multiplexer driver" + +diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c +index 6faf39314ac93..e096e68ac667b 100644 +--- a/drivers/net/mdio/mdio-octeon.c ++++ b/drivers/net/mdio/mdio-octeon.c +@@ -3,13 +3,13 @@ + * Copyright (C) 2009-2015 Cavium, Inc. + */ + +-#include <linux/platform_device.h> ++#include <linux/gfp.h> ++#include <linux/io.h> ++#include <linux/module.h> + #include <linux/of_address.h> + #include <linux/of_mdio.h> +-#include <linux/module.h> +-#include <linux/gfp.h> + #include <linux/phy.h> +-#include <linux/io.h> ++#include <linux/platform_device.h> + + #include "mdio-cavium.h" + +diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c +index dd7430c998a2a..822d2cdd2f359 100644 +--- a/drivers/net/mdio/mdio-thunder.c ++++ b/drivers/net/mdio/mdio-thunder.c +@@ -3,14 +3,14 @@ + * Copyright (C) 2009-2016 Cavium, Inc. 
+ */ + +-#include <linux/of_address.h> +-#include <linux/of_mdio.h> +-#include <linux/module.h> ++#include <linux/acpi.h> + #include <linux/gfp.h> +-#include <linux/phy.h> + #include <linux/io.h> +-#include <linux/acpi.h> ++#include <linux/module.h> ++#include <linux/of_address.h> ++#include <linux/of_mdio.h> + #include <linux/pci.h> ++#include <linux/phy.h> + + #include "mdio-cavium.h" + +diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c +index 461207cdf5d6e..7ab4e26db08c2 100644 +--- a/drivers/net/mdio/mdio-xgene.c ++++ b/drivers/net/mdio/mdio-xgene.c +@@ -13,11 +13,11 @@ + #include <linux/io.h> + #include <linux/mdio/mdio-xgene.h> + #include <linux/module.h> +-#include <linux/of_platform.h> +-#include <linux/of_net.h> + #include <linux/of_mdio.h> +-#include <linux/prefetch.h> ++#include <linux/of_net.h> ++#include <linux/of_platform.h> + #include <linux/phy.h> ++#include <linux/prefetch.h> + #include <net/ip.h> + + static bool xgene_mdio_status; +diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c +index 4daf94bb56a53..ea0bf13e8ac3f 100644 +--- a/drivers/net/mdio/of_mdio.c ++++ b/drivers/net/mdio/of_mdio.c +@@ -8,17 +8,17 @@ + * out of the OpenFirmware device tree and using it to populate an mii_bus. 
+ */ + +-#include <linux/kernel.h> + #include <linux/device.h> +-#include <linux/netdevice.h> + #include <linux/err.h> +-#include <linux/phy.h> +-#include <linux/phy_fixed.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/netdevice.h> + #include <linux/of.h> + #include <linux/of_irq.h> + #include <linux/of_mdio.h> + #include <linux/of_net.h> +-#include <linux/module.h> ++#include <linux/phy.h> ++#include <linux/phy_fixed.h> + + #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ + +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c +index f81fb0b13a944..369bd30fed35f 100644 +--- a/drivers/net/slip/slip.c ++++ b/drivers/net/slip/slip.c +@@ -468,7 +468,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) + spin_lock(&sl->lock); + + if (netif_queue_stopped(dev)) { +- if (!netif_running(dev)) ++ if (!netif_running(dev) || !sl->tty) + goto out; + + /* May be we must check transmitter timeout here ? +diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c +index 0717c18015c9c..c9c4095181744 100644 +--- a/drivers/net/usb/aqc111.c ++++ b/drivers/net/usb/aqc111.c +@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + if (start_of_descs != desc_offset) + goto err; + +- /* self check desc_offset from header*/ +- if (desc_offset >= skb_len) ++ /* self check desc_offset from header and make sure that the ++ * bounds of the metadata array are inside the SKB ++ */ ++ if (pkt_count * 2 + desc_offset >= skb_len) + goto err; + ++ /* Packets must not overlap the metadata array */ ++ skb_trim(skb, desc_offset); ++ + if (pkt_count == 0) + goto err; + +diff --git a/drivers/net/veth.c b/drivers/net/veth.c +index f7e3eb309a26e..5be8ed9105535 100644 +--- a/drivers/net/veth.c ++++ b/drivers/net/veth.c +@@ -292,7 +292,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) + + rcu_read_lock(); + rcv = rcu_dereference(priv->peer); +- if (unlikely(!rcv)) 
{ ++ if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { + kfree_skb(skb); + goto drop; + } +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index af367696fd92f..ac354dfc50559 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) + continue; + + txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); +- fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; ++ fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0]; + if (fi->keyix == keyix) + return true; + } +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c +index 5691bd6eb82c2..6555abf02f18b 100644 +--- a/drivers/net/wireless/ath/ath9k/xmit.c ++++ b/drivers/net/wireless/ath/ath9k/xmit.c +@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) + { + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + BUILD_BUG_ON(sizeof(struct ath_frame_info) > +- sizeof(tx_info->rate_driver_data)); +- return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; ++ sizeof(tx_info->status.status_driver_data)); ++ return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; + } + + static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) +@@ -2501,6 +2501,16 @@ skip_tx_complete: + spin_unlock_irqrestore(&sc->tx.txbuflock, flags); + } + ++static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info) ++{ ++ void *ptr = &tx_info->status; ++ ++ memset(ptr + sizeof(tx_info->status.rates), 0, ++ sizeof(tx_info->status) - ++ sizeof(tx_info->status.rates) - ++ sizeof(tx_info->status.status_driver_data)); ++} ++ + static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, + struct ath_tx_status *ts, int nframes, int nbad, + int txok) +@@ -2512,6 +2522,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, + struct ath_hw 
*ah = sc->sc_ah; + u8 i, tx_rateindex; + ++ ath_clear_tx_status(tx_info); ++ + if (txok) + tx_info->status.ack_signal = ts->ts_rssi; + +@@ -2526,6 +2538,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, + tx_info->status.ampdu_len = nframes; + tx_info->status.ampdu_ack_len = nframes - nbad; + ++ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; ++ ++ for (i = tx_rateindex + 1; i < hw->max_rates; i++) { ++ tx_info->status.rates[i].count = 0; ++ tx_info->status.rates[i].idx = -1; ++ } ++ + if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && + (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { + /* +@@ -2547,16 +2566,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, + tx_info->status.rates[tx_rateindex].count = + hw->max_rate_tries; + } +- +- for (i = tx_rateindex + 1; i < hw->max_rates; i++) { +- tx_info->status.rates[i].count = 0; +- tx_info->status.rates[i].idx = -1; +- } +- +- tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; +- +- /* we report airtime in ath_tx_count_airtime(), don't report twice */ +- tx_info->status.tx_time = 0; + } + + static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) +diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c +index 7f7bc0993670f..e09bbf3890c49 100644 +--- a/drivers/perf/fsl_imx8_ddr_perf.c ++++ b/drivers/perf/fsl_imx8_ddr_perf.c +@@ -29,7 +29,7 @@ + #define CNTL_OVER_MASK 0xFFFFFFFE + + #define CNTL_CSV_SHIFT 24 +-#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT) ++#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) + + #define EVENT_CYCLES_ID 0 + #define EVENT_CYCLES_COUNTER 0 +diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c +index cadea0344486f..40befdd9dfa92 100644 +--- a/drivers/regulator/wm8994-regulator.c ++++ b/drivers/regulator/wm8994-regulator.c +@@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = { + }; + + static const struct regulator_desc 
wm8994_ldo_desc[] = { ++ { ++ .name = "LDO1", ++ .id = 1, ++ .type = REGULATOR_VOLTAGE, ++ .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1, ++ .vsel_reg = WM8994_LDO_1, ++ .vsel_mask = WM8994_LDO1_VSEL_MASK, ++ .ops = &wm8994_ldo1_ops, ++ .min_uV = 2400000, ++ .uV_step = 100000, ++ .enable_time = 3000, ++ .off_on_delay = 36000, ++ .owner = THIS_MODULE, ++ }, ++ { ++ .name = "LDO2", ++ .id = 2, ++ .type = REGULATOR_VOLTAGE, ++ .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1, ++ .vsel_reg = WM8994_LDO_2, ++ .vsel_mask = WM8994_LDO2_VSEL_MASK, ++ .ops = &wm8994_ldo2_ops, ++ .enable_time = 3000, ++ .off_on_delay = 36000, ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static const struct regulator_desc wm8958_ldo_desc[] = { + { + .name = "LDO1", + .id = 1, +@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev) + * regulator core and we need not worry about it on the + * error path. + */ +- ldo->regulator = devm_regulator_register(&pdev->dev, +- &wm8994_ldo_desc[id], +- &config); ++ if (ldo->wm8994->type == WM8994) { ++ ldo->regulator = devm_regulator_register(&pdev->dev, ++ &wm8994_ldo_desc[id], ++ &config); ++ } else { ++ ldo->regulator = devm_regulator_register(&pdev->dev, ++ &wm8958_ldo_desc[id], ++ &config); ++ } ++ + if (IS_ERR(ldo->regulator)) { + ret = PTR_ERR(ldo->regulator); + dev_err(wm8994->dev, "Failed to register LDO%d: %d\n", +diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c +index a13c203ef7a9a..c4881657a807b 100644 +--- a/drivers/scsi/be2iscsi/be_iscsi.c ++++ b/drivers/scsi/be2iscsi/be_iscsi.c +@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, + struct beiscsi_endpoint *beiscsi_ep; + struct iscsi_endpoint *ep; + uint16_t cri_index; ++ int rc = 0; + + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) +@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, + + beiscsi_ep = ep->dd_data; + +- if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) +- return 
-EINVAL; ++ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { ++ rc = -EINVAL; ++ goto put_ep; ++ } + + if (beiscsi_ep->phba != phba) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n", + beiscsi_ep->phba, phba); +- +- return -EEXIST; ++ rc = -EEXIST; ++ goto put_ep; + } + cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); + if (phba->conn_table[cri_index]) { +@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, + beiscsi_ep->ep_cid, + beiscsi_conn, + phba->conn_table[cri_index]); +- return -EINVAL; ++ rc = -EINVAL; ++ goto put_ep; + } + } + +@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, + "BS_%d : cid %d phba->conn_table[%u]=%p\n", + beiscsi_ep->ep_cid, cri_index, beiscsi_conn); + phba->conn_table[cri_index] = beiscsi_conn; +- return 0; ++ ++put_ep: ++ iscsi_put_endpoint(ep); ++ return rc; + } + + static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index 987dc8135a9b4..b977e039bb789 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -5810,6 +5810,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { + .destroy_session = beiscsi_session_destroy, + .create_conn = beiscsi_conn_create, + .bind_conn = beiscsi_conn_bind, ++ .unbind_conn = iscsi_conn_unbind, + .destroy_conn = iscsi_conn_teardown, + .attr_is_visible = beiscsi_attr_is_visible, + .set_iface_param = beiscsi_iface_set_param, +diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c +index 21efc73b87bee..649664dc6da44 100644 +--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c ++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c +@@ -1422,17 +1422,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, + * Forcefully terminate all in progress connection recovery at the + * earliest, either in bind(), send_pdu(LOGIN), or conn_start() + */ +- 
if (bnx2i_adapter_ready(hba)) +- return -EIO; ++ if (bnx2i_adapter_ready(hba)) { ++ ret_code = -EIO; ++ goto put_ep; ++ } + + bnx2i_ep = ep->dd_data; + if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || +- (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) ++ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) { + /* Peer disconnect via' FIN or RST */ +- return -EINVAL; ++ ret_code = -EINVAL; ++ goto put_ep; ++ } + +- if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) +- return -EINVAL; ++ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { ++ ret_code = -EINVAL; ++ goto put_ep; ++ } + + if (bnx2i_ep->hba != hba) { + /* Error - TCP connection does not belong to this device +@@ -1443,7 +1449,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, + iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, + "belong to hba (%s)\n", + hba->netdev->name); +- return -EEXIST; ++ ret_code = -EEXIST; ++ goto put_ep; + } + bnx2i_ep->conn = bnx2i_conn; + bnx2i_conn->ep = bnx2i_ep; +@@ -1460,6 +1467,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, + bnx2i_put_rq_buf(bnx2i_conn, 0); + + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); ++put_ep: ++ iscsi_put_endpoint(ep); + return ret_code; + } + +@@ -2278,6 +2287,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { + .destroy_session = bnx2i_session_destroy, + .create_conn = bnx2i_conn_create, + .bind_conn = bnx2i_conn_bind, ++ .unbind_conn = iscsi_conn_unbind, + .destroy_conn = bnx2i_conn_destroy, + .attr_is_visible = bnx2i_attr_is_visible, + .set_param = iscsi_set_param, +diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +index 37d99357120fa..edcd3fab6973c 100644 +--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c ++++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { + /* connection management */ + .create_conn = cxgbi_create_conn, + .bind_conn = cxgbi_bind_conn, ++ .unbind_conn = iscsi_conn_unbind, 
+ .destroy_conn = iscsi_tcp_conn_teardown, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, +diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +index 2c3491528d424..efb3e2b3398e2 100644 +--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c ++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { + /* connection management */ + .create_conn = cxgbi_create_conn, + .bind_conn = cxgbi_bind_conn, ++ .unbind_conn = iscsi_conn_unbind, + .destroy_conn = iscsi_tcp_conn_teardown, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, +diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c +index ecb134b4699f2..506b561670af0 100644 +--- a/drivers/scsi/cxgbi/libcxgbi.c ++++ b/drivers/scsi/cxgbi/libcxgbi.c +@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, + ppm->tformat.pgsz_idx_dflt); + if (err < 0) +- return err; ++ goto put_ep; + + err = iscsi_conn_bind(cls_session, cls_conn, is_leading); +- if (err) +- return -EINVAL; ++ if (err) { ++ err = -EINVAL; ++ goto put_ep; ++ } + + /* calculate the tag idx bits needed for this conn based on cmds_max */ + cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; +@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, + /* init recv engine */ + iscsi_tcp_hdr_recv_prep(tcp_conn); + +- return 0; ++put_ep: ++ iscsi_put_endpoint(ep); ++ return err; + } + EXPORT_SYMBOL_GPL(cxgbi_bind_conn); + +diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +index cc3908c2d2f94..a3431485def8f 100644 +--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c ++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +@@ -35,7 +35,7 @@ + + #define IBMVSCSIS_VERSION "v0.2" + +-#define INITIAL_SRP_LIMIT 800 ++#define INITIAL_SRP_LIMIT 1024 + #define DEFAULT_MAX_SECTORS 256 + 
#define MAX_TXU 1024 * 1024 + +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index d4e66c595eb87..05799b41974d5 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -1367,23 +1367,32 @@ void iscsi_session_failure(struct iscsi_session *session, + } + EXPORT_SYMBOL_GPL(iscsi_session_failure); + +-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) ++static bool iscsi_set_conn_failed(struct iscsi_conn *conn) + { + struct iscsi_session *session = conn->session; + +- spin_lock_bh(&session->frwd_lock); +- if (session->state == ISCSI_STATE_FAILED) { +- spin_unlock_bh(&session->frwd_lock); +- return; +- } ++ if (session->state == ISCSI_STATE_FAILED) ++ return false; + + if (conn->stop_stage == 0) + session->state = ISCSI_STATE_FAILED; +- spin_unlock_bh(&session->frwd_lock); + + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); + set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); +- iscsi_conn_error_event(conn->cls_conn, err); ++ return true; ++} ++ ++void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) ++{ ++ struct iscsi_session *session = conn->session; ++ bool needs_evt; ++ ++ spin_lock_bh(&session->frwd_lock); ++ needs_evt = iscsi_set_conn_failed(conn); ++ spin_unlock_bh(&session->frwd_lock); ++ ++ if (needs_evt) ++ iscsi_conn_error_event(conn->cls_conn, err); + } + EXPORT_SYMBOL_GPL(iscsi_conn_failure); + +@@ -2117,6 +2126,51 @@ done: + spin_unlock(&session->frwd_lock); + } + ++/** ++ * iscsi_conn_unbind - prevent queueing to conn. ++ * @cls_conn: iscsi conn ep is bound to. ++ * @is_active: is the conn in use for boot or is this for EH/termination ++ * ++ * This must be called by drivers implementing the ep_disconnect callout. ++ * It disables queueing to the connection from libiscsi in preparation for ++ * an ep_disconnect call. 
++ */ ++void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) ++{ ++ struct iscsi_session *session; ++ struct iscsi_conn *conn; ++ ++ if (!cls_conn) ++ return; ++ ++ conn = cls_conn->dd_data; ++ session = conn->session; ++ /* ++ * Wait for iscsi_eh calls to exit. We don't wait for the tmf to ++ * complete or timeout. The caller just wants to know what's running ++ * is everything that needs to be cleaned up, and no cmds will be ++ * queued. ++ */ ++ mutex_lock(&session->eh_mutex); ++ ++ iscsi_suspend_queue(conn); ++ iscsi_suspend_tx(conn); ++ ++ spin_lock_bh(&session->frwd_lock); ++ if (!is_active) { ++ /* ++ * if logout timed out before userspace could even send a PDU ++ * the state might still be in ISCSI_STATE_LOGGED_IN and ++ * allowing new cmds and TMFs. ++ */ ++ if (session->state == ISCSI_STATE_LOGGED_IN) ++ iscsi_set_conn_failed(conn); ++ } ++ spin_unlock_bh(&session->frwd_lock); ++ mutex_unlock(&session->eh_mutex); ++} ++EXPORT_SYMBOL_GPL(iscsi_conn_unbind); ++ + static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, + struct iscsi_tm *hdr) + { +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index 1149bfc42fe64..134e4ee5dc481 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -13614,6 +13614,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + ++ /* Init cpu_map array */ ++ lpfc_cpu_map_array_init(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h +index 6b8ec57e8bdfa..c088a848776ef 100644 +--- a/drivers/scsi/megaraid/megaraid_sas.h ++++ b/drivers/scsi/megaraid/megaraid_sas.h +@@ -2554,6 +2554,9 @@ struct megasas_instance_template { + #define MEGASAS_IS_LOGICAL(sdev) \ + ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 
0 : 1) + ++#define MEGASAS_IS_LUN_VALID(sdev) \ ++ (((sdev)->lun == 0) ? 1 : 0) ++ + #define MEGASAS_DEV_INDEX(scp) \ + (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id) +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c +index 1a70cc995c28c..84a2e9292fd03 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c +@@ -2111,6 +2111,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev) + goto scan_target; + } + return -ENXIO; ++ } else if (!MEGASAS_IS_LUN_VALID(sdev)) { ++ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); ++ return -ENXIO; + } + + scan_target: +@@ -2141,6 +2144,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev) + instance = megasas_lookup_instance(sdev->host->host_no); + + if (MEGASAS_IS_LOGICAL(sdev)) { ++ if (!MEGASAS_IS_LUN_VALID(sdev)) { ++ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); ++ return; ++ } + ld_tgt_id = MEGASAS_TARGET_ID(sdev); + instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; + if (megasas_dbg_lvl & LD_PD_DEBUG) +diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c +index 0cfea7b2ab13a..85ca8421fb862 100644 +--- a/drivers/scsi/mvsas/mv_init.c ++++ b/drivers/scsi/mvsas/mv_init.c +@@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = { + { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, + { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, + { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, ++ { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, + { PCI_VDEVICE(TTI, 0x2721), chip_9480 }, +diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c +index 4c03bf08b543c..0305c8999ba5d 100644 +--- a/drivers/scsi/pm8001/pm80xx_hwi.c ++++ b/drivers/scsi/pm8001/pm80xx_hwi.c +@@ -765,6 +765,10 @@ static void 
init_default_table_values(struct pm8001_hba_info *pm8001_ha) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + ++ /* Enable higher IQs and OQs, 32 to 63, bit 16 */ ++ if (pm8001_ha->max_q_num > 32) ++ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= ++ 1 << 16; + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + +@@ -1024,6 +1028,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) + if (0x0000 != gst_len_mpistate) + return -EBUSY; + ++ /* ++ * As per controller datasheet, after successful MPI ++ * initialization minimum 500ms delay is required before ++ * issuing commands. ++ */ ++ msleep(500); ++ + return 0; + } + +@@ -1682,10 +1693,11 @@ static void + pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) + { + #ifdef PM8001_USE_MSIX +- u32 mask; +- mask = (u32)(1 << vec); +- +- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); ++ if (vec < 32) ++ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec); ++ else ++ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, ++ 1U << (vec - 32)); + return; + #endif + pm80xx_chip_intx_interrupt_enable(pm8001_ha); +@@ -1701,12 +1713,15 @@ static void + pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) + { + #ifdef PM8001_USE_MSIX +- u32 mask; +- if (vec == 0xFF) +- mask = 0xFFFFFFFF; ++ if (vec == 0xFF) { ++ /* disable all vectors 0-31, 32-63 */ ++ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF); ++ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF); ++ } else if (vec < 32) ++ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec); + else +- mask = (u32)(1 << vec); +- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); ++ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, ++ 1U << (vec - 32)); + return; + #endif + pm80xx_chip_intx_interrupt_disable(pm8001_ha); +diff --git a/drivers/scsi/qedi/qedi_iscsi.c 
b/drivers/scsi/qedi/qedi_iscsi.c +index f51723e2d5227..5f7e62f19d83a 100644 +--- a/drivers/scsi/qedi/qedi_iscsi.c ++++ b/drivers/scsi/qedi/qedi_iscsi.c +@@ -387,6 +387,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, + struct qedi_ctx *qedi = iscsi_host_priv(shost); + struct qedi_endpoint *qedi_ep; + struct iscsi_endpoint *ep; ++ int rc = 0; + + ep = iscsi_lookup_endpoint(transport_fd); + if (!ep) +@@ -394,11 +395,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, + + qedi_ep = ep->dd_data; + if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || +- (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) +- return -EINVAL; ++ (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) { ++ rc = -EINVAL; ++ goto put_ep; ++ } ++ ++ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { ++ rc = -EINVAL; ++ goto put_ep; ++ } + +- if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) +- return -EINVAL; + + qedi_ep->conn = qedi_conn; + qedi_conn->ep = qedi_ep; +@@ -408,13 +414,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, + qedi_conn->cmd_cleanup_req = 0; + qedi_conn->cmd_cleanup_cmpl = 0; + +- if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) +- return -EINVAL; ++ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { ++ rc = -EINVAL; ++ goto put_ep; ++ } ++ + + spin_lock_init(&qedi_conn->tmf_work_lock); + INIT_LIST_HEAD(&qedi_conn->tmf_work_list); + init_waitqueue_head(&qedi_conn->wait_queue); +- return 0; ++put_ep: ++ iscsi_put_endpoint(ep); ++ return rc; + } + + static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, +@@ -1428,6 +1439,7 @@ struct iscsi_transport qedi_iscsi_transport = { + .destroy_session = qedi_session_destroy, + .create_conn = qedi_conn_create, + .bind_conn = qedi_conn_bind, ++ .unbind_conn = iscsi_conn_unbind, + .start_conn = qedi_conn_start, + .stop_conn = iscsi_conn_stop, + .destroy_conn = qedi_conn_destroy, +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 
2c23b692e318c..8d82d2a83059d 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { + .start_conn = qla4xxx_conn_start, + .create_conn = qla4xxx_conn_create, + .bind_conn = qla4xxx_conn_bind, ++ .unbind_conn = iscsi_conn_unbind, + .stop_conn = iscsi_conn_stop, + .destroy_conn = qla4xxx_conn_destroy, + .set_param = iscsi_set_param, +@@ -3237,6 +3238,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + qla_conn->qla_ep = ep->dd_data; ++ iscsi_put_endpoint(ep); + return 0; + } + +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index a5759d0e388a8..ef7cd7520e7c7 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -86,16 +86,10 @@ struct iscsi_internal { + struct transport_container session_cont; + }; + +-/* Worker to perform connection failure on unresponsive connections +- * completely in kernel space. +- */ +-static void stop_conn_work_fn(struct work_struct *work); +-static DECLARE_WORK(stop_conn_work, stop_conn_work_fn); +- + static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ + static struct workqueue_struct *iscsi_eh_timer_workq; + +-static struct workqueue_struct *iscsi_destroy_workq; ++static struct workqueue_struct *iscsi_conn_cleanup_workq; + + static DEFINE_IDA(iscsi_sess_ida); + /* +@@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) + } + EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); + ++void iscsi_put_endpoint(struct iscsi_endpoint *ep) ++{ ++ put_device(&ep->dev); ++} ++EXPORT_SYMBOL_GPL(iscsi_put_endpoint); ++ ++/** ++ * iscsi_lookup_endpoint - get ep from handle ++ * @handle: endpoint handle ++ * ++ * Caller must do a iscsi_put_endpoint. 
++ */ + struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) + { +- struct iscsi_endpoint *ep; + struct device *dev; + + dev = class_find_device(&iscsi_endpoint_class, NULL, &handle, +@@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) + if (!dev) + return NULL; + +- ep = iscsi_dev_to_endpoint(dev); +- /* +- * we can drop this now because the interface will prevent +- * removals and lookups from racing. +- */ +- put_device(dev); +- return ep; ++ return iscsi_dev_to_endpoint(dev); + } + EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); + +@@ -1598,12 +1597,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, + static struct sock *nls; + static DEFINE_MUTEX(rx_queue_mutex); + +-/* +- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing +- * against the kernel stop_connection recovery mechanism +- */ +-static DEFINE_MUTEX(conn_mutex); +- + static LIST_HEAD(sesslist); + static DEFINE_SPINLOCK(sesslock); + static LIST_HEAD(connlist); +@@ -2225,6 +2218,155 @@ void iscsi_remove_session(struct iscsi_cls_session *session) + } + EXPORT_SYMBOL_GPL(iscsi_remove_session); + ++static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) ++{ ++ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n"); ++ ++ switch (flag) { ++ case STOP_CONN_RECOVER: ++ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); ++ break; ++ case STOP_CONN_TERM: ++ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); ++ break; ++ default: ++ iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", ++ flag); ++ return; ++ } ++ ++ conn->transport->stop_conn(conn, flag); ++ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); ++} ++ ++static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) ++{ ++ struct iscsi_cls_session *session = iscsi_conn_to_session(conn); ++ struct iscsi_endpoint *ep; ++ ++ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); ++ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); ++ ++ if (!conn->ep || !session->transport->ep_disconnect) ++ return; 
++ ++ ep = conn->ep; ++ conn->ep = NULL; ++ ++ session->transport->unbind_conn(conn, is_active); ++ session->transport->ep_disconnect(ep); ++ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); ++} ++ ++static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, ++ struct iscsi_endpoint *ep, ++ bool is_active) ++{ ++ /* Check if this was a conn error and the kernel took ownership */ ++ spin_lock_irq(&conn->lock); ++ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { ++ spin_unlock_irq(&conn->lock); ++ iscsi_ep_disconnect(conn, is_active); ++ } else { ++ spin_unlock_irq(&conn->lock); ++ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); ++ mutex_unlock(&conn->ep_mutex); ++ ++ flush_work(&conn->cleanup_work); ++ /* ++ * Userspace is now done with the EP so we can release the ref ++ * iscsi_cleanup_conn_work_fn took. ++ */ ++ iscsi_put_endpoint(ep); ++ mutex_lock(&conn->ep_mutex); ++ } ++} ++ ++static int iscsi_if_stop_conn(struct iscsi_transport *transport, ++ struct iscsi_uevent *ev) ++{ ++ int flag = ev->u.stop_conn.flag; ++ struct iscsi_cls_conn *conn; ++ ++ conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); ++ if (!conn) ++ return -EINVAL; ++ ++ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n"); ++ /* ++ * If this is a termination we have to call stop_conn with that flag ++ * so the correct states get set. If we haven't run the work yet try to ++ * avoid the extra run. ++ */ ++ if (flag == STOP_CONN_TERM) { ++ cancel_work_sync(&conn->cleanup_work); ++ iscsi_stop_conn(conn, flag); ++ } else { ++ /* ++ * For offload, when iscsid is restarted it won't know about ++ * existing endpoints so it can't do a ep_disconnect. We clean ++ * it up here for userspace. ++ */ ++ mutex_lock(&conn->ep_mutex); ++ if (conn->ep) ++ iscsi_if_disconnect_bound_ep(conn, conn->ep, true); ++ mutex_unlock(&conn->ep_mutex); ++ ++ /* ++ * Figure out if it was the kernel or userspace initiating this. 
++ */ ++ spin_lock_irq(&conn->lock); ++ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { ++ spin_unlock_irq(&conn->lock); ++ iscsi_stop_conn(conn, flag); ++ } else { ++ spin_unlock_irq(&conn->lock); ++ ISCSI_DBG_TRANS_CONN(conn, ++ "flush kernel conn cleanup.\n"); ++ flush_work(&conn->cleanup_work); ++ } ++ /* ++ * Only clear for recovery to avoid extra cleanup runs during ++ * termination. ++ */ ++ spin_lock_irq(&conn->lock); ++ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags); ++ spin_unlock_irq(&conn->lock); ++ } ++ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); ++ return 0; ++} ++ ++static void iscsi_cleanup_conn_work_fn(struct work_struct *work) ++{ ++ struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn, ++ cleanup_work); ++ struct iscsi_cls_session *session = iscsi_conn_to_session(conn); ++ ++ mutex_lock(&conn->ep_mutex); ++ /* ++ * Get a ref to the ep, so we don't release its ID until after ++ * userspace is done referencing it in iscsi_if_disconnect_bound_ep. ++ */ ++ if (conn->ep) ++ get_device(&conn->ep->dev); ++ iscsi_ep_disconnect(conn, false); ++ ++ if (system_state != SYSTEM_RUNNING) { ++ /* ++ * If the user has set up for the session to never timeout ++ * then hang like they wanted. For all other cases fail right ++ * away since userspace is not going to relogin. 
++ */ ++ if (session->recovery_tmo > 0) ++ session->recovery_tmo = 0; ++ } ++ ++ iscsi_stop_conn(conn, STOP_CONN_RECOVER); ++ mutex_unlock(&conn->ep_mutex); ++ ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); ++} ++ + void iscsi_free_session(struct iscsi_cls_session *session) + { + ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); +@@ -2263,11 +2405,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) + conn->dd_data = &conn[1]; + + mutex_init(&conn->ep_mutex); ++ spin_lock_init(&conn->lock); + INIT_LIST_HEAD(&conn->conn_list); +- INIT_LIST_HEAD(&conn->conn_list_err); ++ INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn); + conn->transport = transport; + conn->cid = cid; +- conn->state = ISCSI_CONN_DOWN; ++ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); + + /* this is released in the dev's release function */ + if (!get_device(&session->dev)) +@@ -2321,7 +2464,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) + + spin_lock_irqsave(&connlock, flags); + list_del(&conn->conn_list); +- list_del(&conn->conn_list_err); + spin_unlock_irqrestore(&connlock, flags); + + transport_unregister_device(&conn->dev); +@@ -2448,77 +2590,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, + } + EXPORT_SYMBOL_GPL(iscsi_offload_mesg); + +-/* +- * This can be called without the rx_queue_mutex, if invoked by the kernel +- * stop work. But, in that case, it is guaranteed not to race with +- * iscsi_destroy by conn_mutex. +- */ +-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) +-{ +- /* +- * It is important that this path doesn't rely on +- * rx_queue_mutex, otherwise, a thread doing allocation on a +- * start_session/start_connection could sleep waiting on a +- * writeback to a failed iscsi device, that cannot be recovered +- * because the lock is held. If we don't hold it here, the +- * kernel stop_conn_work_fn has a chance to stop the broken +- * session and resolve the allocation. 
+- * +- * Still, the user invoked .stop_conn() needs to be serialized +- * with stop_conn_work_fn by a private mutex. Not pretty, but +- * it works. +- */ +- mutex_lock(&conn_mutex); +- switch (flag) { +- case STOP_CONN_RECOVER: +- conn->state = ISCSI_CONN_FAILED; +- break; +- case STOP_CONN_TERM: +- conn->state = ISCSI_CONN_DOWN; +- break; +- default: +- iscsi_cls_conn_printk(KERN_ERR, conn, +- "invalid stop flag %d\n", flag); +- goto unlock; +- } +- +- conn->transport->stop_conn(conn, flag); +-unlock: +- mutex_unlock(&conn_mutex); +-} +- +-static void stop_conn_work_fn(struct work_struct *work) +-{ +- struct iscsi_cls_conn *conn, *tmp; +- unsigned long flags; +- LIST_HEAD(recovery_list); +- +- spin_lock_irqsave(&connlock, flags); +- if (list_empty(&connlist_err)) { +- spin_unlock_irqrestore(&connlock, flags); +- return; +- } +- list_splice_init(&connlist_err, &recovery_list); +- spin_unlock_irqrestore(&connlock, flags); +- +- list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) { +- uint32_t sid = iscsi_conn_get_sid(conn); +- struct iscsi_cls_session *session; +- +- session = iscsi_session_lookup(sid); +- if (session) { +- if (system_state != SYSTEM_RUNNING) { +- session->recovery_tmo = 0; +- iscsi_if_stop_conn(conn, STOP_CONN_TERM); +- } else { +- iscsi_if_stop_conn(conn, STOP_CONN_RECOVER); +- } +- } +- +- list_del_init(&conn->conn_list_err); +- } +-} +- + void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) + { + struct nlmsghdr *nlh; +@@ -2527,11 +2598,31 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) + struct iscsi_internal *priv; + int len = nlmsg_total_size(sizeof(*ev)); + unsigned long flags; ++ int state; + +- spin_lock_irqsave(&connlock, flags); +- list_add(&conn->conn_list_err, &connlist_err); +- spin_unlock_irqrestore(&connlock, flags); +- queue_work(system_unbound_wq, &stop_conn_work); ++ spin_lock_irqsave(&conn->lock, flags); ++ /* ++ * Userspace will only do a stop call if 
we are at least bound. And, we ++ * only need to do the in kernel cleanup if in the UP state so cmds can ++ * be released to upper layers. If in other states just wait for ++ * userspace to avoid races that can leave the cleanup_work queued. ++ */ ++ state = READ_ONCE(conn->state); ++ switch (state) { ++ case ISCSI_CONN_BOUND: ++ case ISCSI_CONN_UP: ++ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, ++ &conn->flags)) { ++ queue_work(iscsi_conn_cleanup_workq, ++ &conn->cleanup_work); ++ } ++ break; ++ default: ++ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", ++ state); ++ break; ++ } ++ spin_unlock_irqrestore(&conn->lock, flags); + + priv = iscsi_if_transport_lookup(conn->transport); + if (!priv) +@@ -2861,26 +2952,17 @@ static int + iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) + { + struct iscsi_cls_conn *conn; +- unsigned long flags; + + conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); + if (!conn) + return -EINVAL; + +- spin_lock_irqsave(&connlock, flags); +- if (!list_empty(&conn->conn_list_err)) { +- spin_unlock_irqrestore(&connlock, flags); +- return -EAGAIN; +- } +- spin_unlock_irqrestore(&connlock, flags); +- ++ ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n"); ++ flush_work(&conn->cleanup_work); + ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); + +- mutex_lock(&conn_mutex); + if (transport->destroy_conn) + transport->destroy_conn(conn); +- mutex_unlock(&conn_mutex); +- + return 0; + } + +@@ -2890,7 +2972,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) + char *data = (char*)ev + sizeof(*ev); + struct iscsi_cls_conn *conn; + struct iscsi_cls_session *session; +- int err = 0, value = 0; ++ int err = 0, value = 0, state; + + if (ev->u.set_param.len > PAGE_SIZE) + return -EINVAL; +@@ -2907,8 +2989,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) + session->recovery_tmo = value; + break; + default: +- if 
((conn->state == ISCSI_CONN_BOUND) || +- (conn->state == ISCSI_CONN_UP)) { ++ state = READ_ONCE(conn->state); ++ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { + err = transport->set_param(conn, ev->u.set_param.param, + data, ev->u.set_param.len); + } else { +@@ -2968,15 +3050,22 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, + ep = iscsi_lookup_endpoint(ep_handle); + if (!ep) + return -EINVAL; ++ + conn = ep->conn; +- if (conn) { +- mutex_lock(&conn->ep_mutex); +- conn->ep = NULL; +- mutex_unlock(&conn->ep_mutex); +- conn->state = ISCSI_CONN_FAILED; ++ if (!conn) { ++ /* ++ * conn was not even bound yet, so we can't get iscsi conn ++ * failures yet. ++ */ ++ transport->ep_disconnect(ep); ++ goto put_ep; + } + +- transport->ep_disconnect(ep); ++ mutex_lock(&conn->ep_mutex); ++ iscsi_if_disconnect_bound_ep(conn, ep, false); ++ mutex_unlock(&conn->ep_mutex); ++put_ep: ++ iscsi_put_endpoint(ep); + return 0; + } + +@@ -3002,6 +3091,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport, + + ev->r.retcode = transport->ep_poll(ep, + ev->u.ep_poll.timeout_ms); ++ iscsi_put_endpoint(ep); + break; + case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: + rc = iscsi_if_ep_disconnect(transport, +@@ -3632,18 +3722,123 @@ exit_host_stats: + return err; + } + ++static int iscsi_if_transport_conn(struct iscsi_transport *transport, ++ struct nlmsghdr *nlh) ++{ ++ struct iscsi_uevent *ev = nlmsg_data(nlh); ++ struct iscsi_cls_session *session; ++ struct iscsi_cls_conn *conn = NULL; ++ struct iscsi_endpoint *ep; ++ uint32_t pdu_len; ++ int err = 0; ++ ++ switch (nlh->nlmsg_type) { ++ case ISCSI_UEVENT_CREATE_CONN: ++ return iscsi_if_create_conn(transport, ev); ++ case ISCSI_UEVENT_DESTROY_CONN: ++ return iscsi_if_destroy_conn(transport, ev); ++ case ISCSI_UEVENT_STOP_CONN: ++ return iscsi_if_stop_conn(transport, ev); ++ } ++ ++ /* ++ * The following cmds need to be run under the ep_mutex so in kernel ++ * conn cleanup (ep_disconnect + unbind and conn) 
is not done while ++ * these are running. They also must not run if we have just run a conn ++ * cleanup because they would set the state in a way that might allow ++ * IO or send IO themselves. ++ */ ++ switch (nlh->nlmsg_type) { ++ case ISCSI_UEVENT_START_CONN: ++ conn = iscsi_conn_lookup(ev->u.start_conn.sid, ++ ev->u.start_conn.cid); ++ break; ++ case ISCSI_UEVENT_BIND_CONN: ++ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); ++ break; ++ case ISCSI_UEVENT_SEND_PDU: ++ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); ++ break; ++ } ++ ++ if (!conn) ++ return -EINVAL; ++ ++ mutex_lock(&conn->ep_mutex); ++ spin_lock_irq(&conn->lock); ++ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) { ++ spin_unlock_irq(&conn->lock); ++ mutex_unlock(&conn->ep_mutex); ++ ev->r.retcode = -ENOTCONN; ++ return 0; ++ } ++ spin_unlock_irq(&conn->lock); ++ ++ switch (nlh->nlmsg_type) { ++ case ISCSI_UEVENT_BIND_CONN: ++ session = iscsi_session_lookup(ev->u.b_conn.sid); ++ if (!session) { ++ err = -EINVAL; ++ break; ++ } ++ ++ ev->r.retcode = transport->bind_conn(session, conn, ++ ev->u.b_conn.transport_eph, ++ ev->u.b_conn.is_leading); ++ if (!ev->r.retcode) ++ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); ++ ++ if (ev->r.retcode || !transport->ep_connect) ++ break; ++ ++ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); ++ if (ep) { ++ ep->conn = conn; ++ conn->ep = ep; ++ iscsi_put_endpoint(ep); ++ } else { ++ err = -ENOTCONN; ++ iscsi_cls_conn_printk(KERN_ERR, conn, ++ "Could not set ep conn binding\n"); ++ } ++ break; ++ case ISCSI_UEVENT_START_CONN: ++ ev->r.retcode = transport->start_conn(conn); ++ if (!ev->r.retcode) ++ WRITE_ONCE(conn->state, ISCSI_CONN_UP); ++ ++ break; ++ case ISCSI_UEVENT_SEND_PDU: ++ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); ++ ++ if ((ev->u.send_pdu.hdr_size > pdu_len) || ++ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { ++ err = -EINVAL; ++ break; ++ } ++ ++ ev->r.retcode = 
transport->send_pdu(conn, ++ (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), ++ (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, ++ ev->u.send_pdu.data_size); ++ break; ++ default: ++ err = -ENOSYS; ++ } ++ ++ mutex_unlock(&conn->ep_mutex); ++ return err; ++} + + static int + iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + { + int err = 0; + u32 portid; +- u32 pdu_len; + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_transport *transport = NULL; + struct iscsi_internal *priv; + struct iscsi_cls_session *session; +- struct iscsi_cls_conn *conn; + struct iscsi_endpoint *ep = NULL; + + if (!netlink_capable(skb, CAP_SYS_ADMIN)) +@@ -3684,6 +3879,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + ev->u.c_bound_session.initial_cmdsn, + ev->u.c_bound_session.cmds_max, + ev->u.c_bound_session.queue_depth); ++ iscsi_put_endpoint(ep); + break; + case ISCSI_UEVENT_DESTROY_SESSION: + session = iscsi_session_lookup(ev->u.d_session.sid); +@@ -3708,7 +3904,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + list_del_init(&session->sess_list); + spin_unlock_irqrestore(&sesslock, flags); + +- queue_work(iscsi_destroy_workq, &session->destroy_work); ++ queue_work(system_unbound_wq, &session->destroy_work); + } + break; + case ISCSI_UEVENT_UNBIND_SESSION: +@@ -3719,89 +3915,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + else + err = -EINVAL; + break; +- case ISCSI_UEVENT_CREATE_CONN: +- err = iscsi_if_create_conn(transport, ev); +- break; +- case ISCSI_UEVENT_DESTROY_CONN: +- err = iscsi_if_destroy_conn(transport, ev); +- break; +- case ISCSI_UEVENT_BIND_CONN: +- session = iscsi_session_lookup(ev->u.b_conn.sid); +- conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); +- +- if (conn && conn->ep) +- iscsi_if_ep_disconnect(transport, conn->ep->id); +- +- if (!session || !conn) { +- err = -EINVAL; +- break; +- } +- +- 
mutex_lock(&conn_mutex); +- ev->r.retcode = transport->bind_conn(session, conn, +- ev->u.b_conn.transport_eph, +- ev->u.b_conn.is_leading); +- if (!ev->r.retcode) +- conn->state = ISCSI_CONN_BOUND; +- mutex_unlock(&conn_mutex); +- +- if (ev->r.retcode || !transport->ep_connect) +- break; +- +- ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); +- if (ep) { +- ep->conn = conn; +- +- mutex_lock(&conn->ep_mutex); +- conn->ep = ep; +- mutex_unlock(&conn->ep_mutex); +- } else +- iscsi_cls_conn_printk(KERN_ERR, conn, +- "Could not set ep conn " +- "binding\n"); +- break; + case ISCSI_UEVENT_SET_PARAM: + err = iscsi_set_param(transport, ev); + break; +- case ISCSI_UEVENT_START_CONN: +- conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid); +- if (conn) { +- mutex_lock(&conn_mutex); +- ev->r.retcode = transport->start_conn(conn); +- if (!ev->r.retcode) +- conn->state = ISCSI_CONN_UP; +- mutex_unlock(&conn_mutex); +- } +- else +- err = -EINVAL; +- break; ++ case ISCSI_UEVENT_CREATE_CONN: ++ case ISCSI_UEVENT_DESTROY_CONN: + case ISCSI_UEVENT_STOP_CONN: +- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); +- if (conn) +- iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); +- else +- err = -EINVAL; +- break; ++ case ISCSI_UEVENT_START_CONN: ++ case ISCSI_UEVENT_BIND_CONN: + case ISCSI_UEVENT_SEND_PDU: +- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); +- +- if ((ev->u.send_pdu.hdr_size > pdu_len) || +- (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { +- err = -EINVAL; +- break; +- } +- +- conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); +- if (conn) { +- mutex_lock(&conn_mutex); +- ev->r.retcode = transport->send_pdu(conn, +- (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), +- (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, +- ev->u.send_pdu.data_size); +- mutex_unlock(&conn_mutex); +- } +- else +- err = -EINVAL; ++ err = iscsi_if_transport_conn(transport, nlh); + break; + case 
ISCSI_UEVENT_GET_STATS: + err = iscsi_if_get_stats(transport, nlh); +@@ -3991,10 +4114,11 @@ static ssize_t show_conn_state(struct device *dev, + { + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); + const char *state = "unknown"; ++ int conn_state = READ_ONCE(conn->state); + +- if (conn->state >= 0 && +- conn->state < ARRAY_SIZE(connection_state_names)) +- state = connection_state_names[conn->state]; ++ if (conn_state >= 0 && ++ conn_state < ARRAY_SIZE(connection_state_names)) ++ state = connection_state_names[conn_state]; + + return sysfs_emit(buf, "%s\n", state); + } +@@ -4649,6 +4773,7 @@ iscsi_register_transport(struct iscsi_transport *tt) + int err; + + BUG_ON(!tt); ++ WARN_ON(tt->ep_disconnect && !tt->unbind_conn); + + priv = iscsi_if_transport_lookup(tt); + if (priv) +@@ -4803,10 +4928,10 @@ static __init int iscsi_transport_init(void) + goto release_nls; + } + +- iscsi_destroy_workq = alloc_workqueue("%s", +- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, +- 1, "iscsi_destroy"); +- if (!iscsi_destroy_workq) { ++ iscsi_conn_cleanup_workq = alloc_workqueue("%s", ++ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, ++ "iscsi_conn_cleanup"); ++ if (!iscsi_conn_cleanup_workq) { + err = -ENOMEM; + goto destroy_wq; + } +@@ -4836,7 +4961,7 @@ unregister_transport_class: + + static void __exit iscsi_transport_exit(void) + { +- destroy_workqueue(iscsi_destroy_workq); ++ destroy_workqueue(iscsi_conn_cleanup_workq); + destroy_workqueue(iscsi_eh_timer_workq); + netlink_kernel_release(nls); + bus_unregister(&iscsi_flashnode_bus); +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c +index c6950f157b99f..c283e45ac300b 100644 +--- a/drivers/target/target_core_user.c ++++ b/drivers/target/target_core_user.c +@@ -1676,6 +1676,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) + mutex_lock(&udev->cmdr_lock); + page = tcmu_get_block_page(udev, dbi); + if (likely(page)) { ++ get_page(page); + 
mutex_unlock(&udev->cmdr_lock); + return page; + } +@@ -1714,6 +1715,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) + /* For the vmalloc()ed cmd area pages */ + addr = (void *)(unsigned long)info->mem[mi].addr + offset; + page = vmalloc_to_page(addr); ++ get_page(page); + } else { + uint32_t dbi; + +@@ -1724,7 +1726,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) + return VM_FAULT_SIGBUS; + } + +- get_page(page); + vmf->page = page; + return 0; + } +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index c99e293b50f54..e351f53199505 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -2570,7 +2570,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) + struct btrfs_path *path = NULL; + LIST_HEAD(dirty); + struct list_head *io = &cur_trans->io_bgs; +- int num_started = 0; + int loops = 0; + + spin_lock(&cur_trans->dirty_bgs_lock); +@@ -2636,7 +2635,6 @@ again: + cache->io_ctl.inode = NULL; + ret = btrfs_write_out_cache(trans, cache, path); + if (ret == 0 && cache->io_ctl.inode) { +- num_started++; + should_put = 0; + + /* +@@ -2737,7 +2735,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) + int should_put; + struct btrfs_path *path; + struct list_head *io = &cur_trans->io_bgs; +- int num_started = 0; + + path = btrfs_alloc_path(); + if (!path) +@@ -2795,7 +2792,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) + cache->io_ctl.inode = NULL; + ret = btrfs_write_out_cache(trans, cache, path); + if (ret == 0 && cache->io_ctl.inode) { +- num_started++; + should_put = 0; + list_add_tail(&cache->io_list, io); + } else { +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index a5bcad0278835..87e55b024ac2e 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1596,9 +1596,10 @@ again: + + ret = btrfs_insert_fs_root(fs_info, root); + if (ret) { +- btrfs_put_root(root); +- if (ret == -EEXIST) ++ if (ret == -EEXIST) { ++ btrfs_put_root(root); 
+ goto again; ++ } + goto fail; + } + return root; +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index f59ec55e5feb2..416a1b753ff62 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -2833,8 +2833,9 @@ out: + return ret; + } + +-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ++static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) + { ++ struct inode *inode = file_inode(file); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_state *cached_state = NULL; +@@ -2866,6 +2867,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) + goto out_only_mutex; + } + ++ ret = file_modified(file); ++ if (ret) ++ goto out_only_mutex; ++ + lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode))); + lockend = round_down(offset + len, + btrfs_inode_sectorsize(BTRFS_I(inode))) - 1; +@@ -3301,7 +3306,7 @@ static long btrfs_fallocate(struct file *file, int mode, + return -EOPNOTSUPP; + + if (mode & FALLOC_FL_PUNCH_HOLE) +- return btrfs_punch_hole(inode, offset, len); ++ return btrfs_punch_hole(file, offset, len); + + /* + * Only trigger disk allocation, don't trigger qgroup reserve +@@ -3323,6 +3328,10 @@ static long btrfs_fallocate(struct file *file, int mode, + goto out; + } + ++ ret = file_modified(file); ++ if (ret) ++ goto out; ++ + /* + * TODO: Move these two operations after we have checked + * accurate reserved space, or fallocate can still fail but +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index f7f4ac01589bc..4a5248097d7aa 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -995,7 +995,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode, + int ret = 0; + + if (btrfs_is_free_space_inode(inode)) { +- WARN_ON_ONCE(1); + ret = -EINVAL; + goto out_unlock; + } +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index e462de9917237..366d047638646 100644 +--- a/fs/btrfs/volumes.c ++++ 
b/fs/btrfs/volumes.c +@@ -4220,10 +4220,12 @@ static int balance_kthread(void *data) + struct btrfs_fs_info *fs_info = data; + int ret = 0; + ++ sb_start_write(fs_info->sb); + mutex_lock(&fs_info->balance_mutex); + if (fs_info->balance_ctl) + ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); + mutex_unlock(&fs_info->balance_mutex); ++ sb_end_write(fs_info->sb); + + return ret; + } +diff --git a/fs/cifs/link.c b/fs/cifs/link.c +index 94dab4309fbb4..85d30fef98a29 100644 +--- a/fs/cifs/link.c ++++ b/fs/cifs/link.c +@@ -97,6 +97,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len, + if (rc != 1) + return -EINVAL; + ++ if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN) ++ return -EINVAL; ++ + rc = symlink_hash(link_len, link_str, md5_hash); + if (rc) { + cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc); +diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h +index 6661ee1cff479..a0c4b99d28994 100644 +--- a/include/asm-generic/tlb.h ++++ b/include/asm-generic/tlb.h +@@ -563,10 +563,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, + #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ + do { \ + unsigned long _sz = huge_page_size(h); \ +- if (_sz == PMD_SIZE) \ +- tlb_flush_pmd_range(tlb, address, _sz); \ +- else if (_sz == PUD_SIZE) \ ++ if (_sz >= P4D_SIZE) \ ++ tlb_flush_p4d_range(tlb, address, _sz); \ ++ else if (_sz >= PUD_SIZE) \ + tlb_flush_pud_range(tlb, address, _sz); \ ++ else if (_sz >= PMD_SIZE) \ ++ tlb_flush_pmd_range(tlb, address, _sz); \ ++ else \ ++ tlb_flush_pte_range(tlb, address, _sz); \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ + } while (0) + +diff --git a/include/net/ax25.h b/include/net/ax25.h +index 8b7eb46ad72d8..aadff553e4b73 100644 +--- a/include/net/ax25.h ++++ b/include/net/ax25.h +@@ -236,6 +236,7 @@ typedef struct ax25_dev { + #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) + ax25_dama_info dama; + #endif ++ refcount_t refcount; + } 
ax25_dev; + + typedef struct ax25_cb { +@@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) + } + } + ++static inline void ax25_dev_hold(ax25_dev *ax25_dev) ++{ ++ refcount_inc(&ax25_dev->refcount); ++} ++ ++static inline void ax25_dev_put(ax25_dev *ax25_dev) ++{ ++ if (refcount_dec_and_test(&ax25_dev->refcount)) { ++ kfree(ax25_dev); ++ } ++} + static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) + { + skb->dev = dev; +diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h +index cc10b10dc3a19..5eecf44369659 100644 +--- a/include/net/flow_dissector.h ++++ b/include/net/flow_dissector.h +@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan { + __be16 vlan_tci; + }; + __be16 vlan_tpid; ++ __be16 vlan_eth_type; ++ u16 padding; + }; + + struct flow_dissector_mpls_lse { +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h +index 2b5f97224f693..fa00e2543ad65 100644 +--- a/include/scsi/libiscsi.h ++++ b/include/scsi/libiscsi.h +@@ -421,6 +421,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *); + extern void iscsi_conn_stop(struct iscsi_cls_conn *, int); + extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, + int); ++extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active); + extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); + extern void iscsi_session_failure(struct iscsi_session *session, + enum iscsi_err err); +diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h +index f28bb20d62713..037c77fb5dc55 100644 +--- a/include/scsi/scsi_transport_iscsi.h ++++ b/include/scsi/scsi_transport_iscsi.h +@@ -82,6 +82,7 @@ struct iscsi_transport { + void (*destroy_session) (struct iscsi_cls_session *session); + struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess, + uint32_t cid); ++ void (*unbind_conn) (struct iscsi_cls_conn *conn, bool is_active); + int (*bind_conn) (struct 
iscsi_cls_session *session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_eph, int is_leading); +@@ -196,15 +197,25 @@ enum iscsi_connection_state { + ISCSI_CONN_BOUND, + }; + ++#define ISCSI_CLS_CONN_BIT_CLEANUP 1 ++ + struct iscsi_cls_conn { + struct list_head conn_list; /* item in connlist */ +- struct list_head conn_list_err; /* item in connlist_err */ + void *dd_data; /* LLD private data */ + struct iscsi_transport *transport; + uint32_t cid; /* connection id */ ++ /* ++ * This protects the conn startup and binding/unbinding of the ep to ++ * the conn. Unbinding includes ep_disconnect and stop_conn. ++ */ + struct mutex ep_mutex; + struct iscsi_endpoint *ep; + ++ /* Used when accessing flags and queueing work. */ ++ spinlock_t lock; ++ unsigned long flags; ++ struct work_struct cleanup_work; ++ + struct device dev; /* sysfs transport/container device */ + enum iscsi_connection_state state; + }; +@@ -443,6 +454,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); + extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); + extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); + extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); ++extern void iscsi_put_endpoint(struct iscsi_endpoint *ep); + extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd); + extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost, + struct iscsi_transport *t, +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h +index 23db248a7fdbe..ed1bbac004d52 100644 +--- a/include/trace/events/sunrpc.h ++++ b/include/trace/events/sunrpc.h +@@ -1874,17 +1874,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event, + TP_STRUCT__entry( + __field(const void *, dr) + __field(u32, xid) +- __string(addr, dr->xprt->xpt_remotebuf) ++ __array(__u8, addr, INET6_ADDRSTRLEN + 10) + ), + + TP_fast_assign( + __entry->dr = dr; + __entry->xid = be32_to_cpu(*(__be32 *)(dr->args + + (dr->xprt_hlen>>2))); +- __assign_str(addr, 
dr->xprt->xpt_remotebuf); ++ snprintf(__entry->addr, sizeof(__entry->addr) - 1, ++ "%pISpc", (struct sockaddr *)&dr->addr); + ), + +- TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr, ++ TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr, + __entry->xid) + ); + +diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h +index b986155787376..c9d380318dd89 100644 +--- a/kernel/dma/direct.h ++++ b/kernel/dma/direct.h +@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + dma_direct_sync_single_for_cpu(dev, addr, size, dir); + + if (unlikely(is_swiotlb_buffer(phys))) +- swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); ++ swiotlb_tbl_unmap_single(dev, phys, size, size, dir, ++ attrs | DMA_ATTR_SKIP_CPU_SYNC); + } + #endif /* _KERNEL_DMA_DIRECT_H */ +diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c +index 4d89ad4fae3bb..5fb78addff51b 100644 +--- a/kernel/irq/affinity.c ++++ b/kernel/irq/affinity.c +@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec, + */ + if (numvecs <= nodes) { + for_each_node_mask(n, nodemsk) { +- cpumask_or(&masks[curvec].mask, &masks[curvec].mask, +- node_to_cpumask[n]); ++ /* Ensure that only CPUs which are in both masks are set */ ++ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); ++ cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk); + if (++curvec == last_affv) + curvec = firstvec; + } +diff --git a/kernel/smp.c b/kernel/smp.c +index f73a597c8e4cf..b0684b4c111e9 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -346,7 +346,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) + + /* There shouldn't be any pending callbacks on an offline CPU. 
*/ + if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) && +- !warned && !llist_empty(head))) { ++ !warned && entry != NULL)) { + warned = true; + WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); + +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c +index e8d351b7f9b03..9c352357fc8bc 100644 +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -136,7 +136,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) + */ + if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { + #ifdef CONFIG_NO_HZ_FULL +- WARN_ON(tick_nohz_full_running); ++ WARN_ON_ONCE(tick_nohz_full_running); + #endif + tick_do_timer_cpu = cpu; + } +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index a3ec21be3b140..e87e638c31bdf 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1738,11 +1738,14 @@ static inline void __run_timers(struct timer_base *base) + time_after_eq(jiffies, base->next_expiry)) { + levels = collect_expired_timers(base, heads); + /* +- * The only possible reason for not finding any expired +- * timer at this clk is that all matching timers have been +- * dequeued. ++ * The two possible reasons for not finding any expired ++ * timer at this clk are that all matching timers have been ++ * dequeued or no timer has been queued since ++ * base::next_expiry was set to base::clk + ++ * NEXT_TIMER_MAX_DELTA. 
+ */ +- WARN_ON_ONCE(!levels && !base->next_expiry_recalc); ++ WARN_ON_ONCE(!levels && !base->next_expiry_recalc ++ && base->timers_pending); + base->clk++; + base->next_expiry = __next_timer_interrupt(base); + +diff --git a/mm/kmemleak.c b/mm/kmemleak.c +index 4801751cb6b6d..5bfae0686199e 100644 +--- a/mm/kmemleak.c ++++ b/mm/kmemleak.c +@@ -1123,7 +1123,7 @@ EXPORT_SYMBOL(kmemleak_no_scan); + void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, + gfp_t gfp) + { +- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) ++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + kmemleak_alloc(__va(phys), size, min_count, gfp); + } + EXPORT_SYMBOL(kmemleak_alloc_phys); +@@ -1137,7 +1137,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys); + */ + void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) + { +- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) ++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + kmemleak_free_part(__va(phys), size); + } + EXPORT_SYMBOL(kmemleak_free_part_phys); +@@ -1149,7 +1149,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys); + */ + void __ref kmemleak_not_leak_phys(phys_addr_t phys) + { +- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) ++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + kmemleak_not_leak(__va(phys)); + } + EXPORT_SYMBOL(kmemleak_not_leak_phys); +@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys); + */ + void __ref kmemleak_ignore_phys(phys_addr_t phys) + { +- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) ++ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) + kmemleak_ignore(__va(phys)); + } + EXPORT_SYMBOL(kmemleak_ignore_phys); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 42f64ed2be478..f022e0024e8db 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5653,7 +5653,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) + 
do { + zone_type--; + zone = pgdat->node_zones + zone_type; +- if (managed_zone(zone)) { ++ if (populated_zone(zone)) { + zoneref_set_zone(zone, &zonerefs[nr_zones++]); + check_highest_zone(zone_type); + } +diff --git a/mm/page_io.c b/mm/page_io.c +index 96479817ffae3..f0ada4455895c 100644 +--- a/mm/page_io.c ++++ b/mm/page_io.c +@@ -69,54 +69,6 @@ void end_swap_bio_write(struct bio *bio) + bio_put(bio); + } + +-static void swap_slot_free_notify(struct page *page) +-{ +- struct swap_info_struct *sis; +- struct gendisk *disk; +- swp_entry_t entry; +- +- /* +- * There is no guarantee that the page is in swap cache - the software +- * suspend code (at least) uses end_swap_bio_read() against a non- +- * swapcache page. So we must check PG_swapcache before proceeding with +- * this optimization. +- */ +- if (unlikely(!PageSwapCache(page))) +- return; +- +- sis = page_swap_info(page); +- if (data_race(!(sis->flags & SWP_BLKDEV))) +- return; +- +- /* +- * The swap subsystem performs lazy swap slot freeing, +- * expecting that the page will be swapped out again. +- * So we can avoid an unnecessary write if the page +- * isn't redirtied. +- * This is good for real swap storage because we can +- * reduce unnecessary I/O and enhance wear-leveling +- * if an SSD is used as the as swap device. +- * But if in-memory swap device (eg zram) is used, +- * this causes a duplicated copy between uncompressed +- * data in VM-owned memory and compressed data in +- * zram-owned memory. So let's free zram-owned memory +- * and make the VM-owned decompressed page *dirty*, +- * so the page should be swapped out somewhere again if +- * we again wish to reclaim it. 
+- */ +- disk = sis->bdev->bd_disk; +- entry.val = page_private(page); +- if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) { +- unsigned long offset; +- +- offset = swp_offset(entry); +- +- SetPageDirty(page); +- disk->fops->swap_slot_free_notify(sis->bdev, +- offset); +- } +-} +- + static void end_swap_bio_read(struct bio *bio) + { + struct page *page = bio_first_page_all(bio); +@@ -132,7 +84,6 @@ static void end_swap_bio_read(struct bio *bio) + } + + SetPageUptodate(page); +- swap_slot_free_notify(page); + out: + unlock_page(page); + WRITE_ONCE(bio->bi_private, NULL); +@@ -409,11 +360,6 @@ int swap_readpage(struct page *page, bool synchronous) + if (sis->flags & SWP_SYNCHRONOUS_IO) { + ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); + if (!ret) { +- if (trylock_page(page)) { +- swap_slot_free_notify(page); +- unlock_page(page); +- } +- + count_vm_event(PSWPIN); + goto out; + } +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c +index 9e0eef7fe9add..5fff027f25fad 100644 +--- a/net/ax25/af_ax25.c ++++ b/net/ax25/af_ax25.c +@@ -89,17 +89,21 @@ again: + sk = s->sk; + if (!sk) { + spin_unlock_bh(&ax25_list_lock); +- s->ax25_dev = NULL; + ax25_disconnect(s, ENETUNREACH); ++ s->ax25_dev = NULL; + spin_lock_bh(&ax25_list_lock); + goto again; + } + sock_hold(sk); + spin_unlock_bh(&ax25_list_lock); + lock_sock(sk); ++ ax25_disconnect(s, ENETUNREACH); + s->ax25_dev = NULL; ++ if (sk->sk_socket) { ++ dev_put(ax25_dev->dev); ++ ax25_dev_put(ax25_dev); ++ } + release_sock(sk); +- ax25_disconnect(s, ENETUNREACH); + spin_lock_bh(&ax25_list_lock); + sock_put(sk); + /* The entry could have been deleted from the +@@ -365,21 +369,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) + if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl))) + return -EFAULT; + +- if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL) +- return -ENODEV; +- + if (ax25_ctl.digi_count > AX25_MAX_DIGIS) + return -EINVAL; + + if (ax25_ctl.arg 
> ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL) + return -EINVAL; + ++ ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr); ++ if (!ax25_dev) ++ return -ENODEV; ++ + digi.ndigi = ax25_ctl.digi_count; + for (k = 0; k < digi.ndigi; k++) + digi.calls[k] = ax25_ctl.digi_addr[k]; + +- if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL) ++ ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev); ++ if (!ax25) { ++ ax25_dev_put(ax25_dev); + return -ENOTCONN; ++ } + + switch (ax25_ctl.cmd) { + case AX25_KILL: +@@ -446,6 +454,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) + } + + out_put: ++ ax25_dev_put(ax25_dev); + ax25_cb_put(ax25); + return ret; + +@@ -971,14 +980,16 @@ static int ax25_release(struct socket *sock) + { + struct sock *sk = sock->sk; + ax25_cb *ax25; ++ ax25_dev *ax25_dev; + + if (sk == NULL) + return 0; + + sock_hold(sk); +- sock_orphan(sk); + lock_sock(sk); ++ sock_orphan(sk); + ax25 = sk_to_ax25(sk); ++ ax25_dev = ax25->ax25_dev; + + if (sk->sk_type == SOCK_SEQPACKET) { + switch (ax25->state) { +@@ -1040,6 +1051,15 @@ static int ax25_release(struct socket *sock) + sk->sk_state_change(sk); + ax25_destroy_socket(ax25); + } ++ if (ax25_dev) { ++ del_timer_sync(&ax25->timer); ++ del_timer_sync(&ax25->t1timer); ++ del_timer_sync(&ax25->t2timer); ++ del_timer_sync(&ax25->t3timer); ++ del_timer_sync(&ax25->idletimer); ++ dev_put(ax25_dev->dev); ++ ax25_dev_put(ax25_dev); ++ } + + sock->sk = NULL; + release_sock(sk); +@@ -1116,8 +1136,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + } + } + +- if (ax25_dev != NULL) ++ if (ax25_dev) { + ax25_fillin_cb(ax25, ax25_dev); ++ dev_hold(ax25_dev->dev); ++ } + + done: + ax25_cb_add(ax25); +diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c +index 4ac2e0847652a..d2e0cc67d91a7 100644 +--- a/net/ax25/ax25_dev.c ++++ b/net/ax25/ax25_dev.c +@@ -37,6 +37,7 @@ ax25_dev 
*ax25_addr_ax25dev(ax25_address *addr) + for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) + if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) { + res = ax25_dev; ++ ax25_dev_hold(ax25_dev); + } + spin_unlock_bh(&ax25_dev_lock); + +@@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev) + return; + } + ++ refcount_set(&ax25_dev->refcount, 1); + dev->ax25_ptr = ax25_dev; + ax25_dev->dev = dev; + dev_hold(dev); +@@ -84,6 +86,7 @@ void ax25_dev_device_up(struct net_device *dev) + ax25_dev->next = ax25_dev_list; + ax25_dev_list = ax25_dev; + spin_unlock_bh(&ax25_dev_lock); ++ ax25_dev_hold(ax25_dev); + + ax25_register_dev_sysctl(ax25_dev); + } +@@ -113,9 +116,10 @@ void ax25_dev_device_down(struct net_device *dev) + if ((s = ax25_dev_list) == ax25_dev) { + ax25_dev_list = s->next; + spin_unlock_bh(&ax25_dev_lock); ++ ax25_dev_put(ax25_dev); + dev->ax25_ptr = NULL; + dev_put(dev); +- kfree(ax25_dev); ++ ax25_dev_put(ax25_dev); + return; + } + +@@ -123,9 +127,10 @@ void ax25_dev_device_down(struct net_device *dev) + if (s->next == ax25_dev) { + s->next = ax25_dev->next; + spin_unlock_bh(&ax25_dev_lock); ++ ax25_dev_put(ax25_dev); + dev->ax25_ptr = NULL; + dev_put(dev); +- kfree(ax25_dev); ++ ax25_dev_put(ax25_dev); + return; + } + +@@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev) + } + spin_unlock_bh(&ax25_dev_lock); + dev->ax25_ptr = NULL; ++ ax25_dev_put(ax25_dev); + } + + int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) +@@ -144,20 +150,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) + + switch (cmd) { + case SIOCAX25ADDFWD: +- if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL) ++ fwd_dev = ax25_addr_ax25dev(&fwd->port_to); ++ if (!fwd_dev) { ++ ax25_dev_put(ax25_dev); + return -EINVAL; +- if (ax25_dev->forward != NULL) ++ } ++ if (ax25_dev->forward) { ++ ax25_dev_put(fwd_dev); ++ ax25_dev_put(ax25_dev); + return -EINVAL; ++ } + ax25_dev->forward = 
fwd_dev->dev; ++ ax25_dev_put(fwd_dev); ++ ax25_dev_put(ax25_dev); + break; + + case SIOCAX25DELFWD: +- if (ax25_dev->forward == NULL) ++ if (!ax25_dev->forward) { ++ ax25_dev_put(ax25_dev); + return -EINVAL; ++ } + ax25_dev->forward = NULL; ++ ax25_dev_put(ax25_dev); + break; + + default: ++ ax25_dev_put(ax25_dev); + return -EINVAL; + } + +diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c +index b40e0bce67ead..dc2168d2a32a9 100644 +--- a/net/ax25/ax25_route.c ++++ b/net/ax25/ax25_route.c +@@ -75,11 +75,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) + ax25_dev *ax25_dev; + int i; + +- if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL) +- return -EINVAL; + if (route->digi_count > AX25_MAX_DIGIS) + return -EINVAL; + ++ ax25_dev = ax25_addr_ax25dev(&route->port_addr); ++ if (!ax25_dev) ++ return -EINVAL; ++ + write_lock_bh(&ax25_route_lock); + + ax25_rt = ax25_route_list; +@@ -91,6 +93,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) + if (route->digi_count != 0) { + if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { + write_unlock_bh(&ax25_route_lock); ++ ax25_dev_put(ax25_dev); + return -ENOMEM; + } + ax25_rt->digipeat->lastrepeat = -1; +@@ -101,6 +104,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) + } + } + write_unlock_bh(&ax25_route_lock); ++ ax25_dev_put(ax25_dev); + return 0; + } + ax25_rt = ax25_rt->next; +@@ -108,6 +112,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) + + if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) { + write_unlock_bh(&ax25_route_lock); ++ ax25_dev_put(ax25_dev); + return -ENOMEM; + } + +@@ -120,6 +125,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) + if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { + write_unlock_bh(&ax25_route_lock); + kfree(ax25_rt); ++ ax25_dev_put(ax25_dev); + return -ENOMEM; + } + 
ax25_rt->digipeat->lastrepeat = -1; +@@ -132,6 +138,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) + ax25_rt->next = ax25_route_list; + ax25_route_list = ax25_rt; + write_unlock_bh(&ax25_route_lock); ++ ax25_dev_put(ax25_dev); + + return 0; + } +@@ -173,6 +180,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route) + } + } + write_unlock_bh(&ax25_route_lock); ++ ax25_dev_put(ax25_dev); + + return 0; + } +@@ -215,6 +223,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option) + + out: + write_unlock_bh(&ax25_route_lock); ++ ax25_dev_put(ax25_dev); + return err; + } + +diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c +index 15ab812c4fe4b..3a476e4f6cd0b 100644 +--- a/net/ax25/ax25_subr.c ++++ b/net/ax25/ax25_subr.c +@@ -261,12 +261,20 @@ void ax25_disconnect(ax25_cb *ax25, int reason) + { + ax25_clear_queues(ax25); + +- if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) +- ax25_stop_heartbeat(ax25); +- ax25_stop_t1timer(ax25); +- ax25_stop_t2timer(ax25); +- ax25_stop_t3timer(ax25); +- ax25_stop_idletimer(ax25); ++ if (reason == ENETUNREACH) { ++ del_timer_sync(&ax25->timer); ++ del_timer_sync(&ax25->t1timer); ++ del_timer_sync(&ax25->t2timer); ++ del_timer_sync(&ax25->t3timer); ++ del_timer_sync(&ax25->idletimer); ++ } else { ++ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) ++ ax25_stop_heartbeat(ax25); ++ ax25_stop_t1timer(ax25); ++ ax25_stop_t2timer(ax25); ++ ax25_stop_t3timer(ax25); ++ ax25_stop_idletimer(ax25); ++ } + + ax25->state = AX25_STATE_0; + +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c +index 813c709c61cfb..f9baa9b1c77f7 100644 +--- a/net/core/flow_dissector.c ++++ b/net/core/flow_dissector.c +@@ -1171,6 +1171,7 @@ proto_again: + VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + } + key_vlan->vlan_tpid = saved_vlan_tpid; ++ key_vlan->vlan_eth_type = proto; + } + + fdret = FLOW_DISSECT_RET_PROTO_AGAIN; +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 
2aa39ce7093df..05e19e5d65140 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -508,7 +508,7 @@ int ip6_forward(struct sk_buff *skb) + goto drop; + + if (!net->ipv6.devconf_all->disable_policy && +- !idev->cnf.disable_policy && ++ (!idev || !idev->cnf.disable_policy) && + !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { + __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); + goto drop; +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c +index e38719e2ee582..2cfff70f70e06 100644 +--- a/net/nfc/nci/core.c ++++ b/net/nfc/nci/core.c +@@ -548,6 +548,10 @@ static int nci_close_device(struct nci_dev *ndev) + mutex_lock(&ndev->req_lock); + + if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { ++ /* Need to flush the cmd wq in case ++ * there is a queued/running cmd_work ++ */ ++ flush_workqueue(ndev->cmd_wq); + del_timer_sync(&ndev->cmd_timer); + del_timer_sync(&ndev->data_timer); + mutex_unlock(&ndev->req_lock); +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index 9a789a057a741..b8ffb7e4f696c 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -1656,10 +1656,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain, + if (chain->flushing) + return -EAGAIN; + ++ RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); + if (*chain_info->pprev == chain->filter_chain) + tcf_chain0_head_change(chain, tp); + tcf_proto_get(tp); +- RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); + rcu_assign_pointer(*chain_info->pprev, tp); + + return 0; +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c +index 8ff6945b9f8f4..35ee6d8226e61 100644 +--- a/net/sched/cls_flower.c ++++ b/net/sched/cls_flower.c +@@ -998,6 +998,7 @@ static int fl_set_key_mpls(struct nlattr **tb, + static void fl_set_key_vlan(struct nlattr **tb, + __be16 ethertype, + int vlan_id_key, int vlan_prio_key, ++ int vlan_next_eth_type_key, + struct flow_dissector_key_vlan *key_val, + struct flow_dissector_key_vlan *key_mask) + { +@@ -1016,6 
+1017,11 @@ static void fl_set_key_vlan(struct nlattr **tb, + } + key_val->vlan_tpid = ethertype; + key_mask->vlan_tpid = cpu_to_be16(~0); ++ if (tb[vlan_next_eth_type_key]) { ++ key_val->vlan_eth_type = ++ nla_get_be16(tb[vlan_next_eth_type_key]); ++ key_mask->vlan_eth_type = cpu_to_be16(~0); ++ } + } + + static void fl_set_key_flag(u32 flower_key, u32 flower_mask, +@@ -1497,8 +1503,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb, + + if (eth_type_vlan(ethertype)) { + fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, +- TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, +- &mask->vlan); ++ TCA_FLOWER_KEY_VLAN_PRIO, ++ TCA_FLOWER_KEY_VLAN_ETH_TYPE, ++ &key->vlan, &mask->vlan); + + if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { + ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); +@@ -1506,6 +1513,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb, + fl_set_key_vlan(tb, ethertype, + TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, ++ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + &key->cvlan, &mask->cvlan); + fl_set_key_val(tb, &key->basic.n_proto, + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, +@@ -2861,13 +2869,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, + goto nla_put_failure; + + if (mask->basic.n_proto) { +- if (mask->cvlan.vlan_tpid) { ++ if (mask->cvlan.vlan_eth_type) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + key->basic.n_proto)) + goto nla_put_failure; +- } else if (mask->vlan.vlan_tpid) { ++ } else if (mask->vlan.vlan_eth_type) { + if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, +- key->basic.n_proto)) ++ key->vlan.vlan_eth_type)) + goto nla_put_failure; + } + } +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c +index 806babdd838d2..eca525791013e 100644 +--- a/net/sched/sch_taprio.c ++++ b/net/sched/sch_taprio.c +@@ -427,7 +427,8 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, + if (unlikely(!child)) + return qdisc_drop(skb, sch, to_free); + +- if (skb->sk && sock_flag(skb->sk, 
SOCK_TXTIME)) { ++ /* sk_flags are only safe to use on full sockets. */ ++ if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) { + if (!is_valid_interval(skb, sch)) + return qdisc_drop(skb, sch, to_free); + } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 0a9e2c7d8e5f5..e9b4ea3d934fa 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -5518,7 +5518,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) + * Set the daddr and initialize id to something more random and also + * copy over any ip options. + */ +- sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); ++ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk); + sp->pf->copy_ip_options(sk, sock->sk); + + /* Populate the fields of the newsk from the oldsk and migrate the +diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c +index 9007c7e3bae4e..30bae60d626c6 100644 +--- a/net/smc/smc_pnet.c ++++ b/net/smc/smc_pnet.c +@@ -310,8 +310,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) + list_for_each_entry(ibdev, &smc_ib_devices.list, list) { + if (!strncmp(ibdev->ibdev->name, ib_name, + sizeof(ibdev->ibdev->name)) || +- !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, +- IB_DEVICE_NAME_MAX - 1)) { ++ (ibdev->ibdev->dev.parent && ++ !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, ++ IB_DEVICE_NAME_MAX - 1))) { + goto out; + } + } +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 0df8b9a19952c..12f44ad4e0d8e 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -475,7 +475,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + .len = IEEE80211_MAX_MESH_ID_LEN }, + [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT, + +- [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, ++ /* allow 3 for NUL-termination, we used to declare this NLA_STRING */ ++ [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 
2, 3), + [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, + + [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index c1b2655682a8a..6dc9b7e22b71d 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -1968,11 +1968,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, + /* this is a nontransmitting bss, we need to add it to + * transmitting bss' list if it is not there + */ ++ spin_lock_bh(&rdev->bss_lock); + if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, + &res->pub)) { + if (__cfg80211_unlink_bss(rdev, res)) + rdev->bss_generation++; + } ++ spin_unlock_bh(&rdev->bss_lock); + } + + trace_cfg80211_return_bss(&res->pub); +diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c +index cbe1d6c4b1a51..c84bef1d28955 100644 +--- a/scripts/gcc-plugins/latent_entropy_plugin.c ++++ b/scripts/gcc-plugins/latent_entropy_plugin.c +@@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = { + .help = "disable\tturn off latent entropy instrumentation\n", + }; + +-static unsigned HOST_WIDE_INT seed; +-/* +- * get_random_seed() (this is a GCC function) generates the seed. +- * This is a simple random generator without any cryptographic security because +- * the entropy doesn't come from here. 
+- */ ++static unsigned HOST_WIDE_INT deterministic_seed; ++static unsigned HOST_WIDE_INT rnd_buf[32]; ++static size_t rnd_idx = ARRAY_SIZE(rnd_buf); ++static int urandom_fd = -1; ++ + static unsigned HOST_WIDE_INT get_random_const(void) + { +- unsigned int i; +- unsigned HOST_WIDE_INT ret = 0; +- +- for (i = 0; i < 8 * sizeof(ret); i++) { +- ret = (ret << 1) | (seed & 1); +- seed >>= 1; +- if (ret & 1) +- seed ^= 0xD800000000000000ULL; ++ if (deterministic_seed) { ++ unsigned HOST_WIDE_INT w = deterministic_seed; ++ w ^= w << 13; ++ w ^= w >> 7; ++ w ^= w << 17; ++ deterministic_seed = w; ++ return deterministic_seed; + } + +- return ret; ++ if (urandom_fd < 0) { ++ urandom_fd = open("/dev/urandom", O_RDONLY); ++ gcc_assert(urandom_fd >= 0); ++ } ++ if (rnd_idx >= ARRAY_SIZE(rnd_buf)) { ++ gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf)); ++ rnd_idx = 0; ++ } ++ return rnd_buf[rnd_idx++]; + } + + static tree tree_get_random_const(tree type) +@@ -549,8 +555,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused, + tree type, id; + int quals; + +- seed = get_random_seed(false); +- + if (in_lto_p) + return; + +@@ -585,6 +589,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, + const struct plugin_argument * const argv = plugin_info->argv; + int i; + ++ /* ++ * Call get_random_seed() with noinit=true, so that this returns ++ * 0 in the case where no seed has been passed via -frandom-seed. 
++ */ ++ deterministic_seed = get_random_seed(true); ++ + static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = { + { + .base = &latent_entropy_decl, +diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c +index 257d412eac5dd..30f0f96e00004 100644 +--- a/sound/core/pcm_misc.c ++++ b/sound/core/pcm_misc.c +@@ -429,7 +429,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int + return 0; + width = pcm_formats[(INT)format].phys; /* physical width */ + pat = pcm_formats[(INT)format].silence; +- if (! width) ++ if (!width || !pat) + return -EINVAL; + /* signed or 1 byte data */ + if (pcm_formats[(INT)format].signd == 1 || width <= 8) { +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index b886326ce9b96..11d653190e6ea 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2626,6 +2626,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS), ++ SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), +@@ -8994,6 +8995,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), ++ SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 
0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c +index 3b273580fb840..3a0a7930cd10a 100644 +--- a/tools/perf/util/parse-events.c ++++ b/tools/perf/util/parse-events.c +@@ -1442,7 +1442,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, + bool use_uncore_alias; + LIST_HEAD(config_terms); + +- if (verbose > 1) { ++ pmu = parse_state->fake_pmu ?: perf_pmu__find(name); ++ ++ if (verbose > 1 && !(pmu && pmu->selectable)) { + fprintf(stderr, "Attempting to add event pmu '%s' with '", + name); + if (head_config) { +@@ -1455,7 +1457,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, + fprintf(stderr, "' that may result in non-fatal errors\n"); + } + +- pmu = parse_state->fake_pmu ?: perf_pmu__find(name); + if (!pmu) { + char *err_str; + +diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c +index b019e0b8221c7..84fda3b490735 100644 +--- a/tools/testing/selftests/mqueue/mq_perf_tests.c ++++ b/tools/testing/selftests/mqueue/mq_perf_tests.c +@@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no) + if (in_shutdown++) + return; + ++ /* Free the cpu_set allocated using CPU_ALLOC in main function */ ++ CPU_FREE(cpu_set); ++ + for (i = 0; i < num_cpus_to_pin; i++) + if (cpu_threads[i]) { + pthread_kill(cpu_threads[i], SIGUSR1); +@@ -551,6 +554,12 @@ int main(int argc, char *argv[]) + perror("sysconf(_SC_NPROCESSORS_ONLN)"); + exit(1); + } ++ ++ if (getuid() != 0) ++ ksft_exit_skip("Not running as root, but almost all tests " ++ "require root in order to modify\nsystem settings. 
" ++ "Exiting.\n"); ++ + cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN)); + cpu_set = CPU_ALLOC(cpus_online); + if (cpu_set == NULL) { +@@ -589,7 +598,7 @@ int main(int argc, char *argv[]) + cpu_set)) { + fprintf(stderr, "Any given CPU may " + "only be given once.\n"); +- exit(1); ++ goto err_code; + } else + CPU_SET_S(cpus_to_pin[cpu], + cpu_set_size, cpu_set); +@@ -607,7 +616,7 @@ int main(int argc, char *argv[]) + queue_path = malloc(strlen(option) + 2); + if (!queue_path) { + perror("malloc()"); +- exit(1); ++ goto err_code; + } + queue_path[0] = '/'; + queue_path[1] = 0; +@@ -622,17 +631,12 @@ int main(int argc, char *argv[]) + fprintf(stderr, "Must pass at least one CPU to continuous " + "mode.\n"); + poptPrintUsage(popt_context, stderr, 0); +- exit(1); ++ goto err_code; + } else if (!continuous_mode) { + num_cpus_to_pin = 1; + cpus_to_pin[0] = cpus_online - 1; + } + +- if (getuid() != 0) +- ksft_exit_skip("Not running as root, but almost all tests " +- "require root in order to modify\nsystem settings. " +- "Exiting.\n"); +- + max_msgs = fopen(MAX_MSGS, "r+"); + max_msgsize = fopen(MAX_MSGSIZE, "r+"); + if (!max_msgs) +@@ -740,4 +744,9 @@ int main(int argc, char *argv[]) + sleep(1); + } + shutdown(0, "", 0); ++ ++err_code: ++ CPU_FREE(cpu_set); ++ exit(1); ++ + } |