author | Mike Pagano <mpagano@gentoo.org> | 2015-09-14 12:23:16 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2015-09-14 12:23:16 -0400 |
commit | 475249545a9f6fc3bdea88cfba0af4c56044aac8 (patch) | |
tree | df974609f4953bc1a3e0de12ede3f160011865ca | |
parent | Linux patch 3.14.51 (diff) | |
download | linux-patches-475249545a9f6fc3bdea88cfba0af4c56044aac8.tar.gz linux-patches-475249545a9f6fc3bdea88cfba0af4c56044aac8.tar.bz2 linux-patches-475249545a9f6fc3bdea88cfba0af4c56044aac8.zip |
Linux patch 3.14.52 (3.14-58)
-rw-r--r-- | 0000_README | 4 |
-rw-r--r-- | 1051_linux-3.14.52.patch | 568 |
2 files changed, 572 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 4df14cd9..97affdb4 100644 --- a/0000_README +++ b/0000_README @@ -246,6 +246,10 @@ Patch: 1050_linux-3.14.51.patch From: http://www.kernel.org Desc: Linux 3.14.51 +Patch: 1051_linux-3.14.52.patch +From: http://www.kernel.org +Desc: Linux 3.14.52 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1051_linux-3.14.52.patch b/1051_linux-3.14.52.patch new file mode 100644 index 00000000..1af14838 --- /dev/null +++ b/1051_linux-3.14.52.patch @@ -0,0 +1,568 @@ +diff --git a/Makefile b/Makefile +index 83275d8ed880..3a5d4316c4c7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 51 ++SUBLEVEL = 52 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c +index 81a02a8762b0..86825f8883de 100644 +--- a/arch/arm64/kvm/inject_fault.c ++++ b/arch/arm64/kvm/inject_fault.c +@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) + { + if (!(vcpu->arch.hcr_el2 & HCR_RW)) + inject_abt32(vcpu, false, addr); +- +- inject_abt64(vcpu, false, addr); ++ else ++ inject_abt64(vcpu, false, addr); + } + + /** +@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) + { + if (!(vcpu->arch.hcr_el2 & HCR_RW)) + inject_abt32(vcpu, true, addr); +- +- inject_abt64(vcpu, true, addr); ++ else ++ inject_abt64(vcpu, true, addr); + } + + /** +@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) + { + if (!(vcpu->arch.hcr_el2 & HCR_RW)) + inject_undef32(vcpu); +- +- inject_undef64(vcpu); ++ else ++ inject_undef64(vcpu); + } +diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c +index 8ed6cb1a900f..8f7ffffc63e9 100644 +--- a/arch/arm64/mm/mmap.c ++++ b/arch/arm64/mm/mmap.c +@@ -47,22 +47,14 @@ static int mmap_is_legacy(void) + return sysctl_legacy_va_layout; + } + +-/* +- * Since get_random_int() returns the same value within a 1 jiffy window, we +- * will almost always get the same randomisation for the stack and mmap +- * region. This will mean the relative distance between stack and mmap will be +- * the same. +- * +- * To avoid this we can shift the randomness by 1 bit. 
+- */ + static unsigned long mmap_rnd(void) + { + unsigned long rnd = 0; + + if (current->flags & PF_RANDOMIZE) +- rnd = (long)get_random_int() & (STACK_RND_MASK >> 1); ++ rnd = (long)get_random_int() & STACK_RND_MASK; + +- return rnd << (PAGE_SHIFT + 1); ++ return rnd << PAGE_SHIFT; + } + + static unsigned long mmap_base(void) +diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c +index 2b946bc4212d..f3f71369adc7 100644 +--- a/drivers/base/regmap/regcache-rbtree.c ++++ b/drivers/base/regmap/regcache-rbtree.c +@@ -302,11 +302,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, + if (!blk) + return -ENOMEM; + +- present = krealloc(rbnode->cache_present, +- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); +- if (!present) { +- kfree(blk); +- return -ENOMEM; ++ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { ++ present = krealloc(rbnode->cache_present, ++ BITS_TO_LONGS(blklen) * sizeof(*present), ++ GFP_KERNEL); ++ if (!present) { ++ kfree(blk); ++ return -ENOMEM; ++ } ++ ++ memset(present + BITS_TO_LONGS(rbnode->blklen), 0, ++ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) ++ * sizeof(*present)); ++ } else { ++ present = rbnode->cache_present; + } + + /* insert the register value in the correct place in the rbnode block */ +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c +index efe1b4761735..e88556ac8318 100644 +--- a/drivers/block/xen-blkfront.c ++++ b/drivers/block/xen-blkfront.c +@@ -1093,8 +1093,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, + * Add the used indirect page back to the list of + * available pages for indirect grefs. + */ +- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); +- list_add(&indirect_page->lru, &info->indirect_pages); ++ if (!info->feature_persistent) { ++ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); ++ list_add(&indirect_page->lru, &info->indirect_pages); ++ } + s->indirect_grants[i]->gref = GRANT_INVALID_REF; + list_add_tail(&s->indirect_grants[i]->node, &info->grants); + } +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c +index d97a03dbf42c..1489927bdda1 100644 +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -900,13 +900,14 @@ static int ahash_final_ctx(struct ahash_request *req) + state->buflen_1; + u32 *sh_desc = ctx->sh_desc_fin, *desc; + dma_addr_t ptr = ctx->sh_desc_fin_dma; +- int sec4_sg_bytes; ++ int sec4_sg_bytes, sec4_sg_src_index; + int digestsize = crypto_ahash_digestsize(ahash); + struct ahash_edesc *edesc; + int ret = 0; + int sh_len; + +- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); ++ sec4_sg_src_index = 1 + (buflen ? 
1 : 0); ++ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + +@@ -933,7 +934,7 @@ static int ahash_final_ctx(struct ahash_request *req) + state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, + buf, state->buf_dma, buflen, + last_buflen); +- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; ++ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; + + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, + LDST_SGF); +diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c +index ef6b7e08f485..5c361f3c66aa 100644 +--- a/drivers/edac/ppc4xx_edac.c ++++ b/drivers/edac/ppc4xx_edac.c +@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) + */ + + for (row = 0; row < mci->nr_csrows; row++) { +- struct csrow_info *csi = &mci->csrows[row]; ++ struct csrow_info *csi = mci->csrows[row]; + + /* + * Get the configuration settings for this +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +index a3480c13eb1b..9fe10d1ad2e4 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +@@ -2475,7 +2475,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, + + ret = vmw_resources_validate(sw_context); + if (unlikely(ret != 0)) +- goto out_err; ++ goto out_err_nores; + + if (throttle_us) { + ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, +@@ -2511,6 +2511,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, + vmw_resource_relocations_free(&sw_context->res_relocations); + + vmw_fifo_commit(dev_priv, command_size); ++ mutex_unlock(&dev_priv->binding_mutex); + + vmw_query_bo_switch_commit(dev_priv, sw_context); + ret = vmw_execbuf_fence_commands(file_priv, dev_priv, +@@ -2526,7 +2527,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, + DRM_ERROR("Fence submission error. 
Syncing.\n"); + + vmw_resource_list_unreserve(&sw_context->resource_list, false); +- mutex_unlock(&dev_priv->binding_mutex); + + ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, + (void *) fence); +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index e9d33ad59df5..3412b86e79fd 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) + return r; + + disk_super = dm_block_data(copy); +- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); +- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); ++ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); ++ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); + dm_sm_dec_block(pmd->metadata_sm, held_root); + + return dm_tm_unlock(pmd->tm, copy); +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index 1b3a09473452..30f9ef0c0d4f 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, + if (resp) { + resp(sp, fp, arg); + res = true; +- } else if (!IS_ERR(fp)) { +- fc_frame_free(fp); + } + + spin_lock_bh(&ep->ex_lock); +@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + * If new exch resp handler is valid then call that + * first. + */ +- fc_invoke_resp(ep, sp, fp); ++ if (!fc_invoke_resp(ep, sp, fp)) ++ fc_frame_free(fp); + + fc_exch_release(ep); + return; +@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) + fc_exch_hold(ep); + if (!rc) + fc_exch_delete(ep); +- fc_invoke_resp(ep, sp, fp); ++ if (!fc_invoke_resp(ep, sp, fp)) ++ fc_frame_free(fp); + if (has_rec) + fc_exch_timer_set(ep, ep->r_a_tov); + fc_exch_release(ep); +diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c +index 1d7e76e8b447..ae6fc1a94568 100644 +--- a/drivers/scsi/libfc/fc_fcp.c ++++ b/drivers/scsi/libfc/fc_fcp.c +@@ -1039,11 +1039,26 @@ restart: + fc_fcp_pkt_hold(fsp); + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); + +- if (!fc_fcp_lock_pkt(fsp)) { ++ spin_lock_bh(&fsp->scsi_pkt_lock); ++ if (!(fsp->state & FC_SRB_COMPL)) { ++ fsp->state |= FC_SRB_COMPL; ++ /* ++ * TODO: dropping scsi_pkt_lock and then reacquiring ++ * again around fc_fcp_cleanup_cmd() is required, ++ * since fc_fcp_cleanup_cmd() calls into ++ * fc_seq_set_resp() and that func preempts cpu using ++ * schedule. May be schedule and related code should be ++ * removed instead of unlocking here to avoid scheduling ++ * while atomic bug. ++ */ ++ spin_unlock_bh(&fsp->scsi_pkt_lock); ++ + fc_fcp_cleanup_cmd(fsp, error); ++ ++ spin_lock_bh(&fsp->scsi_pkt_lock); + fc_io_compl(fsp); +- fc_fcp_unlock_pkt(fsp); + } ++ spin_unlock_bh(&fsp->scsi_pkt_lock); + + fc_fcp_pkt_release(fsp); + spin_lock_irqsave(&si->scsi_queue_lock, flags); +diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c +index 001e9ceda4c3..a59be67b92d5 100644 +--- a/drivers/scsi/scsi_pm.c ++++ b/drivers/scsi/scsi_pm.c +@@ -149,15 +149,15 @@ static int sdev_runtime_suspend(struct device *dev) + { + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + struct scsi_device *sdev = to_scsi_device(dev); +- int err; ++ int err = 0; + +- err = blk_pre_runtime_suspend(sdev->request_queue); +- if (err) +- return err; +- if (pm && pm->runtime_suspend) ++ if (pm && pm->runtime_suspend) { ++ err = blk_pre_runtime_suspend(sdev->request_queue); ++ if (err) ++ return err; + err = pm->runtime_suspend(dev); +- blk_post_runtime_suspend(sdev->request_queue, err); +- ++ blk_post_runtime_suspend(sdev->request_queue, err); ++ } + return err; + } + +@@ -180,11 +180,11 @@ static int sdev_runtime_resume(struct device *dev) + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int err = 0; + +- blk_pre_runtime_resume(sdev->request_queue); +- if (pm && pm->runtime_resume) ++ if (pm && pm->runtime_resume) { ++ blk_pre_runtime_resume(sdev->request_queue); + err = pm->runtime_resume(dev); +- blk_post_runtime_resume(sdev->request_queue, err); +- ++ blk_post_runtime_resume(sdev->request_queue, err); ++ } + return err; + } + +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index bb5367d288fb..7e9a0a6c655b 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -172,6 +172,7 @@ + {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ ++ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ +diff --git a/ipc/sem.c b/ipc/sem.c +index bee555417312..e53c96f7db42 100644 +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head) + } + + /* ++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they ++ * are only control barriers. ++ * The code must pair with spin_unlock(&sem->lock) or ++ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient. ++ * ++ * smp_rmb() is sufficient, as writes cannot pass the control barrier. ++ */ ++#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb() ++ ++/* + * Wait until all currently ongoing simple ops have completed. + * Caller must own sem_perm.lock. + * New simple ops cannot start, because simple ops first check +@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma) + sem = sma->sem_base + i; + spin_unlock_wait(&sem->lock); + } ++ ipc_smp_acquire__after_spin_is_unlocked(); + } + + /* +@@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, + + /* Then check that the global lock is free */ + if (!spin_is_locked(&sma->sem_perm.lock)) { +- /* spin_is_locked() is not a memory barrier */ +- smp_mb(); ++ /* ++ * We need a memory barrier with acquire semantics, ++ * otherwise we can race with another thread that does: ++ * complex_count++; ++ * spin_unlock(sem_perm.lock); ++ */ ++ ipc_smp_acquire__after_spin_is_unlocked(); + + /* Now repeat the test of complex_count: + * It can't change anymore until we drop sem->lock. 
+@@ -2055,17 +2071,28 @@ void exit_sem(struct task_struct *tsk) + rcu_read_lock(); + un = list_entry_rcu(ulp->list_proc.next, + struct sem_undo, list_proc); +- if (&un->list_proc == &ulp->list_proc) +- semid = -1; +- else +- semid = un->semid; ++ if (&un->list_proc == &ulp->list_proc) { ++ /* ++ * We must wait for freeary() before freeing this ulp, ++ * in case we raced with last sem_undo. There is a small ++ * possibility where we exit while freeary() didn't ++ * finish unlocking sem_undo_list. ++ */ ++ spin_unlock_wait(&ulp->lock); ++ rcu_read_unlock(); ++ break; ++ } ++ spin_lock(&ulp->lock); ++ semid = un->semid; ++ spin_unlock(&ulp->lock); + ++ /* exit_sem raced with IPC_RMID, nothing to do */ + if (semid == -1) { + rcu_read_unlock(); +- break; ++ continue; + } + +- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); ++ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (IS_ERR(sma)) { + rcu_read_unlock(); +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 60146febb9b3..3bf20e36a8e7 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -3562,28 +3562,21 @@ static void perf_event_for_each(struct perf_event *event, + mutex_unlock(&ctx->mutex); + } + +-static int perf_event_period(struct perf_event *event, u64 __user *arg) +-{ +- struct perf_event_context *ctx = event->ctx; +- int ret = 0, active; ++struct period_event { ++ struct perf_event *event; + u64 value; ++}; + +- if (!is_sampling_event(event)) +- return -EINVAL; +- +- if (copy_from_user(&value, arg, sizeof(value))) +- return -EFAULT; +- +- if (!value) +- return -EINVAL; ++static int __perf_event_period(void *info) ++{ ++ struct period_event *pe = info; ++ struct perf_event *event = pe->event; ++ struct perf_event_context *ctx = event->ctx; ++ u64 value = pe->value; ++ bool active; + +- raw_spin_lock_irq(&ctx->lock); ++ raw_spin_lock(&ctx->lock); + if (event->attr.freq) { +- if (value > sysctl_perf_event_sample_rate) { +- ret = -EINVAL; +- goto unlock; +- } +- + event->attr.sample_freq = value; + } else { + event->attr.sample_period = value; +@@ -3602,11 +3595,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) + event->pmu->start(event, PERF_EF_RELOAD); + perf_pmu_enable(ctx->pmu); + } ++ raw_spin_unlock(&ctx->lock); + +-unlock: ++ return 0; ++} ++ ++static int perf_event_period(struct perf_event *event, u64 __user *arg) ++{ ++ struct period_event pe = { .event = event, }; ++ struct perf_event_context *ctx = event->ctx; ++ struct task_struct *task; ++ u64 value; ++ ++ if (!is_sampling_event(event)) ++ return -EINVAL; ++ ++ if (copy_from_user(&value, arg, sizeof(value))) ++ return -EFAULT; ++ ++ if (!value) ++ return -EINVAL; ++ ++ if (event->attr.freq && value > sysctl_perf_event_sample_rate) ++ return -EINVAL; ++ ++ task = ctx->task; ++ pe.value = value; ++ ++ if (!task) { ++ cpu_function_call(event->cpu, __perf_event_period, &pe); ++ return 0; ++ } ++ ++retry: ++ if (!task_function_call(task, __perf_event_period, &pe)) ++ return 0; ++ ++ raw_spin_lock_irq(&ctx->lock); ++ if (ctx->is_active) { ++ raw_spin_unlock_irq(&ctx->lock); ++ task = ctx->task; ++ goto retry; ++ } ++ ++ __perf_event_period(&pe); + raw_spin_unlock_irq(&ctx->lock); + +- return ret; ++ return 0; + } + + static const struct file_operations perf_fops; +@@ -4218,12 +4253,20 @@ static const struct file_operations perf_fops = { + * to user-space before waking everybody up. 
+ */ + ++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) ++{ ++ /* only the parent has fasync state */ ++ if (event->parent) ++ event = event->parent; ++ return &event->fasync; ++} ++ + void perf_event_wakeup(struct perf_event *event) + { + ring_buffer_wakeup(event); + + if (event->pending_kill) { +- kill_fasync(&event->fasync, SIGIO, event->pending_kill); ++ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); + event->pending_kill = 0; + } + } +@@ -5432,7 +5475,7 @@ static int __perf_event_overflow(struct perf_event *event, + else + perf_event_output(event, data, regs); + +- if (event->fasync && event->pending_kill) { ++ if (*perf_event_fasync(event) && event->pending_kill) { + event->pending_wakeup = 1; + irq_work_queue(&event->pending); + } +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 9502057c3c54..42aeb848b8e9 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1510,6 +1510,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags) + */ + ret = __get_any_page(page, pfn, 0); + if (!PageLRU(page)) { ++ /* Drop page reference which is from __get_any_page() */ ++ put_page(page); + pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", + pfn, page->flags); + return -EIO; +diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl +index 31331723e810..9969feefb720 100644 +--- a/scripts/kconfig/streamline_config.pl ++++ b/scripts/kconfig/streamline_config.pl +@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.'); + my $kconfig = $ARGV[1]; + my $lsmod_file = $ENV{'LSMOD'}; + +-my @makefiles = `find $ksource -name Makefile 2>/dev/null`; ++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`; + chomp @makefiles; + + my %depends; |
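
The regcache-rbtree hunk above changes regcache_rbtree_insert_to_block() so that the cache_present bitmap is reallocated only when the block actually grows, and only the newly added longs are zeroed. Below is a minimal userspace sketch of that pattern, assuming plain libc allocation; BITS_TO_LONGS is re-derived here and grow_present() is an illustrative helper, not part of the regmap API.

```c
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#define BITS_TO_LONGS(nbits) \
    (((nbits) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

static unsigned long *grow_present(unsigned long *present,
                                   size_t old_bits, size_t new_bits)
{
    size_t old_longs = BITS_TO_LONGS(old_bits);
    size_t new_longs = BITS_TO_LONGS(new_bits);
    unsigned long *p;

    if (new_longs <= old_longs)
        return present;      /* block did not grow: reuse the old bitmap */

    p = realloc(present, new_longs * sizeof(*p));
    if (!p)
        return NULL;         /* caller still owns the original bitmap */

    /* Only the longs added by realloc() are uninitialised; zero just
     * those so no register is spuriously marked as cached. */
    memset(p + old_longs, 0, (new_longs - old_longs) * sizeof(*p));
    return p;
}

int main(void)
{
    unsigned long *map = calloc(BITS_TO_LONGS(64), sizeof(*map));
    unsigned long *bigger = map ? grow_present(map, 64, 200) : NULL;

    free(bigger ? bigger : map);  /* on failure the old bitmap survives */
    return bigger ? 0 : 1;
}
```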
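The ipc/sem.c hunk adds ipc_smp_acquire__after_spin_is_unlocked() because observing a lock as free is only a control dependency, not an ordering guarantee for reads that follow. The sketch below shows the same pairing with C11 atomics rather than the kernel's spinlock and smp_rmb() primitives; struct sem_like and the function names are made up for illustration only.

```c
#include <stdatomic.h>
#include <stdbool.h>

struct sem_like {
    atomic_bool global_locked;  /* stands in for sem_perm.lock         */
    int complex_count;          /* only written while the lock is held */
};

/* Lock holder: update the protected data, then release the lock.  The
 * release store is what the reader's acquire fence pairs with. */
static void update_and_unlock(struct sem_like *s)
{
    s->complex_count++;
    atomic_store_explicit(&s->global_locked, false, memory_order_release);
}

/* Reader fast path, mirroring sem_lock(): seeing the lock free is only
 * a control dependency, so an explicit acquire fence is needed before
 * complex_count is re-read -- the role that
 * ipc_smp_acquire__after_spin_is_unlocked() plays in the patch. */
static bool fast_path_usable(struct sem_like *s)
{
    if (atomic_load_explicit(&s->global_locked, memory_order_relaxed))
        return false;
    atomic_thread_fence(memory_order_acquire);
    return s->complex_count == 0;
}

int main(void)
{
    struct sem_like s = { .global_locked = false, .complex_count = 0 };

    update_and_unlock(&s);
    return fast_path_usable(&s) ? 1 : 0;  /* 0: complex path required */
}
```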