diff options
author | Alice Ferrazzi <alicef@gentoo.org> | 2017-02-15 16:22:20 +0000 |
---|---|---|
committer | Alice Ferrazzi <alicef@gentoo.org> | 2017-02-15 16:22:20 +0000 |
commit | bb3d53cb580146ec022c488681ed4863bf4587c4 (patch) | |
tree | 86fe08ab4fe9c76a393512767c39584abdd14321 | |
parent | Linux patch 4.4.48 (diff) | |
download | linux-patches-bb3d53cb580146ec022c488681ed4863bf4587c4.tar.gz linux-patches-bb3d53cb580146ec022c488681ed4863bf4587c4.tar.bz2 linux-patches-bb3d53cb580146ec022c488681ed4863bf4587c4.zip |
Linux patch 4.4.49 (tag: 4.4-53)
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1048_linux-4.4.49.patch | 531 |
2 files changed, 535 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 44fe8269..976dbf2b 100644 --- a/0000_README +++ b/0000_README @@ -235,6 +235,10 @@ Patch: 1047_linux-4.4.48.patch From: http://www.kernel.org Desc: Linux 4.4.48 +Patch: 1048_linux-4.4.49.patch +From: http://www.kernel.org +Desc: Linux 4.4.49 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1048_linux-4.4.49.patch b/1048_linux-4.4.49.patch new file mode 100644 index 00000000..d1ccaa90 --- /dev/null +++ b/1048_linux-4.4.49.patch @@ -0,0 +1,531 @@ +diff --git a/Makefile b/Makefile +index 0793cd412656..5fab6d4068b5 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 48 ++SUBLEVEL = 49 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c +index 91ebe382147f..5f69c3bd59bb 100644 +--- a/arch/arc/kernel/unaligned.c ++++ b/arch/arc/kernel/unaligned.c +@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, + + /* clear any remanants of delay slot */ + if (delay_mode(regs)) { +- regs->ret = regs->bta ~1U; ++ regs->ret = regs->bta & ~1U; + regs->status32 &= ~STATUS_DE_MASK; + } else { + regs->ret += state.instr_len; +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index 4d9375814b53..d54c53b7ab63 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct pt_regs newregs; ++ struct pt_regs newregs = *task_pt_regs(target); + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &newregs, +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c +index daafcf121ce0..c095455d496e 100644 +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c +@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned 
int fsr, + + void __init early_abt_enable(void) + { +- fsr_info[22].fn = early_abort_handler; ++ fsr_info[FSR_FS_AEA].fn = early_abort_handler; + local_abt_enable(); +- fsr_info[22].fn = do_bad; ++ fsr_info[FSR_FS_AEA].fn = do_bad; + } + + #ifndef CONFIG_ARM_LPAE +diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h +index 05ec5e0df32d..78830657cab3 100644 +--- a/arch/arm/mm/fault.h ++++ b/arch/arm/mm/fault.h +@@ -11,11 +11,15 @@ + #define FSR_FS5_0 (0x3f) + + #ifdef CONFIG_ARM_LPAE ++#define FSR_FS_AEA 17 ++ + static inline int fsr_fs(unsigned int fsr) + { + return fsr & FSR_FS5_0; + } + #else ++#define FSR_FS_AEA 22 ++ + static inline int fsr_fs(unsigned int fsr) + { + return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 1e5d2f07416b..8ca533b8c606 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = { + .irq_ack = irq_chip_ack_parent, + .irq_eoi = ioapic_ack_level, + .irq_set_affinity = ioapic_set_affinity, +- .irq_retrigger = irq_chip_retrigger_hierarchy, + .flags = IRQCHIP_SKIP_SET_WAKE, + }; + +@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { + .irq_ack = irq_chip_ack_parent, + .irq_eoi = ioapic_ir_ack_level, + .irq_set_affinity = ioapic_set_affinity, +- .irq_retrigger = irq_chip_retrigger_hierarchy, + .flags = IRQCHIP_SKIP_SET_WAKE, + }; + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 909d1d71d130..4f5d07bb3511 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -3948,10 +3948,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) + drm_crtc_vblank_put(&intel_crtc->base); + + wake_up_all(&dev_priv->pending_flip_queue); +- queue_work(dev_priv->wq, &work->work); +- + trace_i915_flip_complete(intel_crtc->plane, + work->pending_flip_obj); ++ 
++ queue_work(dev_priv->wq, &work->work); + } + + void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +index bdbd80423b17..9ff2881f933d 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +@@ -900,9 +900,7 @@ + + static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) + { +- u8 __iomem *reg_addr = ACCESS_ONCE(base); +- +- writel(value, reg_addr + reg); ++ writel(value, base + reg); + } + + #define dsaf_write_dev(a, reg, value) \ +@@ -910,9 +908,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) + + static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) + { +- u8 __iomem *reg_addr = ACCESS_ONCE(base); +- +- return readl(reg_addr + reg); ++ return readl(base + reg); + } + + #define dsaf_read_dev(a, reg) \ +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index 7a601d8c615e..e8a09ff9e724 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -854,7 +854,6 @@ static int netvsc_set_channels(struct net_device *net, + } + goto recover; + } +- netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); + + out: + netvsc_open(net); +@@ -1142,6 +1141,7 @@ static int netvsc_probe(struct hv_device *dev, + nvdev = hv_get_drvdata(dev); + netif_set_real_num_tx_queues(net, nvdev->num_chn); + netif_set_real_num_rx_queues(net, nvdev->num_chn); ++ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); + + ret = register_netdev(net); + if (ret != 0) { +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index d6abf191122a..1f445f357da1 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -1391,6 +1391,8 @@ static void xennet_disconnect_backend(struct netfront_info *info) + for (i = 0; i < num_queues && info->queues; ++i) { + struct netfront_queue 
*queue = &info->queues[i]; + ++ del_timer_sync(&queue->rx_refill_timer); ++ + if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) + unbind_from_irqhandler(queue->tx_irq, queue); + if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { +@@ -1745,7 +1747,6 @@ static void xennet_destroy_queues(struct netfront_info *info) + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); +- del_timer_sync(&queue->rx_refill_timer); + netif_napi_del(&queue->napi); + } + +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c +index 75f820ca17b7..27ff38f839fc 100644 +--- a/drivers/s390/scsi/zfcp_fsf.c ++++ b/drivers/s390/scsi/zfcp_fsf.c +@@ -1583,7 +1583,7 @@ out: + int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; +- struct zfcp_fsf_req *req = NULL; ++ struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) + zfcp_fsf_req_free(req); + out: + spin_unlock_irq(&qdio->req_q_lock); +- if (req && !IS_ERR(req)) ++ if (!retval) + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); + return retval; + } +@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) + int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + { + struct zfcp_qdio *qdio = wka_port->adapter->qdio; +- struct zfcp_fsf_req *req = NULL; ++ struct zfcp_fsf_req *req; + int retval = -EIO; + + spin_lock_irq(&qdio->req_q_lock); +@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) + zfcp_fsf_req_free(req); + out: + spin_unlock_irq(&qdio->req_q_lock); +- if (req && !IS_ERR(req)) ++ if (!retval) + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); + return retval; + } +diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c +index 0d351cd3191b..26d38b1a45ab 100644 +--- a/drivers/scsi/aacraid/comminit.c ++++ 
b/drivers/scsi/aacraid/comminit.c +@@ -50,9 +50,13 @@ struct aac_common aac_config = { + + static inline int aac_is_msix_mode(struct aac_dev *dev) + { +- u32 status; ++ u32 status = 0; + +- status = src_readl(dev, MUnit.OMR); ++ if (dev->pdev->device == PMC_DEVICE_S6 || ++ dev->pdev->device == PMC_DEVICE_S7 || ++ dev->pdev->device == PMC_DEVICE_S8) { ++ status = src_readl(dev, MUnit.OMR); ++ } + return (status & AAC_INT_MODE_MSIX); + } + +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 8cead04f26d6..f6a8e9958e75 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -51,6 +51,7 @@ + #include <linux/workqueue.h> + #include <linux/delay.h> + #include <linux/pci.h> ++#include <linux/pci-aspm.h> + #include <linux/interrupt.h> + #include <linux/aer.h> + #include <linux/raid_class.h> +@@ -8483,6 +8484,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) + + switch (hba_mpi_version) { + case MPI2_VERSION: ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | ++ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + /* Use mpt2sas driver host template for SAS 2.0 HBA's */ + shost = scsi_host_alloc(&mpt2sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index dcd5ed26eb18..356c80fbb304 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -362,7 +362,15 @@ int core_enable_device_list_for_node( + kfree(new); + return -EINVAL; + } +- BUG_ON(orig->se_lun_acl != NULL); ++ if (orig->se_lun_acl != NULL) { ++ pr_warn_ratelimited("Detected existing explicit" ++ " se_lun_acl->se_lun_group reference for %s" ++ " mapped_lun: %llu, failing\n", ++ nacl->initiatorname, mapped_lun); ++ mutex_unlock(&nacl->lun_entry_mutex); ++ kfree(new); ++ return -EINVAL; ++ } + + rcu_assign_pointer(new->se_lun, lun); + rcu_assign_pointer(new->se_lun_acl, lun_acl); 
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index c220bb8dfa9d..2e27b1034ede 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -442,6 +442,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + int *post_ret) + { + struct se_device *dev = cmd->se_dev; ++ sense_reason_t ret = TCM_NO_SENSE; + + /* + * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through +@@ -449,9 +450,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + * sent to the backend driver. + */ + spin_lock_irq(&cmd->t_state_lock); +- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { ++ if (cmd->transport_state & CMD_T_SENT) { + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; + *post_ret = 1; ++ ++ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION) ++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + spin_unlock_irq(&cmd->t_state_lock); + +@@ -461,7 +465,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, + */ + up(&dev->caw_sem); + +- return TCM_NO_SENSE; ++ return ret; + } + + static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 2a67af4e2e13..aa517c4fadb9 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -3058,7 +3058,6 @@ static void target_tmr_work(struct work_struct *work) + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + goto check_stop; + } +- cmd->t_state = TRANSPORT_ISTATE_PROCESSING; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + cmd->se_tfo->queue_tm_rsp(cmd); +@@ -3071,11 +3070,25 @@ int transport_generic_handle_tmr( + struct se_cmd *cmd) + { + unsigned long flags; ++ bool aborted = false; + + spin_lock_irqsave(&cmd->t_state_lock, flags); +- cmd->transport_state |= CMD_T_ACTIVE; ++ if 
(cmd->transport_state & CMD_T_ABORTED) { ++ aborted = true; ++ } else { ++ cmd->t_state = TRANSPORT_ISTATE_PROCESSING; ++ cmd->transport_state |= CMD_T_ACTIVE; ++ } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + ++ if (aborted) { ++ pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" ++ "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, ++ cmd->se_tmr_req->ref_task_tag, cmd->tag); ++ transport_cmd_check_stop_to_fabric(cmd); ++ return 0; ++ } ++ + INIT_WORK(&cmd->work, target_tmr_work); + queue_work(cmd->se_dev->tmr_wq, &cmd->work); + return 0; +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c +index 153a6f255b6d..6415e9b09a52 100644 +--- a/drivers/target/target_core_xcopy.c ++++ b/drivers/target/target_core_xcopy.c +@@ -836,7 +836,7 @@ out: + " CHECK_CONDITION -> sending response\n", rc); + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + } +- target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); ++ target_complete_cmd(ec_cmd, ec_cmd->scsi_status); + } + + sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h +index 59915ea5373c..a91b3b75da0f 100644 +--- a/include/linux/cpumask.h ++++ b/include/linux/cpumask.h +@@ -556,7 +556,7 @@ static inline void cpumask_copy(struct cpumask *dstp, + static inline int cpumask_parse_user(const char __user *buf, int len, + struct cpumask *dstp) + { +- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); ++ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); + } + + /** +@@ -571,7 +571,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, + struct cpumask *dstp) + { + return bitmap_parselist_user(buf, len, cpumask_bits(dstp), +- nr_cpu_ids); ++ nr_cpumask_bits); + } + + /** +@@ -586,7 +586,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) + char *nl = strchr(buf, '\n'); + unsigned int len = nl ? 
(unsigned int)(nl - buf) : strlen(buf); + +- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); ++ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); + } + + /** +@@ -598,7 +598,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) + */ + static inline int cpulist_parse(const char *buf, struct cpumask *dstp) + { +- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); ++ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); + } + + /** +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c +index f7bb6829b415..9063e8e736ad 100644 +--- a/net/mac80211/mesh.c ++++ b/net/mac80211/mesh.c +@@ -355,7 +355,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata, + /* fast-forward to vendor IEs */ + offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); + +- if (offset) { ++ if (offset < ifmsh->ie_len) { + len = ifmsh->ie_len - offset; + data = ifmsh->ie + offset; + if (skb_tailroom(skb) < len) +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index d0cfaa9f19d0..4b56c3b6c25f 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -5640,7 +5640,7 @@ static int selinux_setprocattr(struct task_struct *p, + return error; + + /* Obtain a SID for the context, if one was specified. 
*/ +- if (size && str[1] && str[1] != '\n') { ++ if (size && str[0] && str[0] != '\n') { + if (str[size-1] == '\n') { + str[size-1] = 0; + size--; +diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c +index c850345c43b5..dfa5156f3585 100644 +--- a/sound/core/seq/seq_memory.c ++++ b/sound/core/seq/seq_memory.c +@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) + { + unsigned long flags; + struct snd_seq_event_cell *ptr; +- int max_count = 5 * HZ; + + if (snd_BUG_ON(!pool)) + return -EINVAL; +@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) + if (waitqueue_active(&pool->output_sleep)) + wake_up(&pool->output_sleep); + +- while (atomic_read(&pool->counter) > 0) { +- if (max_count == 0) { +- pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); +- break; +- } ++ while (atomic_read(&pool->counter) > 0) + schedule_timeout_uninterruptible(1); +- max_count--; +- } + + /* release all resources */ + spin_lock_irqsave(&pool->lock, flags); +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c +index 0bec02e89d51..450c5187eecb 100644 +--- a/sound/core/seq/seq_queue.c ++++ b/sound/core/seq/seq_queue.c +@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void) + } + } + ++static void queue_use(struct snd_seq_queue *queue, int client, int use); ++ + /* allocate a new queue - + * return queue index value or negative value for error + */ +@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) + if (q == NULL) + return -ENOMEM; + q->info_flags = info_flags; ++ queue_use(q, client, 1); + if (queue_list_add(q) < 0) { + queue_delete(q); + return -ENOMEM; + } +- snd_seq_queue_use(q->queue, client, 1); /* use this queue */ + return q->queue; + } + +@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client, + return result; + } + +- +-/* use or unuse this queue - +- * if it is the first client, starts the timer. 
+- * if it is not longer used by any clients, stop the timer. +- */ +-int snd_seq_queue_use(int queueid, int client, int use) ++/* use or unuse this queue */ ++static void queue_use(struct snd_seq_queue *queue, int client, int use) + { +- struct snd_seq_queue *queue; +- +- queue = queueptr(queueid); +- if (queue == NULL) +- return -EINVAL; +- mutex_lock(&queue->timer_mutex); + if (use) { + if (!test_and_set_bit(client, queue->clients_bitmap)) + queue->clients++; +@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use) + } else { + snd_seq_timer_close(queue); + } ++} ++ ++/* use or unuse this queue - ++ * if it is the first client, starts the timer. ++ * if it is not longer used by any clients, stop the timer. ++ */ ++int snd_seq_queue_use(int queueid, int client, int use) ++{ ++ struct snd_seq_queue *queue; ++ ++ queue = queueptr(queueid); ++ if (queue == NULL) ++ return -EINVAL; ++ mutex_lock(&queue->timer_mutex); ++ queue_use(queue, client, use); + mutex_unlock(&queue->timer_mutex); + queuefree(queue); + return 0; |