-rw-r--r--	0000_README	                   4
-rw-r--r--	1060_linux-4.9.61.patch	        2646
2 files changed, 2650 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 1396f036..664ecce7 100644
--- a/0000_README
+++ b/0000_README
@@ -283,6 +283,10 @@ Patch: 1059_linux-4.9.60.patch
From: http://www.kernel.org
Desc: Linux 4.9.60
+Patch: 1060_linux-4.9.61.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.61
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1060_linux-4.9.61.patch b/1060_linux-4.9.61.patch
new file mode 100644
index 00000000..b122ce9b
--- /dev/null
+++ b/1060_linux-4.9.61.patch
@@ -0,0 +1,2646 @@
+diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
+index ad440a2b8051..e926aea1147d 100644
+--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
++++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
+@@ -31,6 +31,12 @@ to deliver its interrupts via SPIs.
+ This also affects writes to the tval register, due to the implicit
+ counter read.
+
++- hisilicon,erratum-161010101 : A boolean property. Indicates the
++ presence of Hisilicon erratum 161010101, which says that reading the
++ counters is unreliable in some cases, and reads may return a value 32
++ beyond the correct value. This also affects writes to the tval
++ registers, due to the implicit counter read.
++
+ ** Optional properties:
+
+ - arm,cpu-registers-not-fw-configured : Firmware does not initialize
+diff --git a/Makefile b/Makefile
+index 2f7a386b1751..b56b99e20b30 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 60
++SUBLEVEL = 61
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index cc952cf8ec30..024f1b75b0a3 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -176,9 +176,9 @@
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- arm,double-linefill-incr = <1>;
++ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+- arm,double-linefill = <1>;
++ arm,double-linefill = <0>;
+ prefetch-data = <1>;
+ };
+
+diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
+index 2d7668848c5a..c60cfe9fd033 100644
+--- a/arch/arm/boot/dts/armada-38x.dtsi
++++ b/arch/arm/boot/dts/armada-38x.dtsi
+@@ -143,9 +143,9 @@
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- arm,double-linefill-incr = <1>;
++ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+- arm,double-linefill = <1>;
++ arm,double-linefill = <0>;
+ prefetch-data = <1>;
+ };
+
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
+index 34cba87f9200..aeecfa7e5ea3 100644
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -111,9 +111,9 @@
+ reg = <0x8000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- arm,double-linefill-incr = <1>;
++ arm,double-linefill-incr = <0>;
+ arm,double-linefill-wrap = <0>;
+- arm,double-linefill = <1>;
++ arm,double-linefill = <0>;
+ prefetch-data = <1>;
+ };
+
+diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
+index 55e0e3ea9cb6..bd12b98e2589 100644
+--- a/arch/arm/include/asm/Kbuild
++++ b/arch/arm/include/asm/Kbuild
+@@ -37,4 +37,3 @@ generic-y += termbits.h
+ generic-y += termios.h
+ generic-y += timex.h
+ generic-y += trace_clock.h
+-generic-y += unaligned.h
+diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
+new file mode 100644
+index 000000000000..ab905ffcf193
+--- /dev/null
++++ b/arch/arm/include/asm/unaligned.h
+@@ -0,0 +1,27 @@
++#ifndef __ASM_ARM_UNALIGNED_H
++#define __ASM_ARM_UNALIGNED_H
++
++/*
++ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
++ * but we don't want to use linux/unaligned/access_ok.h since that can lead
++ * to traps on unaligned stm/ldm or strd/ldrd.
++ */
++#include <asm/byteorder.h>
++
++#if defined(__LITTLE_ENDIAN)
++# include <linux/unaligned/le_struct.h>
++# include <linux/unaligned/be_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned __get_unaligned_le
++# define put_unaligned __put_unaligned_le
++#elif defined(__BIG_ENDIAN)
++# include <linux/unaligned/be_struct.h>
++# include <linux/unaligned/le_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned __get_unaligned_be
++# define put_unaligned __put_unaligned_be
++#else
++# error need to define endianess
++#endif
++
++#endif /* __ASM_ARM_UNALIGNED_H */
+diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
+index 0064b86a2c87..30a13647c54c 100644
+--- a/arch/arm/kvm/emulate.c
++++ b/arch/arm/kvm/emulate.c
+@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ u32 return_offset = (is_thumb) ? 2 : 4;
+
+ kvm_update_psr(vcpu, UND_MODE);
+- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
++ *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+ /* Branch to exception vector */
+ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ */
+ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+ {
+- unsigned long cpsr = *vcpu_cpsr(vcpu);
+- bool is_thumb = (cpsr & PSR_T_BIT);
+ u32 vect_offset;
+- u32 return_offset = (is_thumb) ? 4 : 0;
++ u32 return_offset = (is_pabt) ? 4 : 8;
+ bool is_lpae;
+
+ kvm_update_psr(vcpu, ABT_MODE);
+diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
+index 8679405b0b2b..92eab1d51785 100644
+--- a/arch/arm/kvm/hyp/Makefile
++++ b/arch/arm/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+
+ KVM=../../../../virt/kvm
+
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index f22826135c73..c743d1fd8286 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -112,7 +112,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+- bad = __get_user(val, &((u32 *)addr)[i]);
++ bad = get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
+index 14c4e3b14bcb..48b03547a969 100644
+--- a/arch/arm64/kvm/hyp/Makefile
++++ b/arch/arm64/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+
+ KVM=../../../../virt/kvm
+
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index da6a8cfa54a0..3556715a774e 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -33,12 +33,26 @@
+ #define LOWER_EL_AArch64_VECTOR 0x400
+ #define LOWER_EL_AArch32_VECTOR 0x600
+
++/*
++ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
++ */
++static const u8 return_offsets[8][2] = {
++ [0] = { 0, 0 }, /* Reset, unused */
++ [1] = { 4, 2 }, /* Undefined */
++ [2] = { 0, 0 }, /* SVC, unused */
++ [3] = { 4, 4 }, /* Prefetch abort */
++ [4] = { 8, 8 }, /* Data abort */
++ [5] = { 0, 0 }, /* HVC, unused */
++ [6] = { 4, 4 }, /* IRQ, unused */
++ [7] = { 4, 4 }, /* FIQ, unused */
++};
++
+ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ {
+ unsigned long cpsr;
+ unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+ bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+- u32 return_offset = (is_thumb) ? 4 : 0;
++ u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+ u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+ cpsr = mode | COMPAT_PSR_I_BIT;
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index d5ce34dcf4d9..1e28747d677f 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -42,6 +42,8 @@
+ #include <linux/memblock.h>
+ #include <linux/hugetlb.h>
+ #include <linux/slab.h>
++#include <linux/of_fdt.h>
++#include <linux/libfdt.h>
+
+ #include <asm/pgalloc.h>
+ #include <asm/page.h>
+@@ -421,6 +423,28 @@ static int __init parse_disable_radix(char *p)
+ }
+ early_param("disable_radix", parse_disable_radix);
+
++/*
++ * If we're running under a hypervisor, we currently can't do radix
++ * since we don't have the code to do the H_REGISTER_PROC_TBL hcall.
++ * We tell that we're running under a hypervisor by looking for the
++ * /chosen/ibm,architecture-vec-5 property.
++ */
++static void early_check_vec5(void)
++{
++ unsigned long root, chosen;
++ int size;
++ const u8 *vec5;
++
++ root = of_get_flat_dt_root();
++ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
++ if (chosen == -FDT_ERR_NOTFOUND)
++ return;
++ vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
++ if (!vec5)
++ return;
++ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
++}
++
+ void __init mmu_early_init_devtree(void)
+ {
+ /* Disable radix mode based on kernel command line. */
+@@ -428,6 +452,15 @@ void __init mmu_early_init_devtree(void)
+ if (disable_radix || !(mfmsr() & MSR_HV))
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+
++ /*
++ * Check /chosen/ibm,architecture-vec-5 if running as a guest.
++ * When running bare-metal, we can use radix if we like
++ * even though the ibm,architecture-vec-5 property created by
++ * skiboot doesn't have the necessary bits set.
++ */
++ if (early_radix_enabled() && !(mfmsr() & MSR_HV))
++ early_check_vec5();
++
+ if (early_radix_enabled())
+ radix__early_init_devtree();
+ else
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 303d28eb03a2..591cbdf615af 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -28,6 +28,7 @@
+ #include <linux/cpufeature.h>
+ #include <linux/init.h>
+ #include <linux/spinlock.h>
++#include <linux/fips.h>
+ #include <crypto/xts.h>
+ #include <asm/cpacf.h>
+
+@@ -501,6 +502,12 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ if (err)
+ return err;
+
++ /* In fips mode only 128 bit or 256 bit keys are valid */
++ if (fips_enabled && key_len != 32 && key_len != 64) {
++ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++ return -EINVAL;
++ }
++
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 32) ? CPACF_KM_XTS_128 :
+ (key_len == 64) ? CPACF_KM_XTS_256 : 0;
+diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
+index 1113389d0a39..fe7368a41aa8 100644
+--- a/arch/s390/crypto/prng.c
++++ b/arch/s390/crypto/prng.c
+@@ -110,22 +110,30 @@ static const u8 initial_parm_block[32] __initconst = {
+
+ /*** helper functions ***/
+
++/*
++ * generate_entropy:
++ * This algorithm produces 64 bytes of entropy data based on 1024
++ * individual stckf() invocations assuming that each stckf() value
++ * contributes 0.25 bits of entropy. So the caller gets 256 bit
++ * entropy per 64 byte or 4 bits entropy per byte.
++ */
+ static int generate_entropy(u8 *ebuf, size_t nbytes)
+ {
+ int n, ret = 0;
+- u8 *pg, *h, hash[32];
++ u8 *pg, *h, hash[64];
+
+- pg = (u8 *) __get_free_page(GFP_KERNEL);
++ /* allocate 2 pages */
++ pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
+ if (!pg) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ return -ENOMEM;
+ }
+
+ while (nbytes) {
+- /* fill page with urandom bytes */
+- get_random_bytes(pg, PAGE_SIZE);
+- /* exor page with stckf values */
+- for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
++ /* fill pages with urandom bytes */
++ get_random_bytes(pg, 2*PAGE_SIZE);
++ /* exor pages with 1024 stckf values */
++ for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
+ u64 *p = ((u64 *)pg) + n;
+ *p ^= get_tod_clock_fast();
+ }
+@@ -134,8 +142,8 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
+ h = hash;
+ else
+ h = ebuf;
+- /* generate sha256 from this page */
+- cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
++ /* hash over the filled pages */
++ cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
+ if (n < sizeof(hash))
+ memcpy(ebuf, hash, n);
+ ret += n;
+@@ -143,7 +151,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
+ nbytes -= n;
+ }
+
+- free_page((unsigned long)pg);
++ free_pages((unsigned long)pg, 1);
+ return ret;
+ }
+
+@@ -334,7 +342,7 @@ static int __init prng_sha512_selftest(void)
+ static int __init prng_sha512_instantiate(void)
+ {
+ int ret, datalen;
+- u8 seed[64];
++ u8 seed[64 + 32 + 16];
+
+ pr_debug("prng runs in SHA-512 mode "
+ "with chunksize=%d and reseed_limit=%u\n",
+@@ -357,12 +365,12 @@ static int __init prng_sha512_instantiate(void)
+ if (ret)
+ goto outfree;
+
+- /* generate initial seed bytestring, first 48 bytes of entropy */
+- ret = generate_entropy(seed, 48);
+- if (ret != 48)
++ /* generate initial seed bytestring, with 256 + 128 bits entropy */
++ ret = generate_entropy(seed, 64 + 32);
++ if (ret != 64 + 32)
+ goto outfree;
+ /* followed by 16 bytes of unique nonce */
+- get_tod_clock_ext(seed + 48);
++ get_tod_clock_ext(seed + 64 + 32);
+
+ /* initial seed of the ppno drng */
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+@@ -395,9 +403,9 @@ static void prng_sha512_deinstantiate(void)
+ static int prng_sha512_reseed(void)
+ {
+ int ret;
+- u8 seed[32];
++ u8 seed[64];
+
+- /* generate 32 bytes of fresh entropy */
++ /* fetch 256 bits of fresh entropy */
+ ret = generate_entropy(seed, sizeof(seed));
+ if (ret != sizeof(seed))
+ return ret;
+diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
+index 404d94c6c8bc..feba1b211898 100644
+--- a/drivers/base/power/wakeirq.c
++++ b/drivers/base/power/wakeirq.c
+@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+ struct wake_irq *wirq = _wirq;
+ int res;
+
++ /* Maybe abort suspend? */
++ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
++ pm_wakeup_event(wirq->dev, 0);
++
++ return IRQ_HANDLED;
++ }
++
+ /* We don't want RPM_ASYNC or RPM_NOWAIT here */
+ res = pm_runtime_resume(wirq->dev);
+ if (res < 0)
+diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
+index 51d4bac97ab3..01d0594c9716 100644
+--- a/drivers/clk/sunxi-ng/ccu_common.c
++++ b/drivers/clk/sunxi-ng/ccu_common.c
+@@ -70,6 +70,11 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ goto err_clk_unreg;
+
+ reset = kzalloc(sizeof(*reset), GFP_KERNEL);
++ if (!reset) {
++ ret = -ENOMEM;
++ goto err_alloc_reset;
++ }
++
+ reset->rcdev.of_node = node;
+ reset->rcdev.ops = &ccu_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+@@ -85,6 +90,16 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ return 0;
+
+ err_of_clk_unreg:
++ kfree(reset);
++err_alloc_reset:
++ of_clk_del_provider(node);
+ err_clk_unreg:
++ while (--i >= 0) {
++ struct clk_hw *hw = desc->hw_clks->hws[i];
++
++ if (!hw)
++ continue;
++ clk_hw_unregister(hw);
++ }
+ return ret;
+ }
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 286d4d61bd0b..530f255a898b 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1172,8 +1172,6 @@ static int cpufreq_online(unsigned int cpu)
+ if (new_policy) {
+ /* related_cpus should at least include policy->cpus. */
+ cpumask_copy(policy->related_cpus, policy->cpus);
+- /* Clear mask of registered CPUs */
+- cpumask_clear(policy->real_cpus);
+ }
+
+ /*
+diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
+index 17b19a68e269..71980c41283b 100644
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -278,8 +278,7 @@ static int ccp5_perform_aes(struct ccp_op *op)
+ CCP_AES_ENCRYPT(&function) = op->u.aes.action;
+ CCP_AES_MODE(&function) = op->u.aes.mode;
+ CCP_AES_TYPE(&function) = op->u.aes.type;
+- if (op->u.aes.mode == CCP_AES_MODE_CFB)
+- CCP_AES_SIZE(&function) = 0x7f;
++ CCP_AES_SIZE(&function) = op->u.aes.size;
+
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
+index e23c36c7691c..347b77108baa 100644
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -470,6 +470,7 @@ struct ccp_aes_op {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
++ unsigned int size;
+ };
+
+ struct ccp_xts_aes_op {
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 64deb006c3be..7d4cd518e602 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -692,6 +692,14 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ goto e_ctx;
+ }
+ }
++ switch (aes->mode) {
++ case CCP_AES_MODE_CFB: /* CFB128 only */
++ case CCP_AES_MODE_CTR:
++ op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
++ break;
++ default:
++ op.u.aes.size = 0;
++ }
+
+ /* Prepare the input and output data workareas. For in-place
+ * operations we need to set the dma direction to BIDIRECTIONAL
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index ed37e5908b91..12d417a4d4a8 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -1187,6 +1187,8 @@ config GPIO_MCP23S08
+ tristate "Microchip MCP23xxx I/O expander"
+ depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
++ select REGMAP_I2C if I2C
++ select REGMAP if SPI_MASTER
+ help
+ SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
+ I/O expanders.
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 7fe8fd884f06..743a12df6971 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -315,6 +315,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
++ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
++ AMD_PG_STATE_GATE);
++ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
++ AMD_CG_STATE_GATE);
+ }
+ } else {
+ schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
+@@ -340,6 +344,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
+ amdgpu_dpm_enable_vce(adev, true);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
++ amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
++ AMD_CG_STATE_UNGATE);
++ amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
++ AMD_PG_STATE_UNGATE);
++
+ }
+ }
+ mutex_unlock(&adev->vce.idle_mutex);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index ab3df6d75656..3f445df9124d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -89,6 +89,10 @@ static int uvd_v6_0_early_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (!(adev->flags & AMD_IS_APU) &&
++ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
++ return -ENOENT;
++
+ uvd_v6_0_set_ring_funcs(adev);
+ uvd_v6_0_set_irq_funcs(adev);
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+index fbd13fabdf2d..603d8425cca6 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+@@ -1193,6 +1193,17 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ if (!node)
+ return -ENOMEM;
+
++ /*
++ * To avoid an integer overflow for the later size computations, we
++ * enforce a maximum number of submitted commands here. This limit is
++ * sufficient for all conceivable usage cases of the G2D.
++ */
++ if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
++ req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
++ dev_err(dev, "number of submitted G2D commands exceeds limit\n");
++ return -EINVAL;
++ }
++
+ node->event = NULL;
+
+ if (req->event_type != G2D_EVENT_NOT) {
+@@ -1250,7 +1261,11 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
+ }
+
+- /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
++ /*
++ * Check the size of cmdlist. The 2 that is added last comes from
++ * the implicit G2D_BITBLT_START that is appended once we have
++ * checked all the submitted commands.
++ */
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
+ if (size > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "cmdlist size is too big\n");
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+index 3194e544ee27..faacc813254c 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+@@ -89,9 +89,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
+ goto err_node_put;
+ }
+
+- of_node_put(np);
+- clk_prepare_enable(tcon->ipg_clk);
++ ret = clk_prepare_enable(tcon->ipg_clk);
++ if (ret) {
++ dev_err(dev, "Couldn't enable the TCON clock\n");
++ goto err_node_put;
++ }
+
++ of_node_put(np);
+ dev_info(dev, "Using TCON in bypass mode\n");
+
+ return tcon;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index afa3d010c650..7fdc42e5aac8 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -3558,9 +3558,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
+ dev_priv->psr.psr2_support ? "supported" : "not supported");
+ }
+
+- /* Read the eDP Display control capabilities registers */
+- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
++ /*
++ * Read the eDP display control registers.
++ *
++ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
++ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
++ * set, but require eDP 1.4+ detection (e.g. for supported link rates
++ * method). The display control registers should read zero if they're
++ * not supported anyway.
++ */
++ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd))
+ DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 393973016b52..322c7ca188e9 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -31,11 +31,14 @@
+ #define BO_PINNED 0x2000
+
+ static struct msm_gem_submit *submit_create(struct drm_device *dev,
+- struct msm_gpu *gpu, int nr_bos, int nr_cmds)
++ struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
+ {
+ struct msm_gem_submit *submit;
+- int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+- (nr_cmds * sizeof(*submit->cmd));
++ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
++ ((u64)nr_cmds * sizeof(submit->cmd[0]));
++
++ if (sz > SIZE_MAX)
++ return NULL;
+
+ submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (!submit)
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index 6263ea82d6ac..8f11d347b3ec 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -80,6 +80,7 @@
+ #define ICIER_TEIE 0x40
+ #define ICIER_RIE 0x20
+ #define ICIER_NAKIE 0x10
++#define ICIER_SPIE 0x08
+
+ #define ICSR2_NACKF 0x10
+
+@@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
+ return IRQ_NONE;
+ }
+
+- if (riic->is_last || riic->err)
++ if (riic->is_last || riic->err) {
++ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+-
+- writeb(0, riic->base + RIIC_ICIER);
+- complete(&riic->msg_done);
++ }
+
+ return IRQ_HANDLED;
+ }
+@@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
+
+ if (riic->bytes_left == 1) {
+ /* STOP must come before we set ACKBT! */
+- if (riic->is_last)
++ if (riic->is_last) {
++ riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
++ }
+
+ riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
+
+- writeb(0, riic->base + RIIC_ICIER);
+- complete(&riic->msg_done);
+ } else {
+ riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
+ }
+@@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
++static irqreturn_t riic_stop_isr(int irq, void *data)
++{
++ struct riic_dev *riic = data;
++
++ /* read back registers to confirm writes have fully propagated */
++ writeb(0, riic->base + RIIC_ICSR2);
++ readb(riic->base + RIIC_ICSR2);
++ writeb(0, riic->base + RIIC_ICIER);
++ readb(riic->base + RIIC_ICIER);
++
++ complete(&riic->msg_done);
++
++ return IRQ_HANDLED;
++}
++
+ static u32 riic_func(struct i2c_adapter *adap)
+ {
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+@@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = {
+ { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
+ { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
+ { .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
++ { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
+ { .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
+ };
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 282c9fb0ba95..786f640fc462 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -325,6 +325,27 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
+ return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
+ }
+
++int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
++ int index, enum ib_gid_type *gid_type)
++{
++ struct ib_gid_attr attr;
++ union ib_gid gid;
++ int ret;
++
++ ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
++ if (ret)
++ return ret;
++
++ if (!attr.ndev)
++ return -ENODEV;
++
++ dev_put(attr.ndev);
++
++ *gid_type = attr.gid_type;
++
++ return 0;
++}
++
+ static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+ {
+ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 7d689903c87c..86e1e08125ff 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -892,6 +892,8 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+
+ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
+ int index);
++int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
++ int index, enum ib_gid_type *gid_type);
+
+ /* GSI QP helper functions */
+ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index aee3942ec68d..2665414b4875 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2226,6 +2226,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ {
+ enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+ int err;
++ enum ib_gid_type gid_type;
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+@@ -2244,10 +2245,16 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ if (!(ah->ah_flags & IB_AH_GRH))
+ return -EINVAL;
++ err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
++ &gid_type);
++ if (err)
++ return err;
+ memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
+ path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+ ah->grh.sgid_index);
+ path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
++ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
++ path->ecn_dscp = (ah->grh.traffic_class >> 2) & 0x3f;
+ } else {
+ path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->fl_free_ar |=
+diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
+index e69d338ab9be..ae550a180364 100644
+--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
++++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
+@@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
+ /* DST is not a frontend, attaching the ASIC */
+ if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
+ pr_err("%s: Could not find a Twinhan DST\n", __func__);
++ kfree(state);
+ break;
+ }
+ /* Attach other DST peripherals if any */
+diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
+index 518ad34f80d7..7f92144a1de3 100644
+--- a/drivers/media/platform/exynos4-is/fimc-is.c
++++ b/drivers/media/platform/exynos4-is/fimc-is.c
+@@ -825,12 +825,13 @@ static int fimc_is_probe(struct platform_device *pdev)
+ is->irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!is->irq) {
+ dev_err(dev, "no irq found\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_iounmap;
+ }
+
+ ret = fimc_is_get_clocks(is);
+ if (ret < 0)
+- return ret;
++ goto err_iounmap;
+
+ platform_set_drvdata(pdev, is);
+
+@@ -891,6 +892,8 @@ static int fimc_is_probe(struct platform_device *pdev)
+ free_irq(is->irq, is);
+ err_clk:
+ fimc_is_put_clocks(is);
++err_iounmap:
++ iounmap(is->pmu_regs);
+ return ret;
+ }
+
+@@ -947,6 +950,7 @@ static int fimc_is_remove(struct platform_device *pdev)
+ fimc_is_unregister_subdevs(is);
+ vb2_dma_contig_clear_max_seg_size(dev);
+ fimc_is_put_clocks(is);
++ iounmap(is->pmu_regs);
+ fimc_is_debugfs_remove(is);
+ release_firmware(is->fw.f_w);
+ fimc_is_free_cpu_memory(is);
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index 8b099fe1d592..71b65ab573ac 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
+ */
+ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
+ (ven_req->bRequest == 0x5) ||
+- (ven_req->bRequest == 0x6))) {
++ (ven_req->bRequest == 0x6) ||
++
++ /* Internal Master 3 Bus can send
++ * and receive only 4 bytes per time
++ */
++ (ven_req->bRequest == 0x2))) {
+ unsend_size = 0;
+ pdata = ven_req->pBuff;
+
+diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
+index 207cc497958a..8062d37b4ba4 100644
+--- a/drivers/mfd/ab8500-sysctrl.c
++++ b/drivers/mfd/ab8500-sysctrl.c
+@@ -98,7 +98,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+- return -EINVAL;
++ return -EPROBE_DEFER;
+
+ bank = (reg >> 8);
+ if (!valid_bank(bank))
+@@ -114,11 +114,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+ u8 bank;
+
+ if (sysctrl_dev == NULL)
+- return -EINVAL;
++ return -EPROBE_DEFER;
+
+ bank = (reg >> 8);
+- if (!valid_bank(bank))
++ if (!valid_bank(bank)) {
++ pr_err("invalid bank\n");
+ return -EINVAL;
++ }
+
+ return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
+ (u8)(reg & 0xFF), mask, value);
+@@ -145,9 +147,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static const struct of_device_id ab8500_sysctrl_match[] = {
++ { .compatible = "stericsson,ab8500-sysctrl", },
++ {}
++};
++
+ static struct platform_driver ab8500_sysctrl_driver = {
+ .driver = {
+ .name = "ab8500-sysctrl",
++ .of_match_table = ab8500_sysctrl_match,
+ },
+ .probe = ab8500_sysctrl_probe,
+ .remove = ab8500_sysctrl_remove,
+diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
+index ba130be32e61..9617fc323e15 100644
+--- a/drivers/mfd/axp20x.c
++++ b/drivers/mfd/axp20x.c
+@@ -205,14 +205,14 @@ static struct resource axp22x_pek_resources[] = {
+ static struct resource axp288_power_button_resources[] = {
+ {
+ .name = "PEK_DBR",
+- .start = AXP288_IRQ_POKN,
+- .end = AXP288_IRQ_POKN,
++ .start = AXP288_IRQ_POKP,
++ .end = AXP288_IRQ_POKP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "PEK_DBF",
+- .start = AXP288_IRQ_POKP,
+- .end = AXP288_IRQ_POKP,
++ .start = AXP288_IRQ_POKN,
++ .end = AXP288_IRQ_POKN,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index e2af61f7e3b6..451d417eb451 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -1320,6 +1320,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
+ return -EOPNOTSUPP;
+ }
+
++ if (!mei_cl_is_connected(cl))
++ return -ENODEV;
++
+ rets = pm_runtime_get(dev->dev);
+ if (rets < 0 && rets != -EINPROGRESS) {
+ pm_runtime_put_noidle(dev->dev);
+diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
+index c531deef3258..8f27fe35e8af 100644
+--- a/drivers/mmc/host/s3cmci.c
++++ b/drivers/mmc/host/s3cmci.c
+@@ -21,6 +21,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/io.h>
+
+diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
+index 8b8470c4e6d0..f9b2a771096b 100644
+--- a/drivers/mtd/nand/sunxi_nand.c
++++ b/drivers/mtd/nand/sunxi_nand.c
+@@ -320,6 +320,10 @@ static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
+
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
++ if (!ret)
++ ret = -ETIMEDOUT;
++ else
++ ret = 0;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ } else {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 3066d9c99984..e2512ab41168 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -36,9 +36,9 @@
+ /*****************************************************************************/
+
+ /* Timeout in micro-sec */
+-#define ADMIN_CMD_TIMEOUT_US (1000000)
++#define ADMIN_CMD_TIMEOUT_US (3000000)
+
+-#define ENA_ASYNC_QUEUE_DEPTH 4
++#define ENA_ASYNC_QUEUE_DEPTH 16
+ #define ENA_ADMIN_QUEUE_DEPTH 32
+
+ #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 69d7e9ed5bc8..c5eaf7616939 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -100,7 +100,7 @@
+ /* Number of queues to check for missing queues per timer service */
+ #define ENA_MONITORED_TX_QUEUES 4
+ /* Max timeout packets before device reset */
+-#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
++#define MAX_NUM_OF_TIMEOUTED_PACKETS 128
+
+ #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
+
+@@ -116,9 +116,9 @@
+ #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
+
+ /* ENA device should send keep alive msg every 1 sec.
+- * We wait for 3 sec just to be on the safe side.
++ * We wait for 6 sec just to be on the safe side.
+ */
+-#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ)
++#define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ)
+
+ #define ENA_MMIO_DISABLE_REG_READ BIT(0)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 20e569bd978a..333df540b375 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -97,6 +97,8 @@ enum board_idx {
+ BCM57407_NPAR,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
++ BCM57452,
++ BCM57454,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
+ };
+@@ -131,6 +133,8 @@ static const struct {
+ { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
++ { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
++ { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+ };
+@@ -166,6 +170,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
++ { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
++ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
+ #ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 707bc4680b9b..6ea10a9f33e8 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -28,6 +28,7 @@
+ #include <linux/of_mdio.h>
+ #include <linux/of_net.h>
+ #include <linux/phy.h>
++#include <linux/phy_fixed.h>
+ #include <linux/platform_device.h>
+ #include <linux/skbuff.h>
+ #include <net/hwbm.h>
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index 01cf094bee18..8f8496102926 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -33,6 +33,7 @@
+
+ /* Extended Registers */
+ #define DP83867_RGMIICTL 0x0032
++#define DP83867_STRAP_STS1 0x006E
+ #define DP83867_RGMIIDCTL 0x0086
+
+ #define DP83867_SW_RESET BIT(15)
+@@ -56,9 +57,13 @@
+ #define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
+ #define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+
++/* STRAP_STS1 bits */
++#define DP83867_STRAP_STS1_RESERVED BIT(11)
++
+ /* PHY CTRL bits */
+ #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+ #define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
++#define DP83867_PHYCR_RESERVED_MASK BIT(11)
+
+ /* RGMIIDCTL bits */
+ #define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+@@ -141,7 +146,7 @@ static int dp83867_of_init(struct phy_device *phydev)
+ static int dp83867_config_init(struct phy_device *phydev)
+ {
+ struct dp83867_private *dp83867;
+- int ret, val;
++ int ret, val, bs;
+ u16 delay;
+
+ if (!phydev->priv) {
+@@ -164,6 +169,22 @@ static int dp83867_config_init(struct phy_device *phydev)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
++
++ /* The code below checks if "port mirroring" N/A MODE4 has been
++ * enabled during power on bootstrap.
++ *
++ * Such N/A mode enabled by mistake can put PHY IC in some
++ * internal testing mode and disable RGMII transmission.
++ *
++ * In this particular case one needs to check STRAP_STS1
++ * register's bit 11 (marked as RESERVED).
++ */
++
++ bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
++ DP83867_DEVADDR);
++ if (bs & DP83867_STRAP_STS1_RESERVED)
++ val &= ~DP83867_PHYCR_RESERVED_MASK;
++
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
+index 766c63bf05c4..45226dbee5ce 100644
+--- a/drivers/net/wireless/ath/ath10k/ahb.c
++++ b/drivers/net/wireless/ath/ath10k/ahb.c
+@@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
+
+ MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+
++#define QCA4019_SRAM_ADDR 0x000C0000
++#define QCA4019_SRAM_LEN 0x00040000 /* 256 kb */
++
+ static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
+ {
+ return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
+@@ -699,6 +702,25 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
+ return ret;
+ }
+
++static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
++{
++ u32 val = 0, region = addr & 0xfffff;
++
++ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
++
++ if (region >= QCA4019_SRAM_ADDR && region <=
++ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
++ /* SRAM contents for QCA4019 can be directly accessed and
++ * no conversions are required
++ */
++ val |= region;
++ } else {
++ val |= 0x100000 | region;
++ }
++
++ return val;
++}
++
+ static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+@@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
+ ar_pci->mem_len = ar_ahb->mem_len;
+ ar_pci->ar = ar;
+ ar_pci->bus_ops = &ath10k_ahb_bus_ops;
++ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 410bcdaa9e87..25b8d501d437 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -840,31 +840,35 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+ ath10k_pci_rx_post(ar);
+ }
+
+-static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
++static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+ {
+- u32 val = 0;
++ u32 val = 0, region = addr & 0xfffff;
+
+- switch (ar->hw_rev) {
+- case ATH10K_HW_QCA988X:
+- case ATH10K_HW_QCA9887:
+- case ATH10K_HW_QCA6174:
+- case ATH10K_HW_QCA9377:
+- val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- CORE_CTRL_ADDRESS) &
+- 0x7ff) << 21;
+- break;
+- case ATH10K_HW_QCA9888:
+- case ATH10K_HW_QCA99X0:
+- case ATH10K_HW_QCA9984:
+- case ATH10K_HW_QCA4019:
+- val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+- break;
+- }
++ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
++ & 0x7ff) << 21;
++ val |= 0x100000 | region;
++ return val;
++}
++
++static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
++{
++ u32 val = 0, region = addr & 0xfffff;
+
+- val |= 0x100000 | (addr & 0xfffff);
++ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
++ val |= 0x100000 | region;
+ return val;
+ }
+
++static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
++{
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++
++ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
++ return -ENOTSUPP;
++
++ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
++}
++
+ /*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, when applicable, and single user
+@@ -3171,6 +3175,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
+ bool pci_ps;
+ int (*pci_soft_reset)(struct ath10k *ar);
+ int (*pci_hard_reset)(struct ath10k *ar);
++ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
+ switch (pci_dev->device) {
+ case QCA988X_2_0_DEVICE_ID:
+@@ -3178,12 +3183,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA9887_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9887;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA6164_2_1_DEVICE_ID:
+ case QCA6174_2_1_DEVICE_ID:
+@@ -3191,30 +3198,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
+ pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA99X0_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA99X0;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9984_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9984;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9888_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9888;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9377_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9377;
+ pci_ps = true;
+ pci_soft_reset = NULL;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
++ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ default:
+ WARN_ON(1);
+@@ -3241,6 +3253,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
+ ar_pci->bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->pci_soft_reset = pci_soft_reset;
+ ar_pci->pci_hard_reset = pci_hard_reset;
++ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
+
+ ar->id.vendor = pdev->vendor;
+ ar->id.device = pdev->device;
+diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
+index 9854ad56b2de..577bb87ab2f6 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.h
++++ b/drivers/net/wireless/ath/ath10k/pci.h
+@@ -238,6 +238,11 @@ struct ath10k_pci {
+ /* Chip specific pci full reset function */
+ int (*pci_hard_reset)(struct ath10k *ar);
+
++ /* chip specific methods for converting target CPU virtual address
++ * space to CE address space
++ */
++ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
++
+ /* Keep this entry in the last, memory for struct ath10k_ahb is
+ * allocated (ahb support enabled case) in the continuation of
+ * this struct.
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+index e64557c35553..6f8a4b074c31 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
+@@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+ {
+ void *dump;
+ size_t ramsize;
++ int err;
+
+ ramsize = brcmf_bus_get_ramsize(bus);
+- if (ramsize) {
+- dump = vzalloc(len + ramsize);
+- if (!dump)
+- return -ENOMEM;
+- memcpy(dump, data, len);
+- brcmf_bus_get_memdump(bus, dump + len, ramsize);
+- dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
++ if (!ramsize)
++ return -ENOTSUPP;
++
++ dump = vzalloc(len + ramsize);
++ if (!dump)
++ return -ENOMEM;
++
++ memcpy(dump, data, len);
++ err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
++ if (err) {
++ vfree(dump);
++ return err;
+ }
++
++ dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 0556d139b719..092ae0024f22 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -499,15 +499,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ /*
+- * handle legacy hostapd as well, where station may be added
+- * only after assoc.
++ * Handle legacy hostapd as well, where station may be added
++ * only after assoc. Take care of the case where we send a
++ * deauth to a station that we don't have.
+ */
+- if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
++ if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
++ ieee80211_is_deauth(fc))
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ if (info->hw_queue == info->control.vif->cab_queue)
+ return info->hw_queue;
+
+- WARN_ON_ONCE(1);
++ WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (ieee80211_is_mgmt(fc))
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index d11cdbb8fba3..7b5cf6d1181a 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -672,8 +672,9 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
+ WARN_ON(!dev->block_cfg_access);
+
+ dev->block_cfg_access = 0;
+- wake_up_all(&pci_cfg_wait);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
++
++ wake_up_all(&pci_cfg_wait);
+ }
+ EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
+
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 3455f752d5e4..0e9a9dbeb184 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -730,7 +730,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
+ ret = 0;
+ out:
+ kfree(masks);
+- return 0;
++ return ret;
+ }
+
+ static void msix_program_entries(struct pci_dev *dev,
+diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
+index 9f713b832ba3..5c768c4627d3 100644
+--- a/drivers/platform/x86/intel_mid_thermal.c
++++ b/drivers/platform/x86/intel_mid_thermal.c
+@@ -550,6 +550,7 @@ static const struct platform_device_id therm_id_table[] = {
+ { "msic_thermal", 1 },
+ { }
+ };
++MODULE_DEVICE_TABLE(platform, therm_id_table);
+
+ static struct platform_driver mid_thermal_driver = {
+ .driver = {
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 1de089019268..5ecd40884f01 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -1704,8 +1704,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ /* check for for attention message */
+ if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
+ device = dasd_device_from_cdev_locked(cdev);
+- device->discipline->check_attention(device, irb->esw.esw1.lpum);
+- dasd_put_device(device);
++ if (!IS_ERR(device)) {
++ device->discipline->check_attention(device,
++ irb->esw.esw1.lpum);
++ dasd_put_device(device);
++ }
+ }
+
+ if (!cqr)
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index 6678d1fd897b..065f11a1964d 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -2954,16 +2954,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
+ return;
+
+ BUG_ON(fibptr == NULL);
+- dev = fibptr->dev;
+-
+- scsi_dma_unmap(scsicmd);
+
+- /* expose physical device if expose_physicald flag is on */
+- if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+- && expose_physicals > 0)
+- aac_expose_phy_device(scsicmd);
++ dev = fibptr->dev;
+
+ srbreply = (struct aac_srb_reply *) fib_data(fibptr);
++
+ scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
+
+ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+@@ -2976,158 +2971,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
+ */
+ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+ - le32_to_cpu(srbreply->data_xfer_length));
+- /*
+- * First check the fib status
+- */
++ }
+
+- if (le32_to_cpu(srbreply->status) != ST_OK) {
+- int len;
+
+- printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
+- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+- SCSI_SENSE_BUFFERSIZE);
+- scsicmd->result = DID_ERROR << 16
+- | COMMAND_COMPLETE << 8
+- | SAM_STAT_CHECK_CONDITION;
+- memcpy(scsicmd->sense_buffer,
+- srbreply->sense_data, len);
+- }
++ scsi_dma_unmap(scsicmd);
+
+- /*
+- * Next check the srb status
+- */
+- switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+- case SRB_STATUS_ERROR_RECOVERY:
+- case SRB_STATUS_PENDING:
+- case SRB_STATUS_SUCCESS:
+- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+- break;
+- case SRB_STATUS_DATA_OVERRUN:
+- switch (scsicmd->cmnd[0]) {
+- case READ_6:
+- case WRITE_6:
+- case READ_10:
+- case WRITE_10:
+- case READ_12:
+- case WRITE_12:
+- case READ_16:
+- case WRITE_16:
+- if (le32_to_cpu(srbreply->data_xfer_length)
+- < scsicmd->underflow)
+- printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
+- else
+- printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
+- scsicmd->result = DID_ERROR << 16
+- | COMMAND_COMPLETE << 8;
+- break;
+- case INQUIRY: {
+- scsicmd->result = DID_OK << 16
+- | COMMAND_COMPLETE << 8;
+- break;
+- }
+- default:
+- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+- break;
+- }
+- break;
+- case SRB_STATUS_ABORTED:
+- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+- break;
+- case SRB_STATUS_ABORT_FAILED:
+- /*
+- * Not sure about this one - but assuming the
+- * hba was trying to abort for some reason
+- */
+- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+- break;
+- case SRB_STATUS_PARITY_ERROR:
+- scsicmd->result = DID_PARITY << 16
+- | MSG_PARITY_ERROR << 8;
+- break;
+- case SRB_STATUS_NO_DEVICE:
+- case SRB_STATUS_INVALID_PATH_ID:
+- case SRB_STATUS_INVALID_TARGET_ID:
+- case SRB_STATUS_INVALID_LUN:
+- case SRB_STATUS_SELECTION_TIMEOUT:
+- scsicmd->result = DID_NO_CONNECT << 16
+- | COMMAND_COMPLETE << 8;
+- break;
++ /* expose physical device if expose_physicald flag is on */
++ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
++ && expose_physicals > 0)
++ aac_expose_phy_device(scsicmd);
+
+- case SRB_STATUS_COMMAND_TIMEOUT:
+- case SRB_STATUS_TIMEOUT:
+- scsicmd->result = DID_TIME_OUT << 16
+- | COMMAND_COMPLETE << 8;
+- break;
++ /*
++ * First check the fib status
++ */
+
+- case SRB_STATUS_BUSY:
+- scsicmd->result = DID_BUS_BUSY << 16
+- | COMMAND_COMPLETE << 8;
+- break;
++ if (le32_to_cpu(srbreply->status) != ST_OK) {
++ int len;
+
+- case SRB_STATUS_BUS_RESET:
+- scsicmd->result = DID_RESET << 16
+- | COMMAND_COMPLETE << 8;
+- break;
++ pr_warn("aac_srb_callback: srb failed, status = %d\n",
++ le32_to_cpu(srbreply->status));
++ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
++ SCSI_SENSE_BUFFERSIZE);
++ scsicmd->result = DID_ERROR << 16
++ | COMMAND_COMPLETE << 8
++ | SAM_STAT_CHECK_CONDITION;
++ memcpy(scsicmd->sense_buffer,
++ srbreply->sense_data, len);
++ }
+
+- case SRB_STATUS_MESSAGE_REJECTED:
++ /*
++ * Next check the srb status
++ */
++ switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
++ case SRB_STATUS_ERROR_RECOVERY:
++ case SRB_STATUS_PENDING:
++ case SRB_STATUS_SUCCESS:
++ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
++ break;
++ case SRB_STATUS_DATA_OVERRUN:
++ switch (scsicmd->cmnd[0]) {
++ case READ_6:
++ case WRITE_6:
++ case READ_10:
++ case WRITE_10:
++ case READ_12:
++ case WRITE_12:
++ case READ_16:
++ case WRITE_16:
++ if (le32_to_cpu(srbreply->data_xfer_length)
++ < scsicmd->underflow)
++ pr_warn("aacraid: SCSI CMD underflow\n");
++ else
++ pr_warn("aacraid: SCSI CMD Data Overrun\n");
+ scsicmd->result = DID_ERROR << 16
+- | MESSAGE_REJECT << 8;
++ | COMMAND_COMPLETE << 8;
++ break;
++ case INQUIRY:
++ scsicmd->result = DID_OK << 16
++ | COMMAND_COMPLETE << 8;
+ break;
+- case SRB_STATUS_REQUEST_FLUSHED:
+- case SRB_STATUS_ERROR:
+- case SRB_STATUS_INVALID_REQUEST:
+- case SRB_STATUS_REQUEST_SENSE_FAILED:
+- case SRB_STATUS_NO_HBA:
+- case SRB_STATUS_UNEXPECTED_BUS_FREE:
+- case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+- case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+- case SRB_STATUS_DELAYED_RETRY:
+- case SRB_STATUS_BAD_FUNCTION:
+- case SRB_STATUS_NOT_STARTED:
+- case SRB_STATUS_NOT_IN_USE:
+- case SRB_STATUS_FORCE_ABORT:
+- case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+ default:
++ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
++ break;
++ }
++ break;
++ case SRB_STATUS_ABORTED:
++ scsicmd->result = DID_ABORT << 16 | ABORT << 8;
++ break;
++ case SRB_STATUS_ABORT_FAILED:
++ /*
++ * Not sure about this one - but assuming the
++ * hba was trying to abort for some reason
++ */
++ scsicmd->result = DID_ERROR << 16 | ABORT << 8;
++ break;
++ case SRB_STATUS_PARITY_ERROR:
++ scsicmd->result = DID_PARITY << 16
++ | MSG_PARITY_ERROR << 8;
++ break;
++ case SRB_STATUS_NO_DEVICE:
++ case SRB_STATUS_INVALID_PATH_ID:
++ case SRB_STATUS_INVALID_TARGET_ID:
++ case SRB_STATUS_INVALID_LUN:
++ case SRB_STATUS_SELECTION_TIMEOUT:
++ scsicmd->result = DID_NO_CONNECT << 16
++ | COMMAND_COMPLETE << 8;
++ break;
++
++ case SRB_STATUS_COMMAND_TIMEOUT:
++ case SRB_STATUS_TIMEOUT:
++ scsicmd->result = DID_TIME_OUT << 16
++ | COMMAND_COMPLETE << 8;
++ break;
++
++ case SRB_STATUS_BUSY:
++ scsicmd->result = DID_BUS_BUSY << 16
++ | COMMAND_COMPLETE << 8;
++ break;
++
++ case SRB_STATUS_BUS_RESET:
++ scsicmd->result = DID_RESET << 16
++ | COMMAND_COMPLETE << 8;
++ break;
++
++ case SRB_STATUS_MESSAGE_REJECTED:
++ scsicmd->result = DID_ERROR << 16
++ | MESSAGE_REJECT << 8;
++ break;
++ case SRB_STATUS_REQUEST_FLUSHED:
++ case SRB_STATUS_ERROR:
++ case SRB_STATUS_INVALID_REQUEST:
++ case SRB_STATUS_REQUEST_SENSE_FAILED:
++ case SRB_STATUS_NO_HBA:
++ case SRB_STATUS_UNEXPECTED_BUS_FREE:
++ case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
++ case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
++ case SRB_STATUS_DELAYED_RETRY:
++ case SRB_STATUS_BAD_FUNCTION:
++ case SRB_STATUS_NOT_STARTED:
++ case SRB_STATUS_NOT_IN_USE:
++ case SRB_STATUS_FORCE_ABORT:
++ case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
++ default:
+ #ifdef AAC_DETAILED_STATUS_INFO
+- printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
+- le32_to_cpu(srbreply->srb_status) & 0x3F,
+- aac_get_status_string(
+- le32_to_cpu(srbreply->srb_status) & 0x3F),
+- scsicmd->cmnd[0],
+- le32_to_cpu(srbreply->scsi_status));
++ pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
++ le32_to_cpu(srbreply->srb_status) & 0x3F,
++ aac_get_status_string(
++ le32_to_cpu(srbreply->srb_status) & 0x3F),
++ scsicmd->cmnd[0],
++ le32_to_cpu(srbreply->scsi_status));
+ #endif
+- if ((scsicmd->cmnd[0] == ATA_12)
+- || (scsicmd->cmnd[0] == ATA_16)) {
+- if (scsicmd->cmnd[2] & (0x01 << 5)) {
+- scsicmd->result = DID_OK << 16
+- | COMMAND_COMPLETE << 8;
+- break;
+- } else {
+- scsicmd->result = DID_ERROR << 16
+- | COMMAND_COMPLETE << 8;
+- break;
+- }
++ /*
++		 * When the CC bit is SET by the host in an ATA pass-thru CDB,
++		 * the driver is supposed to return DID_OK.
++		 *
++		 * When the CC bit is RESET by the host, the driver should
++		 * return DID_ERROR.
++ */
++ if ((scsicmd->cmnd[0] == ATA_12)
++ || (scsicmd->cmnd[0] == ATA_16)) {
++
++ if (scsicmd->cmnd[2] & (0x01 << 5)) {
++ scsicmd->result = DID_OK << 16
++ | COMMAND_COMPLETE << 8;
++ break;
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+- break;
++ break;
+ }
++ } else {
++ scsicmd->result = DID_ERROR << 16
++ | COMMAND_COMPLETE << 8;
++ break;
+ }
+- if (le32_to_cpu(srbreply->scsi_status)
+- == SAM_STAT_CHECK_CONDITION) {
+- int len;
++ }
++ if (le32_to_cpu(srbreply->scsi_status)
++ == SAM_STAT_CHECK_CONDITION) {
++ int len;
+
+- scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+- len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+- SCSI_SENSE_BUFFERSIZE);
++ scsicmd->result |= SAM_STAT_CHECK_CONDITION;
++ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
++ SCSI_SENSE_BUFFERSIZE);
+ #ifdef AAC_DETAILED_STATUS_INFO
+- printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+- le32_to_cpu(srbreply->status), len);
++ pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
++ le32_to_cpu(srbreply->status), len);
+ #endif
+- memcpy(scsicmd->sense_buffer,
+- srbreply->sense_data, len);
+- }
++ memcpy(scsicmd->sense_buffer,
++ srbreply->sense_data, len);
+ }
++
+ /*
+ * OR in the scsi status (already shifted up a bit)
+ */
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index bd04bd01d34a..a156451553a7 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -1960,7 +1960,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
+ */
+ static void
+ megasas_build_syspd_fusion(struct megasas_instance *instance,
+- struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
++ struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
++ bool fp_possible)
+ {
+ u32 device_id;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+@@ -2064,6 +2065,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
+ u16 sge_count;
+ u8 cmd_type;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
++ struct MR_PRIV_DEVICE *mr_device_priv_data;
++ mr_device_priv_data = scp->device->hostdata;
+
+ /* Zero out some fields so they don't get reused */
+ memset(io_request->LUN, 0x0, 8);
+@@ -2092,12 +2095,14 @@ megasas_build_io_fusion(struct megasas_instance *instance,
+ megasas_build_ld_nonrw_fusion(instance, scp, cmd);
+ break;
+ case READ_WRITE_SYSPDIO:
++ megasas_build_syspd_fusion(instance, scp, cmd, true);
++ break;
+ case NON_READ_WRITE_SYSPDIO:
+- if (instance->secure_jbod_support &&
+- (cmd_type == NON_READ_WRITE_SYSPDIO))
+- megasas_build_syspd_fusion(instance, scp, cmd, 0);
++ if (instance->secure_jbod_support ||
++ mr_device_priv_data->is_tm_capable)
++ megasas_build_syspd_fusion(instance, scp, cmd, false);
+ else
+- megasas_build_syspd_fusion(instance, scp, cmd, 1);
++ megasas_build_syspd_fusion(instance, scp, cmd, true);
+ break;
+ default:
+ break;
+diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+index 3d46b1b1fa18..7de992c19ff6 100644
+--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+@@ -17,6 +17,7 @@
+ #include <linux/irqdomain.h>
+ #include <linux/msi.h>
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ /*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+index 7a6ac640752f..eaeb3c51e14b 100644
+--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
++++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+@@ -17,6 +17,7 @@
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include "../include/mc-bus.h"
++#include "fsl-mc-private.h"
+
+ static struct irq_chip its_msi_irq_chip = {
+ .name = "fsl-mc-bus-msi",
+diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+index 6fc985571cba..e533088c017c 100644
+--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
++++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+@@ -1213,23 +1213,21 @@ struct hsm_action_item {
+ * \retval buffer
+ */
+ static inline char *hai_dump_data_field(struct hsm_action_item *hai,
+- char *buffer, int len)
++ char *buffer, size_t len)
+ {
+- int i, sz, data_len;
++ int i, data_len;
+ char *ptr;
+
+ ptr = buffer;
+- sz = len;
+ data_len = hai->hai_len - sizeof(*hai);
+- for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
+- int cnt;
+-
+- cnt = snprintf(ptr, sz, "%.2X",
+- (unsigned char)hai->hai_data[i]);
+- ptr += cnt;
+- sz -= cnt;
++ for (i = 0; (i < data_len) && (len > 2); i++) {
++ snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
++ ptr += 2;
++ len -= 2;
+ }
++
+ *ptr = '\0';
++
+ return buffer;
+ }
+
+diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+index 3c48b4fb96f1..d18ab3f28c70 100644
+--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
++++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+@@ -546,6 +546,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
+ if (!lock)
+ return NULL;
+
++ if (lock->l_export && lock->l_export->exp_failed) {
++ CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
++ lock, lock->l_export);
++ LDLM_LOCK_PUT(lock);
++ return NULL;
++ }
++
+ /* It's unlikely but possible that someone marked the lock as
+ * destroyed after we did handle2object on it
+ */
+diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
+index 26f3a37873a7..0cb70c3a1a0b 100644
+--- a/drivers/staging/lustre/lustre/llite/rw26.c
++++ b/drivers/staging/lustre/lustre/llite/rw26.c
+@@ -354,6 +354,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
+ if (!lli->lli_has_smd)
+ return -EBADF;
+
++ /* Check EOF by ourselves */
++ if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
++ return 0;
++
+ /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
+ if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
+ return -EINVAL;
+diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+index 7dbb2b946acf..cd19ce811e62 100644
+--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
++++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+@@ -744,16 +744,18 @@ static int lmv_hsm_req_count(struct lmv_obd *lmv,
+ /* count how many requests must be sent to the given target */
+ for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
+ curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
++ if (IS_ERR(curr_tgt))
++ return PTR_ERR(curr_tgt);
+ if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
+ nr++;
+ }
+ return nr;
+ }
+
+-static void lmv_hsm_req_build(struct lmv_obd *lmv,
+- struct hsm_user_request *hur_in,
+- const struct lmv_tgt_desc *tgt_mds,
+- struct hsm_user_request *hur_out)
++static int lmv_hsm_req_build(struct lmv_obd *lmv,
++ struct hsm_user_request *hur_in,
++ const struct lmv_tgt_desc *tgt_mds,
++ struct hsm_user_request *hur_out)
+ {
+ int i, nr_out;
+ struct lmv_tgt_desc *curr_tgt;
+@@ -764,6 +766,8 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
+ for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
+ curr_tgt = lmv_find_target(lmv,
+ &hur_in->hur_user_item[i].hui_fid);
++ if (IS_ERR(curr_tgt))
++ return PTR_ERR(curr_tgt);
+ if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
+ hur_out->hur_user_item[nr_out] =
+ hur_in->hur_user_item[i];
+@@ -773,6 +777,8 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
+ hur_out->hur_request.hr_itemcount = nr_out;
+ memcpy(hur_data(hur_out), hur_data(hur_in),
+ hur_in->hur_request.hr_data_len);
++
++ return 0;
+ }
+
+ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
+@@ -1052,15 +1058,17 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
+ } else {
+ /* split fid list to their respective MDS */
+ for (i = 0; i < count; i++) {
+- unsigned int nr, reqlen;
+- int rc1;
+ struct hsm_user_request *req;
++ size_t reqlen;
++ int nr, rc1;
+
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ nr = lmv_hsm_req_count(lmv, hur, tgt);
++ if (nr < 0)
++ return nr;
+ if (nr == 0) /* nothing for this MDS */
+ continue;
+
+@@ -1072,10 +1080,13 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
+ if (!req)
+ return -ENOMEM;
+
+- lmv_hsm_req_build(lmv, hur, tgt, req);
++ rc1 = lmv_hsm_req_build(lmv, hur, tgt, req);
++ if (rc1 < 0)
++ goto hsm_req_err;
+
+ rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
+ req, uarg);
++hsm_req_err:
+ if (rc1 != 0 && rc == 0)
+ rc = rc1;
+ kvfree(req);
+diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
+index 72f39308eebb..9d34848d5458 100644
+--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
++++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
+@@ -1264,20 +1264,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
+ */
+ if (req->rq_ops->hpreq_check) {
+ rc = req->rq_ops->hpreq_check(req);
+- /**
+- * XXX: Out of all current
+- * ptlrpc_hpreq_ops::hpreq_check(), only
+- * ldlm_cancel_hpreq_check() can return an error code;
+- * other functions assert in similar places, which seems
+- * odd. What also does not seem right is that handlers
+- * for those RPCs do not assert on the same checks, but
+- * rather handle the error cases. e.g. see
+- * ost_rw_hpreq_check(), and ost_brw_read(),
+- * ost_brw_write().
++ if (rc == -ESTALE) {
++ req->rq_status = rc;
++ ptlrpc_error(req);
++ }
++		/* hpreq_check() can only return an error,
++		 * 0 for a normal request,
++		 * or 1 for a high priority request
++		 */
+- if (rc < 0)
+- return rc;
+- LASSERT(rc == 0 || rc == 1);
++ LASSERT(rc <= 1);
+ }
+
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
+index 67ab58084e8a..68fd65e80906 100644
+--- a/drivers/staging/rtl8712/ieee80211.h
++++ b/drivers/staging/rtl8712/ieee80211.h
+@@ -138,51 +138,51 @@ struct ieee_ibss_seq {
+ };
+
+ struct ieee80211_hdr {
+- u16 frame_ctl;
+- u16 duration_id;
++ __le16 frame_ctl;
++ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+- u16 seq_ctl;
++ __le16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+-} __packed;
++} __packed __aligned(2);
+
+ struct ieee80211_hdr_3addr {
+- u16 frame_ctl;
+- u16 duration_id;
++ __le16 frame_ctl;
++ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+- u16 seq_ctl;
+-} __packed;
++ __le16 seq_ctl;
++} __packed __aligned(2);
+
+ struct ieee80211_hdr_qos {
+- u16 frame_ctl;
+- u16 duration_id;
++ __le16 frame_ctl;
++ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+- u16 seq_ctl;
++ __le16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+- u16 qc;
+-} __packed;
++ __le16 qc;
++} __packed __aligned(2);
+
+ struct ieee80211_hdr_3addr_qos {
+- u16 frame_ctl;
+- u16 duration_id;
++ __le16 frame_ctl;
++ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+- u16 seq_ctl;
+- u16 qc;
++ __le16 seq_ctl;
++ __le16 qc;
+ } __packed;
+
+ struct eapol {
+ u8 snap[6];
+- u16 ethertype;
++ __be16 ethertype;
+ u8 version;
+ u8 type;
+- u16 length;
++ __le16 length;
+ } __packed;
+
+ enum eap_type {
+@@ -514,13 +514,13 @@ struct ieee80211_security {
+ */
+
+ struct ieee80211_header_data {
+- u16 frame_ctl;
+- u16 duration_id;
++ __le16 frame_ctl;
++ __le16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+- u16 seq_ctrl;
+-};
++ __le16 seq_ctrl;
++} __packed __aligned(2);
+
+ #define BEACON_PROBE_SSID_ID_POSITION 12
+
+@@ -552,18 +552,18 @@ struct ieee80211_info_element {
+ /*
+ * These are the data types that can make up management packets
+ *
+- u16 auth_algorithm;
+- u16 auth_sequence;
+- u16 beacon_interval;
+- u16 capability;
++ __le16 auth_algorithm;
++ __le16 auth_sequence;
++ __le16 beacon_interval;
++ __le16 capability;
+ u8 current_ap[ETH_ALEN];
+- u16 listen_interval;
++ __le16 listen_interval;
+ struct {
+ u16 association_id:14, reserved:2;
+ } __packed;
+- u32 time_stamp[2];
+- u16 reason;
+- u16 status;
++ __le32 time_stamp[2];
++ __le16 reason;
++ __le16 status;
+ */
+
+ #define IEEE80211_DEFAULT_TX_ESSID "Penguin"
+@@ -571,16 +571,16 @@ struct ieee80211_info_element {
+
+ struct ieee80211_authentication {
+ struct ieee80211_header_data header;
+- u16 algorithm;
+- u16 transaction;
+- u16 status;
++ __le16 algorithm;
++ __le16 transaction;
++ __le16 status;
+ } __packed;
+
+ struct ieee80211_probe_response {
+ struct ieee80211_header_data header;
+- u32 time_stamp[2];
+- u16 beacon_interval;
+- u16 capability;
++ __le32 time_stamp[2];
++ __le16 beacon_interval;
++ __le16 capability;
+ struct ieee80211_info_element info_element;
+ } __packed;
+
+@@ -590,16 +590,16 @@ struct ieee80211_probe_request {
+
+ struct ieee80211_assoc_request_frame {
+ struct ieee80211_hdr_3addr header;
+- u16 capability;
+- u16 listen_interval;
++ __le16 capability;
++ __le16 listen_interval;
+ struct ieee80211_info_element_hdr info_element;
+ } __packed;
+
+ struct ieee80211_assoc_response_frame {
+ struct ieee80211_hdr_3addr header;
+- u16 capability;
+- u16 status;
+- u16 aid;
++ __le16 capability;
++ __le16 status;
++ __le16 aid;
+ } __packed;
+
+ struct ieee80211_txb {
+diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
+index be38364c8a7c..c47863936858 100644
+--- a/drivers/staging/rtl8712/rtl871x_xmit.c
++++ b/drivers/staging/rtl8712/rtl871x_xmit.c
+@@ -344,7 +344,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
+ * some settings above.
+ */
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
+- pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
++ pattrib->priority =
++ (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
+ return _SUCCESS;
+ }
+
+@@ -485,7 +486,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
+ struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+- u16 *fctrl = &pwlanhdr->frame_ctl;
++ __le16 *fctrl = &pwlanhdr->frame_ctl;
+
+ memset(hdr, 0, WLANHDR_OFFSET);
+ SetFrameSubType(fctrl, pattrib->subtype);
+@@ -574,7 +575,7 @@ static sint r8712_put_snap(u8 *data, u16 h_proto)
+ snap->oui[0] = oui[0];
+ snap->oui[1] = oui[1];
+ snap->oui[2] = oui[2];
+- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
++ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
+ return SNAP_SIZE + sizeof(u16);
+ }
+
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index 26e5e8507f03..9122ba25bb00 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -277,8 +277,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
+ err = xenbus_transaction_start(&xbt);
+ if (err)
+ return;
+- if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+- pr_err("Unable to read sysrq code in control/sysrq\n");
++ err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
++ if (err < 0) {
++ /*
++ * The Xenstore watch fires directly after registering it and
++		 * after a suspend/resume cycle. So ENOENT is not an error and
++		 * may happen in those cases.
++ */
++ if (err != -ENOENT)
++ pr_err("Error %d reading sysrq code in control/sysrq\n",
++ err);
+ xenbus_transaction_end(xbt, 1);
+ return;
+ }
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index dd3e236d7a2b..d9cbda269462 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
+- if (unlikely(direntry->d_name.len >
++ if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
++ direntry->d_name.len >
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+ return -ENAMETOOLONG;
+
+@@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+
+ rc = check_name(direntry, tcon);
+ if (rc)
+- goto out_free_xid;
++ goto out;
+
+ server = tcon->ses->server;
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 846b57ff58de..64056c6eb857 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2136,8 +2136,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ * We search using buddy data only if the order of the request
+ * is greater than equal to the sbi_s_mb_order2_reqs
+ * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
++ * We also support searching for power-of-two requests only for
++	 * requests up to the maximum buddy size we have constructed.
+ */
+- if (i >= sbi->s_mb_order2_reqs) {
++ if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
+ /*
+ * This should tell if fe_len is exactly power of 2
+ */
+@@ -2207,7 +2209,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ }
+
+ ac->ac_groups_scanned++;
+- if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
++ if (cr == 0)
+ ext4_mb_simple_scan_group(ac, &e4b);
+ else if (cr == 1 && sbi->s_stripe &&
+ !(ac->ac_g_ex.fe_len % sbi->s_stripe))
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f72535e1898f..1f581791b39d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2628,9 +2628,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
+
+ if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
+ ret = sbi->s_stripe;
+- else if (stripe_width <= sbi->s_blocks_per_group)
++ else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
+ ret = stripe_width;
+- else if (stride <= sbi->s_blocks_per_group)
++ else if (stride && stride <= sbi->s_blocks_per_group)
+ ret = stride;
+ else
+ ret = 0;
+diff --git a/fs/namei.c b/fs/namei.c
+index 66209f720146..e7d125c23aa6 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2971,10 +2971,16 @@ static inline int open_to_namei_flags(int flag)
+
+ static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
+ {
++ struct user_namespace *s_user_ns;
+ int error = security_path_mknod(dir, dentry, mode, 0);
+ if (error)
+ return error;
+
++ s_user_ns = dir->dentry->d_sb->s_user_ns;
++ if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
++ !kgid_has_mapping(s_user_ns, current_fsgid()))
++ return -EOVERFLOW;
++
+ error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
+ if (error)
+ return error;
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index f72712f6c28d..06089becca60 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7310,13 +7310,24 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+
+ static int ocfs2_trim_extent(struct super_block *sb,
+ struct ocfs2_group_desc *gd,
+- u32 start, u32 count)
++ u64 group, u32 start, u32 count)
+ {
+ u64 discard, bcount;
++ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ bcount = ocfs2_clusters_to_blocks(sb, count);
+- discard = le64_to_cpu(gd->bg_blkno) +
+- ocfs2_clusters_to_blocks(sb, start);
++ discard = ocfs2_clusters_to_blocks(sb, start);
++
++ /*
++ * For the first cluster group, the gd->bg_blkno is not at the start
++ * of the group, but at an offset from the start. If we add it while
++ * calculating discard for first group, we will wrongly start fstrim a
++	 * few blocks after the desired start block and the range can cross
++ * over into the next cluster group. So, add it only if this is not
++ * the first cluster group.
++ */
++ if (group != osb->first_cluster_group_blkno)
++ discard += le64_to_cpu(gd->bg_blkno);
+
+ trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
+
+@@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
+ }
+
+ static int ocfs2_trim_group(struct super_block *sb,
+- struct ocfs2_group_desc *gd,
++ struct ocfs2_group_desc *gd, u64 group,
+ u32 start, u32 max, u32 minbits)
+ {
+ int ret = 0, count = 0, next;
+@@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super_block *sb,
+ next = ocfs2_find_next_bit(bitmap, max, start);
+
+ if ((next - start) >= minbits) {
+- ret = ocfs2_trim_extent(sb, gd,
++ ret = ocfs2_trim_extent(sb, gd, group,
+ start, next - start);
+ if (ret < 0) {
+ mlog_errno(ret);
+@@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ }
+
+ gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+- cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
++ cnt = ocfs2_trim_group(sb, gd, group,
++ first_bit, last_bit, minlen);
+ brelse(gd_bh);
+ gd_bh = NULL;
+ if (cnt < 0) {
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index 0bd8a611eb83..fef5d2e114be 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -284,6 +284,9 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
+ }
++ } else {
++ if (unlikely(len > datalen - dp))
++ goto data_overrun_error;
+ }
+
+ if (flags & FLAG_CONS) {
+diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
+index 880a7d1d27d2..4ccff66523c9 100644
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg)
+ }
+
+ static DEFINE_MUTEX(thread_mutex);
++static int simple_thread_cnt;
+
+ void foo_bar_reg(void)
+ {
++ mutex_lock(&thread_mutex);
++ if (simple_thread_cnt++)
++ goto out;
++
+ pr_info("Starting thread for foo_bar_fn\n");
+ /*
+ * We shouldn't be able to start a trace when the module is
+ * unloading (there's other locks to prevent that). But
+ * for consistency sake, we still take the thread_mutex.
+ */
+- mutex_lock(&thread_mutex);
+ simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
++ out:
+ mutex_unlock(&thread_mutex);
+ }
+
+ void foo_bar_unreg(void)
+ {
+- pr_info("Killing thread for foo_bar_fn\n");
+- /* protect against module unloading */
+ mutex_lock(&thread_mutex);
++ if (--simple_thread_cnt)
++ goto out;
++
++ pr_info("Killing thread for foo_bar_fn\n");
+ if (simple_tsk_fn)
+ kthread_stop(simple_tsk_fn);
+ simple_tsk_fn = NULL;
++ out:
+ mutex_unlock(&thread_mutex);
+ }
+
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 32969f630438..4e9b4d23e20e 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -452,34 +452,33 @@ static long keyring_read(const struct key *keyring,
+ char __user *buffer, size_t buflen)
+ {
+ struct keyring_read_iterator_context ctx;
+- unsigned long nr_keys;
+- int ret;
++ long ret;
+
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
+
+- nr_keys = keyring->keys.nr_leaves_on_tree;
+- if (nr_keys == 0)
+- return 0;
+-
+- /* Calculate how much data we could return */
+- if (!buffer || !buflen)
+- return nr_keys * sizeof(key_serial_t);
+-
+- /* Copy the IDs of the subscribed keys into the buffer */
+- ctx.buffer = (key_serial_t __user *)buffer;
+- ctx.buflen = buflen;
+- ctx.count = 0;
+- ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+- if (ret < 0) {
+- kleave(" = %d [iterate]", ret);
+- return ret;
++ /* Copy as many key IDs as fit into the buffer */
++ if (buffer && buflen) {
++ ctx.buffer = (key_serial_t __user *)buffer;
++ ctx.buflen = buflen;
++ ctx.count = 0;
++ ret = assoc_array_iterate(&keyring->keys,
++ keyring_read_iterator, &ctx);
++ if (ret < 0) {
++ kleave(" = %ld [iterate]", ret);
++ return ret;
++ }
+ }
+
+- kleave(" = %zu [ok]", ctx.count);
+- return ctx.count;
++ /* Return the size of the buffer needed */
++ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
++ if (ret <= buflen)
++ kleave("= %ld [ok]", ret);
++ else
++ kleave("= %ld [buffer too small]", ret);
++ return ret;
+ }
+
+ /*
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index c41148353e19..45ef5915462c 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ if (atomic)
+ read_lock(&grp->list_lock);
+ else
+- down_read(&grp->list_mutex);
++ down_read_nested(&grp->list_mutex, hop);
+ list_for_each_entry(subs, &grp->list_head, src_list) {
+ /* both ports ready? */
+ if (atomic_read(&subs->ref_count) != 2)
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index 6a437eb66115..59127b6ef39e 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -133,7 +133,8 @@ enum {
+ #endif /* CONFIG_X86_X32 */
+ };
+
+-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
++static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
+ {
+ void __user *argp = compat_ptr(arg);
+
+@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ case SNDRV_TIMER_IOCTL_PAUSE:
+ case SNDRV_TIMER_IOCTL_PAUSE_OLD:
+ case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
+- return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
++ return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ case SNDRV_TIMER_IOCTL_GPARAMS32:
+ return snd_timer_user_gparams_compat(file, argp);
+ case SNDRV_TIMER_IOCTL_INFO32:
+@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ }
+ return -ENOIOCTLCMD;
+ }
++
++static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ struct snd_timer_user *tu = file->private_data;
++ long ret;
++
++ mutex_lock(&tu->ioctl_lock);
++ ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
++ mutex_unlock(&tu->ioctl_lock);
++ return ret;
++}
+diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
+index 439aa3ff1f99..79dcb1e34baa 100644
+--- a/sound/soc/codecs/adau17x1.c
++++ b/sound/soc/codecs/adau17x1.c
+@@ -91,6 +91,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
+ return 0;
+ }
+
++static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
++ struct adau *adau = snd_soc_codec_get_drvdata(codec);
++
++ /*
++ * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
++ * avoid losing SNR (workaround from ADI). This must be done after
++ * the ADC(s) have been enabled. According to the data sheet, it is
++ * normally illegal to set this bit when the sampling rate is 96 kHz,
++ * but according to ADI it is acceptable for this workaround.
++ */
++ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++ ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
++ regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++ ADAU17X1_CONVERTER0_ADOSR, 0);
++
++ return 0;
++}
++
+ static const char * const adau17x1_mono_stereo_text[] = {
+ "Stereo",
+ "Mono Left Channel (L+R)",
+@@ -122,7 +143,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
+ &adau17x1_dac_mode_mux),
+
+- SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
++ SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
++ adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
+ SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
+diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
+index bf04b7efee40..db350035fad7 100644
+--- a/sound/soc/codecs/adau17x1.h
++++ b/sound/soc/codecs/adau17x1.h
+@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
+
+ #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7
+
++#define ADAU17X1_CONVERTER0_ADOSR BIT(3)
++
+
+ #endif
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index bd19fad2d91b..c17f262f0834 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -807,7 +807,6 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
+ static struct platform_driver snd_byt_rt5640_mc_driver = {
+ .driver = {
+ .name = "bytcr_rt5640",
+- .pm = &snd_soc_pm_ops,
+ },
+ .probe = snd_byt_rt5640_mc_probe,
+ };
+diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
+index eabff3a857d0..ae49f8199e45 100644
+--- a/sound/soc/intel/boards/bytcr_rt5651.c
++++ b/sound/soc/intel/boards/bytcr_rt5651.c
+@@ -317,7 +317,6 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
+ static struct platform_driver snd_byt_rt5651_mc_driver = {
+ .driver = {
+ .name = "bytcr_rt5651",
+- .pm = &snd_soc_pm_ops,
+ },
+ .probe = snd_byt_rt5651_mc_probe,
+ };
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 4e778eae1510..415a9c38d9f0 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -309,10 +309,11 @@ __add_event(struct list_head *list, int *idx,
+
+ event_attr_init(attr);
+
+- evsel = perf_evsel__new_idx(attr, (*idx)++);
++ evsel = perf_evsel__new_idx(attr, *idx);
+ if (!evsel)
+ return NULL;
+
++ (*idx)++;
+ evsel->cpus = cpu_map__get(cpus);
+ evsel->own_cpus = cpu_map__get(cpus);
+