-rw-r--r--  0000_README             |    4
-rw-r--r--  1070_linux-4.9.71.patch | 5365
2 files changed, 5369 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 085faede..e02f58e6 100644
--- a/0000_README
+++ b/0000_README
@@ -323,6 +323,10 @@ Patch: 1069_linux-4.9.70.patch
 From: http://www.kernel.org
 Desc: Linux 4.9.70
 
+Patch: 1070_linux-4.9.71.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.71
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1070_linux-4.9.71.patch b/1070_linux-4.9.71.patch
new file mode 100644
index 00000000..1db34638
--- /dev/null
+++ b/1070_linux-4.9.71.patch
@@ -0,0 +1,5365 @@
+diff --git a/Makefile b/Makefile
+index 7ad3271a1a1d..5f2736bb4877 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 70
++SUBLEVEL = 71
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 3635b8662724..92110c2c6c59 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
+ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
+ GZFLAGS :=-9
+ 
+-ifneq ($(CONFIG_RELOCATABLE),)
+-LDFLAGS_vmlinux += -pie -shared -Bsymbolic
++ifeq ($(CONFIG_RELOCATABLE), y)
++# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
++# for relative relocs, since this leads to better Image compression
++# with the relocation offsets always being zero.
++LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
++	$(call ld-option, --no-apply-dynamic-relocs)
+ endif
+ 
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
+diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
+index 3c1bd640042a..88c4b77ec8d2 100644
+--- a/arch/blackfin/Kconfig
++++ b/arch/blackfin/Kconfig
+@@ -319,11 +319,14 @@ config BF53x
+ 
+ config GPIO_ADI
+ 	def_bool y
++	depends on !PINCTRL
+ 	depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
+ 
+-config PINCTRL
++config PINCTRL_BLACKFIN_ADI2
+ 	def_bool y
+-	depends on BF54x || BF60x
++	depends on (BF54x || BF60x)
++	select PINCTRL
++	select PINCTRL_ADI2
+ 
+ config MEM_MT48LC64M4A2FB_7E
+ 	bool
+diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
+index f3337ee03621..a93cf06a4d6f 100644
+--- a/arch/blackfin/Kconfig.debug
++++ b/arch/blackfin/Kconfig.debug
+@@ -17,6 +17,7 @@ config DEBUG_VERBOSE
+ 
+ config DEBUG_MMRS
+ 	tristate "Generate Blackfin MMR tree"
++	depends on !PINCTRL
+ 	select DEBUG_FS
+ 	help
+ 	  Create a tree of Blackfin MMRs via the debugfs tree. If
+diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
+index 140faa16685a..1311e6b13991 100644
+--- a/arch/openrisc/include/asm/uaccess.h
++++ b/arch/openrisc/include/asm/uaccess.h
+@@ -211,7 +211,7 @@
+ 	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
+ 	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
+ 	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
+-	case 8: __get_user_asm2(x, ptr, retval); \
++	case 8: __get_user_asm2(x, ptr, retval); break; \
+ 	default: (x) = __get_user_bad(); \
+ 	} \
+ } while (0)
+diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
+index 7b2ca16b1eb4..991c6a517ddc 100644
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -516,7 +516,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
+ {
+ 	if (s1 < s2)
+ 		return 1;
+-	if (s2 > s1)
++	if (s1 > s2)
+ 		return -1;
+ 
+ 	return memcmp(d1, d2, s1);
+diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
+index 83bebeec0fea..0f7b16e29347 100644
+--- a/arch/powerpc/platforms/powernv/opal-async.c
++++ b/arch/powerpc/platforms/powernv/opal-async.c
+@@ -39,18 +39,18 @@ int __opal_async_get_token(void)
+ 	int token;
+ 
+ 	spin_lock_irqsave(&opal_async_comp_lock, flags);
+-	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
++	token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
+ 	if (token >= opal_max_async_tokens) {
+ 		token = -EBUSY;
+ 		goto out;
+ 	}
+ 
+-	if (__test_and_set_bit(token, opal_async_token_map)) {
++	if (!__test_and_clear_bit(token, opal_async_complete_map)) {
+ 		token = -EBUSY;
+ 		goto out;
+ 	}
+ 
+-	__clear_bit(token, opal_async_complete_map);
++	__set_bit(token, opal_async_token_map);
+ 
+ out:
+ 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index efe8b6bb168b..b33faa0015cc 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -289,7 +289,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
+ {
+ 	unsigned long ret_freq;
+ 
+-	ret_freq = cpufreq_quick_get(cpu) * 1000ul;
++	ret_freq = cpufreq_get(cpu) * 1000ul;
+ 
+ 	/*
+ 	 * If the backend cpufreq driver does not exist,
+diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
+index f267ee0afc08..716353b247de 100644
+--- a/arch/powerpc/sysdev/ipic.c
++++ b/arch/powerpc/sysdev/ipic.c
+@@ -845,12 +845,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
+ 
+ u32 ipic_get_mcp_status(void)
+ {
+-	return ipic_read(primary_ipic->regs, IPIC_SERMR);
++	return ipic_read(primary_ipic->regs, IPIC_SERSR);
+ }
+ 
+ void ipic_clear_mcp_status(u32 mask)
+ {
+-	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
++	ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
+ }
+ 
+ /* Return an interrupt vector or 0 if no interrupt is pending. */
+diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
+index 399a29d067d6..cb91a64a99e7 100644
+--- a/arch/x86/crypto/salsa20_glue.c
++++ b/arch/x86/crypto/salsa20_glue.c
+@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
+ 
+ 	salsa20_ivsetup(ctx, walk.iv);
+ 
+-	if (likely(walk.nbytes == nbytes))
+-	{
+-		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
+-				      walk.dst.virt.addr, nbytes);
+-		return blkcipher_walk_done(desc, &walk, 0);
+-	}
+-
+ 	while (walk.nbytes >= 64) {
+ 		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
+ 				      walk.dst.virt.addr,
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index b89bef95f63b..11cc600f4df0 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -720,7 +720,7 @@ static void __init acpi_set_irq_model_ioapic(void)
+ #ifdef CONFIG_ACPI_HOTPLUG_CPU
+ #include <acpi/processor.h>
+ 
+-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
++static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+ {
+ #ifdef CONFIG_ACPI_NUMA
+ 	int nid;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9aa62ab13ae8..a929ca03b7ed 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9543,10 +9543,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+ 		return false;
+ 
+ 	page = nested_get_page(vcpu, vmcs12->msr_bitmap);
+-	if (!page) {
+-		WARN_ON(1);
++	if (!page)
+ 		return false;
+-	}
+ 	msr_bitmap_l1 = (unsigned long *)kmap(page);
+ 	if (!msr_bitmap_l1) {
+ 		nested_release_page_clean(page);
+diff --git a/block/badblocks.c b/block/badblocks.c
+index 6ebcef282314..2fe6c117ac96 100644
+--- a/block/badblocks.c
++++ b/block/badblocks.c
+@@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
+ 
+ 	if (bb->shift < 0)
+ 		/* badblocks are disabled */
+-		return 0;
++		return 1;
+ 
+ 	if (bb->shift) {
+ 		/* round the start down, and the end up */
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index dcf5ce3ba4bf..4bc701b32ce2 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -311,6 +311,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
+ 	for (i = 0; i < set->nr_hw_queues; i++) {
+ 		struct blk_mq_tags *tags = set->tags[i];
+ 
++		if (!tags)
++			continue;
++
+ 		for (j = 0; j < tags->nr_tags; j++) {
+ 			if (!tags->rqs[j])
+ 				continue;
+diff --git a/crypto/hmac.c b/crypto/hmac.c
+index 72e38c098bb3..ba07fb6221ae 100644
+--- a/crypto/hmac.c
++++ b/crypto/hmac.c
+@@ -194,11 +194,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	salg = shash_attr_alg(tb[1], 0, 0);
+ 	if (IS_ERR(salg))
+ 		return PTR_ERR(salg);
++	alg = &salg->base;
+ 
++	/* The underlying hash algorithm must be unkeyed */
+ 	err = -EINVAL;
++	if (crypto_shash_alg_has_setkey(salg))
++		goto out_put_alg;
++
+ 	ds = salg->digestsize;
+ 	ss = salg->statesize;
+-	alg = &salg->base;
+ 	if (ds > alg->cra_blocksize ||
+ 	    ss < alg->cra_blocksize)
+ 		goto out_put_alg;
+diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
+index 0b66dc824606..cad395d70d78 100644
+--- a/crypto/rsa_helper.c
++++ b/crypto/rsa_helper.c
+@@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+ 		return -EINVAL;
+ 
+ 	if (fips_enabled) {
+-		while (!*ptr && n_sz) {
++		while (n_sz && !*ptr) {
+ 			ptr++;
+ 			n_sz--;
+ 		}
+diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+index f550b5d94630..d7da0eea5622 100644
+--- a/crypto/salsa20_generic.c
++++ b/crypto/salsa20_generic.c
+@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
+ 
+ 	salsa20_ivsetup(ctx, walk.iv);
+ 
+-	if (likely(walk.nbytes == nbytes))
+-	{
+-		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+-				      walk.src.virt.addr, nbytes);
+-		return blkcipher_walk_done(desc, &walk, 0);
+-	}
+-
+ 	while (walk.nbytes >= 64) {
+ 		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+ 				      walk.src.virt.addr,
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 4d8a671d1614..9bd5044d467b 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -24,11 +24,12 @@
+ 
+ static const struct crypto_type crypto_shash_type;
+ 
+-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+-			   unsigned int keylen)
++int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
++		    unsigned int keylen)
+ {
+ 	return -ENOSYS;
+ }
++EXPORT_SYMBOL_GPL(shash_no_setkey);
+ 
+ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
+ 				  unsigned int keylen)
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index ae22f05d5936..e3af318af2db 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -342,7 +342,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
+ 			}
+ 
+ 			sg_init_aead(sg, xbuf,
+-				    *b_size + (enc ? authsize : 0));
++				    *b_size + (enc ? 0 : authsize));
+ 
+ 			sg_init_aead(sgout, xoutbuf,
+ 				    *b_size + (enc ? authsize : 0));
+@@ -350,7 +350,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
+ 			sg_set_buf(&sg[0], assoc, aad_size);
+ 			sg_set_buf(&sgout[0], assoc, aad_size);
+ 
+-			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
++			aead_request_set_crypt(req, sg, sgout,
++					       *b_size + (enc ? 0 : authsize),
++					       iv);
+ 			aead_request_set_ad(req, aad_size);
+ 
+ 			if (secs)
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 3de3b6b8f0f1..f43a586236ea 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
+ 
+ void __weak arch_unregister_cpu(int cpu) {}
+ 
+-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+-{
+-	return -ENODEV;
+-}
+-
+ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
+ {
+ 	unsigned long long sta;
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 56190d00fd87..0a3ca20f99af 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1197,7 +1197,6 @@ static int __init acpi_init(void)
+ 	acpi_wakeup_device_init();
+ 	acpi_debugger_init();
+ 	acpi_setup_sb_notify_handler();
+-	acpi_set_processor_mapping();
+ 	return 0;
+ }
+ 
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index 5c78ee1860b0..fd59ae871db3 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -280,79 +280,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ }
+ EXPORT_SYMBOL_GPL(acpi_get_cpuid);
+ 
+-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+-static bool __init
+-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
+-{
+-	int type, id;
+-	u32 acpi_id;
+-	acpi_status status;
+-	acpi_object_type acpi_type;
+-	unsigned long long tmp;
+-	union acpi_object object = { 0 };
+-	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+-
+-	status = acpi_get_type(handle, &acpi_type);
+-	if (ACPI_FAILURE(status))
+-		return false;
+-
+-	switch (acpi_type) {
+-	case ACPI_TYPE_PROCESSOR:
+-		status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+-		if (ACPI_FAILURE(status))
+-			return false;
+-		acpi_id = object.processor.proc_id;
+-
+-		/* validate the acpi_id */
+-		if(acpi_processor_validate_proc_id(acpi_id))
+-			return false;
+-		break;
+-	case ACPI_TYPE_DEVICE:
+-		status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
+-		if (ACPI_FAILURE(status))
+-			return false;
+-		acpi_id = tmp;
+-		break;
+-	default:
+-		return false;
+-	}
+-
+-	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+-
+-	*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
+-	id = acpi_map_cpuid(*phys_id, acpi_id);
+-
+-	if (id < 0)
+-		return false;
+-	*cpuid = id;
+-	return true;
+-}
+-
+-static acpi_status __init
+-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
+-			   void **rv)
+-{
+-	phys_cpuid_t phys_id;
+-	int cpu_id;
+-
+-	if (!map_processor(handle, &phys_id, &cpu_id))
+-		return AE_ERROR;
+-
+-	acpi_map_cpu2node(handle, cpu_id, phys_id);
+-	return AE_OK;
+-}
+-
+-void __init acpi_set_processor_mapping(void)
+-{
+-	/* Set persistent cpu <-> node mapping for all processors. */
+-	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+-			    ACPI_UINT32_MAX, set_processor_node_mapping,
+-			    NULL, NULL, NULL);
+-}
+-#else
+-void __init acpi_set_processor_mapping(void) {}
+-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+-
+ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
+ 			 u64 *phys_addr, int *ioapic_id)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 693028659ccc..3ae950c82922 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1059,6 +1059,10 @@ static int btusb_open(struct hci_dev *hdev)
+ 	}
+ 
+ 	data->intf->needs_remote_wakeup = 1;
++	/* device specific wakeup source enabled and required for USB
++	 * remote wakeup while host is suspended
++	 */
++	device_wakeup_enable(&data->udev->dev);
+ 
+ 	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
+ 		goto done;
+@@ -1122,6 +1126,7 @@ static int btusb_close(struct hci_dev *hdev)
+ 		goto failed;
+ 
+ 	data->intf->needs_remote_wakeup = 0;
++	device_wakeup_disable(&data->udev->dev);
+ 	usb_autopm_put_interface(data->intf);
+ 
+ failed:
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index f0249899fc96..45d7ecc66b22 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -1280,6 +1280,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
+ 
+ 	/* Perf driver registration */
+ 	ccn->dt.pmu = (struct pmu) {
++		.module = THIS_MODULE,
+ 		.attr_groups = arm_ccn_pmu_attr_groups,
+ 		.task_ctx_nr = perf_invalid_context,
+ 		.event_init = arm_ccn_pmu_event_init,
+diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
+index c0e8e1f196aa..2bfaf22e6ffc 100644
+--- a/drivers/clk/hisilicon/clk-hi6220.c
++++ b/drivers/clk/hisilicon/clk-hi6220.c
+@@ -144,7 +144,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
+ 	{ HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, },
+ 	{ HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
+ 	{ HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
+-	{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
++	{ HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, },
+ };
+ 
+ static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
+diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
+index ce8ea10407e4..93a19667003d 100644
+--- a/drivers/clk/imx/clk-imx6q.c
++++ b/drivers/clk/imx/clk-imx6q.c
+@@ -487,7 +487,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
+ 	clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
+ 	clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
+-	clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
++	clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4);
+ 	clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
+ 	clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
+ 	clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
+diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
+index 9f24fcfa304f..e425e50173c5 100644
+--- a/drivers/clk/mediatek/clk-mtk.h
++++ b/drivers/clk/mediatek/clk-mtk.h
+@@ -185,6 +185,7 @@ struct mtk_pll_data {
+ 	uint32_t pcw_reg;
+ 	int pcw_shift;
+ 	const struct mtk_pll_div_table *div_table;
++	const char *parent_name;
+ };
+ 
+ void mtk_clk_register_plls(struct device_node *node,
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index 0c2deac17ce9..1502384a3093 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -302,7 +302,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
+ 
+ 	init.name = data->name;
+ 	init.ops = &mtk_pll_ops;
+-	init.parent_names = &parent_name;
++	if (data->parent_name)
++		init.parent_names = &data->parent_name;
++	else
++		init.parent_names = &parent_name;
+ 	init.num_parents = 1;
+ 
+ 	clk = clk_register(NULL, &pll->hw);
+diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
+index 8e2db5ead8da..af520d81525f 100644
+--- a/drivers/clk/tegra/clk-tegra30.c
++++ b/drivers/clk/tegra/clk-tegra30.c
+@@ -963,7 +963,7 @@ static void __init tegra30_super_clk_init(void)
+ 	 * U71 divider of cclk_lp.
+ 	 */
+ 	clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
+-				clk_base + SUPER_CCLKG_DIVIDER, 0,
++				clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ 				TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ 	clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
+ 
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 6b535262ac5d..3db94e81bc14 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1107,12 +1107,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+ 	switch (order) {
+ 	case 0 ... 1:
+ 		return &unmap_pool[0];
++#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ 	case 2 ... 4:
+ 		return &unmap_pool[1];
+ 	case 5 ... 7:
+ 		return &unmap_pool[2];
+ 	case 8:
+ 		return &unmap_pool[3];
++#endif
+ 	default:
+ 		BUG();
+ 		return NULL;
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index fbb75514dfb4..e0bd578a253a 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -158,6 +158,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
+ #define PATTERN_OVERWRITE	0x20
+ #define PATTERN_COUNT_MASK	0x1f
+ 
++/* poor man's completion - we want to use wait_event_freezable() on it */
++struct dmatest_done {
++	bool			done;
++	wait_queue_head_t	*wait;
++};
++
+ struct dmatest_thread {
+ 	struct list_head	node;
+ 	struct dmatest_info	*info;
+@@ -166,6 +172,8 @@ struct dmatest_thread {
+ 	u8			**srcs;
+ 	u8			**dsts;
+ 	enum dma_transaction_type type;
++	wait_queue_head_t done_wait;
++	struct dmatest_done test_done;
+ 	bool			done;
+ };
+ 
+@@ -326,18 +334,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+ 	return error_count;
+ }
+ 
+-/* poor man's completion - we want to use wait_event_freezable() on it */
+-struct dmatest_done {
+-	bool			done;
+-	wait_queue_head_t	*wait;
+-};
+ 
+ static void dmatest_callback(void *arg)
+ {
+ 	struct dmatest_done *done = arg;
+-
+-	done->done = true;
+-	wake_up_all(done->wait);
++	struct dmatest_thread *thread =
++		container_of(arg, struct dmatest_thread, done_wait);
++	if (!thread->done) {
++		done->done = true;
++		wake_up_all(done->wait);
++	} else {
++		/*
++		 * If thread->done, it means that this callback occurred
++		 * after the parent thread has cleaned up. This can
++		 * happen in the case that driver doesn't implement
++		 * the terminate_all() functionality and a dma operation
++		 * did not occur within the timeout period
++		 */
++		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
++	}
+ }
+ 
+ static unsigned int min_odd(unsigned int x, unsigned int y)
+@@ -408,9 +423,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
+  */
+ static int dmatest_func(void *data)
+ {
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
+ 	struct dmatest_thread	*thread = data;
+-	struct dmatest_done	done = { .wait = &done_wait };
++	struct dmatest_done	*done = &thread->test_done;
+ 	struct dmatest_info	*info;
+ 	struct dmatest_params	*params;
+ 	struct dma_chan		*chan;
+@@ -637,9 +651,9 @@ static int dmatest_func(void *data)
+ 			continue;
+ 		}
+ 
+-		done.done = false;
++		done->done = false;
+ 		tx->callback = dmatest_callback;
+-		tx->callback_param = &done;
++		tx->callback_param = done;
+ 		cookie = tx->tx_submit(tx);
+ 
+ 		if (dma_submit_error(cookie)) {
+@@ -652,21 +666,12 @@ static int dmatest_func(void *data)
+ 		}
+ 		dma_async_issue_pending(chan);
+ 
+-		wait_event_freezable_timeout(done_wait, done.done,
++		wait_event_freezable_timeout(thread->done_wait, done->done,
+ 					     msecs_to_jiffies(params->timeout));
+ 
+ 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ 
+-		if (!done.done) {
+-			/*
+-			 * We're leaving the timed out dma operation with
+-			 * dangling pointer to done_wait. To make this
+-			 * correct, we'll need to allocate wait_done for
+-			 * each test iteration and perform "who's gonna
+-			 * free it this time?" dancing. For now, just
+-			 * leave it dangling.
+-			 */
+-			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
++		if (!done->done) {
+ 			dmaengine_unmap_put(um);
+ 			result("test timed out", total_tests, src_off, dst_off,
+ 			       len, 0);
+@@ -747,7 +752,7 @@ static int dmatest_func(void *data)
+ 		dmatest_KBs(runtime, total_len), ret);
+ 
+ 	/* terminate all transfers on specified channels */
+-	if (ret)
++	if (ret || failed_tests)
+ 		dmaengine_terminate_all(chan);
+ 
+ 	thread->done = true;
+@@ -807,6 +812,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
+ 		thread->info = info;
+ 		thread->chan = dtc->chan;
+ 		thread->type = type;
++		thread->test_done.wait = &thread->done_wait;
++		init_waitqueue_head(&thread->done_wait);
+ 		smp_wmb();
+ 		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
+ 				dma_chan_name(chan), op, i);
+diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
+index 88a00d06def6..43e88d85129e 100644
+--- a/drivers/dma/ti-dma-crossbar.c
++++ b/drivers/dma/ti-dma-crossbar.c
+@@ -49,12 +49,12 @@ struct ti_am335x_xbar_data {
+ 
+ struct ti_am335x_xbar_map {
+ 	u16 dma_line;
+-	u16 mux_val;
++	u8 mux_val;
+ };
+ 
+-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
++static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
+ {
+-	writeb_relaxed(val & 0x1f, iomem + event);
++	writeb_relaxed(val, iomem + event);
+ }
+ 
+ static void ti_am335x_xbar_free(struct device *dev, void *route_data)
+@@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
+ 	}
+ 
+ 	map->dma_line = (u16)dma_spec->args[0];
+-	map->mux_val = (u16)dma_spec->args[2];
++	map->mux_val = (u8)dma_spec->args[2];
+ 
+ 	dma_spec->args[2] = 0;
+ 	dma_spec->args_count = 2;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 2f48f848865f..2f47c5b5f4cb 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -384,7 +384,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ 			return 0;
+ 		}
+ 	}
+-	pr_err_once("requested map not found.\n");
+ 	return -ENOENT;
+ }
+ 
+diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
+index 307ec1c11276..311c9d0e8cbb 100644
+--- a/drivers/firmware/efi/esrt.c
++++ b/drivers/firmware/efi/esrt.c
+@@ -251,7 +251,7 @@ void __init efi_esrt_init(void)
+ 
+ 	rc = efi_mem_desc_lookup(efi.esrt, &md);
+ 	if (rc < 0) {
+-		pr_err("ESRT header is not in the memory map.\n");
++		pr_warn("ESRT header is not in the memory map.\n");
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
+index 8363cb57915b..8a08e81ee90d 100644
+--- a/drivers/gpu/drm/amd/acp/Makefile
++++ b/drivers/gpu/drm/amd/acp/Makefile
+@@ -3,6 +3,4 @@
+ # of AMDSOC/AMDGPU drm driver.
+ # It provides the HW control for ACP related functionalities.
+ 
+-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
+-
+ AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index bfb4b91869e7..f26d1fd53bef 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -240,6 +240,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+ 	for (; i >= 0; i--)
+ 		drm_free_large(p->chunks[i].kdata);
+ 	kfree(p->chunks);
++	p->chunks = NULL;
++	p->nchunks = 0;
+ put_ctx:
+ 	amdgpu_ctx_put(p->ctx);
+ free_chunk:
+diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+index af267c35d813..ee5883f59be5 100644
+--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
++++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
+ 	struct drm_gem_object *obj = buffer->priv;
+ 	int ret = 0;
+ 
+-	if (WARN_ON(!obj->filp))
+-		return -EINVAL;
+-
+ 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 13ba73fd9b68..8bd9e6c371d1 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -3029,6 +3029,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 			max_sclk = 75000;
+ 			max_mclk = 80000;
+ 		}
++	} else if (rdev->family == CHIP_OLAND) {
++		if ((rdev->pdev->revision == 0xC7) ||
++		    (rdev->pdev->revision == 0x80) ||
++		    (rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0x87) ||
++		    (rdev->pdev->device == 0x6604) ||
++		    (rdev->pdev->device == 0x6605)) {
++			max_sclk = 75000;
++		}
+ 	}
+ 	/* Apply dpm quirks */
+ 	while (p && p->chip_device != 0) {
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index e06c1344c913..7af77818efc3 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -188,6 +188,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ 				 HID_REQ_GET_REPORT);
+ 	if (ret != CP2112_GPIO_CONFIG_LENGTH) {
+ 		hid_err(hdev, "error requesting GPIO config: %d\n", ret);
++		if (ret >= 0)
++			ret = -EIO;
+ 		goto exit;
+ 	}
+ 
+@@ -197,8 +199,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ 	ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ 				 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ 				 HID_REQ_SET_REPORT);
+-	if (ret < 0) {
++	if (ret != CP2112_GPIO_CONFIG_LENGTH) {
+ 		hid_err(hdev, "error setting GPIO config: %d\n", ret);
++		if (ret >= 0)
++			ret = -EIO;
+ 		goto exit;
+ 	}
+ 
+@@ -206,7 +210,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ 
+ exit:
+ 	mutex_unlock(&dev->lock);
+-	return ret < 0 ? ret : -EIO;
++	return ret;
+ }
+ 
+ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
+index 75126e4e3f05..44420073edda 100644
+--- a/drivers/hv/hv_fcopy.c
++++ b/drivers/hv/hv_fcopy.c
+@@ -61,7 +61,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
+ static const char fcopy_devname[] = "vmbus/hv_fcopy";
+ static u8 *recv_buffer;
+ static struct hvutil_transport *hvt;
+-static struct completion release_event;
+ /*
+  * This state maintains the version number registered by the daemon.
+  */
+@@ -322,7 +321,6 @@ static void fcopy_on_reset(void)
+ 
+ 	if (cancel_delayed_work_sync(&fcopy_timeout_work))
+ 		fcopy_respond_to_host(HV_E_FAIL);
+-	complete(&release_event);
+ }
+ 
+ int hv_fcopy_init(struct hv_util_service *srv)
+@@ -330,7 +328,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
+ 	recv_buffer = srv->recv_buffer;
+ 	fcopy_transaction.recv_channel = srv->channel;
+ 
+-	init_completion(&release_event);
+ 	/*
+ 	 * When this driver loads, the user level daemon that
+ 	 * processes the host requests may not yet be running.
+@@ -352,5 +349,4 @@ void hv_fcopy_deinit(void)
+ 	fcopy_transaction.state = HVUTIL_DEVICE_DYING;
+ 	cancel_delayed_work_sync(&fcopy_timeout_work);
+ 	hvutil_transport_destroy(hvt);
+-	wait_for_completion(&release_event);
+ }
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index 3abfc5983c97..5e1fdc8d32ab 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -88,7 +88,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
+ static const char kvp_devname[] = "vmbus/hv_kvp";
+ static u8 *recv_buffer;
+ static struct hvutil_transport *hvt;
+-static struct completion release_event;
+ /*
+  * Register the kernel component with the user-level daemon.
+  * As part of this registration, pass the LIC version number.
+@@ -717,7 +716,6 @@ static void kvp_on_reset(void)
+ 	if (cancel_delayed_work_sync(&kvp_timeout_work))
+ 		kvp_respond_to_host(NULL, HV_E_FAIL);
+ 	kvp_transaction.state = HVUTIL_DEVICE_INIT;
+-	complete(&release_event);
+ }
+ 
+ int
+@@ -726,7 +724,6 @@ hv_kvp_init(struct hv_util_service *srv)
+ 	recv_buffer = srv->recv_buffer;
+ 	kvp_transaction.recv_channel = srv->channel;
+ 
+-	init_completion(&release_event);
+ 	/*
+ 	 * When this driver loads, the user level daemon that
+ 	 * processes the host requests may not yet be running.
+@@ -750,5 +747,4 @@ void hv_kvp_deinit(void)
+ 	cancel_delayed_work_sync(&kvp_timeout_work);
+ 	cancel_work_sync(&kvp_sendkey_work);
+ 	hvutil_transport_destroy(hvt);
+-	wait_for_completion(&release_event);
+ }
+diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
+index a76e3db0d01f..a6707133c297 100644
+--- a/drivers/hv/hv_snapshot.c
++++ b/drivers/hv/hv_snapshot.c
+@@ -66,7 +66,6 @@ static int dm_reg_value;
+ static const char vss_devname[] = "vmbus/hv_vss";
+ static __u8 *recv_buffer;
+ static struct hvutil_transport *hvt;
+-static struct completion release_event;
+ 
+ static void vss_timeout_func(struct work_struct *dummy);
+ static void vss_handle_request(struct work_struct *dummy);
+@@ -331,13 +330,11 @@ static void vss_on_reset(void)
+ 	if (cancel_delayed_work_sync(&vss_timeout_work))
+ 		vss_respond_to_host(HV_E_FAIL);
+ 	vss_transaction.state = HVUTIL_DEVICE_INIT;
+-	complete(&release_event);
+ }
+ 
+ int
+ hv_vss_init(struct hv_util_service *srv)
+ {
+-	init_completion(&release_event);
+ 	if (vmbus_proto_version < VERSION_WIN8_1) {
+ 		pr_warn("Integration service 'Backup (volume snapshot)'"
+ 			" not supported on this host version.\n");
+@@ -368,5 +365,4 @@ void hv_vss_deinit(void)
+ 	cancel_delayed_work_sync(&vss_timeout_work);
+ 	cancel_work_sync(&vss_handle_request_work);
+ 	hvutil_transport_destroy(hvt);
+-	wait_for_completion(&release_event);
+ }
+diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
+index c235a9515267..4402a71e23f7 100644
+--- a/drivers/hv/hv_utils_transport.c
++++ b/drivers/hv/hv_utils_transport.c
+@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
+ 	 * connects back.
+ 	 */
+ 	hvt_reset(hvt);
+-	mutex_unlock(&hvt->lock);
+ 
+ 	if (mode_old == HVUTIL_TRANSPORT_DESTROY)
+-		hvt_transport_free(hvt);
++		complete(&hvt->release);
++
++	mutex_unlock(&hvt->lock);
+ 
+ 	return 0;
+ }
+@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
+ 
+ 	init_waitqueue_head(&hvt->outmsg_q);
+ 	mutex_init(&hvt->lock);
++	init_completion(&hvt->release);
+ 
+ 	spin_lock(&hvt_list_lock);
+ 	list_add(&hvt->list, &hvt_list);
+@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
+ 	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
+ 		cn_del_callback(&hvt->cn_id);
+ 
+-	if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
+-		hvt_transport_free(hvt);
++	if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
++		wait_for_completion(&hvt->release);
++
++	hvt_transport_free(hvt);
+ }
+diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
+index d98f5225c3e6..79afb626e166 100644
+--- a/drivers/hv/hv_utils_transport.h
++++ b/drivers/hv/hv_utils_transport.h
+@@ -41,6 +41,7 @@ struct hvutil_transport {
+ 	int outmsg_len; /* its length */
+ 	wait_queue_head_t outmsg_q; /* poll/read wait queue */
+ 	struct mutex lock; /* protects struct members */
++	struct completion release; /* synchronize with fd release */
+ };
+ 
+ struct hvutil_transport *hvutil_transport_init(const char *name,
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 63b5db4e4070..e0f3244505d3 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -95,6 +95,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ 		.driver_data = (kernel_ulong_t)0,
+ 	},
++	{
++		/* Gemini Lake */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
++		.driver_data = (kernel_ulong_t)0,
++	},
+ 	{ 0 },
+ };
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 809a02800102..a09d6eed3b88 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1482,7 +1482,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
+ 	return id_priv;
+ }
+ 
+-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
++static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
+ {
+ 	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
+ }
+@@ -1877,7 +1877,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
+ 	struct rdma_id_private *listen_id, *conn_id = NULL;
+ 	struct rdma_cm_event event;
+ 	struct net_device *net_dev;
+-	int offset, ret;
++	u8 offset;
++	int ret;
+ 
+ 	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
+ 	if (IS_ERR(listen_id))
+@@ -3309,7 +3310,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+ 	struct ib_cm_sidr_req_param req;
+ 	struct ib_cm_id *id;
+ 	void *private_data;
+-	int offset, ret;
++	u8 offset;
++	int ret;
+ 
+ 	memset(&req, 0, sizeof req);
+ 	offset = cma_user_data_offset(id_priv);
+@@ -3366,7 +3368,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
+ 	struct rdma_route *route;
+ 	void *private_data;
+ 	struct ib_cm_id *id;
+-	int offset, ret;
++	u8 offset;
++	int ret;
+ 
+ 	memset(&req, 0, sizeof req);
+ 	offset = cma_user_data_offset(id_priv);
+diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
+index 862381aa83c8..b55adf53c758 100644
+--- a/drivers/infiniband/hw/cxgb4/t4.h
++++ b/drivers/infiniband/hw/cxgb4/t4.h
+@@ -171,7 +171,7 @@ struct t4_cqe {
+ 			__be32 msn;
+ 		} rcqe;
+ 		struct {
+-			u32 stag;
++			__be32 stag;
+ 			u16 nada2;
+ 			u16 cidx;
+ 		} scqe;
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 24d0820873cf..4682909b021b 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -9769,7 +9769,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
+ 		goto unimplemented;
+ 
+ 	case HFI1_IB_CFG_OP_VLS:
+-		val = ppd->vls_operational;
++		val = ppd->actual_vls_operational;
+ 		break;
+ 	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
+ 		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index 830fecb6934c..335bd2c9e16e 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1177,10 +1177,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ 		ipoib_ib_dev_down(dev);
+ 
+ 	if (level == IPOIB_FLUSH_HEAVY) {
++		rtnl_lock();
+ 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ 			ipoib_ib_dev_stop(dev);
+-		if (ipoib_ib_dev_open(dev) != 0)
++
++		result = ipoib_ib_dev_open(dev);
++		rtnl_unlock();
++		if (result)
+ 			return;
++
+ 		if (netif_queue_stopped(dev))
+ 			netif_start_queue(dev);
+ 	}
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index dbf09836ff30..d1051e3ce819 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -520,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ 		},
+ 	},
++	{
++		/* TUXEDO BU1406 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 1a0b110f12c0..0c910a863581 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3211,7 +3211,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
+ 	unsigned long start, end;
+ 
+ 	start = IOVA_PFN(region->start);
+-	end = IOVA_PFN(region->start + region->length);
++	end = IOVA_PFN(region->start + region->length - 1);
+ 
+ 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
+ }
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index f50e51c1a9c8..d68a552cfe8d 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -418,8 +418,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
+ 			pte |= ARM_V7S_ATTR_NS_TABLE;
+ 
+ 		__arm_v7s_set_pte(ptep, pte, 1, cfg);
+-	} else {
++	} else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
+ 		cptep = iopte_deref(pte, lvl);
++	} else {
++		/* We require an unmap first */
++		WARN_ON(!selftest_running);
++		return -EEXIST;
+ 	}
+ 
+ 	/* Rinse, repeat */
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index b8aeb0768483..68c6050d1efb 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -703,7 +703,7 @@ static struct platform_driver mtk_iommu_driver = {
+ 	.probe = mtk_iommu_probe,
+ 	.remove = mtk_iommu_remove,
+ 	.driver = {
+-		.name = "mtk-iommu",
++		.name = "mtk-iommu-v1",
+ 		.of_match_table = mtk_iommu_of_ids,
+ 		.pm = &mtk_iommu_pm_ops,
+ 	}
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index bc0af3307bbf..910cb5e23371 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -258,6 +258,7 @@ config IRQ_MXS
+ 
+ config MVEBU_ODMI
+ 	bool
++	select GENERIC_MSI_IRQ_DOMAIN
+ 
+ config MVEBU_PIC
+ 	bool
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index ab8a1b36af21..edb8d1a1a69f 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -468,6 +468,7 @@ struct search {
+ 	unsigned		recoverable:1;
+ 	unsigned		write:1;
+ 	unsigned		read_dirty_data:1;
++	unsigned		cache_missed:1;
+ 
+ 	unsigned long		start_time;
+ 
+@@ -653,6 +654,7 @@ static inline struct search *search_alloc(struct bio *bio,
+ 
+ 	s->orig_bio		= bio;
+ 	s->cache_miss		= NULL;
++	s->cache_missed		= 0;
+ 	s->d			= d;
+ 	s->recoverable		= 1;
+ 	s->write		= op_is_write(bio_op(bio));
+@@ -771,7 +773,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
+ 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ 
+ 	bch_mark_cache_accounting(s->iop.c, s->d,
+-				  !s->cache_miss, s->iop.bypass);
++				  !s->cache_missed, s->iop.bypass);
+ 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
+ 
+ 	if (s->iop.error)
+@@ -790,6 +792,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ 	struct bio *miss, *cache_bio;
+ 
++	s->cache_missed = 1;
++
+ 	if (s->cache_miss || s->iop.bypass) {
+ 		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index f4557f558b24..28ce342348a9 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2091,6 +2091,7 @@ static void bcache_exit(void)
+ 	if (bcache_major)
+ 		unregister_blkdev(bcache_major, "bcache");
+ 	unregister_reboot_notifier(&reboot);
++	mutex_destroy(&bch_register_lock);
+ }
+ 
+ static int __init bcache_init(void)
+@@ -2109,14 +2110,15 @@ static int __init bcache_init(void)
+ 	bcache_major = register_blkdev(0, "bcache");
+ 	if (bcache_major < 0) {
+ 		unregister_reboot_notifier(&reboot);
++		mutex_destroy(&bch_register_lock);
+ 		return bcache_major;
+ 	}
+ 
+ 	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
+ 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+-	    sysfs_create_files(bcache_kobj, files) ||
+ 	    bch_request_init() ||
+-	    bch_debug_init(bcache_kobj))
++	    bch_debug_init(bcache_kobj) ||
++	    sysfs_create_files(bcache_kobj, files))
+ 		goto err;
+ 
+ 	return 0;
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index 2b13117fb918..ba7edcdd09ce 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -974,6 +974,7 @@ static int leave(struct mddev *mddev)
+ 	lockres_free(cinfo->bitmap_lockres);
+ 	unlock_all_bitmaps(mddev);
+ 	dlm_release_lockspace(cinfo->lockspace, 2);
++	kfree(cinfo);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 7aea0221530c..475a7a1bcfe0 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1689,8 +1689,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
+ 		struct r5dev *dev = &sh->dev[i];
+ 
+ 		if (dev->written || i == pd_idx || i == qd_idx) {
+-			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
++			if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
+ 				set_bit(R5_UPTODATE, &dev->flags);
++				if (test_bit(STRIPE_EXPAND_READY, &sh->state))
++					set_bit(R5_Expanded, &dev->flags);
++			}
+ 			if (fua)
+ 				set_bit(R5_WantFUA, &dev->flags);
+ 			if (sync)
+diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
+index 77b2675cf8f5..92e176009ffe 100644
+--- a/drivers/mfd/fsl-imx25-tsadc.c
++++ b/drivers/mfd/fsl-imx25-tsadc.c
+@@ -183,6 +183,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static int mx25_tsadc_remove(struct platform_device *pdev)
++{
++	struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
++	int irq = platform_get_irq(pdev, 0);
++
++	if (irq) {
++		irq_set_chained_handler_and_data(irq, NULL, NULL);
++		irq_domain_remove(tsadc->domain);
++	}
++
++	return 0;
++}
++
+ static const struct of_device_id mx25_tsadc_ids[] = {
+ 	{ .compatible = "fsl,imx25-tsadc" },
+ 	{ /* Sentinel */ }
+@@ -194,6 +207,7 @@ static struct platform_driver mx25_tsadc_driver = {
+ 		.of_match_table = of_match_ptr(mx25_tsadc_ids),
+ 	},
+ 	.probe = mx25_tsadc_probe,
++	.remove = mx25_tsadc_remove,
+ };
+ module_platform_driver(mx25_tsadc_driver);
+ 
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 19c10dc56513..d8a485f1798b 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -783,7 +783,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ 	at24->nvmem_config.reg_read = at24_read;
+ 	at24->nvmem_config.reg_write = at24_write;
+ 	at24->nvmem_config.priv = at24;
+-	at24->nvmem_config.stride = 4;
++	at24->nvmem_config.stride = 1;
+ 	at24->nvmem_config.word_size = 1;
+ 	at24->nvmem_config.size = chip.byte_len;
+ 
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 84e9afcb5c09..6f9535e5e584 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -579,7 +579,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
+ 		}
+ 	}
+ 	sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+-		      (mode << 8) | (div % 0xff));
++		      (mode << 8) | div);
+ 	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ 	while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
+ 		cpu_relax();
+@@ -1562,7 +1562,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
+ 	host->src_clk_freq = clk_get_rate(host->src_clk);
+ 	/* Set host parameters to mmc */
+ 	mmc->ops = &mt_msdc_ops;
+-	mmc->f_min = host->src_clk_freq / (4 * 255);
++	mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ 
+ 	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ 	/* MMC core transfer sizes tunable parameters */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 5d2cf56aed0e..0b894d76aa41 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5132,8 +5132,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
+ 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
+ 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
+ 	}
+-	link_info->support_auto_speeds =
+-		le16_to_cpu(resp->supported_speeds_auto_mode);
++	if (resp->supported_speeds_auto_mode)
++		link_info->support_auto_speeds =
++			le16_to_cpu(resp->supported_speeds_auto_mode);
+ 
+ hwrm_phy_qcaps_exit:
+ 	mutex_unlock(&bp->hwrm_cmd_lock);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 0975af2903ef..3480b3078775 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1,7 +1,7 @@
+ /*
+  * Broadcom GENET (Gigabit Ethernet) controller driver
+  *
+- * Copyright (c) 2014 Broadcom Corporation
++ * Copyright (c) 2014-2017 Broadcom
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -778,8 +778,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ 	/* Misc UniMAC counters */
+ 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+-			UMAC_RBUF_OVFL_CNT),
+-	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
++			UMAC_RBUF_OVFL_CNT_V1),
++	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
++			UMAC_RBUF_ERR_CNT_V1),
+ 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+ 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+@@ -821,6 +822,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+ 	}
+ }
+ 
++static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
++{
++	u16 new_offset;
++	u32 val;
++
++	switch (offset) {
++	case UMAC_RBUF_OVFL_CNT_V1:
++		if (GENET_IS_V2(priv))
++			new_offset = RBUF_OVFL_CNT_V2;
++		else
++			new_offset = RBUF_OVFL_CNT_V3PLUS;
++
++		val = bcmgenet_rbuf_readl(priv, new_offset);
++		/* clear if overflowed */
++		if (val == ~0)
++			bcmgenet_rbuf_writel(priv, 0, new_offset);
++		break;
++	case UMAC_RBUF_ERR_CNT_V1:
++		if (GENET_IS_V2(priv))
++			new_offset = RBUF_ERR_CNT_V2;
++		else
++			new_offset = RBUF_ERR_CNT_V3PLUS;
++
++		val = bcmgenet_rbuf_readl(priv, new_offset);
++		/* clear if overflowed */
++		if (val == ~0)
++			bcmgenet_rbuf_writel(priv, 0, new_offset);
++		break;
++	default:
++		val = bcmgenet_umac_readl(priv, offset);
++		/* clear if overflowed */
++		if (val == ~0)
++			bcmgenet_umac_writel(priv, 0, offset);
++		break;
++	}
++
++	return val;
++}
++
+ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+ {
+ 	int i, j = 0;
+@@ -836,19 +876,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+ 		case BCMGENET_STAT_NETDEV:
+ 		case BCMGENET_STAT_SOFT:
+ 			continue;
+-		case BCMGENET_STAT_MIB_RX:
+-		case BCMGENET_STAT_MIB_TX:
+ 		case BCMGENET_STAT_RUNT:
+-			if (s->type != BCMGENET_STAT_MIB_RX)
+-				offset = BCMGENET_STAT_OFFSET;
++			offset += BCMGENET_STAT_OFFSET;
++			/* fall through */
++		case BCMGENET_STAT_MIB_TX:
++			offset += BCMGENET_STAT_OFFSET;
++			/* fall through */
++		case BCMGENET_STAT_MIB_RX:
+ 			val = bcmgenet_umac_readl(priv,
+ 						  UMAC_MIB_START + j + offset);
++			offset = 0; /* Reset Offset */
+ 			break;
+ 		case BCMGENET_STAT_MISC:
+-			val = bcmgenet_umac_readl(priv, s->reg_offset);
+-			/* clear if overflowed */
+-			if (val == ~0)
+-				bcmgenet_umac_writel(priv, 0, s->reg_offset);
++			if (GENET_IS_V1(priv)) {
++				val = bcmgenet_umac_readl(priv, s->reg_offset);
++				/* clear if overflowed */
++				if (val == ~0)
++					bcmgenet_umac_writel(priv, 0,
++							     s->reg_offset);
++			} else {
++				val = bcmgenet_update_stat_misc(priv,
++								s->reg_offset);
++			}
+ 			break;
+ 		}
+ 
+@@ -2464,24 +2513,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+ /* Interrupt bottom half */
+ static void bcmgenet_irq_task(struct work_struct *work)
+ {
++	unsigned long flags;
++	unsigned int status;
+ 	struct bcmgenet_priv *priv = container_of(
+ 			work, struct bcmgenet_priv, bcmgenet_irq_work);
+ 
+ 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+ 
+-	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
+-		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
++	spin_lock_irqsave(&priv->lock, flags);
++	status = priv->irq0_stat;
++	priv->irq0_stat = 0;
++	spin_unlock_irqrestore(&priv->lock, flags);
++
++	if (status & UMAC_IRQ_MPD_R) {
+ 		netif_dbg(priv, wol, priv->dev,
+ 			  "magic packet detected, waking up\n");
+ 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+ 	}
+ 
+ 	/* Link UP/DOWN event */
+-	if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
++	if (status & UMAC_IRQ_LINK_EVENT)
+ 		phy_mac_interrupt(priv->phydev,
+-				  !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
+-		priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
+-	}
++				  !!(status & UMAC_IRQ_LINK_UP));
+ }
+ 
+ /* bcmgenet_isr1: handle Rx and Tx priority queues */
+@@ -2490,22 +2543,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+ 	struct bcmgenet_priv *priv = dev_id;
+ 	struct bcmgenet_rx_ring *rx_ring;
+ 	struct bcmgenet_tx_ring *tx_ring;
+-	unsigned int index;
++	unsigned int index, status;
+ 
+-	/* Save irq status for bottom-half processing. */
+-	priv->irq1_stat =
+-		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
++	/* Read irq status */
++	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ 
+ 	/* clear interrupts */
+-	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
++	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
+ 
+ 	netif_dbg(priv, intr, priv->dev,
+-		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
++		  "%s: IRQ=0x%x\n", __func__, status);
+ 
+ 	/* Check Rx priority queue interrupts */
+ 	for (index = 0; index < priv->hw_params->rx_queues; index++) {
+-		if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
++		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+ 			continue;
+ 
+ 		rx_ring = &priv->rx_rings[index];
+@@ -2518,7 +2570,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+ 
+ 	/* Check Tx priority queue interrupts */
+ 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
+-		if (!(priv->irq1_stat & BIT(index)))
++		if (!(status & BIT(index)))
+ 			continue;
+ 
+ 		tx_ring = &priv->tx_rings[index];
+@@ -2538,19 +2590,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+ 	struct bcmgenet_priv *priv = dev_id;
+ 	struct bcmgenet_rx_ring *rx_ring;
+ 	struct bcmgenet_tx_ring *tx_ring;
++	unsigned int status;
++	unsigned long flags;
+ 
+-	/* Save irq status for bottom-half processing. */
+-	priv->irq0_stat =
+-		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
++	/* Read irq status */
++	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ 
+ 	/* clear interrupts */
+-	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
++	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
+ 
+ 	netif_dbg(priv, intr, priv->dev,
+-		  "IRQ=0x%x\n", priv->irq0_stat);
++		  "IRQ=0x%x\n", status);
+ 
+-	if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
++	if (status & UMAC_IRQ_RXDMA_DONE) {
+ 		rx_ring = &priv->rx_rings[DESC_INDEX];
+ 
+ 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
+@@ -2559,7 +2612,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+ 		}
+ 	}
+ 
+-	if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
++	if (status & UMAC_IRQ_TXDMA_DONE) {
+ 		tx_ring = &priv->tx_rings[DESC_INDEX];
+ 
+ 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
+@@ -2568,22 +2621,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+ 		}
+ 	}
+ 
+-	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+-			       UMAC_IRQ_PHY_DET_F |
+-			       UMAC_IRQ_LINK_EVENT |
+-			       UMAC_IRQ_HFB_SM |
+-			       UMAC_IRQ_HFB_MM |
+-			       UMAC_IRQ_MPD_R)) {
+-		/* all other interested interrupts handled in bottom half */
+-		schedule_work(&priv->bcmgenet_irq_work);
+-	}
+-
+ 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+-	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+-		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
++	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ 		wake_up(&priv->wq);
+ 	}
+ 
++	/* all other interested interrupts handled in bottom half */
++	status &= (UMAC_IRQ_LINK_EVENT |
++		   UMAC_IRQ_MPD_R);
++	if (status) {
++		/* Save irq status for bottom-half processing. */
++		spin_lock_irqsave(&priv->lock, flags);
++		priv->irq0_stat |= status;
++		spin_unlock_irqrestore(&priv->lock, flags);
++
++		schedule_work(&priv->bcmgenet_irq_work);
++	}
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -2808,6 +2862,8 @@ static int bcmgenet_open(struct net_device *dev)
+ err_fini_dma:
+ 	bcmgenet_fini_dma(priv);
+ err_clk_disable:
++	if (priv->internal_phy)
++		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ 	clk_disable_unprepare(priv->clk);
+ 	return ret;
+ }
+@@ -3184,6 +3240,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+ 	 */
+ 	gphy_rev = reg & 0xffff;
+ 
++	/* This is reserved so should require special treatment */
++	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
++		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
++		return;
++	}
++
+ 	/* This is the good old scheme, just GPHY major, no minor nor patch */
+ 	if ((gphy_rev & 0xf0) != 0)
+ 		priv->gphy_rev = gphy_rev << 8;
+@@ -3192,12 +3254,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+ 	else if ((gphy_rev & 0xff00) != 0)
+ 		priv->gphy_rev = gphy_rev;
+ 
+-	/* This is reserved so should require special treatment */
+-	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+-		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+-		return;
+-	}
+-
+ #ifdef CONFIG_PHYS_ADDR_T_64BIT
+ 	if (!(params->flags & GENET_HAS_40BITS))
+ 		pr_warn("GENET does not support 40-bits PA\n");
+@@ -3240,6 +3296,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ 	const void *macaddr;
+ 	struct resource *r;
+ 	int err = -EIO;
++	const char *phy_mode_str;
+ 
+ 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+ 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+@@ -3283,6 +3340,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ 		goto err;
+ 	}
+ 
++	spin_lock_init(&priv->lock);
++
+ 	SET_NETDEV_DEV(dev, &pdev->dev);
+ 	dev_set_drvdata(&pdev->dev, dev);
+ 	ether_addr_copy(dev->dev_addr, macaddr);
+@@ -3345,6 +3404,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ 		priv->clk_eee = NULL;
+ 	}
+ 
++	/* If this is an internal GPHY, power it on now, before UniMAC is
++	 * brought out of reset as absolutely no UniMAC activity is allowed
++	 */
++	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
++	    !strcasecmp(phy_mode_str, "internal"))
++		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
++
+ 	err = reset_umac(priv);
+ 	if (err)
+ 		goto err_clk_disable;
+@@ -3511,6 +3577,8 @@ static int bcmgenet_resume(struct device *d)
+ 	return 0;
+ 
+ out_clk_disable:
++	if (priv->internal_phy)
++		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ 	clk_disable_unprepare(priv->clk);
+ 	return ret;
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 1e2dc34d331a..db7f289d65ae 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2014 Broadcom Corporation
++ * Copyright (c) 2014-2017 Broadcom
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
+ #define MDIO_REG_SHIFT 16
+ #define MDIO_REG_MASK 0x1F
+ 
+-#define UMAC_RBUF_OVFL_CNT 0x61C
++#define UMAC_RBUF_OVFL_CNT_V1 0x61C
++#define RBUF_OVFL_CNT_V2 0x80
++#define RBUF_OVFL_CNT_V3PLUS 0x94
+ 
+ #define UMAC_MPD_CTRL 0x620
+ #define MPD_EN (1 << 0)
+@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
+ 
+ #define UMAC_MPD_PW_MS 0x624
+ #define UMAC_MPD_PW_LS 0x628
+-#define UMAC_RBUF_ERR_CNT 0x634
++#define UMAC_RBUF_ERR_CNT_V1 0x634
++#define RBUF_ERR_CNT_V2 0x84
++#define RBUF_ERR_CNT_V3PLUS 0x98
+ #define UMAC_MDF_ERR_CNT 0x638
+ #define UMAC_MDF_CTRL 0x650
+ #define UMAC_MDF_ADDR 0x654
+@@ -619,11 +623,13 @@ struct bcmgenet_priv {
+ 	struct work_struct bcmgenet_irq_work;
+ 	int irq0;
+ 	int irq1;
+-	unsigned int irq0_stat;
+-	unsigned int irq1_stat;
+ 	int wol_irq;
+ 	bool wol_irq_disabled;
+ 
++	/* shared status */
++	spinlock_t lock;
++	unsigned int irq0_stat;
++
+ 	/* HW descriptors/checksum variables */
+ 	bool desc_64b_en;
+ 	bool desc_rxchk_en;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index e36bebcab3f2..dae9dcfa8f36 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -2304,6 +2304,17 @@ static int sync_toggles(struct mlx4_dev *dev)
+ 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
+ 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
+ 			/* PCI might be offline */
++
++			/* If device removal has been requested,
++			 * do not continue retrying.
++			 */
++			if (dev->persist->interface_state &
++			    MLX4_INTERFACE_STATE_NOWAIT) {
++				mlx4_warn(dev,
++					  "communication channel is offline\n");
++				return -EIO;
++			}
++
+ 			msleep(100);
+ 			wr_toggle = swab32(readl(&priv->mfunc.comm->
+ 					   slave_write));
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 727122de7df0..5411ca48978a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
+ 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
+ 		if (!offline_bit)
+ 			return 0;
++
++		/* If device removal has been requested,
++		 * do not continue retrying.
++		 */
++		if (dev->persist->interface_state &
++		    MLX4_INTERFACE_STATE_NOWAIT)
++			break;
++
+ 		/* There are cases as part of AER/Reset flow that PF needs
+ 		 * around 100 msec to load. We therefore sleep for 100 msec
+ 		 * to allow other tasks to make use of that CPU during this
+@@ -3954,6 +3962,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
+ 	struct devlink *devlink = priv_to_devlink(priv);
+ 	int active_vfs = 0;
+ 
++	if (mlx4_is_slave(dev))
++		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
++
+ 	mutex_lock(&persist->interface_state_mutex);
+ 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
+ 	mutex_unlock(&persist->interface_state_mutex);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 4de3c28b0547..331a6ca4856d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1015,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
+ 	u32 *match_criteria)
+ {
+ 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+-	struct list_head *prev = ft->node.children.prev;
++	struct list_head *prev = &ft->node.children;
+ 	unsigned int candidate_index = 0;
+ 	struct mlx5_flow_group *fg;
+ 	void *match_criteria_addr;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index b3309f2ed7dc..981cd1d84a5b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1283,6 +1283,7 @@ static int init_one(struct pci_dev *pdev,
+ 	if (err)
+ 		goto clean_load;
+ 
++	pci_save_state(pdev);
+ 	return 0;
+ 
+ clean_load:
+@@ -1331,9 +1332,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ 
+ 	mlx5_enter_error_state(dev);
+ 	mlx5_unload_one(dev, priv, false);
+-	/* In case of kernel call save the pci state and drain the health wq */
++	/* In case of kernel call drain the health wq */
+ 	if (state) {
+-		pci_save_state(pdev);
+ 		mlx5_drain_health_wq(dev);
+ 		mlx5_pci_disable_device(dev);
+ 	}
+@@ -1385,6 +1385,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+ 
+ 	pci_set_master(pdev);
+ 	pci_restore_state(pdev);
++	pci_save_state(pdev);
+ 
+ 	if (wait_vital(pdev)) {
+ 		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index 6460c7256f2b..a01e6c0d0cd1 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -788,7 +788,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+ #define MLXSW_REG_SPVM_ID 0x200F
+ #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+ #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
++#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
+ #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
+ 		    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+ 
+@@ -1757,7 +1757,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
+ #define MLXSW_REG_SPVMLR_ID 0x2020
+ #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+ #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
++#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
+ #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+ 			      MLXSW_REG_SPVMLR_REC_LEN * \
+ 			      MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 0c42c240b5cf..ed014bdbbabd 100644
+---
a/drivers/net/ethernet/qlogic/qed/qed_cxt.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +@@ -373,8 +373,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, + u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; + u32 cxt_size = CONN_CXT_SIZE(p_hwfn); + u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; ++ u32 align = elems_per_page * DQ_RANGE_ALIGN; + +- p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page); ++ p_conn->cid_count = roundup(p_conn->cid_count, align); + } + } + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c +index 62ae55bd81b8..a3360cbdb30b 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c +@@ -187,6 +187,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, + /* If need to reuse or there's no replacement buffer, repost this */ + if (rc) + goto out_post; ++ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, ++ cdev->ll2->rx_size, DMA_FROM_DEVICE); + + skb = build_skb(buffer->data, 0); + if (!skb) { +@@ -441,7 +443,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn, + static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + union core_rx_cqe_union *p_cqe, +- unsigned long lock_flags, ++ unsigned long *p_lock_flags, + bool b_last_cqe) + { + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; +@@ -462,10 +464,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, + "Mismatch between active_descq and the LL2 Rx chain\n"); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + +- spin_unlock_irqrestore(&p_rx->lock, lock_flags); ++ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); + qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id, + p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe); +- spin_lock_irqsave(&p_rx->lock, lock_flags); ++ spin_lock_irqsave(&p_rx->lock, *p_lock_flags); + + return 0; + } +@@ -505,7 +507,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) + break; + case CORE_RX_CQE_TYPE_REGULAR: + rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn, +- cqe, flags, b_last_cqe); ++ cqe, &flags, ++ b_last_cqe); + break; + default: + rc = -EIO; +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c +index 1d85109cb8ed..3d5d5d54c103 100644 +--- a/drivers/net/ethernet/sfc/ef10.c ++++ b/drivers/net/ethernet/sfc/ef10.c +@@ -4967,7 +4967,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) + * MCFW do not support VFs. 
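+ * With the "else if (rc)" below, the MCDI error is only reported when
+ * VADAPTOR_SET_MAC actually failed; rc == 0 no longer logs anything.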
+ */ + rc = efx_ef10_vport_set_mac_address(efx); +- } else { ++ } else if (rc) { + efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, + sizeof(inbuf), NULL, 0, rc); + } +diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c +index e46b1ebbbff4..7ea8ead4fd1c 100644 +--- a/drivers/net/fjes/fjes_main.c ++++ b/drivers/net/fjes/fjes_main.c +@@ -1277,7 +1277,7 @@ static void fjes_netdev_setup(struct net_device *netdev) + fjes_set_ethtool_ops(netdev); + netdev->mtu = fjes_support_mtu[3]; + netdev->flags |= IFF_BROADCAST; +- netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; ++ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + + static void fjes_irq_watch_task(struct work_struct *work) +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index dc8ccac0a01d..6d55049cd3dc 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -452,7 +452,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + struct macvlan_dev, list); + else + vlan = macvlan_hash_lookup(port, eth->h_dest); +- if (vlan == NULL) ++ if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) + return RX_HANDLER_PASS; + + dev = vlan->dev; +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 440d5f42810f..b883af93929c 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -958,6 +958,7 @@ static __net_exit void ppp_exit_net(struct net *net) + unregister_netdevice_many(&list); + rtnl_unlock(); + ++ mutex_destroy(&pn->all_ppp_mutex); + idr_destroy(&pn->units_idr); + } + +diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c +index e7f5910a6519..f8eb66ef2944 100644 +--- a/drivers/net/wimax/i2400m/usb.c ++++ b/drivers/net/wimax/i2400m/usb.c +@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface, + struct i2400mu *i2400mu; + struct usb_device *usb_dev = interface_to_usbdev(iface); + ++ if (iface->cur_altsetting->desc.bNumEndpoints < 4) ++ return -ENODEV; ++ + if (usb_dev->speed != USB_SPEED_HIGH) + dev_err(dev, "device not connected as high speed\n"); + +diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c +index 1fa7f844b5da..8e9480cc33e1 100644 +--- a/drivers/net/wireless/ath/ath9k/tx99.c ++++ b/drivers/net/wireless/ath/ath9k/tx99.c +@@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, + ssize_t len; + int r; + ++ if (count < 1) ++ return -EINVAL; ++ + if (sc->cur_chan->nvifs > 1) + return -EOPNOTSUPP; + +@@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + ++ buf[len] = '\0'; ++ + if (strtobool(buf, &start)) + return -EINVAL; + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index 9789f3c5a785..f1231c0ea336 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -2320,7 +2320,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, + { + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + +- /* Called when we need to transmit (a) frame(s) from agg queue */ ++ /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ + + iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, + tids, more_data, true); +@@ -2340,7 +2340,8 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, + for (tid = 0; tid < 
IWL_MAX_TID_COUNT; tid++) { + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + +- if (tid_data->state != IWL_AGG_ON && ++ if (!iwl_mvm_is_dqa_supported(mvm) && ++ tid_data->state != IWL_AGG_ON && + tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) + continue; + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +index e64aeb4a2204..bdd1deed55a4 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +@@ -3032,7 +3032,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_frame_release_type reason, + u16 cnt, u16 tids, bool more_data, +- bool agg) ++ bool single_sta_queue) + { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_add_sta_cmd cmd = { +@@ -3052,14 +3052,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) + cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); + +- /* If we're releasing frames from aggregation queues then check if the +- * all queues combined that we're releasing frames from have ++ /* If we're releasing frames from aggregation or dqa queues then check ++ * if all the queues that we're releasing frames from, combined, have: + * - more frames than the service period, in which case more_data + * needs to be set + * - fewer than 'cnt' frames, in which case we need to adjust the + * firmware command (but do that unconditionally) + */ +- if (agg) { ++ if (single_sta_queue) { + int remaining = cnt; + int sleep_tx_count; + +@@ -3069,7 +3069,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + u16 n_queued; + + tid_data = &mvmsta->tid_data[tid]; +- if (WARN(tid_data->state != IWL_AGG_ON && ++ if (WARN(!iwl_mvm_is_dqa_supported(mvm) && ++ tid_data->state != IWL_AGG_ON && + tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, + "TID %d state is %d\n", + tid, tid_data->state)) { +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +index e068d5355865..f65950e91ed5 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +@@ -545,7 +545,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + enum ieee80211_frame_release_type reason, + u16 cnt, u16 tids, bool more_data, +- bool agg); ++ bool single_sta_queue); + int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + bool drain); + void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +index 092ae0024f22..7465d4db136f 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +@@ -7,7 +7,7 @@ + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH +- * Copyright(c) 2016 Intel Deutschland GmbH ++ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as +@@ -34,6 +34,7 @@ + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH ++ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without +@@ -621,8 +622,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) + * values. + * Note that we don't need to make sure it isn't agg'd, since we're + * TXing non-sta ++ * For DQA mode - we shouldn't increase it though + */ +- atomic_inc(&mvm->pending_frames[sta_id]); ++ if (!iwl_mvm_is_dqa_supported(mvm)) ++ atomic_inc(&mvm->pending_frames[sta_id]); + + return 0; + } +@@ -1009,11 +1012,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, + + spin_unlock(&mvmsta->lock); + +- /* Increase pending frames count if this isn't AMPDU */ +- if ((iwl_mvm_is_dqa_supported(mvm) && +- mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && +- mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || +- (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) ++ /* Increase pending frames count if this isn't AMPDU or DQA queue */ ++ if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) + atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); + + return 0; +@@ -1083,12 +1083,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, + lockdep_assert_held(&mvmsta->lock); + + if ((tid_data->state == IWL_AGG_ON || +- tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && ++ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || ++ iwl_mvm_is_dqa_supported(mvm)) && + iwl_mvm_tid_queued(tid_data) == 0) { + /* +- * Now that this aggregation queue is empty tell mac80211 so it +- * knows we no longer have frames buffered for the station on +- * this TID (for the TIM bitmap calculation.) ++ * Now that this aggregation or DQA queue is empty tell ++ * mac80211 so it knows we no longer have frames buffered for ++ * the station on this TID (for the TIM bitmap calculation.) + */ + ieee80211_sta_set_buffered(sta, tid, false); + } +@@ -1261,7 +1262,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + u8 skb_freed = 0; + u16 next_reclaimed, seq_ctl; + bool is_ndp = false; +- bool txq_agg = false; /* Is this TXQ aggregated */ + + __skb_queue_head_init(&skbs); + +@@ -1287,6 +1287,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + info->flags |= IEEE80211_TX_STAT_ACK; + break; + case TX_STATUS_FAIL_DEST_PS: ++ /* In DQA, the FW should have stopped the queue and not ++ * return this status ++ */ ++ WARN_ON(iwl_mvm_is_dqa_supported(mvm)); + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + break; + default: +@@ -1391,15 +1395,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + bool send_eosp_ndp = false; + + spin_lock_bh(&mvmsta->lock); +- if (iwl_mvm_is_dqa_supported(mvm)) { +- enum iwl_mvm_agg_state state; +- +- state = mvmsta->tid_data[tid].state; +- txq_agg = (state == IWL_AGG_ON || +- state == IWL_EMPTYING_HW_QUEUE_DELBA); +- } else { +- txq_agg = txq_id >= mvm->first_agg_queue; +- } + + if (!is_ndp) { + tid_data->next_reclaimed = next_reclaimed; +@@ -1456,11 +1451,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, + * If the txq is not an AMPDU queue, there is no chance we freed + * several skbs. Check that out... 
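+ * When DQA is supported, every queue is treated like an aggregation
+ * queue for this check, so the single-frame pending_frames
+ * bookkeeping below is bypassed entirely.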
+ */ +- if (txq_agg) ++ if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) + goto out; + + /* We can't free more than one frame at once on a shared queue */ +- WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); ++ WARN_ON(skb_freed > 1); + + /* If we have still frames for this STA nothing to do here */ + if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index fbeca065f18c..719ee5fb2626 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -1619,7 +1619,8 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) + mutex_lock(&ctrl->namespaces_mutex); + list_for_each_entry(ns, &ctrl->namespaces, list) { + if (ns->ns_id == nsid) { +- kref_get(&ns->kref); ++ if (!kref_get_unless_zero(&ns->kref)) ++ continue; + ret = ns; + break; + } +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c +index fbd6d487103f..c89d68a76f3d 100644 +--- a/drivers/nvme/target/core.c ++++ b/drivers/nvme/target/core.c +@@ -422,6 +422,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, + ctrl->sqs[qid] = sq; + } + ++static void nvmet_confirm_sq(struct percpu_ref *ref) ++{ ++ struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); ++ ++ complete(&sq->confirm_done); ++} ++ + void nvmet_sq_destroy(struct nvmet_sq *sq) + { + /* +@@ -430,7 +437,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) + */ + if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) + nvmet_async_events_free(sq->ctrl); +- percpu_ref_kill(&sq->ref); ++ percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); ++ wait_for_completion(&sq->confirm_done); + wait_for_completion(&sq->free_done); + percpu_ref_exit(&sq->ref); + +@@ -458,6 +466,7 @@ int nvmet_sq_init(struct nvmet_sq *sq) + return ret; + } + init_completion(&sq->free_done); ++ init_completion(&sq->confirm_done); + + return 0; + } +diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c +index d5df77d686b2..c8e612c1c72f 100644 +--- a/drivers/nvme/target/loop.c ++++ b/drivers/nvme/target/loop.c +@@ -288,9 +288,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = { + + static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) + { ++ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + blk_cleanup_queue(ctrl->ctrl.admin_q); + blk_mq_free_tag_set(&ctrl->admin_tag_set); +- nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + } + + static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) +diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h +index 7655a351320f..26b87dc843d2 100644 +--- a/drivers/nvme/target/nvmet.h ++++ b/drivers/nvme/target/nvmet.h +@@ -73,6 +73,7 @@ struct nvmet_sq { + u16 qid; + u16 size; + struct completion free_done; ++ struct completion confirm_done; + }; + + /** +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c +index ca8ddc3fb19e..53bd32550867 100644 +--- a/drivers/nvme/target/rdma.c ++++ b/drivers/nvme/target/rdma.c +@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, + { + u16 status; + +- cmd->queue = queue; +- cmd->n_rdma = 0; +- cmd->req.port = queue->port; +- +- + ib_dma_sync_single_for_cpu(queue->dev->device, + cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, + DMA_FROM_DEVICE); +@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) + + cmd->queue = queue; + rsp = nvmet_rdma_get_rsp(queue); ++ rsp->queue = queue; + rsp->cmd = cmd; + rsp->flags = 0; + 
rsp->req.cmd = cmd->nvme_cmd; ++ rsp->req.port = queue->port; ++ rsp->n_rdma = 0; + + if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { + unsigned long flags; +diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c +index 4b703492376a..00f61225386c 100644 +--- a/drivers/pci/pcie/pme.c ++++ b/drivers/pci/pcie/pme.c +@@ -232,6 +232,9 @@ static void pcie_pme_work_fn(struct work_struct *work) + break; + + pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); ++ if (rtsta == (u32) ~0) ++ break; ++ + if (rtsta & PCI_EXP_RTSTA_PME) { + /* + * Clear PME status of the port. If there are other +@@ -279,7 +282,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) + spin_lock_irqsave(&data->lock, flags); + pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); + +- if (!(rtsta & PCI_EXP_RTSTA_PME)) { ++ if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) { + spin_unlock_irqrestore(&data->lock, flags); + return IRQ_NONE; + } +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 60bada90cd75..a98be6db7e93 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -932,7 +932,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) + child = pci_add_new_bus(bus, dev, max+1); + if (!child) + goto out; +- pci_bus_insert_busn_res(child, max+1, 0xff); ++ pci_bus_insert_busn_res(child, max+1, ++ bus->busn_res.end); + } + max++; + buses = (buses & 0xff000000) +@@ -2136,6 +2137,10 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus) + if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) { + if (max - bus->busn_res.start < pci_hotplug_bus_size - 1) + max = bus->busn_res.start + pci_hotplug_bus_size - 1; ++ ++ /* Do not allocate more buses than we have room left */ ++ if (max > bus->busn_res.end) ++ max = bus->busn_res.end; + } + + /* +diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c +index f9357e09e9b3..b6b9b5b74e30 100644 +--- a/drivers/pci/remove.c ++++ b/drivers/pci/remove.c +@@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev) + pci_pme_active(dev, false); + + if (dev->is_added) { ++ device_release_driver(&dev->dev); + pci_proc_detach_device(dev); + pci_remove_sysfs_dev_files(dev); +- device_release_driver(&dev->dev); + dev->is_added = 0; + } + +diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig +index 671610c989b6..b0c0fa0444dd 100644 +--- a/drivers/pinctrl/Kconfig ++++ b/drivers/pinctrl/Kconfig +@@ -26,7 +26,8 @@ config DEBUG_PINCTRL + + config PINCTRL_ADI2 + bool "ADI pin controller driver" +- depends on BLACKFIN ++ depends on (BF54x || BF60x) ++ depends on !GPIO_ADI + select PINMUX + select IRQ_DOMAIN + help +diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c +index 09356684c32f..abd9d83f6009 100644 +--- a/drivers/platform/x86/hp_accel.c ++++ b/drivers/platform/x86/hp_accel.c +@@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = { + AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), + AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), + AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), ++ AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted), + AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), + AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), + AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c +index a47a41fc10ad..b5b890127479 100644 +--- a/drivers/platform/x86/intel_punit_ipc.c ++++ 
b/drivers/platform/x86/intel_punit_ipc.c +@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev) + * - GTDRIVER_IPC BASE_IFACE + */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); +- if (res) { ++ if (res && resource_size(res) > 1) { + addr = devm_ioremap_resource(&pdev->dev, res); + if (!IS_ERR(addr)) + punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); +- if (res) { ++ if (res && resource_size(res) > 1) { + addr = devm_ioremap_resource(&pdev->dev, res); + if (!IS_ERR(addr)) + punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 4); +- if (res) { ++ if (res && resource_size(res) > 1) { + addr = devm_ioremap_resource(&pdev->dev, res); + if (!IS_ERR(addr)) + punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 5); +- if (res) { ++ if (res && resource_size(res) > 1) { + addr = devm_ioremap_resource(&pdev->dev, res); + if (!IS_ERR(addr)) + punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; +diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c +index 1227ceab61ee..a4b8b603c807 100644 +--- a/drivers/rtc/rtc-pcf8563.c ++++ b/drivers/rtc/rtc-pcf8563.c +@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw, + return 0; + + buf &= PCF8563_REG_CLKO_F_MASK; +- return clkout_rates[ret]; ++ return clkout_rates[buf]; + } + + static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, +diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c +index 8dcd8c70c7ee..05f523971348 100644 +--- a/drivers/scsi/bfa/bfad_debugfs.c ++++ b/drivers/scsi/bfa/bfad_debugfs.c +@@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, + struct bfad_s *bfad = port->bfad; + struct bfa_s *bfa = &bfad->bfa; + struct bfa_ioc_s *ioc = &bfa->ioc; +- int addr, len, rc, i; ++ int addr, rc, i; ++ u32 len; + u32 *regbuf; + void __iomem *rb, *reg_addr; + unsigned long flags; +@@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, + return PTR_ERR(kern_buf); + + rc = sscanf(kern_buf, "%x:%x", &addr, &len); +- if (rc < 2) { ++ if (rc < 2 || len > (UINT_MAX >> 2)) { + printk(KERN_INFO + "bfad[%d]: %s failed to read user buf\n", + bfad->inst_no, __func__); +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c +index a1d6ab76a514..99623701fc3d 100644 +--- a/drivers/scsi/hpsa.c ++++ b/drivers/scsi/hpsa.c +@@ -2951,7 +2951,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, + /* fill_cmd can't fail here, no data buffer to map. 
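+ * The wait below now uses NO_TIMEOUT rather than DEFAULT_TIMEOUT,
+ * since a device reset can take longer than the default allows.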
*/ + (void) fill_cmd(c, reset_type, h, NULL, 0, 0, + scsi3addr, TYPE_MSG); +- rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); ++ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to send reset command\n"); + goto out; +@@ -3686,7 +3686,7 @@ static int hpsa_get_volume_status(struct ctlr_info *h, + * # (integer code indicating one of several NOT READY states + * describing why a volume is to be kept offline) + */ +-static int hpsa_volume_offline(struct ctlr_info *h, ++static unsigned char hpsa_volume_offline(struct ctlr_info *h, + unsigned char scsi3addr[]) + { + struct CommandList *c; +@@ -3707,7 +3707,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, + DEFAULT_TIMEOUT); + if (rc) { + cmd_free(h, c); +- return 0; ++ return HPSA_VPD_LV_STATUS_UNSUPPORTED; + } + sense = c->err_info->SenseInfo; + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) +@@ -3718,19 +3718,13 @@ static int hpsa_volume_offline(struct ctlr_info *h, + cmd_status = c->err_info->CommandStatus; + scsi_status = c->err_info->ScsiStatus; + cmd_free(h, c); +- /* Is the volume 'not ready'? */ +- if (cmd_status != CMD_TARGET_STATUS || +- scsi_status != SAM_STAT_CHECK_CONDITION || +- sense_key != NOT_READY || +- asc != ASC_LUN_NOT_READY) { +- return 0; +- } + + /* Determine the reason for not ready state */ + ldstat = hpsa_get_volume_status(h, scsi3addr); + + /* Keep volume offline in certain cases: */ + switch (ldstat) { ++ case HPSA_LV_FAILED: + case HPSA_LV_UNDERGOING_ERASE: + case HPSA_LV_NOT_AVAILABLE: + case HPSA_LV_UNDERGOING_RPI: +@@ -3752,7 +3746,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, + default: + break; + } +- return 0; ++ return HPSA_LV_OK; + } + + /* +@@ -3825,10 +3819,10 @@ static int hpsa_update_device_info(struct ctlr_info *h, + /* Do an inquiry to the device to see what it is. 
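+ * A failed inquiry now returns HPSA_INQUIRY_FAILED; the caller skips
+ * the device for this pass but sets h->drv_req_rescan so it is
+ * retried on a later rescan rather than just logging a warning.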
*/ + if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, + (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { +- /* Inquiry failed (msg printed already) */ + dev_err(&h->pdev->dev, +- "hpsa_update_device_info: inquiry failed\n"); +- rc = -EIO; ++ "%s: inquiry failed, device will be skipped.\n", ++ __func__); ++ rc = HPSA_INQUIRY_FAILED; + goto bail_out; + } + +@@ -3857,15 +3851,19 @@ static int hpsa_update_device_info(struct ctlr_info *h, + if ((this_device->devtype == TYPE_DISK || + this_device->devtype == TYPE_ZBC) && + is_logical_dev_addr_mode(scsi3addr)) { +- int volume_offline; ++ unsigned char volume_offline; + + hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); + if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) + hpsa_get_ioaccel_status(h, scsi3addr, this_device); + volume_offline = hpsa_volume_offline(h, scsi3addr); +- if (volume_offline < 0 || volume_offline > 0xff) +- volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; +- this_device->volume_offline = volume_offline & 0xff; ++ if (volume_offline == HPSA_LV_FAILED) { ++ rc = HPSA_LV_FAILED; ++ dev_err(&h->pdev->dev, ++ "%s: LV failed, device will be skipped.\n", ++ __func__); ++ goto bail_out; ++ } + } else { + this_device->raid_level = RAID_UNKNOWN; + this_device->offload_config = 0; +@@ -4353,8 +4351,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) + goto out; + } + if (rc) { +- dev_warn(&h->pdev->dev, +- "Inquiry failed, skipping device.\n"); ++ h->drv_req_rescan = 1; + continue; + } + +@@ -5532,7 +5529,7 @@ static void hpsa_scan_complete(struct ctlr_info *h) + + spin_lock_irqsave(&h->scan_lock, flags); + h->scan_finished = 1; +- wake_up_all(&h->scan_wait_queue); ++ wake_up(&h->scan_wait_queue); + spin_unlock_irqrestore(&h->scan_lock, flags); + } + +@@ -5550,11 +5547,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh) + if (unlikely(lockup_detected(h))) + return hpsa_scan_complete(h); + ++ /* ++ * If a scan is already waiting to run, no need to add another ++ */ ++ spin_lock_irqsave(&h->scan_lock, flags); ++ if (h->scan_waiting) { ++ spin_unlock_irqrestore(&h->scan_lock, flags); ++ return; ++ } ++ ++ spin_unlock_irqrestore(&h->scan_lock, flags); ++ + /* wait until any scan already in progress is finished. */ + while (1) { + spin_lock_irqsave(&h->scan_lock, flags); + if (h->scan_finished) + break; ++ h->scan_waiting = 1; + spin_unlock_irqrestore(&h->scan_lock, flags); + wait_event(h->scan_wait_queue, h->scan_finished); + /* Note: We don't need to worry about a race between this +@@ -5564,6 +5573,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh) + */ + } + h->scan_finished = 0; /* mark scan as in progress */ ++ h->scan_waiting = 0; + spin_unlock_irqrestore(&h->scan_lock, flags); + + if (unlikely(lockup_detected(h))) +@@ -8802,6 +8812,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + init_waitqueue_head(&h->event_sync_wait_queue); + mutex_init(&h->reset_mutex); + h->scan_finished = 1; /* no scan currently in progress */ ++ h->scan_waiting = 0; + + pci_set_drvdata(pdev, h); + h->ndevices = 0; +@@ -9094,6 +9105,8 @@ static void hpsa_remove_one(struct pci_dev *pdev) + destroy_workqueue(h->rescan_ctlr_wq); + destroy_workqueue(h->resubmit_wq); + ++ hpsa_delete_sas_host(h); ++ + /* + * Call before disabling interrupts. 
+ * scsi_remove_host can trigger I/O operations especially +@@ -9128,8 +9141,6 @@ static void hpsa_remove_one(struct pci_dev *pdev) + h->lockup_detected = NULL; /* init_one 2 */ + /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ + +- hpsa_delete_sas_host(h); +- + kfree(h); /* init_one 1 */ + } + +@@ -9621,9 +9632,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) + struct sas_phy *phy = hpsa_sas_phy->phy; + + sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); +- sas_phy_free(phy); + if (hpsa_sas_phy->added_to_port) + list_del(&hpsa_sas_phy->phy_list_entry); ++ sas_phy_delete(phy); + kfree(hpsa_sas_phy); + } + +diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h +index 9ea162de80dc..e16f2945f6ac 100644 +--- a/drivers/scsi/hpsa.h ++++ b/drivers/scsi/hpsa.h +@@ -203,6 +203,7 @@ struct ctlr_info { + dma_addr_t errinfo_pool_dhandle; + unsigned long *cmd_pool_bits; + int scan_finished; ++ u8 scan_waiting : 1; + spinlock_t scan_lock; + wait_queue_head_t scan_wait_queue; + +diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h +index a584cdf07058..5961705eef76 100644 +--- a/drivers/scsi/hpsa_cmd.h ++++ b/drivers/scsi/hpsa_cmd.h +@@ -156,6 +156,7 @@ + #define CFGTBL_BusType_Fibre2G 0x00000200l + + /* VPD Inquiry types */ ++#define HPSA_INQUIRY_FAILED 0x02 + #define HPSA_VPD_SUPPORTED_PAGES 0x00 + #define HPSA_VPD_LV_DEVICE_ID 0x83 + #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 +@@ -166,6 +167,7 @@ + /* Logical volume states */ + #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff + #define HPSA_LV_OK 0x0 ++#define HPSA_LV_FAILED 0x01 + #define HPSA_LV_NOT_AVAILABLE 0x0b + #define HPSA_LV_UNDERGOING_ERASE 0x0F + #define HPSA_LV_UNDERGOING_RPI 0x12 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c +index cf04a364fd8b..2b0e61557317 100644 +--- a/drivers/scsi/scsi_debug.c ++++ b/drivers/scsi/scsi_debug.c +@@ -2996,11 +2996,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, + if (-1 == ret) { + write_unlock_irqrestore(&atomic_rw, iflags); + return DID_ERROR << 16; +- } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) ++ } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) + sdev_printk(KERN_INFO, scp->device, +- "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", ++ "%s: %s: lb size=%u, IO sent=%d bytes\n", + my_name, "write same", +- num * sdebug_sector_size, ret); ++ sdebug_sector_size, ret); + + /* Copy first sector to remaining blocks */ + for (i = 1 ; i < num ; i++) +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index 246456925335..26e6b05d05fc 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -160,7 +160,7 @@ static struct { + {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ + {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ + {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, +- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, ++ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, + {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, + {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, + {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 09fa1fd0c4ce..ace56c5e61e1 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -234,11 +234,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr, + { + struct scsi_disk *sdkp = 
to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; ++ bool v; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + +- sdp->manage_start_stop = simple_strtoul(buf, NULL, 10); ++ if (kstrtobool(buf, &v)) ++ return -EINVAL; ++ ++ sdp->manage_start_stop = v; + + return count; + } +@@ -256,6 +260,7 @@ static ssize_t + allow_restart_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) + { ++ bool v; + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + +@@ -265,7 +270,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr, + if (sdp->type != TYPE_DISK) + return -EINVAL; + +- sdp->allow_restart = simple_strtoul(buf, NULL, 10); ++ if (kstrtobool(buf, &v)) ++ return -EINVAL; ++ ++ sdp->allow_restart = v; + + return count; + } +diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c +index a5f10936fb9c..e929f5142862 100644 +--- a/drivers/soc/mediatek/mtk-pmic-wrap.c ++++ b/drivers/soc/mediatek/mtk-pmic-wrap.c +@@ -522,7 +522,7 @@ struct pmic_wrapper_type { + u32 int_en_all; + u32 spi_w; + u32 wdt_src; +- int has_bridge:1; ++ unsigned int has_bridge:1; + int (*init_reg_clock)(struct pmic_wrapper *wrp); + int (*init_soc_specific)(struct pmic_wrapper *wrp); + }; +diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c +index f1f4788dbd86..6051a7ba0797 100644 +--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c ++++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c +@@ -342,7 +342,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter) + else + RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); + +- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); ++ pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); + if (!pcmd) { + res = _FAIL; + goto exit; +@@ -522,7 +522,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu + + if (enqueue) { + /* need enqueue, prepare cmd_obj and enqueue */ +- cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); ++ cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC); + if (!cmdobj) { + res = _FAIL; + kfree(param); +diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c +index f109eeac358d..ab96629b7889 100644 +--- a/drivers/staging/vt6655/device_main.c ++++ b/drivers/staging/vt6655/device_main.c +@@ -1698,10 +1698,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state) + MACbShutdown(priv); + + pci_disable_device(pcid); +- pci_set_power_state(pcid, pci_choose_state(pcid, state)); + + spin_unlock_irqrestore(&priv->lock, flags); + ++ pci_set_power_state(pcid, pci_choose_state(pcid, state)); ++ + return 0; + } + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 0d578297d9f9..72e926d9868f 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -841,6 +841,7 @@ static int iscsit_add_reject_from_cmd( + unsigned char *buf) + { + struct iscsi_conn *conn; ++ const bool do_put = cmd->se_cmd.se_tfo != NULL; + + if (!cmd->conn) { + pr_err("cmd->conn is NULL for ITT: 0x%08x\n", +@@ -871,7 +872,7 @@ static int iscsit_add_reject_from_cmd( + * Perform the kref_put now if se_cmd has already been setup by + * scsit_setup_scsi_cmd() + */ +- if (cmd->se_cmd.se_tfo != NULL) { ++ if (do_put) { + pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); + target_put_sess_cmd(&cmd->se_cmd); + } +diff --git 
a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c +index 9cbbc9cf63fb..8a4bc15bc3f5 100644 +--- a/drivers/target/iscsi/iscsi_target_configfs.c ++++ b/drivers/target/iscsi/iscsi_target_configfs.c +@@ -1144,7 +1144,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( + + ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI); + if (ret < 0) +- return NULL; ++ goto free_out; + + ret = iscsit_tpg_add_portal_group(tiqn, tpg); + if (ret != 0) +@@ -1156,6 +1156,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( + return &tpg->tpg_se_tpg; + out: + core_tpg_deregister(&tpg->tpg_se_tpg); ++free_out: + kfree(tpg); + return NULL; + } +diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c +index 4c82bbe19003..ee5b29aed54b 100644 +--- a/drivers/target/target_core_alua.c ++++ b/drivers/target/target_core_alua.c +@@ -1010,7 +1010,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) + static void core_alua_do_transition_tg_pt_work(struct work_struct *work) + { + struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, +- struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); ++ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; + bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); +@@ -1073,17 +1073,8 @@ static int core_alua_do_transition_tg_pt( + /* + * Flush any pending transitions + */ +- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && +- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == +- ALUA_ACCESS_STATE_TRANSITION) { +- /* Just in case */ +- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; +- tg_pt_gp->tg_pt_gp_transition_complete = &wait; +- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); +- wait_for_completion(&wait); +- tg_pt_gp->tg_pt_gp_transition_complete = NULL; +- return 0; +- } ++ if (!explicit) ++ flush_work(&tg_pt_gp->tg_pt_gp_transition_work); + + /* + * Save the old primary ALUA access state, and set the current state +@@ -1114,17 +1105,9 @@ static int core_alua_do_transition_tg_pt( + atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + +- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { +- unsigned long transition_tmo; +- +- transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; +- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, +- &tg_pt_gp->tg_pt_gp_transition_work, +- transition_tmo); +- } else { ++ schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); ++ if (explicit) { + tg_pt_gp->tg_pt_gp_transition_complete = &wait; +- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, +- &tg_pt_gp->tg_pt_gp_transition_work, 0); + wait_for_completion(&wait); + tg_pt_gp->tg_pt_gp_transition_complete = NULL; + } +@@ -1692,8 +1675,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, + mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); + spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); + atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); +- INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, +- core_alua_do_transition_tg_pt_work); ++ INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, ++ core_alua_do_transition_tg_pt_work); + tg_pt_gp->tg_pt_gp_dev = dev; + atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); +@@ -1801,7 +1784,7 @@ void core_alua_free_tg_pt_gp( + dev->t10_alua.alua_tg_pt_gps_counter--; + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + 
+- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); ++ flush_work(&tg_pt_gp->tg_pt_gp_transition_work); + + /* + * Allow a struct t10_alua_tg_pt_gp_member * referenced by +diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c +index 29f807b29e74..97928b42ad62 100644 +--- a/drivers/target/target_core_file.c ++++ b/drivers/target/target_core_file.c +@@ -466,6 +466,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) + struct inode *inode = file->f_mapping->host; + int ret; + ++ if (!nolb) { ++ return 0; ++ } ++ + if (cmd->se_dev->dev_attrib.pi_prot_type) { + ret = fd_do_prot_unmap(cmd, lba, nolb); + if (ret) +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index 47463c99c318..df20921c233c 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -56,8 +56,10 @@ void core_pr_dump_initiator_port( + char *buf, + u32 size) + { +- if (!pr_reg->isid_present_at_reg) ++ if (!pr_reg->isid_present_at_reg) { + buf[0] = '\0'; ++ return; ++ } + + snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid); + } +diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c +index bcef2e7c4ec9..1eea63caa451 100644 +--- a/drivers/thermal/step_wise.c ++++ b/drivers/thermal/step_wise.c +@@ -31,8 +31,7 @@ + * If the temperature is higher than a trip point, + * a. if the trend is THERMAL_TREND_RAISING, use higher cooling + * state for this trip point +- * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling +- * state for this trip point ++ * b. if the trend is THERMAL_TREND_DROPPING, do nothing + * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit + * for this trip point + * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit +@@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance, + if (!throttle) + next_target = THERMAL_NO_TARGET; + } else { +- next_target = cur_state - 1; +- if (next_target > instance->upper) +- next_target = instance->upper; ++ if (!throttle) { ++ next_target = cur_state - 1; ++ if (next_target > instance->upper) ++ next_target = instance->upper; ++ } + } + break; + case THERMAL_TREND_DROP_FULL: +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c +index 68947f6de5ad..b0500a0a87b8 100644 +--- a/drivers/tty/tty_ldisc.c ++++ b/drivers/tty/tty_ldisc.c +@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = { + + struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) + { ++ struct tty_ldisc *ld; ++ + ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); +- if (!tty->ldisc) ++ ld = tty->ldisc; ++ if (!ld) + ldsem_up_read(&tty->ldisc_sem); +- return tty->ldisc; ++ return ld; + } + EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); + +@@ -488,41 +491,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) + tty_ldisc_debug(tty, "%p: closed\n", ld); + } + +-/** +- * tty_ldisc_restore - helper for tty ldisc change +- * @tty: tty to recover +- * @old: previous ldisc +- * +- * Restore the previous line discipline or N_TTY when a line discipline +- * change fails due to an open error +- */ +- +-static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) +-{ +- struct tty_ldisc *new_ldisc; +- int r; +- +- /* There is an outstanding reference here so this is safe */ +- old = tty_ldisc_get(tty, old->ops->num); +- WARN_ON(IS_ERR(old)); +- tty->ldisc = old; +- tty_set_termios_ldisc(tty, old->ops->num); +- if (tty_ldisc_open(tty, old) < 0) { +- 
tty_ldisc_put(old); +- /* This driver is always present */ +- new_ldisc = tty_ldisc_get(tty, N_TTY); +- if (IS_ERR(new_ldisc)) +- panic("n_tty: get"); +- tty->ldisc = new_ldisc; +- tty_set_termios_ldisc(tty, N_TTY); +- r = tty_ldisc_open(tty, new_ldisc); +- if (r < 0) +- panic("Couldn't open N_TTY ldisc for " +- "%s --- error %d.", +- tty_name(tty), r); +- } +-} +- + /** + * tty_set_ldisc - set line discipline + * @tty: the terminal to set +@@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) + + int tty_set_ldisc(struct tty_struct *tty, int disc) + { +- int retval; +- struct tty_ldisc *old_ldisc, *new_ldisc; +- +- new_ldisc = tty_ldisc_get(tty, disc); +- if (IS_ERR(new_ldisc)) +- return PTR_ERR(new_ldisc); ++ int retval, old_disc; + + tty_lock(tty); + retval = tty_ldisc_lock(tty, 5 * HZ); +@@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) + } + + /* Check the no-op case */ +- if (tty->ldisc->ops->num == disc) ++ old_disc = tty->ldisc->ops->num; ++ if (old_disc == disc) + goto out; + + if (test_bit(TTY_HUPPED, &tty->flags)) { +@@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) + goto out; + } + +- old_ldisc = tty->ldisc; +- +- /* Shutdown the old discipline. */ +- tty_ldisc_close(tty, old_ldisc); +- +- /* Now set up the new line discipline. */ +- tty->ldisc = new_ldisc; +- tty_set_termios_ldisc(tty, disc); +- +- retval = tty_ldisc_open(tty, new_ldisc); ++ retval = tty_ldisc_reinit(tty, disc); + if (retval < 0) { + /* Back to the old one or N_TTY if we can't */ +- tty_ldisc_put(new_ldisc); +- tty_ldisc_restore(tty, old_ldisc); ++ if (tty_ldisc_reinit(tty, old_disc) < 0) { ++ pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); ++ if (tty_ldisc_reinit(tty, N_TTY) < 0) { ++ /* At this point we have tty->ldisc == NULL. */ ++ pr_err("tty: reinitializing N_TTY failed\n"); ++ } ++ } + } + +- if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { ++ if (tty->ldisc && tty->ldisc->ops->num != old_disc && ++ tty->ops->set_ldisc) { + down_read(&tty->termios_rwsem); + tty->ops->set_ldisc(tty); + up_read(&tty->termios_rwsem); + } + +- /* At this point we hold a reference to the new ldisc and a +- reference to the old ldisc, or we hold two references to +- the old ldisc (if it was restored as part of error cleanup +- above). In either case, releasing a single reference from +- the old ldisc is correct. 
*/ +- new_ldisc = old_ldisc; + out: + tty_ldisc_unlock(tty); + +@@ -598,7 +553,6 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) + already running */ + tty_buffer_restart_work(tty->port); + err: +- tty_ldisc_put(new_ldisc); /* drop the extra reference */ + tty_unlock(tty); + return retval; + } +@@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) + int retval; + + ld = tty_ldisc_get(tty, disc); +- if (IS_ERR(ld)) { +- BUG_ON(disc == N_TTY); ++ if (IS_ERR(ld)) + return PTR_ERR(ld); +- } + + if (tty->ldisc) { + tty_ldisc_close(tty, tty->ldisc); +@@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) + tty_set_termios_ldisc(tty, disc); + retval = tty_ldisc_open(tty, tty->ldisc); + if (retval) { +- if (!WARN_ON(disc == N_TTY)) { +- tty_ldisc_put(tty->ldisc); +- tty->ldisc = NULL; +- } ++ tty_ldisc_put(tty->ldisc); ++ tty->ldisc = NULL; + } + return retval; + } +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 5ebe04d3598b..ba9b29bc441f 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -550,6 +550,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, + unsigned iad_num = 0; + + memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); ++ nintf = nintf_orig = config->desc.bNumInterfaces; ++ config->desc.bNumInterfaces = 0; // Adjusted later ++ + if (config->desc.bDescriptorType != USB_DT_CONFIG || + config->desc.bLength < USB_DT_CONFIG_SIZE || + config->desc.bLength > size) { +@@ -563,7 +566,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, + buffer += config->desc.bLength; + size -= config->desc.bLength; + +- nintf = nintf_orig = config->desc.bNumInterfaces; + if (nintf > USB_MAXINTERFACES) { + dev_warn(ddev, "config %d has too many interfaces: %d, " + "using maximum allowed: %d\n", +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index a3ecd8bd5324..82eea55a7b5c 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1032,10 +1032,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + return 0; + } + +- xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); +- if (!xhci->devs[slot_id]) ++ dev = kzalloc(sizeof(*dev), flags); ++ if (!dev) + return 0; +- dev = xhci->devs[slot_id]; + + /* Allocate the (output) device context that will be used in the HC. 
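+ * Note that dev is still local to this function: it is only stored
+ * in xhci->devs[slot_id] once every allocation has succeeded, so the
+ * fail: path can free the contexts and dev itself without racing
+ * anyone who reads the slot array.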
*/ + dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); +@@ -1083,9 +1082,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + &xhci->dcbaa->dev_context_ptrs[slot_id], + le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); + ++ xhci->devs[slot_id] = dev; ++ + return 1; + fail: +- xhci_free_virt_device(xhci, slot_id); ++ ++ if (dev->in_ctx) ++ xhci_free_container_ctx(xhci, dev->in_ctx); ++ if (dev->out_ctx) ++ xhci_free_container_ctx(xhci, dev->out_ctx); ++ kfree(dev); ++ + return 0; + } + +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c +index f2365a47fa4a..ce9e457e60c3 100644 +--- a/drivers/usb/host/xhci-mtk.c ++++ b/drivers/usb/host/xhci-mtk.c +@@ -632,13 +632,13 @@ static int xhci_mtk_probe(struct platform_device *pdev) + goto power_off_phys; + } + +- if (HCC_MAX_PSA(xhci->hcc_params) >= 4) +- xhci->shared_hcd->can_do_streams = 1; +- + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (ret) + goto put_usb3_hcd; + ++ if (HCC_MAX_PSA(xhci->hcc_params) >= 4) ++ xhci->shared_hcd->can_do_streams = 1; ++ + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); + if (ret) + goto dealloc_usb2_hcd; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 63735b5310bb..89a14d5f6ad8 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3132,7 +3132,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, + { + u32 maxp, total_packet_count; + +- /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ ++ /* MTK xHCI 0.96 contains some features from 1.0 */ + if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) + return ((td_total_len - transferred) >> 10); + +@@ -3141,8 +3141,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, + trb_buff_len == td_total_len) + return 0; + +- /* for MTK xHCI, TD size doesn't include this TRB */ +- if (xhci->quirks & XHCI_MTK_HOST) ++ /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */ ++ if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) + trb_buff_len = 0; + + maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); +diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c +index bacee0fd4dd3..ea5bad49394b 100644 +--- a/drivers/usb/musb/da8xx.c ++++ b/drivers/usb/musb/da8xx.c +@@ -302,7 +302,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) + musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; + portstate(musb->port1_status |= USB_PORT_STAT_POWER); + del_timer(&otg_workaround); +- } else { ++ } else if (!(musb->int_usb & MUSB_INTR_BABBLE)){ ++ /* ++ * When babble condition happens, drvvbus interrupt ++ * is also generated. Ignore this drvvbus interrupt ++ * and let babble interrupt handler recovers the ++ * controller; otherwise, the host-mode flag is lost ++ * due to the MUSB_DEV_MODE() call below and babble ++ * recovery logic will not called. 
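++ * (That is, MUSB_DEV_MODE() would clear the host-mode flag before
++ * the babble interrupt handler had a chance to run.)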
++ */
+	musb->is_active = 0;
+	MUSB_DEV_MODE(musb);
+	otg->default_a = 0;
+diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
+index db68156568e6..b3b33cf7ddf6 100644
+--- a/drivers/usb/phy/phy-isp1301.c
++++ b/drivers/usb/phy/phy-isp1301.c
+@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
+ };
+ MODULE_DEVICE_TABLE(i2c, isp1301_id);
+ 
++static const struct of_device_id isp1301_of_match[] = {
++	{.compatible = "nxp,isp1301" },
++	{ },
++};
++MODULE_DEVICE_TABLE(of, isp1301_of_match);
++
+ static struct i2c_client *isp1301_i2c_client;
+ 
+ static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
+@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
+ static struct i2c_driver isp1301_driver = {
+ 	.driver = {
+ 		.name = DRV_NAME,
++		.of_match_table = of_match_ptr(isp1301_of_match),
+ 	},
+ 	.probe = isp1301_probe,
+ 	.remove = isp1301_remove,
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 2572fd5cd2bb..b605115eb47a 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2113,6 +2113,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BROKEN_FUA ),
+ 
++/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
++UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
++		"JMicron",
++		"JMS567",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_BROKEN_FUA),
++
+ /*
+  * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
+  * JMicron responds to USN and several other SCSI ioctls with a
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index cde115359793..9f356f7cf7d5 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -142,6 +142,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
+ 
++/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
++UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
++		"JMicron",
++		"JMS567",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_BROKEN_FUA),
++
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+ 		"VIA",
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index 191b176ffedf..283a9be77a22 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -336,23 +336,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
+ 	return priv;
+ }
+ 
+-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
++static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
+ {
+ 	struct usb_device *udev = sdev->udev;
+ 	struct usb_host_endpoint *ep;
+ 	struct usb_endpoint_descriptor *epd = NULL;
++	int epnum = pdu->base.ep;
++	int dir = pdu->base.direction;
++
++	if (epnum < 0 || epnum > 15)
++		goto err_ret;
+ 
+ 	if (dir == USBIP_DIR_IN)
+ 		ep = udev->ep_in[epnum & 0x7f];
+ 	else
+ 		ep = udev->ep_out[epnum & 0x7f];
+-	if (!ep) {
+-		dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
+-			epnum);
+-		BUG();
+-	}
++	if (!ep)
++		goto err_ret;
+ 
+ 	epd = &ep->desc;
++
++	/* validate transfer_buffer_length */
++	if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
++		dev_err(&sdev->udev->dev,
++			"CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
++			pdu->u.cmd_submit.transfer_buffer_length);
++		return -1;
++	}
++
+ 	if (usb_endpoint_xfer_control(epd)) {
+ 		if (dir == USBIP_DIR_OUT)
+ 			return usb_sndctrlpipe(udev, epnum);
+@@ -375,15 +386,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+ 	}
+ 
+ 	if (usb_endpoint_xfer_isoc(epd)) {
++		/* validate packet size and number of packets */
++		unsigned int maxp, packets, bytes;
++
++		maxp = usb_endpoint_maxp(epd);
++		maxp *= usb_endpoint_maxp_mult(epd);
++		bytes = pdu->u.cmd_submit.transfer_buffer_length;
++		packets = DIV_ROUND_UP(bytes, maxp);
++
++		if (pdu->u.cmd_submit.number_of_packets < 0 ||
++		    pdu->u.cmd_submit.number_of_packets > packets) {
++			dev_err(&sdev->udev->dev,
++				"CMD_SUBMIT: isoc invalid num packets %d\n",
++				pdu->u.cmd_submit.number_of_packets);
++			return -1;
++		}
+ 		if (dir == USBIP_DIR_OUT)
+ 			return usb_sndisocpipe(udev, epnum);
+ 		else
+ 			return usb_rcvisocpipe(udev, epnum);
+ 	}
+ 
++err_ret:
+ 	/* NOT REACHED */
+-	dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
+-	return 0;
++	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
++	return -1;
+ }
+ 
+ static void masking_bogus_flags(struct urb *urb)
+@@ -447,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	struct stub_priv *priv;
+ 	struct usbip_device *ud = &sdev->ud;
+ 	struct usb_device *udev = sdev->udev;
+-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
++	int pipe = get_pipe(sdev, pdu);
++
++	if (pipe == -1)
++		return;
+ 
+ 	priv = stub_priv_alloc(sdev, pdu);
+ 	if (!priv)
+@@ -466,7 +496,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	}
+ 
+ 	/* allocate urb transfer buffer, if needed */
+-	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
++	if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
++	    pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
+ 		priv->urb->transfer_buffer =
+ 			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
+ 				GFP_KERNEL);
+diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
+index be50cef645d8..87ff94be4235 100644
+--- a/drivers/usb/usbip/stub_tx.c
++++ b/drivers/usb/usbip/stub_tx.c
+@@ -181,6 +181,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
+ 		memset(&pdu_header, 0, sizeof(pdu_header));
+ 		memset(&msg, 0, sizeof(msg));
+ 
++		if (urb->actual_length > 0 && !urb->transfer_buffer) {
++			dev_err(&sdev->udev->dev,
++				"urb: actual_length %d transfer_buffer null\n",
++				urb->actual_length);
++			return -1;
++		}
++
+ 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ 			iovnum = 2 + urb->number_of_packets;
+ 		else
+diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
+index 6c2b2ca4a909..44c2be15a08b 100644
+--- a/drivers/video/fbdev/au1200fb.c
++++ b/drivers/video/fbdev/au1200fb.c
+@@ -1681,8 +1681,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
+ 
+ 		fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
+ 					&dev->dev);
+-		if (!fbi)
++		if (!fbi) {
++			ret = -ENOMEM;
+ 			goto failed;
++		}
+ 
+ 		_au1200fb_infos[plane] = fbi;
+ 		fbdev = fbi->par;
+@@ -1700,7 +1702,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
+ 		if (!fbdev->fb_mem) {
+ 			print_err("fail to allocate frambuffer (size: %dK))",
+ 				  fbdev->fb_len / 1024);
+-			return -ENOMEM;
++			ret = -ENOMEM;
++			goto failed;
+ 		}
+ 
+ 		/*
+diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
+index 6026c60fc100..261522fabdac 100644
+--- a/drivers/video/fbdev/controlfb.h
++++ b/drivers/video/fbdev/controlfb.h
+@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
+ 	{{ 1, 2}},	/* 1152x870, 75Hz */
+ 	{{ 0, 1}},	/* 1280x960, 75Hz */
+ 	{{ 0, 1}},	/* 1280x1024, 75Hz */
++	{{ 1, 2}},	/* 1152x768, 60Hz */
++	{{ 0, 1}},	/* 1600x1024, 60Hz */
+ };
+ 
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index e9c2f7ba3c8e..53326badfb61 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
+ 
+ 	for (i = 0; i < len; i++) {
+ 		ret = usb_control_msg(dev->udev,
+-				      usb_rcvctrlpipe(dev->udev, 0), (0x02),
+-				      (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
+-				      HZ);
+-		if (ret < 1) {
+-			pr_err("Read EDID byte %d failed err %x\n", i, ret);
++				      usb_rcvctrlpipe(dev->udev, 0), 0x02,
++				      (0x80 | (0x02 << 5)), i << 8, 0xA1,
++				      rbuf, 2, USB_CTRL_GET_TIMEOUT);
++		if (ret < 2) {
++			pr_err("Read EDID byte %d failed: %d\n", i, ret);
+ 			i--;
+ 			break;
+ 		}
+diff --git a/fs/afs/callback.c b/fs/afs/callback.c
+index 1e9d2f84e5b5..1592dc613200 100644
+--- a/fs/afs/callback.c
++++ b/fs/afs/callback.c
+@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
+ {
+ 	struct afs_server *server;
+ 	struct afs_vnode *vnode, *xvnode;
+-	time_t now;
++	time64_t now;
+ 	long timeout;
+ 	int ret;
+ 
+@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
+ 
+ 	_enter("");
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 
+ 	/* find the first vnode to update */
+ 	spin_lock(&server->cb_lock);
+@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
+ 
+ 	/* and then reschedule */
+ 	_debug("reschedule");
+-	vnode->update_at = get_seconds() + afs_vnode_update_timeout;
++	vnode->update_at = ktime_get_real_seconds() +
++			afs_vnode_update_timeout;
+ 
+ 	spin_lock(&server->cb_lock);
+ 
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index 8d2c5180e015..168f2a4d1180 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -168,7 +168,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
+ 	struct afs_callback *cb;
+ 	struct afs_server *server;
+ 	__be32 *bp;
+-	u32 tmp;
+ 	int ret, loop;
+ 
+ 	_enter("{%u}", call->unmarshall);
+@@ -230,9 +229,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		tmp = ntohl(call->tmp);
+-		_debug("CB count: %u", tmp);
+-		if (tmp != call->count && tmp != 0)
++		call->count2 = ntohl(call->tmp);
++		_debug("CB count: %u", call->count2);
++		if (call->count2 != call->count && call->count2 != 0)
+ 			return -EBADMSG;
+ 		call->offset = 0;
+ 		call->unmarshall++;
+@@ -240,14 +239,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
+ 	case 4:
+ 		_debug("extract CB array");
+ 		ret = afs_extract_data(call, call->buffer,
+-				       call->count * 3 * 4, false);
++				       call->count2 * 3 * 4, false);
+ 		if (ret < 0)
+ 			return ret;
+ 
+ 		_debug("unmarshall CB array");
+ 		cb = call->request;
+ 		bp = call->buffer;
+-		for (loop = call->count; loop > 0; loop--, cb++) {
++		for (loop = call->count2; loop > 0; loop--, cb++) {
+ 			cb->version = ntohl(*bp++);
+ 			cb->expiry = ntohl(*bp++);
+ 			cb->type = ntohl(*bp++);
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 6344aee4ac4b..72372970725b 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -29,6 +29,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
+ 
+ const struct file_operations afs_file_operations = {
+ 	.open		= afs_open,
++	.flush		= afs_flush,
+ 	.release	= afs_release,
+ 	.llseek		= generic_file_llseek,
+ 	.read_iter	= generic_file_read_iter,
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index 31c616ab9b40..88e440607ed7 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -105,7 +105,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
+ 		vnode->vfs_inode.i_mode = mode;
+ 	}
+ 
+-	vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_server;
++	vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_client;
+ 	vnode->vfs_inode.i_mtime	= vnode->vfs_inode.i_ctime;
+ 	vnode->vfs_inode.i_atime	= vnode->vfs_inode.i_ctime;
+ 	vnode->vfs_inode.i_version	= data_version;
+@@ -139,7 +139,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
+ 	vnode->cb_version	= ntohl(*bp++);
+ 	vnode->cb_expiry	= ntohl(*bp++);
+ 	vnode->cb_type		= ntohl(*bp++);
+-	vnode->cb_expires	= vnode->cb_expiry + get_seconds();
++	vnode->cb_expires	= vnode->cb_expiry + ktime_get_real_seconds();
+ 	*_bp = bp;
+ }
+ 
+@@ -676,8 +676,8 @@ int afs_fs_create(struct afs_server *server,
+ 		memset(bp, 0, padsz);
+ 		bp = (void *) bp + padsz;
+ 	}
+-	*bp++ = htonl(AFS_SET_MODE);
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
+@@ -945,8 +945,8 @@ int afs_fs_symlink(struct afs_server *server,
+ 		memset(bp, 0, c_padsz);
+ 		bp = (void *) bp + c_padsz;
+ 	}
+-	*bp++ = htonl(AFS_SET_MODE);
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = htonl(S_IRWXUGO); /* unix mode */
+@@ -1145,8 +1145,8 @@ static int afs_fs_store_data64(struct afs_server *server,
+ 	*bp++ = htonl(vnode->fid.vnode);
+ 	*bp++ = htonl(vnode->fid.unique);
+ 
+-	*bp++ = 0; /* mask */
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MTIME); /* mask */
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = 0; /* unix mode */
+@@ -1178,7 +1178,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
+ 	_enter(",%x,{%x:%u},,",
+ 	       key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
+ 
+-	size = to - offset;
++	size = (loff_t)to - (loff_t)offset;
+ 	if (first != last)
+ 		size += (loff_t)(last - first) << PAGE_SHIFT;
+ 	pos = (loff_t)first << PAGE_SHIFT;
+@@ -1222,8 +1222,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
+ 	*bp++ = htonl(vnode->fid.vnode);
+ 	*bp++ = htonl(vnode->fid.unique);
+ 
+-	*bp++ = 0; /* mask */
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MTIME); /* mask */
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = 0; /* unix mode */
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 86cc7264c21c..42582e41948f 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -70,9 +70,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
+ 
+ 	set_nlink(inode, vnode->status.nlink);
+ 	inode->i_uid		= vnode->status.owner;
+-	inode->i_gid		= GLOBAL_ROOT_GID;
++	inode->i_gid		= vnode->status.group;
+ 	inode->i_size		= vnode->status.size;
+-	inode->i_ctime.tv_sec	= vnode->status.mtime_server;
++	inode->i_ctime.tv_sec	= vnode->status.mtime_client;
+ 	inode->i_ctime.tv_nsec	= 0;
+ 	inode->i_atime		= inode->i_mtime = inode->i_ctime;
+ 	inode->i_blocks		= 0;
+@@ -245,12 +245,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
+ 			vnode->cb_version = 0;
+ 			vnode->cb_expiry = 0;
+ 			vnode->cb_type = 0;
+-			vnode->cb_expires = get_seconds();
++			vnode->cb_expires = ktime_get_real_seconds();
+ 		} else {
+ 			vnode->cb_version = cb->version;
+ 			vnode->cb_expiry = cb->expiry;
+ 			vnode->cb_type = cb->type;
+-			vnode->cb_expires = vnode->cb_expiry + get_seconds();
++			vnode->cb_expires = vnode->cb_expiry +
++				ktime_get_real_seconds();
+ 		}
+ 	}
+ 
+@@ -323,7 +324,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
+ 	    !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
+ 	    !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
+ 	    !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+-		if (vnode->cb_expires < get_seconds() + 10) {
++		if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
+ 			_debug("callback expired");
+ 			set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+ 		} else {
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 535a38d2c1d0..dd98dcda6a3f 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -11,6 +11,7 @@
+ 
+ #include <linux/compiler.h>
+ #include <linux/kernel.h>
++#include <linux/ktime.h>
+ #include <linux/fs.h>
+ #include <linux/pagemap.h>
+ #include <linux/rxrpc.h>
+@@ -105,7 +106,10 @@ struct afs_call {
+ 	unsigned	request_size;	/* size of request data */
+ 	unsigned	reply_max;	/* maximum size of reply */
+ 	unsigned	first_offset;	/* offset into mapping[first] */
+-	unsigned	last_to;	/* amount of mapping[last] */
++	union {
++		unsigned	last_to;	/* amount of mapping[last] */
++		unsigned	count2;		/* count used in unmarshalling */
++	};
+ 	unsigned char	unmarshall;	/* unmarshalling phase */
+ 	bool		incoming;	/* T if incoming call */
+ 	bool		send_pages;	/* T if data from mapping should be sent */
+@@ -242,7 +246,7 @@ struct afs_cache_vhash {
+  */
+ struct afs_vlocation {
+ 	atomic_t	usage;
+-	time_t		time_of_death;	/* time at which put reduced usage to 0 */
++	time64_t	time_of_death;	/* time at which put reduced usage to 0 */
+ 	struct list_head link;		/* link in cell volume location list */
+ 	struct list_head grave;		/* link in master graveyard list */
+ 	struct list_head update;	/* link in master update list */
+@@ -253,7 +257,7 @@ struct afs_vlocation {
+ 	struct afs_cache_vlocation vldb;	/* volume information DB record */
+ 	struct afs_volume *vols[3];	/* volume access record pointer (index by type) */
+ 	wait_queue_head_t waitq;	/* status change waitqueue */
+-	time_t		update_at;	/* time at which record should be updated */
++	time64_t	update_at;	/* time at which record should be updated */
+ 	spinlock_t	lock;		/* access lock */
+ 	afs_vlocation_state_t state;	/* volume location state */
+ 	unsigned short	upd_rej_cnt;	/* ENOMEDIUM count during update */
+@@ -266,7 +270,7 @@ struct afs_vlocation {
+  */
+ struct afs_server {
+ 	atomic_t	usage;
+-	time_t		time_of_death;	/* time at which put reduced usage to 0 */
++	time64_t	time_of_death;	/* time at which put reduced usage to 0 */
+ 	struct in_addr	addr;		/* server address */
+ 	struct afs_cell	*cell;		/* cell in which server resides */
+ 	struct list_head link;		/* link in cell's server list */
+@@ -369,8 +373,8 @@ struct afs_vnode {
+ 	struct rb_node	server_rb;	/* link in server->fs_vnodes */
+ 	struct rb_node	cb_promise;	/* link in server->cb_promises */
+ 	struct work_struct cb_broken_work;	/* work to be done on callback break */
+-	time_t		cb_expires;	/* time at which callback expires */
+-	time_t		cb_expires_at;	/* time used to order cb_promise */
++	time64_t	cb_expires;	/* time at which callback expires */
++	time64_t	cb_expires_at;	/* time used to order cb_promise */
+ 	unsigned	cb_version;	/* callback version */
+ 	unsigned	cb_expiry;	/* callback expiry time */
+ 	afs_callback_type_t cb_type;	/* type of callback */
+@@ -749,6 +753,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
+ extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
+ extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
+ extern int afs_writeback_all(struct afs_vnode *);
++extern int afs_flush(struct file *, fl_owner_t);
+ extern int afs_fsync(struct file *, loff_t, loff_t, int);
+ 
+ 
+diff --git a/fs/afs/misc.c b/fs/afs/misc.c
+index 91ea1aa0d8b3..100b207efc9e 100644
+--- a/fs/afs/misc.c
++++ b/fs/afs/misc.c
+@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
+ 	case RXKADDATALEN:	return -EKEYREJECTED;
+ 	case RXKADILLEGALLEVEL:	return -EKEYREJECTED;
+ 
++	case RXGEN_OPCODE:	return -ENOTSUPP;
++
+ 	default:		return -EREMOTEIO;
+ 	}
+ }
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index 25f05a8d21b1..523b1d3ca2c6 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -321,6 +321,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
+ 	struct rxrpc_call *rxcall;
+ 	struct msghdr msg;
+ 	struct kvec iov[1];
++	size_t offset;
++	u32 abort_code;
+ 	int ret;
+ 
+ 	_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
+@@ -368,9 +370,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
+ 	msg.msg_controllen	= 0;
+ 	msg.msg_flags		= (call->send_pages ? MSG_MORE : 0);
+ 
+-	/* have to change the state *before* sending the last packet as RxRPC
+-	 * might give us the reply before it returns from sending the
+-	 * request */
++	/* We have to change the state *before* sending the last packet as
++	 * rxrpc might give us the reply before it returns from sending the
++	 * request.  Further, if the send fails, we may already have been given
++	 * a notification and may have collected it.
++	 */
+ 	if (!call->send_pages)
+ 		call->state = AFS_CALL_AWAIT_REPLY;
+ 	ret = rxrpc_kernel_send_data(afs_socket, rxcall,
+@@ -389,7 +393,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
+ 	return wait_mode->wait(call);
+ 
+ error_do_abort:
+-	rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
++	call->state = AFS_CALL_COMPLETE;
++	if (ret != -ECONNABORTED) {
++		rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
++					-ret, "KSD");
++	} else {
++		abort_code = 0;
++		offset = 0;
++		rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
++				       false, &abort_code);
++		ret = call->type->abort_to_error(abort_code);
++	}
+ error_kill_call:
+ 	afs_end_call(call);
+ 	_leave(" = %d", ret);
+@@ -434,16 +448,18 @@ static void afs_deliver_to_call(struct afs_call *call)
+ 		case -EINPROGRESS:
+ 		case -EAGAIN:
+ 			goto out;
++		case -ECONNABORTED:
++			goto call_complete;
+ 		case -ENOTCONN:
+ 			abort_code = RX_CALL_DEAD;
+ 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ 						abort_code, -ret, "KNC");
+-			goto do_abort;
++			goto save_error;
+ 		case -ENOTSUPP:
+-			abort_code = RX_INVALID_OPERATION;
++			abort_code = RXGEN_OPCODE;
+ 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ 						abort_code, -ret, "KIV");
+-			goto do_abort;
++			goto save_error;
+ 		case -ENODATA:
+ 		case -EBADMSG:
+ 		case -EMSGSIZE:
+@@ -453,7 +469,7 @@ static void afs_deliver_to_call(struct afs_call *call)
+ 			abort_code = RXGEN_SS_UNMARSHAL;
+ 			rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+ 						abort_code, EBADMSG, "KUM");
+-			goto do_abort;
++			goto save_error;
+ 		}
+ 	}
+ 
+@@ -464,8 +480,9 @@ static void afs_deliver_to_call(struct afs_call *call)
+ 	_leave("");
+ 	return;
+ 
+-do_abort:
++save_error:
+ 	call->error = ret;
++call_complete:
+ 	call->state = AFS_CALL_COMPLETE;
+ 	goto done;
+ }
+@@ -475,7 +492,6 @@ static void afs_deliver_to_call(struct afs_call *call)
+  */
+ static int afs_wait_for_call_to_complete(struct afs_call *call)
+ {
+-	const char *abort_why;
+ 	int ret;
+ 
+ 	DECLARE_WAITQUEUE(myself, current);
+@@ -494,13 +510,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
+ 			continue;
+ 		}
+ 
+-		abort_why = "KWC";
+-		ret = call->error;
+-		if (call->state == AFS_CALL_COMPLETE)
+-			break;
+-		abort_why = "KWI";
+-		ret = -EINTR;
+-		if (signal_pending(current))
++		if (call->state == AFS_CALL_COMPLETE ||
++		    signal_pending(current))
+ 			break;
+ 		schedule();
+ 	}
+@@ -508,13 +519,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
+ 	remove_wait_queue(&call->waitq, &myself);
+ 	__set_current_state(TASK_RUNNING);
+ 
+-	/* kill the call */
++	/* Kill off the call if it's still live. */
+ 	if (call->state < AFS_CALL_COMPLETE) {
+-		_debug("call incomplete");
++		_debug("call interrupted");
+ 		rxrpc_kernel_abort_call(afs_socket, call->rxcall,
+-					RX_CALL_DEAD, -ret, abort_why);
++					RX_USER_ABORT, -EINTR, "KWI");
+ 	}
+ 
++	ret = call->error;
+ 	_debug("call complete");
+ 	afs_end_call(call);
+ 	_leave(" = %d", ret);
+diff --git a/fs/afs/security.c b/fs/afs/security.c
+index 8d010422dc89..bfa9d3428383 100644
+--- a/fs/afs/security.c
++++ b/fs/afs/security.c
+@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
+ 	} else {
+ 		if (!(access & AFS_ACE_LOOKUP))
+ 			goto permission_denied;
++		if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
++			goto permission_denied;
+ 		if (mask & (MAY_EXEC | MAY_READ)) {
+ 			if (!(access & AFS_ACE_READ))
+ 				goto permission_denied;
++			if (!(inode->i_mode & S_IRUSR))
++				goto permission_denied;
+ 		} else if (mask & MAY_WRITE) {
+ 			if (!(access & AFS_ACE_WRITE))
+ 				goto permission_denied;
++			if (!(inode->i_mode & S_IWUSR))
++				goto permission_denied;
+ 		}
+ 	}
+ 
+ 	key_put(key);
+-	ret = generic_permission(inode, mask);
+ 	_leave(" = %d", ret);
+ 	return ret;
+ 
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index d4066ab7dd55..c001b1f2455f 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
+ 	spin_lock(&afs_server_graveyard_lock);
+ 	if (atomic_read(&server->usage) == 0) {
+ 		list_move_tail(&server->grave, &afs_server_graveyard);
+-		server->time_of_death = get_seconds();
++		server->time_of_death = ktime_get_real_seconds();
+ 		queue_delayed_work(afs_wq, &afs_server_reaper,
+ 				   afs_server_timeout * HZ);
+ 	}
+@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
+ 	LIST_HEAD(corpses);
+ 	struct afs_server *server;
+ 	unsigned long delay, expiry;
+-	time_t now;
++	time64_t now;
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 	spin_lock(&afs_server_graveyard_lock);
+ 
+ 	while (!list_empty(&afs_server_graveyard)) {
+diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
+index 45a86396fd2d..92bd5553b8c9 100644
+--- a/fs/afs/vlocation.c
++++ b/fs/afs/vlocation.c
+@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
+ 	struct afs_vlocation *xvl;
+ 
+ 	/* wait at least 10 minutes before updating... */
+-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
++	vl->update_at = ktime_get_real_seconds() +
++			afs_vlocation_update_timeout;
+ 
+ 	spin_lock(&afs_vlocation_updates_lock);
+ 
+@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
+ 	if (atomic_read(&vl->usage) == 0) {
+ 		_debug("buried");
+ 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
+-		vl->time_of_death = get_seconds();
++		vl->time_of_death = ktime_get_real_seconds();
+ 		queue_delayed_work(afs_wq, &afs_vlocation_reap,
+ 				   afs_vlocation_timeout * HZ);
+ 
+@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
+ 	LIST_HEAD(corpses);
+ 	struct afs_vlocation *vl;
+ 	unsigned long delay, expiry;
+-	time_t now;
++	time64_t now;
+ 
+ 	_enter("");
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 	spin_lock(&afs_vlocation_graveyard_lock);
+ 
+ 	while (!list_empty(&afs_vlocation_graveyard)) {
+@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
+ {
+ 	struct afs_cache_vlocation vldb;
+ 	struct afs_vlocation *vl, *xvl;
+-	time_t now;
++	time64_t now;
+ 	long timeout;
+ 	int ret;
+ 
+ 	_enter("");
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 
+ 	/* find a record to update */
+ 	spin_lock(&afs_vlocation_updates_lock);
+@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
+ 
+ 	/* and then reschedule */
+ 	_debug("reschedule");
+-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
++	vl->update_at = ktime_get_real_seconds() +
++			afs_vlocation_update_timeout;
+ 
+ 	spin_lock(&afs_vlocation_updates_lock);
+ 
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index f865c3f05bea..3fba2b573c86 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -148,12 +148,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 		kfree(candidate);
+ 		return -ENOMEM;
+ 	}
+-	*pagep = page;
+-	/* page won't leak in error case: it eventually gets cleaned off LRU */
+ 
+ 	if (!PageUptodate(page) && len != PAGE_SIZE) {
+ 		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+ 		if (ret < 0) {
++			unlock_page(page);
++			put_page(page);
+ 			kfree(candidate);
+ 			_leave(" = %d [prep]", ret);
+ 			return ret;
+@@ -161,6 +161,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 		SetPageUptodate(page);
+ 	}
+ 
++	/* page won't leak in error case: it eventually gets cleaned off LRU */
++	*pagep = page;
++
+ try_again:
+ 	spin_lock(&vnode->writeback_lock);
+ 
+@@ -296,10 +299,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
+ 	ASSERTCMP(pv.nr, ==, count);
+ 
+ 	for (loop = 0; loop < count; loop++) {
+-		ClearPageUptodate(pv.pages[loop]);
++		struct page *page = pv.pages[loop];
++		ClearPageUptodate(page);
+ 		if (error)
+-			SetPageError(pv.pages[loop]);
+-		end_page_writeback(pv.pages[loop]);
++			SetPageError(page);
++		if (PageWriteback(page))
++			end_page_writeback(page);
++		if (page->index >= first)
++			first = page->index + 1;
+ 	}
+ 
+ 	__pagevec_release(&pv);
+@@ -502,6 +509,7 @@ static int afs_writepages_region(struct address_space *mapping,
+ 
+ 		if (PageWriteback(page) || !PageDirty(page)) {
+ 			unlock_page(page);
++			put_page(page);
+ 			continue;
+ 		}
+ 
+@@ -734,6 +742,20 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ 	return ret;
+ }
+ 
++/*
++ * Flush out all outstanding writes on a file opened for writing when it is
++ * closed.
++ */
++int afs_flush(struct file *file, fl_owner_t id)
++{
++	_enter("");
++
++	if ((file->f_mode & FMODE_WRITE) == 0)
++		return 0;
++
++	return vfs_fsync(file, 0);
++}
++
+ /*
+  * notification that a previously read-only page is about to become writable
+  * - if it returns an error, the caller will deliver a bus error signal
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index 4c71dba90120..0ea31a53fd5b 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -176,7 +176,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ 
+ 	mutex_unlock(&sbi->wq_mutex);
+ 
+-	if (autofs4_write(sbi, pipe, &pkt, pktsz))
+ 	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+ 	case 0:
+ 		break;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f089d7d8afe7..894d56361ea9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6812,6 +6812,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
+ 	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
+ 	ret = btrfs_decompress(compress_type, tmp, page,
+ 			       extent_offset, inline_size, max_size);
++
++	/*
++	 * decompression code contains a memset to fill in any space between the end
++	 * of the uncompressed data and the end of max_size in case the decompressed
++	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
++	 * the end of an inline extent and the beginning of the next block, so we
++	 * cover that region here.
++	 */
++
++	if (max_size + pg_offset < PAGE_SIZE) {
++		char *map = kmap(page);
++		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
++		kunmap(page);
++	}
+ 	kfree(tmp);
+ 	return ret;
+ }
+diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
+index 6e144048a72e..a724d9a79bd2 100644
+--- a/fs/btrfs/tests/free-space-tree-tests.c
++++ b/fs/btrfs/tests/free-space-tree-tests.c
+@@ -501,7 +501,8 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+ 		test_msg("Couldn't allocate path\n");
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto out;
+ 	}
+ 
+ 	ret = add_block_group_free_space(&trans, root->fs_info, cache);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index c0f52c443c34..3d2639c30018 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1396,6 +1396,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
+ 	return request_close_session(mdsc, session);
+ }
+ 
++static bool drop_negative_children(struct dentry *dentry)
++{
++	struct dentry *child;
++	bool all_negative = true;
++
++	if (!d_is_dir(dentry))
++		goto out;
++
++	spin_lock(&dentry->d_lock);
++	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
++		if (d_really_is_positive(child)) {
++			all_negative = false;
++			break;
++		}
++	}
++	spin_unlock(&dentry->d_lock);
++
++	if (all_negative)
++		shrink_dcache_parent(dentry);
++out:
++	return all_negative;
++}
++
+ /*
+  * Trim old(er) caps.
+  *
+@@ -1441,16 +1464,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
+ 	if ((used | wanted) & ~oissued & mine)
+ 		goto out;   /* we need these caps */
+ 
+-	session->s_trim_caps--;
+ 	if (oissued) {
+ 		/* we aren't the only cap.. just remove us */
+ 		__ceph_remove_cap(cap, true);
++		session->s_trim_caps--;
+ 	} else {
++		struct dentry *dentry;
+ 		/* try dropping referring dentries */
+ 		spin_unlock(&ci->i_ceph_lock);
+-		d_prune_aliases(inode);
+-		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+-		     inode, cap, atomic_read(&inode->i_count));
++		dentry = d_find_any_alias(inode);
++		if (dentry && drop_negative_children(dentry)) {
++			int count;
++			dput(dentry);
++			d_prune_aliases(inode);
++			count = atomic_read(&inode->i_count);
++			if (count == 1)
++				session->s_trim_caps--;
++			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
++			     inode, cap, count);
++		} else {
++			dput(dentry);
++		}
+ 		return 0;
+ 	}
+ 
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index a77cbc5b657b..1a0c57100f28 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4731,6 +4731,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ 					EXT4_INODE_EOFBLOCKS);
+ 		}
+ 		ext4_mark_inode_dirty(handle, inode);
++		ext4_update_inode_fsync_trans(handle, inode, 1);
+ 		ret2 = ext4_journal_stop(handle);
+ 		if (ret2)
+ 			break;
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4438b93f6fd6..b1766a67d2eb 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1417,6 +1417,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
+ 			       "falling back\n"));
+ 	}
+ 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
++	if (!nblocks) {
++		ret = NULL;
++		goto cleanup_and_exit;
++	}
+ 	start = EXT4_I(dir)->i_dir_start_lookup;
+ 	if (start >= nblocks)
+ 		start = 0;
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 05713a5da083..0703a1179847 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -173,19 +173,33 @@ static void wb_wakeup(struct bdi_writeback *wb)
+ 	spin_unlock_bh(&wb->work_lock);
+ }
+ 
++static void finish_writeback_work(struct bdi_writeback *wb,
++				  struct wb_writeback_work *work)
++{
++	struct wb_completion *done = work->done;
++
++	if (work->auto_free)
++		kfree(work);
++	if (done && atomic_dec_and_test(&done->cnt))
++		wake_up_all(&wb->bdi->wb_waitq);
++}
++
+ static void wb_queue_work(struct bdi_writeback *wb,
+ 			  struct wb_writeback_work *work)
+ {
+ 	trace_writeback_queue(wb, work);
+ 
+-	spin_lock_bh(&wb->work_lock);
+-	if (!test_bit(WB_registered, &wb->state))
+-		goto out_unlock;
+ 	if (work->done)
+ 		atomic_inc(&work->done->cnt);
+-	list_add_tail(&work->list, &wb->work_list);
+-	mod_delayed_work(bdi_wq, &wb->dwork, 0);
+-out_unlock:
++
++	spin_lock_bh(&wb->work_lock);
++
++	if (test_bit(WB_registered, &wb->state)) {
++		list_add_tail(&work->list, &wb->work_list);
++		mod_delayed_work(bdi_wq, &wb->dwork, 0);
++	} else
++		finish_writeback_work(wb, work);
++
+ 	spin_unlock_bh(&wb->work_lock);
+ }
+ 
+@@ -1875,16 +1889,9 @@ static long wb_do_writeback(struct bdi_writeback *wb)
+ 
+ 	set_bit(WB_writeback_running, &wb->state);
+ 	while ((work = get_next_work_item(wb)) != NULL) {
+-		struct wb_completion *done = work->done;
+-
+ 		trace_writeback_exec(wb, work);
+-
+ 		wrote += wb_writeback(wb, work);
+-
+-		if (work->auto_free)
+-			kfree(work);
+-		if (done && atomic_dec_and_test(&done->cnt))
+-			wake_up_all(&wb->bdi->wb_waitq);
++		finish_writeback_work(wb, work);
+ 	}
+ 
+ 	/*
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index e23ff70b3435..39c382f16272 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -256,7 +256,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
+ 		goto out;
+ 	}
+ 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
+-		if (flags & GFS2_DIF_JDATA)
++		if (new_flags & GFS2_DIF_JDATA)
+ 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ 		error = filemap_fdatawrite(inode->i_mapping);
+ 		if (error)
+@@ -264,6 +264,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
+ 		error = filemap_fdatawait(inode->i_mapping);
+ 		if (error)
+ 			goto out;
++		if (new_flags & GFS2_DIF_JDATA)
++			gfs2_ordered_del_inode(ip);
+ 	}
+ 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ 	if (error)
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 074ac7131459..f6b0848cc831 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -1004,9 +1004,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
+ 	server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ 	server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+ 
+-	if (server->rsize > server_resp_sz)
++	if (!server->rsize || server->rsize > server_resp_sz)
+ 		server->rsize = server_resp_sz;
+-	if (server->wsize > server_rqst_sz)
++	if (!server->wsize || server->wsize > server_rqst_sz)
+ 		server->wsize = server_rqst_sz;
+ #endif /* CONFIG_NFS_V4_1 */
+ }
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index e4772a8340f8..9905735463a4 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1859,6 +1859,8 @@ int nfs_commit_inode(struct inode *inode, int how)
+ 	if (res)
+ 		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
+ 	nfs_commit_end(cinfo.mds);
++	if (res == 0)
++		return res;
+ 	if (error < 0)
+ 		goto out_error;
+ 	if (!may_wait)
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 1645b977c9c6..5c4800626f13 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -155,7 +155,8 @@ int nfsd_vers(int vers, enum vers_op change)
+ 
+ int nfsd_minorversion(u32 minorversion, enum vers_op change)
+ {
+-	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
++	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
++	    change != NFSD_AVAIL)
+ 		return -1;
+ 	switch(change) {
+ 	case NFSD_SET:
+@@ -399,23 +400,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+ 
+ void nfsd_reset_versions(void)
+ {
+-	int found_one = 0;
+ 	int i;
+ 
+-	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
+-		if (nfsd_program.pg_vers[i])
+-			found_one = 1;
+-	}
+-
+-	if (!found_one) {
+-		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
+-			nfsd_program.pg_vers[i] = nfsd_version[i];
+-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+-		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
+-			nfsd_acl_program.pg_vers[i] =
+-				nfsd_acl_version[i];
+-#endif
+-	}
++	for (i = 0; i < NFSD_NRVERS; i++)
++		if (nfsd_vers(i, NFSD_TEST))
++			return;
++
++	for (i = 0; i < NFSD_NRVERS; i++)
++		if (i != 4)
++			nfsd_vers(i, NFSD_SET);
++		else {
++			int minor = 0;
++			while (nfsd_minorversion(minor, NFSD_SET) >= 0)
++				minor++;
++		}
+ }
+ 
+ /*
+diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
+index 15f327bed8c6..7340c36978a3 100644
+--- a/fs/proc/proc_tty.c
++++ b/fs/proc/proc_tty.c
+@@ -14,6 +14,7 @@
+ #include <linux/tty.h>
+ #include <linux/seq_file.h>
+ #include <linux/bitops.h>
++#include "internal.h"
+ 
+ /*
+  * The /proc/tty directory inodes...
+@@ -164,7 +165,7 @@ void proc_tty_unregister_driver(struct tty_driver *driver)
+ 	if (!ent)
+ 		return;
+ 
+-	remove_proc_entry(driver->driver_name, proc_tty_driver);
++	remove_proc_entry(ent->name, proc_tty_driver);
+ 
+ 	driver->proc_entry = NULL;
+ }
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 4942549e7dc8..4b1f6d5372c3 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -710,7 +710,7 @@ static loff_t udf_check_vsd(struct super_block *sb)
+ 	else
+ 		sectorsize = sb->s_blocksize;
+ 
+-	sector += (sbi->s_session << sb->s_blocksize_bits);
++	sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+ 
+ 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
+ 		  (unsigned int)(sector >> sb->s_blocksize_bits),
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index b86054cc41db..784d667475ae 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -419,7 +419,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
+ 		 * in such case.
+ 		 */
+ 		down_read(&mm->mmap_sem);
+-		ret = 0;
++		ret = VM_FAULT_NOPAGE;
+ 	}
+ 
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 7eb99701054f..8ad65d43b65d 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -2713,7 +2713,7 @@ xfs_bmap_add_extent_unwritten_real(
+ 				&i)))
+ 			goto done;
+ 		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+-		cur->bc_rec.b.br_state = XFS_EXT_NORM;
++		cur->bc_rec.b.br_state = new->br_state;
+ 		if ((error = xfs_btree_insert(cur, &i)))
+ 			goto done;
+ 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 5b81f7f41b80..33c389934238 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -870,22 +870,6 @@ xfs_setattr_size(
+ 	if (error)
+ 		return error;
+ 
+-	/*
+-	 * We are going to log the inode size change in this transaction so
+-	 * any previous writes that are beyond the on disk EOF and the new
+-	 * EOF that have not been written out need to be written here.  If we
+-	 * do not write the data out, we expose ourselves to the null files
+-	 * problem. Note that this includes any block zeroing we did above;
+-	 * otherwise those blocks may not be zeroed after a crash.
+-	 */
+-	if (did_zeroing ||
+-	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
+-		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+-						     ip->i_d.di_size, newsize);
+-		if (error)
+-			return error;
+-	}
+-
+ 	/*
+ 	 * We've already locked out new page faults, so now we can safely remove
+ 	 * pages from the page cache knowing they won't get refaulted until we
+@@ -902,9 +886,29 @@ xfs_setattr_size(
+ 	 * user visible changes). There's not much we can do about this, except
+ 	 * to hope that the caller sees ENOMEM and retries the truncate
+ 	 * operation.
++	 *
++	 * And we update in-core i_size and truncate page cache beyond newsize
++	 * before writeback the [di_size, newsize] range, so we're guaranteed
++	 * not to write stale data past the new EOF on truncate down.
+ 	 */
+ 	truncate_setsize(inode, newsize);
+ 
++	/*
++	 * We are going to log the inode size change in this transaction so
++	 * any previous writes that are beyond the on disk EOF and the new
++	 * EOF that have not been written out need to be written here.  If we
++	 * do not write the data out, we expose ourselves to the null files
++	 * problem. Note that this includes any block zeroing we did above;
++	 * otherwise those blocks may not be zeroed after a crash.
++	 */
++	if (did_zeroing ||
++	    (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
++		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
++						     ip->i_d.di_size, newsize - 1);
++		if (error)
++			return error;
++	}
++
+ 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ 	if (error)
+ 		return error;
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 05909269f973..1e26f4504eed 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -753,7 +753,7 @@ xlog_find_head(
+ 	 * in the in-core log.  The following number can be made tighter if
+ 	 * we actually look at the block size of the filesystem.
+ 	 */
+-	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
++	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
+ 	if (head_blk >= num_scan_bblks) {
+ 		/*
+ 		 * We are guaranteed that the entire check can be performed
+diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
+index f6d9af3efa45..cac57358f7af 100644
+--- a/include/crypto/internal/hash.h
++++ b/include/crypto/internal/hash.h
+@@ -80,6 +80,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
+ 			    struct ahash_instance *inst);
+ void ahash_free_instance(struct crypto_instance *inst);
+ 
++int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
++		    unsigned int keylen);
++
++static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
++{
++	return alg->setkey != shash_no_setkey;
++}
++
+ int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ 			    struct hash_alg_common *alg,
+ 			    struct crypto_instance *inst);
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 61a3d90f32b3..ca2b4c4aec42 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -276,11 +276,8 @@ bool acpi_processor_validate_proc_id(int proc_id);
+ /* Arch dependent functions for cpu hotplug support */
+ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
+ int acpi_unmap_cpu(int cpu);
+-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
+ #endif /* CONFIG_ACPI_HOTPLUG_CPU */
+ 
+-void acpi_set_processor_mapping(void);
+-
+ #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+ int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
+ #endif
+diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
+index 80faf44b8887..dd1b009106a5 100644
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -476,6 +476,7 @@ enum {
+ enum {
+ 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
+ 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
++	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
+ };
+ 
+ #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+diff --git a/include/linux/mman.h b/include/linux/mman.h
+index 634c4c51fe3a..c540001ca861 100644
+--- a/include/linux/mman.h
++++ b/include/linux/mman.h
+@@ -63,8 +63,9 @@ static inline bool arch_validate_prot(unsigned long prot)
+  * ("bit1" and "bit2" must be single bits)
+  */
+ #define _calc_vm_trans(x, bit1, bit2) \
++	((!(bit1) || !(bit2)) ? 0 : \
+ 	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
+-	: ((x) & (bit1)) / ((bit1) / (bit2)))
++	: ((x) & (bit1)) / ((bit1) / (bit2))))
+ 
+ /*
+  * Combine the mmap "prot" argument into "vm_flags" used internally.
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index 1beab5532035..818a38f99221 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -243,10 +243,11 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
+ static inline enum ib_mtu iboe_get_mtu(int mtu)
+ {
+ 	/*
+-	 * reduce IB headers from effective IBoE MTU. 28 stands for
+-	 * atomic header which is the biggest possible header after BTH
++	 * Reduce IB headers from effective IBoE MTU.
+ 	 */
+-	mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
++	mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
++		     IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
++		     IB_ICRC_BYTES);
+ 
+ 	if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
+ 		return IB_MTU_4096;
+diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
+index b13419ce99ff..e02b78a38eba 100644
+--- a/include/rdma/ib_pack.h
++++ b/include/rdma/ib_pack.h
+@@ -37,14 +37,17 @@
+ #include <uapi/linux/if_ether.h>
+ 
+ enum {
+-	IB_LRH_BYTES  = 8,
+-	IB_ETH_BYTES  = 14,
+-	IB_VLAN_BYTES = 4,
+-	IB_GRH_BYTES  = 40,
+-	IB_IP4_BYTES  = 20,
+-	IB_UDP_BYTES  = 8,
+-	IB_BTH_BYTES  = 12,
+-	IB_DETH_BYTES = 8
++	IB_LRH_BYTES		= 8,
++	IB_ETH_BYTES		= 14,
++	IB_VLAN_BYTES		= 4,
++	IB_GRH_BYTES		= 40,
++	IB_IP4_BYTES		= 20,
++	IB_UDP_BYTES		= 8,
++	IB_BTH_BYTES		= 12,
++	IB_DETH_BYTES		= 8,
++	IB_EXT_ATOMICETH_BYTES	= 28,
++	IB_EXT_XRC_BYTES	= 4,
++	IB_ICRC_BYTES		= 4
+ };
+ 
+ struct ib_field {
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index a87e8940fe57..eb3b23b6ec54 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -297,7 +297,7 @@ struct t10_alua_tg_pt_gp {
+ 	struct list_head tg_pt_gp_lun_list;
+ 	struct se_lun *tg_pt_gp_alua_lun;
+ 	struct se_node_acl *tg_pt_gp_alua_nacl;
+-	struct delayed_work tg_pt_gp_transition_work;
++	struct work_struct tg_pt_gp_transition_work;
+ 	struct completion *tg_pt_gp_transition_complete;
+ };
+ 
+diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
+index ab1dadba9923..33c603dd7cd3 100644
+--- a/include/uapi/linux/usb/ch9.h
++++ b/include/uapi/linux/usb/ch9.h
+@@ -423,6 +423,11 @@ struct usb_endpoint_descriptor {
+ #define USB_ENDPOINT_XFER_INT		3
+ #define USB_ENDPOINT_MAX_ADJUSTABLE	0x80
+ 
++#define USB_EP_MAXP_MULT_SHIFT	11
++#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
++#define USB_EP_MAXP_MULT(m) \
++	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
++
+ /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
+ #define USB_ENDPOINT_INTRTYPE		0x30
+ #define USB_ENDPOINT_INTR_PERIODIC	(0 << 4)
+@@ -630,6 +635,20 @@ static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
+ 	return __le16_to_cpu(epd->wMaxPacketSize);
+ }
+ 
++/**
++ * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
++ * @epd: endpoint to be checked
++ *
++ * Return @epd's wMaxPacketSize[12:11] + 1
++ */
++static inline int
++usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
++{
++	int maxp = __le16_to_cpu(epd->wMaxPacketSize);
++
++	return USB_EP_MAXP_MULT(maxp) + 1;
++}
++
+ static inline int usb_endpoint_interrupt_type(
+ 		const struct usb_endpoint_descriptor *epd)
+ {
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index c95c5122b105..df5c32a0c6ed 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
+  *
+  * This function returns true if:
+  *
+- *   runtime / (deadline - t) > dl_runtime / dl_period ,
++ *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
+  *
+  * IOW we can't recycle current parameters.
+  *
+- * Notice that the bandwidth check is done against the period. For
++ * Notice that the bandwidth check is done against the deadline. For
+  * task with deadline equal to period this is the same of using
+- * dl_deadline instead of dl_period in the equation above.
++ * dl_period instead of dl_deadline in the equation above.
+  */
+ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
+ 			       struct sched_dl_entity *pi_se, u64 t)
+@@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
+ 	 * of anything below microseconds resolution is actually fiction
+ 	 * (but still we want to give the user that illusion >;).
+ 	 */
+-	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
++	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ 	right = ((dl_se->deadline - t) >> DL_SCALE) *
+ 		(pi_se->dl_runtime >> DL_SCALE);
+ 
+@@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
+ 	}
+ }
+ 
++static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
++{
++	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
++}
++
+ /*
+  * If the entity depleted all its runtime, and if we want it to sleep
+  * while waiting for some new execution time to become available, we
+- * set the bandwidth enforcement timer to the replenishment instant
++ * set the bandwidth replenishment timer to the replenishment instant
+  * and try to activate it.
+  *
+  * Notice that it is important for the caller to know if the timer
+@@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
+ 	 * that it is actually coming from rq->clock and not from
+ 	 * hrtimer's time base reading.
+ */ +- act = ns_to_ktime(dl_se->deadline); ++ act = ns_to_ktime(dl_next_period(dl_se)); + now = hrtimer_cb_get_time(timer); + delta = ktime_to_ns(now) - rq_clock(rq); + act = ktime_add_ns(act, delta); +@@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) + lockdep_unpin_lock(&rq->lock, rf.cookie); + rq = dl_task_offline_migration(rq, p); + rf.cookie = lockdep_pin_lock(&rq->lock); ++ update_rq_clock(rq); + + /* + * Now that the task has been migrated to the new RQ and we +@@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) + timer->function = dl_task_timer; + } + ++/* ++ * During the activation, CBS checks if it can reuse the current task's ++ * runtime and period. If the deadline of the task is in the past, CBS ++ * cannot use the runtime, and so it replenishes the task. This rule ++ * works fine for implicit deadline tasks (deadline == period), and the ++ * CBS was designed for implicit deadline tasks. However, a task with ++ * constrained deadline (deadine < period) might be awakened after the ++ * deadline, but before the next period. In this case, replenishing the ++ * task would allow it to run for runtime / deadline. As in this case ++ * deadline < period, CBS enables a task to run for more than the ++ * runtime / period. In a very loaded system, this can cause a domino ++ * effect, making other tasks miss their deadlines. ++ * ++ * To avoid this problem, in the activation of a constrained deadline ++ * task after the deadline but before the next period, throttle the ++ * task and set the replenishing timer to the begin of the next period, ++ * unless it is boosted. ++ */ ++static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) ++{ ++ struct task_struct *p = dl_task_of(dl_se); ++ struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); ++ ++ if (dl_time_before(dl_se->deadline, rq_clock(rq)) && ++ dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { ++ if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) ++ return; ++ dl_se->dl_throttled = 1; ++ } ++} ++ + static + int dl_runtime_exceeded(struct sched_dl_entity *dl_se) + { +@@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se) + __dequeue_dl_entity(dl_se); + } + ++static inline bool dl_is_constrained(struct sched_dl_entity *dl_se) ++{ ++ return dl_se->dl_deadline < dl_se->dl_period; ++} ++ + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) + { + struct task_struct *pi_task = rt_mutex_get_top_task(p); +@@ -947,6 +989,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) + return; + } + ++ /* ++ * Check if a constrained deadline task was activated ++ * after the deadline but before the next period. ++ * If that is the case, the task will be throttled and ++ * the replenishment timer will be set to the next period. ++ */ ++ if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) ++ dl_check_constrained_dl(&p->dl); ++ + /* + * If p is throttled, we do nothing. 
In fact, if it exhausted + * its budget it needs a replenishment and, since it now is on +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 9c131168d933..7a360d6f6798 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -2022,8 +2022,9 @@ static void pull_rt_task(struct rq *this_rq) + bool resched = false; + struct task_struct *p; + struct rq *src_rq; ++ int rt_overload_count = rt_overloaded(this_rq); + +- if (likely(!rt_overloaded(this_rq))) ++ if (likely(!rt_overload_count)) + return; + + /* +@@ -2032,6 +2033,11 @@ static void pull_rt_task(struct rq *this_rq) + */ + smp_rmb(); + ++ /* If we are the only overloaded CPU do nothing */ ++ if (rt_overload_count == 1 && ++ cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) ++ return; ++ + #ifdef HAVE_RT_PUSH_IPI + if (sched_feat(RT_PUSH_IPI)) { + tell_cpu_to_push(this_rq); +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index c1e50cc0d7b0..4214cd960b8e 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3727,37 +3727,30 @@ static const struct file_operations show_traces_fops = { + .llseek = seq_lseek, + }; + +-/* +- * The tracer itself will not take this lock, but still we want +- * to provide a consistent cpumask to user-space: +- */ +-static DEFINE_MUTEX(tracing_cpumask_update_lock); +- +-/* +- * Temporary storage for the character representation of the +- * CPU bitmask (and one more byte for the newline): +- */ +-static char mask_str[NR_CPUS + 1]; +- + static ssize_t + tracing_cpumask_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) + { + struct trace_array *tr = file_inode(filp)->i_private; ++ char *mask_str; + int len; + +- mutex_lock(&tracing_cpumask_update_lock); ++ len = snprintf(NULL, 0, "%*pb\n", ++ cpumask_pr_args(tr->tracing_cpumask)) + 1; ++ mask_str = kmalloc(len, GFP_KERNEL); ++ if (!mask_str) ++ return -ENOMEM; + +- len = snprintf(mask_str, count, "%*pb\n", ++ len = snprintf(mask_str, len, "%*pb\n", + cpumask_pr_args(tr->tracing_cpumask)); + if (len >= count) { + count = -EINVAL; + goto out_err; + } +- count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); ++ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); + + out_err: +- mutex_unlock(&tracing_cpumask_update_lock); ++ kfree(mask_str); + + return count; + } +@@ -3777,8 +3770,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, + if (err) + goto err_unlock; + +- mutex_lock(&tracing_cpumask_update_lock); +- + local_irq_disable(); + arch_spin_lock(&tr->max_lock); + for_each_tracing_cpu(cpu) { +@@ -3801,8 +3792,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, + local_irq_enable(); + + cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); +- +- mutex_unlock(&tracing_cpumask_update_lock); + free_cpumask_var(tracing_cpumask_new); + + return count; +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index aa1df1a10dd7..82ce5713f744 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) + + static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) + { +- struct nf_bridge_info *nf_bridge; +- unsigned int mtu_reserved; ++ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); ++ unsigned int mtu, mtu_reserved; + + mtu_reserved = nf_bridge_mtu_reduction(skb); ++ mtu = skb->dev->mtu; ++ ++ if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) ++ mtu = 
nf_bridge->frag_max_size; + +- if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) { ++ if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { + nf_bridge_info_free(skb); + return br_dev_queue_push_xmit(net, sk, skb); + } + +- nf_bridge = nf_bridge_info_get(skb); +- + /* This is wrong! We should preserve the original fragment + * boundaries by preserving frag_list rather than refragmenting. + */ +diff --git a/net/core/dev.c b/net/core/dev.c +index c37891828e4e..09007a71c8dd 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1304,6 +1304,7 @@ void netdev_notify_peers(struct net_device *dev) + { + rtnl_lock(); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); ++ call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); + rtnl_unlock(); + } + EXPORT_SYMBOL(netdev_notify_peers); +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 48734ee6293f..31f17f0bbd1c 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -766,7 +766,7 @@ static bool icmp_tag_validation(int proto) + } + + /* +- * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, ICMP_QUENCH, and ++ * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and + * ICMP_PARAMETERPROB. + */ + +@@ -794,7 +794,8 @@ static bool icmp_unreach(struct sk_buff *skb) + if (iph->ihl < 5) /* Mangled header, drop. */ + goto out_err; + +- if (icmph->type == ICMP_DEST_UNREACH) { ++ switch (icmph->type) { ++ case ICMP_DEST_UNREACH: + switch (icmph->code & 15) { + case ICMP_NET_UNREACH: + case ICMP_HOST_UNREACH: +@@ -830,8 +831,16 @@ static bool icmp_unreach(struct sk_buff *skb) + } + if (icmph->code > NR_ICMP_UNREACH) + goto out; +- } else if (icmph->type == ICMP_PARAMETERPROB) ++ break; ++ case ICMP_PARAMETERPROB: + info = ntohl(icmph->un.gateway) >> 24; ++ break; ++ case ICMP_TIME_EXCEEDED: ++ __ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS); ++ if (icmph->code == ICMP_EXC_FRAGTIME) ++ goto out; ++ break; ++ } + + /* + * Throw it at our lower layers +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index b06acd0f400d..cfc4dd8997e5 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -1944,7 +1944,7 @@ static __net_exit void l2tp_exit_net(struct net *net) + + rcu_read_lock_bh(); + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { +- (void)l2tp_tunnel_delete(tunnel); ++ l2tp_tunnel_delete(tunnel); + } + rcu_read_unlock_bh(); + +diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c +index 1ccd310d01a5..ee03bc866d1b 100644 +--- a/net/l2tp/l2tp_netlink.c ++++ b/net/l2tp/l2tp_netlink.c +@@ -287,7 +287,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info + l2tp_tunnel_notify(&l2tp_nl_family, info, + tunnel, L2TP_CMD_TUNNEL_DELETE); + +- (void) l2tp_tunnel_delete(tunnel); ++ l2tp_tunnel_delete(tunnel); + + out: + return ret; +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c +index 5c67a696e046..b4b3fe078868 100644 +--- a/net/mac80211/mesh.c ++++ b/net/mac80211/mesh.c +@@ -279,8 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, + /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */ + *pos |= ifmsh->ps_peers_deep_sleep ? 
+ IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00; +- *pos++ = 0x00; +- + return 0; + } + +diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c +index 1309e2c34764..c5a5a6959c1b 100644 +--- a/net/mpls/af_mpls.c ++++ b/net/mpls/af_mpls.c +@@ -937,6 +937,8 @@ static void mpls_ifdown(struct net_device *dev, int event) + { + struct mpls_route __rcu **platform_label; + struct net *net = dev_net(dev); ++ unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN; ++ unsigned int alive; + unsigned index; + + platform_label = rtnl_dereference(net->mpls.platform_label); +@@ -946,9 +948,11 @@ static void mpls_ifdown(struct net_device *dev, int event) + if (!rt) + continue; + ++ alive = 0; + change_nexthops(rt) { + if (rtnl_dereference(nh->nh_dev) != dev) +- continue; ++ goto next; ++ + switch (event) { + case NETDEV_DOWN: + case NETDEV_UNREGISTER: +@@ -956,13 +960,16 @@ static void mpls_ifdown(struct net_device *dev, int event) + /* fall through */ + case NETDEV_CHANGE: + nh->nh_flags |= RTNH_F_LINKDOWN; +- if (event != NETDEV_UNREGISTER) +- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; + break; + } + if (event == NETDEV_UNREGISTER) + RCU_INIT_POINTER(nh->nh_dev, NULL); ++next: ++ if (!(nh->nh_flags & nh_flags)) ++ alive++; + } endfor_nexthops(rt); ++ ++ WRITE_ONCE(rt->rt_nhn_alive, alive); + } + } + +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index a6e44ef2ec9a..2155c2498aed 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -2040,12 +2040,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) + seq_puts(seq, + " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"); + } else { ++ struct net *net = seq_file_net(seq); ++ struct netns_ipvs *ipvs = net_ipvs(net); + const struct ip_vs_service *svc = v; + const struct ip_vs_iter *iter = seq->private; + const struct ip_vs_dest *dest; + struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); + char *sched_name = sched ? sched->name : "none"; + ++ if (svc->ipvs != ipvs) ++ return 0; + if (iter->table == ip_vs_svc_table) { + #ifdef CONFIG_IP_VS_IPV6 + if (svc->af == AF_INET6) +diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c +index 3f9d8d7ec632..b099b64366f3 100644 +--- a/net/rxrpc/conn_event.c ++++ b/net/rxrpc/conn_event.c +@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, + rxrpc_conn_retransmit_call(conn, skb); + return 0; + ++ case RXRPC_PACKET_TYPE_BUSY: ++ /* Just ignore BUSY packets for now. 
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index 3f9d8d7ec632..b099b64366f3 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
+ rxrpc_conn_retransmit_call(conn, skb);
+ return 0;
+
++ case RXRPC_PACKET_TYPE_BUSY:
++ /* Just ignore BUSY packets for now. */
++ return 0;
++
+ case RXRPC_PACKET_TYPE_ABORT:
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &wtmp, sizeof(wtmp)) < 0)
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 44fb8d893c7d..1060d14d4e6a 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -649,6 +649,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_peer *peer;
+ unsigned int mtu;
++ bool wake = false;
+ u32 rwind = ntohl(ackinfo->rwind);
+
+ _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
+@@ -656,9 +657,14 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
+ rwind, ntohl(ackinfo->jumbo_max));
+
+- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+- call->tx_winsize = rwind;
++ if (call->tx_winsize != rwind) {
++ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
++ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
++ if (rwind > call->tx_winsize)
++ wake = true;
++ call->tx_winsize = rwind;
++ }
++
+ if (call->cong_ssthresh > rwind)
+ call->cong_ssthresh = rwind;
+
+@@ -672,6 +678,9 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ spin_unlock_bh(&peer->lock);
+ _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
+ }
++
++ if (wake)
++ wake_up(&call->waitq);
+ }
+
+ /*
+diff --git a/net/socket.c b/net/socket.c
+index 6bbccf05854f..05f13b24572c 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1702,6 +1702,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ /* We assume all kernel code knows the size of sockaddr_storage */
+ msg.msg_namelen = 0;
+ msg.msg_iocb = NULL;
++ msg.msg_flags = 0;
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, flags);
+diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
+index ea162fbf68e5..d5adc04bb724 100644
+--- a/sound/soc/intel/skylake/skl-sst-utils.c
++++ b/sound/soc/intel/skylake/skl-sst-utils.c
+@@ -295,6 +295,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ struct uuid_module *module;
+ struct firmware stripped_fw;
+ unsigned int safe_file;
++ int ret = 0;
+
+ /* Get the FW pointer to derive ADSP header */
+ stripped_fw.data = fw->data;
+@@ -343,8 +344,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+
+ for (i = 0; i < num_entry; i++, mod_entry++) {
+ module = kzalloc(sizeof(*module), GFP_KERNEL);
+- if (!module)
+- return -ENOMEM;
++ if (!module) {
++ ret = -ENOMEM;
++ goto free_uuid_list;
++ }
+
+ uuid_bin = (uuid_le *)mod_entry->uuid.id;
+ memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
+@@ -355,8 +358,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ size = sizeof(int) * mod_entry->instance_max_count;
+ module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
+ if (!module->instance_id) {
+- kfree(module);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto free_uuid_list;
+ }
+
+ list_add_tail(&module->list, &skl->uuid_list);
+@@ -367,6 +370,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
+ }
+
+ return 0;
++
++free_uuid_list:
++ skl_freeup_uuid_list(skl);
++ return ret;
+ }
+
+ void skl_freeup_uuid_list(struct skl_sst *ctx)
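A note on the skl-sst-utils.c hunk above: the early returns used to leak every uuid_module already queued on skl->uuid_list when a later allocation failed; the fix funnels all failures through one label that frees the partial list before reporting the error. Below is a generic sketch of the same unwind pattern, using plain malloc/free and an invented node type rather than the driver's structures:

#include <stdlib.h>

struct node {
	struct node *next;
	char payload[32];
};

/* Build a list of n nodes; on any allocation failure, unwind everything
 * already queued and report the error instead of leaking the earlier
 * nodes, the same shape as the snd_skl_parse_uuids() fix above. */
static int build_list(struct node **head, int n)
{
	struct node *nd;
	int i, ret = 0;

	*head = NULL;
	for (i = 0; i < n; i++) {
		nd = calloc(1, sizeof(*nd));
		if (!nd) {
			ret = -1;
			goto free_list;
		}
		nd->next = *head;
		*head = nd;
	}
	return 0;

free_list:
	while (*head) {
		nd = *head;
		*head = nd->next;
		free(nd);
	}
	return ret;
}

int main(void)
{
	struct node *list;

	if (build_list(&list, 16) == 0) {
		/* use the list, then release it the same way */
		while (list) {
			struct node *nd = list;
			list = nd->next;
			free(nd);
		}
	}
	return 0;
}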
+diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
+index abb5eaac854a..7d92a24b7cfa 100644
+--- a/sound/soc/sh/rcar/cmd.c
++++ b/sound/soc/sh/rcar/cmd.c
+@@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
+ struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 data;
++ u32 path[] = {
++ [1] = 1 << 0,
++ [5] = 1 << 8,
++ [6] = 1 << 12,
++ [9] = 1 << 15,
++ };
+
+ if (!mix && !dvc)
+ return 0;
+
++ if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
++ return -ENXIO;
++
+ if (mix) {
+ struct rsnd_dai *rdai;
+ struct rsnd_mod *src;
+ struct rsnd_dai_stream *tio;
+ int i;
+- u32 path[] = {
+- [0] = 0,
+- [1] = 1 << 0,
+- [2] = 0,
+- [3] = 0,
+- [4] = 0,
+- [5] = 1 << 8
+- };
+
+ /*
+ * it is assuming that integrater is well understanding about
+@@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
+ } else {
+ struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+
+- u32 path[] = {
+- [0] = 0x30000,
+- [1] = 0x30001,
+- [2] = 0x40000,
+- [3] = 0x10000,
+- [4] = 0x20000,
+- [5] = 0x40100
++ u8 cmd_case[] = {
++ [0] = 0x3,
++ [1] = 0x3,
++ [2] = 0x4,
++ [3] = 0x1,
++ [4] = 0x2,
++ [5] = 0x4,
++ [6] = 0x1,
++ [9] = 0x2,
+ };
+
+- data = path[rsnd_mod_id(src)];
++ data = path[rsnd_mod_id(src)] |
++ cmd_case[rsnd_mod_id(src)] << 16;
+ }
+
+ dev_dbg(dev, "ctu/mix path = 0x%08x", data);
+diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
+index 6bc93cbb3049..edeb74a13c0f 100644
+--- a/sound/soc/sh/rcar/dma.c
++++ b/sound/soc/sh/rcar/dma.c
+@@ -361,6 +361,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
+ return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
+ }
+
++static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
++{
++ struct rsnd_mod *mod = rsnd_mod_get(dma);
++ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
++ struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
++ volatile void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
++ u32 val = ioread32(addr);
++
++ val &= ~mask;
++ val |= (data & mask);
++
++ iowrite32(val, addr);
++}
++
+ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+@@ -368,10 +382,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+ int i;
+
+- rsnd_dmapp_write(dma, 0, PDMACHCR);
++ rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
+
+ for (i = 0; i < 1024; i++) {
+- if (0 == rsnd_dmapp_read(dma, PDMACHCR))
++ if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
+ return 0;
+ udelay(1);
+ }
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index 6cb6db005fc4..560cf4b51a99 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -172,10 +172,15 @@ static u32 rsnd_ssi_run_mods(struct rsnd_dai_stream *io)
+ {
+ struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
++ u32 mods;
+
+- return rsnd_ssi_multi_slaves_runtime(io) |
+- 1 << rsnd_mod_id(ssi_mod) |
+- 1 << rsnd_mod_id(ssi_parent_mod);
++ mods = rsnd_ssi_multi_slaves_runtime(io) |
++ 1 << rsnd_mod_id(ssi_mod);
++
++ if (ssi_parent_mod)
++ mods |= 1 << rsnd_mod_id(ssi_parent_mod);
++
++ return mods;
+ }
+
+ u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
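A note on the rcar/dma.c hunk above: stopping the DMA used to write 0 to the whole PDMACHCR register, clobbering its other configuration bits, and then poll until the entire register read back as zero. The new rsnd_dmapp_bset() does a read-modify-write so that only the DE (enable) bit is cleared, and the completion poll now tests just that bit. Below is a standalone sketch of the masked update, with an ordinary uint32_t standing in for the memory-mapped register; the bit position chosen for PDMACHCR_DE is illustrative only, not taken from the datasheet:

#include <stdio.h>
#include <stdint.h>

#define PDMACHCR_DE (1u << 0)	/* illustrative bit position */

/* Read-modify-write of the masked bits, leaving the rest of the
 * register intact, the behaviour rsnd_dmapp_bset() adds above. */
static void reg_bset(uint32_t *reg, uint32_t data, uint32_t mask)
{
	uint32_t val = *reg;

	val &= ~mask;
	val |= data & mask;
	*reg = val;
}

int main(void)
{
	uint32_t pdmachcr = 0xdeadbe01;	/* DE set plus other config bits */

	reg_bset(&pdmachcr, 0, PDMACHCR_DE);	/* stop: clear DE only */
	printf("after stop: 0x%08x\n", pdmachcr);	/* 0xdeadbe00 */

	/* the poll checks only the DE bit, not the whole register */
	printf("stopped? %s\n", (pdmachcr & PDMACHCR_DE) ? "no" : "yes");
	return 0;
}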
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index f7b35e178582..f199d5b11d76 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -202,7 +202,7 @@ void symbols__fixup_end(struct rb_root *symbols)
+
+ /* Last entry */
+ if (curr->end == curr->start)
+- curr->end = roundup(curr->start, 4096);
++ curr->end = roundup(curr->start, 4096) + 4096;
+ }
+
+ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
+diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
+index bbab7f4664ac..d116a19477a7 100644
+--- a/tools/testing/selftests/vm/Makefile
++++ b/tools/testing/selftests/vm/Makefile
+@@ -1,5 +1,9 @@
+ # Makefile for vm selftests
+
++ifndef OUTPUT
++ OUTPUT := $(shell pwd)
++endif
++
+ CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
+ BINARIES = compaction_test
+ BINARIES += hugepage-mmap
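A note on the perf symbol.c hunk above: when the last symbol's start address was already 4 KiB aligned, roundup(curr->start, 4096) returned the start itself, leaving a zero-sized symbol; adding one extra page guarantees the last entry a non-empty range. A small sketch of the arithmetic, using a hypothetical symbol address and a local roundup helper rather than perf's macro:

#include <stdio.h>
#include <stdint.h>

#define PAGE 4096

/* Round v up to the next multiple of a. */
static uint64_t roundup_to(uint64_t v, uint64_t a)
{
	return ((v + a - 1) / a) * a;
}

int main(void)
{
	uint64_t start = 0x401000;	/* hypothetical, page-aligned start */
	uint64_t old_end = roundup_to(start, PAGE);		/* == start */
	uint64_t new_end = roundup_to(start, PAGE) + PAGE;	/* +1 page */

	printf("old end: 0x%llx (size %llu)\n",
	       (unsigned long long)old_end,
	       (unsigned long long)(old_end - start));	/* size 0: the bug */
	printf("new end: 0x%llx (size %llu)\n",
	       (unsigned long long)new_end,
	       (unsigned long long)(new_end - start));	/* size 4096 */
	return 0;
}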