 3.14.43/0000_README                                                                                                           |   2
 3.14.43/4420_grsecurity-3.1-3.14.43-201506021902.patch (renamed from 3.14.43/4420_grsecurity-3.1-3.14.43-201505272112.patch)  |  87
 3.2.69/0000_README                                                                                                            |   2
 3.2.69/4420_grsecurity-3.1-3.2.69-201506021858.patch (renamed from 3.2.69/4420_grsecurity-3.1-3.2.69-201505272108.patch)      |  81
 4.0.4/0000_README                                                                                                             |   2
 4.0.4/4420_grsecurity-3.1-4.0.4-201506021902.patch (renamed from 4.0.4/4420_grsecurity-3.1-4.0.4-201505272113.patch)          | 135
 6 files changed, 263 insertions, 46 deletions
diff --git a/3.14.43/0000_README b/3.14.43/0000_README index 09a43ed..6fbd85f 100644 --- a/3.14.43/0000_README +++ b/3.14.43/0000_README @@ -2,7 +2,7 @@ README ----------------------------------------------------------------------------- Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-3.1-3.14.43-201505272112.patch +Patch: 4420_grsecurity-3.1-3.14.43-201506021902.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.14.43/4420_grsecurity-3.1-3.14.43-201505272112.patch b/3.14.43/4420_grsecurity-3.1-3.14.43-201506021902.patch index 2ecf955..af87f48 100644 --- a/3.14.43/4420_grsecurity-3.1-3.14.43-201505272112.patch +++ b/3.14.43/4420_grsecurity-3.1-3.14.43-201506021902.patch @@ -45141,7 +45141,7 @@ index 5b8f938..b73d657 100644 .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c -index 0bf1e4e..b4bf44e 100644 +index 0bf1e4e..0552eb9 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -97,9 +97,17 @@ static __init int map_switcher(void) @@ -45171,6 +45171,15 @@ index 0bf1e4e..b4bf44e 100644 end_switcher_text - start_switcher_text); printk(KERN_INFO "lguest: mapped switcher at %p\n", +@@ -176,7 +184,7 @@ static void unmap_switcher(void) + bool lguest_address_ok(const struct lguest *lg, + unsigned long addr, unsigned long len) + { +- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); ++ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); + } + + /* diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index bfb39bb..08a603b 100644 --- a/drivers/lguest/page_tables.c @@ -68007,7 +68016,7 @@ index a93f7e6..d58bcbe 100644 return 0; while (nr) { diff --git a/fs/dcache.c b/fs/dcache.c -index a9231c8..46b359c 100644 +index a9231c8..f87d4b8 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -250,7 +250,7 @@ static void __d_free(struct rcu_head *head) @@ -68084,6 +68093,24 @@ index a9231c8..46b359c 100644 d_lru_isolate(dentry); spin_unlock(&dentry->d_lock); return LRU_REMOVED; +@@ -1135,13 +1135,13 @@ ascend: + /* might go back up the wrong parent if we have had a rename. */ + if (need_seqretry(&rename_lock, seq)) + goto rename_retry; +- next = child->d_child.next; +- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { ++ /* go into the first sibling still alive */ ++ do { ++ next = child->d_child.next; + if (next == &this_parent->d_subdirs) + goto ascend; + child = list_entry(next, struct dentry, d_child); +- next = next->next; +- } ++ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); + rcu_read_unlock(); + goto resume; + } @@ -1269,7 +1269,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) * loop in shrink_dcache_parent() might not make any progress * and loop forever. @@ -104058,7 +104085,7 @@ index bb2b201..46abaf9 100644 /* diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c -index a28df52..3d55877 100644 +index a28df52..02dccaa 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -26,7 +26,7 @@ @@ -104070,6 +104097,16 @@ index a28df52..3d55877 100644 long align, res = 0; unsigned long c; +@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, + return res + find_zero(data) + 1 - align; + } + res += sizeof(unsigned long); +- if (unlikely(max < sizeof(unsigned long))) ++ /* We already handled 'unsigned long' bytes. Did we do it all ? 
*/ ++ if (unlikely(max <= sizeof(unsigned long))) + break; + max -= sizeof(unsigned long); + if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index b604b83..c0547f6 100644 --- a/lib/swiotlb.c @@ -112340,7 +112377,7 @@ index 64f0354..a81b39d 100644 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) { /* Has it gone just too far? */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c -index b25e852..cdc3258 100644 +index b25e852..f578c52 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -87,6 +87,7 @@ @@ -112420,7 +112457,20 @@ index b25e852..cdc3258 100644 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } -@@ -1566,7 +1587,7 @@ csum_error: +@@ -1317,10 +1338,8 @@ csum_copy_err: + } + unlock_sock_fast(sk, slow); + +- if (noblock) +- return -EAGAIN; +- +- /* starting over for a new packet */ ++ /* starting over for a new packet, but check if we need to yield */ ++ cond_resched(); + msg->msg_flags &= ~MSG_TRUNC; + goto try_again; + } +@@ -1566,7 +1585,7 @@ csum_error: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); @@ -112429,7 +112479,7 @@ index b25e852..cdc3258 100644 kfree_skb(skb); return -1; } -@@ -1585,7 +1606,7 @@ static void flush_stack(struct sock **stack, unsigned int count, +@@ -1585,7 +1604,7 @@ static void flush_stack(struct sock **stack, unsigned int count, skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { @@ -112438,7 +112488,7 @@ index b25e852..cdc3258 100644 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, -@@ -1786,6 +1807,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -1786,6 +1805,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); @@ -112448,7 +112498,7 @@ index b25e852..cdc3258 100644 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* -@@ -2354,7 +2378,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, +@@ -2354,7 +2376,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, @@ -113276,7 +113326,7 @@ index 9d4332d..4292595 100644 } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c -index 20b63d2..babfcb8 100644 +index 20b63d2..9f371ac 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -76,10 +76,13 @@ static unsigned int udp6_ehashfn(struct net *net, @@ -113312,7 +113362,20 @@ index 20b63d2..babfcb8 100644 if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, -@@ -690,7 +693,7 @@ csum_error: +@@ -515,10 +518,8 @@ csum_copy_err: + } + unlock_sock_fast(sk, slow); + +- if (noblock) +- return -EAGAIN; +- +- /* starting over for a new packet */ ++ /* starting over for a new packet, but check if we need to yield */ ++ cond_resched(); + msg->msg_flags &= ~MSG_TRUNC; + goto try_again; + } +@@ -690,7 +691,7 @@ csum_error: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); @@ -113321,7 +113384,7 @@ index 20b63d2..babfcb8 100644 kfree_skb(skb); return -1; } -@@ -747,7 +750,7 @@ static void flush_stack(struct sock **stack, unsigned int count, +@@ -747,7 +748,7 @@ static void flush_stack(struct sock **stack, unsigned int count, if (likely(skb1 == NULL)) skb1 = (i == final) ? 
skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { @@ -113330,7 +113393,7 @@ index 20b63d2..babfcb8 100644 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, -@@ -886,6 +889,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -886,6 +887,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); diff --git a/3.2.69/0000_README b/3.2.69/0000_README index 4b24d8f..26a7110 100644 --- a/3.2.69/0000_README +++ b/3.2.69/0000_README @@ -194,7 +194,7 @@ Patch: 1068_linux-3.2.69.patch From: http://www.kernel.org Desc: Linux 3.2.69 -Patch: 4420_grsecurity-3.1-3.2.69-201505272108.patch +Patch: 4420_grsecurity-3.1-3.2.69-201506021858.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.2.69/4420_grsecurity-3.1-3.2.69-201505272108.patch b/3.2.69/4420_grsecurity-3.1-3.2.69-201506021858.patch index 58543ee..e2400cb 100644 --- a/3.2.69/4420_grsecurity-3.1-3.2.69-201505272108.patch +++ b/3.2.69/4420_grsecurity-3.1-3.2.69-201506021858.patch @@ -39511,7 +39511,7 @@ index 429d5a0..7e899ed 100644 return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c -index a9e33ce..09edd4b 100644 +index a9e33ce6..09edd4b 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv, @@ -42867,7 +42867,7 @@ index 614ebeb..ce439fd 100644 .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c -index b5fdcb7..8ed3519 100644 +index b5fdcb7..3cb34b8 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -92,9 +92,17 @@ static __init int map_switcher(void) @@ -42897,6 +42897,15 @@ index b5fdcb7..8ed3519 100644 end_switcher_text - start_switcher_text); printk(KERN_INFO "lguest: mapped switcher at %p\n", +@@ -171,7 +179,7 @@ static void unmap_switcher(void) + bool lguest_address_ok(const struct lguest *lg, + unsigned long addr, unsigned long len) + { +- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); ++ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); + } + + /* diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 3b62be16..e33134a 100644 --- a/drivers/lguest/page_tables.c @@ -59058,7 +59067,7 @@ index 739fb59..5385976 100644 static int __init init_cramfs_fs(void) { diff --git a/fs/dcache.c b/fs/dcache.c -index 8bc98af..a49e6f0 100644 +index 8bc98af..2cc0298 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -103,11 +103,11 @@ static unsigned int d_hash_shift __read_mostly; @@ -59077,6 +59086,24 @@ index 8bc98af..a49e6f0 100644 return dentry_hashtable + (hash & D_HASHMASK); } +@@ -1016,13 +1016,13 @@ ascend: + /* might go back up the wrong parent if we have had a rename */ + if (!locked && read_seqretry(&rename_lock, seq)) + goto rename_retry; +- next = child->d_child.next; +- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { ++ /* go into the first sibling still alive */ ++ do { ++ next = child->d_child.next; + if (next == &this_parent->d_subdirs) + goto ascend; + child = list_entry(next, struct dentry, d_child); +- next = next->next; +- } ++ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); + rcu_read_unlock(); + goto resume; + } @@ -1235,6 +1235,9 @@ struct dentry *__d_alloc(struct 
super_block *sb, const struct qstr *name) dentry->d_sb = sb; dentry->d_op = NULL; @@ -106270,7 +106297,7 @@ index 2e0f0af..e2948bf 100644 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) { /* Has it gone just too far? */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c -index 8c2e259..076bc5b 100644 +index 8c2e259..90d7b4e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -86,6 +86,7 @@ @@ -106341,7 +106368,20 @@ index 8c2e259..076bc5b 100644 ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) -@@ -1486,7 +1507,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +@@ -1248,10 +1269,8 @@ csum_copy_err: + UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + unlock_sock_fast(sk, slow); + +- if (noblock) +- return -EAGAIN; +- +- /* starting over for a new packet */ ++ /* starting over for a new packet, but check if we need to yield */ ++ cond_resched(); + msg->msg_flags &= ~MSG_TRUNC; + goto try_again; + } +@@ -1486,7 +1505,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) drop: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); @@ -106350,7 +106390,7 @@ index 8c2e259..076bc5b 100644 kfree_skb(skb); return -1; } -@@ -1505,7 +1526,7 @@ static void flush_stack(struct sock **stack, unsigned int count, +@@ -1505,7 +1524,7 @@ static void flush_stack(struct sock **stack, unsigned int count, skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { @@ -106359,7 +106399,7 @@ index 8c2e259..076bc5b 100644 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, -@@ -1674,6 +1695,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -1674,6 +1693,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); @@ -106369,7 +106409,7 @@ index 8c2e259..076bc5b 100644 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* -@@ -2097,8 +2121,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, +@@ -2097,8 +2119,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), @@ -106964,7 +107004,7 @@ index 655cc60..c49497a 100644 static int tcp6_seq_show(struct seq_file *seq, void *v) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c -index d131a95..e2c60f8 100644 +index d131a95..59d5161 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -50,6 +50,10 @@ @@ -106978,7 +107018,20 @@ index d131a95..e2c60f8 100644 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) { const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; -@@ -546,7 +550,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) +@@ -451,10 +455,8 @@ csum_copy_err: + } + unlock_sock_fast(sk, slow); + +- if (noblock) +- return -EAGAIN; +- +- /* starting over for a new packet */ ++ /* starting over for a new packet, but check if we need to yield */ ++ cond_resched(); + msg->msg_flags &= ~MSG_TRUNC; + goto try_again; + } +@@ -546,7 +548,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) return 0; drop: @@ -106987,7 +107040,7 @@ index d131a95..e2c60f8 100644 drop_no_sk_drops_inc: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); -@@ -622,7 +626,7 @@ static void flush_stack(struct sock **stack, unsigned int count, +@@ -622,7 +624,7 @@ static void flush_stack(struct sock **stack, unsigned int count, 
continue; } drop: @@ -106996,7 +107049,7 @@ index d131a95..e2c60f8 100644 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), -@@ -777,6 +781,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -777,6 +779,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); @@ -107006,7 +107059,7 @@ index d131a95..e2c60f8 100644 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); -@@ -793,7 +800,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -793,7 +798,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb)) { @@ -107015,7 +107068,7 @@ index d131a95..e2c60f8 100644 bh_unlock_sock(sk); sock_put(sk); goto discard; -@@ -1409,8 +1416,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket +@@ -1409,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), diff --git a/4.0.4/0000_README b/4.0.4/0000_README index e870dbc..2b2ce68 100644 --- a/4.0.4/0000_README +++ b/4.0.4/0000_README @@ -2,7 +2,7 @@ README ----------------------------------------------------------------------------- Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-3.1-4.0.4-201505272113.patch +Patch: 4420_grsecurity-3.1-4.0.4-201506021902.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/4.0.4/4420_grsecurity-3.1-4.0.4-201505272113.patch b/4.0.4/4420_grsecurity-3.1-4.0.4-201506021902.patch index b338663..802855c 100644 --- a/4.0.4/4420_grsecurity-3.1-4.0.4-201505272113.patch +++ b/4.0.4/4420_grsecurity-3.1-4.0.4-201506021902.patch @@ -45246,7 +45246,7 @@ index 87f7dff..7300125 100644 { struct dsp_conf *conf; diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c -index 7dc93aa..8272379 100644 +index 7dc93aa..9263d05 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -96,9 +96,17 @@ static __init int map_switcher(void) @@ -45276,6 +45276,15 @@ index 7dc93aa..8272379 100644 end_switcher_text - start_switcher_text); printk(KERN_INFO "lguest: mapped switcher at %p\n", +@@ -173,7 +181,7 @@ static void unmap_switcher(void) + bool lguest_address_ok(const struct lguest *lg, + unsigned long addr, unsigned long len) + { +- return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); ++ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); + } + + /* diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index e3abebc9..6a35328 100644 --- a/drivers/lguest/page_tables.c @@ -45613,7 +45622,7 @@ index 79f6941..b33b4e0 100644 pmd->bl_info.value_type.inc = data_block_inc; pmd->bl_info.value_type.dec = data_block_dec; diff --git a/drivers/md/dm.c b/drivers/md/dm.c -index 8001fe9..abdd0d0 100644 +index 8001fe9..83c927d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -188,9 +188,9 @@ struct mapped_device { @@ -45628,7 +45637,45 @@ index 8001fe9..abdd0d0 100644 struct list_head uevent_list; spinlock_t uevent_lock; /* Protect access to uevent_list */ -@@ -2163,8 +2163,8 @@ static struct mapped_device *alloc_dev(int minor) +@@ -1642,8 +1642,7 @@ static int dm_merge_bvec(struct request_queue *q, + struct mapped_device *md = 
q->queuedata; + struct dm_table *map = dm_get_live_table_fast(md); + struct dm_target *ti; +- sector_t max_sectors; +- int max_size = 0; ++ sector_t max_sectors, max_size = 0; + + if (unlikely(!map)) + goto out; +@@ -1658,8 +1657,16 @@ static int dm_merge_bvec(struct request_queue *q, + max_sectors = min(max_io_len(bvm->bi_sector, ti), + (sector_t) queue_max_sectors(q)); + max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; +- if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ +- max_size = 0; ++ ++ /* ++ * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t ++ * to the targets' merge function since it holds sectors not bytes). ++ * Just doing this as an interim fix for stable@ because the more ++ * comprehensive cleanup of switching to sector_t will impact every ++ * DM target that implements a ->merge hook. ++ */ ++ if (max_size > INT_MAX) ++ max_size = INT_MAX; + + /* + * merge_bvec_fn() returns number of bytes +@@ -1667,7 +1674,7 @@ static int dm_merge_bvec(struct request_queue *q, + * max is precomputed maximal io size + */ + if (max_size && ti->type->merge) +- max_size = ti->type->merge(ti, bvm, biovec, max_size); ++ max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); + /* + * If the target doesn't support merge method and some of the devices + * provided their merge_bvec method (we know this by looking for the +@@ -2163,8 +2170,8 @@ static struct mapped_device *alloc_dev(int minor) spin_lock_init(&md->deferred_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); @@ -45639,7 +45686,7 @@ index 8001fe9..abdd0d0 100644 INIT_LIST_HEAD(&md->uevent_list); INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); -@@ -2329,7 +2329,7 @@ static void event_callback(void *context) +@@ -2329,7 +2336,7 @@ static void event_callback(void *context) dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); @@ -45648,7 +45695,7 @@ index 8001fe9..abdd0d0 100644 wake_up(&md->eventq); } -@@ -3175,18 +3175,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, +@@ -3175,18 +3182,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, uint32_t dm_next_uevent_seq(struct mapped_device *md) { @@ -67612,7 +67659,7 @@ index d72fe37..ded5511 100644 atomic_set(&midCount, 0); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h -index 22b289a..bbbba08 100644 +index 22b289a..bbbba082 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -823,35 +823,35 @@ struct cifs_tcon { @@ -68340,7 +68387,7 @@ index bbbe139..b76fae5 100644 return 0; while (nr) { diff --git a/fs/dcache.c b/fs/dcache.c -index c71e373..5c1f656 100644 +index c71e373..05e38ae 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry) @@ -68460,6 +68507,24 @@ index c71e373..5c1f656 100644 d_lru_isolate(lru, dentry); spin_unlock(&dentry->d_lock); return LRU_REMOVED; +@@ -1205,13 +1205,13 @@ ascend: + /* might go back up the wrong parent if we have had a rename. 
*/ + if (need_seqretry(&rename_lock, seq)) + goto rename_retry; +- next = child->d_child.next; +- while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { ++ /* go into the first sibling still alive */ ++ do { ++ next = child->d_child.next; + if (next == &this_parent->d_subdirs) + goto ascend; + child = list_entry(next, struct dentry, d_child); +- next = next->next; +- } ++ } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); + rcu_read_unlock(); + goto resume; + } @@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) } else { if (dentry->d_flags & DCACHE_LRU_LIST) @@ -103905,7 +103970,7 @@ index e0af6ff..fcc9f15 100644 /* diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c -index a28df52..3d55877 100644 +index a28df52..02dccaa 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -26,7 +26,7 @@ @@ -103917,6 +103982,16 @@ index a28df52..3d55877 100644 long align, res = 0; unsigned long c; +@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, + return res + find_zero(data) + 1 - align; + } + res += sizeof(unsigned long); +- if (unlikely(max < sizeof(unsigned long))) ++ /* We already handled 'unsigned long' bytes. Did we do it all ? */ ++ if (unlikely(max <= sizeof(unsigned long))) + break; + max -= sizeof(unsigned long); + if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 4abda07..b9d3765 100644 --- a/lib/swiotlb.c @@ -112032,7 +112107,7 @@ index 0732b78..a82bdc6 100644 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) { /* Has it gone just too far? */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c -index 97ef1f8b..e446c33 100644 +index 97ef1f8b..abeb965 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -87,6 +87,7 @@ @@ -112112,7 +112187,20 @@ index 97ef1f8b..e446c33 100644 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } -@@ -1605,7 +1626,7 @@ csum_error: +@@ -1348,10 +1369,8 @@ csum_copy_err: + } + unlock_sock_fast(sk, slow); + +- if (noblock) +- return -EAGAIN; +- +- /* starting over for a new packet */ ++ /* starting over for a new packet, but check if we need to yield */ ++ cond_resched(); + msg->msg_flags &= ~MSG_TRUNC; + goto try_again; + } +@@ -1605,7 +1624,7 @@ csum_error: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); @@ -112121,7 +112209,7 @@ index 97ef1f8b..e446c33 100644 kfree_skb(skb); return -1; } -@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count, +@@ -1624,7 +1643,7 @@ static void flush_stack(struct sock **stack, unsigned int count, skb1 = (i == final) ? 
skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { @@ -112130,7 +112218,7 @@ index 97ef1f8b..e446c33 100644 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, -@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -1830,6 +1849,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); @@ -112140,7 +112228,7 @@ index 97ef1f8b..e446c33 100644 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* -@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, +@@ -2416,7 +2438,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, @@ -112853,7 +112941,7 @@ index 1f5e622..8387d90 100644 } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c -index d048d46..bf141c3 100644 +index d048d46..cacb4d2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net, @@ -112876,7 +112964,20 @@ index d048d46..bf141c3 100644 if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, -@@ -714,7 +718,7 @@ csum_error: +@@ -528,10 +532,8 @@ csum_copy_err: + } + unlock_sock_fast(sk, slow); + +- if (noblock) +- return -EAGAIN; +- +- /* starting over for a new packet */ ++ /* starting over for a new packet, but check if we need to yield */ ++ cond_resched(); + msg->msg_flags &= ~MSG_TRUNC; + goto try_again; + } +@@ -714,7 +716,7 @@ csum_error: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); @@ -112885,7 +112986,7 @@ index d048d46..bf141c3 100644 kfree_skb(skb); return -1; } -@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count, +@@ -753,7 +755,7 @@ static void flush_stack(struct sock **stack, unsigned int count, if (likely(skb1 == NULL)) skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { @@ -112894,7 +112995,7 @@ index d048d46..bf141c3 100644 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, -@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, +@@ -937,6 +939,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); |
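
Two of the upstream boundary-condition fixes folded into the refreshed patches above are easy to misread, so a short standalone sketch follows. It is an illustration written for this page, not code from the patches: the names `address_ok_old`, `address_ok_new`, and `bytes_touched`, the `main()` harness, and the sample page size and limits are all invented for the demo; only the two compared predicates (from the `drivers/lguest/core.c` hunks) and the loop-exit test (from the `lib/strnlen_user.c` hunks) are taken from the diff. The lguest change matters only when `addr+len` lands exactly on `pfn_limit * PAGE_SIZE`: the old form rounds the exclusive end address down to a page index, so a legal access ending exactly at the top of guest memory was rejected (an out-by-one in the restrictive direction), while both forms keep the `addr+len >= addr` clause to reject ranges that wrap past zero. The strnlen_user change tightens `max < sizeof(unsigned long)` to `<=`: after `res += sizeof(unsigned long)` the current word is fully accounted for, and with `max` equal to exactly one word the old test let the loop run once more and read a word past the permitted range.

```c
/*
 * Standalone sketch of two boundary-condition fixes carried by the
 * refreshed patches above. The harness (names, main, sample values)
 * is hypothetical; only the compared conditions come from the hunks.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* drivers/lguest/core.c, lguest_address_ok(): the old predicate rounds
 * the (exclusive) end address down to a page index before comparing. */
static bool address_ok_old(unsigned long pfn_limit, unsigned long addr,
			   unsigned long len)
{
	return (addr + len) / PAGE_SIZE < pfn_limit && (addr + len >= addr);
}

/* The fixed predicate compares against the exact byte limit; both keep
 * the addr+len >= addr clause to reject ranges that wrap past zero. */
static bool address_ok_new(unsigned long pfn_limit, unsigned long addr,
			   unsigned long len)
{
	return addr + len <= pfn_limit * PAGE_SIZE && (addr + len >= addr);
}

/* lib/strnlen_user.c: bookkeeping-only model of the word-at-a-time scan.
 * Returns how many bytes each exit condition lets the loop touch; the
 * real code's __get_user()/find_zero() machinery is deliberately left out. */
static unsigned long bytes_touched(unsigned long max, bool fixed)
{
	const unsigned long w = sizeof(unsigned long);
	unsigned long touched = 0;

	for (;;) {
		touched += w;	/* one word read per iteration */
		/* the exit test sits after the word is accounted for */
		if (fixed ? max <= w : max < w)
			break;
		max -= w;
	}
	return touched;
}

int main(void)
{
	const unsigned long pfn_limit = 16;	/* guest owns pages 0..15 */
	const unsigned long top = pfn_limit * PAGE_SIZE;
	const unsigned long w = sizeof(unsigned long);

	/* An access ending exactly at the top of guest memory is legal;
	 * the old check rejected it (the out-by-one being fixed). */
	assert(!address_ok_old(pfn_limit, top - 8, 8));
	assert(address_ok_new(pfn_limit, top - 8, 8));

	/* Anything extending past the top is rejected by both. */
	assert(!address_ok_old(pfn_limit, top, 1));
	assert(!address_ok_new(pfn_limit, top, 1));

	/* With max of exactly one word, the old exit test reads a second
	 * word (2*w bytes); the fixed test stops after the first. */
	assert(bytes_touched(w, false) == 2 * w);
	assert(bytes_touched(w, true) == w);

	puts("boundary-condition sketches pass");
	return 0;
}
```

Both fixes are plain `<` versus `<=` audits at a range boundary; modeling only the bookkeeping keeps the sketch self-contained and compilable with any C99 compiler while still exercising the exact conditions the hunks change.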