LINUX_VERSION-3.18 = .71
LINUX_VERSION-4.4 = .119
-LINUX_VERSION-4.9 = .86
+LINUX_VERSION-4.9 = .87
LINUX_VERSION-4.14 = .25
LINUX_KERNEL_HASH-3.18.71 = 5abc9778ad44ce02ed6c8ab52ece8a21c6d20d21f6ed8a19287b4a38a50c1240
LINUX_KERNEL_HASH-4.4.119 = 4f1f9b7b6b2ee93597239d89bb3b6b60c71ebd8c91d706fadd36f515c68443e6
-LINUX_KERNEL_HASH-4.9.86 = a7cf6eb5efcf182f1760fdfc06118eecce5d8c9d82d6945e68fc15db990c6e85
+LINUX_KERNEL_HASH-4.9.87 = 7ac9f6af69dc5a7e38bf35cc3fa889e3a4b22504a85f57fdc87734a8abe4c917
LINUX_KERNEL_HASH-4.14.25 = 6dcfbf79c068e51c1b06edb1ce58ddc9ca351f862bf2a144e96106ec3f21e587
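The LINUX_VERSION / LINUX_KERNEL_HASH pairs above pin each kernel series to an exact point release and the SHA-256 of its source tarball, so a bump always changes the version suffix and the hash together. As a minimal illustration of what that pinned hash protects, the sketch below recomputes a tarball's SHA-256 with OpenSSL's SHA256_* API and compares it to the 4.9.87 value; the filename and the check logic are assumptions for illustration, not the build system's actual code path.

#include <openssl/sha.h>
#include <stdio.h>
#include <string.h>

/* Compute the lowercase hex SHA-256 of a file into out[65]. */
static int sha256_hex_of_file(const char *path, char out[65])
{
    unsigned char buf[4096], digest[SHA256_DIGEST_LENGTH];
    SHA256_CTX ctx;
    size_t n;
    FILE *f = fopen(path, "rb");

    if (!f)
        return -1;
    SHA256_Init(&ctx);
    while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
        SHA256_Update(&ctx, buf, n);
    fclose(f);
    SHA256_Final(digest, &ctx);
    for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
        sprintf(out + 2 * i, "%02x", digest[i]);
    return 0;
}

int main(void)
{
    const char *expected =
        "7ac9f6af69dc5a7e38bf35cc3fa889e3a4b22504a85f57fdc87734a8abe4c917";
    char actual[65];

    if (sha256_hex_of_file("linux-4.9.87.tar.xz", actual) != 0)
        return 1;
    return strcmp(actual, expected) ? 1 : 0;  /* 0 = hash matches */
}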
remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1))))
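remove_uri_prefix strips the scheme from a source URL with nested $(subst) calls. Note that $(subst) substitutes everywhere in the string, not only at the front; the hypothetical C analogue below only strips a leading scheme, which covers the common case and is illustrative only.

#include <stdio.h>
#include <string.h>

/* Return the URI with a known leading scheme removed, if present. */
static const char *remove_uri_prefix(const char *uri)
{
    static const char *schemes[] = { "https://", "http://", "git://" };

    for (size_t i = 0; i < sizeof(schemes) / sizeof(schemes[0]); i++) {
        size_t len = strlen(schemes[i]);
        if (strncmp(uri, schemes[i], len) == 0)
            return uri + len;
    }
    return uri;
}

int main(void)
{
    /* prints "github.com/openwrt/openwrt.git" */
    printf("%s\n", remove_uri_prefix("https://github.com/openwrt/openwrt.git"));
    return 0;
}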
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
-@@ -458,7 +458,7 @@ static struct neighbour *ipv4_neigh_look
+@@ -461,7 +461,7 @@ static struct neighbour *ipv4_neigh_look
else if (skb)
pkey = &ip_hdr(skb)->daddr;
nval = cmpxchg(&tp->tsq_flags, oval, nval);
if (nval != oval)
continue;
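The tsq_flags fragment above is the classic compare-and-swap retry loop: compute a new flags word from a snapshot, attempt cmpxchg(), and retry if another CPU changed the word in between. Below is a userspace sketch of the same pattern with C11 atomics; the names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define TSQF_THROTTLED (1UL << 0)

static _Atomic unsigned long tsq_flags;

static void set_deferred_flag(unsigned long flag)
{
    unsigned long oval = atomic_load(&tsq_flags);

    for (;;) {
        unsigned long nval = oval | flag;

        /* On failure the current value is reloaded into oval,
         * mirroring the "if (nval != oval) continue;" retry above. */
        if (atomic_compare_exchange_weak(&tsq_flags, &oval, nval))
            break;
    }
}

int main(void)
{
    set_deferred_flag(TSQF_THROTTLED);
    printf("flags: 0x%lx\n", atomic_load(&tsq_flags));
    return 0;
}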
-@@ -2183,6 +2183,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2210,6 +2210,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -2088,6 +2088,15 @@ static bool tcp_small_queue_check(struct
+@@ -2115,6 +2115,15 @@ static bool tcp_small_queue_check(struct
limit <<= factor;
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -1928,26 +1928,26 @@ static inline void tcp_mtu_check_reprobe
+@@ -1948,26 +1948,26 @@ static bool tcp_can_coalesce_send_queue_
*/
static int tcp_mtu_probe(struct sock *sk)
{
if (nval != oval)
continue;
-@@ -2097,7 +2097,7 @@ static bool tcp_small_queue_check(struct
+@@ -2124,7 +2124,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
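The comment above describes a test/set/re-test sequence: after TSQ_THROTTLED is set, the queue-size condition is read again, because a TX completion may have drained the queue before the flag became visible. A simplified userspace rendering of that pattern follows (C11 atomics, illustrative names; the real code uses set_bit() and an smp memory barrier).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define F_THROTTLED (1UL << 0)

static _Atomic unsigned long wmem_alloc;  /* bytes queued for transmit */
static _Atomic unsigned long sock_flags;

static bool small_queue_check(unsigned long limit)
{
    if (atomic_load(&wmem_alloc) > limit) {
        atomic_fetch_or(&sock_flags, F_THROTTLED);
        /* Re-test: a concurrent completion may have dropped
         * wmem_alloc below the limit before the flag was set;
         * without this second read the sender could stall waiting
         * for a wakeup that already happened. */
        if (atomic_load(&wmem_alloc) > limit)
            return true;
    }
    return false;
}

int main(void)
{
    atomic_store(&wmem_alloc, 4096);
    printf("throttled: %d\n", small_queue_check(1024));  /* 1 */
    printf("throttled: %d\n", small_queue_check(8192));  /* 0 */
    return 0;
}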
-@@ -2195,8 +2195,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2222,8 +2222,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3504,8 +3504,6 @@ void tcp_send_ack(struct sock *sk)
+@@ -3531,8 +3531,6 @@ void tcp_send_ack(struct sock *sk)
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
-@@ -2084,7 +2084,7 @@ static bool tcp_small_queue_check(struct
+@@ -2111,7 +2111,7 @@ static bool tcp_small_queue_check(struct
{
unsigned int limit;
---
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
-@@ -665,16 +665,48 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
+@@ -665,16 +665,48 @@ static inline void prot##extra##blast_##
unsigned long end) \
{ \
unsigned long lsize = cpu_##desc##_line_size(); \
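The refresh above merely trims the long token-pasted function name in the hunk header; the underlying macro stamps out a whole family of cache-range helpers by gluing name fragments together with "##". A self-contained sketch of the same idiom is below, simplified so that the loop body prints instead of issuing cache ops.

#include <stdio.h>

#define BUILD_BLAST_RANGE(pfx, desc, lsize)                         \
static void blast_##pfx##cache_range(unsigned long start,           \
                                     unsigned long end)             \
{                                                                   \
    /* Walk the range one cache line at a time, aligned down. */    \
    for (unsigned long a = start & ~((unsigned long)(lsize) - 1);   \
         a < end; a += (lsize))                                     \
        printf("flush %s line at 0x%lx\n", #desc, a);               \
}

BUILD_BLAST_RANGE(d, dcache, 32)   /* defines blast_dcache_range() */
BUILD_BLAST_RANGE(i, icache, 32)   /* defines blast_icache_range() */

int main(void)
{
    blast_dcache_range(0x1000, 0x1080);
    blast_icache_range(0x2000, 0x2040);
    return 0;
}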
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2943,10 +2943,20 @@ static int xmit_one(struct sk_buff *skb,
+@@ -2950,10 +2950,20 @@ static int xmit_one(struct sk_buff *skb,
if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
dev_queue_xmit_nit(skb, dev);
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
-@@ -420,6 +420,9 @@ static struct pernet_operations ip_rt_pr
+@@ -423,6 +423,9 @@ static struct pernet_operations ip_rt_pr
static int __init ip_rt_proc_init(void)
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4535,6 +4535,9 @@ static enum gro_result dev_gro_receive(s
+@@ -4542,6 +4542,9 @@ static enum gro_result dev_gro_receive(s
enum gro_result ret;
int grow;
if (!(skb->dev->features & NETIF_F_GRO))
goto normal;
-@@ -5823,6 +5826,48 @@ static void __netdev_adjacent_dev_unlink
+@@ -5830,6 +5833,48 @@ static void __netdev_adjacent_dev_unlink
&upper_dev->adj_list.lower);
}
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master,
void *upper_priv, void *upper_info)
-@@ -5895,6 +5940,7 @@ static int __netdev_upper_dev_link(struc
+@@ -5902,6 +5947,7 @@ static int __netdev_upper_dev_link(struc
goto rollback_lower_mesh;
}
ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
&changeupper_info.info);
ret = notifier_to_errno(ret);
-@@ -6021,6 +6067,7 @@ void netdev_upper_dev_unlink(struct net_
+@@ -6028,6 +6074,7 @@ void netdev_upper_dev_unlink(struct net_
list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
&changeupper_info.info);
}
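The NETDEV_CHANGEUPPER calls in the hunks above go through the kernel's notifier-chain mechanism: interested subsystems register a callback and are invoked with an event id plus an info payload. Below is a minimal userspace sketch of that pattern; the types and functions are illustrative stand-ins, not the kernel's notifier API.

#include <stdio.h>

struct notifier_block {
    int (*call)(unsigned long event, void *info);
    struct notifier_block *next;
};

static struct notifier_block *chain;

/* Prepend a subscriber to the chain. */
static void notifier_register(struct notifier_block *nb)
{
    nb->next = chain;
    chain = nb;
}

/* Invoke every subscriber with the event and payload. */
static void notifier_call_chain(unsigned long event, void *info)
{
    for (struct notifier_block *nb = chain; nb; nb = nb->next)
        nb->call(event, info);
}

#define NETDEV_CHANGEUPPER 1

static int on_changeupper(unsigned long event, void *info)
{
    if (event == NETDEV_CHANGEUPPER)
        printf("upper device of %s changed\n", (const char *)info);
    return 0;
}

static struct notifier_block nb = { .call = on_changeupper };

int main(void)
{
    notifier_register(&nb);
    notifier_call_chain(NETDEV_CHANGEUPPER, "eth0");
    return 0;
}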
-@@ -6621,6 +6668,7 @@ int dev_set_mac_address(struct net_devic
+@@ -6628,6 +6675,7 @@ int dev_set_mac_address(struct net_devic
if (err)
return err;
dev->addr_assign_type = NET_ADDR_SET;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6615,9 +6615,18 @@ int dev_set_mtu(struct net_device *dev,
+@@ -6622,9 +6622,18 @@ int dev_set_mtu(struct net_device *dev,
if (new_mtu == dev->mtu)
return 0;