kernel: bump 4.9 to 4.9.192

Refreshed all patches.
Compile-tested on: none
Runtime-tested on: none
Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
KERNEL_PATCHVER:=$(KERNEL_TESTING_PATCHVER)
endif
-LINUX_VERSION-4.9 = .191
+LINUX_VERSION-4.9 = .192
LINUX_VERSION-4.14 = .142
LINUX_VERSION-4.19 = .71
-LINUX_KERNEL_HASH-4.9.191 = ded4b87406deb67112b25a2283e8b5c89c2b47e2de14a97acda57f74cd38b7bc
+LINUX_KERNEL_HASH-4.9.192 = 7a1a300cce70a4fd0d49b7fff7b1673159b61c4040c5a7c08ea333a7cb328d54
LINUX_KERNEL_HASH-4.14.142 = e67ba535991170a8383be68203af7d9b4262474ceeff2f9afedeac6043b590f3
LINUX_KERNEL_HASH-4.19.71 = 1f53ebffd4842099db429172f6bac6a23e6f355f1278efb321860a34cf9a5cae
nval = cmpxchg(&tp->tsq_flags, oval, nval);
if (nval != oval)
continue;
-@@ -2235,6 +2235,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2236,6 +2236,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -2140,6 +2140,15 @@ static bool tcp_small_queue_check(struct
+@@ -2141,6 +2141,15 @@ static bool tcp_small_queue_check(struct
limit <<= factor;
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
if (nval != oval)
continue;
-@@ -2149,7 +2149,7 @@ static bool tcp_small_queue_check(struct
+@@ -2150,7 +2150,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
-@@ -2247,8 +2247,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2248,8 +2248,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3559,8 +3559,6 @@ void __tcp_send_ack(struct sock *sk, u32
+@@ -3560,8 +3560,6 @@ void __tcp_send_ack(struct sock *sk, u32
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
-@@ -2136,7 +2136,7 @@ static bool tcp_small_queue_check(struct
+@@ -2137,7 +2137,7 @@ static bool tcp_small_queue_check(struct
{
unsigned int limit;
}
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
-@@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struc
+@@ -888,7 +888,7 @@ static int tsi108_poll(struct napi_struc
if (num_received < budget) {
data->rxpending = 0;