[TCP]: Tighten tcp_sock's belt, drop left_out
author Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Thu, 9 Aug 2007 11:33:18 +0000 (14:33 +0300)
committer David S. Miller <davem@sunset.davemloft.net>
Wed, 10 Oct 2007 23:47:55 +0000 (16:47 -0700)
It is easily calculable when needed and its users are not that many
after all.
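
In other words, left_out was always just sacked_out + lost_out, so every
reader can recompute that sum on the spot.  Purely as an illustration (the
patch open-codes the sum at each call site rather than adding a helper,
see the include/net/tcp.h hunk below):

	/* Illustration only, not part of this patch.  Assumes the
	 * kernel's struct tcp_sock from <linux/tcp.h>: left_out was
	 * always the SACKed segments plus the segments marked lost.
	 */
	static inline unsigned int left_out(const struct tcp_sock *tp)
	{
		return tp->sacked_out + tp->lost_out;
	}

	/* Hence packets still in flight become:
	 *	packets_out - (sacked_out + lost_out) + retrans_out
	 */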

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/tcp.h
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c

include/linux/tcp.h
index d64734389fb62f83e6b05d5a22533e78a0eaa863..1f12fa0b67d7074add85604fee70a85ace330efb 100644
@@ -304,7 +304,6 @@ struct tcp_sock {
        u32     rtt_seq;        /* sequence number to update rttvar     */
 
        u32     packets_out;    /* Packets which are "in flight"        */
-       u32     left_out;       /* Packets which leaved network */
        u32     retrans_out;    /* Retransmitted packets out            */
 /*
  *      Options received (usually on last packet, some only on SYN packets).
include/net/tcp.h
index 9d3438f6b52f3ba469f1dd85acd33cfc444245d3..299872d461c7c5bb3f0d136d6be5cecb788883b1 100644
@@ -735,7 +735,8 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
  */
 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 {
-       return (tp->packets_out - tp->left_out + tp->retrans_out);
+       return tp->packets_out - (tp->sacked_out + tp->lost_out) +
+               tp->retrans_out;
 }
 
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
@@ -757,7 +758,6 @@ static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
        BUG_ON(tp->rx_opt.sack_ok &&
               (tp->sacked_out + tp->lost_out > tp->packets_out));
-       tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
net/ipv4/tcp_input.c
index ea690afa592a9e8b7e6531058aa3e0ae0cc1cce9..957e0fb8afb7b7b75f3770f5214fcc13870a4e4e 100644
@@ -1346,8 +1346,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                }
        }
 
-       tp->left_out = tp->sacked_out + tp->lost_out;
-
        if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
            (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
                tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
@@ -1408,7 +1406,6 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 {
        tp->sacked_out = 0;
-       tp->left_out = tp->lost_out;
 }
 
 int tcp_use_frto(struct sock *sk)
@@ -1573,7 +1570,6 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 
 void tcp_clear_retrans(struct tcp_sock *tp)
 {
-       tp->left_out = 0;
        tp->retrans_out = 0;
 
        tp->fackets_out = 0;
@@ -1973,7 +1969,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
        printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
               msg,
               NIPQUAD(inet->daddr), ntohs(inet->dport),
-              tp->snd_cwnd, tp->left_out,
+              tp->snd_cwnd, tp->sacked_out + tp->lost_out,
               tp->snd_ssthresh, tp->prior_ssthresh,
               tp->packets_out);
 }
@@ -2102,7 +2098,6 @@ static int tcp_try_undo_loss(struct sock *sk)
 
                DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
-               tp->left_out = tp->sacked_out;
                tcp_undo_cwr(sk, 1);
                NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
                inet_csk(sk)->icsk_retransmits = 0;
@@ -2126,8 +2121,6 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       tcp_sync_left_out(tp);
-
        if (tp->retrans_out == 0)
                tp->retrans_stamp = 0;
 
@@ -2137,7 +2130,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                int state = TCP_CA_Open;
 
-               if (tp->left_out || tp->retrans_out || tp->undo_marker)
+               if (tp->sacked_out || tp->retrans_out || tp->undo_marker)
                        state = TCP_CA_Disorder;
 
                if (inet_csk(sk)->icsk_ca_state != state) {
net/ipv4/tcp_minisocks.c
index 36a8fbd0e64ef799a6009176fb537af92b64cd73..fdfe89fe646b8f9d6667dd7c32f32ce534dd41fb 100644
@@ -405,7 +405,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
 
                newtp->packets_out = 0;
-               newtp->left_out = 0;
                newtp->retrans_out = 0;
                newtp->sacked_out = 0;
                newtp->fackets_out = 0;
net/ipv4/tcp_output.c
index 3c8c8e7f6f6d331aa0001ba0bdeb6bbec85a135d..7434944caa8fb0a17b9a3bcce7d78e8b6c46333b 100644
@@ -732,10 +732,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= diff;
 
-               if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        tp->lost_out -= diff;
-                       tp->left_out -= diff;
-               }
 
                if (diff > 0) {
                        /* Adjust Reno SACK estimate. */
@@ -1727,15 +1725,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
                if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= tcp_skb_pcount(next_skb);
-               if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
+               if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
                        tp->lost_out -= tcp_skb_pcount(next_skb);
-                       tp->left_out -= tcp_skb_pcount(next_skb);
-               }
                /* Reno case is special. Sigh... */
-               if (!tp->rx_opt.sack_ok && tp->sacked_out) {
+               if (!tp->rx_opt.sack_ok && tp->sacked_out)
                        tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
-                       tp->left_out -= tcp_skb_pcount(next_skb);
-               }
 
                /* Not quite right: it can be > snd.fack, but
                 * it is better to underestimate fackets.