diff --git a/include/net/tcp.h b/include/net/tcp.h
index b71a446d58f6..56b76027b85e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -793,6 +793,13 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 }
 
+#define TCP_INFINITE_SSTHRESH	0x7fffffff
+
+static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
+{
+	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
+}
+
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index edeea060db44..19a0612b8a20 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2012,7 +2012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	tp->packets_out = 0;
-	tp->snd_ssthresh = 0x7fffffff;
+	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_cnt = 0;
 	tp->bytes_acked = 0;
 	tcp_set_ca_state(sk, TCP_CA_Open);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index af6d6fa00db1..d86784be7ab3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -761,7 +761,7 @@ void tcp_update_metrics(struct sock *sk)
 			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
 		}
 
-		if (tp->snd_ssthresh >= 0xFFFF) {
+		if (tcp_in_initial_slowstart(tp)) {
 			/* Slow start still did not finish. */
 			if (dst_metric(dst, RTAX_SSTHRESH) &&
 			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0543561da999..7cda24b53f61 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1808,7 +1808,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	/* See draft-stevens-tcpca-spec-01 for discussion of the
 	 * initialization of these values.
 	 */
-	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
+	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = 536;
 
@@ -2284,7 +2284,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		tp->snd_cwnd,
-		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
+		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
 		len);
 }
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e48c37d74d77..045bcfd3f288 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -410,7 +410,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->retrans_out = 0;
 		newtp->sacked_out = 0;
 		newtp->fackets_out = 0;
-		newtp->snd_ssthresh = 0x7fffffff;
+		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 
 		/* So many TCP implementations out there (incorrectly) count the
 		 * initial SYN frame in their delayed-ACK and congestion control
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7718a9261efb..21d100b68b19 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1846,7 +1846,7 @@ static int tcp_v6_init_sock(struct sock *sk)
 	/* See draft-stevens-tcpca-spec-01 for discussion of the
 	 * initialization of these values.
 	 */
-	tp->snd_ssthresh = 0x7fffffff;
+	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = 536;
 
@@ -1969,7 +1969,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_rto),
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
-		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
+		   tp->snd_cwnd,
+		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
 		   );
 }
 
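
For anyone who wants to poke at the semantics outside the kernel tree, here is a minimal user-space sketch of the idea behind the new helper: snd_ssthresh is initialized to TCP_INFINITE_SSTHRESH and stays there until congestion control lowers it, so comparing against that constant answers "has this connection ever left its initial slow start?". The struct below is a hypothetical stand-in for the kernel's struct tcp_sock, carrying only the one field the example needs; it is illustrative, not kernel code.

/*
 * User-space sketch only -- not kernel code.  "struct toy_tcp_sock" is a
 * hypothetical stand-in for the kernel's struct tcp_sock with just the
 * field this example needs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCP_INFINITE_SSTHRESH	0x7fffffff

struct toy_tcp_sock {
	uint32_t snd_ssthresh;	/* slow start threshold, as in tcp_sock */
};

/* Same test the patch introduces: ssthresh still at its "infinite" initial value. */
static inline bool tcp_in_initial_slowstart(const struct toy_tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

int main(void)
{
	struct toy_tcp_sock fresh   = { .snd_ssthresh = TCP_INFINITE_SSTHRESH };
	struct toy_tcp_sock reduced = { .snd_ssthresh = 10 };	/* lowered after a loss event */

	/* Mirrors get_tcp4_sock()/get_tcp6_sock(): report -1 while still in initial slow start. */
	printf("fresh:   %d\n", tcp_in_initial_slowstart(&fresh) ? -1 : (int)fresh.snd_ssthresh);
	printf("reduced: %d\n", tcp_in_initial_slowstart(&reduced) ? -1 : (int)reduced.snd_ssthresh);
	return 0;
}

Centralizing the test in one helper also removes the inconsistency the old code had between the 0x7fffffff initializers and the >= 0xFFFF comparisons in tcp_update_metrics() and the /proc output paths.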