author     Eric Dumazet <edumazet@google.com>      2014-12-05 00:13:23 (GMT)
committer  David S. Miller <davem@davemloft.net>   2014-12-09 19:58:23 (GMT)
commit     6e3a8a937c2f86ee0b2d354808fc026a143b4518 (patch)
tree       f787c8960845386d7950fa04d704224d94c4cc38 /net/ipv4/tcp_cubic.c
parent     5cccc702fd54e5c3dc5ee16a129770aae79ae60b (diff)
download   linux-6e3a8a937c2f86ee0b2d354808fc026a143b4518.tar.xz
tcp_cubic: add SNMP counters to track how effective Hystart is

When deploying FQ pacing, one thing we noticed is that CUBIC Hystart
triggers too soon.

Having SNMP counters giving an idea of how often the various Hystart
methods trigger is useful prior to any modification.

This patch adds SNMP counters tracking how many times the "ack train"
and "Delay" based Hystart methods trigger, and the cumulative sum of
cwnd at the time Hystart decided to end SS (Slow Start):

myhost:~# nstat -a | grep Hystart
    TcpExtTCPHystartTrainDetect     9          0.0
    TcpExtTCPHystartTrainCwnd       20650      0.0
    TcpExtTCPHystartDelayDetect     10         0.0
    TcpExtTCPHystartDelayCwnd       360        0.0

-> Train detection was triggered 9 times, with an average cwnd of
20650/9 = 2294; Delay detection was triggered 10 times, with an
average cwnd of 360/10 = 36.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_cubic.c')
-rw-r--r--	net/ipv4/tcp_cubic.c	31
1 file changed, 22 insertions(+), 9 deletions(-)
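Note that the diffstat above is limited to tcp_cubic.c, so the definitions
of the LINUX_MIB_TCPHYSTART* identifiers used below are not shown. They
have to come from the usual SNMP plumbing (the enum in
include/uapi/linux/snmp.h and the name table in net/ipv4/proc.c, which is
what gives nstat the TcpExtTCPHystart* names). A rough sketch of what those
companion hunks look like; the placement within the enum and array is
illustrative, not copied from the commit:

/* include/uapi/linux/snmp.h: new MIB ids appended to the LINUX_MIB enum */
enum {
	/* ... existing LINUX_MIB_* entries ... */
	LINUX_MIB_TCPHYSTARTTRAINDETECT,	/* TCPHystartTrainDetect */
	LINUX_MIB_TCPHYSTARTTRAINCWND,		/* TCPHystartTrainCwnd */
	LINUX_MIB_TCPHYSTARTDELAYDETECT,	/* TCPHystartDelayDetect */
	LINUX_MIB_TCPHYSTARTDELAYCWND,		/* TCPHystartDelayCwnd */
	__LINUX_MIB_MAX
};

/* net/ipv4/proc.c: names exported on the TcpExt: line of
 * /proc/net/netstat, where nstat picks them up
 */
static const struct snmp_mib snmp4_net_list[] = {
	/* ... existing entries ... */
	SNMP_MIB_ITEM("TCPHystartTrainDetect", LINUX_MIB_TCPHYSTARTTRAINDETECT),
	SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND),
	SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT),
	SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND),
	SNMP_MIB_SENTINEL
};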
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 20de011..c1d07c7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -363,16 +363,28 @@ static void hystart_update(struct sock *sk, u32 delay)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!(ca->found & hystart_detect)) {
+	if (ca->found & hystart_detect)
+		return;
+
+	if (hystart_detect & HYSTART_ACK_TRAIN) {
 		u32 now = bictcp_clock();
 
 		/* first detection parameter - ack-train detection */
 		if ((s32)(now - ca->last_ack) <= hystart_ack_delta) {
 			ca->last_ack = now;
-			if ((s32)(now - ca->round_start) > ca->delay_min >> 4)
+			if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
 				ca->found |= HYSTART_ACK_TRAIN;
+				NET_INC_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTTRAINDETECT);
+				NET_ADD_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTTRAINCWND,
+						 tp->snd_cwnd);
+				tp->snd_ssthresh = tp->snd_cwnd;
+			}
 		}
+	}
 
+	if (hystart_detect & HYSTART_DELAY) {
 		/* obtain the minimum delay of more than sampling packets */
 		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
 			if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
@@ -381,15 +393,16 @@ static void hystart_update(struct sock *sk, u32 delay)
 			ca->sample_cnt++;
 		} else {
 			if (ca->curr_rtt > ca->delay_min +
-			    HYSTART_DELAY_THRESH(ca->delay_min>>4))
+			    HYSTART_DELAY_THRESH(ca->delay_min>>4)) {
 				ca->found |= HYSTART_DELAY;
+				NET_INC_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTDELAYDETECT);
+				NET_ADD_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPHYSTARTDELAYCWND,
+						 tp->snd_cwnd);
+				tp->snd_ssthresh = tp->snd_cwnd;
+			}
 		}
 
-		/*
-		 * Either one of two conditions are met,
-		 * we exit from slow start immediately.
-		 */
-		if (ca->found & hystart_detect)
-			tp->snd_ssthresh = tp->snd_cwnd;
 	}
 }
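Because each detection method gets a Detect counter (incremented by one per
slow-start exit) paired with a Cwnd counter (incremented by snd_cwnd at the
moment of exit), the average exit cwnd is simply the second divided by the
first, as in the 20650/9 = 2294 example above. The counters land on the
TcpExt: line of /proc/net/netstat, so the averages can also be recomputed
without nstat. A minimal userspace sketch, not part of the patch (the
netstat_get helper and its parsing strategy are mine):

/* hystart_avg.c - recompute the average cwnd at Hystart exit from
 * /proc/net/netstat. Build with: cc -o hystart_avg hystart_avg.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the TcpExt counter called @name, or 0 if absent.
 * /proc/net/netstat alternates a header line and a value line per group:
 *   TcpExt: SyncookiesSent ... TCPHystartTrainDetect ...
 *   TcpExt: 0 ... 9 ...
 */
static unsigned long long netstat_get(const char *name)
{
	char hdr[8192], val[8192];
	unsigned long long result = 0;
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f)
		return 0;
	while (fgets(hdr, sizeof(hdr), f) && fgets(val, sizeof(val), f)) {
		int idx = -1, i;
		char *tok;

		if (strncmp(hdr, "TcpExt:", 7) != 0)
			continue;
		/* locate @name's column on the header line ... */
		for (i = 0, tok = strtok(hdr + 7, " \n"); tok;
		     tok = strtok(NULL, " \n"), i++)
			if (strcmp(tok, name) == 0)
				idx = i;
		/* ... then read the same column off the value line */
		for (i = 0, tok = strtok(val + 7, " \n"); tok;
		     tok = strtok(NULL, " \n"), i++)
			if (i == idx)
				result = strtoull(tok, NULL, 10);
		break;
	}
	fclose(f);
	return result;
}

int main(void)
{
	unsigned long long n, cwnd;

	n = netstat_get("TCPHystartTrainDetect");
	cwnd = netstat_get("TCPHystartTrainCwnd");
	if (n)
		printf("ack-train: %llu exits, avg cwnd %llu\n", n, cwnd / n);

	n = netstat_get("TCPHystartDelayDetect");
	cwnd = netstat_get("TCPHystartDelayCwnd");
	if (n)
		printf("delay:     %llu exits, avg cwnd %llu\n", n, cwnd / n);
	return 0;
}

With the sample figures from the commit message this would print
"ack-train: 9 exits, avg cwnd 2294" and "delay: 10 exits, avg cwnd 36".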