@@ -127,16 +127,27 @@ static void rxrpc_set_rto(struct rxrpc_peer *peer)
 	peer->rto_us = rxrpc_bound_rto(rto);
 }
 
-static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
+static void rxrpc_update_rtt_min(struct rxrpc_peer *peer, ktime_t resp_time, long rtt_us)
+{
+	/* Window size 5mins in approx usec (ipv4.sysctl_tcp_min_rtt_wlen) */
+	u32 wlen_us = 5ULL * NSEC_PER_SEC / 1024;
+
+	minmax_running_min(&peer->min_rtt, wlen_us, resp_time / 1024,
+			   (u32)rtt_us ? : jiffies_to_usecs(1));
+}
+
+static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, ktime_t resp_time, long rtt_us)
 {
 	if (rtt_us < 0)
 		return;
 
-	//rxrpc_update_rtt_min(peer, rtt_us);
+	/* Update RACK min RTT [RFC8985 6.1 Step 1]. */
+	rxrpc_update_rtt_min(peer, resp_time, rtt_us);
+
 	rxrpc_rtt_estimator(peer, rtt_us);
 	rxrpc_set_rto(peer);
 
-	/* RFC6298: only reset backoff on valid RTT measurement. */
+	/* Only reset backoff on valid RTT measurement [RFC6298]. */
 	peer->backoff = 0;
 }
 
@@ -157,9 +168,10 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 		return;
 
 	spin_lock(&peer->rtt_input_lock);
-	rxrpc_ack_update_rtt(peer, rtt_us);
+	rxrpc_ack_update_rtt(peer, resp_time, rtt_us);
 	if (peer->rtt_count < 3)
 		peer->rtt_count++;
+	peer->rtt_taken++;
 	spin_unlock(&peer->rtt_input_lock);
 
 	trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
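
Note on the windowed minimum: minmax_running_min() is the kernel's win_minmax filter (include/linux/win_minmax.h), which tracks the smallest measurement seen within a sliding time window; the patch feeds each valid RTT sample into it to maintain the RACK min RTT [RFC8985 6.1 Step 1]. Below is a minimal userspace sketch of the same idea, simplified to a single tracked sample rather than the kernel's three-sample filter; the names (min_rtt_filter, min_rtt_update) are hypothetical and only for illustration.

/* Hypothetical userspace illustration, not the kernel's win_minmax code. */
#include <stdio.h>
#include <stdint.h>

struct min_rtt_filter {
	uint32_t min_us;	/* smallest RTT sample still inside the window */
	uint32_t stamp_us;	/* time at which that sample was taken */
};

/* Record one RTT sample taken at @now_us; @wlen_us is the window length. */
static void min_rtt_update(struct min_rtt_filter *f, uint32_t wlen_us,
			   uint32_t now_us, uint32_t rtt_us)
{
	if (rtt_us <= f->min_us ||		/* new minimum wins outright */
	    now_us - f->stamp_us > wlen_us) {	/* stored minimum has aged out */
		f->min_us = rtt_us;
		f->stamp_us = now_us;
	}
}

int main(void)
{
	struct min_rtt_filter f = { .min_us = UINT32_MAX, .stamp_us = 0 };
	/* { sample time (us), measured RTT (us) } */
	uint32_t samples[][2] = {
		{ 1000, 500 }, { 2000, 300 }, { 3000, 800 }, { 9000000, 700 },
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		min_rtt_update(&f, 5000000 /* window in us */, samples[i][0], samples[i][1]);
		printf("t=%uus rtt=%uus -> min_rtt=%uus\n",
		       (unsigned int)samples[i][0], (unsigned int)samples[i][1],
		       (unsigned int)f.min_us);
	}
	return 0;
}

The real win_minmax filter keeps the best three samples with staggered timestamps so the tracked minimum degrades gracefully as the window rolls over, but the core rule is the same as above: a new sample replaces the stored minimum either because it is lower or because the stored one has expired.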