Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/rstreason.h>

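/* Clamp the retransmission timeout against a TCP_USER_TIMEOUT set on the
 * socket: return the unmodified RTO when no user timeout is configured,
 * fire as soon as possible (1 jiffy) once the user timeout has already
 * elapsed, and otherwise arm for whichever is smaller, the current RTO or
 * the remaining user-timeout budget.
 */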
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
	if (tp->tcp_usec_ts)
		elapsed /= USEC_PER_MSEC;

	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

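/* Same idea for the zero-window probe timer: never schedule the next probe
 * further out than the remaining TCP_USER_TIMEOUT budget (but no closer
 * than TCP_TIMEOUT_MIN), measured from icsk_probes_tstamp.
 */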
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

/**
 * tcp_write_err() - close socket and save error info
 * @sk:  The socket the error has appeared on.
 *
 * Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 * tcp_out_of_resources() - Close socket if out of resources
 * @sk:        pointer to current socket
 * @do_reset:  send a last packet with reset flag
 *
 * Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting.  This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 * 3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for a long time, or did not transmit
	 * anything for a long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*tcp_rto_max(sk) || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_TCP_ABORT_ON_MEMORY);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 * tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 * @sk:    Pointer to the current socket.
 * @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

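/* Path-MTU blackhole handling: once repeated retransmissions suggest a
 * blackhole (see tcp_write_timeout()), either turn MTU probing on or, if it
 * is already on, halve the current MSS search point, bounded above by
 * tcp_base_mss and below by tcp_mtu_probe_floor/tcp_min_snd_mss, and resync
 * the MSS so smaller segments are tried.
 */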
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(tcp_rto_max(sk) / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * tcp_rto_max(sk);
	return jiffies_to_msecs(timeout);
}

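/* Illustration of tcp_model_timeout(), assuming the default TCP_RTO_MIN of
 * 200 ms (HZ/5) and the default rto_max of TCP_RTO_MAX (120 s):
 * linear_backoff_thresh = ilog2(120000 / 200) = 9, so for boundary <= 9 the
 * modelled timeout is (2^(boundary + 1) - 1) * 200 ms, i.e. 600 ms for
 * boundary 1, 1.4 s for 2, 3 s for 3, and so on, while each boundary beyond
 * 9 adds a further full rto_max.  This models the cumulative wait of
 * exponentially backed-off retransmissions clamped at rto_max.
 */
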
/**
 * retransmits_timed_out() - returns true if this connection has timed out
 * @sk:       The current socket
 * @boundary: max number of retransmissions
 * @timeout:  A custom timeout value.
 *            If set to 0, the default timeout is calculated from
 *            TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP Connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int start_ts, delta;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tp->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	if (tp->tcp_usec_ts) {
		/* delta may be off by up to a jiffy due to timer granularity. */
		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
	}
	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < tcp_rto_max(sk);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);
	mptcp_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		WRITE_ONCE(tp->timeout_rehash, tp->timeout_rehash + 1);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handling the sack compression case */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk_delack_timeout(icsk), jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk_delack_timeout(icsk));
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}

/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			timer_container_of(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	/* Avoid taking socket spinlock if there is no ACK to send.
	 * The compressed_ack check is racy, but a separate hrtimer
	 * will take care of it eventually.
	 */
	if (!(smp_load_acquire(&icsk->icsk_ack.pending) & ICSK_ACK_TIMER) &&
	    !READ_ONCE(tcp_sk(sk)->compressed_ack))
		goto out;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}

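/* Zero-window probe (persist) timer.  If data is in flight or there is
 * nothing queued to send, the probe counters are simply reset; otherwise
 * the socket is aborted once TCP_USER_TIMEOUT or the orphan/retries2
 * limits are exceeded, and another window probe is sent if not.
 */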
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		WRITE_ONCE(icsk->icsk_probes_out, 0);
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		    msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		unsigned int rto_max = tcp_rto_max(sk);
		const bool alive = inet_csk_rto_backoff(icsk, rto_max) < rto_max;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

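/* Book-keeping for RTO events: count every timeout in total_rto, and when
 * this is the first retransmission of an episode also start a new RTO
 * recovery (total_rto_recoveries, rto_stamp) before bumping
 * icsk_retransmits.
 */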
static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	WRITE_ONCE(icsk->icsk_retransmits, icsk->icsk_retransmits + 1);
	tp->total_rto++;
}

/*
 * Timer for Fast Open socket to retransmit SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	tcp_syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	tcp_rtx_synack(sk, req);
	if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
		tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp_ts(tp);
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     req->timeout << req->num_timeout, false);
}

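/* Decide whether retransmissions that have degenerated into zero-window
 * probing (see the !tp->snd_wnd branch in tcp_retransmit_timer()) have gone
 * on for too long: roughly twice rto_max since the last received segment,
 * tightened to TCP_USER_TIMEOUT when one is set.
 */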
static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	const struct tcp_sock *tp = tcp_sk(sk);
	int timeout = tcp_rto_max(sk) * 2;
	s32 rcv_delta;

	if (user_timeout) {
		/* If user application specified a TCP_USER_TIMEOUT,
		 * it does not want win 0 packets to 'reset the timer'
		 * while retransmits are not making progress.
		 */
		if (rtx_delta > user_timeout)
			return true;
		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
	}
	/* Note: timer interrupt might have been delayed by at least one jiffy,
	 * and tp->rcv_tstamp might very well have been written recently.
	 * rcv_delta can thus be negative.
	 */
	rcv_delta = tcp_timeout_expires(sk) - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return msecs_to_jiffies(rtx_delta) > timeout;
}

/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk:  Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not time out this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
		if (tp->tcp_usec_ts)
			rtx_delta /= USEC_PER_MSEC;

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &inet->inet_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &sk->sk_v6_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion.
		 * Let senders fight for local resources conservatively.
		 */
		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				     TCP_RESOURCE_PROBE_INTERVAL,
				     false);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       tcp_rto_max(sk));
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   tp->total_rto >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_backoff++;
		icsk->icsk_rto = min(icsk->icsk_rto << 1, tcp_rto_max(sk));
	}
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     tcp_clamp_rto_to_user_timeout(sk), false);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer() and tcp_release_cb().
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(tcp_timeout_expires(sk), jiffies)) {
		sk_reset_timer(sk, &sk->tcp_retransmit_timer,
			       tcp_timeout_expires(sk));
		return;
	}
	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		smp_store_release(&icsk->icsk_pending, 0);
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		smp_store_release(&icsk->icsk_pending, 0);
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct sock *sk = timer_container_of(sk, t, tcp_retransmit_timer);

	/* Avoid locking the socket when there is no pending event. */
	if (!smp_load_acquire(&inet_csk(sk)->icsk_pending))
		goto out;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}

void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &inet_csk(sk)->icsk_keepalive_timer, jiffies + len);
}

static void tcp_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &inet_csk(sk)->icsk_keepalive_timer);
}

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		tcp_delete_keepalive_timer(sk);
}

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			timer_container_of(icsk, t, icsk_keepalive_timer);
	struct sock *sk = &icsk->icsk_inet.sk;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		tcp_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_TCP_STATE);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to time out instead.
		 */
		if ((user_timeout != 0 &&
		     elapsed >= msecs_to_jiffies(user_timeout) &&
		     icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1);
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	tcp_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

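/* hrtimer callback for compressed (coalesced) ACKs: when the timer fires
 * and the socket is not owned by user context, flush the pending ACK now
 * (decrementing compressed_ack so LINUX_MIB_TCPACKCOMPRESSED stays
 * accurate); otherwise defer the work to tcp_release_cb() via
 * TCP_DELACK_TIMER_DEFERRED.
 */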
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_mstamp_refresh(tp);
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_setup(&tcp_sk(sk)->pacing_timer, tcp_pace_kick, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_SOFT);

	hrtimer_setup(&tcp_sk(sk)->compressed_ack_timer, tcp_compressed_ack_kick, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED_SOFT);
}