Commit d5ac99a

David S. Miller (davem330) authored and committed
[TCP]: skb pcount with MTU discovery
The problem is that when doing MTU discovery, the too-large segments in
the write queue will be calculated as having a pcount of >1.  When
tcp_write_xmit() is trying to send, tcp_snd_test() fails the cwnd test
when pcount > cwnd.

The segments are eventually transmitted one at a time by keepalive, but
this can take a long time.

This patch checks if TSO is enabled when setting pcount.

Signed-off-by: John Heffner <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
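For illustration, the stall can be modeled outside the kernel as below. This is a minimal sketch, assuming a simplified congestion-window clause; skb_pcount() and cwnd_admits() are hypothetical names, and the real tcp_snd_test() has additional Nagle and receive-window conditions not shown here.

/*
 * Model of the failure mode (illustrative only, not kernel code).
 * After MTU discovery shrinks the MSS, an old oversized skb gets
 * pcount = ceil(len / mss) > 1 even on a non-TSO route; once that
 * pcount exceeds cwnd, the admission test below cannot pass.
 */
static unsigned int skb_pcount(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;	/* ceiling divide */
}

static int cwnd_admits(unsigned int in_flight, unsigned int pcount,
		       unsigned int cwnd)
{
	/* The whole skb must fit under cwnd in one go. */
	return in_flight + pcount <= cwnd;
}

/* E.g. len = 4380, mss = 1460 gives skb_pcount() = 3; with cwnd = 2,
 * cwnd_admits(0, 3, 2) is false and the skb sits in the write queue
 * until keepalive eventually moves it. */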
1 parent 158a0e4 · commit d5ac99a

2 files changed: +20, −16 lines

include/net/tcp.h

Lines changed: 6 additions & 5 deletions
@@ -1417,19 +1417,20 @@ tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
 			     tcp_minshall_check(tp))));
 }
 
-extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int);
+extern void tcp_set_skb_tso_segs(struct sock *, struct sk_buff *);
 
 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
  * should be put on the wire right now.
  */
-static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
+static __inline__ int tcp_snd_test(struct sock *sk,
 				   struct sk_buff *skb,
 				   unsigned cur_mss, int nonagle)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int pkts = tcp_skb_pcount(skb);
 
 	if (!pkts) {
-		tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+		tcp_set_skb_tso_segs(sk, skb);
 		pkts = tcp_skb_pcount(skb);
 	}
 
@@ -1490,7 +1491,7 @@ static __inline__ void __tcp_push_pending_frames(struct sock *sk,
 	if (skb) {
 		if (!tcp_skb_is_last(sk, skb))
 			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
+		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
 		    tcp_write_xmit(sk, nonagle))
 			tcp_check_probe_timer(sk, tp);
 	}
@@ -1508,7 +1509,7 @@ static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
 	struct sk_buff *skb = sk->sk_send_head;
 
 	return (skb &&
-		tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
+		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
 			     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
 }
 

net/ipv4/tcp_output.c

Lines changed: 14 additions & 11 deletions
@@ -427,7 +427,7 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = sk->sk_send_head;
 
-	if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
+	if (tcp_snd_test(sk, skb, cur_mss, TCP_NAGLE_PUSH)) {
 		/* Send it out now. */
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		tcp_tso_set_push(skb);
@@ -440,9 +440,12 @@ void tcp_push_one(struct sock *sk, unsigned cur_mss)
 	}
 }
 
-void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
+void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
 {
-	if (skb->len <= mss_std) {
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (skb->len <= tp->mss_cache_std ||
+	    !(sk->sk_route_caps & NETIF_F_TSO)) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
@@ -451,10 +454,10 @@ void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
 	} else {
 		unsigned int factor;
 
-		factor = skb->len + (mss_std - 1);
-		factor /= mss_std;
+		factor = skb->len + (tp->mss_cache_std - 1);
+		factor /= tp->mss_cache_std;
 		skb_shinfo(skb)->tso_segs = factor;
-		skb_shinfo(skb)->tso_size = mss_std;
+		skb_shinfo(skb)->tso_size = tp->mss_cache_std;
 	}
 }
 
@@ -525,8 +528,8 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
 	}
 
 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
-	tcp_set_skb_tso_segs(buff, tp->mss_cache_std);
+	tcp_set_skb_tso_segs(sk, skb);
+	tcp_set_skb_tso_segs(sk, buff);
 
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
 		tp->lost_out += tcp_skb_pcount(skb);
@@ -601,7 +604,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
+		tcp_set_skb_tso_segs(sk, skb);
 
 	return 0;
 }
@@ -752,7 +755,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
 	mss_now = tcp_current_mss(sk, 1);
 
 	while ((skb = sk->sk_send_head) &&
-	       tcp_snd_test(tp, skb, mss_now,
+	       tcp_snd_test(sk, skb, mss_now,
 			    tcp_skb_is_last(sk, skb) ? nonagle :
 						       TCP_NAGLE_PUSH)) {
 		if (skb->len > mss_now) {
@@ -1676,7 +1679,7 @@ int tcp_write_wakeup(struct sock *sk)
 			tp->mss_cache = tp->mss_cache_std;
 		}
 	} else if (!tcp_skb_pcount(skb))
-		tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+		tcp_set_skb_tso_segs(sk, skb);
 
 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
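As a sanity check of the new rule, here is a small standalone program sketching the post-patch pcount behavior; the names and the free-standing form are hypothetical, not part of the patch. With TSO unavailable, an oversized skb now reports a pcount of 1, so the cwnd test can pass and tcp_write_xmit() can fragment it down to the new MSS.

#include <stdio.h>

/* Sketch of the pcount rule from the patched tcp_set_skb_tso_segs();
 * illustrative only. */
static unsigned int pcount(unsigned int len, unsigned int mss, int tso_ok)
{
	if (len <= mss || !tso_ok)
		return 1;			/* no divide in the non-TSO case */
	return (len + mss - 1) / mss;		/* ceiling divide, as in the patch */
}

int main(void)
{
	/* A 4380-byte skb after path MTU discovery drops the MSS to 1460. */
	printf("TSO on:  %u\n", pcount(4380, 1460, 1));	/* prints 3 */
	printf("TSO off: %u\n", pcount(4380, 1460, 0));	/* prints 1 */
	return 0;
}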
