author		David S. Miller <davem@sunset.davemloft.net>	2005-04-24 19:12:33 -0700
committer	David S. Miller <davem@davemloft.net>	2005-04-24 19:12:33 -0700
commit		d5ac99a648b8c61d0c7f1c32a8ab7f1dca0123d2 (patch)
tree		3811d84e83dab918c57aeca1081ae343cb97df8b /include/net
parent		158a0e45b69254a9ee4d7795e3b98d8c959fb799 (diff)
[TCP]: skb pcount with MTU discovery
The problem is that when doing MTU discovery, the too-large segments in
the write queue will be calculated as having a pcount of >1.  When
tcp_write_xmit() is trying to send, tcp_snd_test() fails the cwnd test
when pcount > cwnd.

The segments are eventually transmitted one at a time by keepalive, but
this can take a long time.  This patch checks if TSO is enabled when
setting pcount.

Signed-off-by: John Heffner <jheffner@psc.edu>
Signed-off-by: David S. Miller <davem@davemloft.net>
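The tcp_output.c side of the change falls outside this diffstat (which is limited to 'include/net'), but the idea behind the new tcp_set_skb_tso_segs(sk, skb) signature can be sketched as below. This is a minimal sketch of the technique, not the exact body from the patch; it assumes the tso_segs/tso_size fields of skb_shared_info, the NETIF_F_TSO bit in sk->sk_route_caps, and tp->mss_cache_std as the cached MSS of this kernel era:

/* Sketch: recompute the segment count (pcount) for an skb.  If the route
 * cannot do TSO, or the skb fits in one MSS, pcount must stay 1, so an
 * over-MSS segment left in the write queue by MTU discovery no longer
 * fails the cwnd test in tcp_snd_test().
 */
void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int mss = tp->mss_cache_std;

	if (skb->len <= mss || !(sk->sk_route_caps & NETIF_F_TSO)) {
		/* One segment on the wire: small skb, or no TSO support. */
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		/* TSO will split the skb into mss-sized segments, so count
		 * them against the congestion window.
		 */
		skb_shinfo(skb)->tso_segs = (skb->len + mss - 1) / mss;
		skb_shinfo(skb)->tso_size = mss;
	}
}

Passing the socket rather than just the tcp_sock is presumably what lets the helper consult sk->sk_route_caps, and it is why tcp_snd_test() and its callers in the hunks below now take a struct sock *.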
Diffstat (limited to 'include/net')
-rw-r--r--	include/net/tcp.h	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 503810a..9355ae5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1417,19 +1417,20 @@ tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
 		  tcp_minshall_check(tp))));
 }
 
-extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int);
+extern void tcp_set_skb_tso_segs(struct sock *, struct sk_buff *);
 
 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
  * should be put on the wire right now.
  */
-static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
+static __inline__ int tcp_snd_test(struct sock *sk,
 				   struct sk_buff *skb,
 				   unsigned cur_mss, int nonagle)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int pkts = tcp_skb_pcount(skb);
 
 	if (!pkts) {
-		tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
+		tcp_set_skb_tso_segs(sk, skb);
 		pkts = tcp_skb_pcount(skb);
 	}
 
@@ -1490,7 +1491,7 @@ static __inline__ void __tcp_push_pending_frames(struct sock *sk,
 	if (skb) {
 		if (!tcp_skb_is_last(sk, skb))
 			nonagle = TCP_NAGLE_PUSH;
-		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
+		if (!tcp_snd_test(sk, skb, cur_mss, nonagle) ||
 		    tcp_write_xmit(sk, nonagle))
 			tcp_check_probe_timer(sk, tp);
 	}
@@ -1508,7 +1509,7 @@ static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
 	struct sk_buff *skb = sk->sk_send_head;
 
 	return (skb &&
-		tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
+		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
 			     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
 }
 