author     Todd Poynor <toddpoynor@google.com>  2012-11-01 13:36:34 -0700
committer  Todd Poynor <toddpoynor@google.com>  2012-11-01 13:36:34 -0700
commit     925d49abc38dcc7ef1cbfe125c6f0b2202ae3df3 (patch)
tree       a56506710f0340db055191e3cf0a207699c1b849 /net/ipv4/tcp.c
parent     834029ac9d0ad8dea4e6a21bc34877dc3740b9f4 (diff)
parent     27d0858dbcf199838b8c50a3e94d397bf326d986 (diff)
Merge remote-tracking branch 'stable/linux-3.0.y' into android-3.0
Change-Id: I9685feb9277b450da10d78a455b3c0674d6cfe18
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 31
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0e5330..08d2244 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -743,7 +743,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 			   old_size_goal + mss_now > xmit_size_goal)) {
 			xmit_size_goal = old_size_goal;
 		} else {
-			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			tp->xmit_size_goal_segs =
+				min_t(u16, xmit_size_goal / mss_now,
+				      sk->sk_gso_max_segs);
 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 		}
 	}
@@ -854,8 +856,7 @@ new_segment:
 wait_for_sndbuf:
 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-			if (copied)
-				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 				goto do_error;
@@ -1601,8 +1602,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		}
 
 #ifdef CONFIG_NET_DMA
-		if (tp->ucopy.dma_chan)
-			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+		if (tp->ucopy.dma_chan) {
+			if (tp->rcv_wnd == 0 &&
+			    !skb_queue_empty(&sk->sk_async_wait_queue)) {
+				tcp_service_net_dma(sk, true);
+				tcp_cleanup_rbuf(sk, copied);
+			} else
+				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+		}
 #endif
 		if (copied >= target) {
 			/* Do not sleep, just process backlog. */
@@ -2410,7 +2417,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Cap the max timeout in ms TCP will retry/retrans
 		 * before giving up and aborting (ETIMEDOUT) a connection.
 		 */
-		icsk->icsk_user_timeout = msecs_to_jiffies(val);
+		if (val < 0)
+			err = -EINVAL;
+		else
+			icsk->icsk_user_timeout = msecs_to_jiffies(val);
 		break;
 	default:
 		err = -ENOPROTOOPT;
@@ -3236,7 +3246,7 @@ void __init tcp_init(void)
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int i, max_share, cnt;
+	int i, max_rshare, max_wshare, cnt;
 	unsigned long jiffy = jiffies;
 
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3300,15 +3310,16 @@ void __init tcp_init(void)
 
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
-	max_share = min(4UL*1024*1024, limit);
+	max_wshare = min(4UL*1024*1024, limit);
+	max_rshare = min(6UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
-	sysctl_tcp_wmem[2] = max(64*1024, max_share);
+	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
 
 	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
-	sysctl_tcp_rmem[2] = max(87380, max_share);
+	sysctl_tcp_rmem[2] = max(87380, max_rshare);
 
 	printk(KERN_INFO "TCP: Hash tables configured "
 	       "(established %u bind %u)\n",