Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/fib_frontend.c          5
-rw-r--r--   net/ipv4/netfilter/ip_tables.c   3
-rw-r--r--   net/ipv4/tcp_ipv4.c              2
-rw-r--r--   net/ipv4/tcp_output.c           20
4 files changed, 19 insertions, 11 deletions
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 9c399a7..af0190d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -482,9 +482,7 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
memset(cfg, 0, sizeof(*cfg));
rtm = nlmsg_data(nlh);
- cfg->fc_family = rtm->rtm_family;
cfg->fc_dst_len = rtm->rtm_dst_len;
- cfg->fc_src_len = rtm->rtm_src_len;
cfg->fc_tos = rtm->rtm_tos;
cfg->fc_table = rtm->rtm_table;
cfg->fc_protocol = rtm->rtm_protocol;
@@ -501,9 +499,6 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh,
case RTA_DST:
cfg->fc_dst = nla_get_be32(attr);
break;
- case RTA_SRC:
- cfg->fc_src = nla_get_be32(attr);
- break;
case RTA_OIF:
cfg->fc_oif = nla_get_u32(attr);
break;
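For orientation: the RTA_* cases above sit inside the usual netlink attribute walk in rtm_to_fib_config(). A rough kernel-style sketch of that loop (assuming the standard nlmsg_for_each_attr() helper and the struct rtmsg header length; not copied from this hunk):

    struct nlattr *attr;
    int remaining;

    nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
            switch (nla_type(attr)) {
            case RTA_DST:
                    cfg->fc_dst = nla_get_be32(attr);   /* IPv4 destination prefix */
                    break;
            case RTA_OIF:
                    cfg->fc_oif = nla_get_u32(attr);    /* output interface index */
                    break;
            default:
                    break;                              /* unrecognised attributes are skipped */
            }
    }

Removing the RTA_SRC case simply deletes one arm of this switch; attribute types without a matching case fall through and are ignored.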
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 78a44b0..4b90927 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1932,6 +1932,9 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
int ret;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
switch (cmd) {
case IPT_SO_GET_INFO:
ret = get_info(user, len, 1);
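Both the native and the 32-bit compat get paths are reachable from an ordinary getsockopt() call once the handlers are registered, which is why the compat side needs the same CAP_NET_ADMIN gate that the native do_ipt_get_ctl() path applies. A trimmed, illustrative sketch of that registration (field values assumed from the usual ipt_sockopts definition, not shown in this diff):

    static struct nf_sockopt_ops ipt_sockopts = {
            .pf          = PF_INET,
            .get_optmin  = IPT_BASE_CTL,
            .get_optmax  = IPT_SO_GET_MAX + 1,
            .get         = do_ipt_get_ctl,            /* native path, privilege-checked */
    #ifdef CONFIG_COMPAT
            .compat_get  = compat_do_ipt_get_ctl,     /* compat path, now checked as well */
    #endif
            /* set-side hooks omitted */
    };

Without the added check, an unprivileged 32-bit process on a 64-bit kernel could read table information that the native path refuses to hand out.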
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6bbd985..22ef8bd 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -373,7 +373,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
!between(seq, tp->snd_una, tp->snd_nxt)) {
- NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
+ NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f22536e..ca40615 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1096,10 +1096,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
u32 send_win, cong_win, limit, in_flight;
if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
- return 0;
+ goto send_now;
if (icsk->icsk_ca_state != TCP_CA_Open)
- return 0;
+ goto send_now;
+
+ /* Defer for less than two clock ticks. */
+ if (tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
+ goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1115,7 +1119,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
/* If a full-sized TSO skb can be sent, do it. */
if (limit >= 65536)
- return 0;
+ goto send_now;
if (sysctl_tcp_tso_win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1125,7 +1129,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
*/
chunk /= sysctl_tcp_tso_win_divisor;
if (limit >= chunk)
- return 0;
+ goto send_now;
} else {
/* Different approach, try not to defer past a single
* ACK. Receiver should ACK every other full sized
@@ -1133,11 +1137,17 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_
* then send now.
*/
if (limit > tcp_max_burst(tp) * tp->mss_cache)
- return 0;
+ goto send_now;
}
/* Ok, it looks like it is advisable to defer. */
+ tp->tso_deferred = 1 | (jiffies<<1);
+
return 1;
+
+send_now:
+ tp->tso_deferred = 0;
+ return 0;
}
/* Create a new MTU probe if we are ready.
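tso_deferred above packs two facts into one word: bit 0 says a deferral is in progress, and bits 1-31 carry a jiffies snapshot, so ((jiffies<<1)>>1) - (tp->tso_deferred>>1) compares 31-bit timestamps and the whole thing can be reset with a single store of 0. A standalone sketch of the same arithmetic in plain C (invented names, userspace only, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_jiffies;        /* stand-in for the kernel's tick counter */

    /* Record "deferring since now": bit 0 = flag, bits 1..31 = low bits of the time. */
    static uint32_t defer_stamp(void)
    {
            return 1u | (fake_jiffies << 1);
    }

    /* True once a recorded deferral is more than one tick old. */
    static int deferred_too_long(uint32_t stamp)
    {
            return stamp && ((fake_jiffies << 1) >> 1) - (stamp >> 1) > 1;
    }

    int main(void)
    {
            uint32_t stamp;

            fake_jiffies = 1000;
            stamp = defer_stamp();        /* decision: defer this skb            */

            fake_jiffies += 1;            /* one tick later: still worth waiting */
            printf("after 1 tick : send now? %d\n", deferred_too_long(stamp));

            fake_jiffies += 2;            /* three ticks in: give up and send    */
            printf("after 3 ticks: send now? %d\n", deferred_too_long(stamp));
            return 0;
    }

Clearing tp->tso_deferred to 0 on the send_now path drops the flag and the timestamp in one store, which is what lets a single field do both jobs.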