author    | David S. Miller <davem@davemloft.net> | 2012-07-23 16:29:00 -0700
committer | David S. Miller <davem@davemloft.net> | 2012-07-23 16:36:26 -0700
commit    | 92101b3b2e3178087127709a556b091dae314e9e (patch)
tree      | 06359f8823da3ed7617c5ea78e4a56bc5e958fea /net/ipv4/tcp_ipv4.c
parent    | fe3edf45792a7d2f0edff4e2fcdd9a84c1a388a0 (diff)
ipv4: Prepare for change of rt->rt_iif encoding.
Use inet_iif() consistently, and for TCP record the input interface of the
cached RX dst in the inet sock.
rt->rt_iif is going to be encoded differently, so that we can
legitimately cache input routes in the FIB info more aggressively.
When the input interface is "use the SKB device index", rt->rt_iif will be
set to zero.
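For readers following the encoding change, a minimal sketch of the intended
lookup semantics, assuming the fallback ends up inside inet_iif() itself
(illustrative, not quoted from this commit):

/* Sketch: under the new encoding, rt_iif == 0 means "use the skb's
 * input device index", so the helper falls back to skb->skb_iif
 * instead of trusting the cached route blindly.
 */
static inline int inet_iif(const struct sk_buff *skb)
{
	int iif = skb_rtable(skb)->rt_iif;

	if (iif)
		return iif;	/* route records an explicit input ifindex */
	return skb->skb_iif;	/* zero: fall back to the receiving device */
}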
This forces us to move the TCP RX dst cache installation into the
ipv4-specific code, which is where it belongs anyway: caching the input
route for ipv6 would be pointless at the moment, since the ipv6 input
paths do not inspect it yet.
Also, remove the unlikely() on dst->obsolete; all ipv4 dsts have obsolete
set to a non-zero value precisely to force invocation of the check
callback.
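For context, the generic dst_check() helper only calls into the protocol's
check hook when dst->obsolete is non-zero; a sketch of that helper, as it
appeared in include/net/dst.h around this time, shows why the annotation
was misleading for ipv4:

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	/* For ipv4 dsts this branch is always taken, since obsolete is
	 * deliberately set non-zero to force the check callback to run,
	 * so marking it unlikely() mispredicts the common case.
	 */
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}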
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 24
1 file changed, 18 insertions, 6 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bc5432e..3e30548 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1618,6 +1618,20 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		sock_rps_save_rxhash(sk, skb);
+		if (sk->sk_rx_dst) {
+			struct dst_entry *dst = sk->sk_rx_dst;
+			if (dst->ops->check(dst, 0) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+		if (unlikely(sk->sk_rx_dst == NULL)) {
+			struct inet_sock *icsk = inet_sk(sk);
+			struct rtable *rt = skb_rtable(skb);
+
+			sk->sk_rx_dst = dst_clone(&rt->dst);
+			icsk->rx_dst_ifindex = inet_iif(skb);
+		}
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1700,14 +1714,12 @@ void tcp_v4_early_demux(struct sk_buff *skb)
 		skb->destructor = sock_edemux;
 		if (sk->sk_state != TCP_TIME_WAIT) {
 			struct dst_entry *dst = sk->sk_rx_dst;
+			struct inet_sock *icsk = inet_sk(sk);
 
 			if (dst)
 				dst = dst_check(dst, 0);
-			if (dst) {
-				struct rtable *rt = (struct rtable *) dst;
-
-				if (rt->rt_iif == dev->ifindex)
-					skb_dst_set_noref(skb, dst);
-			}
+			if (dst &&
+			    icsk->rx_dst_ifindex == dev->ifindex)
+				skb_dst_set_noref(skb, dst);
 		}
 	}
 }
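Read together, the two hunks form a writer/reader pair: the fast path
installs the cache, and early demux consumes it. A condensed, illustrative
view of that lifecycle (names taken from the diff above; locking and error
handling elided):

/* Install: the tcp_v4_do_rcv() fast path caches the input route and the
 * interface it arrived on, since rt->rt_iif can no longer be compared
 * against a device index directly under the new encoding.
 */
sk->sk_rx_dst = dst_clone(&rt->dst);
inet_sk(sk)->rx_dst_ifindex = inet_iif(skb);

/* Consume: tcp_v4_early_demux() reuses the cached dst only if it is
 * still valid and the packet arrived on the recorded interface.
 */
dst = dst_check(sk->sk_rx_dst, 0);
if (dst && inet_sk(sk)->rx_dst_ifindex == dev->ifindex)
	skb_dst_set_noref(skb, dst);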