path: root/net/ipv6/ipv6_sockglue.c
author	Eric Dumazet <eric.dumazet@gmail.com>	2010-04-08 23:03:29 +0000
committer	David S. Miller <davem@davemloft.net>	2010-04-13 01:41:33 -0700
commit	b6c6712a42ca3f9fa7f4a3d7c40e3a9dd1fd9e03 (patch)
tree	42032b4978874e8ffcf6c851d13324b8c8c7c113 /net/ipv6/ipv6_sockglue.c
parent	7a161ea92471087a1579239d7a58dd06eaa5601c (diff)
net: sk_dst_cache RCUification
With the latest CONFIG_PROVE_RCU infrastructure, I felt comfortable enough to make this work.

sk->sk_dst_cache is currently protected by a rwlock (sk_dst_lock). This rwlock is read-locked for a very small amount of time, and dst entries are already freed only after an RCU grace period. This calls for RCU again :)

This patch converts sk_dst_lock to a spinlock and uses RCU for readers.

__sk_dst_get() is supposed to be called with rcu_read_lock() held, or with the socket locked by the user, so use the appropriate rcu_dereference_check() condition (rcu_read_lock_held() || sock_owned_by_user(sk)).

This patch avoids two atomic ops per tx packet on connected UDP sockets, for example, and lets sk_dst_lock be dirtied much less often.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
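For context, a minimal sketch of the reader side described above. The actual helper lives in include/net/sock.h and is not part of this file's diff; its shape here is inferred from the commit message, and the caller pattern matches the IPV6_MTU hunk below:

	/* Sketch only: lockless read of the cached route. No reference is
	 * taken and no dst_release() is needed, because dst entries are
	 * freed only after an RCU grace period. The rcu_dereference_check()
	 * condition documents the two legal calling contexts. */
	static inline struct dst_entry *__sk_dst_get(struct sock *sk)
	{
		return rcu_dereference_check(sk->sk_dst_cache,
					     rcu_read_lock_held() ||
					     sock_owned_by_user(sk));
	}

	/* Typical caller, as in do_ipv6_getsockopt() below: */
	rcu_read_lock();
	dst = __sk_dst_get(sk);
	if (dst)
		val = dst_mtu(dst);
	rcu_read_unlock();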
Diffstat (limited to 'net/ipv6/ipv6_sockglue.c')
-rw-r--r--	net/ipv6/ipv6_sockglue.c	25
1 file changed, 13 insertions, 12 deletions
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 33f60fc..1160400 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -114,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
 		}
 		opt = xchg(&inet6_sk(sk)->opt, opt);
 	} else {
-		write_lock(&sk->sk_dst_lock);
+		spin_lock(&sk->sk_dst_lock);
 		opt = xchg(&inet6_sk(sk)->opt, opt);
-		write_unlock(&sk->sk_dst_lock);
+		spin_unlock(&sk->sk_dst_lock);
 	}
 
 	sk_dst_reset(sk);
@@ -971,14 +971,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 	case IPV6_MTU:
 	{
 		struct dst_entry *dst;
+
 		val = 0;
-		lock_sock(sk);
-		dst = sk_dst_get(sk);
-		if (dst) {
+		rcu_read_lock();
+		dst = __sk_dst_get(sk);
+		if (dst)
 			val = dst_mtu(dst);
-			dst_release(dst);
-		}
-		release_sock(sk);
+		rcu_read_unlock();
 		if (!val)
 			return -ENOTCONN;
 		break;
@@ -1066,12 +1065,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		else
 			val = np->mcast_hops;
 
-		dst = sk_dst_get(sk);
-		if (dst) {
-			if (val < 0)
+		if (val < 0) {
+			rcu_read_lock();
+			dst = __sk_dst_get(sk);
+			if (dst)
 				val = ip6_dst_hoplimit(dst);
-			dst_release(dst);
+			rcu_read_unlock();
 		}
+
 		if (val < 0)
 			val = sock_net(sk)->ipv6.devconf_all->hop_limit;
 		break;