author		Johannes Berg <johannes@sipsolutions.net>	2009-09-12 03:03:15 +0000
committer	David S. Miller <davem@davemloft.net>	2009-09-14 17:02:50 -0700
commit		d136f1bd366fdb7e747ca7e0218171e7a00a98a5
tree		cee39b3249c36aba4b765cae6d9d3579c9f10a2d
parent		8be8057e72d7d319f8e97b26e16de8021fe63988
genetlink: fix netns vs. netlink table locking
Since my commits introducing netns awareness into genetlink we can
get this problem:

BUG: scheduling while atomic: modprobe/1178/0x00000002
2 locks held by modprobe/1178:
 #0:  (genl_mutex){+.+.+.}, at: [<ffffffff8135ee1a>] genl_register_mc_grou
 #1:  (rcu_read_lock){.+.+..}, at: [<ffffffff8135eeb5>] genl_register_mc_g
Pid: 1178, comm: modprobe Not tainted 2.6.31-rc8-wl-34789-g95cb731-dirty #
Call Trace:
 [<ffffffff8103e285>] __schedule_bug+0x85/0x90
 [<ffffffff81403138>] schedule+0x108/0x588
 [<ffffffff8135b131>] netlink_table_grab+0xa1/0xf0
 [<ffffffff8135c3a7>] netlink_change_ngroups+0x47/0x100
 [<ffffffff8135ef0f>] genl_register_mc_group+0x12f/0x290

because I overlooked that netlink_table_grab() will schedule,
thinking it was just the rwlock. However, in the contention case,
that isn't actually true. Fix this by letting the code grab the
netlink table lock first and then the RCU for netns protection.

Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
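For illustration only (not part of the patch): a minimal sketch of the locking order the fix establishes, assuming the netlink_table_grab()/netlink_table_ungrab() pair exported by this change and the new lockless __netlink_change_ngroups() shown in the diff below; the helper name resize_all_netns_groups() is made up for this example.

/*
 * Sketch: take the (possibly sleeping) netlink table lock first, and
 * only then enter the RCU read-side critical section that walks the
 * netns list.  Doing it the other way round can schedule() inside
 * rcu_read_lock(), which is the "scheduling while atomic" bug above.
 */
static int resize_all_netns_groups(unsigned int ngroups)
{
	struct net *net;
	int err = 0;

	netlink_table_grab();		/* may sleep, so grab it outside RCU */
	rcu_read_lock();		/* protects the netns list walk */
	for_each_net_rcu(net) {
		/* lockless variant: the table lock is already held */
		err = __netlink_change_ngroups(net->genl_sock, ngroups);
		if (err)
			break;
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	return err;
}

The genetlink.c hunk below applies this ordering inside genl_register_mc_group().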
Diffstat (limited to 'net')
-rw-r--r--	net/netlink/af_netlink.c	51
-rw-r--r--	net/netlink/genetlink.c		5
2 files changed, 33 insertions, 23 deletions
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d0ff382..c5aab6a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -177,9 +177,11 @@ static void netlink_sock_destruct(struct sock *sk)
* this, _but_ remember, it adds useless work on UP machines.
*/
-static void netlink_table_grab(void)
+void netlink_table_grab(void)
__acquires(nl_table_lock)
{
+ might_sleep();
+
write_lock_irq(&nl_table_lock);
if (atomic_read(&nl_table_users)) {
@@ -200,7 +202,7 @@ static void netlink_table_grab(void)
}
}
-static void netlink_table_ungrab(void)
+void netlink_table_ungrab(void)
__releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
@@ -1549,37 +1551,21 @@ static void netlink_free_old_listeners(struct rcu_head *rcu_head)
kfree(lrh->ptr);
}
-/**
- * netlink_change_ngroups - change number of multicast groups
- *
- * This changes the number of multicast groups that are available
- * on a certain netlink family. Note that it is not possible to
- * change the number of groups to below 32. Also note that it does
- * not implicitly call netlink_clear_multicast_users() when the
- * number of groups is reduced.
- *
- * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
- * @groups: The new number of groups.
- */
-int netlink_change_ngroups(struct sock *sk, unsigned int groups)
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
unsigned long *listeners, *old = NULL;
struct listeners_rcu_head *old_rcu_head;
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
- int err = 0;
if (groups < 32)
groups = 32;
- netlink_table_grab();
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
listeners = kzalloc(NLGRPSZ(groups) +
sizeof(struct listeners_rcu_head),
GFP_ATOMIC);
- if (!listeners) {
- err = -ENOMEM;
- goto out_ungrab;
- }
+ if (!listeners)
+ return -ENOMEM;
old = tbl->listeners;
memcpy(listeners, old, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, listeners);
@@ -1597,8 +1583,29 @@ int netlink_change_ngroups(struct sock *sk, unsigned int groups)
}
tbl->groups = groups;
- out_ungrab:
+ return 0;
+}
+
+/**
+ * netlink_change_ngroups - change number of multicast groups
+ *
+ * This changes the number of multicast groups that are available
+ * on a certain netlink family. Note that it is not possible to
+ * change the number of groups to below 32. Also note that it does
+ * not implicitly call netlink_clear_multicast_users() when the
+ * number of groups is reduced.
+ *
+ * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
+ * @groups: The new number of groups.
+ */
+int netlink_change_ngroups(struct sock *sk, unsigned int groups)
+{
+ int err;
+
+ netlink_table_grab();
+ err = __netlink_change_ngroups(sk, groups);
netlink_table_ungrab();
+
return err;
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 66f6ba0..566941e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -176,9 +176,10 @@ int genl_register_mc_group(struct genl_family *family,
if (family->netnsok) {
struct net *net;
+ netlink_table_grab();
rcu_read_lock();
for_each_net_rcu(net) {
- err = netlink_change_ngroups(net->genl_sock,
+ err = __netlink_change_ngroups(net->genl_sock,
mc_groups_longs * BITS_PER_LONG);
if (err) {
/*
@@ -188,10 +189,12 @@ int genl_register_mc_group(struct genl_family *family,
* increased on some sockets which is ok.
*/
rcu_read_unlock();
+ netlink_table_ungrab();
goto out;
}
}
rcu_read_unlock();
+ netlink_table_ungrab();
} else {
err = netlink_change_ngroups(init_net.genl_sock,
mc_groups_longs * BITS_PER_LONG);