author		Marek Lindner <lindner_marek@yahoo.de>	2011-02-10 14:33:49 +0000
committer	Marek Lindner <lindner_marek@yahoo.de>	2011-03-05 12:50:04 +0100
commit		25b6d3c17eaa92ae9700eb8235bc79782613354a (patch)
tree		1c9949a6adf8144f77b91e9f3a785d0557525f39	/net/batman-adv/gateway_client.c
parent		44524fcdf6ca19b58c24f7622c4af1d8d8fe59f8 (diff)
batman-adv: Correct rcu refcounting for gw_node
It might be possible that two threads access the same data within the same
rcu grace period: the first thread calls call_rcu() to decrement the
refcount and free the data, while the second thread increments the
refcount to keep using the data. To avoid this race condition, all
refcount operations have to be atomic.
Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
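The fix follows the usual kernel idiom for combining reference counting with RCU: readers take a reference with atomic_inc_not_zero() while inside an RCU read-side critical section, and whoever drops the last reference hands the object to call_rcu() so the kfree() only happens after a grace period. A minimal sketch of that idiom is shown below; the struct item / item_get / item_put names are illustrative placeholders, not identifiers from batman-adv.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct item, rcu));
}

/* Drop a reference; once the count hits zero, defer the actual kfree()
 * until after the current RCU grace period. */
static void item_put(struct item *item)
{
	if (atomic_dec_and_test(&item->refcount))
		call_rcu(&item->rcu, item_free_rcu);
}

/* Take a reference, but only if the object is not already on its way to
 * being freed. Callers hold rcu_read_lock(), which keeps the memory
 * valid while the refcount is tested. */
static struct item *item_get(struct item *item)
{
	if (item && !atomic_inc_not_zero(&item->refcount))
		item = NULL;
	return item;
}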
Diffstat (limited to 'net/batman-adv/gateway_client.c')
-rw-r--r--	net/batman-adv/gateway_client.c	37
1 file changed, 16 insertions(+), 21 deletions(-)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 429a013..517e001 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -28,20 +28,18 @@
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
-static void gw_node_free_ref(struct kref *refcount)
+static void gw_node_free_rcu(struct rcu_head *rcu)
 {
 	struct gw_node *gw_node;
 
-	gw_node = container_of(refcount, struct gw_node, refcount);
+	gw_node = container_of(rcu, struct gw_node, rcu);
 	kfree(gw_node);
 }
 
-static void gw_node_free_rcu(struct rcu_head *rcu)
+static void gw_node_free_ref(struct gw_node *gw_node)
 {
-	struct gw_node *gw_node;
-
-	gw_node = container_of(rcu, struct gw_node, rcu);
-	kref_put(&gw_node->refcount, gw_node_free_ref);
+	if (atomic_dec_and_test(&gw_node->refcount))
+		call_rcu(&gw_node->rcu, gw_node_free_rcu);
 }
 
 void *gw_get_selected(struct bat_priv *bat_priv)
@@ -61,25 +59,26 @@ void gw_deselect(struct bat_priv *bat_priv)
 	bat_priv->curr_gw = NULL;
 
 	if (gw_node)
-		kref_put(&gw_node->refcount, gw_node_free_ref);
+		gw_node_free_ref(gw_node);
 }
 
-static struct gw_node *gw_select(struct bat_priv *bat_priv,
-			  struct gw_node *new_gw_node)
+static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
 	struct gw_node *curr_gw_node = bat_priv->curr_gw;
 
-	if (new_gw_node)
-		kref_get(&new_gw_node->refcount);
+	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
+		new_gw_node = NULL;
 
 	bat_priv->curr_gw = new_gw_node;
-	return curr_gw_node;
+
+	if (curr_gw_node)
+		gw_node_free_ref(curr_gw_node);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL;
+	struct gw_node *gw_node, *curr_gw_tmp = NULL;
 	uint8_t max_tq = 0;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	int down, up;
@@ -174,14 +173,10 @@ void gw_election(struct bat_priv *bat_priv)
 				curr_gw_tmp->orig_node->gw_flags,
 				curr_gw_tmp->orig_node->router->tq_avg);
 
-		old_gw_node = gw_select(bat_priv, curr_gw_tmp);
+		gw_select(bat_priv, curr_gw_tmp);
 	}
 
 	rcu_read_unlock();
-
-	/* the kfree() has to be outside of the rcu lock */
-	if (old_gw_node)
-		kref_put(&old_gw_node->refcount, gw_node_free_ref);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -242,7 +237,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
 	memset(gw_node, 0, sizeof(struct gw_node));
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
-	kref_init(&gw_node->refcount);
+	atomic_set(&gw_node->refcount, 1);
 
 	spin_lock_bh(&bat_priv->gw_list_lock);
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -325,7 +320,7 @@ void gw_node_purge(struct bat_priv *bat_priv)
 			gw_deselect(bat_priv);
 
 		hlist_del_rcu(&gw_node->list);
-		call_rcu(&gw_node->rcu, gw_node_free_rcu);
+		gw_node_free_ref(gw_node);
 	}
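One design consequence visible in the diff: because call_rcu() only queues the free for after a grace period, it is safe to invoke while still holding rcu_read_lock(). That is what allows the removed "the kfree() has to be outside of the rcu lock" workaround in gw_election() to go away, so gw_select() can release the previous gateway node directly instead of returning it to the caller for deferred cleanup.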