author    Marek Lindner <lindner_marek@yahoo.de>    2011-02-18 12:28:10 +0000
committer Marek Lindner <lindner_marek@yahoo.de>    2011-03-05 12:52:01 +0100
commit    7b36e8eef989fc59535b4f1d3fc0f83afaf419d4 (patch)
tree      f0900101542966e0655ca5f115b5b0bc409b1e74 /net/batman-adv/originator.c
parent    7aadf889e897155c45cda230d2a6701ad1fbff61 (diff)
batman-adv: Correct rcu refcounting for orig_node
It might be possible that two threads access the same data within the same RCU grace period: the first thread calls call_rcu() to decrement the refcount and free the data while the second thread increases the refcount to use the data. To avoid this race condition, all refcount operations have to be atomic.

Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
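For context, the refcounting scheme this patch moves orig_node to looks roughly like the sketch below (hypothetical example_node names, not the batman-adv code itself; the reader side lives outside originator.c and is not part of the hunks that follow). An RCU reader may only take a reference with atomic_inc_not_zero(), so it can never resurrect an object whose refcount has already dropped to zero, and the final put defers the actual kfree() past the grace period via call_rcu().

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_node {
	atomic_t refcount;
	struct rcu_head rcu;
};

/* Reader side: under rcu_read_lock(), succeeds only while refcount > 0. */
static bool example_node_get(struct example_node *node)
{
	return atomic_inc_not_zero(&node->refcount);
}

static void example_node_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_node, rcu));
}

/* Release side: the final put defers the free past the RCU grace period. */
static void example_node_put(struct example_node *node)
{
	if (atomic_dec_and_test(&node->refcount))
		call_rcu(&node->rcu, example_node_free_rcu);
}

kref offered no inc-unless-zero primitive at the time, which is presumably why the code switches to a bare atomic_t here rather than keeping struct kref.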
Diffstat (limited to 'net/batman-adv/originator.c')
-rw-r--r--  net/batman-adv/originator.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index bdcb399..a70debe 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -102,13 +102,13 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
 	return neigh_node;
 }
 
-void orig_node_free_ref(struct kref *refcount)
+static void orig_node_free_rcu(struct rcu_head *rcu)
 {
 	struct hlist_node *node, *node_tmp;
 	struct neigh_node *neigh_node, *tmp_neigh_node;
 	struct orig_node *orig_node;
 
-	orig_node = container_of(refcount, struct orig_node, refcount);
+	orig_node = container_of(rcu, struct orig_node, rcu);
 
 	spin_lock_bh(&orig_node->neigh_list_lock);
@@ -137,6 +137,12 @@ void orig_node_free_ref(struct kref *refcount)
 	kfree(orig_node);
 }
 
+void orig_node_free_ref(struct orig_node *orig_node)
+{
+	if (atomic_dec_and_test(&orig_node->refcount))
+		call_rcu(&orig_node->rcu, orig_node_free_rcu);
+}
+
 void originator_free(struct bat_priv *bat_priv)
 {
 	struct hashtable_t *hash = bat_priv->orig_hash;
@@ -163,7 +169,7 @@ void originator_free(struct bat_priv *bat_priv)
 					  head, hash_entry) {
 
 			hlist_del_rcu(node);
-			kref_put(&orig_node->refcount, orig_node_free_ref);
+			orig_node_free_ref(orig_node);
 		}
 		spin_unlock_bh(list_lock);
 	}
@@ -196,7 +202,9 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	spin_lock_init(&orig_node->ogm_cnt_lock);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
-	kref_init(&orig_node->refcount);
+
+	/* extra reference for return */
+	atomic_set(&orig_node->refcount, 2);
 
 	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
@@ -229,8 +237,6 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	if (hash_added < 0)
 		goto free_bcast_own_sum;
 
-	/* extra reference for return */
-	kref_get(&orig_node->refcount);
 	return orig_node;
 free_bcast_own_sum:
 	kfree(orig_node->bcast_own_sum);
@@ -348,8 +354,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
 				if (orig_node->gw_flags)
 					gw_node_delete(bat_priv, orig_node);
 				hlist_del_rcu(node);
-				kref_put(&orig_node->refcount,
-					 orig_node_free_ref);
+				orig_node_free_ref(orig_node);
 				continue;
 			}