author | Andrew Vagin <avagin@openvz.org> | 2012-03-07 14:49:56 +0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-04-02 09:27:09 -0700 |
commit | 377c2f4aa985f937d77a6bb9b938b4deda2dc282 (patch) | |
tree | eda5f5a602c6072e5d083d0c794401b9c55b5db3 /lib | |
parent | d16d493a1c1079e4e065ee6070b2413add846719 (diff) | |
uevent: send events in correct order according to seqnum (v3)
commit 7b60a18da393ed70db043a777fd9e6d5363077c4 upstream.
The queue handling in the udev daemon assumes that the events are
ordered.
Before this patch, uevent_seqnum was incremented under sequence_lock and the
event was then sent under uevent_sock_mutex; in other words, the code
contained a window between incrementing the seqnum and sending the event.
This patch locks uevent_sock_mutex before incrementing uevent_seqnum.
v2: delete sequence_lock; uevent_seqnum is now protected by uevent_sock_mutex
v3: unlock the mutex before the goto exit
Thanks to Kay for the comments.
Signed-off-by: Andrew Vagin <avagin@openvz.org>
Tested-By: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
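To make the window described in the message above concrete, here is a minimal user-space C sketch. It is an illustration only, not the kernel code: `send_event()`, `emit_event_old()`, `emit_event_new()` and the pthread locks are hypothetical stand-ins for the netlink broadcast, for kobject_uevent_env() and for the kernel's spinlock/mutex primitives.

```c
/*
 * Sketch of the seqnum-ordering race, assuming POSIX threads on Linux.
 * Build with: cc -std=c99 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static unsigned long long seqnum;
static pthread_spinlock_t sequence_lock;   /* old: protects only the counter */
static pthread_mutex_t sock_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the netlink broadcast: just report the number it was given. */
static void send_event(unsigned long long seq)
{
	printf("event SEQNUM=%llu\n", seq);
}

/*
 * Old scheme: the counter is bumped under sequence_lock, the send happens
 * later under sock_mutex.  Between the two, another thread can run the
 * whole function and transmit a *larger* seqnum first.
 */
static void emit_event_old(void)
{
	unsigned long long seq;

	pthread_spin_lock(&sequence_lock);
	seq = ++seqnum;
	pthread_spin_unlock(&sequence_lock);
	/* <-- the window: a concurrent emitter may overtake us here */
	pthread_mutex_lock(&sock_mutex);
	send_event(seq);
	pthread_mutex_unlock(&sock_mutex);
}

/*
 * Patched scheme: one mutex covers both the increment and the send, so
 * events leave in strictly increasing seqnum order.
 */
static void emit_event_new(void)
{
	pthread_mutex_lock(&sock_mutex);
	send_event(++seqnum);
	pthread_mutex_unlock(&sock_mutex);
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++)
		emit_event_old();	/* switch to emit_event_new() for ordered output */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_spin_init(&sequence_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	pthread_spin_destroy(&sequence_lock);
	return 0;
}
```

With emit_event_old() the printed SEQNUM values are not guaranteed to appear in increasing order, which is exactly what breaks udev's assumption of ordered events; emit_event_new() mirrors the patch by taking the single mutex before incrementing the counter.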
Diffstat (limited to 'lib')
-rw-r--r-- | lib/kobject_uevent.c | 19 |
1 file changed, 9 insertions, 10 deletions
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index ad72a03..6d40244 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,16 +29,17 @@
 
 u64 uevent_seqnum;
 char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
-static DEFINE_SPINLOCK(sequence_lock);
 #ifdef CONFIG_NET
 struct uevent_sock {
 	struct list_head list;
 	struct sock *sk;
 };
 static LIST_HEAD(uevent_sock_list);
-static DEFINE_MUTEX(uevent_sock_mutex);
 #endif
 
+/* This lock protects uevent_seqnum and uevent_sock_list */
+static DEFINE_MUTEX(uevent_sock_mutex);
+
 /* the strings here must match the enum in include/linux/kobject.h */
 static const char *kobject_actions[] = {
 	[KOBJ_ADD] =		"add",
@@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	struct kobject *top_kobj;
 	struct kset *kset;
 	const struct kset_uevent_ops *uevent_ops;
-	u64 seq;
 	int i = 0;
 	int retval = 0;
 #ifdef CONFIG_NET
@@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	else if (action == KOBJ_REMOVE)
 		kobj->state_remove_uevent_sent = 1;
 
+	mutex_lock(&uevent_sock_mutex);
 	/* we will send an event, so request a new sequence number */
-	spin_lock(&sequence_lock);
-	seq = ++uevent_seqnum;
-	spin_unlock(&sequence_lock);
-	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
-	if (retval)
+	retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
+	if (retval) {
+		mutex_unlock(&uevent_sock_mutex);
 		goto exit;
+	}
 
 #if defined(CONFIG_NET)
 	/* send netlink message */
-	mutex_lock(&uevent_sock_mutex);
 	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
 		struct sock *uevent_sock = ue_sk->sk;
 		struct sk_buff *skb;
@@ -287,8 +286,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		} else
 			retval = -ENOMEM;
 	}
-	mutex_unlock(&uevent_sock_mutex);
 #endif
+	mutex_unlock(&uevent_sock_mutex);
 
 	/* call uevent_helper, usually only enabled during early boot */
 	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {