diff options
author | Andrew Vagin <avagin@openvz.org> | 2012-03-07 05:49:56 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-03-08 15:56:40 -0500 |
commit | 7b60a18da393ed70db043a777fd9e6d5363077c4 (patch) | |
tree | cf83f1b5fdd5b7fab96c256f40db877112b277fe /lib/kobject_uevent.c | |
parent | 8b0372a258e6bd0e9e5ea3f3d5f05a6bf3972fee (diff) |
uevent: send events in correct order according to seqnum (v3)
The queue handling in the udev daemon assumes that the events are
ordered.
Before this patch, uevent_seqnum was incremented under sequence_lock,
then the event was sent under uevent_sock_mutex. In other words, the code
contained a window between incrementing the seqnum and sending the event.
This patch locks uevent_sock_mutex before incrementing uevent_seqnum.
v2: delete sequence_lock, uevent_seqnum is protected by uevent_sock_mutex
v3: unlock the mutex before the goto exit
Thanks to Kay for the comments.
Signed-off-by: Andrew Vagin <avagin@openvz.org>
Tested-By: Kay Sievers <kay.sievers@vrfy.org>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib/kobject_uevent.c')
-rw-r--r-- | lib/kobject_uevent.c | 19 |
1 file changed, 9 insertions, 10 deletions
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index e66e9b632617..75cbdb52bf5c 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -29,16 +29,17 @@ | |||
29 | 29 | ||
30 | u64 uevent_seqnum; | 30 | u64 uevent_seqnum; |
31 | char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; | 31 | char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; |
32 | static DEFINE_SPINLOCK(sequence_lock); | ||
33 | #ifdef CONFIG_NET | 32 | #ifdef CONFIG_NET |
34 | struct uevent_sock { | 33 | struct uevent_sock { |
35 | struct list_head list; | 34 | struct list_head list; |
36 | struct sock *sk; | 35 | struct sock *sk; |
37 | }; | 36 | }; |
38 | static LIST_HEAD(uevent_sock_list); | 37 | static LIST_HEAD(uevent_sock_list); |
39 | static DEFINE_MUTEX(uevent_sock_mutex); | ||
40 | #endif | 38 | #endif |
41 | 39 | ||
40 | /* This lock protects uevent_seqnum and uevent_sock_list */ | ||
41 | static DEFINE_MUTEX(uevent_sock_mutex); | ||
42 | |||
42 | /* the strings here must match the enum in include/linux/kobject.h */ | 43 | /* the strings here must match the enum in include/linux/kobject.h */ |
43 | static const char *kobject_actions[] = { | 44 | static const char *kobject_actions[] = { |
44 | [KOBJ_ADD] = "add", | 45 | [KOBJ_ADD] = "add", |
@@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
136 | struct kobject *top_kobj; | 137 | struct kobject *top_kobj; |
137 | struct kset *kset; | 138 | struct kset *kset; |
138 | const struct kset_uevent_ops *uevent_ops; | 139 | const struct kset_uevent_ops *uevent_ops; |
139 | u64 seq; | ||
140 | int i = 0; | 140 | int i = 0; |
141 | int retval = 0; | 141 | int retval = 0; |
142 | #ifdef CONFIG_NET | 142 | #ifdef CONFIG_NET |
@@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
243 | else if (action == KOBJ_REMOVE) | 243 | else if (action == KOBJ_REMOVE) |
244 | kobj->state_remove_uevent_sent = 1; | 244 | kobj->state_remove_uevent_sent = 1; |
245 | 245 | ||
246 | mutex_lock(&uevent_sock_mutex); | ||
246 | /* we will send an event, so request a new sequence number */ | 247 | /* we will send an event, so request a new sequence number */ |
247 | spin_lock(&sequence_lock); | 248 | retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); |
248 | seq = ++uevent_seqnum; | 249 | if (retval) { |
249 | spin_unlock(&sequence_lock); | 250 | mutex_unlock(&uevent_sock_mutex); |
250 | retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq); | ||
251 | if (retval) | ||
252 | goto exit; | 251 | goto exit; |
252 | } | ||
253 | 253 | ||
254 | #if defined(CONFIG_NET) | 254 | #if defined(CONFIG_NET) |
255 | /* send netlink message */ | 255 | /* send netlink message */ |
256 | mutex_lock(&uevent_sock_mutex); | ||
257 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { | 256 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { |
258 | struct sock *uevent_sock = ue_sk->sk; | 257 | struct sock *uevent_sock = ue_sk->sk; |
259 | struct sk_buff *skb; | 258 | struct sk_buff *skb; |
@@ -290,8 +289,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
290 | } else | 289 | } else |
291 | retval = -ENOMEM; | 290 | retval = -ENOMEM; |
292 | } | 291 | } |
293 | mutex_unlock(&uevent_sock_mutex); | ||
294 | #endif | 292 | #endif |
293 | mutex_unlock(&uevent_sock_mutex); | ||
295 | 294 | ||
296 | /* call uevent_helper, usually only enabled during early boot */ | 295 | /* call uevent_helper, usually only enabled during early boot */ |
297 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { | 296 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { |