diff options
author | Mike Anderson <andmike@linux.vnet.ibm.com> | 2007-10-19 17:48:01 -0400 |
---|---|---|
committer | Alasdair G Kergon <agk@redhat.com> | 2007-10-19 21:01:26 -0400 |
commit | 7a8c3d3b92883798e4ead21dd48c16db0ec0ff6f (patch) | |
tree | 21a25dc6bd6afa11430e1ab8d997a4b1c0b960f0 /drivers/md/dm.c | |
parent | 51e5b2bd34ded40ef48cade8a6a8f1baa0b4275e (diff) |
dm: uevent generate events
This patch adds support for the dm_path_event and dm_send_event functions,
which create and send udev events.
Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 28 |
1 file changed, 28 insertions, 0 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index bb5c1eaca52b..07cbbb8eb3e0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -113,6 +113,9 @@ struct mapped_device { | |||
113 | */ | 113 | */ |
114 | atomic_t event_nr; | 114 | atomic_t event_nr; |
115 | wait_queue_head_t eventq; | 115 | wait_queue_head_t eventq; |
116 | atomic_t uevent_seq; | ||
117 | struct list_head uevent_list; | ||
118 | spinlock_t uevent_lock; /* Protect access to uevent_list */ | ||
116 | 119 | ||
117 | /* | 120 | /* |
118 | * freeze/thaw support require holding onto a super block | 121 | * freeze/thaw support require holding onto a super block |
@@ -985,6 +988,9 @@ static struct mapped_device *alloc_dev(int minor) | |||
985 | atomic_set(&md->holders, 1); | 988 | atomic_set(&md->holders, 1); |
986 | atomic_set(&md->open_count, 0); | 989 | atomic_set(&md->open_count, 0); |
987 | atomic_set(&md->event_nr, 0); | 990 | atomic_set(&md->event_nr, 0); |
991 | atomic_set(&md->uevent_seq, 0); | ||
992 | INIT_LIST_HEAD(&md->uevent_list); | ||
993 | spin_lock_init(&md->uevent_lock); | ||
988 | 994 | ||
989 | md->queue = blk_alloc_queue(GFP_KERNEL); | 995 | md->queue = blk_alloc_queue(GFP_KERNEL); |
990 | if (!md->queue) | 996 | if (!md->queue) |
@@ -1083,8 +1089,16 @@ static void free_dev(struct mapped_device *md) | |||
1083 | */ | 1089 | */ |
1084 | static void event_callback(void *context) | 1090 | static void event_callback(void *context) |
1085 | { | 1091 | { |
1092 | unsigned long flags; | ||
1093 | LIST_HEAD(uevents); | ||
1086 | struct mapped_device *md = (struct mapped_device *) context; | 1094 | struct mapped_device *md = (struct mapped_device *) context; |
1087 | 1095 | ||
1096 | spin_lock_irqsave(&md->uevent_lock, flags); | ||
1097 | list_splice_init(&md->uevent_list, &uevents); | ||
1098 | spin_unlock_irqrestore(&md->uevent_lock, flags); | ||
1099 | |||
1100 | dm_send_uevents(&uevents, &md->disk->kobj); | ||
1101 | |||
1088 | atomic_inc(&md->event_nr); | 1102 | atomic_inc(&md->event_nr); |
1089 | wake_up(&md->eventq); | 1103 | wake_up(&md->eventq); |
1090 | } | 1104 | } |
@@ -1502,6 +1516,11 @@ out: | |||
1502 | /*----------------------------------------------------------------- | 1516 | /*----------------------------------------------------------------- |
1503 | * Event notification. | 1517 | * Event notification. |
1504 | *---------------------------------------------------------------*/ | 1518 | *---------------------------------------------------------------*/ |
1519 | uint32_t dm_next_uevent_seq(struct mapped_device *md) | ||
1520 | { | ||
1521 | return atomic_add_return(1, &md->uevent_seq); | ||
1522 | } | ||
1523 | |||
1505 | uint32_t dm_get_event_nr(struct mapped_device *md) | 1524 | uint32_t dm_get_event_nr(struct mapped_device *md) |
1506 | { | 1525 | { |
1507 | return atomic_read(&md->event_nr); | 1526 | return atomic_read(&md->event_nr); |
@@ -1513,6 +1532,15 @@ int dm_wait_event(struct mapped_device *md, int event_nr) | |||
1513 | (event_nr != atomic_read(&md->event_nr))); | 1532 | (event_nr != atomic_read(&md->event_nr))); |
1514 | } | 1533 | } |
1515 | 1534 | ||
1535 | void dm_uevent_add(struct mapped_device *md, struct list_head *elist) | ||
1536 | { | ||
1537 | unsigned long flags; | ||
1538 | |||
1539 | spin_lock_irqsave(&md->uevent_lock, flags); | ||
1540 | list_add(elist, &md->uevent_list); | ||
1541 | spin_unlock_irqrestore(&md->uevent_lock, flags); | ||
1542 | } | ||
1543 | |||
1516 | /* | 1544 | /* |
1517 | * The gendisk is only valid as long as you have a reference | 1545 | * The gendisk is only valid as long as you have a reference |
1518 | * count on 'md'. | 1546 | * count on 'md'. |