-rw-r--r--   Documentation/device-mapper/dm-uevent.txt |  97
-rw-r--r--   drivers/md/dm-uevent.c                     | 150
-rw-r--r--   drivers/md/dm-uevent.h                     |  18
-rw-r--r--   drivers/md/dm.c                            |  28
-rw-r--r--   include/linux/device-mapper.h              |   2
5 files changed, 295 insertions, 0 deletions
diff --git a/Documentation/device-mapper/dm-uevent.txt b/Documentation/device-mapper/dm-uevent.txt
new file mode 100644
index 000000000000..07edbd85c714
--- /dev/null
+++ b/Documentation/device-mapper/dm-uevent.txt
@@ -0,0 +1,97 @@
+The device-mapper uevent code adds the ability for device-mapper to create
+and send kobject uevents (uevents).  Previously, device-mapper events were
+only available through the ioctl interface.  The advantage of the uevent
+interface is that each event carries environment attributes giving extra
+context, which avoids having to query the state of the device-mapper device
+after the event is received.
+
+There are currently two functions for device-mapper uevents.  The first
+function creates and queues the event; the second sends the queued event(s).
+
+void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
+                    const char *path, unsigned nr_valid_paths)
+
+void dm_send_uevents(struct list_head *events, struct kobject *kobj)
+
+
+The variables added to the uevent environment are:
+
+Variable Name: DM_TARGET
+Uevent Action(s): KOBJ_CHANGE
+Type: string
+Description:
+Value: Name of device-mapper target that generated the event.
+
+Variable Name: DM_ACTION
+Uevent Action(s): KOBJ_CHANGE
+Type: string
+Description:
+Value: Device-mapper specific action that caused the uevent action.
+	PATH_FAILED - A path has failed.
+	PATH_REINSTATED - A path has been reinstated.
+
+Variable Name: DM_SEQNUM
+Uevent Action(s): KOBJ_CHANGE
+Type: unsigned integer
+Description: A sequence number for this specific device-mapper device.
+Value: Valid unsigned integer range.
+
+Variable Name: DM_PATH
+Uevent Action(s): KOBJ_CHANGE
+Type: string
+Description: Major and minor number of the path device pertaining to this
+event.
+Value: Path name in the form of "Major:Minor"
+
+Variable Name: DM_NR_VALID_PATHS
+Uevent Action(s): KOBJ_CHANGE
+Type: unsigned integer
+Description:
+Value: Valid unsigned integer range.
+
+Variable Name: DM_NAME
+Uevent Action(s): KOBJ_CHANGE
+Type: string
+Description: Name of the device-mapper device.
+Value: Name
+
+Variable Name: DM_UUID
+Uevent Action(s): KOBJ_CHANGE
+Type: string
+Description: UUID of the device-mapper device.
+Value: UUID. (Empty string if there isn't one.)
+
+Examples of the generated uevents, as captured by udevmonitor, are shown
+below.
+
+1.) Path failure.
+UEVENT[1192521009.711215] change@/block/dm-3
+ACTION=change
+DEVPATH=/block/dm-3
+SUBSYSTEM=block
+DM_TARGET=multipath
+DM_ACTION=PATH_FAILED
+DM_SEQNUM=1
+DM_PATH=8:32
+DM_NR_VALID_PATHS=0
+DM_NAME=mpath2
+DM_UUID=mpath-35333333000002328
+MINOR=3
+MAJOR=253
+SEQNUM=1130
+
+2.) Path reinstate.
+UEVENT[1192521132.989927] change@/block/dm-3
+ACTION=change
+DEVPATH=/block/dm-3
+SUBSYSTEM=block
+DM_TARGET=multipath
+DM_ACTION=PATH_REINSTATED
+DM_SEQNUM=2
+DM_PATH=8:32
+DM_NR_VALID_PATHS=1
+DM_NAME=mpath2
+DM_UUID=mpath-35333333000002328
+MINOR=3
+MAJOR=253
+SEQNUM=1131
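
[Editor's example] The DM_* variables above arrive in the uevent environment,
so a udev rule or helper program can act on them directly.  The sketch below
is illustrative only and not part of this patch; the helper itself and how it
is invoked are assumptions, but the variable names match the documentation
above.

	/* example_dm_uevent_helper.c -- illustrative only */
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Variable names match the documented uevent environment. */
		const char *action = getenv("DM_ACTION");
		const char *name   = getenv("DM_NAME");
		const char *path   = getenv("DM_PATH");
		const char *valid  = getenv("DM_NR_VALID_PATHS");

		if (!action || !name)
			return 0;	/* not a device-mapper path event */

		printf("%s: %s on path %s, %s valid path(s) remaining\n",
		       name, action, path ? path : "?", valid ? valid : "?");
		return 0;
	}
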
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 53200c96bcb4..50377e5dc2a3 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -21,12 +21,22 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/kobject.h>
+#include <linux/dm-ioctl.h>
 
 #include "dm.h"
 #include "dm-uevent.h"
 
 #define DM_MSG_PREFIX "uevent"
 
+static const struct {
+	enum dm_uevent_type type;
+	enum kobject_action action;
+	char *name;
+} _dm_uevent_type_names[] = {
+	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
+	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
+};
+
 static struct kmem_cache *_dm_event_cache;
 
 struct dm_uevent {
@@ -34,6 +44,8 @@ struct dm_uevent {
 	enum kobject_action action;
 	struct kobj_uevent_env ku_env;
 	struct list_head elist;
+	char name[DM_NAME_LEN];
+	char uuid[DM_UUID_LEN];
 };
 
 static void dm_uevent_free(struct dm_uevent *event)
@@ -55,6 +67,144 @@ static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
 	return event;
 }
 
+static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
+					      struct dm_target *ti,
+					      enum kobject_action action,
+					      const char *dm_action,
+					      const char *path,
+					      unsigned nr_valid_paths)
+{
+	struct dm_uevent *event;
+
+	event = dm_uevent_alloc(md);
+	if (!event) {
+		DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__);
+		goto err_nomem;
+	}
+
+	event->action = action;
+
+	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
+		DMERR("%s: add_uevent_var() for DM_TARGET failed",
+		      __FUNCTION__);
+		goto err_add;
+	}
+
+	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
+		DMERR("%s: add_uevent_var() for DM_ACTION failed",
+		      __FUNCTION__);
+		goto err_add;
+	}
+
+	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
+			   dm_next_uevent_seq(md))) {
+		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
+		      __FUNCTION__);
+		goto err_add;
+	}
+
+	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
+		DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__);
+		goto err_add;
+	}
+
+	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
+			   nr_valid_paths)) {
+		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
+		      __FUNCTION__);
+		goto err_add;
+	}
+
+	return event;
+
+err_add:
+	dm_uevent_free(event);
+err_nomem:
+	return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * dm_send_uevents - send uevents for given list
+ *
+ * @events: list of events to send
+ * @kobj: kobject generating event
+ *
+ */
+void dm_send_uevents(struct list_head *events, struct kobject *kobj)
+{
+	int r;
+	struct dm_uevent *event, *next;
+
+	list_for_each_entry_safe(event, next, events, elist) {
+		list_del_init(&event->elist);
+
+		/*
+		 * Need to call dm_copy_name_and_uuid from here for now.
+		 * Context of previous var adds and locking used for
+		 * hash_cell not compatible.
+		 */
+		if (dm_copy_name_and_uuid(event->md, event->name,
+					  event->uuid)) {
+			DMERR("%s: dm_copy_name_and_uuid() failed",
+			      __FUNCTION__);
+			goto uevent_free;
+		}
+
+		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
+			DMERR("%s: add_uevent_var() for DM_NAME failed",
+			      __FUNCTION__);
+			goto uevent_free;
+		}
+
+		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
+			DMERR("%s: add_uevent_var() for DM_UUID failed",
+			      __FUNCTION__);
+			goto uevent_free;
+		}
+
+		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
+		if (r)
+			DMERR("%s: kobject_uevent_env failed", __FUNCTION__);
+uevent_free:
+		dm_uevent_free(event);
+	}
+}
+EXPORT_SYMBOL_GPL(dm_send_uevents);
+
+/**
+ * dm_path_uevent - called to create a new path event and queue it
+ *
+ * @event_type: path event type enum
+ * @ti: pointer to a dm_target
+ * @path: string containing pathname
+ * @nr_valid_paths: number of valid paths remaining
+ *
+ */
+void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
+		    const char *path, unsigned nr_valid_paths)
+{
+	struct mapped_device *md = dm_table_get_md(ti->table);
+	struct dm_uevent *event;
+
+	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
+		DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type);
+		goto out;
+	}
+
+	event = dm_build_path_uevent(md, ti,
+				     _dm_uevent_type_names[event_type].action,
+				     _dm_uevent_type_names[event_type].name,
+				     path, nr_valid_paths);
+	if (IS_ERR(event))
+		goto out;
+
+	dm_uevent_add(md, &event->elist);
+
+out:
+	dm_put(md);
+}
+EXPORT_SYMBOL_GPL(dm_path_uevent);
+
 int dm_uevent_init(void)
 {
 	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
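
[Editor's example] The intended flow is that a target queues an event with
dm_path_uevent() and the queued event is actually delivered later from dm.c's
event_callback() (see the dm.c hunk below) once a device-mapper event fires
for the table.  The sketch below is a hedged illustration only: the function
example_fail_path() is hypothetical, and the real call sites (presumably in
dm-mpath, given the DM_TARGET=multipath examples above) are wired up
separately from this patch.

	#include <linux/device-mapper.h>
	#include "dm-uevent.h"

	/* Hypothetical path-failure handler in a multipath-style target. */
	static void example_fail_path(struct dm_target *ti, const char *path,
				      unsigned nr_valid_paths)
	{
		/* Queue DM_ACTION=PATH_FAILED with the context documented
		 * in dm-uevent.txt above. */
		dm_path_uevent(DM_UEVENT_PATH_FAILED, ti, path, nr_valid_paths);

		/* Raising an ordinary dm event triggers event_callback(),
		 * which flushes the queued uevents out to udev. */
		dm_table_event(ti->table);
	}
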
diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h
index 9d776836489b..2eccc8bd671a 100644
--- a/drivers/md/dm-uevent.h
+++ b/drivers/md/dm-uevent.h
@@ -21,10 +21,19 @@
 #ifndef DM_UEVENT_H
 #define DM_UEVENT_H
 
+enum dm_uevent_type {
+	DM_UEVENT_PATH_FAILED,
+	DM_UEVENT_PATH_REINSTATED,
+};
+
 #ifdef CONFIG_DM_UEVENT
 
 extern int dm_uevent_init(void);
 extern void dm_uevent_exit(void);
+extern void dm_send_uevents(struct list_head *events, struct kobject *kobj);
+extern void dm_path_uevent(enum dm_uevent_type event_type,
+			   struct dm_target *ti, const char *path,
+			   unsigned nr_valid_paths);
 
 #else
 
@@ -35,6 +44,15 @@ static inline int dm_uevent_init(void)
 static inline void dm_uevent_exit(void)
 {
 }
+static inline void dm_send_uevents(struct list_head *events,
+				   struct kobject *kobj)
+{
+}
+static inline void dm_path_uevent(enum dm_uevent_type event_type,
+				  struct dm_target *ti, const char *path,
+				  unsigned nr_valid_paths)
+{
+}
 
 #endif /* CONFIG_DM_UEVENT */
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bb5c1eaca52b..07cbbb8eb3e0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -113,6 +113,9 @@ struct mapped_device {
 	 */
 	atomic_t event_nr;
 	wait_queue_head_t eventq;
+	atomic_t uevent_seq;
+	struct list_head uevent_list;
+	spinlock_t uevent_lock; /* Protect access to uevent_list */
 
 	/*
 	 * freeze/thaw support require holding onto a super block
@@ -985,6 +988,9 @@ static struct mapped_device *alloc_dev(int minor)
 	atomic_set(&md->holders, 1);
 	atomic_set(&md->open_count, 0);
 	atomic_set(&md->event_nr, 0);
+	atomic_set(&md->uevent_seq, 0);
+	INIT_LIST_HEAD(&md->uevent_list);
+	spin_lock_init(&md->uevent_lock);
 
 	md->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!md->queue)
@@ -1083,8 +1089,16 @@ static void free_dev(struct mapped_device *md)
  */
 static void event_callback(void *context)
 {
+	unsigned long flags;
+	LIST_HEAD(uevents);
 	struct mapped_device *md = (struct mapped_device *) context;
 
+	spin_lock_irqsave(&md->uevent_lock, flags);
+	list_splice_init(&md->uevent_list, &uevents);
+	spin_unlock_irqrestore(&md->uevent_lock, flags);
+
+	dm_send_uevents(&uevents, &md->disk->kobj);
+
 	atomic_inc(&md->event_nr);
 	wake_up(&md->eventq);
 }
@@ -1502,6 +1516,11 @@ out:
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
+uint32_t dm_next_uevent_seq(struct mapped_device *md)
+{
+	return atomic_add_return(1, &md->uevent_seq);
+}
+
 uint32_t dm_get_event_nr(struct mapped_device *md)
 {
 	return atomic_read(&md->event_nr);
@@ -1513,6 +1532,15 @@ int dm_wait_event(struct mapped_device *md, int event_nr)
 			(event_nr != atomic_read(&md->event_nr)));
 }
 
+void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&md->uevent_lock, flags);
+	list_add(elist, &md->uevent_list);
+	spin_unlock_irqrestore(&md->uevent_lock, flags);
+}
+
 /*
  * The gendisk is only valid as long as you have a reference
  * count on 'md'.
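
[Editor's note] The dm.c changes split queueing from sending: dm_uevent_add()
only takes a spinlock, presumably so a target can queue events from contexts
where sending a uevent (which may allocate and sleep) would not be safe, while
event_callback() later splices the whole list off under the lock and calls
dm_send_uevents() with the lock dropped.  A generic, illustrative sketch of
this queue-then-flush idiom (names are hypothetical, not from the patch):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_item {
		struct list_head list;
	};

	static LIST_HEAD(example_queue);
	static DEFINE_SPINLOCK(example_lock);

	/* Producers may run in atomic context: only the spinlock is taken. */
	static void example_queue_item(struct example_item *item)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		list_add(&item->list, &example_queue);
		spin_unlock_irqrestore(&example_lock, flags);
	}

	/* The consumer detaches the whole list, then works outside the lock. */
	static void example_flush_queue(void)
	{
		LIST_HEAD(items);
		struct example_item *item, *next;
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		list_splice_init(&example_queue, &items);
		spin_unlock_irqrestore(&example_lock, flags);

		list_for_each_entry_safe(item, next, &items, list) {
			list_del_init(&item->list);
			/* ... process and free the item here ... */
		}
	}
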
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8b3c7cdc599c..37c66d1254b5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -183,6 +183,8 @@ int dm_resume(struct mapped_device *md);
  */
 uint32_t dm_get_event_nr(struct mapped_device *md);
 int dm_wait_event(struct mapped_device *md, int event_nr);
+uint32_t dm_next_uevent_seq(struct mapped_device *md);
+void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
 
 /*
  * Info functions.