Diffstat (limited to 'net/netlink/genetlink.c')
-rw-r--r--	net/netlink/genetlink.c	524
1 file changed, 244 insertions(+), 280 deletions(-)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 0c741cec4d0d..7dbc4f732c75 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -65,12 +65,24 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE];
  * To avoid an allocation at boot of just one unsigned long,
  * declare it global instead.
  * Bit 0 is marked as already used since group 0 is invalid.
+ * Bit 1 is marked as already used since the drop-monitor code
+ * abuses the API and thinks it can statically use group 1.
+ * That group will typically conflict with other groups that
+ * any proper users use.
+ * Bit 16 is marked as used since it's used for generic netlink
+ * and the code no longer marks pre-reserved IDs as used.
+ * Bit 17 is marked as already used since the VFS quota code
+ * also abused this API and relied on family == group ID, we
+ * cater to that by giving it a static family and group ID.
  */
-static unsigned long mc_group_start = 0x1;
+static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
+				      BIT(GENL_ID_VFS_DQUOT);
 static unsigned long *mc_groups = &mc_group_start;
 static unsigned long mc_groups_longs = 1;
 
-static int genl_ctrl_event(int event, void *data);
+static int genl_ctrl_event(int event, struct genl_family *family,
+			   const struct genl_multicast_group *grp,
+			   int grp_id);
 
 static inline unsigned int genl_family_hash(unsigned int id)
 {
@@ -106,13 +118,13 @@ static struct genl_family *genl_family_find_byname(char *name)
 	return NULL;
 }
 
-static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
+static const struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
 {
-	struct genl_ops *ops;
+	int i;
 
-	list_for_each_entry(ops, &family->ops_list, ops_list)
-		if (ops->cmd == cmd)
-			return ops;
+	for (i = 0; i < family->n_ops; i++)
+		if (family->ops[i].cmd == cmd)
+			return &family->ops[i];
 
 	return NULL;
 }
@@ -126,7 +138,8 @@ static u16 genl_generate_id(void)
 	int i;
 
 	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
-		if (!genl_family_find_byid(id_gen_idx))
+		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
+		    !genl_family_find_byid(id_gen_idx))
 			return id_gen_idx;
 		if (++id_gen_idx > GENL_MAX_ID)
 			id_gen_idx = GENL_MIN_ID;
@@ -135,62 +148,110 @@ static u16 genl_generate_id(void)
 	return 0;
 }
 
-static struct genl_multicast_group notify_grp;
-
-/**
- * genl_register_mc_group - register a multicast group
- *
- * Registers the specified multicast group and notifies userspace
- * about the new group.
- *
- * Returns 0 on success or a negative error code.
- *
- * @family: The generic netlink family the group shall be registered for.
- * @grp: The group to register, must have a name.
- */
-int genl_register_mc_group(struct genl_family *family,
-			   struct genl_multicast_group *grp)
+static int genl_allocate_reserve_groups(int n_groups, int *first_id)
 {
-	int id;
 	unsigned long *new_groups;
-	int err = 0;
+	int start = 0;
+	int i;
+	int id;
+	bool fits;
+
+	do {
+		if (start == 0)
+			id = find_first_zero_bit(mc_groups,
+						 mc_groups_longs *
+						 BITS_PER_LONG);
+		else
+			id = find_next_zero_bit(mc_groups,
+						mc_groups_longs * BITS_PER_LONG,
+						start);
+
+		fits = true;
+		for (i = id;
+		     i < min_t(int, id + n_groups,
+			       mc_groups_longs * BITS_PER_LONG);
+		     i++) {
+			if (test_bit(i, mc_groups)) {
+				start = i;
+				fits = false;
+				break;
+			}
+		}
 
-	BUG_ON(grp->name[0] == '\0');
-	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
+		if (id >= mc_groups_longs * BITS_PER_LONG) {
+			unsigned long new_longs = mc_groups_longs +
+						  BITS_TO_LONGS(n_groups);
+			size_t nlen = new_longs * sizeof(unsigned long);
+
+			if (mc_groups == &mc_group_start) {
+				new_groups = kzalloc(nlen, GFP_KERNEL);
+				if (!new_groups)
+					return -ENOMEM;
+				mc_groups = new_groups;
+				*mc_groups = mc_group_start;
+			} else {
+				new_groups = krealloc(mc_groups, nlen,
+						      GFP_KERNEL);
+				if (!new_groups)
+					return -ENOMEM;
+				mc_groups = new_groups;
+				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
+					mc_groups[mc_groups_longs + i] = 0;
+			}
+			mc_groups_longs = new_longs;
+		}
+	} while (!fits);
 
-	genl_lock_all();
+	for (i = id; i < id + n_groups; i++)
+		set_bit(i, mc_groups);
+	*first_id = id;
+	return 0;
+}
 
-	/* special-case our own group */
-	if (grp == &notify_grp)
-		id = GENL_ID_CTRL;
-	else
-		id = find_first_zero_bit(mc_groups,
-					 mc_groups_longs * BITS_PER_LONG);
+static struct genl_family genl_ctrl;
 
+static int genl_validate_assign_mc_groups(struct genl_family *family)
+{
+	int first_id;
+	int n_groups = family->n_mcgrps;
+	int err, i;
+	bool groups_allocated = false;
 
-	if (id >= mc_groups_longs * BITS_PER_LONG) {
-		size_t nlen = (mc_groups_longs + 1) * sizeof(unsigned long);
+	if (!n_groups)
+		return 0;
 
-		if (mc_groups == &mc_group_start) {
-			new_groups = kzalloc(nlen, GFP_KERNEL);
-			if (!new_groups) {
-				err = -ENOMEM;
-				goto out;
-			}
-			mc_groups = new_groups;
-			*mc_groups = mc_group_start;
-		} else {
-			new_groups = krealloc(mc_groups, nlen, GFP_KERNEL);
-			if (!new_groups) {
-				err = -ENOMEM;
-				goto out;
-			}
-			mc_groups = new_groups;
-			mc_groups[mc_groups_longs] = 0;
-		}
-		mc_groups_longs++;
+	for (i = 0; i < n_groups; i++) {
+		const struct genl_multicast_group *grp = &family->mcgrps[i];
+
+		if (WARN_ON(grp->name[0] == '\0'))
+			return -EINVAL;
+		if (WARN_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL))
+			return -EINVAL;
+	}
+
+	/* special-case our own group and hacks */
+	if (family == &genl_ctrl) {
+		first_id = GENL_ID_CTRL;
+		BUG_ON(n_groups != 1);
+	} else if (strcmp(family->name, "NET_DM") == 0) {
+		first_id = 1;
+		BUG_ON(n_groups != 1);
+	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
+		first_id = GENL_ID_VFS_DQUOT;
+		BUG_ON(n_groups != 1);
+	} else {
+		groups_allocated = true;
+		err = genl_allocate_reserve_groups(n_groups, &first_id);
+		if (err)
+			return err;
 	}
 
+	family->mcgrp_offset = first_id;
+
+	/* if still initializing, can't and don't need to to realloc bitmaps */
+	if (!init_net.genl_sock)
+		return 0;
+
 	if (family->netnsok) {
 		struct net *net;
 
@@ -206,9 +267,7 @@ int genl_register_mc_group(struct genl_family *family,
 				 * number of _possible_ groups has been
 				 * increased on some sockets which is ok.
 				 */
-				rcu_read_unlock();
-				netlink_table_ungrab();
-				goto out;
+				break;
 			}
 		}
 		rcu_read_unlock();
@@ -216,152 +275,67 @@ int genl_register_mc_group(struct genl_family *family,
 	} else {
 		err = netlink_change_ngroups(init_net.genl_sock,
 					     mc_groups_longs * BITS_PER_LONG);
-		if (err)
-			goto out;
 	}
 
-	grp->id = id;
-	set_bit(id, mc_groups);
-	list_add_tail(&grp->list, &family->mcast_groups);
-	grp->family = family;
+	if (groups_allocated && err) {
+		for (i = 0; i < family->n_mcgrps; i++)
+			clear_bit(family->mcgrp_offset + i, mc_groups);
+	}
 
-	genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
-out:
-	genl_unlock_all();
 	return err;
 }
-EXPORT_SYMBOL(genl_register_mc_group);
 
-static void __genl_unregister_mc_group(struct genl_family *family,
-				       struct genl_multicast_group *grp)
+static void genl_unregister_mc_groups(struct genl_family *family)
 {
 	struct net *net;
-	BUG_ON(grp->family != family);
+	int i;
 
 	netlink_table_grab();
 	rcu_read_lock();
-	for_each_net_rcu(net)
-		__netlink_clear_multicast_users(net->genl_sock, grp->id);
+	for_each_net_rcu(net) {
+		for (i = 0; i < family->n_mcgrps; i++)
+			__netlink_clear_multicast_users(
+				net->genl_sock, family->mcgrp_offset + i);
+	}
 	rcu_read_unlock();
 	netlink_table_ungrab();
 
-	clear_bit(grp->id, mc_groups);
-	list_del(&grp->list);
-	genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
-	grp->id = 0;
-	grp->family = NULL;
-}
+	for (i = 0; i < family->n_mcgrps; i++) {
+		int grp_id = family->mcgrp_offset + i;
 
-/**
- * genl_unregister_mc_group - unregister a multicast group
- *
- * Unregisters the specified multicast group and notifies userspace
- * about it. All current listeners on the group are removed.
- *
- * Note: It is not necessary to unregister all multicast groups before
- * unregistering the family, unregistering the family will cause
- * all assigned multicast groups to be unregistered automatically.
- *
- * @family: Generic netlink family the group belongs to.
- * @grp: The group to unregister, must have been registered successfully
- * previously.
- */
-void genl_unregister_mc_group(struct genl_family *family,
-			      struct genl_multicast_group *grp)
-{
-	genl_lock_all();
-	__genl_unregister_mc_group(family, grp);
-	genl_unlock_all();
+		if (grp_id != 1)
+			clear_bit(grp_id, mc_groups);
+		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
+				&family->mcgrps[i], grp_id);
+	}
 }
-EXPORT_SYMBOL(genl_unregister_mc_group);
 
-static void genl_unregister_mc_groups(struct genl_family *family)
+static int genl_validate_ops(struct genl_family *family)
 {
-	struct genl_multicast_group *grp, *tmp;
+	const struct genl_ops *ops = family->ops;
+	unsigned int n_ops = family->n_ops;
+	int i, j;
 
-	list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
-		__genl_unregister_mc_group(family, grp);
-}
-
-/**
- * genl_register_ops - register generic netlink operations
- * @family: generic netlink family
- * @ops: operations to be registered
- *
- * Registers the specified operations and assigns them to the specified
- * family. Either a doit or dumpit callback must be specified or the
- * operation will fail. Only one operation structure per command
- * identifier may be registered.
- *
- * See include/net/genetlink.h for more documenation on the operations
- * structure.
- *
- * Returns 0 on success or a negative error code.
- */
-int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
-{
-	int err = -EINVAL;
+	if (WARN_ON(n_ops && !ops))
+		return -EINVAL;
 
-	if (ops->dumpit == NULL && ops->doit == NULL)
-		goto errout;
+	if (!n_ops)
+		return 0;
 
-	if (genl_get_cmd(ops->cmd, family)) {
-		err = -EEXIST;
-		goto errout;
+	for (i = 0; i < n_ops; i++) {
+		if (ops[i].dumpit == NULL && ops[i].doit == NULL)
+			return -EINVAL;
+		for (j = i + 1; j < n_ops; j++)
+			if (ops[i].cmd == ops[j].cmd)
+				return -EINVAL;
 	}
 
-	if (ops->dumpit)
-		ops->flags |= GENL_CMD_CAP_DUMP;
-	if (ops->doit)
-		ops->flags |= GENL_CMD_CAP_DO;
-	if (ops->policy)
-		ops->flags |= GENL_CMD_CAP_HASPOL;
+	/* family is not registered yet, so no locking needed */
+	family->ops = ops;
+	family->n_ops = n_ops;
 
-	genl_lock_all();
-	list_add_tail(&ops->ops_list, &family->ops_list);
-	genl_unlock_all();
-
-	genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
-	err = 0;
-errout:
-	return err;
-}
-EXPORT_SYMBOL(genl_register_ops);
-
-/**
- * genl_unregister_ops - unregister generic netlink operations
- * @family: generic netlink family
- * @ops: operations to be unregistered
- *
- * Unregisters the specified operations and unassigns them from the
- * specified family. The operation blocks until the current message
- * processing has finished and doesn't start again until the
- * unregister process has finished.
- *
- * Note: It is not necessary to unregister all operations before
- * unregistering the family, unregistering the family will cause
- * all assigned operations to be unregistered automatically.
- *
- * Returns 0 on success or a negative error code.
- */
-int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
-{
-	struct genl_ops *rc;
-
-	genl_lock_all();
-	list_for_each_entry(rc, &family->ops_list, ops_list) {
-		if (rc == ops) {
-			list_del(&ops->ops_list);
-			genl_unlock_all();
-			genl_ctrl_event(CTRL_CMD_DELOPS, ops);
-			return 0;
-		}
-	}
-	genl_unlock_all();
-
-	return -ENOENT;
+	return 0;
 }
-EXPORT_SYMBOL(genl_unregister_ops);
 
 /**
  * __genl_register_family - register a generic netlink family
@@ -372,11 +346,14 @@ EXPORT_SYMBOL(genl_unregister_ops);
  * The family id may equal GENL_ID_GENERATE causing an unique id to
  * be automatically generated and assigned.
  *
+ * The family's ops array must already be assigned, you can use the
+ * genl_register_family_with_ops() helper function.
+ *
  * Return 0 on success or a negative error code.
  */
 int __genl_register_family(struct genl_family *family)
 {
-	int err = -EINVAL;
+	int err = -EINVAL, i;
 
 	if (family->id && family->id < GENL_MIN_ID)
 		goto errout;
@@ -384,8 +361,9 @@ int __genl_register_family(struct genl_family *family)
 	if (family->id > GENL_MAX_ID)
 		goto errout;
 
-	INIT_LIST_HEAD(&family->ops_list);
-	INIT_LIST_HEAD(&family->mcast_groups);
+	err = genl_validate_ops(family);
+	if (err)
+		return err;
 
 	genl_lock_all();
 
@@ -418,10 +396,18 @@ int __genl_register_family(struct genl_family *family)
 	} else
 		family->attrbuf = NULL;
 
+	err = genl_validate_assign_mc_groups(family);
+	if (err)
+		goto errout_locked;
+
 	list_add_tail(&family->family_list, genl_family_chain(family->id));
 	genl_unlock_all();
 
-	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family);
+	/* send all events */
+	genl_ctrl_event(CTRL_CMD_NEWFAMILY, family, NULL, 0);
+	for (i = 0; i < family->n_mcgrps; i++)
+		genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, family,
+				&family->mcgrps[i], family->mcgrp_offset + i);
 
 	return 0;
 
@@ -433,52 +419,6 @@ errout:
 EXPORT_SYMBOL(__genl_register_family);
 
 /**
- * __genl_register_family_with_ops - register a generic netlink family
- * @family: generic netlink family
- * @ops: operations to be registered
- * @n_ops: number of elements to register
- *
- * Registers the specified family and operations from the specified table.
- * Only one family may be registered with the same family name or identifier.
- *
- * The family id may equal GENL_ID_GENERATE causing an unique id to
- * be automatically generated and assigned.
- *
- * Either a doit or dumpit callback must be specified for every registered
- * operation or the function will fail. Only one operation structure per
- * command identifier may be registered.
- *
- * See include/net/genetlink.h for more documenation on the operations
- * structure.
- *
- * This is equivalent to calling genl_register_family() followed by
- * genl_register_ops() for every operation entry in the table taking
- * care to unregister the family on error path.
- *
- * Return 0 on success or a negative error code.
- */
-int __genl_register_family_with_ops(struct genl_family *family,
-				    struct genl_ops *ops, size_t n_ops)
-{
-	int err, i;
-
-	err = __genl_register_family(family);
-	if (err)
-		return err;
-
-	for (i = 0; i < n_ops; ++i, ++ops) {
-		err = genl_register_ops(family, ops);
-		if (err)
-			goto err_out;
-	}
-	return 0;
-err_out:
-	genl_unregister_family(family);
-	return err;
-}
-EXPORT_SYMBOL(__genl_register_family_with_ops);
-
-/**
  * genl_unregister_family - unregister generic netlink family
  * @family: generic netlink family
  *
@@ -499,11 +439,11 @@ int genl_unregister_family(struct genl_family *family)
 			continue;
 
 		list_del(&rc->family_list);
-		INIT_LIST_HEAD(&family->ops_list);
+		family->n_ops = 0;
 		genl_unlock_all();
 
 		kfree(family->attrbuf);
-		genl_ctrl_event(CTRL_CMD_DELFAMILY, family);
+		genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
 		return 0;
 	}
 
@@ -546,7 +486,8 @@ EXPORT_SYMBOL(genlmsg_put);
 
 static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct genl_ops *ops = cb->data;
+	/* our ops are always const - netlink API doesn't propagate that */
+	const struct genl_ops *ops = cb->data;
 	int rc;
 
 	genl_lock();
@@ -557,7 +498,8 @@ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
 static int genl_lock_done(struct netlink_callback *cb)
 {
-	struct genl_ops *ops = cb->data;
+	/* our ops are always const - netlink API doesn't propagate that */
+	const struct genl_ops *ops = cb->data;
 	int rc = 0;
 
 	if (ops->done) {
@@ -572,7 +514,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
 			       struct sk_buff *skb,
 			       struct nlmsghdr *nlh)
 {
-	struct genl_ops *ops;
+	const struct genl_ops *ops;
 	struct net *net = sock_net(skb->sk);
 	struct genl_info info;
 	struct genlmsghdr *hdr = nlmsg_data(nlh);
@@ -604,7 +546,8 @@ static int genl_family_rcv_msg(struct genl_family *family,
 	if (!family->parallel_ops) {
 		struct netlink_dump_control c = {
 			.module = family->module,
-			.data = ops,
+			/* we have const, but the netlink API doesn't */
+			.data = (void *)ops,
 			.dump = genl_lock_dumpit,
 			.done = genl_lock_done,
 		};
@@ -726,24 +669,32 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
 	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
 		goto nla_put_failure;
 
-	if (!list_empty(&family->ops_list)) {
+	if (family->n_ops) {
 		struct nlattr *nla_ops;
-		struct genl_ops *ops;
-		int idx = 1;
+		int i;
 
 		nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
 		if (nla_ops == NULL)
 			goto nla_put_failure;
 
-		list_for_each_entry(ops, &family->ops_list, ops_list) {
+		for (i = 0; i < family->n_ops; i++) {
 			struct nlattr *nest;
+			const struct genl_ops *ops = &family->ops[i];
+			u32 op_flags = ops->flags;
 
-			nest = nla_nest_start(skb, idx++);
+			if (ops->dumpit)
+				op_flags |= GENL_CMD_CAP_DUMP;
+			if (ops->doit)
+				op_flags |= GENL_CMD_CAP_DO;
+			if (ops->policy)
+				op_flags |= GENL_CMD_CAP_HASPOL;
+
+			nest = nla_nest_start(skb, i + 1);
 			if (nest == NULL)
 				goto nla_put_failure;
 
 			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
-			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
+			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
 				goto nla_put_failure;
 
 			nla_nest_end(skb, nest);
@@ -752,23 +703,26 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
 		nla_nest_end(skb, nla_ops);
 	}
 
-	if (!list_empty(&family->mcast_groups)) {
-		struct genl_multicast_group *grp;
+	if (family->n_mcgrps) {
 		struct nlattr *nla_grps;
-		int idx = 1;
+		int i;
 
 		nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
 		if (nla_grps == NULL)
 			goto nla_put_failure;
 
-		list_for_each_entry(grp, &family->mcast_groups, list) {
+		for (i = 0; i < family->n_mcgrps; i++) {
 			struct nlattr *nest;
+			const struct genl_multicast_group *grp;
+
+			grp = &family->mcgrps[i];
 
-			nest = nla_nest_start(skb, idx++);
+			nest = nla_nest_start(skb, i + 1);
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID,
+					family->mcgrp_offset + i) ||
 			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
 					   grp->name))
 				goto nla_put_failure;
@@ -785,9 +739,10 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
-				u32 seq, u32 flags, struct sk_buff *skb,
-				u8 cmd)
+static int ctrl_fill_mcgrp_info(struct genl_family *family,
+				const struct genl_multicast_group *grp,
+				int grp_id, u32 portid, u32 seq, u32 flags,
+				struct sk_buff *skb, u8 cmd)
 {
 	void *hdr;
 	struct nlattr *nla_grps;
@@ -797,8 +752,8 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
 	if (hdr == NULL)
 		return -1;
 
-	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
-	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
+	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
+	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id))
 		goto nla_put_failure;
 
 	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
@@ -809,7 +764,7 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) ||
 	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
 			   grp->name))
 		goto nla_put_failure;
@@ -875,8 +830,10 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
 	return skb;
 }
 
-static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
-					    u32 portid, int seq, u8 cmd)
+static struct sk_buff *
+ctrl_build_mcgrp_msg(struct genl_family *family,
+		     const struct genl_multicast_group *grp,
+		     int grp_id, u32 portid, int seq, u8 cmd)
 {
 	struct sk_buff *skb;
 	int err;
@@ -885,7 +842,8 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
 	if (skb == NULL)
 		return ERR_PTR(-ENOBUFS);
 
-	err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd);
+	err = ctrl_fill_mcgrp_info(family, grp, grp_id, portid,
+				   seq, 0, skb, cmd);
 	if (err < 0) {
 		nlmsg_free(skb);
 		return ERR_PTR(err);
@@ -947,11 +905,11 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 	return genlmsg_reply(msg, info);
 }
 
-static int genl_ctrl_event(int event, void *data)
+static int genl_ctrl_event(int event, struct genl_family *family,
+			   const struct genl_multicast_group *grp,
+			   int grp_id)
 {
 	struct sk_buff *msg;
-	struct genl_family *family;
-	struct genl_multicast_group *grp;
 
 	/* genl is still initialising */
 	if (!init_net.genl_sock)
@@ -960,14 +918,13 @@ static int genl_ctrl_event(int event, void *data)
 	switch (event) {
 	case CTRL_CMD_NEWFAMILY:
 	case CTRL_CMD_DELFAMILY:
-		family = data;
+		WARN_ON(grp);
 		msg = ctrl_build_family_msg(family, 0, 0, event);
 		break;
 	case CTRL_CMD_NEWMCAST_GRP:
 	case CTRL_CMD_DELMCAST_GRP:
-		grp = data;
-		family = grp->family;
-		msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
+		BUG_ON(!grp);
+		msg = ctrl_build_mcgrp_msg(family, grp, grp_id, 0, 0, event);
 		break;
 	default:
 		return -EINVAL;
@@ -977,26 +934,29 @@ static int genl_ctrl_event(int event, void *data)
 		return PTR_ERR(msg);
 
 	if (!family->netnsok) {
-		genlmsg_multicast_netns(&init_net, msg, 0,
-					GENL_ID_CTRL, GFP_KERNEL);
+		genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
+					0, GFP_KERNEL);
 	} else {
 		rcu_read_lock();
-		genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC);
+		genlmsg_multicast_allns(&genl_ctrl, msg, 0,
+					0, GFP_ATOMIC);
 		rcu_read_unlock();
 	}
 
 	return 0;
 }
 
-static struct genl_ops genl_ctrl_ops = {
-	.cmd = CTRL_CMD_GETFAMILY,
-	.doit = ctrl_getfamily,
-	.dumpit = ctrl_dumpfamily,
-	.policy = ctrl_policy,
+static struct genl_ops genl_ctrl_ops[] = {
+	{
+		.cmd = CTRL_CMD_GETFAMILY,
+		.doit = ctrl_getfamily,
+		.dumpit = ctrl_dumpfamily,
+		.policy = ctrl_policy,
+	},
 };
 
-static struct genl_multicast_group notify_grp = {
-	.name = "notify",
+static struct genl_multicast_group genl_ctrl_groups[] = {
+	{ .name = "notify", },
 };
 
 static int __net_init genl_pernet_init(struct net *net)
@@ -1036,7 +996,8 @@ static int __init genl_init(void)
 	for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
 		INIT_LIST_HEAD(&family_ht[i]);
 
-	err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1);
+	err = genl_register_family_with_ops_groups(&genl_ctrl, genl_ctrl_ops,
+						   genl_ctrl_groups);
 	if (err < 0)
 		goto problem;
 
@@ -1044,10 +1005,6 @@ static int __init genl_init(void)
 	if (err)
 		goto problem;
 
-	err = genl_register_mc_group(&genl_ctrl, &notify_grp);
-	if (err < 0)
-		goto problem;
-
 	return 0;
 
 problem:
@@ -1085,14 +1042,18 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 	return err;
 }
 
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group,
-			    gfp_t flags)
+int genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb,
+			    u32 portid, unsigned int group, gfp_t flags)
 {
+	if (group >= family->n_mcgrps)
+		return -EINVAL;
+	group = family->mcgrp_offset + group;
 	return genlmsg_mcast(skb, portid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
 
-void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
+void genl_notify(struct genl_family *family,
+		 struct sk_buff *skb, struct net *net, u32 portid, u32 group,
 		 struct nlmsghdr *nlh, gfp_t flags)
 {
 	struct sock *sk = net->genl_sock;
@@ -1101,6 +1062,9 @@ void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
 	if (nlh)
 		report = nlmsg_report(nlh);
 
+	if (group >= family->n_mcgrps)
+		return;
+	group = family->mcgrp_offset + group;
 	nlmsg_notify(sk, skb, portid, group, report, flags);
 }
 EXPORT_SYMBOL(genl_notify);
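
For illustration, a minimal sketch of how a family registers after this change: ops and multicast groups live in arrays owned by the family, the core reserves a contiguous block of group IDs and records it in family->mcgrp_offset, and callers pass a group *index* rather than a global group ID. The "demo" family, its command enum and its handler below are hypothetical placeholders; only genl_register_family_with_ops_groups() and genlmsg_multicast_allns() are taken from the interfaces visible in the diff above.

/* Hypothetical example family - not part of this commit. */
#include <linux/module.h>
#include <net/genetlink.h>

enum { DEMO_CMD_UNSPEC, DEMO_CMD_ECHO };
enum { DEMO_MCGRP_EVENTS };	/* index into demo_mcgrps[], not a global group ID */

static int demo_cmd_echo(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* request handling elided in this sketch */
}

/* ops and groups are now arrays attached to the family as a whole */
static struct genl_ops demo_ops[] = {
	{ .cmd = DEMO_CMD_ECHO, .doit = demo_cmd_echo, },
};

static struct genl_multicast_group demo_mcgrps[] = {
	[DEMO_MCGRP_EVENTS] = { .name = "events", },
};

static struct genl_family demo_family = {
	.id = GENL_ID_GENERATE,
	.name = "demo",
	.version = 1,
};

static int __init demo_init(void)
{
	/* validates the ops array, reserves group IDs and stores the
	 * base ID in demo_family.mcgrp_offset */
	return genl_register_family_with_ops_groups(&demo_family, demo_ops,
						    demo_mcgrps);
}
module_init(demo_init);
MODULE_LICENSE("GPL");

static void demo_notify(struct sk_buff *msg)
{
	/* pass the family plus the group index; the core translates the
	 * index via mcgrp_offset before sending */
	genlmsg_multicast_allns(&demo_family, msg, 0, DEMO_MCGRP_EVENTS,
				GFP_KERNEL);
}

The same index-based convention applies to genl_notify() and genlmsg_multicast_netns() as modified above, which is what prevents families from picking (and colliding on) global group numbers themselves.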