#include <linux/user-return-notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/module.h>

static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);

/* The notifier list of the cpu this code is currently running on. */
#define URN_LIST_HEAD per_cpu(return_notifier_list, raw_smp_processor_id())

/*
 * Request a notification when the current cpu returns to userspace. Must be
 * called in atomic context. The notifier will also be called in atomic
 * context.
 */
void user_return_notifier_register(struct user_return_notifier *urn)
{
	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
	hlist_add_head(&urn->link, &URN_LIST_HEAD);
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);

/*
 * Removes a registered user return notifier. Must be called from atomic
 * context, and from the same cpu that registration occurred on.
 */
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
	hlist_del(&urn->link);
	if (hlist_empty(&URN_LIST_HEAD))
		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);

/* Calls registered user return notifiers */
void fire_user_return_notifiers(void)
{
	struct user_return_notifier *urn;
	struct hlist_node *tmp1, *tmp2;
	struct hlist_head *head;

	head = &get_cpu_var(return_notifier_list);
	hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
		urn->on_user_return(urn);
	put_cpu_var(return_notifier_list);
}
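
/*
 * Usage sketch (illustrative, not part of this file): a minimal example of
 * how a caller might arm a notifier on the current cpu and later tear it
 * down again. The names example_urn, example_on_user_return, example_arm
 * and example_disarm are hypothetical.
 */
static void example_on_user_return(struct user_return_notifier *urn)
{
	/* Runs in atomic context just before this cpu returns to userspace. */
}

static DEFINE_PER_CPU(struct user_return_notifier, example_urn);

static void example_arm(void)
{
	/*
	 * get_cpu_var() disables preemption, which satisfies the atomic
	 * context requirement of user_return_notifier_register().
	 */
	struct user_return_notifier *urn = &get_cpu_var(example_urn);

	urn->on_user_return = example_on_user_return;
	user_return_notifier_register(urn);
	put_cpu_var(example_urn);
}

static void example_disarm(void)
{
	/* Must run on the same cpu that registered the notifier. */
	struct user_return_notifier *urn = &get_cpu_var(example_urn);

	user_return_notifier_unregister(urn);
	put_cpu_var(example_urn);
}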