Diffstat (limited to 'kernel/sched_autogroup.c')

-rw-r--r--  kernel/sched_autogroup.c  229

1 file changed, 229 insertions, 0 deletions
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
new file mode 100644
index 000000000000..57a7ac286a02
--- /dev/null
+++ b/kernel/sched_autogroup.c
@@ -0,0 +1,229 @@
#ifdef CONFIG_SCHED_AUTOGROUP
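/*
 * Automatic process group ("autogroup") scheduling: tasks are placed in
 * per-session task groups, so CFS shares CPU between sessions rather
 * than between individual tasks.
 */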

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

static void autogroup_init(struct task_struct *init_task)
{
        autogroup_default.tg = &init_task_group;
        init_task_group.autogroup = &autogroup_default;
        kref_init(&autogroup_default.kref);
        init_rwsem(&autogroup_default.lock);
        init_task->signal->autogroup = &autogroup_default;
}

static inline void autogroup_free(struct task_group *tg)
{
        kfree(tg->autogroup);
}

static inline void autogroup_destroy(struct kref *kref)
{
        struct autogroup *ag = container_of(kref, struct autogroup, kref);

        sched_destroy_group(ag->tg);
}

static inline void autogroup_kref_put(struct autogroup *ag)
{
        kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
        kref_get(&ag->kref);
        return ag;
}

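/*
 * Create a new autogroup: a refcounted wrapper around a task group
 * parented to the root group.  On allocation or group-creation failure,
 * fall back to a reference on the default autogroup.
 */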
static inline struct autogroup *autogroup_create(void)
{
        struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
        struct task_group *tg;

        if (!ag)
                goto out_fail;

        tg = sched_create_group(&init_task_group);

        if (IS_ERR(tg))
                goto out_free;

        kref_init(&ag->kref);
        init_rwsem(&ag->lock);
        ag->id = atomic_inc_return(&autogroup_seq_nr);
        ag->tg = tg;
        tg->autogroup = ag;

        return ag;

out_free:
        kfree(ag);
out_fail:
        if (printk_ratelimit()) {
                printk(KERN_WARNING "autogroup_create: %s failure.\n",
                        ag ? "sched_create_group()" : "kmalloc()");
        }

        return autogroup_kref_get(&autogroup_default);
}

static inline bool
task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
        if (tg != &root_task_group)
                return false;

        if (p->sched_class != &fair_sched_class)
                return false;

        /*
         * We can only assume the task group can't go away on us if
         * autogroup_move_group() can see us on ->thread_group list.
         */
        if (p->flags & PF_EXITING)
                return false;

        return true;
}

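/*
 * Scheduler hook: when autogroup is enabled, redirect a task that would
 * land in the root task group into its session's autogroup instead.
 */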
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

        if (enabled && task_wants_autogroup(p, tg))
                return p->signal->autogroup->tg;

        return tg;
}

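/*
 * Switch p's whole thread group over to the autogroup ag: publish the new
 * autogroup under siglock, move every thread into ag->tg, then drop the
 * reference to the previous autogroup.
 */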
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
        struct autogroup *prev;
        struct task_struct *t;
        unsigned long flags;

        BUG_ON(!lock_task_sighand(p, &flags));

        prev = p->signal->autogroup;
        if (prev == ag) {
                unlock_task_sighand(p, &flags);
                return;
        }

        p->signal->autogroup = autogroup_kref_get(ag);

        t = p;
        do {
                sched_move_task(t);
        } while_each_thread(p, t);

        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
        struct autogroup *ag = autogroup_create();

        autogroup_move_group(p, ag);
        /* drop extra reference added by autogroup_create() */
        autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock. Currently has no users */
void sched_autogroup_detach(struct task_struct *p)
{
        autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

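/*
 * sched_autogroup_fork() gives the new signal_struct a reference to the
 * parent's autogroup; sched_autogroup_exit() drops that reference when
 * the signal_struct is torn down.
 */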
void sched_autogroup_fork(struct signal_struct *sig)
{
        struct task_struct *p = current;

        spin_lock_irq(&p->sighand->siglock);
        sig->autogroup = autogroup_kref_get(p->signal->autogroup);
        spin_unlock_irq(&p->sighand->siglock);
}

void sched_autogroup_exit(struct signal_struct *sig)
{
        autogroup_kref_put(sig->autogroup);
}

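/* "noautogroup" on the kernel command line clears sysctl_sched_autogroup_enabled. */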
static int __init setup_autogroup(char *str)
{
        sysctl_sched_autogroup_enabled = 0;

        return 1;
}

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

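/*
 * Back end for the per-task proc autogroup file: report which autogroup
 * a task belongs to and let a suitably privileged writer adjust the
 * group's nice level (the proc file itself is created elsewhere).
 */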
/* Called with siglock held. */
int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice)
{
        static unsigned long next = INITIAL_JIFFIES;
        struct autogroup *ag;
        int err;

        if (*nice < -20 || *nice > 19)
                return -EINVAL;

        err = security_task_setnice(current, *nice);
        if (err)
                return err;

        if (*nice < 0 && !can_nice(current, *nice))
                return -EPERM;

        /* this is a heavy operation taking global locks.. */
        if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
                return -EAGAIN;

        next = HZ / 10 + jiffies;
        ag = autogroup_kref_get(p->signal->autogroup);

        down_write(&ag->lock);
        err = sched_group_set_shares(ag->tg, prio_to_weight[*nice + 20]);
        if (!err)
                ag->nice = *nice;
        up_write(&ag->lock);

        autogroup_kref_put(ag);

        return err;
}

void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
        struct autogroup *ag = autogroup_kref_get(p->signal->autogroup);

        down_read(&ag->lock);
        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
        up_read(&ag->lock);

        autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
        return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */

#endif /* CONFIG_SCHED_AUTOGROUP */
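For context: the proc_sched_autogroup_show_task()/proc_sched_autogroup_set_nice() helpers above are the back end of a per-task proc file that is wired up outside this diff (conventionally /proc/<pid>/autogroup; treat that path as an assumption here). A minimal user-space sketch, assuming that wiring, could look like:

/*
 * Hypothetical demo, not part of the patch: read a task's autogroup and
 * request a new nice level for it via the assumed /proc/<pid>/autogroup file.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char buf[64];
        FILE *f = fopen("/proc/self/autogroup", "r");

        if (!f) {
                perror("open /proc/self/autogroup");    /* kernel without autogroup? */
                return EXIT_FAILURE;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("current: %s", buf);             /* e.g. "/autogroup-42 nice 0" */
        fclose(f);

        /*
         * Writing an integer in [-20, 19] ends up in proc_sched_autogroup_set_nice(),
         * which re-weights the whole group; it may fail with EPERM or EAGAIN.
         */
        f = fopen("/proc/self/autogroup", "w");
        if (f) {
                if (fprintf(f, "5\n") < 0 || fclose(f) != 0)
                        perror("set autogroup nice");
        } else {
                perror("open /proc/self/autogroup for write");
        }
        return EXIT_SUCCESS;
}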