author     Markus Metzger <markus.t.metzger@intel.com>  2008-11-25 02:52:56 -0500
committer  Ingo Molnar <mingo@elte.hu>                  2008-11-25 11:29:02 -0500
commit     de90add30e79261c3b5be68bb0f22d2ef98e8113 (patch)
tree       44c3da676ddb8e160fe1d0e9f29d2ab6a51ed475 /arch/x86
parent     c4858ffc8f2dc850cb1f609c679b1ac1ad36ef0c (diff)
x86, bts: fix wrmsr and spinlock over kmalloc
Impact: fix sleeping-with-spinlock-held bugs/crashes

- Turn a wrmsr to write the DS_AREA MSR into a wrmsrl.
- Use irqsave variants of spinlocks.
- Do not allocate memory while holding spinlocks.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
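For reference, a minimal sketch of the two idioms the fix moves to (irqsave
spinlock variants and wrmsrl). The lock, function, and variable names below
(example_lock, example_update, ds_area) are illustrative only and are not
taken from ds.c:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/msr.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static void example_update(u64 ds_area)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() disables local interrupts and remembers
	 * their previous state in 'flags', so the critical section is
	 * safe even if the lock may also be taken with interrupts
	 * already disabled or from interrupt context.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... short, non-sleeping critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);

	/*
	 * wrmsrl() takes the full 64-bit value directly, instead of the
	 * separate low/high 32-bit halves that wrmsr() expects.
	 */
	wrmsrl(MSR_IA32_DS_AREA, ds_area);
}

On a 64-bit kernel, writing DS_AREA as wrmsr(msr, (unsigned long)ptr, 0) would
truncate the pointer to its low 32 bits; wrmsrl() writes the full 64-bit
address.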
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/ds.c  77
1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 04e38ef646af..a2d1176c38ee 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -209,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
 	struct ds_context *context;
+	unsigned long irq;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
 	context = (task ? task->thread.ds_ctx : this_system_context);
 	if (context)
 		context->count++;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 	return context;
 }
@@ -224,55 +225,46 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
 	struct ds_context **p_context =
 		(task ? &task->thread.ds_ctx : &this_system_context);
 	struct ds_context *context = *p_context;
+	unsigned long irq;
 
 	if (!context) {
-		spin_unlock(&ds_lock);
-
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
-
-		if (!context) {
-			spin_lock(&ds_lock);
+		if (!context)
 			return NULL;
-		}
 
 		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 		if (!context->ds) {
 			kfree(context);
-			spin_lock(&ds_lock);
 			return NULL;
 		}
 
-		spin_lock(&ds_lock);
-		/*
-		 * Check for race - another CPU could have allocated
-		 * it meanwhile:
-		 */
+		spin_lock_irqsave(&ds_lock, irq);
+
 		if (*p_context) {
 			kfree(context->ds);
 			kfree(context);
-			return *p_context;
-		}
 
-		*p_context = context;
+			context = *p_context;
+		} else {
+			*p_context = context;
 
-		context->this = p_context;
-		context->task = task;
+			context->this = p_context;
+			context->task = task;
 
-		if (task)
-			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+			if (task)
+				set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
 
-		if (!task || (task == current))
-			wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-
-		get_tracer(task);
+			if (!task || (task == current))
+				wrmsrl(MSR_IA32_DS_AREA,
+				       (unsigned long)context->ds);
+		}
+		spin_unlock_irqrestore(&ds_lock, irq);
 	}
 
 	context->count++;
@@ -286,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+	unsigned long irq;
+
 	if (!context)
 		return;
 
-	spin_lock(&ds_lock);
+	spin_lock_irqsave(&ds_lock, irq);
 
 	if (--context->count)
 		goto out;
@@ -311,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
 	kfree(context->ds);
 	kfree(context);
  out:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 }
 
 
@@ -382,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 	struct ds_context *context;
 	unsigned long buffer, adj;
 	const unsigned long alignment = (1 << 3);
+	unsigned long irq;
 	int error = 0;
 
 	if (!ds_cfg.sizeof_ds)
@@ -396,26 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 		return -EOPNOTSUPP;
 
 
-	spin_lock(&ds_lock);
-
-	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
-		goto out_unlock;
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ds_lock, irq);
 
 	error = -EPERM;
 	if (!check_tracer(task))
 		goto out_unlock;
 
+	get_tracer(task);
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
-		goto out_unlock;
+		goto out_put_tracer;
 	error = -EPERM;
 	if (context->owner[qual] != NULL)
-		goto out_unlock;
+		goto out_put_tracer;
 	context->owner[qual] = current;
 
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 
 
 	error = -ENOMEM;
@@ -463,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
 	context->owner[qual] = NULL;
 	ds_put_context(context);
+	put_tracer(task);
+	return error;
+
+ out_put_tracer:
+	spin_unlock_irqrestore(&ds_lock, irq);
+	ds_put_context(context);
+	put_tracer(task);
 	return error;
 
  out_unlock:
-	spin_unlock(&ds_lock);
+	spin_unlock_irqrestore(&ds_lock, irq);
 	ds_put_context(context);
 	return error;
 }
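The reworked ds_alloc_context() above uses the common "allocate outside the
lock, re-check under it" pattern, so that kzalloc(GFP_KERNEL), which may
sleep, is never called while ds_lock is held. A stripped-down sketch of that
pattern follows; the names (struct example_ctx, example_slot, example_get_ctx,
example_lock) are hypothetical and only stand in for the real ds.c structures:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_ctx {
	int count;			/* hypothetical reference count */
};

static struct example_ctx *example_slot;	/* hypothetical shared slot */
static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */

static struct example_ctx *example_get_ctx(void)
{
	struct example_ctx *ctx;
	unsigned long flags;

	/* May sleep, so it must run with no spinlock held. */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	spin_lock_irqsave(&example_lock, flags);
	if (example_slot) {
		/* Lost the race: drop our copy, use the winner's. */
		kfree(ctx);
		ctx = example_slot;
	} else {
		example_slot = ctx;
	}
	ctx->count++;			/* take a reference under the lock */
	spin_unlock_irqrestore(&example_lock, flags);

	return ctx;
}

Losing the race simply means freeing the local allocation under the lock
(kfree() does not sleep, so this is safe) and taking a reference on the object
another CPU installed, which is the same resolution the patch applies to
context and context->ds in ds_alloc_context().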