author     Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
committer  Tejun Heo <tj@kernel.org>    2010-04-04 22:37:28 -0400
commit     336f5899d287f06d8329e208fc14ce50f7ec9698 (patch)
tree       9b762d450d5eb248a6ff8317badb7e223d93ed58 /kernel
parent     a4ab2773205e8b94c18625455f85e3b6bb9d7ad6 (diff)
parent     db217dece3003df0841bacf9556b5c06aa097dae (diff)
Merge branch 'master' into export-slabh
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup_freezer.c            9
-rw-r--r--  kernel/cred.c                      6
-rw-r--r--  kernel/early_res.c                 6
-rw-r--r--  kernel/kgdb.c                    205
-rw-r--r--  kernel/perf_event.c               22
-rw-r--r--  kernel/power/process.c             5
-rw-r--r--  kernel/sched.c                     2
-rw-r--r--  kernel/sched_debug.c               4
-rw-r--r--  kernel/trace/ring_buffer.c         8
-rw-r--r--  kernel/trace/trace_clock.c         4
-rw-r--r--  kernel/trace/trace_event_perf.c   11
11 files changed, 153 insertions(+), 129 deletions(-)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index d2ccd2798d7a..da5e13975531 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,17 +48,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
 			    struct freezer, css);
 }
 
-int cgroup_frozen(struct task_struct *task)
+int cgroup_freezing_or_frozen(struct task_struct *task)
 {
 	struct freezer *freezer;
 	enum freezer_state state;
 
 	task_lock(task);
 	freezer = task_freezer(task);
-	state = freezer->state;
+	if (!freezer->css.cgroup->parent)
+		state = CGROUP_THAWED; /* root cgroup can't be frozen */
+	else
+		state = freezer->state;
 	task_unlock(task);
 
-	return state == CGROUP_FROZEN;
+	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
 }
 
 /*
diff --git a/kernel/cred.c b/kernel/cred.c
index d84bdef17c9f..e1dbe9eef800 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -365,7 +365,7 @@ struct cred *prepare_usermodehelper_creds(void)
 
 	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
 	if (!new)
-		return NULL;
+		goto free_tgcred;
 
 	kdebug("prepare_usermodehelper_creds() alloc %p", new);
 
@@ -398,6 +398,10 @@ struct cred *prepare_usermodehelper_creds(void)
 
 error:
 	put_cred(new);
+free_tgcred:
+#ifdef CONFIG_KEYS
+	kfree(tgcred);
+#endif
 	return NULL;
 }
 
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c661bb78..31aa9332ef3f 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
 try_next:
 	i = find_overlapped_early(start, end);
 	if (i >= max_early_res)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 761fdd2b3034..11f3515ca83f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
 	struct pt_regs		*linux_regs;
 };
 
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE    0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP       0x8 /* CPU is single stepping */
+
 static struct debuggerinfo_struct {
 	void			*debuggerinfo;
 	struct task_struct	*task;
+	int			exception_state;
 } kgdb_info[NR_CPUS];
 
 /**
@@ -391,27 +398,22 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 
 /*
  * Copy the binary array pointed to by buf into mem.  Fix $, #, and
- * 0x7d escaped with 0x7d.  Return a pointer to the character after
- * the last byte written.
+ * 0x7d escaped with 0x7d.  Return -EFAULT on failure or 0 on success.
+ * The input buf is overwitten with the result to write to mem.
  */
 static int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-	int err = 0;
-	char c;
+	int size = 0;
+	char *c = buf;
 
 	while (count-- > 0) {
-		c = *buf++;
-		if (c == 0x7d)
-			c = *buf++ ^ 0x20;
-
-		err = probe_kernel_write(mem, &c, 1);
-		if (err)
-			break;
-
-		mem++;
+		c[size] = *buf++;
+		if (c[size] == 0x7d)
+			c[size] = *buf++ ^ 0x20;
+		size++;
 	}
 
-	return err;
+	return probe_kernel_write(mem, c, size);
 }
 
 /*
@@ -563,49 +565,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
 }
 
 /*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
-	unsigned long flags;
-	int cpu;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	kgdb_info[cpu].debuggerinfo = regs;
-	kgdb_info[cpu].task = current;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	smp_wmb();
-	atomic_set(&cpu_in_kgdb[cpu], 1);
-
-	/* Disable any cpu specific hw breakpoints */
-	kgdb_disable_hw_debug(regs);
-
-	/* Wait till primary CPU is done with debugging */
-	while (atomic_read(&passive_cpu_wait[cpu]))
-		cpu_relax();
-
-	kgdb_info[cpu].debuggerinfo = NULL;
-	kgdb_info[cpu].task = NULL;
-
-	/* fix up hardware debug registers on local cpu */
-	if (arch_kgdb_ops.correct_hw_break)
-		arch_kgdb_ops.correct_hw_break();
-
-	/* Signal the primary CPU that we are done: */
-	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog_sync();
-	clocksource_touch_watchdog();
-	local_irq_restore(flags);
-}
-#endif
-
-/*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
  */
@@ -1400,34 +1359,13 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 	return 1;
 }
 
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *	interface locks, if any (begin_session)
- *	kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
 {
-	struct kgdb_state kgdb_var;
-	struct kgdb_state *ks = &kgdb_var;
 	unsigned long flags;
 	int sstep_tries = 100;
 	int error = 0;
 	int i, cpu;
-
-	ks->cpu			= raw_smp_processor_id();
-	ks->ex_vector		= evector;
-	ks->signo		= signo;
-	ks->ex_vector		= evector;
-	ks->err_code		= ecode;
-	ks->kgdb_usethreadid	= 0;
-	ks->linux_regs		= regs;
-
-	if (kgdb_reenter_check(ks))
-		return 0; /* Ouch, double exception ! */
-
+	int trace_on = 0;
 acquirelock:
 	/*
 	 * Interrupts will be restored by the 'trap return' code, except when
@@ -1435,13 +1373,43 @@ acquirelock:
 	 */
 	local_irq_save(flags);
 
-	cpu = raw_smp_processor_id();
+	cpu = ks->cpu;
+	kgdb_info[cpu].debuggerinfo = regs;
+	kgdb_info[cpu].task = current;
+	/*
+	 * Make sure the above info reaches the primary CPU before
+	 * our cpu_in_kgdb[] flag setting does:
+	 */
+	atomic_inc(&cpu_in_kgdb[cpu]);
 
 	/*
-	 * Acquire the kgdb_active lock:
+	 * CPU will loop if it is a slave or request to become a kgdb
+	 * master cpu and acquire the kgdb_active lock:
 	 */
-	while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+	while (1) {
+		if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+				break;
+		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+			if (!atomic_read(&passive_cpu_wait[cpu]))
+				goto return_normal;
+		} else {
+return_normal:
+			/* Return to normal operation by executing any
+			 * hw breakpoint fixup.
+			 */
+			if (arch_kgdb_ops.correct_hw_break)
+				arch_kgdb_ops.correct_hw_break();
+			if (trace_on)
+				tracing_on();
+			atomic_dec(&cpu_in_kgdb[cpu]);
+			touch_softlockup_watchdog_sync();
+			clocksource_touch_watchdog();
+			local_irq_restore(flags);
+			return 0;
+		}
 		cpu_relax();
+	}
 
 	/*
 	 * For single stepping, try to only enter on the processor
@@ -1475,9 +1443,6 @@ acquirelock:
 	if (kgdb_io_ops->pre_exception)
 		kgdb_io_ops->pre_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
-	kgdb_info[ks->cpu].task = current;
-
 	kgdb_disable_hw_debug(ks->linux_regs);
 
 	/*
@@ -1486,15 +1451,9 @@ acquirelock:
 	 */
 	if (!kgdb_single_step) {
 		for (i = 0; i < NR_CPUS; i++)
-			atomic_set(&passive_cpu_wait[i], 1);
+			atomic_inc(&passive_cpu_wait[i]);
 	}
 
-	/*
-	 * spin_lock code is good enough as a barrier so we don't
-	 * need one here:
-	 */
-	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
 #ifdef CONFIG_SMP
 	/* Signal the other CPUs to enter kgdb_wait() */
 	if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1518,6 +1477,9 @@ acquirelock:
 	kgdb_single_step = 0;
 	kgdb_contthread = current;
 	exception_level = 0;
+	trace_on = tracing_is_on();
+	if (trace_on)
+		tracing_off();
 
 	/* Talk to debugger with gdbserial protocol */
 	error = gdb_serial_stub(ks);
@@ -1526,13 +1488,11 @@ acquirelock:
 	if (kgdb_io_ops->post_exception)
 		kgdb_io_ops->post_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = NULL;
-	kgdb_info[ks->cpu].task = NULL;
-	atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+	atomic_dec(&cpu_in_kgdb[ks->cpu]);
 
 	if (!kgdb_single_step) {
 		for (i = NR_CPUS-1; i >= 0; i--)
-			atomic_set(&passive_cpu_wait[i], 0);
+			atomic_dec(&passive_cpu_wait[i]);
 		/*
 		 * Wait till all the CPUs have quit
 		 * from the debugger.
@@ -1551,6 +1511,8 @@ kgdb_restore:
 		else
 			kgdb_sstep_pid = 0;
 	}
+	if (trace_on)
+		tracing_on();
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
 	touch_softlockup_watchdog_sync();
@@ -1560,13 +1522,52 @@ kgdb_restore:
 	return error;
 }
 
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *	interface locks, if any (begin_session)
+ *	kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+	int ret;
+
+	ks->cpu			= raw_smp_processor_id();
+	ks->ex_vector		= evector;
+	ks->signo		= signo;
+	ks->ex_vector		= evector;
+	ks->err_code		= ecode;
+	ks->kgdb_usethreadid	= 0;
+	ks->linux_regs		= regs;
+
+	if (kgdb_reenter_check(ks))
+		return 0; /* Ouch, double exception ! */
+	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+	ret = kgdb_cpu_enter(ks, regs);
+	kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+	return ret;
+}
+
 int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+
+	memset(ks, 0, sizeof(struct kgdb_state));
+	ks->cpu			= cpu;
+	ks->linux_regs		= regs;
+
 	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-			atomic_read(&kgdb_active) != cpu &&
-			atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
-		kgdb_wait((struct pt_regs *)regs);
+			atomic_read(&kgdb_active) != -1 &&
+			atomic_read(&kgdb_active) != cpu) {
+		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+		kgdb_cpu_enter(ks, regs);
+		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
 		return 0;
 	}
 #endif
@@ -1742,11 +1743,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
  */
 void kgdb_breakpoint(void)
 {
-	atomic_set(&kgdb_setting_breakpoint, 1);
+	atomic_inc(&kgdb_setting_breakpoint);
 	wmb(); /* Sync point before breakpoint */
 	arch_kgdb_breakpoint();
 	wmb(); /* Sync point after breakpoint */
-	atomic_set(&kgdb_setting_breakpoint, 0);
+	atomic_dec(&kgdb_setting_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a77266e3e3e1..2f3fbf84215a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1165,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	struct pt_regs *regs;
 	int do_switch = 1;
 
-	regs = task_pt_regs(task);
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
@@ -2787,12 +2785,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return NULL;
 }
 
-#ifdef CONFIG_EVENT_TRACING
 __weak
 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
 }
-#endif
+
 
 /*
  * Output
@@ -3379,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
 				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size;
 	struct task_struct *task = task_event->task;
-	int ret;
+	unsigned long flags;
+	int size, ret;
+
+	/*
+	 * If this CPU attempts to acquire an rq lock held by a CPU spinning
+	 * in perf_output_lock() from interrupt context, it's game over.
+	 */
+	local_irq_save(flags);
 
 	size = task_event->event_id.header.size;
 	ret = perf_output_begin(&handle, event, size, 0, 0);
 
-	if (ret)
+	if (ret) {
+		local_irq_restore(flags);
 		return;
+	}
 
 	task_event->event_id.pid = perf_event_pid(event, task);
 	task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3398,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
+	local_irq_restore(flags);
 }
 
 static int perf_event_task_match(struct perf_event *event)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5ade1bdcf366..71ae29052ab6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -88,12 +88,11 @@ static int try_to_freeze_tasks(bool sig_only)
 		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
 		       "(%d tasks refusing to freeze):\n",
 		       elapsed_csecs / 100, elapsed_csecs % 100, todo);
-		show_state();
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			task_lock(p);
 			if (freezing(p) && !freezer_should_skip(p))
-				printk(KERN_ERR " %s\n", p->comm);
+				sched_show_task(p);
 			cancel_freezing(p);
 			task_unlock(p);
 		} while_each_thread(g, p);
@@ -145,7 +144,7 @@ static void thaw_tasks(bool nosig_only)
 		if (nosig_only && should_send_signal(p))
 			continue;
 
-		if (cgroup_frozen(p))
+		if (cgroup_freezing_or_frozen(p))
 			continue;
 
 		thaw_process(p);
diff --git a/kernel/sched.c b/kernel/sched.c
index 86c7cc1d7c9d..a3dff1f3f9b0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5388,7 +5388,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 
 		get_task_struct(mt);
 		task_rq_unlock(rq, &flags);
-		wake_up_process(rq->migration_thread);
+		wake_up_process(mt);
 		put_task_struct(mt);
 		wait_for_completion(&req.done);
 		tlb_migrate_finish(p->mm);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b9..9b49db144037 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -518,8 +518,4 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.nr_wakeups_idle			= 0;
 	p->sched_info.bkl_count			= 0;
 #endif
-	p->se.sum_exec_runtime			= 0;
-	p->se.prev_sum_exec_runtime		= 0;
-	p->nvcsw				= 0;
-	p->nivcsw				= 0;
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c839ca5e5ce..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1210,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
 		p = cpu_buffer->pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
 		free_buffer_page(bpage);
 	}
 	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
 
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1238,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
 		p = pages->next;
 		bpage = list_entry(p, struct buffer_page, list);
 		list_del_init(&bpage->list);
@@ -1247,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_reset_cpu(cpu_buffer);
 	rb_check_pages(cpu_buffer);
 
+out:
 	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b9..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
 	int this_cpu;
 	u64 now;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 
 	this_cpu = raw_smp_processor_id();
 	now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
 	arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 
 	return now;
 }
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a30..0565bb42566f 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
 static char *perf_trace_buf;
 static char *perf_trace_buf_nmi;
 
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned accesses
+ * suprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+	perf_trace_t;
 
 /* Count the events in use (per event id, not per instance) */
 static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	char *trace_buf, *raw_data;
 	int pc, cpu;
 
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
 	pc = preempt_count();
 
 	/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
 
 	entry = (struct trace_entry *)raw_data;
 	tracing_generic_entry_update(entry, *irq_flags, pc);