Diffstat (limited to 'include/linux')
33 files changed, 684 insertions, 357 deletions
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
| @@ -2,6 +2,8 @@ | |||
| 2 | /* Atomic operations usable in machine independent code */ | 2 | /* Atomic operations usable in machine independent code */ |
| 3 | #ifndef _LINUX_ATOMIC_H | 3 | #ifndef _LINUX_ATOMIC_H |
| 4 | #define _LINUX_ATOMIC_H | 4 | #define _LINUX_ATOMIC_H |
| 5 | #include <linux/types.h> | ||
| 6 | |||
| 5 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
| 6 | #include <asm/barrier.h> | 8 | #include <asm/barrier.h> |
| 7 | 9 | ||
| @@ -36,40 +38,46 @@ | |||
| 36 | * barriers on top of the relaxed variant. In the case where the relaxed | 38 | * barriers on top of the relaxed variant. In the case where the relaxed |
| 37 | * variant is already fully ordered, no additional barriers are needed. | 39 | * variant is already fully ordered, no additional barriers are needed. |
| 38 | * | 40 | * |
| 39 | * Besides, if an arch has a special barrier for acquire/release, it could | 41 | * If an architecture overrides __atomic_acquire_fence() it will probably |
| 40 | * implement its own __atomic_op_* and use the same framework for building | 42 | * want to define smp_mb__after_spinlock(). |
| 41 | * variants | ||
| 42 | * | ||
| 43 | * If an architecture overrides __atomic_op_acquire() it will probably want | ||
| 44 | * to define smp_mb__after_spinlock(). | ||
| 45 | */ | 43 | */ |
| 46 | #ifndef __atomic_op_acquire | 44 | #ifndef __atomic_acquire_fence |
| 45 | #define __atomic_acquire_fence smp_mb__after_atomic | ||
| 46 | #endif | ||
| 47 | |||
| 48 | #ifndef __atomic_release_fence | ||
| 49 | #define __atomic_release_fence smp_mb__before_atomic | ||
| 50 | #endif | ||
| 51 | |||
| 52 | #ifndef __atomic_pre_full_fence | ||
| 53 | #define __atomic_pre_full_fence smp_mb__before_atomic | ||
| 54 | #endif | ||
| 55 | |||
| 56 | #ifndef __atomic_post_full_fence | ||
| 57 | #define __atomic_post_full_fence smp_mb__after_atomic | ||
| 58 | #endif | ||
| 59 | |||
| 47 | #define __atomic_op_acquire(op, args...) \ | 60 | #define __atomic_op_acquire(op, args...) \ |
| 48 | ({ \ | 61 | ({ \ |
| 49 | typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ | 62 | typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ |
| 50 | smp_mb__after_atomic(); \ | 63 | __atomic_acquire_fence(); \ |
| 51 | __ret; \ | 64 | __ret; \ |
| 52 | }) | 65 | }) |
| 53 | #endif | ||
| 54 | 66 | ||
| 55 | #ifndef __atomic_op_release | ||
| 56 | #define __atomic_op_release(op, args...) \ | 67 | #define __atomic_op_release(op, args...) \ |
| 57 | ({ \ | 68 | ({ \ |
| 58 | smp_mb__before_atomic(); \ | 69 | __atomic_release_fence(); \ |
| 59 | op##_relaxed(args); \ | 70 | op##_relaxed(args); \ |
| 60 | }) | 71 | }) |
| 61 | #endif | ||
| 62 | 72 | ||
| 63 | #ifndef __atomic_op_fence | ||
| 64 | #define __atomic_op_fence(op, args...) \ | 73 | #define __atomic_op_fence(op, args...) \ |
| 65 | ({ \ | 74 | ({ \ |
| 66 | typeof(op##_relaxed(args)) __ret; \ | 75 | typeof(op##_relaxed(args)) __ret; \ |
| 67 | smp_mb__before_atomic(); \ | 76 | __atomic_pre_full_fence(); \ |
| 68 | __ret = op##_relaxed(args); \ | 77 | __ret = op##_relaxed(args); \ |
| 69 | smp_mb__after_atomic(); \ | 78 | __atomic_post_full_fence(); \ |
| 70 | __ret; \ | 79 | __ret; \ |
| 71 | }) | 80 | }) |
| 72 | #endif | ||
| 73 | 81 | ||
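With only a relaxed implementation supplied by an architecture, the wrappers above generate the remaining orderings. As a sketch, the derivation defines used later in this header (elided from the hunk above) follow this pattern:

/* Sketch: how ordered variants are derived from the _relaxed form. */
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...) \
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif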
| 74 | /* atomic_add_return_relaxed */ | 82 | /* atomic_add_return_relaxed */ |
| 75 | #ifndef atomic_add_return_relaxed | 83 | #ifndef atomic_add_return_relaxed |
| @@ -95,11 +103,23 @@ | |||
| 95 | #endif | 103 | #endif |
| 96 | #endif /* atomic_add_return_relaxed */ | 104 | #endif /* atomic_add_return_relaxed */ |
| 97 | 105 | ||
| 106 | #ifndef atomic_inc | ||
| 107 | #define atomic_inc(v) atomic_add(1, (v)) | ||
| 108 | #endif | ||
| 109 | |||
| 98 | /* atomic_inc_return_relaxed */ | 110 | /* atomic_inc_return_relaxed */ |
| 99 | #ifndef atomic_inc_return_relaxed | 111 | #ifndef atomic_inc_return_relaxed |
| 112 | |||
| 113 | #ifndef atomic_inc_return | ||
| 114 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
| 115 | #define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) | ||
| 116 | #define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) | ||
| 117 | #define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) | ||
| 118 | #else /* atomic_inc_return */ | ||
| 100 | #define atomic_inc_return_relaxed atomic_inc_return | 119 | #define atomic_inc_return_relaxed atomic_inc_return |
| 101 | #define atomic_inc_return_acquire atomic_inc_return | 120 | #define atomic_inc_return_acquire atomic_inc_return |
| 102 | #define atomic_inc_return_release atomic_inc_return | 121 | #define atomic_inc_return_release atomic_inc_return |
| 122 | #endif /* atomic_inc_return */ | ||
| 103 | 123 | ||
| 104 | #else /* atomic_inc_return_relaxed */ | 124 | #else /* atomic_inc_return_relaxed */ |
| 105 | 125 | ||
| @@ -143,11 +163,23 @@ | |||
| 143 | #endif | 163 | #endif |
| 144 | #endif /* atomic_sub_return_relaxed */ | 164 | #endif /* atomic_sub_return_relaxed */ |
| 145 | 165 | ||
| 166 | #ifndef atomic_dec | ||
| 167 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
| 168 | #endif | ||
| 169 | |||
| 146 | /* atomic_dec_return_relaxed */ | 170 | /* atomic_dec_return_relaxed */ |
| 147 | #ifndef atomic_dec_return_relaxed | 171 | #ifndef atomic_dec_return_relaxed |
| 172 | |||
| 173 | #ifndef atomic_dec_return | ||
| 174 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
| 175 | #define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) | ||
| 176 | #define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) | ||
| 177 | #define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) | ||
| 178 | #else /* atomic_dec_return */ | ||
| 148 | #define atomic_dec_return_relaxed atomic_dec_return | 179 | #define atomic_dec_return_relaxed atomic_dec_return |
| 149 | #define atomic_dec_return_acquire atomic_dec_return | 180 | #define atomic_dec_return_acquire atomic_dec_return |
| 150 | #define atomic_dec_return_release atomic_dec_return | 181 | #define atomic_dec_return_release atomic_dec_return |
| 182 | #endif /* atomic_dec_return */ | ||
| 151 | 183 | ||
| 152 | #else /* atomic_dec_return_relaxed */ | 184 | #else /* atomic_dec_return_relaxed */ |
| 153 | 185 | ||
| @@ -328,12 +360,22 @@ | |||
| 328 | #endif | 360 | #endif |
| 329 | #endif /* atomic_fetch_and_relaxed */ | 361 | #endif /* atomic_fetch_and_relaxed */ |
| 330 | 362 | ||
| 331 | #ifdef atomic_andnot | 363 | #ifndef atomic_andnot |
| 332 | /* atomic_fetch_andnot_relaxed */ | 364 | #define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) |
| 365 | #endif | ||
| 366 | |||
| 333 | #ifndef atomic_fetch_andnot_relaxed | 367 | #ifndef atomic_fetch_andnot_relaxed |
| 334 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot | 368 | |
| 335 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot | 369 | #ifndef atomic_fetch_andnot |
| 336 | #define atomic_fetch_andnot_release atomic_fetch_andnot | 370 | #define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) |
| 371 | #define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) | ||
| 372 | #define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) | ||
| 373 | #define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) | ||
| 374 | #else /* atomic_fetch_andnot */ | ||
| 375 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot | ||
| 376 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot | ||
| 377 | #define atomic_fetch_andnot_release atomic_fetch_andnot | ||
| 378 | #endif /* atomic_fetch_andnot */ | ||
| 337 | 379 | ||
| 338 | #else /* atomic_fetch_andnot_relaxed */ | 380 | #else /* atomic_fetch_andnot_relaxed */ |
| 339 | 381 | ||
| @@ -352,7 +394,6 @@ | |||
| 352 | __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) | 394 | __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) |
| 353 | #endif | 395 | #endif |
| 354 | #endif /* atomic_fetch_andnot_relaxed */ | 396 | #endif /* atomic_fetch_andnot_relaxed */ |
| 355 | #endif /* atomic_andnot */ | ||
| 356 | 397 | ||
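Usage sketch for the new atomic_andnot() fallback (flag names hypothetical, assumes <linux/atomic.h>): atomically clearing a set of bits is equivalent to atomic_and() with the complemented mask:

/* Hypothetical flag bits, for illustration only. */
#define FLAG_RUNNING	0x1
#define FLAG_PENDING	0x2

static inline void clear_state_flags(atomic_t *state)
{
	/* Equivalent to atomic_and(~(FLAG_RUNNING | FLAG_PENDING), state). */
	atomic_andnot(FLAG_RUNNING | FLAG_PENDING, state);
}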
| 357 | /* atomic_fetch_xor_relaxed */ | 398 | /* atomic_fetch_xor_relaxed */ |
| 358 | #ifndef atomic_fetch_xor_relaxed | 399 | #ifndef atomic_fetch_xor_relaxed |
| @@ -520,112 +561,140 @@ | |||
| 520 | #endif /* xchg_relaxed */ | 561 | #endif /* xchg_relaxed */ |
| 521 | 562 | ||
| 522 | /** | 563 | /** |
| 564 | * atomic_fetch_add_unless - add unless the number is already a given value | ||
| 565 | * @v: pointer of type atomic_t | ||
| 566 | * @a: the amount to add to v... | ||
| 567 | * @u: ...unless v is equal to u. | ||
| 568 | * | ||
| 569 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 570 | * Returns the original value of @v. | ||
| 571 | */ | ||
| 572 | #ifndef atomic_fetch_add_unless | ||
| 573 | static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) | ||
| 574 | { | ||
| 575 | int c = atomic_read(v); | ||
| 576 | |||
| 577 | do { | ||
| 578 | if (unlikely(c == u)) | ||
| 579 | break; | ||
| 580 | } while (!atomic_try_cmpxchg(v, &c, c + a)); | ||
| 581 | |||
| 582 | return c; | ||
| 583 | } | ||
| 584 | #endif | ||
| 585 | |||
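A hedged usage sketch of atomic_fetch_add_unless(): the classic consumer is a "take a reference unless the object is already dead" helper (helper name hypothetical):

static inline bool obj_get_unless_zero(atomic_t *refcount)
{
	/* The old value was non-zero iff we actually took a reference. */
	return atomic_fetch_add_unless(refcount, 1, 0) != 0;
}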
| 586 | /** | ||
| 523 | * atomic_add_unless - add unless the number is already a given value | 587 | * atomic_add_unless - add unless the number is already a given value |
| 524 | * @v: pointer of type atomic_t | 588 | * @v: pointer of type atomic_t |
| 525 | * @a: the amount to add to v... | 589 | * @a: the amount to add to v... |
| 526 | * @u: ...unless v is equal to u. | 590 | * @u: ...unless v is equal to u. |
| 527 | * | 591 | * |
| 528 | * Atomically adds @a to @v, so long as @v was not already @u. | 592 | * Atomically adds @a to @v, if @v was not already @u. |
| 529 | * Returns non-zero if @v was not @u, and zero otherwise. | 593 | * Returns true if the addition was done. |
| 530 | */ | 594 | */ |
| 531 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | 595 | static inline bool atomic_add_unless(atomic_t *v, int a, int u) |
| 532 | { | 596 | { |
| 533 | return __atomic_add_unless(v, a, u) != u; | 597 | return atomic_fetch_add_unless(v, a, u) != u; |
| 534 | } | 598 | } |
| 535 | 599 | ||
| 536 | /** | 600 | /** |
| 537 | * atomic_inc_not_zero - increment unless the number is zero | 601 | * atomic_inc_not_zero - increment unless the number is zero |
| 538 | * @v: pointer of type atomic_t | 602 | * @v: pointer of type atomic_t |
| 539 | * | 603 | * |
| 540 | * Atomically increments @v by 1, so long as @v is non-zero. | 604 | * Atomically increments @v by 1, if @v is non-zero. |
| 541 | * Returns non-zero if @v was non-zero, and zero otherwise. | 605 | * Returns true if the increment was done. |
| 542 | */ | 606 | */ |
| 543 | #ifndef atomic_inc_not_zero | 607 | #ifndef atomic_inc_not_zero |
| 544 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | 608 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
| 545 | #endif | 609 | #endif |
| 546 | 610 | ||
| 547 | #ifndef atomic_andnot | 611 | /** |
| 548 | static inline void atomic_andnot(int i, atomic_t *v) | 612 | * atomic_inc_and_test - increment and test |
| 549 | { | 613 | * @v: pointer of type atomic_t |
| 550 | atomic_and(~i, v); | 614 | * |
| 551 | } | 615 | * Atomically increments @v by 1 |
| 552 | 616 | * and returns true if the result is zero, or false for all | |
| 553 | static inline int atomic_fetch_andnot(int i, atomic_t *v) | 617 | * other cases. |
| 554 | { | 618 | */ |
| 555 | return atomic_fetch_and(~i, v); | 619 | #ifndef atomic_inc_and_test |
| 556 | } | 620 | static inline bool atomic_inc_and_test(atomic_t *v) |
| 557 | |||
| 558 | static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) | ||
| 559 | { | 621 | { |
| 560 | return atomic_fetch_and_relaxed(~i, v); | 622 | return atomic_inc_return(v) == 0; |
| 561 | } | 623 | } |
| 624 | #endif | ||
| 562 | 625 | ||
| 563 | static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) | 626 | /** |
| 627 | * atomic_dec_and_test - decrement and test | ||
| 628 | * @v: pointer of type atomic_t | ||
| 629 | * | ||
| 630 | * Atomically decrements @v by 1 and | ||
| 631 | * returns true if the result is 0, or false for all other | ||
| 632 | * cases. | ||
| 633 | */ | ||
| 634 | #ifndef atomic_dec_and_test | ||
| 635 | static inline bool atomic_dec_and_test(atomic_t *v) | ||
| 564 | { | 636 | { |
| 565 | return atomic_fetch_and_acquire(~i, v); | 637 | return atomic_dec_return(v) == 0; |
| 566 | } | 638 | } |
| 639 | #endif | ||
| 567 | 640 | ||
| 568 | static inline int atomic_fetch_andnot_release(int i, atomic_t *v) | 641 | /** |
| 642 | * atomic_sub_and_test - subtract value from variable and test result | ||
| 643 | * @i: integer value to subtract | ||
| 644 | * @v: pointer of type atomic_t | ||
| 645 | * | ||
| 646 | * Atomically subtracts @i from @v and returns | ||
| 647 | * true if the result is zero, or false for all | ||
| 648 | * other cases. | ||
| 649 | */ | ||
| 650 | #ifndef atomic_sub_and_test | ||
| 651 | static inline bool atomic_sub_and_test(int i, atomic_t *v) | ||
| 569 | { | 652 | { |
| 570 | return atomic_fetch_and_release(~i, v); | 653 | return atomic_sub_return(i, v) == 0; |
| 571 | } | 654 | } |
| 572 | #endif | 655 | #endif |
| 573 | 656 | ||
| 574 | /** | 657 | /** |
| 575 | * atomic_inc_not_zero_hint - increment if not null | 658 | * atomic_add_negative - add and test if negative |
| 659 | * @i: integer value to add | ||
| 576 | * @v: pointer of type atomic_t | 660 | * @v: pointer of type atomic_t |
| 577 | * @hint: probable value of the atomic before the increment | ||
| 578 | * | ||
| 579 | * This version of atomic_inc_not_zero() gives a hint of probable | ||
| 580 | * value of the atomic. This helps processor to not read the memory | ||
| 581 | * before doing the atomic read/modify/write cycle, lowering | ||
| 582 | * number of bus transactions on some arches. | ||
| 583 | * | 661 | * |
| 584 | * Returns: 0 if increment was not done, 1 otherwise. | 662 | * Atomically adds @i to @v and returns true |
| 663 | * if the result is negative, or false when | ||
| 664 | * result is greater than or equal to zero. | ||
| 585 | */ | 665 | */ |
| 586 | #ifndef atomic_inc_not_zero_hint | 666 | #ifndef atomic_add_negative |
| 587 | static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) | 667 | static inline bool atomic_add_negative(int i, atomic_t *v) |
| 588 | { | 668 | { |
| 589 | int val, c = hint; | 669 | return atomic_add_return(i, v) < 0; |
| 590 | |||
| 591 | /* sanity test, should be removed by compiler if hint is a constant */ | ||
| 592 | if (!hint) | ||
| 593 | return atomic_inc_not_zero(v); | ||
| 594 | |||
| 595 | do { | ||
| 596 | val = atomic_cmpxchg(v, c, c + 1); | ||
| 597 | if (val == c) | ||
| 598 | return 1; | ||
| 599 | c = val; | ||
| 600 | } while (c); | ||
| 601 | |||
| 602 | return 0; | ||
| 603 | } | 670 | } |
| 604 | #endif | 671 | #endif |
| 605 | 672 | ||
| 606 | #ifndef atomic_inc_unless_negative | 673 | #ifndef atomic_inc_unless_negative |
| 607 | static inline int atomic_inc_unless_negative(atomic_t *p) | 674 | static inline bool atomic_inc_unless_negative(atomic_t *v) |
| 608 | { | 675 | { |
| 609 | int v, v1; | 676 | int c = atomic_read(v); |
| 610 | for (v = 0; v >= 0; v = v1) { | 677 | |
| 611 | v1 = atomic_cmpxchg(p, v, v + 1); | 678 | do { |
| 612 | if (likely(v1 == v)) | 679 | if (unlikely(c < 0)) |
| 613 | return 1; | 680 | return false; |
| 614 | } | 681 | } while (!atomic_try_cmpxchg(v, &c, c + 1)); |
| 615 | return 0; | 682 | |
| 683 | return true; | ||
| 616 | } | 684 | } |
| 617 | #endif | 685 | #endif |
| 618 | 686 | ||
| 619 | #ifndef atomic_dec_unless_positive | 687 | #ifndef atomic_dec_unless_positive |
| 620 | static inline int atomic_dec_unless_positive(atomic_t *p) | 688 | static inline bool atomic_dec_unless_positive(atomic_t *v) |
| 621 | { | 689 | { |
| 622 | int v, v1; | 690 | int c = atomic_read(v); |
| 623 | for (v = 0; v <= 0; v = v1) { | 691 | |
| 624 | v1 = atomic_cmpxchg(p, v, v - 1); | 692 | do { |
| 625 | if (likely(v1 == v)) | 693 | if (unlikely(c > 0)) |
| 626 | return 1; | 694 | return false; |
| 627 | } | 695 | } while (!atomic_try_cmpxchg(v, &c, c - 1)); |
| 628 | return 0; | 696 | |
| 697 | return true; | ||
| 629 | } | 698 | } |
| 630 | #endif | 699 | #endif |
| 631 | 700 | ||
| @@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p) | |||
| 639 | #ifndef atomic_dec_if_positive | 708 | #ifndef atomic_dec_if_positive |
| 640 | static inline int atomic_dec_if_positive(atomic_t *v) | 709 | static inline int atomic_dec_if_positive(atomic_t *v) |
| 641 | { | 710 | { |
| 642 | int c, old, dec; | 711 | int dec, c = atomic_read(v); |
| 643 | c = atomic_read(v); | 712 | |
| 644 | for (;;) { | 713 | do { |
| 645 | dec = c - 1; | 714 | dec = c - 1; |
| 646 | if (unlikely(dec < 0)) | 715 | if (unlikely(dec < 0)) |
| 647 | break; | 716 | break; |
| 648 | old = atomic_cmpxchg((v), c, dec); | 717 | } while (!atomic_try_cmpxchg(v, &c, dec)); |
| 649 | if (likely(old == c)) | 718 | |
| 650 | break; | ||
| 651 | c = old; | ||
| 652 | } | ||
| 653 | return dec; | 719 | return dec; |
| 654 | } | 720 | } |
| 655 | #endif | 721 | #endif |
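The converted loops above all share the atomic_try_cmpxchg() shape: on failure the expected value is updated in place, so no explicit re-read is needed. A generic sketch of the pattern (function name hypothetical):

static inline int atomic_update_sketch(atomic_t *v)
{
	int new, old = atomic_read(v);

	do {
		new = old + 1;	/* any pure function of the old value */
	} while (!atomic_try_cmpxchg(v, &old, new));	/* reloads old on failure */

	return old;	/* fetch-style: return the pre-update value */
}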
| @@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
| 693 | #endif | 759 | #endif |
| 694 | #endif /* atomic64_add_return_relaxed */ | 760 | #endif /* atomic64_add_return_relaxed */ |
| 695 | 761 | ||
| 762 | #ifndef atomic64_inc | ||
| 763 | #define atomic64_inc(v) atomic64_add(1, (v)) | ||
| 764 | #endif | ||
| 765 | |||
| 696 | /* atomic64_inc_return_relaxed */ | 766 | /* atomic64_inc_return_relaxed */ |
| 697 | #ifndef atomic64_inc_return_relaxed | 767 | #ifndef atomic64_inc_return_relaxed |
| 768 | |||
| 769 | #ifndef atomic64_inc_return | ||
| 770 | #define atomic64_inc_return(v) atomic64_add_return(1, (v)) | ||
| 771 | #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) | ||
| 772 | #define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) | ||
| 773 | #define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) | ||
| 774 | #else /* atomic64_inc_return */ | ||
| 698 | #define atomic64_inc_return_relaxed atomic64_inc_return | 775 | #define atomic64_inc_return_relaxed atomic64_inc_return |
| 699 | #define atomic64_inc_return_acquire atomic64_inc_return | 776 | #define atomic64_inc_return_acquire atomic64_inc_return |
| 700 | #define atomic64_inc_return_release atomic64_inc_return | 777 | #define atomic64_inc_return_release atomic64_inc_return |
| 778 | #endif /* atomic64_inc_return */ | ||
| 701 | 779 | ||
| 702 | #else /* atomic64_inc_return_relaxed */ | 780 | #else /* atomic64_inc_return_relaxed */ |
| 703 | 781 | ||
| @@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
| 742 | #endif | 820 | #endif |
| 743 | #endif /* atomic64_sub_return_relaxed */ | 821 | #endif /* atomic64_sub_return_relaxed */ |
| 744 | 822 | ||
| 823 | #ifndef atomic64_dec | ||
| 824 | #define atomic64_dec(v) atomic64_sub(1, (v)) | ||
| 825 | #endif | ||
| 826 | |||
| 745 | /* atomic64_dec_return_relaxed */ | 827 | /* atomic64_dec_return_relaxed */ |
| 746 | #ifndef atomic64_dec_return_relaxed | 828 | #ifndef atomic64_dec_return_relaxed |
| 829 | |||
| 830 | #ifndef atomic64_dec_return | ||
| 831 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) | ||
| 832 | #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) | ||
| 833 | #define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) | ||
| 834 | #define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) | ||
| 835 | #else /* atomic64_dec_return */ | ||
| 747 | #define atomic64_dec_return_relaxed atomic64_dec_return | 836 | #define atomic64_dec_return_relaxed atomic64_dec_return |
| 748 | #define atomic64_dec_return_acquire atomic64_dec_return | 837 | #define atomic64_dec_return_acquire atomic64_dec_return |
| 749 | #define atomic64_dec_return_release atomic64_dec_return | 838 | #define atomic64_dec_return_release atomic64_dec_return |
| 839 | #endif /* atomic64_dec_return */ | ||
| 750 | 840 | ||
| 751 | #else /* atomic64_dec_return_relaxed */ | 841 | #else /* atomic64_dec_return_relaxed */ |
| 752 | 842 | ||
| @@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
| 927 | #endif | 1017 | #endif |
| 928 | #endif /* atomic64_fetch_and_relaxed */ | 1018 | #endif /* atomic64_fetch_and_relaxed */ |
| 929 | 1019 | ||
| 930 | #ifdef atomic64_andnot | 1020 | #ifndef atomic64_andnot |
| 931 | /* atomic64_fetch_andnot_relaxed */ | 1021 | #define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) |
| 1022 | #endif | ||
| 1023 | |||
| 932 | #ifndef atomic64_fetch_andnot_relaxed | 1024 | #ifndef atomic64_fetch_andnot_relaxed |
| 933 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot | 1025 | |
| 934 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot | 1026 | #ifndef atomic64_fetch_andnot |
| 935 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot | 1027 | #define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) |
| 1028 | #define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) | ||
| 1029 | #define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) | ||
| 1030 | #define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) | ||
| 1031 | #else /* atomic64_fetch_andnot */ | ||
| 1032 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot | ||
| 1033 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot | ||
| 1034 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot | ||
| 1035 | #endif /* atomic64_fetch_andnot */ | ||
| 936 | 1036 | ||
| 937 | #else /* atomic64_fetch_andnot_relaxed */ | 1037 | #else /* atomic64_fetch_andnot_relaxed */ |
| 938 | 1038 | ||
| @@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
| 951 | __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) | 1051 | __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) |
| 952 | #endif | 1052 | #endif |
| 953 | #endif /* atomic64_fetch_andnot_relaxed */ | 1053 | #endif /* atomic64_fetch_andnot_relaxed */ |
| 954 | #endif /* atomic64_andnot */ | ||
| 955 | 1054 | ||
| 956 | /* atomic64_fetch_xor_relaxed */ | 1055 | /* atomic64_fetch_xor_relaxed */ |
| 957 | #ifndef atomic64_fetch_xor_relaxed | 1056 | #ifndef atomic64_fetch_xor_relaxed |
| @@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
| 1049 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg | 1148 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg |
| 1050 | #endif /* atomic64_try_cmpxchg */ | 1149 | #endif /* atomic64_try_cmpxchg */ |
| 1051 | 1150 | ||
| 1052 | #ifndef atomic64_andnot | 1151 | /** |
| 1053 | static inline void atomic64_andnot(long long i, atomic64_t *v) | 1152 | * atomic64_fetch_add_unless - add unless the number is already a given value |
| 1153 | * @v: pointer of type atomic64_t | ||
| 1154 | * @a: the amount to add to v... | ||
| 1155 | * @u: ...unless v is equal to u. | ||
| 1156 | * | ||
| 1157 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 1158 | * Returns the original value of @v. | ||
| 1159 | */ | ||
| 1160 | #ifndef atomic64_fetch_add_unless | ||
| 1161 | static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, | ||
| 1162 | long long u) | ||
| 1054 | { | 1163 | { |
| 1055 | atomic64_and(~i, v); | 1164 | long long c = atomic64_read(v); |
| 1165 | |||
| 1166 | do { | ||
| 1167 | if (unlikely(c == u)) | ||
| 1168 | break; | ||
| 1169 | } while (!atomic64_try_cmpxchg(v, &c, c + a)); | ||
| 1170 | |||
| 1171 | return c; | ||
| 1056 | } | 1172 | } |
| 1173 | #endif | ||
| 1057 | 1174 | ||
| 1058 | static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) | 1175 | /** |
| 1176 | * atomic64_add_unless - add unless the number is already a given value | ||
| 1177 | * @v: pointer of type atomic_t | ||
| 1178 | * @a: the amount to add to v... | ||
| 1179 | * @u: ...unless v is equal to u. | ||
| 1180 | * | ||
| 1181 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 1182 | * Returns true if the addition was done. | ||
| 1183 | */ | ||
| 1184 | static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
| 1059 | { | 1185 | { |
| 1060 | return atomic64_fetch_and(~i, v); | 1186 | return atomic64_fetch_add_unless(v, a, u) != u; |
| 1061 | } | 1187 | } |
| 1062 | 1188 | ||
| 1063 | static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) | 1189 | /** |
| 1190 | * atomic64_inc_not_zero - increment unless the number is zero | ||
| 1191 | * @v: pointer of type atomic64_t | ||
| 1192 | * | ||
| 1193 | * Atomically increments @v by 1, if @v is non-zero. | ||
| 1194 | * Returns true if the increment was done. | ||
| 1195 | */ | ||
| 1196 | #ifndef atomic64_inc_not_zero | ||
| 1197 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
| 1198 | #endif | ||
| 1199 | |||
| 1200 | /** | ||
| 1201 | * atomic64_inc_and_test - increment and test | ||
| 1202 | * @v: pointer of type atomic64_t | ||
| 1203 | * | ||
| 1204 | * Atomically increments @v by 1 | ||
| 1205 | * and returns true if the result is zero, or false for all | ||
| 1206 | * other cases. | ||
| 1207 | */ | ||
| 1208 | #ifndef atomic64_inc_and_test | ||
| 1209 | static inline bool atomic64_inc_and_test(atomic64_t *v) | ||
| 1064 | { | 1210 | { |
| 1065 | return atomic64_fetch_and_relaxed(~i, v); | 1211 | return atomic64_inc_return(v) == 0; |
| 1066 | } | 1212 | } |
| 1213 | #endif | ||
| 1067 | 1214 | ||
| 1068 | static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) | 1215 | /** |
| 1216 | * atomic64_dec_and_test - decrement and test | ||
| 1217 | * @v: pointer of type atomic64_t | ||
| 1218 | * | ||
| 1219 | * Atomically decrements @v by 1 and | ||
| 1220 | * returns true if the result is 0, or false for all other | ||
| 1221 | * cases. | ||
| 1222 | */ | ||
| 1223 | #ifndef atomic64_dec_and_test | ||
| 1224 | static inline bool atomic64_dec_and_test(atomic64_t *v) | ||
| 1069 | { | 1225 | { |
| 1070 | return atomic64_fetch_and_acquire(~i, v); | 1226 | return atomic64_dec_return(v) == 0; |
| 1071 | } | 1227 | } |
| 1228 | #endif | ||
| 1072 | 1229 | ||
| 1073 | static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) | 1230 | /** |
| 1231 | * atomic64_sub_and_test - subtract value from variable and test result | ||
| 1232 | * @i: integer value to subtract | ||
| 1233 | * @v: pointer of type atomic64_t | ||
| 1234 | * | ||
| 1235 | * Atomically subtracts @i from @v and returns | ||
| 1236 | * true if the result is zero, or false for all | ||
| 1237 | * other cases. | ||
| 1238 | */ | ||
| 1239 | #ifndef atomic64_sub_and_test | ||
| 1240 | static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) | ||
| 1241 | { | ||
| 1242 | return atomic64_sub_return(i, v) == 0; | ||
| 1243 | } | ||
| 1244 | #endif | ||
| 1245 | |||
| 1246 | /** | ||
| 1247 | * atomic64_add_negative - add and test if negative | ||
| 1248 | * @i: integer value to add | ||
| 1249 | * @v: pointer of type atomic64_t | ||
| 1250 | * | ||
| 1251 | * Atomically adds @i to @v and returns true | ||
| 1252 | * if the result is negative, or false when | ||
| 1253 | * result is greater than or equal to zero. | ||
| 1254 | */ | ||
| 1255 | #ifndef atomic64_add_negative | ||
| 1256 | static inline bool atomic64_add_negative(long long i, atomic64_t *v) | ||
| 1074 | { | 1257 | { |
| 1075 | return atomic64_fetch_and_release(~i, v); | 1258 | return atomic64_add_return(i, v) < 0; |
| 1259 | } | ||
| 1260 | #endif | ||
| 1261 | |||
| 1262 | #ifndef atomic64_inc_unless_negative | ||
| 1263 | static inline bool atomic64_inc_unless_negative(atomic64_t *v) | ||
| 1264 | { | ||
| 1265 | long long c = atomic64_read(v); | ||
| 1266 | |||
| 1267 | do { | ||
| 1268 | if (unlikely(c < 0)) | ||
| 1269 | return false; | ||
| 1270 | } while (!atomic64_try_cmpxchg(v, &c, c + 1)); | ||
| 1271 | |||
| 1272 | return true; | ||
| 1273 | } | ||
| 1274 | #endif | ||
| 1275 | |||
| 1276 | #ifndef atomic64_dec_unless_positive | ||
| 1277 | static inline bool atomic64_dec_unless_positive(atomic64_t *v) | ||
| 1278 | { | ||
| 1279 | long long c = atomic64_read(v); | ||
| 1280 | |||
| 1281 | do { | ||
| 1282 | if (unlikely(c > 0)) | ||
| 1283 | return false; | ||
| 1284 | } while (!atomic64_try_cmpxchg(v, &c, c - 1)); | ||
| 1285 | |||
| 1286 | return true; | ||
| 1287 | } | ||
| 1288 | #endif | ||
| 1289 | |||
| 1290 | /* | ||
| 1291 | * atomic64_dec_if_positive - decrement by 1 if old value positive | ||
| 1292 | * @v: pointer of type atomic64_t | ||
| 1293 | * | ||
| 1294 | * The function returns the old value of *v minus 1, even if | ||
| 1295 | * the atomic64 variable, v, was not decremented. | ||
| 1296 | */ | ||
| 1297 | #ifndef atomic64_dec_if_positive | ||
| 1298 | static inline long long atomic64_dec_if_positive(atomic64_t *v) | ||
| 1299 | { | ||
| 1300 | long long dec, c = atomic64_read(v); | ||
| 1301 | |||
| 1302 | do { | ||
| 1303 | dec = c - 1; | ||
| 1304 | if (unlikely(dec < 0)) | ||
| 1305 | break; | ||
| 1306 | } while (!atomic64_try_cmpxchg(v, &c, dec)); | ||
| 1307 | |||
| 1308 | return dec; | ||
| 1076 | } | 1309 | } |
| 1077 | #endif | 1310 | #endif |
| 1078 | 1311 | ||
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
| @@ -2,29 +2,9 @@ | |||
| 2 | #ifndef _LINUX_BITOPS_H | 2 | #ifndef _LINUX_BITOPS_H |
| 3 | #define _LINUX_BITOPS_H | 3 | #define _LINUX_BITOPS_H |
| 4 | #include <asm/types.h> | 4 | #include <asm/types.h> |
| 5 | #include <linux/bits.h> | ||
| 5 | 6 | ||
| 6 | #ifdef __KERNEL__ | ||
| 7 | #define BIT(nr) (1UL << (nr)) | ||
| 8 | #define BIT_ULL(nr) (1ULL << (nr)) | ||
| 9 | #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
| 10 | #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) | ||
| 11 | #define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) | ||
| 12 | #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) | ||
| 13 | #define BITS_PER_BYTE 8 | ||
| 14 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) | 7 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
| 15 | #endif | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Create a contiguous bitmask starting at bit position @l and ending at | ||
| 19 | * position @h. For example | ||
| 20 | * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. | ||
| 21 | */ | ||
| 22 | #define GENMASK(h, l) \ | ||
| 23 | (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) | ||
| 24 | |||
| 25 | #define GENMASK_ULL(h, l) \ | ||
| 26 | (((~0ULL) - (1ULL << (l)) + 1) & \ | ||
| 27 | (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) | ||
| 28 | 8 | ||
| 29 | extern unsigned int __sw_hweight8(unsigned int w); | 9 | extern unsigned int __sw_hweight8(unsigned int w); |
| 30 | extern unsigned int __sw_hweight16(unsigned int w); | 10 | extern unsigned int __sw_hweight16(unsigned int w); |
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
| @@ -0,0 +1,26 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef __LINUX_BITS_H | ||
| 3 | #define __LINUX_BITS_H | ||
| 4 | #include <asm/bitsperlong.h> | ||
| 5 | |||
| 6 | #define BIT(nr) (1UL << (nr)) | ||
| 7 | #define BIT_ULL(nr) (1ULL << (nr)) | ||
| 8 | #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
| 9 | #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) | ||
| 10 | #define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) | ||
| 11 | #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) | ||
| 12 | #define BITS_PER_BYTE 8 | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Create a contiguous bitmask starting at bit position @l and ending at | ||
| 16 | * position @h. For example | ||
| 17 | * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. | ||
| 18 | */ | ||
| 19 | #define GENMASK(h, l) \ | ||
| 20 | (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) | ||
| 21 | |||
| 22 | #define GENMASK_ULL(h, l) \ | ||
| 23 | (((~0ULL) - (1ULL << (l)) + 1) & \ | ||
| 24 | (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) | ||
| 25 | |||
| 26 | #endif /* __LINUX_BITS_H */ | ||
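Since both macros expand to integer constant expressions, their behaviour can be spot-checked at compile time; an illustrative sketch (compiler permitting, the second value matches the comment above):

_Static_assert(GENMASK(7, 4) == 0xf0UL, "bits 4..7 set");
_Static_assert(GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL,
	       "example from the header comment");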
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185..308918928767 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
| @@ -194,6 +194,9 @@ extern void clocksource_suspend(void); | |||
| 194 | extern void clocksource_resume(void); | 194 | extern void clocksource_resume(void); |
| 195 | extern struct clocksource * __init clocksource_default_clock(void); | 195 | extern struct clocksource * __init clocksource_default_clock(void); |
| 196 | extern void clocksource_mark_unstable(struct clocksource *cs); | 196 | extern void clocksource_mark_unstable(struct clocksource *cs); |
| 197 | extern void | ||
| 198 | clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); | ||
| 199 | extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); | ||
| 197 | 200 | ||
| 198 | extern u64 | 201 | extern u64 |
| 199 | clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); | 202 | clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); |
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c68acc47da57..df45ee8413d6 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
| @@ -115,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t; | |||
| 115 | struct compat_sel_arg_struct; | 115 | struct compat_sel_arg_struct; |
| 116 | struct rusage; | 116 | struct rusage; |
| 117 | 117 | ||
| 118 | struct compat_itimerspec { | ||
| 119 | struct compat_timespec it_interval; | ||
| 120 | struct compat_timespec it_value; | ||
| 121 | }; | ||
| 122 | |||
| 123 | struct compat_utimbuf { | 118 | struct compat_utimbuf { |
| 124 | compat_time_t actime; | 119 | compat_time_t actime; |
| 125 | compat_time_t modtime; | 120 | compat_time_t modtime; |
| @@ -300,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *); | |||
| 300 | extern int compat_put_timespec(const struct timespec *, void __user *); | 295 | extern int compat_put_timespec(const struct timespec *, void __user *); |
| 301 | extern int compat_get_timeval(struct timeval *, const void __user *); | 296 | extern int compat_get_timeval(struct timeval *, const void __user *); |
| 302 | extern int compat_put_timeval(const struct timeval *, void __user *); | 297 | extern int compat_put_timeval(const struct timeval *, void __user *); |
| 303 | extern int get_compat_itimerspec64(struct itimerspec64 *its, | ||
| 304 | const struct compat_itimerspec __user *uits); | ||
| 305 | extern int put_compat_itimerspec64(const struct itimerspec64 *its, | ||
| 306 | struct compat_itimerspec __user *uits); | ||
| 307 | 298 | ||
| 308 | struct compat_iovec { | 299 | struct compat_iovec { |
| 309 | compat_uptr_t iov_base; | 300 | compat_uptr_t iov_base; |
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
index 31f2774f1994..e70bfd1d2c3f 100644
--- a/include/linux/compat_time.h
+++ b/include/linux/compat_time.h
| @@ -17,7 +17,16 @@ struct compat_timeval { | |||
| 17 | s32 tv_usec; | 17 | s32 tv_usec; |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | struct compat_itimerspec { | ||
| 21 | struct compat_timespec it_interval; | ||
| 22 | struct compat_timespec it_value; | ||
| 23 | }; | ||
| 24 | |||
| 20 | extern int compat_get_timespec64(struct timespec64 *, const void __user *); | 25 | extern int compat_get_timespec64(struct timespec64 *, const void __user *); |
| 21 | extern int compat_put_timespec64(const struct timespec64 *, void __user *); | 26 | extern int compat_put_timespec64(const struct timespec64 *, void __user *); |
| 27 | extern int get_compat_itimerspec64(struct itimerspec64 *its, | ||
| 28 | const struct compat_itimerspec __user *uits); | ||
| 29 | extern int put_compat_itimerspec64(const struct itimerspec64 *its, | ||
| 30 | struct compat_itimerspec __user *uits); | ||
| 22 | 31 | ||
| 23 | #endif /* _LINUX_COMPAT_TIME_H */ | 32 | #endif /* _LINUX_COMPAT_TIME_H */ |
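A hedged sketch of the call pattern these helpers support in a compat syscall (function and variable names hypothetical):

static long compat_timer_op_sketch(const struct compat_itimerspec __user *uits)
{
	struct itimerspec64 its;

	/* Copy the 32-bit userspace layout into the y2038-safe form. */
	if (get_compat_itimerspec64(&its, uits))
		return -EFAULT;
	/* ... operate on the 64-bit representation ... */
	return 0;
}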
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a97a63eef59f..3233fbe23594 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
| @@ -30,7 +30,7 @@ struct cpu { | |||
| 30 | }; | 30 | }; |
| 31 | 31 | ||
| 32 | extern void boot_cpu_init(void); | 32 | extern void boot_cpu_init(void); |
| 33 | extern void boot_cpu_state_init(void); | 33 | extern void boot_cpu_hotplug_init(void); |
| 34 | extern void cpu_init(void); | 34 | extern void cpu_init(void); |
| 35 | extern void trap_init(void); | 35 | extern void trap_init(void); |
| 36 | 36 | ||
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8796ba387152..4cf06a64bc02 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
| @@ -164,6 +164,7 @@ enum cpuhp_state { | |||
| 164 | CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, | 164 | CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, |
| 165 | CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, | 165 | CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, |
| 166 | CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, | 166 | CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, |
| 167 | CPUHP_AP_WATCHDOG_ONLINE, | ||
| 167 | CPUHP_AP_WORKQUEUE_ONLINE, | 168 | CPUHP_AP_WORKQUEUE_ONLINE, |
| 168 | CPUHP_AP_RCUTREE_ONLINE, | 169 | CPUHP_AP_RCUTREE_ONLINE, |
| 169 | CPUHP_AP_ONLINE_DYN, | 170 | CPUHP_AP_ONLINE_DYN, |
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
| @@ -894,6 +894,16 @@ typedef struct _efi_file_handle { | |||
| 894 | void *flush; | 894 | void *flush; |
| 895 | } efi_file_handle_t; | 895 | } efi_file_handle_t; |
| 896 | 896 | ||
| 897 | typedef struct { | ||
| 898 | u64 revision; | ||
| 899 | u32 open_volume; | ||
| 900 | } efi_file_io_interface_32_t; | ||
| 901 | |||
| 902 | typedef struct { | ||
| 903 | u64 revision; | ||
| 904 | u64 open_volume; | ||
| 905 | } efi_file_io_interface_64_t; | ||
| 906 | |||
| 897 | typedef struct _efi_file_io_interface { | 907 | typedef struct _efi_file_io_interface { |
| 898 | u64 revision; | 908 | u64 revision; |
| 899 | int (*open_volume)(struct _efi_file_io_interface *, | 909 | int (*open_volume)(struct _efi_file_io_interface *, |
| @@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); | |||
| 988 | extern void efi_gettimeofday (struct timespec64 *ts); | 998 | extern void efi_gettimeofday (struct timespec64 *ts); |
| 989 | extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ | 999 | extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ |
| 990 | #ifdef CONFIG_X86 | 1000 | #ifdef CONFIG_X86 |
| 991 | extern void efi_late_init(void); | ||
| 992 | extern void efi_free_boot_services(void); | 1001 | extern void efi_free_boot_services(void); |
| 993 | extern efi_status_t efi_query_variable_store(u32 attributes, | 1002 | extern efi_status_t efi_query_variable_store(u32 attributes, |
| 994 | unsigned long size, | 1003 | unsigned long size, |
| 995 | bool nonblocking); | 1004 | bool nonblocking); |
| 996 | extern void efi_find_mirror(void); | 1005 | extern void efi_find_mirror(void); |
| 997 | #else | 1006 | #else |
| 998 | static inline void efi_late_init(void) {} | ||
| 999 | static inline void efi_free_boot_services(void) {} | 1007 | static inline void efi_free_boot_services(void) {} |
| 1000 | 1008 | ||
| 1001 | static inline efi_status_t efi_query_variable_store(u32 attributes, | 1009 | static inline efi_status_t efi_query_variable_store(u32 attributes, |
| @@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog { | |||
| 1651 | 1659 | ||
| 1652 | extern int efi_tpm_eventlog_init(void); | 1660 | extern int efi_tpm_eventlog_init(void); |
| 1653 | 1661 | ||
| 1662 | /* Workqueue to queue EFI Runtime Services */ | ||
| 1663 | extern struct workqueue_struct *efi_rts_wq; | ||
| 1664 | |||
| 1654 | #endif /* _LINUX_EFI_H */ | 1665 | #endif /* _LINUX_EFI_H */ |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..9d2ea3e907d0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
| @@ -73,6 +73,7 @@ | |||
| 73 | #define GICD_TYPER_MBIS (1U << 16) | 73 | #define GICD_TYPER_MBIS (1U << 16) |
| 74 | 74 | ||
| 75 | #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) | 75 | #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) |
| 76 | #define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) | ||
| 76 | #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) | 77 | #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) |
| 77 | 78 | ||
| 78 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) | 79 | #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) |
| @@ -576,8 +577,8 @@ struct rdists { | |||
| 576 | phys_addr_t phys_base; | 577 | phys_addr_t phys_base; |
| 577 | } __percpu *rdist; | 578 | } __percpu *rdist; |
| 578 | struct page *prop_page; | 579 | struct page *prop_page; |
| 579 | int id_bits; | ||
| 580 | u64 flags; | 580 | u64 flags; |
| 581 | u32 gicd_typer; | ||
| 581 | bool has_vlpis; | 582 | bool has_vlpis; |
| 582 | bool has_direct_lpi; | 583 | bool has_direct_lpi; |
| 583 | }; | 584 | }; |
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5b9fddbaac41..b2bb44f87f5a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
| @@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
| 93 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ | 93 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ |
| 94 | #define ktime_to_timeval(kt) ns_to_timeval((kt)) | 94 | #define ktime_to_timeval(kt) ns_to_timeval((kt)) |
| 95 | 95 | ||
| 96 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ | 96 | /* Convert ktime_t to nanoseconds */ |
| 97 | #define ktime_to_ns(kt) (kt) | 97 | static inline s64 ktime_to_ns(const ktime_t kt) |
| 98 | { | ||
| 99 | return kt; | ||
| 100 | } | ||
| 98 | 101 | ||
| 99 | /** | 102 | /** |
| 100 | * ktime_compare - Compares two ktime_t variables for less, greater or equal | 103 | * ktime_compare - Compares two ktime_t variables for less, greater or equal |
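The conversion from a #define to a static inline is behaviour-neutral in the scalar ktime_t format, but the argument is now type-checked; a sketch (assumes ktime_sub() from this header and ktime_get() from <linux/timekeeping.h>):

static inline s64 elapsed_ns_sketch(ktime_t start)
{
	/* Type-checked now: passing anything but a ktime_t is diagnosed,
	 * where the old #define silently accepted any expression. */
	return ktime_to_ns(ktime_sub(ktime_get(), start));
}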
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7ba6d356d18f..68a5121694ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
| @@ -466,6 +466,9 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) | |||
| 466 | vma->vm_ops = NULL; | 466 | vma->vm_ops = NULL; |
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ | ||
| 470 | #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } | ||
| 471 | |||
| 469 | struct mmu_gather; | 472 | struct mmu_gather; |
| 470 | struct inode; | 473 | struct inode; |
| 471 | 474 | ||
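A sketch of the intended use of TLB_FLUSH_VMA() in arch code (function name hypothetical): build an on-stack vm_area_struct purely so the vma-based flush API can be called:

static void flush_range_sketch(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	/* Only .vm_mm and .vm_flags are filled in; that is all
	 * flush_tlb_range() is expected to look at here. */
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, start, end);
}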
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb..efdc24dd9e97 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
| @@ -335,176 +335,183 @@ struct core_state { | |||
| 335 | 335 | ||
| 336 | struct kioctx_table; | 336 | struct kioctx_table; |
| 337 | struct mm_struct { | 337 | struct mm_struct { |
| 338 | struct vm_area_struct *mmap; /* list of VMAs */ | 338 | struct { |
| 339 | struct rb_root mm_rb; | 339 | struct vm_area_struct *mmap; /* list of VMAs */ |
| 340 | u32 vmacache_seqnum; /* per-thread vmacache */ | 340 | struct rb_root mm_rb; |
| 341 | u32 vmacache_seqnum; /* per-thread vmacache */ | ||
| 341 | #ifdef CONFIG_MMU | 342 | #ifdef CONFIG_MMU |
| 342 | unsigned long (*get_unmapped_area) (struct file *filp, | 343 | unsigned long (*get_unmapped_area) (struct file *filp, |
| 343 | unsigned long addr, unsigned long len, | 344 | unsigned long addr, unsigned long len, |
| 344 | unsigned long pgoff, unsigned long flags); | 345 | unsigned long pgoff, unsigned long flags); |
| 345 | #endif | 346 | #endif |
| 346 | unsigned long mmap_base; /* base of mmap area */ | 347 | unsigned long mmap_base; /* base of mmap area */ |
| 347 | unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ | 348 | unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ |
| 348 | #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES | 349 | #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES |
| 349 | /* Base adresses for compatible mmap() */ | 350 | /* Base adresses for compatible mmap() */ |
| 350 | unsigned long mmap_compat_base; | 351 | unsigned long mmap_compat_base; |
| 351 | unsigned long mmap_compat_legacy_base; | 352 | unsigned long mmap_compat_legacy_base; |
| 352 | #endif | 353 | #endif |
| 353 | unsigned long task_size; /* size of task vm space */ | 354 | unsigned long task_size; /* size of task vm space */ |
| 354 | unsigned long highest_vm_end; /* highest vma end address */ | 355 | unsigned long highest_vm_end; /* highest vma end address */ |
| 355 | pgd_t * pgd; | 356 | pgd_t * pgd; |
| 356 | 357 | ||
| 357 | /** | 358 | /** |
| 358 | * @mm_users: The number of users including userspace. | 359 | * @mm_users: The number of users including userspace. |
| 359 | * | 360 | * |
| 360 | * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops | 361 | * Use mmget()/mmget_not_zero()/mmput() to modify. When this |
| 361 | * to 0 (i.e. when the task exits and there are no other temporary | 362 | * drops to 0 (i.e. when the task exits and there are no other |
| 362 | * reference holders), we also release a reference on @mm_count | 363 | * temporary reference holders), we also release a reference on |
| 363 | * (which may then free the &struct mm_struct if @mm_count also | 364 | * @mm_count (which may then free the &struct mm_struct if |
| 364 | * drops to 0). | 365 | * @mm_count also drops to 0). |
| 365 | */ | 366 | */ |
| 366 | atomic_t mm_users; | 367 | atomic_t mm_users; |
| 367 | 368 | ||
| 368 | /** | 369 | /** |
| 369 | * @mm_count: The number of references to &struct mm_struct | 370 | * @mm_count: The number of references to &struct mm_struct |
| 370 | * (@mm_users count as 1). | 371 | * (@mm_users count as 1). |
| 371 | * | 372 | * |
| 372 | * Use mmgrab()/mmdrop() to modify. When this drops to 0, the | 373 | * Use mmgrab()/mmdrop() to modify. When this drops to 0, the |
| 373 | * &struct mm_struct is freed. | 374 | * &struct mm_struct is freed. |
| 374 | */ | 375 | */ |
| 375 | atomic_t mm_count; | 376 | atomic_t mm_count; |
| 376 | 377 | ||
| 377 | #ifdef CONFIG_MMU | 378 | #ifdef CONFIG_MMU |
| 378 | atomic_long_t pgtables_bytes; /* PTE page table pages */ | 379 | atomic_long_t pgtables_bytes; /* PTE page table pages */ |
| 379 | #endif | 380 | #endif |
| 380 | int map_count; /* number of VMAs */ | 381 | int map_count; /* number of VMAs */ |
| 381 | 382 | ||
| 382 | spinlock_t page_table_lock; /* Protects page tables and some counters */ | 383 | spinlock_t page_table_lock; /* Protects page tables and some |
| 383 | struct rw_semaphore mmap_sem; | 384 | * counters |
| 385 | */ | ||
| 386 | struct rw_semaphore mmap_sem; | ||
| 384 | 387 | ||
| 385 | struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung | 388 | struct list_head mmlist; /* List of maybe swapped mm's. These |
| 386 | * together off init_mm.mmlist, and are protected | 389 | * are globally strung together off |
| 387 | * by mmlist_lock | 390 | * init_mm.mmlist, and are protected |
| 388 | */ | 391 | * by mmlist_lock |
| 392 | */ | ||
| 389 | 393 | ||
| 390 | 394 | ||
| 391 | unsigned long hiwater_rss; /* High-watermark of RSS usage */ | 395 | unsigned long hiwater_rss; /* High-watermark of RSS usage */ |
| 392 | unsigned long hiwater_vm; /* High-water virtual memory usage */ | 396 | unsigned long hiwater_vm; /* High-water virtual memory usage */ |
| 393 | 397 | ||
| 394 | unsigned long total_vm; /* Total pages mapped */ | 398 | unsigned long total_vm; /* Total pages mapped */ |
| 395 | unsigned long locked_vm; /* Pages that have PG_mlocked set */ | 399 | unsigned long locked_vm; /* Pages that have PG_mlocked set */ |
| 396 | unsigned long pinned_vm; /* Refcount permanently increased */ | 400 | unsigned long pinned_vm; /* Refcount permanently increased */ |
| 397 | unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ | 401 | unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ |
| 398 | unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ | 402 | unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ |
| 399 | unsigned long stack_vm; /* VM_STACK */ | 403 | unsigned long stack_vm; /* VM_STACK */ |
| 400 | unsigned long def_flags; | 404 | unsigned long def_flags; |
| 401 | 405 | ||
| 402 | spinlock_t arg_lock; /* protect the below fields */ | 406 | spinlock_t arg_lock; /* protect the below fields */ |
| 403 | unsigned long start_code, end_code, start_data, end_data; | 407 | unsigned long start_code, end_code, start_data, end_data; |
| 404 | unsigned long start_brk, brk, start_stack; | 408 | unsigned long start_brk, brk, start_stack; |
| 405 | unsigned long arg_start, arg_end, env_start, env_end; | 409 | unsigned long arg_start, arg_end, env_start, env_end; |
| 406 | 410 | ||
| 407 | unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ | 411 | unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ |
| 408 | 412 | ||
| 409 | /* | 413 | /* |
| 410 | * Special counters, in some configurations protected by the | 414 | * Special counters, in some configurations protected by the |
| 411 | * page_table_lock, in other configurations by being atomic. | 415 | * page_table_lock, in other configurations by being atomic. |
| 412 | */ | 416 | */ |
| 413 | struct mm_rss_stat rss_stat; | 417 | struct mm_rss_stat rss_stat; |
| 414 | |||
| 415 | struct linux_binfmt *binfmt; | ||
| 416 | 418 | ||
| 417 | cpumask_var_t cpu_vm_mask_var; | 419 | struct linux_binfmt *binfmt; |
| 418 | 420 | ||
| 419 | /* Architecture-specific MM context */ | 421 | /* Architecture-specific MM context */ |
| 420 | mm_context_t context; | 422 | mm_context_t context; |
| 421 | 423 | ||
| 422 | unsigned long flags; /* Must use atomic bitops to access the bits */ | 424 | unsigned long flags; /* Must use atomic bitops to access */ |
| 423 | 425 | ||
| 424 | struct core_state *core_state; /* coredumping support */ | 426 | struct core_state *core_state; /* coredumping support */ |
| 425 | #ifdef CONFIG_MEMBARRIER | 427 | #ifdef CONFIG_MEMBARRIER |
| 426 | atomic_t membarrier_state; | 428 | atomic_t membarrier_state; |
| 427 | #endif | 429 | #endif |
| 428 | #ifdef CONFIG_AIO | 430 | #ifdef CONFIG_AIO |
| 429 | spinlock_t ioctx_lock; | 431 | spinlock_t ioctx_lock; |
| 430 | struct kioctx_table __rcu *ioctx_table; | 432 | struct kioctx_table __rcu *ioctx_table; |
| 431 | #endif | 433 | #endif |
| 432 | #ifdef CONFIG_MEMCG | 434 | #ifdef CONFIG_MEMCG |
| 433 | /* | 435 | /* |
| 434 | * "owner" points to a task that is regarded as the canonical | 436 | * "owner" points to a task that is regarded as the canonical |
| 435 | * user/owner of this mm. All of the following must be true in | 437 | * user/owner of this mm. All of the following must be true in |
| 436 | * order for it to be changed: | 438 | * order for it to be changed: |
| 437 | * | 439 | * |
| 438 | * current == mm->owner | 440 | * current == mm->owner |
| 439 | * current->mm != mm | 441 | * current->mm != mm |
| 440 | * new_owner->mm == mm | 442 | * new_owner->mm == mm |
| 441 | * new_owner->alloc_lock is held | 443 | * new_owner->alloc_lock is held |
| 442 | */ | 444 | */ |
| 443 | struct task_struct __rcu *owner; | 445 | struct task_struct __rcu *owner; |
| 444 | #endif | 446 | #endif |
| 445 | struct user_namespace *user_ns; | 447 | struct user_namespace *user_ns; |
| 446 | 448 | ||
| 447 | /* store ref to file /proc/<pid>/exe symlink points to */ | 449 | /* store ref to file /proc/<pid>/exe symlink points to */ |
| 448 | struct file __rcu *exe_file; | 450 | struct file __rcu *exe_file; |
| 449 | #ifdef CONFIG_MMU_NOTIFIER | 451 | #ifdef CONFIG_MMU_NOTIFIER |
| 450 | struct mmu_notifier_mm *mmu_notifier_mm; | 452 | struct mmu_notifier_mm *mmu_notifier_mm; |
| 451 | #endif | 453 | #endif |
| 452 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS | 454 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
| 453 | pgtable_t pmd_huge_pte; /* protected by page_table_lock */ | 455 | pgtable_t pmd_huge_pte; /* protected by page_table_lock */ |
| 454 | #endif | ||
| 455 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
| 456 | struct cpumask cpumask_allocation; | ||
| 457 | #endif | 456 | #endif |
| 458 | #ifdef CONFIG_NUMA_BALANCING | 457 | #ifdef CONFIG_NUMA_BALANCING |
| 459 | /* | 458 | /* |
| 460 | * numa_next_scan is the next time that the PTEs will be marked | 459 | * numa_next_scan is the next time that the PTEs will be marked |
| 461 | * pte_numa. NUMA hinting faults will gather statistics and migrate | 460 | * pte_numa. NUMA hinting faults will gather statistics and |
| 462 | * pages to new nodes if necessary. | 461 | * migrate pages to new nodes if necessary. |
| 463 | */ | 462 | */ |
| 464 | unsigned long numa_next_scan; | 463 | unsigned long numa_next_scan; |
| 465 | 464 | ||
| 466 | /* Restart point for scanning and setting pte_numa */ | 465 | /* Restart point for scanning and setting pte_numa */ |
| 467 | unsigned long numa_scan_offset; | 466 | unsigned long numa_scan_offset; |
| 468 | 467 | ||
| 469 | /* numa_scan_seq prevents two threads setting pte_numa */ | 468 | /* numa_scan_seq prevents two threads setting pte_numa */ |
| 470 | int numa_scan_seq; | 469 | int numa_scan_seq; |
| 471 | #endif | 470 | #endif |
| 472 | /* | 471 | /* |
| 473 | * An operation with batched TLB flushing is going on. Anything that | 472 | * An operation with batched TLB flushing is going on. Anything |
| 474 | * can move process memory needs to flush the TLB when moving a | 473 | * that can move process memory needs to flush the TLB when |
| 475 | * PROT_NONE or PROT_NUMA mapped page. | 474 | * moving a PROT_NONE or PROT_NUMA mapped page. |
| 476 | */ | 475 | */ |
| 477 | atomic_t tlb_flush_pending; | 476 | atomic_t tlb_flush_pending; |
| 478 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH | 477 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
| 479 | /* See flush_tlb_batched_pending() */ | 478 | /* See flush_tlb_batched_pending() */ |
| 480 | bool tlb_flush_batched; | 479 | bool tlb_flush_batched; |
| 481 | #endif | 480 | #endif |
| 482 | struct uprobes_state uprobes_state; | 481 | struct uprobes_state uprobes_state; |
| 483 | #ifdef CONFIG_HUGETLB_PAGE | 482 | #ifdef CONFIG_HUGETLB_PAGE |
| 484 | atomic_long_t hugetlb_usage; | 483 | atomic_long_t hugetlb_usage; |
| 485 | #endif | 484 | #endif |
| 486 | struct work_struct async_put_work; | 485 | struct work_struct async_put_work; |
| 487 | 486 | ||
| 488 | #if IS_ENABLED(CONFIG_HMM) | 487 | #if IS_ENABLED(CONFIG_HMM) |
| 489 | /* HMM needs to track a few things per mm */ | 488 | /* HMM needs to track a few things per mm */ |
| 490 | struct hmm *hmm; | 489 | struct hmm *hmm; |
| 491 | #endif | 490 | #endif |
| 492 | } __randomize_layout; | 491 | } __randomize_layout; |
| 492 | |||
| 493 | /* | ||
| 494 | * The mm_cpumask needs to be at the end of mm_struct, because it | ||
| 495 | * is dynamically sized based on nr_cpu_ids. | ||
| 496 | */ | ||
| 497 | unsigned long cpu_bitmap[]; | ||
| 498 | }; | ||
| 493 | 499 | ||
| 494 | extern struct mm_struct init_mm; | 500 | extern struct mm_struct init_mm; |
| 495 | 501 | ||
| 502 | /* Pointer magic because the dynamic array size confuses some compilers. */ | ||
| 496 | static inline void mm_init_cpumask(struct mm_struct *mm) | 503 | static inline void mm_init_cpumask(struct mm_struct *mm) |
| 497 | { | 504 | { |
| 498 | #ifdef CONFIG_CPUMASK_OFFSTACK | 505 | unsigned long cpu_bitmap = (unsigned long)mm; |
| 499 | mm->cpu_vm_mask_var = &mm->cpumask_allocation; | 506 | |
| 500 | #endif | 507 | cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap); |
| 501 | cpumask_clear(mm->cpu_vm_mask_var); | 508 | cpumask_clear((struct cpumask *)cpu_bitmap); |
| 502 | } | 509 | } |
| 503 | 510 | ||
| 504 | /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ | 511 | /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ |
| 505 | static inline cpumask_t *mm_cpumask(struct mm_struct *mm) | 512 | static inline cpumask_t *mm_cpumask(struct mm_struct *mm) |
| 506 | { | 513 | { |
| 507 | return mm->cpu_vm_mask_var; | 514 | return (struct cpumask *)&mm->cpu_bitmap; |
| 508 | } | 515 | } |
| 509 | 516 | ||
| 510 | struct mmu_gather; | 517 | struct mmu_gather; |
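The trailing cpu_bitmap[] only works if every allocation of mm_struct reserves cpumask_size() extra bytes past the end of the structure. A minimal sketch of the idea, using an invented demo_mm type rather than the real allocation code in kernel/fork.c:

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Illustrative only: a struct that ends in a dynamically sized cpumask. */
struct demo_mm {
	int users;
	unsigned long cpu_bitmap[];	/* must stay the last member */
};

static struct demo_mm *demo_mm_alloc(void)
{
	/* cpumask_size() is nr_cpu_ids bits rounded up to unsigned longs. */
	struct demo_mm *mm = kzalloc(sizeof(*mm) + cpumask_size(), GFP_KERNEL);

	if (mm)
		cpumask_clear((struct cpumask *)&mm->cpu_bitmap);
	return mm;
}

mm_cpumask() then reduces to the cast of &mm->cpu_bitmap seen in the hunk above, with no CONFIG_CPUMASK_OFFSTACK special case.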
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index b8d868d23e79..08f9247e9827 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
| @@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void); | |||
| 45 | extern void touch_softlockup_watchdog_sync(void); | 45 | extern void touch_softlockup_watchdog_sync(void); |
| 46 | extern void touch_all_softlockup_watchdogs(void); | 46 | extern void touch_all_softlockup_watchdogs(void); |
| 47 | extern unsigned int softlockup_panic; | 47 | extern unsigned int softlockup_panic; |
| 48 | #else | 48 | |
| 49 | extern int lockup_detector_online_cpu(unsigned int cpu); | ||
| 50 | extern int lockup_detector_offline_cpu(unsigned int cpu); | ||
| 51 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */ | ||
| 49 | static inline void touch_softlockup_watchdog_sched(void) { } | 52 | static inline void touch_softlockup_watchdog_sched(void) { } |
| 50 | static inline void touch_softlockup_watchdog(void) { } | 53 | static inline void touch_softlockup_watchdog(void) { } |
| 51 | static inline void touch_softlockup_watchdog_sync(void) { } | 54 | static inline void touch_softlockup_watchdog_sync(void) { } |
| 52 | static inline void touch_all_softlockup_watchdogs(void) { } | 55 | static inline void touch_all_softlockup_watchdogs(void) { } |
| 53 | #endif | 56 | |
| 57 | #define lockup_detector_online_cpu NULL | ||
| 58 | #define lockup_detector_offline_cpu NULL | ||
| 59 | #endif /* CONFIG_SOFTLOCKUP_DETECTOR */ | ||
| 54 | 60 | ||
| 55 | #ifdef CONFIG_DETECT_HUNG_TASK | 61 | #ifdef CONFIG_DETECT_HUNG_TASK |
| 56 | void reset_hung_task_detector(void); | 62 | void reset_hung_task_detector(void); |
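The point of the NULL fallbacks is that the lockup-detector callbacks can be handed to the CPU-hotplug core unconditionally, since it skips NULL callbacks. A hedged sketch (the real series registers these at a dedicated watchdog hotplug state; CPUHP_AP_ONLINE_DYN and the demo_* name are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/nmi.h>

static int __init demo_watchdog_hp_init(void)
{
	/*
	 * With CONFIG_SOFTLOCKUP_DETECTOR=n both callbacks are the NULL
	 * #defines from <linux/nmi.h> and the hotplug core skips them,
	 * so no #ifdef is needed at the registration site.
	 */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/watchdog:online",
				 lockup_detector_online_cpu,
				 lockup_detector_offline_cpu);
}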
diff --git a/include/linux/pci.h b/include/linux/pci.h index abd5d5e17aee..c133ccfa002e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -368,7 +368,6 @@ struct pci_dev { | |||
| 368 | unsigned int transparent:1; /* Subtractive decode bridge */ | 368 | unsigned int transparent:1; /* Subtractive decode bridge */ |
| 369 | unsigned int multifunction:1; /* Multi-function device */ | 369 | unsigned int multifunction:1; /* Multi-function device */ |
| 370 | 370 | ||
| 371 | unsigned int is_added:1; | ||
| 372 | unsigned int is_busmaster:1; /* Is busmaster */ | 371 | unsigned int is_busmaster:1; /* Is busmaster */ |
| 373 | unsigned int no_msi:1; /* May not use MSI */ | 372 | unsigned int no_msi:1; /* May not use MSI */ |
| 374 | unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ | 373 | unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ |
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index c85704fcdbd2..ee7e987ea1b4 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h | |||
| @@ -95,8 +95,8 @@ struct k_itimer { | |||
| 95 | clockid_t it_clock; | 95 | clockid_t it_clock; |
| 96 | timer_t it_id; | 96 | timer_t it_id; |
| 97 | int it_active; | 97 | int it_active; |
| 98 | int it_overrun; | 98 | s64 it_overrun; |
| 99 | int it_overrun_last; | 99 | s64 it_overrun_last; |
| 100 | int it_requeue_pending; | 100 | int it_requeue_pending; |
| 101 | int it_sigev_notify; | 101 | int it_sigev_notify; |
| 102 | ktime_t it_interval; | 102 | ktime_t it_interval; |
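Widening it_overrun to s64 matters because a short-interval periodic timer can accumulate more than INT_MAX missed expiries between reads. A rough illustration of the accumulation (the demo_* helper is invented; clamping back to int is assumed to happen only when the value is reported to userspace):

#include <linux/ktime.h>

/*
 * Illustrative only: a 1 ns periodic timer blocked for a few seconds
 * overflows an int overrun counter almost immediately, which is why
 * it_overrun and it_overrun_last are now s64.
 */
static s64 demo_accumulate_overrun(s64 overrun, ktime_t missed, ktime_t interval)
{
	return overrun + ktime_divns(missed, ktime_to_ns(interval));
}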
diff --git a/include/linux/pti.h b/include/linux/pti.h index 0174883a935a..1a941efcaa62 100644 --- a/include/linux/pti.h +++ b/include/linux/pti.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <asm/pti.h> | 6 | #include <asm/pti.h> |
| 7 | #else | 7 | #else |
| 8 | static inline void pti_init(void) { } | 8 | static inline void pti_init(void) { } |
| 9 | static inline void pti_finalize(void) { } | ||
| 9 | #endif | 10 | #endif |
| 10 | 11 | ||
| 11 | #endif | 12 | #endif |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 36df6ccbc874..4786c2235b98 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, | |||
| 396 | * @member: the name of the list_head within the struct. | 396 | * @member: the name of the list_head within the struct. |
| 397 | * | 397 | * |
| 398 | * Continue to iterate over list of given type, continuing after | 398 | * Continue to iterate over list of given type, continuing after |
| 399 | * the current position. | 399 | * the current position which must have been in the list when the RCU read |
| 400 | * lock was taken. | ||
| 401 | * This would typically require either that you obtained the node from a | ||
| 402 | * previous walk of the list in the same RCU read-side critical section, or | ||
| 403 | * that you held some sort of non-RCU reference (such as a reference count) | ||
| 404 | * to keep the node alive *and* in the list. | ||
| 405 | * | ||
| 406 | * This iterator is similar to list_for_each_entry_from_rcu() except | ||
| 407 | * this starts after the given position and that one starts at the given | ||
| 408 | * position. | ||
| 400 | */ | 409 | */ |
| 401 | #define list_for_each_entry_continue_rcu(pos, head, member) \ | 410 | #define list_for_each_entry_continue_rcu(pos, head, member) \ |
| 402 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ | 411 | for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
| @@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, | |||
| 411 | * | 420 | * |
| 412 | * Iterate over the tail of a list starting from a given position, | 421 | * Iterate over the tail of a list starting from a given position, |
| 413 | * which must have been in the list when the RCU read lock was taken. | 422 | * which must have been in the list when the RCU read lock was taken. |
| 423 | * This would typically require either that you obtained the node from a | ||
| 424 | * previous walk of the list in the same RCU read-side critical section, or | ||
| 425 | * that you held some sort of non-RCU reference (such as a reference count) | ||
| 426 | * to keep the node alive *and* in the list. | ||
| 427 | * | ||
| 428 | * This iterator is similar to list_for_each_entry_continue_rcu() except | ||
| 429 | * this starts from the given position and that one starts from the position | ||
| 430 | * after the given position. | ||
| 414 | */ | 431 | */ |
| 415 | #define list_for_each_entry_from_rcu(pos, head, member) \ | 432 | #define list_for_each_entry_from_rcu(pos, head, member) \ |
| 416 | for (; &(pos)->member != (head); \ | 433 | for (; &(pos)->member != (head); \ |
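A minimal usage sketch of the distinction the two comments draw, with an invented demo_node type; the caller is assumed to hold a reference that keeps @pos alive and on the list:

#include <linux/rculist.h>
#include <linux/printk.h>

struct demo_node {
	int val;
	struct list_head list;
};

/*
 * Resume a walk at @pos. list_for_each_entry_from_rcu() revisits @pos
 * itself; list_for_each_entry_continue_rcu() would instead start at
 * the entry after @pos.
 */
static void demo_resume_walk(struct demo_node *pos, struct list_head *head)
{
	rcu_read_lock();
	list_for_each_entry_from_rcu(pos, head, list)
		pr_info("visiting %d\n", pos->val);
	rcu_read_unlock();
}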
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 65163aa0bb04..75e5b393cf44 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -64,7 +64,6 @@ void rcu_barrier_tasks(void); | |||
| 64 | 64 | ||
| 65 | void __rcu_read_lock(void); | 65 | void __rcu_read_lock(void); |
| 66 | void __rcu_read_unlock(void); | 66 | void __rcu_read_unlock(void); |
| 67 | void rcu_read_unlock_special(struct task_struct *t); | ||
| 68 | void synchronize_rcu(void); | 67 | void synchronize_rcu(void); |
| 69 | 68 | ||
| 70 | /* | 69 | /* |
| @@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { } | |||
| 159 | } while (0) | 158 | } while (0) |
| 160 | 159 | ||
| 161 | /* | 160 | /* |
| 162 | * Note a voluntary context switch for RCU-tasks benefit. This is a | 161 | * Note a quasi-voluntary context switch for RCU-tasks's benefit. |
| 163 | * macro rather than an inline function to avoid #include hell. | 162 | * This is a macro rather than an inline function to avoid #include hell. |
| 164 | */ | 163 | */ |
| 165 | #ifdef CONFIG_TASKS_RCU | 164 | #ifdef CONFIG_TASKS_RCU |
| 166 | #define rcu_note_voluntary_context_switch_lite(t) \ | 165 | #define rcu_tasks_qs(t) \ |
| 167 | do { \ | 166 | do { \ |
| 168 | if (READ_ONCE((t)->rcu_tasks_holdout)) \ | 167 | if (READ_ONCE((t)->rcu_tasks_holdout)) \ |
| 169 | WRITE_ONCE((t)->rcu_tasks_holdout, false); \ | 168 | WRITE_ONCE((t)->rcu_tasks_holdout, false); \ |
| @@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { } | |||
| 171 | #define rcu_note_voluntary_context_switch(t) \ | 170 | #define rcu_note_voluntary_context_switch(t) \ |
| 172 | do { \ | 171 | do { \ |
| 173 | rcu_all_qs(); \ | 172 | rcu_all_qs(); \ |
| 174 | rcu_note_voluntary_context_switch_lite(t); \ | 173 | rcu_tasks_qs(t); \ |
| 175 | } while (0) | 174 | } while (0) |
| 176 | void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); | 175 | void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); |
| 177 | void synchronize_rcu_tasks(void); | 176 | void synchronize_rcu_tasks(void); |
| 178 | void exit_tasks_rcu_start(void); | 177 | void exit_tasks_rcu_start(void); |
| 179 | void exit_tasks_rcu_finish(void); | 178 | void exit_tasks_rcu_finish(void); |
| 180 | #else /* #ifdef CONFIG_TASKS_RCU */ | 179 | #else /* #ifdef CONFIG_TASKS_RCU */ |
| 181 | #define rcu_note_voluntary_context_switch_lite(t) do { } while (0) | 180 | #define rcu_tasks_qs(t) do { } while (0) |
| 182 | #define rcu_note_voluntary_context_switch(t) rcu_all_qs() | 181 | #define rcu_note_voluntary_context_switch(t) rcu_all_qs() |
| 183 | #define call_rcu_tasks call_rcu_sched | 182 | #define call_rcu_tasks call_rcu_sched |
| 184 | #define synchronize_rcu_tasks synchronize_sched | 183 | #define synchronize_rcu_tasks synchronize_sched |
| @@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { } | |||
| 195 | */ | 194 | */ |
| 196 | #define cond_resched_tasks_rcu_qs() \ | 195 | #define cond_resched_tasks_rcu_qs() \ |
| 197 | do { \ | 196 | do { \ |
| 198 | if (!cond_resched()) \ | 197 | rcu_tasks_qs(current); \ |
| 199 | rcu_note_voluntary_context_switch_lite(current); \ | 198 | cond_resched(); \ |
| 200 | } while (0) | 199 | } while (0) |
| 201 | 200 | ||
| 202 | /* | 201 | /* |
| @@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
| 567 | * This is simply an identity function, but it documents where a pointer | 566 | * This is simply an identity function, but it documents where a pointer |
| 568 | * is handed off from RCU to some other synchronization mechanism, for | 567 | * is handed off from RCU to some other synchronization mechanism, for |
| 569 | * example, reference counting or locking. In C11, it would map to | 568 | * example, reference counting or locking. In C11, it would map to |
| 570 | * kill_dependency(). It could be used as follows: | 569 | * kill_dependency(). It could be used as follows:: |
| 571 | * `` | 570 | * |
| 572 | * rcu_read_lock(); | 571 | * rcu_read_lock(); |
| 573 | * p = rcu_dereference(gp); | 572 | * p = rcu_dereference(gp); |
| 574 | * long_lived = is_long_lived(p); | 573 | * long_lived = is_long_lived(p); |
| @@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
| 579 | * p = rcu_pointer_handoff(p); | 578 | * p = rcu_pointer_handoff(p); |
| 580 | * } | 579 | * } |
| 581 | * rcu_read_unlock(); | 580 | * rcu_read_unlock(); |
| 582 | *`` | ||
| 583 | */ | 581 | */ |
| 584 | #define rcu_pointer_handoff(p) (p) | 582 | #define rcu_pointer_handoff(p) (p) |
| 585 | 583 | ||
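A sketch of how the renamed helper is meant to be used from a long-running loop; the kthread shown is invented:

#include <linux/rcupdate.h>
#include <linux/kthread.h>

static int demo_scan_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... a long scan over some data structure ... */

		/*
		 * Report a Tasks-RCU quiescent state and offer to
		 * reschedule. With the reworked macro the quiescent
		 * state is reported even when cond_resched() is a
		 * no-op, instead of depending on its return value.
		 */
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}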
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 7b3c82e8a625..8d9a0ea8f0b5 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, | |||
| 93 | #define rcu_note_context_switch(preempt) \ | 93 | #define rcu_note_context_switch(preempt) \ |
| 94 | do { \ | 94 | do { \ |
| 95 | rcu_sched_qs(); \ | 95 | rcu_sched_qs(); \ |
| 96 | rcu_note_voluntary_context_switch_lite(current); \ | 96 | rcu_tasks_qs(current); \ |
| 97 | } while (0) | 97 | } while (0) |
| 98 | 98 | ||
| 99 | static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) | 99 | static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) |
diff --git a/include/linux/refcount.h b/include/linux/refcount.h index a685da2c4522..e28cce21bad6 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h | |||
| @@ -3,9 +3,10 @@ | |||
| 3 | #define _LINUX_REFCOUNT_H | 3 | #define _LINUX_REFCOUNT_H |
| 4 | 4 | ||
| 5 | #include <linux/atomic.h> | 5 | #include <linux/atomic.h> |
| 6 | #include <linux/mutex.h> | 6 | #include <linux/compiler.h> |
| 7 | #include <linux/spinlock.h> | 7 | #include <linux/spinlock_types.h> |
| 8 | #include <linux/kernel.h> | 8 | |
| 9 | struct mutex; | ||
| 9 | 10 | ||
| 10 | /** | 11 | /** |
| 11 | * struct refcount_t - variant of atomic_t specialized for reference counts | 12 | * struct refcount_t - variant of atomic_t specialized for reference counts |
| @@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r) | |||
| 42 | return atomic_read(&r->refs); | 43 | return atomic_read(&r->refs); |
| 43 | } | 44 | } |
| 44 | 45 | ||
| 46 | extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r); | ||
| 47 | extern void refcount_add_checked(unsigned int i, refcount_t *r); | ||
| 48 | |||
| 49 | extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r); | ||
| 50 | extern void refcount_inc_checked(refcount_t *r); | ||
| 51 | |||
| 52 | extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r); | ||
| 53 | |||
| 54 | extern __must_check bool refcount_dec_and_test_checked(refcount_t *r); | ||
| 55 | extern void refcount_dec_checked(refcount_t *r); | ||
| 56 | |||
| 45 | #ifdef CONFIG_REFCOUNT_FULL | 57 | #ifdef CONFIG_REFCOUNT_FULL |
| 46 | extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); | ||
| 47 | extern void refcount_add(unsigned int i, refcount_t *r); | ||
| 48 | 58 | ||
| 49 | extern __must_check bool refcount_inc_not_zero(refcount_t *r); | 59 | #define refcount_add_not_zero refcount_add_not_zero_checked |
| 50 | extern void refcount_inc(refcount_t *r); | 60 | #define refcount_add refcount_add_checked |
| 61 | |||
| 62 | #define refcount_inc_not_zero refcount_inc_not_zero_checked | ||
| 63 | #define refcount_inc refcount_inc_checked | ||
| 64 | |||
| 65 | #define refcount_sub_and_test refcount_sub_and_test_checked | ||
| 51 | 66 | ||
| 52 | extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); | 67 | #define refcount_dec_and_test refcount_dec_and_test_checked |
| 68 | #define refcount_dec refcount_dec_checked | ||
| 53 | 69 | ||
| 54 | extern __must_check bool refcount_dec_and_test(refcount_t *r); | ||
| 55 | extern void refcount_dec(refcount_t *r); | ||
| 56 | #else | 70 | #else |
| 57 | # ifdef CONFIG_ARCH_HAS_REFCOUNT | 71 | # ifdef CONFIG_ARCH_HAS_REFCOUNT |
| 58 | # include <asm/refcount.h> | 72 | # include <asm/refcount.h> |
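Under CONFIG_REFCOUNT_FULL the usual get/put pattern now lands in the always-built *_checked implementations via the #defines above; callers are unchanged. A minimal sketch with an invented demo_obj:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t ref;
	/* payload ... */
};

static struct demo_obj *demo_get(struct demo_obj *obj)
{
	/* Fails (returns false) once the count has already hit zero. */
	if (obj && !refcount_inc_not_zero(&obj->ref))
		return NULL;
	return obj;
}

static void demo_put(struct demo_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);
}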
diff --git a/include/linux/sched.h b/include/linux/sched.h index 43731fe51c97..dac5086e3815 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -167,8 +167,8 @@ struct task_group; | |||
| 167 | * need_sleep = false; | 167 | * need_sleep = false; |
| 168 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); | 168 | * wake_up_state(p, TASK_UNINTERRUPTIBLE); |
| 169 | * | 169 | * |
| 170 | * Where wake_up_state() (and all other wakeup primitives) imply enough | 170 | * where wake_up_state() executes a full memory barrier before accessing the |
| 171 | * barriers to order the store of the variable against wakeup. | 171 | * task state. |
| 172 | * | 172 | * |
| 173 | * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, | 173 | * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, |
| 174 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a | 174 | * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a |
| @@ -1017,7 +1017,6 @@ struct task_struct { | |||
| 1017 | u64 last_sum_exec_runtime; | 1017 | u64 last_sum_exec_runtime; |
| 1018 | struct callback_head numa_work; | 1018 | struct callback_head numa_work; |
| 1019 | 1019 | ||
| 1020 | struct list_head numa_entry; | ||
| 1021 | struct numa_group *numa_group; | 1020 | struct numa_group *numa_group; |
| 1022 | 1021 | ||
| 1023 | /* | 1022 | /* |
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 1c1a1512ec55..913488d828cb 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
| @@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size; | |||
| 40 | #ifdef CONFIG_SCHED_DEBUG | 40 | #ifdef CONFIG_SCHED_DEBUG |
| 41 | extern __read_mostly unsigned int sysctl_sched_migration_cost; | 41 | extern __read_mostly unsigned int sysctl_sched_migration_cost; |
| 42 | extern __read_mostly unsigned int sysctl_sched_nr_migrate; | 42 | extern __read_mostly unsigned int sysctl_sched_nr_migrate; |
| 43 | extern __read_mostly unsigned int sysctl_sched_time_avg; | ||
| 44 | 43 | ||
| 45 | int sched_proc_update_handler(struct ctl_table *table, int write, | 44 | int sched_proc_update_handler(struct ctl_table *table, int write, |
| 46 | void __user *buffer, size_t *length, | 45 | void __user *buffer, size_t *length, |
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 411b52e424e1..abe28d5cb3f4 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h | |||
| @@ -9,17 +9,16 @@ | |||
| 9 | #define LINUX_SCHED_CLOCK | 9 | #define LINUX_SCHED_CLOCK |
| 10 | 10 | ||
| 11 | #ifdef CONFIG_GENERIC_SCHED_CLOCK | 11 | #ifdef CONFIG_GENERIC_SCHED_CLOCK |
| 12 | extern void sched_clock_postinit(void); | 12 | extern void generic_sched_clock_init(void); |
| 13 | 13 | ||
| 14 | extern void sched_clock_register(u64 (*read)(void), int bits, | 14 | extern void sched_clock_register(u64 (*read)(void), int bits, |
| 15 | unsigned long rate); | 15 | unsigned long rate); |
| 16 | #else | 16 | #else |
| 17 | static inline void sched_clock_postinit(void) { } | 17 | static inline void generic_sched_clock_init(void) { } |
| 18 | 18 | ||
| 19 | static inline void sched_clock_register(u64 (*read)(void), int bits, | 19 | static inline void sched_clock_register(u64 (*read)(void), int bits, |
| 20 | unsigned long rate) | 20 | unsigned long rate) |
| 21 | { | 21 | { |
| 22 | ; | ||
| 23 | } | 22 | } |
| 24 | #endif | 23 | #endif |
| 25 | 24 | ||
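Only the init hook is renamed; registration is unchanged. A hedged sketch of a clocksource driver registering a free-running counter (the MMIO register, counter width, and rate are all illustrative):

#include <linux/sched_clock.h>
#include <linux/io.h>

static void __iomem *demo_counter_base;	/* illustrative MMIO counter */

static u64 notrace demo_sched_clock_read(void)
{
	/* A free-running 32-bit hardware counter. */
	return readl_relaxed(demo_counter_base);
}

static void __init demo_timer_init(void)
{
	/* 32 valid bits, ticking at 24 MHz (both illustrative). */
	sched_clock_register(demo_sched_clock_read, 32, 24000000);
}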
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index c174844cf663..d0884b525001 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h | |||
| @@ -25,8 +25,6 @@ struct smpboot_thread_data; | |||
| 25 | * parked (cpu offline) | 25 | * parked (cpu offline) |
| 26 | * @unpark: Optional unpark function, called when the thread is | 26 | * @unpark: Optional unpark function, called when the thread is |
| 27 | * unparked (cpu online) | 27 | * unparked (cpu online) |
| 28 | * @cpumask: Internal state. To update which threads are unparked, | ||
| 29 | * call smpboot_update_cpumask_percpu_thread(). | ||
| 30 | * @selfparking: Thread is not parked by the park function. | 28 | * @selfparking: Thread is not parked by the park function. |
| 31 | * @thread_comm: The base name of the thread | 29 | * @thread_comm: The base name of the thread |
| 32 | */ | 30 | */ |
| @@ -40,23 +38,12 @@ struct smp_hotplug_thread { | |||
| 40 | void (*cleanup)(unsigned int cpu, bool online); | 38 | void (*cleanup)(unsigned int cpu, bool online); |
| 41 | void (*park)(unsigned int cpu); | 39 | void (*park)(unsigned int cpu); |
| 42 | void (*unpark)(unsigned int cpu); | 40 | void (*unpark)(unsigned int cpu); |
| 43 | cpumask_var_t cpumask; | ||
| 44 | bool selfparking; | 41 | bool selfparking; |
| 45 | const char *thread_comm; | 42 | const char *thread_comm; |
| 46 | }; | 43 | }; |
| 47 | 44 | ||
| 48 | int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread, | 45 | int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); |
| 49 | const struct cpumask *cpumask); | ||
| 50 | |||
| 51 | static inline int | ||
| 52 | smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) | ||
| 53 | { | ||
| 54 | return smpboot_register_percpu_thread_cpumask(plug_thread, | ||
| 55 | cpu_possible_mask); | ||
| 56 | } | ||
| 57 | 46 | ||
| 58 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); | 47 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); |
| 59 | void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, | ||
| 60 | const struct cpumask *); | ||
| 61 | 48 | ||
| 62 | #endif | 49 | #endif |
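With the cpumask plumbing gone, registration always covers cpu_possible_mask. A sketch of the remaining API, assuming the .store/.thread_should_run/.thread_fn fields from the part of the structure not shown in this hunk (demo_* names invented):

#include <linux/smpboot.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct task_struct *, demo_thread);

static int demo_should_run(unsigned int cpu)
{
	return 0;	/* illustrative: nothing pending on this CPU */
}

static void demo_thread_fn(unsigned int cpu)
{
	/* per-CPU work runs here */
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_thread,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	/* One thread per possible CPU; no cpumask argument anymore. */
	return smpboot_register_percpu_thread(&demo_threads);
}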
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index fd57888d4942..3190997df9ca 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -114,29 +114,48 @@ do { \ | |||
| 114 | #endif /*arch_spin_is_contended*/ | 114 | #endif /*arch_spin_is_contended*/ |
| 115 | 115 | ||
| 116 | /* | 116 | /* |
| 117 | * This barrier must provide two things: | 117 | * smp_mb__after_spinlock() provides the equivalent of a full memory barrier |
| 118 | * between program-order earlier lock acquisitions and program-order later | ||
| 119 | * memory accesses. | ||
| 118 | * | 120 | * |
| 119 | * - it must guarantee a STORE before the spin_lock() is ordered against a | 121 | * This guarantees that the following two properties hold: |
| 120 | * LOAD after it, see the comments at its two usage sites. | ||
| 121 | * | 122 | * |
| 122 | * - it must ensure the critical section is RCsc. | 123 | * 1) Given the snippet: |
| 123 | * | 124 | * |
| 124 | * The latter is important for cases where we observe values written by other | 125 | * { X = 0; Y = 0; } |
| 125 | * CPUs in spin-loops, without barriers, while being subject to scheduling. | ||
| 126 | * | 126 | * |
| 127 | * CPU0 CPU1 CPU2 | 127 | * CPU0 CPU1 |
| 128 | * | 128 | * |
| 129 | * for (;;) { | 129 | * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1); |
| 130 | * if (READ_ONCE(X)) | 130 | * spin_lock(S); smp_mb(); |
| 131 | * break; | 131 | * smp_mb__after_spinlock(); r1 = READ_ONCE(X); |
| 132 | * } | 132 | * r0 = READ_ONCE(Y); |
| 133 | * X=1 | 133 | * spin_unlock(S); |
| 134 | * <sched-out> | ||
| 135 | * <sched-in> | ||
| 136 | * r = X; | ||
| 137 | * | 134 | * |
| 138 | * without transitivity it could be that CPU1 observes X!=0 breaks the loop, | 135 | * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0) |
| 139 | * we get migrated and CPU2 sees X==0. | 136 | * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments |
| 137 | * preceding the call to smp_mb__after_spinlock() in __schedule() and in | ||
| 138 | * try_to_wake_up(). | ||
| 139 | * | ||
| 140 | * 2) Given the snippet: | ||
| 141 | * | ||
| 142 | * { X = 0; Y = 0; } | ||
| 143 | * | ||
| 144 | * CPU0 CPU1 CPU2 | ||
| 145 | * | ||
| 146 | * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y); | ||
| 147 | * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb(); | ||
| 148 | * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X); | ||
| 149 | * WRITE_ONCE(Y, 1); | ||
| 150 | * spin_unlock(S); | ||
| 151 | * | ||
| 152 | * it is forbidden that CPU0's critical section executes before CPU1's | ||
| 153 | * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1) | ||
| 154 | * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments | ||
| 155 | * preceding the calls to smp_rmb() in try_to_wake_up() for similar | ||
| 156 | * snippets but "projected" onto two CPUs. | ||
| 157 | * | ||
| 158 | * Property (2) upgrades the lock to an RCsc lock. | ||
| 140 | * | 159 | * |
| 141 | * Since most load-store architectures implement ACQUIRE with an smp_mb() after | 160 | * Since most load-store architectures implement ACQUIRE with an smp_mb() after |
| 142 | * the LL/SC loop, they need no further barriers. Similarly all our TSO | 161 | * the LL/SC loop, they need no further barriers. Similarly all our TSO |
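A sketch of property (1) as it appears in practice, loosely modeled on the __schedule()/try_to_wake_up() pairing the comment cites (demo_* names invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_cond;

/* Waker side: publish the condition, then look at the sleeper. */
static void demo_wake_side(void)
{
	WRITE_ONCE(demo_cond, 1);
	smp_mb();			/* pairs with smp_mb__after_spinlock() */
	/* ... read the sleeper's state and wake it if needed ... */
}

/* Sleeper side: take the lock, then upgrade it to a full barrier. */
static void demo_sleep_side(void)
{
	spin_lock(&demo_lock);
	smp_mb__after_spinlock();	/* order the lock against later loads */
	if (!READ_ONCE(demo_cond)) {
		/* ... safe to go to sleep: the waker must see us ... */
	}
	spin_unlock(&demo_lock);
}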
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 91494d7e8e41..3e72a291c401 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
| @@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | |||
| 195 | return retval; | 195 | return retval; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | /* Used by tracing, cannot be traced and cannot invoke lockdep. */ | ||
| 199 | static inline notrace int | ||
| 200 | srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) | ||
| 201 | { | ||
| 202 | int retval; | ||
| 203 | |||
| 204 | retval = __srcu_read_lock(sp); | ||
| 205 | return retval; | ||
| 206 | } | ||
| 207 | |||
| 198 | /** | 208 | /** |
| 199 | * srcu_read_unlock - unregister an old reader from an SRCU-protected structure. | 209 | * srcu_read_unlock - unregister an old reader from an SRCU-protected structure. |
| 200 | * @sp: srcu_struct in which to unregister the old reader. | 210 | * @sp: srcu_struct in which to unregister the old reader. |
| @@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) | |||
| 209 | __srcu_read_unlock(sp, idx); | 219 | __srcu_read_unlock(sp, idx); |
| 210 | } | 220 | } |
| 211 | 221 | ||
| 222 | /* Used by tracing, cannot be traced and cannot call lockdep. */ | ||
| 223 | static inline notrace void | ||
| 224 | srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) | ||
| 225 | { | ||
| 226 | __srcu_read_unlock(sp, idx); | ||
| 227 | } | ||
| 228 | |||
| 212 | /** | 229 | /** |
| 213 | * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock | 230 | * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock |
| 214 | * | 231 | * |
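A sketch of the intended caller: tracing code that must not recurse into the tracer or into lockdep (the SRCU domain and hook are invented):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(demo_srcu);	/* illustrative SRCU domain */

static void notrace demo_trace_hook(void)
{
	int idx;

	/* No lockdep, no function tracing: usable from tracing context. */
	idx = srcu_read_lock_notrace(&demo_srcu);
	/* ... dereference SRCU-protected tracer state ... */
	srcu_read_unlock_notrace(&demo_srcu, idx);
}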
diff --git a/include/linux/swait.h b/include/linux/swait.h index bf8cb0dee23c..73e06e9986d4 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | * wait-queues, but the semantics are actually completely different, and | 16 | * wait-queues, but the semantics are actually completely different, and |
| 17 | * every single user we have ever had has been buggy (or pointless). | 17 | * every single user we have ever had has been buggy (or pointless). |
| 18 | * | 18 | * |
| 19 | * A "swake_up()" only wakes up _one_ waiter, which is not at all what | 19 | * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what |
| 20 | * "wake_up()" does, and has led to problems. In other cases, it has | 20 | * "wake_up()" does, and has led to problems. In other cases, it has |
| 21 | * been fine, because there's only ever one waiter (kvm), but in that | 21 | * been fine, because there's only ever one waiter (kvm), but in that |
| 22 | * case the whole "simple" wait-queue is just pointless to begin with, | 22 | * case the whole "simple" wait-queue is just pointless to begin with, |
| @@ -38,8 +38,8 @@ | |||
| 38 | * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right | 38 | * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right |
| 39 | * sleeper state. | 39 | * sleeper state. |
| 40 | * | 40 | * |
| 41 | * - the exclusive mode; because this requires preserving the list order | 41 | * - the !exclusive mode; because that leads to O(n) wakeups, everything is |
| 42 | * and this is hard. | 42 | * exclusive. |
| 43 | * | 43 | * |
| 44 | * - custom wake callback functions; because you cannot give any guarantees | 44 | * - custom wake callback functions; because you cannot give any guarantees |
| 45 | * about random code. This also allows swait to be used in RT, such that | 45 | * about random code. This also allows swait to be used in RT, such that |
| @@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name | |||
| 115 | * CPU0 - waker CPU1 - waiter | 115 | * CPU0 - waker CPU1 - waiter |
| 116 | * | 116 | * |
| 117 | * for (;;) { | 117 | * for (;;) { |
| 118 | * @cond = true; prepare_to_swait(&wq_head, &wait, state); | 118 | * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state); |
| 119 | * smp_mb(); // smp_mb() from set_current_state() | 119 | * smp_mb(); // smp_mb() from set_current_state() |
| 120 | * if (swait_active(wq_head)) if (@cond) | 120 | * if (swait_active(wq_head)) if (@cond) |
| 121 | * wake_up(wq_head); break; | 121 | * wake_up(wq_head); break; |
| @@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq) | |||
| 157 | return swait_active(wq); | 157 | return swait_active(wq); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | extern void swake_up(struct swait_queue_head *q); | 160 | extern void swake_up_one(struct swait_queue_head *q); |
| 161 | extern void swake_up_all(struct swait_queue_head *q); | 161 | extern void swake_up_all(struct swait_queue_head *q); |
| 162 | extern void swake_up_locked(struct swait_queue_head *q); | 162 | extern void swake_up_locked(struct swait_queue_head *q); |
| 163 | 163 | ||
| 164 | extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); | 164 | extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); |
| 165 | extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); | ||
| 166 | extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); | 165 | extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); |
| 167 | 166 | ||
| 168 | extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); | 167 | extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); |
| 169 | extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); | 168 | extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); |
| 170 | 169 | ||
| 171 | /* as per ___wait_event() but for swait, therefore "exclusive == 0" */ | 170 | /* as per ___wait_event() but for swait, therefore "exclusive == 1" */ |
| 172 | #define ___swait_event(wq, condition, state, ret, cmd) \ | 171 | #define ___swait_event(wq, condition, state, ret, cmd) \ |
| 173 | ({ \ | 172 | ({ \ |
| 173 | __label__ __out; \ | ||
| 174 | struct swait_queue __wait; \ | 174 | struct swait_queue __wait; \ |
| 175 | long __ret = ret; \ | 175 | long __ret = ret; \ |
| 176 | \ | 176 | \ |
| @@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); | |||
| 183 | \ | 183 | \ |
| 184 | if (___wait_is_interruptible(state) && __int) { \ | 184 | if (___wait_is_interruptible(state) && __int) { \ |
| 185 | __ret = __int; \ | 185 | __ret = __int; \ |
| 186 | break; \ | 186 | goto __out; \ |
| 187 | } \ | 187 | } \ |
| 188 | \ | 188 | \ |
| 189 | cmd; \ | 189 | cmd; \ |
| 190 | } \ | 190 | } \ |
| 191 | finish_swait(&wq, &__wait); \ | 191 | finish_swait(&wq, &__wait); \ |
| 192 | __ret; \ | 192 | __out: __ret; \ |
| 193 | }) | 193 | }) |
| 194 | 194 | ||
| 195 | #define __swait_event(wq, condition) \ | 195 | #define __swait_event(wq, condition) \ |
| 196 | (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ | 196 | (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ |
| 197 | schedule()) | 197 | schedule()) |
| 198 | 198 | ||
| 199 | #define swait_event(wq, condition) \ | 199 | #define swait_event_exclusive(wq, condition) \ |
| 200 | do { \ | 200 | do { \ |
| 201 | if (condition) \ | 201 | if (condition) \ |
| 202 | break; \ | 202 | break; \ |
| @@ -208,7 +208,7 @@ do { \ | |||
| 208 | TASK_UNINTERRUPTIBLE, timeout, \ | 208 | TASK_UNINTERRUPTIBLE, timeout, \ |
| 209 | __ret = schedule_timeout(__ret)) | 209 | __ret = schedule_timeout(__ret)) |
| 210 | 210 | ||
| 211 | #define swait_event_timeout(wq, condition, timeout) \ | 211 | #define swait_event_timeout_exclusive(wq, condition, timeout) \ |
| 212 | ({ \ | 212 | ({ \ |
| 213 | long __ret = timeout; \ | 213 | long __ret = timeout; \ |
| 214 | if (!___wait_cond_timeout(condition)) \ | 214 | if (!___wait_cond_timeout(condition)) \ |
| @@ -220,7 +220,7 @@ do { \ | |||
| 220 | ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ | 220 | ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ |
| 221 | schedule()) | 221 | schedule()) |
| 222 | 222 | ||
| 223 | #define swait_event_interruptible(wq, condition) \ | 223 | #define swait_event_interruptible_exclusive(wq, condition) \ |
| 224 | ({ \ | 224 | ({ \ |
| 225 | int __ret = 0; \ | 225 | int __ret = 0; \ |
| 226 | if (!(condition)) \ | 226 | if (!(condition)) \ |
| @@ -233,7 +233,7 @@ do { \ | |||
| 233 | TASK_INTERRUPTIBLE, timeout, \ | 233 | TASK_INTERRUPTIBLE, timeout, \ |
| 234 | __ret = schedule_timeout(__ret)) | 234 | __ret = schedule_timeout(__ret)) |
| 235 | 235 | ||
| 236 | #define swait_event_interruptible_timeout(wq, condition, timeout) \ | 236 | #define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\ |
| 237 | ({ \ | 237 | ({ \ |
| 238 | long __ret = timeout; \ | 238 | long __ret = timeout; \ |
| 239 | if (!___wait_cond_timeout(condition)) \ | 239 | if (!___wait_cond_timeout(condition)) \ |
| @@ -246,7 +246,7 @@ do { \ | |||
| 246 | (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) | 246 | (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) |
| 247 | 247 | ||
| 248 | /** | 248 | /** |
| 249 | * swait_event_idle - wait without system load contribution | 249 | * swait_event_idle_exclusive - wait without system load contribution |
| 250 | * @wq: the waitqueue to wait on | 250 | * @wq: the waitqueue to wait on |
| 251 | * @condition: a C expression for the event to wait for | 251 | * @condition: a C expression for the event to wait for |
| 252 | * | 252 | * |
| @@ -257,7 +257,7 @@ do { \ | |||
| 257 | * condition and doesn't want to contribute to system load. Signals are | 257 | * condition and doesn't want to contribute to system load. Signals are |
| 258 | * ignored. | 258 | * ignored. |
| 259 | */ | 259 | */ |
| 260 | #define swait_event_idle(wq, condition) \ | 260 | #define swait_event_idle_exclusive(wq, condition) \ |
| 261 | do { \ | 261 | do { \ |
| 262 | if (condition) \ | 262 | if (condition) \ |
| 263 | break; \ | 263 | break; \ |
| @@ -270,7 +270,7 @@ do { \ | |||
| 270 | __ret = schedule_timeout(__ret)) | 270 | __ret = schedule_timeout(__ret)) |
| 271 | 271 | ||
| 272 | /** | 272 | /** |
| 273 | * swait_event_idle_timeout - wait up to timeout without load contribution | 273 | * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution |
| 274 | * @wq: the waitqueue to wait on | 274 | * @wq: the waitqueue to wait on |
| 275 | * @condition: a C expression for the event to wait for | 275 | * @condition: a C expression for the event to wait for |
| 276 | * @timeout: timeout at which we'll give up in jiffies | 276 | * @timeout: timeout at which we'll give up in jiffies |
| @@ -288,7 +288,7 @@ do { \ | |||
| 288 | * or the remaining jiffies (at least 1) if the @condition evaluated | 288 | * or the remaining jiffies (at least 1) if the @condition evaluated |
| 289 | * to %true before the @timeout elapsed. | 289 | * to %true before the @timeout elapsed. |
| 290 | */ | 290 | */ |
| 291 | #define swait_event_idle_timeout(wq, condition, timeout) \ | 291 | #define swait_event_idle_timeout_exclusive(wq, condition, timeout) \ |
| 292 | ({ \ | 292 | ({ \ |
| 293 | long __ret = timeout; \ | 293 | long __ret = timeout; \ |
| 294 | if (!___wait_cond_timeout(condition)) \ | 294 | if (!___wait_cond_timeout(condition)) \ |
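A minimal producer/consumer sketch under the new names, which make the one-waiter-at-a-time semantics explicit (demo_* names invented):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

/* Consumer: sleep until the condition holds; waiters are exclusive. */
static void demo_wait(void)
{
	swait_event_exclusive(demo_wq, READ_ONCE(demo_ready));
}

/* Producer: publish the condition, then wake exactly one waiter. */
static void demo_signal(void)
{
	WRITE_ONCE(demo_ready, 1);
	swake_up_one(&demo_wq);
}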
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 5c1a0933768e..ebb2f24027e8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -506,9 +506,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, | |||
| 506 | /* fs/timerfd.c */ | 506 | /* fs/timerfd.c */ |
| 507 | asmlinkage long sys_timerfd_create(int clockid, int flags); | 507 | asmlinkage long sys_timerfd_create(int clockid, int flags); |
| 508 | asmlinkage long sys_timerfd_settime(int ufd, int flags, | 508 | asmlinkage long sys_timerfd_settime(int ufd, int flags, |
| 509 | const struct itimerspec __user *utmr, | 509 | const struct __kernel_itimerspec __user *utmr, |
| 510 | struct itimerspec __user *otmr); | 510 | struct __kernel_itimerspec __user *otmr); |
| 511 | asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); | 511 | asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); |
| 512 | 512 | ||
| 513 | /* fs/utimes.c */ | 513 | /* fs/utimes.c */ |
| 514 | asmlinkage long sys_utimensat(int dfd, const char __user *filename, | 514 | asmlinkage long sys_utimensat(int dfd, const char __user *filename, |
| @@ -573,10 +573,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock, | |||
| 573 | struct sigevent __user *timer_event_spec, | 573 | struct sigevent __user *timer_event_spec, |
| 574 | timer_t __user * created_timer_id); | 574 | timer_t __user * created_timer_id); |
| 575 | asmlinkage long sys_timer_gettime(timer_t timer_id, | 575 | asmlinkage long sys_timer_gettime(timer_t timer_id, |
| 576 | struct itimerspec __user *setting); | 576 | struct __kernel_itimerspec __user *setting); |
| 577 | asmlinkage long sys_timer_getoverrun(timer_t timer_id); | 577 | asmlinkage long sys_timer_getoverrun(timer_t timer_id); |
| 578 | asmlinkage long sys_timer_settime(timer_t timer_id, int flags, | 578 | asmlinkage long sys_timer_settime(timer_t timer_id, int flags, |
| 579 | const struct itimerspec __user *new_setting, | 579 | const struct __kernel_itimerspec __user *new_setting, |
| 580 | struct itimerspec __user *old_setting); | 580 | struct itimerspec __user *old_setting); |
| 581 | asmlinkage long sys_timer_delete(timer_t timer_id); | 581 | asmlinkage long sys_timer_delete(timer_t timer_id); |
| 582 | asmlinkage long sys_clock_settime(clockid_t which_clock, | 582 | asmlinkage long sys_clock_settime(clockid_t which_clock, |
diff --git a/include/linux/time.h b/include/linux/time.h index aed74463592d..27d83fd2ae61 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts, | |||
| 14 | int put_timespec64(const struct timespec64 *ts, | 14 | int put_timespec64(const struct timespec64 *ts, |
| 15 | struct __kernel_timespec __user *uts); | 15 | struct __kernel_timespec __user *uts); |
| 16 | int get_itimerspec64(struct itimerspec64 *it, | 16 | int get_itimerspec64(struct itimerspec64 *it, |
| 17 | const struct itimerspec __user *uit); | 17 | const struct __kernel_itimerspec __user *uit); |
| 18 | int put_itimerspec64(const struct itimerspec64 *it, | 18 | int put_itimerspec64(const struct itimerspec64 *it, |
| 19 | struct itimerspec __user *uit); | 19 | struct __kernel_itimerspec __user *uit); |
| 20 | 20 | ||
| 21 | extern time64_t mktime64(const unsigned int year, const unsigned int mon, | 21 | extern time64_t mktime64(const unsigned int year, const unsigned int mon, |
| 22 | const unsigned int day, const unsigned int hour, | 22 | const unsigned int day, const unsigned int hour, |
diff --git a/include/linux/time64.h b/include/linux/time64.h index 0a7b2f79cec7..05634afba0db 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h | |||
| @@ -12,6 +12,7 @@ typedef __u64 timeu64_t; | |||
| 12 | */ | 12 | */ |
| 13 | #ifndef CONFIG_64BIT_TIME | 13 | #ifndef CONFIG_64BIT_TIME |
| 14 | #define __kernel_timespec timespec | 14 | #define __kernel_timespec timespec |
| 15 | #define __kernel_itimerspec itimerspec | ||
| 15 | #endif | 16 | #endif |
| 16 | 17 | ||
| 17 | #include <uapi/linux/time.h> | 18 | #include <uapi/linux/time.h> |
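With the alias in place, syscall-style code can convert through itimerspec64 regardless of CONFIG_64BIT_TIME. A hedged sketch using the two helpers whose prototypes changed above (demo_settime is invented, not a real syscall body):

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

static long demo_settime(const struct __kernel_itimerspec __user *uval,
			 struct __kernel_itimerspec __user *uold)
{
	struct itimerspec64 val, old;

	if (get_itimerspec64(&val, uval))
		return -EFAULT;

	/* ... program the timer from the y2038-safe 'val', fill 'old' ... */
	old = val;	/* placeholder */

	if (uold && put_itimerspec64(&old, uold))
		return -EFAULT;
	return 0;
}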
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 86bc2026efce..e79861418fd7 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
| @@ -177,7 +177,7 @@ static inline time64_t ktime_get_clocktai_seconds(void) | |||
| 177 | extern bool timekeeping_rtc_skipsuspend(void); | 177 | extern bool timekeeping_rtc_skipsuspend(void); |
| 178 | extern bool timekeeping_rtc_skipresume(void); | 178 | extern bool timekeeping_rtc_skipresume(void); |
| 179 | 179 | ||
| 180 | extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); | 180 | extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); |
| 181 | 181 | ||
| 182 | /* | 182 | /* |
| 183 | * struct system_time_snapshot - simultaneous raw/real time capture with | 183 | * struct system_time_snapshot - simultaneous raw/real time capture with |
| @@ -243,7 +243,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); | |||
| 243 | extern int persistent_clock_is_local; | 243 | extern int persistent_clock_is_local; |
| 244 | 244 | ||
| 245 | extern void read_persistent_clock64(struct timespec64 *ts); | 245 | extern void read_persistent_clock64(struct timespec64 *ts); |
| 246 | extern void read_boot_clock64(struct timespec64 *ts); | 246 | void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock, |
| 247 | struct timespec64 *boot_offset); | ||
| 247 | extern int update_persistent_clock64(struct timespec64 now); | 248 | extern int update_persistent_clock64(struct timespec64 now); |
| 248 | 249 | ||
| 249 | /* | 250 | /* |
diff --git a/include/linux/torture.h b/include/linux/torture.h index 66272862070b..61dfd93b6ee4 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h | |||
| @@ -64,6 +64,8 @@ struct torture_random_state { | |||
| 64 | long trs_count; | 64 | long trs_count; |
| 65 | }; | 65 | }; |
| 66 | #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } | 66 | #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } |
| 67 | #define DEFINE_TORTURE_RANDOM_PERCPU(name) \ | ||
| 68 | DEFINE_PER_CPU(struct torture_random_state, name) | ||
| 67 | unsigned long torture_random(struct torture_random_state *trsp); | 69 | unsigned long torture_random(struct torture_random_state *trsp); |
| 68 | 70 | ||
| 69 | /* Task shuffler, which causes CPUs to occasionally go idle. */ | 71 | /* Task shuffler, which causes CPUs to occasionally go idle. */ |
| @@ -79,7 +81,7 @@ void stutter_wait(const char *title); | |||
| 79 | int torture_stutter_init(int s); | 81 | int torture_stutter_init(int s); |
| 80 | 82 | ||
| 81 | /* Initialization and cleanup. */ | 83 | /* Initialization and cleanup. */ |
| 82 | bool torture_init_begin(char *ttype, bool v); | 84 | bool torture_init_begin(char *ttype, int v); |
| 83 | void torture_init_end(void); | 85 | void torture_init_end(void); |
| 84 | bool torture_cleanup_begin(void); | 86 | bool torture_cleanup_begin(void); |
| 85 | void torture_cleanup_end(void); | 87 | void torture_cleanup_end(void); |
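A sketch of the new per-CPU convenience macro in use (demo_* names invented):

#include <linux/torture.h>
#include <linux/percpu.h>

/* One torture_random_state per CPU, so no cross-CPU locking is needed. */
static DEFINE_TORTURE_RANDOM_PERCPU(demo_rand);

static unsigned long demo_random_delay(void)
{
	/* Each CPU perturbs only its own state. */
	return torture_random(this_cpu_ptr(&demo_rand)) % 1000;
}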
