Diffstat (limited to 'include')
 76 files changed, 1331 insertions(+), 540 deletions(-)
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index f5c40b0fadc2..e6a83d712ef6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
 #define smp_read_barrier_depends()	do { } while (0)
 #endif
 
-#ifndef set_mb
-#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
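
The smp_store_mb() above replaces set_mb() and routes the store through WRITE_ONCE(), so the compiler can neither tear nor elide the store that precedes the full barrier. A minimal sketch of the pattern it serves, publishing a flag before reading other shared state (all names below are illustrative, not part of this diff):

	static int need_wakeup;
	static int other_side_sleeping;

	static void wake_up_other_side(void);	/* hypothetical helper */

	static void publish_and_check(void)
	{
		/*
		 * Store the flag, then a full barrier: the load below cannot
		 * be reordered before the WRITE_ONCE() store.
		 */
		smp_store_mb(need_wakeup, 1);

		if (READ_ONCE(other_side_sleeping))
			wake_up_other_side();
	}
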
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 811fb1e9b061..3766ab34aa45 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -86,9 +86,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 
 /*
  * Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
  */
 #include <asm-generic/cmpxchg-local.h>
 
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index b59b5a52637e..e56272c919b5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -8,8 +8,7 @@
 #ifndef CONFIG_SMP
 /*
  * The following implementation only for uniprocessor machines.
- * For UP, it's relies on the fact that pagefault_disable() also disables
- * preemption to ensure mutual exclusion.
+ * It relies on preempt_disable() ensuring mutual exclusion.
  *
  */
 
@@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
+	preempt_disable();
 	pagefault_disable();
 
 	ret = -EFAULT;
@@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 
 out_pagefault_enable:
 	pagefault_enable();
+	preempt_enable();
 
 	if (ret == 0) {
 		switch (cmp) {
@@ -106,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	u32 val;
 
+	preempt_disable();
 	if (unlikely(get_user(val, uaddr) != 0))
 		return -EFAULT;
 
@@ -113,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	*uval = val;
+	preempt_enable();
 
 	return 0;
 }
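
The preempt_disable()/preempt_enable() pairs added above restore UP atomicity now that pagefault_disable() no longer implies disabled preemption; the user-space read-modify-write must not be interleaved with another task. The bracketing, reduced to its skeleton (a sketch, not the exact kernel code):

	static int up_atomic_rmw(u32 __user *uaddr, u32 newval, u32 *oldval)
	{
		int ret = -EFAULT;

		preempt_disable();	/* UP: no other task can run in between */
		pagefault_disable();	/* faults fail fast instead of sleeping */
		if (!get_user(*oldval, uaddr) && !put_user(newval, uaddr))
			ret = 0;
		pagefault_enable();
		preempt_enable();
		return ret;
	}
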
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 9db042304df3..f56094cfdeff 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -769,6 +769,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
 }
 #endif
 
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+	return ioremap_nocache(offset, size);
+}
+#endif
+
 #ifndef ioremap_wc
 #define ioremap_wc ioremap_wc
 static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
@@ -777,8 +785,17 @@ static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
 }
 #endif
 
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+	return ioremap_nocache(offset, size);
+}
+#endif
+
 #ifndef iounmap
 #define iounmap iounmap
+
 static inline void iounmap(void __iomem *addr)
 {
 }
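
With the fallbacks above, ioremap_uc() and ioremap_wt() exist on every architecture, degrading to an uncached mapping where no dedicated memory type is implemented, so drivers can call them unconditionally. A hypothetical probe-path fragment:

	/* Map device memory write-through where supported, uncached otherwise. */
	static void __iomem *map_device_mem(struct resource *res)
	{
		return ioremap_wt(res->start, resource_size(res));
	}
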
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 1b41011643a5..d8f8622fa044 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -66,6 +66,10 @@ extern void ioport_unmap(void __iomem *);
 #define ioremap_wc ioremap_nocache
 #endif
 
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
 #ifdef CONFIG_PCI
 /* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 39f1d6a2b04d..bd910ceaccfa 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -262,6 +262,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #define pgprot_writecombine pgprot_noncached
 #endif
 
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
 #ifndef pgprot_device
 #define pgprot_device pgprot_noncached
 #endif
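
pgprot_writethrough() gets the same generic fallback as pgprot_writecombine(), so common code needs no #ifdef. A sketch of a frame-buffer style mmap handler using it (the driver, its base address, and the pfn computation are hypothetical):

	static int myfb_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = myfb_base >> PAGE_SHIFT;	/* hypothetical */

		/* Write-through where the arch supports it, uncached otherwise. */
		vma->vm_page_prot = pgprot_writethrough(vma->vm_page_prot);
		return remap_pfn_range(vma, vma->vm_start, pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
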
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index eb6f9e6c3075..d0a7a4753db2 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -79,11 +79,8 @@ static __always_inline bool should_resched(void)
 #ifdef CONFIG_PREEMPT
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern asmlinkage void preempt_schedule_context(void);
-#define __preempt_schedule_context() preempt_schedule_context()
-#endif
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
 #endif /* CONFIG_PREEMPT */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000000..83bfb87f5bf1
--- /dev/null
+++ b/include/asm-generic/qspinlock.h
@@ -0,0 +1,139 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+	return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ *      locked wrt the lockref code to avoid lock stealing by the lockref
+ *      code and change things underneath the lock. This also allows some
+ *      optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+	return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+	if (!atomic_read(&lock->val) &&
+	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+		return 1;
+	return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+	u32 val;
+
+	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	if (likely(val == 0))
+		return;
+	queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	/*
+	 * smp_mb__before_atomic() in order to guarantee release semantics
+	 */
+	smp_mb__before_atomic();
+	atomic_sub(_Q_LOCKED_VAL, &lock->val);
+}
+#endif
+
+/**
+ * queued_spin_unlock_wait - wait until current lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+}
+
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
+/*
+ * Initializer
+ */
+#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
+#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
+#define arch_spin_lock(l)		queued_spin_lock(l)
+#define arch_spin_trylock(l)		queued_spin_trylock(l)
+#define arch_spin_unlock(l)		queued_spin_unlock(l)
+#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
+#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
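
The whole lock is a single 32-bit atomic, so the uncontended fastpath is one cmpxchg from 0 to _Q_LOCKED_VAL and unlock is a plain subtraction of that value. A user-space model of just that fastpath in C11 atomics (illustrative only; the queued slowpath is what makes the real lock fair under contention):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define Q_LOCKED_VAL 1U			/* mirrors _Q_LOCKED_VAL */

	struct qspinlock_model { _Atomic unsigned int val; };

	static bool model_trylock(struct qspinlock_model *lock)
	{
		unsigned int expected = 0;

		/* Acquire only if no holder, no pending bit and no tail. */
		return atomic_compare_exchange_strong(&lock->val, &expected,
						      Q_LOCKED_VAL);
	}

	static void model_unlock(struct qspinlock_model *lock)
	{
		/* Clear the locked byte with release semantics. */
		atomic_fetch_sub_explicit(&lock->val, Q_LOCKED_VAL,
					  memory_order_release);
	}
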
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000000..85f888e86761
--- /dev/null
+++ b/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,79 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+	atomic_t	val;
+} arch_spinlock_t;
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ *  0- 7: locked byte
+ *     8: pending
+ *  9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ *  0- 7: locked byte
+ *     8: pending
+ *  9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
+				      << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET	0
+#define _Q_LOCKED_BITS		8
+#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS		8
+#else
+#define _Q_PENDING_BITS		1
+#endif
+#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS	2
+#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
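
Tail encode/decode with these masks is pure shift-and-mask. A small runnable user-space check of the NR_CPUS < 16K layout (offsets hardcoded for illustration):

	#include <stdio.h>

	/* NR_CPUS < 16K: locked 0-7, pending 8, tail idx 16-17, tail cpu 18-31. */
	#define Q_TAIL_IDX_OFFSET 16
	#define Q_TAIL_CPU_OFFSET 18

	int main(void)
	{
		unsigned int val = (5U + 1) << Q_TAIL_CPU_OFFSET /* tail cpu 5, stored +1 */
				 | 1U << Q_TAIL_IDX_OFFSET	 /* per-cpu queue node 1 */
				 | 1U;				 /* locked byte */

		printf("locked=%u tail_idx=%u tail_cpu=%d\n",
		       val & 0xff,
		       (val >> Q_TAIL_IDX_OFFSET) & 0x3,
		       (int)((val >> Q_TAIL_CPU_OFFSET) & 0x3fff) - 1);
		return 0;	/* prints: locked=1 tail_idx=1 tail_cpu=5 */
	}
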
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402a5a0e..52f3b7da4f2d 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -43,8 +43,8 @@ struct alarm {
 
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
 		enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
 void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index aff923ae8c4b..d87d8eced064 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -116,7 +116,6 @@ __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 			enum wb_reason reason);
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 86c12c93e3cf..8fdcb783197d 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 #include <linux/preempt.h>
-#include <linux/preempt_mask.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index ae2982c0f7a6..656da2a12ffe 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -17,7 +17,7 @@
 #define PHY_ID_BCM7250			0xae025280
 #define PHY_ID_BCM7364			0xae025260
 #define PHY_ID_BCM7366			0x600d8490
-#define PHY_ID_BCM7425			0x03625e60
+#define PHY_ID_BCM7425			0x600d86b0
 #define PHY_ID_BCM7429			0x600d8730
 #define PHY_ID_BCM7439			0x600d8480
 #define PHY_ID_BCM7439_2		0xae025080
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 96c280b2c263..597a1e836f22 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -37,12 +37,15 @@ enum clock_event_mode {
  *		reached from DETACHED or SHUTDOWN.
  * ONESHOT:	Device is programmed to generate event only once. Can be reached
  *		from DETACHED or SHUTDOWN.
+ * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
+ *		    stopped.
  */
 enum clock_event_state {
 	CLOCK_EVT_STATE_DETACHED,
 	CLOCK_EVT_STATE_SHUTDOWN,
 	CLOCK_EVT_STATE_PERIODIC,
 	CLOCK_EVT_STATE_ONESHOT,
+	CLOCK_EVT_STATE_ONESHOT_STOPPED,
 };
 
 /*
@@ -84,12 +87,13 @@ enum clock_event_state {
  * @mult:	nanosecond to cycles multiplier
  * @shift:	nanoseconds to cycles divisor (power of two)
  * @mode:	operating mode, relevant only to ->set_mode(), OBSOLETE
- * @state:	current state of the device, assigned by the core code
+ * @state_use_accessors:current state of the device, assigned by the core code
  * @features:	features
  * @retries:	number of forced programming retries
  * @set_mode:	legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
  * @set_state_periodic:	switch state to periodic, if !set_mode
  * @set_state_oneshot:	switch state to oneshot, if !set_mode
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
  * @set_state_shutdown:	switch state to shutdown, if !set_mode
  * @tick_resume:	resume clkevt device, if !set_mode
  * @broadcast:	function to broadcast events
@@ -113,7 +117,7 @@ struct clock_event_device {
 	u32			mult;
 	u32			shift;
 	enum clock_event_mode	mode;
-	enum clock_event_state	state;
+	enum clock_event_state	state_use_accessors;
 	unsigned int		features;
 	unsigned long		retries;
 
@@ -121,11 +125,12 @@ struct clock_event_device {
 	 * State transition callback(s): Only one of the two groups should be
 	 * defined:
	 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
-	 * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+	 * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
 	 */
 	void		(*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
 	int		(*set_state_periodic)(struct clock_event_device *);
 	int		(*set_state_oneshot)(struct clock_event_device *);
+	int		(*set_state_oneshot_stopped)(struct clock_event_device *);
 	int		(*set_state_shutdown)(struct clock_event_device *);
 	int		(*tick_resume)(struct clock_event_device *);
 
@@ -144,6 +149,32 @@ struct clock_event_device {
 	struct module		*owner;
 } ____cacheline_aligned;
 
+/* Helpers to verify state of a clockevent device */
+static inline bool clockevent_state_detached(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
+}
+
+static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
+}
+
+static inline bool clockevent_state_periodic(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
+}
+
+static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
+}
+
+static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
 /*
  * Calculate a multiplication factor for scaled math, which is used to convert
  * nanoseconds based values to clock ticks:
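
Renaming the field to state_use_accessors makes any direct poke at the state stand out; core code is expected to go through the helpers above. A sketch of the intended call pattern (the caller below is hypothetical):

	static void stop_tick_device(struct clock_event_device *dev)
	{
		/* Test via accessor, never via dev->state_use_accessors. */
		if (clockevent_state_oneshot(dev) && dev->set_state_oneshot_stopped)
			dev->set_state_oneshot_stopped(dev);
	}
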
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d27d0152271f..278dd279a7a8 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 
 extern int clocksource_unregister(struct clocksource*);
 extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 867722591be2..05be2352fef8 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -250,7 +250,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+/**
+ * READ_ONCE_CTRL - Read a value heading a control dependency
+ * @x: The value to be read, heading the control dependency
+ *
+ * Control dependencies are tricky. See Documentation/memory-barriers.txt
+ * for important information on how to use them. Note that in many cases,
+ * use of smp_load_acquire() will be much simpler. Control dependencies
+ * should be avoided except on the hottest of hotpaths.
+ */
+#define READ_ONCE_CTRL(x) \
+({ \
+	typeof(x) __val = READ_ONCE(x); \
+	smp_read_barrier_depends(); /* Enforce control dependency. */ \
+	__val; \
+})
 
 #endif /* __KERNEL__ */
 
@@ -450,7 +466,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
  *
- * If possible use READ_ONCE/ASSIGN_ONCE instead.
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
  */
 #define __ACCESS_ONCE(x) ({ \
 	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
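
READ_ONCE_CTRL() marks a load whose value gates later stores through the conditional branch, which is exactly what a control dependency orders. A sketch of the canonical pattern (variable names are illustrative; see Documentation/memory-barriers.txt for the many ways this can go wrong):

	static int ready;
	static int payload;

	static void consumer(void)
	{
		if (READ_ONCE_CTRL(ready)) {
			/*
			 * This store cannot be speculated before the load of
			 * 'ready'. Note the branch orders later STORES only;
			 * use smp_load_acquire() if later loads must also be
			 * ordered.
			 */
			WRITE_ONCE(payload, 1);
		}
	}
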
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 2821838256b4..b96bd299966f 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -14,8 +14,6 @@ extern void context_tracking_enter(enum ctx_state state);
 extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
 extern void context_tracking_user_exit(void);
-extern void __context_tracking_task_switch(struct task_struct *prev,
-					   struct task_struct *next);
 
 static inline void user_enter(void)
 {
@@ -51,19 +49,11 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 	}
 }
 
-static inline void context_tracking_task_switch(struct task_struct *prev,
-						struct task_struct *next)
-{
-	if (context_tracking_is_enabled())
-		__context_tracking_task_switch(prev, next);
-}
 #else
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
-						struct task_struct *next) { }
 #endif /* !CONFIG_CONTEXT_TRACKING */
 
 
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 6b7b96a32b75..678ecdf90cf6 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -12,6 +12,7 @@ struct context_tracking {
 	 * may be further optimized using static keys.
 	 */
 	bool active;
+	int recursion;
 	enum ctx_state {
 		CONTEXT_KERNEL = 0,
 		CONTEXT_USER,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 27e285b92b5f..59915ea5373c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 	return 1;
 }
 
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	set_bit(0, cpumask_bits(dstp));
-
 	return 0;
 }
 
@@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
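
cpumask_local_spread() has a simpler contract than the function it replaces: map index i onto an online CPU, preferring CPUs on the given NUMA node, and return the CPU number directly. A sketch of the typical consumer, spreading per-queue IRQ affinity (the driver structure and fields are hypothetical):

	static void set_queue_affinity(struct my_nic *nic)
	{
		int q;

		for (q = 0; q < nic->num_queues; q++) {
			unsigned int cpu = cpumask_local_spread(q, nic->numa_node);

			irq_set_affinity_hint(nic->queue_irq[q], cpumask_of(cpu));
		}
	}
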
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index cb25af461054..420311bcee38 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,6 @@ extern struct dentry *arch_debugfs_dir;
 
 /* declared over in file.c */
 extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
 
 struct dentry *debugfs_create_file(const char *name, umode_t mode,
 				   struct dentry *parent, void *data,
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 30624954dec5..e9bc9292bd3a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
 
 struct irte {
 	union {
+		/* Shared between remapped and posted mode*/
 		struct {
-			__u64	present 	: 1,
-				fpd		: 1,
-				dst_mode	: 1,
-				redir_hint	: 1,
-				trigger_mode	: 1,
-				dlvry_mode	: 3,
-				avail		: 4,
-				__reserved_1	: 4,
-				vector		: 8,
-				__reserved_2	: 8,
-				dest_id		: 32;
+			__u64	present		: 1,  /*  0      */
+				fpd		: 1,  /*  1      */
+				__res0		: 6,  /*  2 -  6 */
+				avail		: 4,  /*  8 - 11 */
+				__res1		: 3,  /* 12 - 14 */
+				pst		: 1,  /* 15      */
+				vector		: 8,  /* 16 - 23 */
+				__res2		: 40; /* 24 - 63 */
+		};
+
+		/* Remapped mode */
+		struct {
+			__u64	r_present	: 1,  /*  0      */
+				r_fpd		: 1,  /*  1      */
+				dst_mode	: 1,  /*  2      */
+				redir_hint	: 1,  /*  3      */
+				trigger_mode	: 1,  /*  4      */
+				dlvry_mode	: 3,  /*  5 -  7 */
+				r_avail		: 4,  /*  8 - 11 */
+				r_res0		: 4,  /* 12 - 15 */
+				r_vector	: 8,  /* 16 - 23 */
+				r_res1		: 8,  /* 24 - 31 */
+				dest_id		: 32; /* 32 - 63 */
+		};
+
+		/* Posted mode */
+		struct {
+			__u64	p_present	: 1,  /*  0      */
+				p_fpd		: 1,  /*  1      */
+				p_res0		: 6,  /*  2 -  7 */
+				p_avail		: 4,  /*  8 - 11 */
+				p_res1		: 2,  /* 12 - 13 */
+				p_urgent	: 1,  /* 14      */
+				p_pst		: 1,  /* 15      */
+				p_vector	: 8,  /* 16 - 23 */
+				p_res2		: 14, /* 24 - 37 */
+				pda_l		: 26; /* 38 - 63 */
 		};
 		__u64 low;
 	};
 
 	union {
+		/* Shared between remapped and posted mode*/
 		struct {
-			__u64	sid		: 16,
-				sq		: 2,
-				svt		: 2,
-				__reserved_3	: 44;
+			__u64	sid		: 16,  /* 64 - 79  */
+				sq		: 2,   /* 80 - 81  */
+				svt		: 2,   /* 82 - 83  */
+				__res3		: 44;  /* 84 - 127 */
+		};
+
+		/* Posted mode*/
+		struct {
+			__u64	p_sid		: 16,  /* 64 - 79  */
+				p_sq		: 2,   /* 80 - 81  */
+				p_svt		: 2,   /* 82 - 83  */
+				p_res3		: 12,  /* 84 - 95  */
+				pda_h		: 32;  /* 96 - 127 */
 		};
 		__u64 high;
 	};
 };
 
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+	dst->present	= src->present;
+	dst->fpd	= src->fpd;
+	dst->avail	= src->avail;
+	dst->pst	= src->pst;
+	dst->vector	= src->vector;
+	dst->sid	= src->sid;
+	dst->sq		= src->sq;
+	dst->svt	= src->svt;
+}
+
+#define PDA_LOW_BIT	26
+#define PDA_HIGH_BIT	32
+
 enum {
 	IRQ_REMAP_XAPIC_MODE,
 	IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
 extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
 extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
 
 #endif /* __DMAR_H__ */
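
Bit 15 (pst) selects which view of the low qword applies, and dmar_copy_shared_irte() copies only the fields the two formats share. A sketch of a consumer switching on the mode (illustrative, not taken from the IOMMU driver):

	static void describe_irte(const struct irte *e)
	{
		if (!e->present)
			return;
		if (e->pst)	/* posted: fields address the PIR, not a CPU */
			pr_info("posted irte: vector %u urgent %u\n",
				(unsigned int)e->p_vector,
				(unsigned int)e->p_urgent);
		else		/* remapped: classic destination format */
			pr_info("remapped irte: vector %u dest %u\n",
				(unsigned int)e->r_vector,
				(unsigned int)e->dest_id);
	}
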
diff --git a/include/linux/efi.h b/include/linux/efi.h
index af5be0368dec..2092965afca3 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -583,6 +583,9 @@ void efi_native_runtime_setup(void);
 #define EFI_FILE_INFO_ID \
     EFI_GUID(  0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
 
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+    EFI_GUID(  0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
 #define EFI_FILE_SYSTEM_GUID \
     EFI_GUID(  0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
 
@@ -823,6 +826,7 @@ extern struct efi {
 	unsigned long fw_vendor;	/* fw_vendor */
 	unsigned long runtime;		/* runtime table */
 	unsigned long config_table;	/* config tables */
+	unsigned long esrt;		/* ESRT table */
 	efi_get_time_t *get_time;
 	efi_set_time_t *set_time;
 	efi_get_wakeup_time_t *get_wakeup_time;
@@ -875,6 +879,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
 extern int efi_config_parse_tables(void *config_tables, int count, int sz,
 				   efi_config_table_type_t *arch_tables);
 extern u64 efi_get_iobase (void);
@@ -882,12 +891,15 @@ extern u32 efi_mem_type (unsigned long phys_addr);
 extern u64 efi_mem_attributes (unsigned long phys_addr);
 extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
 		struct resource *data_resource, struct resource *bss_resource);
 extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
 extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
 extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
 
 extern int efi_reboot_quirk_mode;
 extern bool efi_poweroff_required(void);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 35ec87e490b1..b577e801b4af 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -38,7 +38,6 @@ struct backing_dev_info;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
-struct nameidata;
 struct kiocb;
 struct kobject;
 struct pipe_inode_info;
@@ -656,6 +655,7 @@ struct inode {
 		struct pipe_inode_info	*i_pipe;
 		struct block_device	*i_bdev;
 		struct cdev		*i_cdev;
+		char			*i_link;
 	};
 
 	__u32			i_generation;
@@ -1607,12 +1607,12 @@
 
 struct inode_operations {
 	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
-	void * (*follow_link) (struct dentry *, struct nameidata *);
+	const char * (*follow_link) (struct dentry *, void **);
 	int (*permission) (struct inode *, int);
 	struct posix_acl * (*get_acl)(struct inode *, int);
 
 	int (*readlink) (struct dentry *, char __user *,int);
-	void (*put_link) (struct dentry *, struct nameidata *, void *);
+	void (*put_link) (struct inode *, void *);
 
 	int (*create) (struct inode *,struct dentry *, umode_t, bool);
 	int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1879,6 +1879,7 @@ enum file_time_flags {
 	S_VERSION = 8,
 };
 
+extern bool atime_needs_update(const struct path *, struct inode *);
 extern void touch_atime(const struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2704,13 +2705,14 @@ extern const struct file_operations generic_ro_fops;
 
 extern int readlink_copy(char __user *, int, const char *);
 extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern const char *page_follow_link_light(struct dentry *, void **);
+extern void page_put_link(struct inode *, void *);
 extern int __page_symlink(struct inode *inode, const char *symname, int len,
 		int nofs);
 extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
-extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern void kfree_put_link(struct inode *, void *);
+extern void free_page_put_link(struct inode *, void *);
 extern int generic_readlink(struct dentry *, char __user *, int);
 extern void generic_fillattr(struct inode *, struct kstat *);
 int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2721,6 +2723,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
 loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);
+const char *simple_follow_link(struct dentry *, void **);
+extern const struct inode_operations simple_symlink_inode_operations;
 
 extern int iterate_dir(struct file *, struct dir_context *);
 
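
With ->follow_link() returning the target string and i_link caching it in the inode, a filesystem whose symlink bodies live in memory needs no callbacks at all: point i_link at the string and use simple_symlink_inode_operations. A hypothetical in-memory filesystem's symlink method under the new API:

	static int myfs_symlink(struct inode *dir, struct dentry *dentry,
				const char *target)
	{
		struct inode *inode = new_inode(dir->i_sb);

		if (!inode)
			return -ENOMEM;
		inode->i_mode = S_IFLNK | 0777;
		inode->i_link = kstrdup(target, GFP_KERNEL);
		if (!inode->i_link) {
			iput(inode);
			return -ENOMEM;
		}
		/* The fs must free i_link itself in its ->evict_inode(). */
		inode->i_op = &simple_symlink_inode_operations;
		d_instantiate(dentry, inode);
		return 0;
	}
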
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f4af03404b97..dfd59d6bc6f0 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,7 +1,7 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9286a46b7d69..6aefcd0031a6 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page)
 {
+	preempt_disable();
 	pagefault_disable();
 	return page_address(page);
 }
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
 static inline void __kunmap_atomic(void *addr)
 {
 	pagefault_enable();
+	preempt_enable();
 }
 
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
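
The explicit preempt_disable() keeps kmap_atomic()'s historical contract, no sleeping until the matching unmap, even though pagefault_disable() alone no longer disables preemption; callers do not change. The usual shape of a kmap_atomic section:

	static void zero_page_contents(struct page *page)
	{
		void *addr = kmap_atomic(page);	/* disables preemption and pagefaults */

		memset(addr, 0, PAGE_SIZE);	/* must not sleep in here */
		kunmap_atomic(addr);		/* re-enables both */
	}
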
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 05f6df1fdf5b..76dd4f0da5ca 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -53,34 +53,25 @@ enum hrtimer_restart { | |||
| 53 | * | 53 | * |
| 54 | * 0x00 inactive | 54 | * 0x00 inactive |
| 55 | * 0x01 enqueued into rbtree | 55 | * 0x01 enqueued into rbtree |
| 56 | * 0x02 callback function running | ||
| 57 | * 0x04 timer is migrated to another cpu | ||
| 58 | * | 56 | * |
| 59 | * Special cases: | 57 | * The callback state is not part of the timer->state because clearing it would |
| 60 | * 0x03 callback function running and enqueued | 58 | * mean touching the timer after the callback, this makes it impossible to free |
| 61 | * (was requeued on another CPU) | 59 | * the timer from the callback function. |
| 62 | * 0x05 timer was migrated on CPU hotunplug | ||
| 63 | * | 60 | * |
| 64 | * The "callback function running and enqueued" status is only possible on | 61 | * Therefore we track the callback state in: |
| 65 | * SMP. It happens for example when a posix timer expired and the callback | 62 | * |
| 63 | * timer->base->cpu_base->running == timer | ||
| 64 | * | ||
| 65 | * On SMP it is possible to have a "callback function running and enqueued" | ||
| 66 | * status. It happens for example when a posix timer expired and the callback | ||
| 66 | * queued a signal. Between dropping the lock which protects the posix timer | 67 | * queued a signal. Between dropping the lock which protects the posix timer |
| 67 | * and reacquiring the base lock of the hrtimer, another CPU can deliver the | 68 | * and reacquiring the base lock of the hrtimer, another CPU can deliver the |
| 68 | * signal and rearm the timer. We have to preserve the callback running state, | 69 | * signal and rearm the timer. |
| 69 | * as otherwise the timer could be removed before the softirq code finishes the | ||
| 70 | * the handling of the timer. | ||
| 71 | * | ||
| 72 | * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state | ||
| 73 | * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This | ||
| 74 | * also affects HRTIMER_STATE_MIGRATE where the preservation is not | ||
| 75 | * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is | ||
| 76 | * enqueued on the new cpu. | ||
| 77 | * | 70 | * |
| 78 | * All state transitions are protected by cpu_base->lock. | 71 | * All state transitions are protected by cpu_base->lock. |
| 79 | */ | 72 | */ |
| 80 | #define HRTIMER_STATE_INACTIVE 0x00 | 73 | #define HRTIMER_STATE_INACTIVE 0x00 |
| 81 | #define HRTIMER_STATE_ENQUEUED 0x01 | 74 | #define HRTIMER_STATE_ENQUEUED 0x01 |
| 82 | #define HRTIMER_STATE_CALLBACK 0x02 | ||
| 83 | #define HRTIMER_STATE_MIGRATE 0x04 | ||
| 84 | 75 | ||
| 85 | /** | 76 | /** |
| 86 | * struct hrtimer - the basic hrtimer structure | 77 | * struct hrtimer - the basic hrtimer structure |
| @@ -130,6 +121,12 @@ struct hrtimer_sleeper { | |||
| 130 | struct task_struct *task; | 121 | struct task_struct *task; |
| 131 | }; | 122 | }; |
| 132 | 123 | ||
| 124 | #ifdef CONFIG_64BIT | ||
| 125 | # define HRTIMER_CLOCK_BASE_ALIGN 64 | ||
| 126 | #else | ||
| 127 | # define HRTIMER_CLOCK_BASE_ALIGN 32 | ||
| 128 | #endif | ||
| 129 | |||
| 133 | /** | 130 | /** |
| 134 | * struct hrtimer_clock_base - the timer base for a specific clock | 131 | * struct hrtimer_clock_base - the timer base for a specific clock |
| 135 | * @cpu_base: per cpu clock base | 132 | * @cpu_base: per cpu clock base |
| @@ -137,9 +134,7 @@ struct hrtimer_sleeper { | |||
| 137 | * timer to a base on another cpu. | 134 | * timer to a base on another cpu. |
| 138 | * @clockid: clock id for per_cpu support | 135 | * @clockid: clock id for per_cpu support |
| 139 | * @active: red black tree root node for the active timers | 136 | * @active: red black tree root node for the active timers |
| 140 | * @resolution: the resolution of the clock, in nanoseconds | ||
| 141 | * @get_time: function to retrieve the current time of the clock | 137 | * @get_time: function to retrieve the current time of the clock |
| 142 | * @softirq_time: the time when running the hrtimer queue in the softirq | ||
| 143 | * @offset: offset of this clock to the monotonic base | 138 | * @offset: offset of this clock to the monotonic base |
| 144 | */ | 139 | */ |
| 145 | struct hrtimer_clock_base { | 140 | struct hrtimer_clock_base { |
| @@ -147,11 +142,9 @@ struct hrtimer_clock_base { | |||
| 147 | int index; | 142 | int index; |
| 148 | clockid_t clockid; | 143 | clockid_t clockid; |
| 149 | struct timerqueue_head active; | 144 | struct timerqueue_head active; |
| 150 | ktime_t resolution; | ||
| 151 | ktime_t (*get_time)(void); | 145 | ktime_t (*get_time)(void); |
| 152 | ktime_t softirq_time; | ||
| 153 | ktime_t offset; | 146 | ktime_t offset; |
| 154 | }; | 147 | } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); |
| 155 | 148 | ||
| 156 | enum hrtimer_base_type { | 149 | enum hrtimer_base_type { |
| 157 | HRTIMER_BASE_MONOTONIC, | 150 | HRTIMER_BASE_MONOTONIC, |
| @@ -165,11 +158,16 @@ enum hrtimer_base_type { | |||
| 165 | * struct hrtimer_cpu_base - the per cpu clock bases | 158 | * struct hrtimer_cpu_base - the per cpu clock bases |
| 166 | * @lock: lock protecting the base and associated clock bases | 159 | * @lock: lock protecting the base and associated clock bases |
| 167 | * and timers | 160 | * and timers |
| 161 | * @seq: seqcount around __run_hrtimer | ||
| 162 | * @running: pointer to the currently running hrtimer | ||
| 168 | * @cpu: cpu number | 163 | * @cpu: cpu number |
| 169 | * @active_bases: Bitfield to mark bases with active timers | 164 | * @active_bases: Bitfield to mark bases with active timers |
| 170 | * @clock_was_set: Indicates that clock was set from irq context. | 165 | * @clock_was_set_seq: Sequence counter of clock was set events |
| 166 | * @migration_enabled: The migration of hrtimers to other cpus is enabled | ||
| 167 | * @nohz_active: The nohz functionality is enabled | ||
| 171 | * @expires_next: absolute time of the next event which was scheduled | 168 | * @expires_next: absolute time of the next event which was scheduled |
| 172 | * via clock_set_next_event() | 169 | * via clock_set_next_event() |
| 170 | * @next_timer: Pointer to the first expiring timer | ||
| 173 | * @in_hrtirq: hrtimer_interrupt() is currently executing | 171 | * @in_hrtirq: hrtimer_interrupt() is currently executing |
| 174 | * @hres_active: State of high resolution mode | 172 | * @hres_active: State of high resolution mode |
| 175 | * @hang_detected: The last hrtimer interrupt detected a hang | 173 | * @hang_detected: The last hrtimer interrupt detected a hang |
| @@ -178,27 +176,38 @@ enum hrtimer_base_type { | |||
| 178 | * @nr_hangs: Total number of hrtimer interrupt hangs | 176 | * @nr_hangs: Total number of hrtimer interrupt hangs |
| 179 | * @max_hang_time: Maximum time spent in hrtimer_interrupt | 177 | * @max_hang_time: Maximum time spent in hrtimer_interrupt |
| 180 | * @clock_base: array of clock bases for this cpu | 178 | * @clock_base: array of clock bases for this cpu |
| 179 | * | ||
| 180 | * Note: next_timer is just an optimization for __remove_hrtimer(). | ||
| 181 | * Do not dereference the pointer because it is not reliable on | ||
| 182 | * cross cpu removals. | ||
| 181 | */ | 183 | */ |
| 182 | struct hrtimer_cpu_base { | 184 | struct hrtimer_cpu_base { |
| 183 | raw_spinlock_t lock; | 185 | raw_spinlock_t lock; |
| 186 | seqcount_t seq; | ||
| 187 | struct hrtimer *running; | ||
| 184 | unsigned int cpu; | 188 | unsigned int cpu; |
| 185 | unsigned int active_bases; | 189 | unsigned int active_bases; |
| 186 | unsigned int clock_was_set; | 190 | unsigned int clock_was_set_seq; |
| 191 | bool migration_enabled; | ||
| 192 | bool nohz_active; | ||
| 187 | #ifdef CONFIG_HIGH_RES_TIMERS | 193 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 194 | unsigned int in_hrtirq : 1, | ||
| 195 | hres_active : 1, | ||
| 196 | hang_detected : 1; | ||
| 188 | ktime_t expires_next; | 197 | ktime_t expires_next; |
| 189 | int in_hrtirq; | 198 | struct hrtimer *next_timer; |
| 190 | int hres_active; | 199 | unsigned int nr_events; |
| 191 | int hang_detected; | 200 | unsigned int nr_retries; |
| 192 | unsigned long nr_events; | 201 | unsigned int nr_hangs; |
| 193 | unsigned long nr_retries; | 202 | unsigned int max_hang_time; |
| 194 | unsigned long nr_hangs; | ||
| 195 | ktime_t max_hang_time; | ||
| 196 | #endif | 203 | #endif |
| 197 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 204 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
| 198 | }; | 205 | } ____cacheline_aligned; |
| 199 | 206 | ||
| 200 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | 207 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) |
| 201 | { | 208 | { |
| 209 | BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN); | ||
| 210 | |||
| 202 | timer->node.expires = time; | 211 | timer->node.expires = time; |
| 203 | timer->_softexpires = time; | 212 | timer->_softexpires = time; |
| 204 | } | 213 | } |
| @@ -262,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) | |||
| 262 | return ktime_sub(timer->node.expires, timer->base->get_time()); | 271 | return ktime_sub(timer->node.expires, timer->base->get_time()); |
| 263 | } | 272 | } |
| 264 | 273 | ||
| 265 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
| 266 | struct clock_event_device; | ||
| 267 | |||
| 268 | extern void hrtimer_interrupt(struct clock_event_device *dev); | ||
| 269 | |||
| 270 | /* | ||
| 271 | * In high resolution mode the time reference must be read accurately | ||
| 272 | */ | ||
| 273 | static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | 274 | static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) |
| 274 | { | 275 | { |
| 275 | return timer->base->get_time(); | 276 | return timer->base->get_time(); |
| 276 | } | 277 | } |
| 277 | 278 | ||
| 279 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
| 280 | struct clock_event_device; | ||
| 281 | |||
| 282 | extern void hrtimer_interrupt(struct clock_event_device *dev); | ||
| 283 | |||
| 278 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | 284 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) |
| 279 | { | 285 | { |
| 280 | return timer->base->cpu_base->hres_active; | 286 | return timer->base->cpu_base->hres_active; |
| @@ -295,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void); | |||
| 295 | 301 | ||
| 296 | extern void clock_was_set_delayed(void); | 302 | extern void clock_was_set_delayed(void); |
| 297 | 303 | ||
| 304 | extern unsigned int hrtimer_resolution; | ||
| 305 | |||
| 298 | #else | 306 | #else |
| 299 | 307 | ||
| 300 | # define MONOTONIC_RES_NSEC LOW_RES_NSEC | 308 | # define MONOTONIC_RES_NSEC LOW_RES_NSEC |
| 301 | # define KTIME_MONOTONIC_RES KTIME_LOW_RES | 309 | # define KTIME_MONOTONIC_RES KTIME_LOW_RES |
| 302 | 310 | ||
| 303 | static inline void hrtimer_peek_ahead_timers(void) { } | 311 | #define hrtimer_resolution (unsigned int)LOW_RES_NSEC |
| 304 | 312 | ||
| 305 | /* | 313 | static inline void hrtimer_peek_ahead_timers(void) { } |
| 306 | * In non high resolution mode the time reference is taken from | ||
| 307 | * the base softirq time variable. | ||
| 308 | */ | ||
| 309 | static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | ||
| 310 | { | ||
| 311 | return timer->base->softirq_time; | ||
| 312 | } | ||
| 313 | 314 | ||
| 314 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | 315 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) |
| 315 | { | 316 | { |
| @@ -353,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } | |||
| 353 | #endif | 354 | #endif |
| 354 | 355 | ||
| 355 | /* Basic timer operations: */ | 356 | /* Basic timer operations: */ |
| 356 | extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, | 357 | extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
| 357 | const enum hrtimer_mode mode); | ||
| 358 | extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | ||
| 359 | unsigned long range_ns, const enum hrtimer_mode mode); | 358 | unsigned long range_ns, const enum hrtimer_mode mode); |
| 360 | extern int | 359 | |
| 361 | __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | 360 | /** |
| 362 | unsigned long delta_ns, | 361 | * hrtimer_start - (re)start an hrtimer on the current CPU |
| 363 | const enum hrtimer_mode mode, int wakeup); | 362 | * @timer: the timer to be added |
| 363 | * @tim: expiry time | ||
| 364 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or | ||
| 365 | * relative (HRTIMER_MODE_REL) | ||
| 366 | */ | ||
| 367 | static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, | ||
| 368 | const enum hrtimer_mode mode) | ||
| 369 | { | ||
| 370 | hrtimer_start_range_ns(timer, tim, 0, mode); | ||
| 371 | } | ||
| 364 | 372 | ||
| 365 | extern int hrtimer_cancel(struct hrtimer *timer); | 373 | extern int hrtimer_cancel(struct hrtimer *timer); |
| 366 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | 374 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); |
| 367 | 375 | ||
| 368 | static inline int hrtimer_start_expires(struct hrtimer *timer, | 376 | static inline void hrtimer_start_expires(struct hrtimer *timer, |
| 369 | enum hrtimer_mode mode) | 377 | enum hrtimer_mode mode) |
| 370 | { | 378 | { |
| 371 | unsigned long delta; | 379 | unsigned long delta; |
| 372 | ktime_t soft, hard; | 380 | ktime_t soft, hard; |
| 373 | soft = hrtimer_get_softexpires(timer); | 381 | soft = hrtimer_get_softexpires(timer); |
| 374 | hard = hrtimer_get_expires(timer); | 382 | hard = hrtimer_get_expires(timer); |
| 375 | delta = ktime_to_ns(ktime_sub(hard, soft)); | 383 | delta = ktime_to_ns(ktime_sub(hard, soft)); |
| 376 | return hrtimer_start_range_ns(timer, soft, delta, mode); | 384 | hrtimer_start_range_ns(timer, soft, delta, mode); |
| 377 | } | 385 | } |
| 378 | 386 | ||
| 379 | static inline int hrtimer_restart(struct hrtimer *timer) | 387 | static inline void hrtimer_restart(struct hrtimer *timer) |
| 380 | { | 388 | { |
| 381 | return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); | 389 | hrtimer_start_expires(timer, HRTIMER_MODE_ABS); |
| 382 | } | 390 | } |
| 383 | 391 | ||
| 384 | /* Query timers: */ | 392 | /* Query timers: */ |
| 385 | extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); | 393 | extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); |
| 386 | extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); | ||
| 387 | 394 | ||
| 388 | extern ktime_t hrtimer_get_next_event(void); | 395 | extern u64 hrtimer_get_next_event(void); |
| 389 | 396 | ||
| 390 | /* | 397 | extern bool hrtimer_active(const struct hrtimer *timer); |
| 391 | * A timer is active, when it is enqueued into the rbtree or the | ||
| 392 | * callback function is running or it's in the state of being migrated | ||
| 393 | * to another cpu. | ||
| 394 | */ | ||
| 395 | static inline int hrtimer_active(const struct hrtimer *timer) | ||
| 396 | { | ||
| 397 | return timer->state != HRTIMER_STATE_INACTIVE; | ||
| 398 | } | ||
| 399 | 398 | ||
| 400 | /* | 399 | /* |
| 401 | * Helper function to check, whether the timer is on one of the queues | 400 | * Helper function to check, whether the timer is on one of the queues |
| @@ -411,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer) | |||
| 411 | */ | 410 | */ |
| 412 | static inline int hrtimer_callback_running(struct hrtimer *timer) | 411 | static inline int hrtimer_callback_running(struct hrtimer *timer) |
| 413 | { | 412 | { |
| 414 | return timer->state & HRTIMER_STATE_CALLBACK; | 413 | return timer->base->cpu_base->running == timer; |
| 415 | } | 414 | } |
| 416 | 415 | ||
| 417 | /* Forward a hrtimer so it expires after now: */ | 416 | /* Forward a hrtimer so it expires after now: */ |
| 418 | extern u64 | 417 | extern u64 |
| 419 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); | 418 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); |
| 420 | 419 | ||
| 421 | /* Forward a hrtimer so it expires after the hrtimer's current now */ | 420 | /** |
| 421 | * hrtimer_forward_now - forward the timer expiry so it expires after now | ||
| 422 | * @timer: hrtimer to forward | ||
| 423 | * @interval: the interval to forward | ||
| 424 | * | ||
| 425 | * Forward the timer expiry so it will expire after the current time | ||
| 426 | * of the hrtimer clock base. Returns the number of overruns. | ||
| 427 | * | ||
| 428 | * Can be safely called from the callback function of @timer. If | ||
| 429 | * called from other contexts @timer must neither be enqueued nor | ||
| 430 | * running the callback and the caller needs to take care of | ||
| 431 | * serialization. | ||
| 432 | * | ||
| 433 | * Note: This only updates the timer expiry value and does not requeue | ||
| 434 | * the timer. | ||
| 435 | */ | ||
| 422 | static inline u64 hrtimer_forward_now(struct hrtimer *timer, | 436 | static inline u64 hrtimer_forward_now(struct hrtimer *timer, |
| 423 | ktime_t interval) | 437 | ktime_t interval) |
| 424 | { | 438 | { |
| @@ -443,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); | |||
| 443 | 457 | ||
| 444 | /* Soft interrupt function to run the hrtimer queues: */ | 458 | /* Soft interrupt function to run the hrtimer queues: */ |
| 445 | extern void hrtimer_run_queues(void); | 459 | extern void hrtimer_run_queues(void); |
| 446 | extern void hrtimer_run_pending(void); | ||
| 447 | 460 | ||
| 448 | /* Bootup initialization: */ | 461 | /* Bootup initialization: */ |
| 449 | extern void __init hrtimers_init(void); | 462 | extern void __init hrtimers_init(void); |
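A minimal sketch of a periodic timer against the reworked start API above, where hrtimer_start() is now a void inline wrapper around hrtimer_start_range_ns() and the callback requeues itself via hrtimer_forward_now(); all demo_* names are hypothetical:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static ktime_t demo_period;

static enum hrtimer_restart demo_fn(struct hrtimer *t)
{
	/* Requeue past the clock base's current time; overrun count is ignored here. */
	hrtimer_forward_now(t, demo_period);
	return HRTIMER_RESTART;
}

static void demo_start(void)
{
	demo_period = ms_to_ktime(10);
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_fn;
	/* hrtimer_start() returns void now; there is no enqueue status to check. */
	hrtimer_start(&demo_timer, demo_period, HRTIMER_MODE_REL);
}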
diff --git a/include/linux/htirq.h b/include/linux/htirq.h index 70a1dbbf2093..d4a527e58434 100644 --- a/include/linux/htirq.h +++ b/include/linux/htirq.h | |||
| @@ -1,24 +1,38 @@ | |||
| 1 | #ifndef LINUX_HTIRQ_H | 1 | #ifndef LINUX_HTIRQ_H |
| 2 | #define LINUX_HTIRQ_H | 2 | #define LINUX_HTIRQ_H |
| 3 | 3 | ||
| 4 | struct pci_dev; | ||
| 5 | struct irq_data; | ||
| 6 | |||
| 4 | struct ht_irq_msg { | 7 | struct ht_irq_msg { |
| 5 | u32 address_lo; /* low 32 bits of the ht irq message */ | 8 | u32 address_lo; /* low 32 bits of the ht irq message */ |
| 6 | u32 address_hi; /* high 32 bits of the ht irq message */ | 9 | u32 address_hi; /* high 32 bits of the ht irq message */ |
| 7 | }; | 10 | }; |
| 8 | 11 | ||
| 12 | typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq, | ||
| 13 | struct ht_irq_msg *msg); | ||
| 14 | |||
| 15 | struct ht_irq_cfg { | ||
| 16 | struct pci_dev *dev; | ||
| 17 | /* Update callback used to cope with buggy hardware */ | ||
| 18 | ht_irq_update_t *update; | ||
| 19 | unsigned pos; | ||
| 20 | unsigned idx; | ||
| 21 | struct ht_irq_msg msg; | ||
| 22 | }; | ||
| 23 | |||
| 9 | /* Helper functions.. */ | 24 | /* Helper functions.. */ |
| 10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 25 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
| 11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 26 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
| 12 | struct irq_data; | ||
| 13 | void mask_ht_irq(struct irq_data *data); | 27 | void mask_ht_irq(struct irq_data *data); |
| 14 | void unmask_ht_irq(struct irq_data *data); | 28 | void unmask_ht_irq(struct irq_data *data); |
| 15 | 29 | ||
| 16 | /* The arch hook for getting things started */ | 30 | /* The arch hook for getting things started */ |
| 17 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); | 31 | int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev, |
| 32 | ht_irq_update_t *update); | ||
| 33 | void arch_teardown_ht_irq(unsigned int irq); | ||
| 18 | 34 | ||
| 19 | /* For drivers of buggy hardware */ | 35 | /* For drivers of buggy hardware */ |
| 20 | typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq, | ||
| 21 | struct ht_irq_msg *msg); | ||
| 22 | int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update); | 36 | int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update); |
| 23 | 37 | ||
| 24 | #endif /* LINUX_HTIRQ_H */ | 38 | #endif /* LINUX_HTIRQ_H */ |
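A sketch of the "buggy hardware" update hook retained above, assuming a hypothetical device quirk (quirk_ht_update, demo_create and the fixup itself are illustrative only):

#include <linux/htirq.h>
#include <linux/pci.h>

/* Adjust the message for hardware that mishandles some address bits. */
static void quirk_ht_update(struct pci_dev *dev, int irq,
			    struct ht_irq_msg *msg)
{
	msg->address_lo |= 0x1;		/* illustrative fixup only */
}

static int demo_create(struct pci_dev *dev)
{
	/* idx selects which HT irq of the device to configure */
	return __ht_create_irq(dev, 0, quirk_ht_update);
}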
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 696d22312b31..bb9b075f0eb0 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -50,9 +50,8 @@ extern struct fs_struct init_fs; | |||
| 50 | .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ | 50 | .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ |
| 51 | .rlim = INIT_RLIMITS, \ | 51 | .rlim = INIT_RLIMITS, \ |
| 52 | .cputimer = { \ | 52 | .cputimer = { \ |
| 53 | .cputime = INIT_CPUTIME, \ | 53 | .cputime_atomic = INIT_CPUTIME_ATOMIC, \ |
| 54 | .running = 0, \ | 54 | .running = 0, \ |
| 55 | .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ | ||
| 56 | }, \ | 55 | }, \ |
| 57 | .cred_guard_mutex = \ | 56 | .cred_guard_mutex = \ |
| 58 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 57 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 796ef9645827..3665cb331ca1 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -87,6 +87,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
| 87 | /* | 87 | /* |
| 88 | * Decoding Capability Register | 88 | * Decoding Capability Register |
| 89 | */ | 89 | */ |
| 90 | #define cap_pi_support(c) (((c) >> 59) & 1) | ||
| 90 | #define cap_read_drain(c) (((c) >> 55) & 1) | 91 | #define cap_read_drain(c) (((c) >> 55) & 1) |
| 91 | #define cap_write_drain(c) (((c) >> 54) & 1) | 92 | #define cap_write_drain(c) (((c) >> 54) & 1) |
| 92 | #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) | 93 | #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) |
| @@ -115,13 +116,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
| 115 | * Extended Capability Register | 116 | * Extended Capability Register |
| 116 | */ | 117 | */ |
| 117 | 118 | ||
| 119 | #define ecap_pasid(e) ((e >> 40) & 0x1) | ||
| 118 | #define ecap_pss(e) ((e >> 35) & 0x1f) | 120 | #define ecap_pss(e) ((e >> 35) & 0x1f) |
| 119 | #define ecap_eafs(e) ((e >> 34) & 0x1) | 121 | #define ecap_eafs(e) ((e >> 34) & 0x1) |
| 120 | #define ecap_nwfs(e) ((e >> 33) & 0x1) | 122 | #define ecap_nwfs(e) ((e >> 33) & 0x1) |
| 121 | #define ecap_srs(e) ((e >> 31) & 0x1) | 123 | #define ecap_srs(e) ((e >> 31) & 0x1) |
| 122 | #define ecap_ers(e) ((e >> 30) & 0x1) | 124 | #define ecap_ers(e) ((e >> 30) & 0x1) |
| 123 | #define ecap_prs(e) ((e >> 29) & 0x1) | 125 | #define ecap_prs(e) ((e >> 29) & 0x1) |
| 124 | #define ecap_pasid(e) ((e >> 28) & 0x1) | 126 | /* PASID support used to be on bit 28 */ |
| 125 | #define ecap_dis(e) ((e >> 27) & 0x1) | 127 | #define ecap_dis(e) ((e >> 27) & 0x1) |
| 126 | #define ecap_nest(e) ((e >> 26) & 0x1) | 128 | #define ecap_nest(e) ((e >> 26) & 0x1) |
| 127 | #define ecap_mts(e) ((e >> 25) & 0x1) | 129 | #define ecap_mts(e) ((e >> 25) & 0x1) |
| @@ -298,6 +300,8 @@ struct q_inval { | |||
| 298 | 300 | ||
| 299 | #define INTR_REMAP_TABLE_ENTRIES 65536 | 301 | #define INTR_REMAP_TABLE_ENTRIES 65536 |
| 300 | 302 | ||
| 303 | struct irq_domain; | ||
| 304 | |||
| 301 | struct ir_table { | 305 | struct ir_table { |
| 302 | struct irte *base; | 306 | struct irte *base; |
| 303 | unsigned long *bitmap; | 307 | unsigned long *bitmap; |
| @@ -347,6 +351,8 @@ struct intel_iommu { | |||
| 347 | 351 | ||
| 348 | #ifdef CONFIG_IRQ_REMAP | 352 | #ifdef CONFIG_IRQ_REMAP |
| 349 | struct ir_table *ir_table; /* Interrupt remapping info */ | 353 | struct ir_table *ir_table; /* Interrupt remapping info */ |
| 354 | struct irq_domain *ir_domain; | ||
| 355 | struct irq_domain *ir_msi_domain; | ||
| 350 | #endif | 356 | #endif |
| 351 | struct device *iommu_dev; /* IOMMU-sysfs device */ | 357 | struct device *iommu_dev; /* IOMMU-sysfs device */ |
| 352 | int node; | 358 | int node; |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 950ae4501826..be7e75c945e9 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -413,7 +413,8 @@ enum | |||
| 413 | BLOCK_IOPOLL_SOFTIRQ, | 413 | BLOCK_IOPOLL_SOFTIRQ, |
| 414 | TASKLET_SOFTIRQ, | 414 | TASKLET_SOFTIRQ, |
| 415 | SCHED_SOFTIRQ, | 415 | SCHED_SOFTIRQ, |
| 416 | HRTIMER_SOFTIRQ, | 416 | HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the |
| 417 | numbering. Sigh! */ | ||
| 417 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ | 418 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ |
| 418 | 419 | ||
| 419 | NR_SOFTIRQS | 420 | NR_SOFTIRQS |
| @@ -592,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, | |||
| 592 | clockid_t which_clock, enum hrtimer_mode mode); | 593 | clockid_t which_clock, enum hrtimer_mode mode); |
| 593 | 594 | ||
| 594 | static inline | 595 | static inline |
| 595 | int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, | 596 | void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, |
| 596 | const enum hrtimer_mode mode) | 597 | const enum hrtimer_mode mode) |
| 597 | { | 598 | { |
| 598 | return hrtimer_start(&ttimer->timer, time, mode); | 599 | hrtimer_start(&ttimer->timer, time, mode); |
| 599 | } | 600 | } |
| 600 | 601 | ||
| 601 | static inline | 602 | static inline |
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 657fab4efab3..c27dde7215b5 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
| @@ -141,6 +141,7 @@ static inline void __iomem * | |||
| 141 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 141 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
| 142 | unsigned long offset) | 142 | unsigned long offset) |
| 143 | { | 143 | { |
| 144 | preempt_disable(); | ||
| 144 | pagefault_disable(); | 145 | pagefault_disable(); |
| 145 | return ((char __force __iomem *) mapping) + offset; | 146 | return ((char __force __iomem *) mapping) + offset; |
| 146 | } | 147 | } |
| @@ -149,6 +150,7 @@ static inline void | |||
| 149 | io_mapping_unmap_atomic(void __iomem *vaddr) | 150 | io_mapping_unmap_atomic(void __iomem *vaddr) |
| 150 | { | 151 | { |
| 151 | pagefault_enable(); | 152 | pagefault_enable(); |
| 153 | preempt_enable(); | ||
| 152 | } | 154 | } |
| 153 | 155 | ||
| 154 | /* Non-atomic map/unmap */ | 156 | /* Non-atomic map/unmap */ |
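Since the atomic map/unmap pair above now also toggles preemption, the mapped section must stay non-sleeping end to end. A minimal usage sketch, assuming a hypothetical mapping and offset:

#include <linux/io-mapping.h>

static void demo_poke(struct io_mapping *mapping, unsigned long offset)
{
	void __iomem *p = io_mapping_map_atomic_wc(mapping, offset);

	writel(0, p);			/* no sleeping between map and unmap */
	io_mapping_unmap_atomic(p);
}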
diff --git a/include/linux/io.h b/include/linux/io.h index 986f2bffea1e..fb5a99800e77 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #define _LINUX_IO_H | 19 | #define _LINUX_IO_H |
| 20 | 20 | ||
| 21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
| 22 | #include <linux/init.h> | ||
| 22 | #include <asm/io.h> | 23 | #include <asm/io.h> |
| 23 | #include <asm/page.h> | 24 | #include <asm/page.h> |
| 24 | 25 | ||
| @@ -111,6 +112,13 @@ static inline void arch_phys_wc_del(int handle) | |||
| 111 | } | 112 | } |
| 112 | 113 | ||
| 113 | #define arch_phys_wc_add arch_phys_wc_add | 114 | #define arch_phys_wc_add arch_phys_wc_add |
| 115 | #ifndef arch_phys_wc_index | ||
| 116 | static inline int arch_phys_wc_index(int handle) | ||
| 117 | { | ||
| 118 | return -1; | ||
| 119 | } | ||
| 120 | #define arch_phys_wc_index arch_phys_wc_index | ||
| 121 | #endif | ||
| 114 | #endif | 122 | #endif |
| 115 | 123 | ||
| 116 | #endif /* _LINUX_IO_H */ | 124 | #endif /* _LINUX_IO_H */ |
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index c367cbdf73ab..535fd3bb1ba8 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/time.h> | 7 | #include <linux/time.h> |
| 8 | #include <linux/timex.h> | 8 | #include <linux/timex.h> |
| 9 | #include <asm/param.h> /* for HZ */ | 9 | #include <asm/param.h> /* for HZ */ |
| 10 | #include <generated/timeconst.h> | ||
| 10 | 11 | ||
| 11 | /* | 12 | /* |
| 12 | * The following defines establish the engineering parameters of the PLL | 13 | * The following defines establish the engineering parameters of the PLL |
| @@ -288,8 +289,133 @@ static inline u64 jiffies_to_nsecs(const unsigned long j) | |||
| 288 | return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; | 289 | return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; |
| 289 | } | 290 | } |
| 290 | 291 | ||
| 291 | extern unsigned long msecs_to_jiffies(const unsigned int m); | 292 | extern unsigned long __msecs_to_jiffies(const unsigned int m); |
| 292 | extern unsigned long usecs_to_jiffies(const unsigned int u); | 293 | #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
| 294 | /* | ||
| 295 | * HZ is equal to or smaller than 1000, and 1000 is a nice round | ||
| 296 | * multiple of HZ, divide with the factor between them, but round | ||
| 297 | * upwards: | ||
| 298 | */ | ||
| 299 | static inline unsigned long _msecs_to_jiffies(const unsigned int m) | ||
| 300 | { | ||
| 301 | return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); | ||
| 302 | } | ||
| 303 | #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) | ||
| 304 | /* | ||
| 305 | * HZ is larger than 1000, and HZ is a nice round multiple of 1000 - | ||
| 306 | * simply multiply with the factor between them. | ||
| 307 | * | ||
| 308 | * But first make sure the multiplication result cannot overflow: | ||
| 309 | */ | ||
| 310 | static inline unsigned long _msecs_to_jiffies(const unsigned int m) | ||
| 311 | { | ||
| 312 | if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) | ||
| 313 | return MAX_JIFFY_OFFSET; | ||
| 314 | return m * (HZ / MSEC_PER_SEC); | ||
| 315 | } | ||
| 316 | #else | ||
| 317 | /* | ||
| 318 | * Generic case - multiply, round and divide. But first check that if | ||
| 319 | * we are doing a net multiplication, that we wouldn't overflow: | ||
| 320 | */ | ||
| 321 | static inline unsigned long _msecs_to_jiffies(const unsigned int m) | ||
| 322 | { | ||
| 323 | if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) | ||
| 324 | return MAX_JIFFY_OFFSET; | ||
| 325 | |||
| 326 | return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; | ||
| 327 | } | ||
| 328 | #endif | ||
| 329 | /** | ||
| 330 | * msecs_to_jiffies: - convert milliseconds to jiffies | ||
| 331 | * @m: time in milliseconds | ||
| 332 | * | ||
| 333 | * conversion is done as follows: | ||
| 334 | * | ||
| 335 | * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) | ||
| 336 | * | ||
| 337 | * - 'too large' values [that would result in larger than | ||
| 338 | * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. | ||
| 339 | * | ||
| 340 | * - all other values are converted to jiffies by either multiplying | ||
| 341 | * the input value by a factor or dividing it with a factor and | ||
| 342 | * handling any 32-bit overflows. | ||
| 343 | * for the details see __msecs_to_jiffies() | ||
| 344 | * | ||
| 345 | * msecs_to_jiffies() checks for the passed in value being a constant | ||
| 346 | * via __builtin_constant_p() allowing gcc to eliminate most of the | ||
| 347 | * code, __msecs_to_jiffies() is called if the value passed does not | ||
| 348 | * allow constant folding and the actual conversion must be done at | ||
| 349 | * runtime. | ||
| 350 | * the HZ range specific helpers _msecs_to_jiffies() are called both | ||
| 351 | * directly here and from __msecs_to_jiffies() in the case where | ||
| 352 | * constant folding is not possible. | ||
| 353 | */ | ||
| 354 | static inline unsigned long msecs_to_jiffies(const unsigned int m) | ||
| 355 | { | ||
| 356 | if (__builtin_constant_p(m)) { | ||
| 357 | if ((int)m < 0) | ||
| 358 | return MAX_JIFFY_OFFSET; | ||
| 359 | return _msecs_to_jiffies(m); | ||
| 360 | } else { | ||
| 361 | return __msecs_to_jiffies(m); | ||
| 362 | } | ||
| 363 | } | ||
| 364 | |||
| 365 | extern unsigned long __usecs_to_jiffies(const unsigned int u); | ||
| 366 | #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) | ||
| 367 | static inline unsigned long _usecs_to_jiffies(const unsigned int u) | ||
| 368 | { | ||
| 369 | return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); | ||
| 370 | } | ||
| 371 | #elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) | ||
| 372 | static inline unsigned long _usecs_to_jiffies(const unsigned int u) | ||
| 373 | { | ||
| 374 | return u * (HZ / USEC_PER_SEC); | ||
| 375 | } | ||
| 378 | #else | ||
| 379 | static inline unsigned long _usecs_to_jiffies(const unsigned int u) | ||
| 380 | { | ||
| 381 | return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) | ||
| 382 | >> USEC_TO_HZ_SHR32; | ||
| 383 | } | ||
| 384 | #endif | ||
| 385 | |||
| 386 | /** | ||
| 387 | * usecs_to_jiffies: - convert microseconds to jiffies | ||
| 388 | * @u: time in microseconds | ||
| 389 | * | ||
| 390 | * conversion is done as follows: | ||
| 391 | * | ||
| 392 | * - 'too large' values [that would result in larger than | ||
| 393 | * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. | ||
| 394 | * | ||
| 395 | * - all other values are converted to jiffies by either multiplying | ||
| 396 | * the input value by a factor or dividing it with a factor and | ||
| 397 | * handling any 32-bit overflows as for msecs_to_jiffies. | ||
| 398 | * | ||
| 399 | * usecs_to_jiffies() checks for the passed in value being a constant | ||
| 400 | * via __builtin_constant_p() allowing gcc to eliminate most of the | ||
| 401 | * code, __usecs_to_jiffies() is called if the value passed does not | ||
| 402 | * allow constant folding and the actual conversion must be done at | ||
| 403 | * runtime. | ||
| 404 | * the HZ range specific helpers _usecs_to_jiffies() are called both | ||
| 405 | * directly here and from __msecs_to_jiffies() in the case where | ||
| 406 | * constant folding is not possible. | ||
| 407 | */ | ||
| 408 | static inline unsigned long usecs_to_jiffies(const unsigned int u) | ||
| 409 | { | ||
| 410 | if (__builtin_constant_p(u)) { | ||
| 411 | if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) | ||
| 412 | return MAX_JIFFY_OFFSET; | ||
| 413 | return _usecs_to_jiffies(u); | ||
| 414 | } else { | ||
| 415 | return __usecs_to_jiffies(u); | ||
| 416 | } | ||
| 417 | } | ||
| 418 | |||
| 293 | extern unsigned long timespec_to_jiffies(const struct timespec *value); | 419 | extern unsigned long timespec_to_jiffies(const struct timespec *value); |
| 294 | extern void jiffies_to_timespec(const unsigned long jiffies, | 420 | extern void jiffies_to_timespec(const unsigned long jiffies, |
| 295 | struct timespec *value); | 421 | struct timespec *value); |
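The constant/runtime split documented above can be seen in a short sketch, assuming a caller with both a fixed and a variable timeout (demo_timeouts and timeout_ms are hypothetical):

#include <linux/jiffies.h>

static unsigned long demo_timeouts(unsigned int timeout_ms)
{
	/* Constant argument: gcc folds _msecs_to_jiffies() at compile time. */
	unsigned long fixed = msecs_to_jiffies(100);

	/* Runtime argument: falls back to the out-of-line __msecs_to_jiffies(). */
	return fixed + msecs_to_jiffies(timeout_ms);
}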
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3a5b48e52a9e..060dd7b61c6d 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -244,7 +244,8 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro) | |||
| 244 | 244 | ||
| 245 | #if defined(CONFIG_MMU) && \ | 245 | #if defined(CONFIG_MMU) && \ |
| 246 | (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) | 246 | (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) |
| 247 | void might_fault(void); | 247 | #define might_fault() __might_fault(__FILE__, __LINE__) |
| 248 | void __might_fault(const char *file, int line); | ||
| 248 | #else | 249 | #else |
| 249 | static inline void might_fault(void) { } | 250 | static inline void might_fault(void) { } |
| 250 | #endif | 251 | #endif |
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 5fc3d1083071..2b6a204bd8d4 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
| @@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) | |||
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | #if BITS_PER_LONG < 64 | 168 | #if BITS_PER_LONG < 64 |
| 169 | extern u64 __ktime_divns(const ktime_t kt, s64 div); | 169 | extern s64 __ktime_divns(const ktime_t kt, s64 div); |
| 170 | static inline u64 ktime_divns(const ktime_t kt, s64 div) | 170 | static inline s64 ktime_divns(const ktime_t kt, s64 div) |
| 171 | { | 171 | { |
| 172 | /* | ||
| 173 | * Negative divisors could cause an infinite loop, | ||
| 174 | * so bug out here. | ||
| 175 | */ | ||
| 176 | BUG_ON(div < 0); | ||
| 172 | if (__builtin_constant_p(div) && !(div >> 32)) { | 177 | if (__builtin_constant_p(div) && !(div >> 32)) { |
| 173 | u64 ns = kt.tv64; | 178 | s64 ns = kt.tv64; |
| 174 | do_div(ns, div); | 179 | u64 tmp = ns < 0 ? -ns : ns; |
| 175 | return ns; | 180 | |
| 181 | do_div(tmp, div); | ||
| 182 | return ns < 0 ? -tmp : tmp; | ||
| 176 | } else { | 183 | } else { |
| 177 | return __ktime_divns(kt, div); | 184 | return __ktime_divns(kt, div); |
| 178 | } | 185 | } |
| 179 | } | 186 | } |
| 180 | #else /* BITS_PER_LONG < 64 */ | 187 | #else /* BITS_PER_LONG < 64 */ |
| 181 | # define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) | 188 | static inline s64 ktime_divns(const ktime_t kt, s64 div) |
| 189 | { | ||
| 190 | /* | ||
| 191 | * 32-bit implementation cannot handle negative divisors, | ||
| 192 | * so catch them on 64bit as well. | ||
| 193 | */ | ||
| 194 | WARN_ON(div < 0); | ||
| 195 | return kt.tv64 / div; | ||
| 196 | } | ||
| 182 | #endif | 197 | #endif |
| 183 | 198 | ||
| 184 | static inline s64 ktime_to_us(const ktime_t kt) | 199 | static inline s64 ktime_to_us(const ktime_t kt) |
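A small sketch of the sign handling added above, assuming a negative interval (demo_div is hypothetical); on 32-bit, the old do_div()-only path returned garbage for negative dividends:

#include <linux/ktime.h>

static s64 demo_div(void)
{
	ktime_t kt = { .tv64 = -1500 };		/* -1500 ns */

	/* Now rounds toward zero to -1; divisors must be positive. */
	return ktime_divns(kt, NSEC_PER_USEC);
}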
diff --git a/include/linux/lglock.h b/include/linux/lglock.h index 0081f000e34b..c92ebd100d9b 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h | |||
| @@ -52,10 +52,15 @@ struct lglock { | |||
| 52 | static struct lglock name = { .lock = &name ## _lock } | 52 | static struct lglock name = { .lock = &name ## _lock } |
| 53 | 53 | ||
| 54 | void lg_lock_init(struct lglock *lg, char *name); | 54 | void lg_lock_init(struct lglock *lg, char *name); |
| 55 | |||
| 55 | void lg_local_lock(struct lglock *lg); | 56 | void lg_local_lock(struct lglock *lg); |
| 56 | void lg_local_unlock(struct lglock *lg); | 57 | void lg_local_unlock(struct lglock *lg); |
| 57 | void lg_local_lock_cpu(struct lglock *lg, int cpu); | 58 | void lg_local_lock_cpu(struct lglock *lg, int cpu); |
| 58 | void lg_local_unlock_cpu(struct lglock *lg, int cpu); | 59 | void lg_local_unlock_cpu(struct lglock *lg, int cpu); |
| 60 | |||
| 61 | void lg_double_lock(struct lglock *lg, int cpu1, int cpu2); | ||
| 62 | void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2); | ||
| 63 | |||
| 59 | void lg_global_lock(struct lglock *lg); | 64 | void lg_global_lock(struct lglock *lg); |
| 60 | void lg_global_unlock(struct lglock *lg); | 65 | void lg_global_unlock(struct lglock *lg); |
| 61 | 66 | ||
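A usage sketch for the new pair-locking helpers, assuming a hypothetical lglock (demo_lg); to avoid ABBA deadlock the implementation is expected to acquire the two per-cpu locks in a stable cpu order, so callers may pass the cpus either way around:

#include <linux/lglock.h>

static DEFINE_STATIC_LGLOCK(demo_lg);

static void demo_pair(int cpu1, int cpu2)
{
	lg_double_lock(&demo_lg, cpu1, cpu2);
	/* both cpus' per-cpu locks held here */
	lg_double_unlock(&demo_lg, cpu1, cpu2);
}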
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 066ba4157541..2722111591a3 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -130,8 +130,8 @@ enum bounce_type { | |||
| 130 | }; | 130 | }; |
| 131 | 131 | ||
| 132 | struct lock_class_stats { | 132 | struct lock_class_stats { |
| 133 | unsigned long contention_point[4]; | 133 | unsigned long contention_point[LOCKSTAT_POINTS]; |
| 134 | unsigned long contending_point[4]; | 134 | unsigned long contending_point[LOCKSTAT_POINTS]; |
| 135 | struct lock_time read_waittime; | 135 | struct lock_time read_waittime; |
| 136 | struct lock_time write_waittime; | 136 | struct lock_time write_waittime; |
| 137 | struct lock_time read_holdtime; | 137 | struct lock_time read_holdtime; |
diff --git a/include/linux/namei.h b/include/linux/namei.h index c8990779f0c3..d8c6334cd150 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -1,16 +1,15 @@ | |||
| 1 | #ifndef _LINUX_NAMEI_H | 1 | #ifndef _LINUX_NAMEI_H |
| 2 | #define _LINUX_NAMEI_H | 2 | #define _LINUX_NAMEI_H |
| 3 | 3 | ||
| 4 | #include <linux/dcache.h> | 4 | #include <linux/kernel.h> |
| 5 | #include <linux/errno.h> | ||
| 6 | #include <linux/linkage.h> | ||
| 7 | #include <linux/path.h> | 5 | #include <linux/path.h> |
| 8 | 6 | #include <linux/fcntl.h> | |
| 9 | struct vfsmount; | 7 | #include <linux/errno.h> |
| 10 | struct nameidata; | ||
| 11 | 8 | ||
| 12 | enum { MAX_NESTED_LINKS = 8 }; | 9 | enum { MAX_NESTED_LINKS = 8 }; |
| 13 | 10 | ||
| 11 | #define MAXSYMLINKS 40 | ||
| 12 | |||
| 14 | /* | 13 | /* |
| 15 | * Type of the last component on LOOKUP_PARENT | 14 | * Type of the last component on LOOKUP_PARENT |
| 16 | */ | 15 | */ |
| @@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; | |||
| 45 | #define LOOKUP_ROOT 0x2000 | 44 | #define LOOKUP_ROOT 0x2000 |
| 46 | #define LOOKUP_EMPTY 0x4000 | 45 | #define LOOKUP_EMPTY 0x4000 |
| 47 | 46 | ||
| 48 | extern int user_path_at(int, const char __user *, unsigned, struct path *); | ||
| 49 | extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); | 47 | extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); |
| 50 | 48 | ||
| 51 | #define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path) | 49 | static inline int user_path_at(int dfd, const char __user *name, unsigned flags, |
| 52 | #define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path) | 50 | struct path *path) |
| 53 | #define user_path_dir(name, path) \ | 51 | { |
| 54 | user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path) | 52 | return user_path_at_empty(dfd, name, flags, path, NULL); |
| 53 | } | ||
| 54 | |||
| 55 | static inline int user_path(const char __user *name, struct path *path) | ||
| 56 | { | ||
| 57 | return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL); | ||
| 58 | } | ||
| 59 | |||
| 60 | static inline int user_lpath(const char __user *name, struct path *path) | ||
| 61 | { | ||
| 62 | return user_path_at_empty(AT_FDCWD, name, 0, path, NULL); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline int user_path_dir(const char __user *name, struct path *path) | ||
| 66 | { | ||
| 67 | return user_path_at_empty(AT_FDCWD, name, | ||
| 68 | LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL); | ||
| 69 | } | ||
| 55 | 70 | ||
| 56 | extern int kern_path(const char *, unsigned, struct path *); | 71 | extern int kern_path(const char *, unsigned, struct path *); |
| 57 | 72 | ||
| @@ -70,9 +85,7 @@ extern int follow_up(struct path *); | |||
| 70 | extern struct dentry *lock_rename(struct dentry *, struct dentry *); | 85 | extern struct dentry *lock_rename(struct dentry *, struct dentry *); |
| 71 | extern void unlock_rename(struct dentry *, struct dentry *); | 86 | extern void unlock_rename(struct dentry *, struct dentry *); |
| 72 | 87 | ||
| 73 | extern void nd_jump_link(struct nameidata *nd, struct path *path); | 88 | extern void nd_jump_link(struct path *path); |
| 74 | extern void nd_set_link(struct nameidata *nd, char *path); | ||
| 75 | extern char *nd_get_link(struct nameidata *nd); | ||
| 76 | 89 | ||
| 77 | static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) | 90 | static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) |
| 78 | { | 91 | { |
diff --git a/include/linux/of.h b/include/linux/of.h index ddeaae6d2083..b871ff9d81d7 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -121,6 +121,8 @@ extern struct device_node *of_stdout; | |||
| 121 | extern raw_spinlock_t devtree_lock; | 121 | extern raw_spinlock_t devtree_lock; |
| 122 | 122 | ||
| 123 | #ifdef CONFIG_OF | 123 | #ifdef CONFIG_OF |
| 124 | void of_core_init(void); | ||
| 125 | |||
| 124 | static inline bool is_of_node(struct fwnode_handle *fwnode) | 126 | static inline bool is_of_node(struct fwnode_handle *fwnode) |
| 125 | { | 127 | { |
| 126 | return fwnode && fwnode->type == FWNODE_OF; | 128 | return fwnode && fwnode->type == FWNODE_OF; |
| @@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index); | |||
| 376 | 378 | ||
| 377 | #else /* CONFIG_OF */ | 379 | #else /* CONFIG_OF */ |
| 378 | 380 | ||
| 381 | static inline void of_core_init(void) | ||
| 382 | { | ||
| 383 | } | ||
| 384 | |||
| 379 | static inline bool is_of_node(struct fwnode_handle *fwnode) | 385 | static inline bool is_of_node(struct fwnode_handle *fwnode) |
| 380 | { | 386 | { |
| 381 | return false; | 387 | return false; |
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 3a6490e81b28..703ea5c30a33 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h | |||
| @@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock) | |||
| 32 | extern bool osq_lock(struct optimistic_spin_queue *lock); | 32 | extern bool osq_lock(struct optimistic_spin_queue *lock); |
| 33 | extern void osq_unlock(struct optimistic_spin_queue *lock); | 33 | extern void osq_unlock(struct optimistic_spin_queue *lock); |
| 34 | 34 | ||
| 35 | static inline bool osq_is_locked(struct optimistic_spin_queue *lock) | ||
| 36 | { | ||
| 37 | return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL; | ||
| 38 | } | ||
| 39 | |||
| 35 | #endif | 40 | #endif |
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 50e50095c8d1..84a109449610 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h | |||
| @@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc); | |||
| 41 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); | 41 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); |
| 42 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); | 42 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); |
| 43 | s64 __percpu_counter_sum(struct percpu_counter *fbc); | 43 | s64 __percpu_counter_sum(struct percpu_counter *fbc); |
| 44 | int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); | 44 | int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); |
| 45 | |||
| 46 | static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | ||
| 47 | { | ||
| 48 | return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); | ||
| 49 | } | ||
| 45 | 50 | ||
| 46 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 51 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 47 | { | 52 | { |
| @@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | |||
| 116 | return 0; | 121 | return 0; |
| 117 | } | 122 | } |
| 118 | 123 | ||
| 124 | static inline int | ||
| 125 | __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) | ||
| 126 | { | ||
| 127 | return percpu_counter_compare(fbc, rhs); | ||
| 128 | } | ||
| 129 | |||
| 119 | static inline void | 130 | static inline void |
| 120 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 131 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 121 | { | 132 | { |
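A sketch of the batch-aware compare added above, assuming CONFIG_SMP and a hypothetical limit check (demo_over_limit); a larger batch tolerates more per-cpu drift before forcing an exact sum:

#include <linux/percpu_counter.h>

static bool demo_over_limit(struct percpu_counter *counter, s64 limit)
{
	/* Returns -1/0/+1; a wider batch widens the fast-path tolerance. */
	return __percpu_counter_compare(counter, limit,
					percpu_counter_batch * 8) > 0;
}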
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 61992cf2e977..1b82d44b0a02 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -92,8 +92,6 @@ struct hw_perf_event_extra { | |||
| 92 | int idx; /* index in shared_regs->regs[] */ | 92 | int idx; /* index in shared_regs->regs[] */ |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | struct event_constraint; | ||
| 96 | |||
| 97 | /** | 95 | /** |
| 98 | * struct hw_perf_event - performance event hardware details: | 96 | * struct hw_perf_event - performance event hardware details: |
| 99 | */ | 97 | */ |
| @@ -112,8 +110,6 @@ struct hw_perf_event { | |||
| 112 | 110 | ||
| 113 | struct hw_perf_event_extra extra_reg; | 111 | struct hw_perf_event_extra extra_reg; |
| 114 | struct hw_perf_event_extra branch_reg; | 112 | struct hw_perf_event_extra branch_reg; |
| 115 | |||
| 116 | struct event_constraint *constraint; | ||
| 117 | }; | 113 | }; |
| 118 | struct { /* software */ | 114 | struct { /* software */ |
| 119 | struct hrtimer hrtimer; | 115 | struct hrtimer hrtimer; |
| @@ -124,7 +120,7 @@ struct hw_perf_event { | |||
| 124 | }; | 120 | }; |
| 125 | struct { /* intel_cqm */ | 121 | struct { /* intel_cqm */ |
| 126 | int cqm_state; | 122 | int cqm_state; |
| 127 | int cqm_rmid; | 123 | u32 cqm_rmid; |
| 128 | struct list_head cqm_events_entry; | 124 | struct list_head cqm_events_entry; |
| 129 | struct list_head cqm_groups_entry; | 125 | struct list_head cqm_groups_entry; |
| 130 | struct list_head cqm_group_entry; | 126 | struct list_head cqm_group_entry; |
| @@ -566,8 +562,12 @@ struct perf_cpu_context { | |||
| 566 | struct perf_event_context *task_ctx; | 562 | struct perf_event_context *task_ctx; |
| 567 | int active_oncpu; | 563 | int active_oncpu; |
| 568 | int exclusive; | 564 | int exclusive; |
| 565 | |||
| 566 | raw_spinlock_t hrtimer_lock; | ||
| 569 | struct hrtimer hrtimer; | 567 | struct hrtimer hrtimer; |
| 570 | ktime_t hrtimer_interval; | 568 | ktime_t hrtimer_interval; |
| 569 | unsigned int hrtimer_active; | ||
| 570 | |||
| 571 | struct pmu *unique_pmu; | 571 | struct pmu *unique_pmu; |
| 572 | struct perf_cgroup *cgrp; | 572 | struct perf_cgroup *cgrp; |
| 573 | }; | 573 | }; |
| @@ -734,6 +734,22 @@ extern int perf_event_overflow(struct perf_event *event, | |||
| 734 | struct perf_sample_data *data, | 734 | struct perf_sample_data *data, |
| 735 | struct pt_regs *regs); | 735 | struct pt_regs *regs); |
| 736 | 736 | ||
| 737 | extern void perf_event_output(struct perf_event *event, | ||
| 738 | struct perf_sample_data *data, | ||
| 739 | struct pt_regs *regs); | ||
| 740 | |||
| 741 | extern void | ||
| 742 | perf_event_header__init_id(struct perf_event_header *header, | ||
| 743 | struct perf_sample_data *data, | ||
| 744 | struct perf_event *event); | ||
| 745 | extern void | ||
| 746 | perf_event__output_id_sample(struct perf_event *event, | ||
| 747 | struct perf_output_handle *handle, | ||
| 748 | struct perf_sample_data *sample); | ||
| 749 | |||
| 750 | extern void | ||
| 751 | perf_log_lost_samples(struct perf_event *event, u64 lost); | ||
| 752 | |||
| 737 | static inline bool is_sampling_event(struct perf_event *event) | 753 | static inline bool is_sampling_event(struct perf_event *event) |
| 738 | { | 754 | { |
| 739 | return event->attr.sample_period != 0; | 755 | return event->attr.sample_period != 0; |
| @@ -798,11 +814,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) | |||
| 798 | 814 | ||
| 799 | extern struct static_key_deferred perf_sched_events; | 815 | extern struct static_key_deferred perf_sched_events; |
| 800 | 816 | ||
| 817 | static __always_inline bool | ||
| 818 | perf_sw_migrate_enabled(void) | ||
| 819 | { | ||
| 820 | if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) | ||
| 821 | return true; | ||
| 822 | return false; | ||
| 823 | } | ||
| 824 | |||
| 825 | static inline void perf_event_task_migrate(struct task_struct *task) | ||
| 826 | { | ||
| 827 | if (perf_sw_migrate_enabled()) | ||
| 828 | task->sched_migrated = 1; | ||
| 829 | } | ||
| 830 | |||
| 801 | static inline void perf_event_task_sched_in(struct task_struct *prev, | 831 | static inline void perf_event_task_sched_in(struct task_struct *prev, |
| 802 | struct task_struct *task) | 832 | struct task_struct *task) |
| 803 | { | 833 | { |
| 804 | if (static_key_false(&perf_sched_events.key)) | 834 | if (static_key_false(&perf_sched_events.key)) |
| 805 | __perf_event_task_sched_in(prev, task); | 835 | __perf_event_task_sched_in(prev, task); |
| 836 | |||
| 837 | if (perf_sw_migrate_enabled() && task->sched_migrated) { | ||
| 838 | struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); | ||
| 839 | |||
| 840 | perf_fetch_caller_regs(regs); | ||
| 841 | ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); | ||
| 842 | task->sched_migrated = 0; | ||
| 843 | } | ||
| 806 | } | 844 | } |
| 807 | 845 | ||
| 808 | static inline void perf_event_task_sched_out(struct task_struct *prev, | 846 | static inline void perf_event_task_sched_out(struct task_struct *prev, |
| @@ -925,6 +963,8 @@ perf_aux_output_skip(struct perf_output_handle *handle, | |||
| 925 | static inline void * | 963 | static inline void * |
| 926 | perf_get_aux(struct perf_output_handle *handle) { return NULL; } | 964 | perf_get_aux(struct perf_output_handle *handle) { return NULL; } |
| 927 | static inline void | 965 | static inline void |
| 966 | perf_event_task_migrate(struct task_struct *task) { } | ||
| 967 | static inline void | ||
| 928 | perf_event_task_sched_in(struct task_struct *prev, | 968 | perf_event_task_sched_in(struct task_struct *prev, |
| 929 | struct task_struct *task) { } | 969 | struct task_struct *task) { } |
| 930 | static inline void | 970 | static inline void |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index de83b4eb1642..0f1534acaf60 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
| @@ -10,13 +10,117 @@ | |||
| 10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
| 11 | 11 | ||
| 12 | /* | 12 | /* |
| 13 | * We use the MSB mostly because its available; see <linux/preempt_mask.h> for | 13 | * We put the hardirq and softirq counter into the preemption |
| 14 | * the other bits -- can't include that header due to inclusion hell. | 14 | * counter. The bitmask has the following meaning: |
| 15 | * | ||
| 16 | * - bits 0-7 are the preemption count (max preemption depth: 256) | ||
| 17 | * - bits 8-15 are the softirq count (max # of softirqs: 256) | ||
| 18 | * | ||
| 19 | * The hardirq count could in theory be the same as the number of | ||
| 20 | * interrupts in the system, but we run all interrupt handlers with | ||
| 21 | * interrupts disabled, so we cannot have nesting interrupts. Though | ||
| 22 | * there are a few palaeontologic drivers which reenable interrupts in | ||
| 23 | * the handler, so we need more than one bit here. | ||
| 24 | * | ||
| 25 | * PREEMPT_MASK: 0x000000ff | ||
| 26 | * SOFTIRQ_MASK: 0x0000ff00 | ||
| 27 | * HARDIRQ_MASK: 0x000f0000 | ||
| 28 | * NMI_MASK: 0x00100000 | ||
| 29 | * PREEMPT_ACTIVE: 0x00200000 | ||
| 30 | * PREEMPT_NEED_RESCHED: 0x80000000 | ||
| 15 | */ | 31 | */ |
| 32 | #define PREEMPT_BITS 8 | ||
| 33 | #define SOFTIRQ_BITS 8 | ||
| 34 | #define HARDIRQ_BITS 4 | ||
| 35 | #define NMI_BITS 1 | ||
| 36 | |||
| 37 | #define PREEMPT_SHIFT 0 | ||
| 38 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | ||
| 39 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | ||
| 40 | #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) | ||
| 41 | |||
| 42 | #define __IRQ_MASK(x) ((1UL << (x))-1) | ||
| 43 | |||
| 44 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) | ||
| 45 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | ||
| 46 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) | ||
| 47 | #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) | ||
| 48 | |||
| 49 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) | ||
| 50 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) | ||
| 51 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | ||
| 52 | #define NMI_OFFSET (1UL << NMI_SHIFT) | ||
| 53 | |||
| 54 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) | ||
| 55 | |||
| 56 | #define PREEMPT_ACTIVE_BITS 1 | ||
| 57 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) | ||
| 58 | #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) | ||
| 59 | |||
| 60 | /* We use the MSB mostly because it's available */ | ||
| 16 | #define PREEMPT_NEED_RESCHED 0x80000000 | 61 | #define PREEMPT_NEED_RESCHED 0x80000000 |
| 17 | 62 | ||
| 63 | /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ | ||
| 18 | #include <asm/preempt.h> | 64 | #include <asm/preempt.h> |
| 19 | 65 | ||
| 66 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) | ||
| 67 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) | ||
| 68 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | ||
| 69 | | NMI_MASK)) | ||
| 70 | |||
| 71 | /* | ||
| 72 | * Are we doing bottom half or hardware interrupt processing? | ||
| 73 | * Are we in a softirq context? Interrupt context? | ||
| 74 | * in_softirq - Are we currently processing softirq or have bh disabled? | ||
| 75 | * in_serving_softirq - Are we currently processing softirq? | ||
| 76 | */ | ||
| 77 | #define in_irq() (hardirq_count()) | ||
| 78 | #define in_softirq() (softirq_count()) | ||
| 79 | #define in_interrupt() (irq_count()) | ||
| 80 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) | ||
| 81 | |||
| 82 | /* | ||
| 83 | * Are we in NMI context? | ||
| 84 | */ | ||
| 85 | #define in_nmi() (preempt_count() & NMI_MASK) | ||
| 86 | |||
| 87 | #if defined(CONFIG_PREEMPT_COUNT) | ||
| 88 | # define PREEMPT_DISABLE_OFFSET 1 | ||
| 89 | #else | ||
| 90 | # define PREEMPT_DISABLE_OFFSET 0 | ||
| 91 | #endif | ||
| 92 | |||
| 93 | /* | ||
| 94 | * The preempt_count offset needed for things like: | ||
| 95 | * | ||
| 96 | * spin_lock_bh() | ||
| 97 | * | ||
| 98 | * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and | ||
| 99 | * softirqs, such that unlock sequences of: | ||
| 100 | * | ||
| 101 | * spin_unlock(); | ||
| 102 | * local_bh_enable(); | ||
| 103 | * | ||
| 104 | * Work as expected. | ||
| 105 | */ | ||
| 106 | #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET) | ||
| 107 | |||
| 108 | /* | ||
| 109 | * Are we running in atomic context? WARNING: this macro cannot | ||
| 110 | * always detect atomic context; in particular, it cannot know about | ||
| 111 | * held spinlocks in non-preemptible kernels. Thus it should not be | ||
| 112 | * used in the general case to determine whether sleeping is possible. | ||
| 113 | * Do not use in_atomic() in driver code. | ||
| 114 | */ | ||
| 115 | #define in_atomic() (preempt_count() != 0) | ||
| 116 | |||
| 117 | /* | ||
| 118 | * Check whether we were atomic before we did preempt_disable(): | ||
| 119 | * (used by the scheduler) | ||
| 120 | */ | ||
| 121 | #define in_atomic_preempt_off() \ | ||
| 122 | ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET) | ||
| 123 | |||
| 20 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) | 124 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
| 21 | extern void preempt_count_add(int val); | 125 | extern void preempt_count_add(int val); |
| 22 | extern void preempt_count_sub(int val); | 126 | extern void preempt_count_sub(int val); |
| @@ -33,6 +137,18 @@ extern void preempt_count_sub(int val); | |||
| 33 | #define preempt_count_inc() preempt_count_add(1) | 137 | #define preempt_count_inc() preempt_count_add(1) |
| 34 | #define preempt_count_dec() preempt_count_sub(1) | 138 | #define preempt_count_dec() preempt_count_sub(1) |
| 35 | 139 | ||
| 140 | #define preempt_active_enter() \ | ||
| 141 | do { \ | ||
| 142 | preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ | ||
| 143 | barrier(); \ | ||
| 144 | } while (0) | ||
| 145 | |||
| 146 | #define preempt_active_exit() \ | ||
| 147 | do { \ | ||
| 148 | barrier(); \ | ||
| 149 | preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ | ||
| 150 | } while (0) | ||
| 151 | |||
| 36 | #ifdef CONFIG_PREEMPT_COUNT | 152 | #ifdef CONFIG_PREEMPT_COUNT |
| 37 | 153 | ||
| 38 | #define preempt_disable() \ | 154 | #define preempt_disable() \ |
| @@ -49,6 +165,8 @@ do { \ | |||
| 49 | 165 | ||
| 50 | #define preempt_enable_no_resched() sched_preempt_enable_no_resched() | 166 | #define preempt_enable_no_resched() sched_preempt_enable_no_resched() |
| 51 | 167 | ||
| 168 | #define preemptible() (preempt_count() == 0 && !irqs_disabled()) | ||
| 169 | |||
| 52 | #ifdef CONFIG_PREEMPT | 170 | #ifdef CONFIG_PREEMPT |
| 53 | #define preempt_enable() \ | 171 | #define preempt_enable() \ |
| 54 | do { \ | 172 | do { \ |
| @@ -57,52 +175,46 @@ do { \ | |||
| 57 | __preempt_schedule(); \ | 175 | __preempt_schedule(); \ |
| 58 | } while (0) | 176 | } while (0) |
| 59 | 177 | ||
| 178 | #define preempt_enable_notrace() \ | ||
| 179 | do { \ | ||
| 180 | barrier(); \ | ||
| 181 | if (unlikely(__preempt_count_dec_and_test())) \ | ||
| 182 | __preempt_schedule_notrace(); \ | ||
| 183 | } while (0) | ||
| 184 | |||
| 60 | #define preempt_check_resched() \ | 185 | #define preempt_check_resched() \ |
| 61 | do { \ | 186 | do { \ |
| 62 | if (should_resched()) \ | 187 | if (should_resched()) \ |
| 63 | __preempt_schedule(); \ | 188 | __preempt_schedule(); \ |
| 64 | } while (0) | 189 | } while (0) |
| 65 | 190 | ||
| 66 | #else | 191 | #else /* !CONFIG_PREEMPT */ |
| 67 | #define preempt_enable() \ | 192 | #define preempt_enable() \ |
| 68 | do { \ | 193 | do { \ |
| 69 | barrier(); \ | 194 | barrier(); \ |
| 70 | preempt_count_dec(); \ | 195 | preempt_count_dec(); \ |
| 71 | } while (0) | 196 | } while (0) |
| 72 | #define preempt_check_resched() do { } while (0) | ||
| 73 | #endif | ||
| 74 | |||
| 75 | #define preempt_disable_notrace() \ | ||
| 76 | do { \ | ||
| 77 | __preempt_count_inc(); \ | ||
| 78 | barrier(); \ | ||
| 79 | } while (0) | ||
| 80 | 197 | ||
| 81 | #define preempt_enable_no_resched_notrace() \ | 198 | #define preempt_enable_notrace() \ |
| 82 | do { \ | 199 | do { \ |
| 83 | barrier(); \ | 200 | barrier(); \ |
| 84 | __preempt_count_dec(); \ | 201 | __preempt_count_dec(); \ |
| 85 | } while (0) | 202 | } while (0) |
| 86 | 203 | ||
| 87 | #ifdef CONFIG_PREEMPT | 204 | #define preempt_check_resched() do { } while (0) |
| 88 | 205 | #endif /* CONFIG_PREEMPT */ | |
| 89 | #ifndef CONFIG_CONTEXT_TRACKING | ||
| 90 | #define __preempt_schedule_context() __preempt_schedule() | ||
| 91 | #endif | ||
| 92 | 206 | ||
| 93 | #define preempt_enable_notrace() \ | 207 | #define preempt_disable_notrace() \ |
| 94 | do { \ | 208 | do { \ |
| 209 | __preempt_count_inc(); \ | ||
| 95 | barrier(); \ | 210 | barrier(); \ |
| 96 | if (unlikely(__preempt_count_dec_and_test())) \ | ||
| 97 | __preempt_schedule_context(); \ | ||
| 98 | } while (0) | 211 | } while (0) |
| 99 | #else | 212 | |
| 100 | #define preempt_enable_notrace() \ | 213 | #define preempt_enable_no_resched_notrace() \ |
| 101 | do { \ | 214 | do { \ |
| 102 | barrier(); \ | 215 | barrier(); \ |
| 103 | __preempt_count_dec(); \ | 216 | __preempt_count_dec(); \ |
| 104 | } while (0) | 217 | } while (0) |
| 105 | #endif | ||
| 106 | 218 | ||
| 107 | #else /* !CONFIG_PREEMPT_COUNT */ | 219 | #else /* !CONFIG_PREEMPT_COUNT */ |
| 108 | 220 | ||
| @@ -121,6 +233,7 @@ do { \ | |||
| 121 | #define preempt_disable_notrace() barrier() | 233 | #define preempt_disable_notrace() barrier() |
| 122 | #define preempt_enable_no_resched_notrace() barrier() | 234 | #define preempt_enable_no_resched_notrace() barrier() |
| 123 | #define preempt_enable_notrace() barrier() | 235 | #define preempt_enable_notrace() barrier() |
| 236 | #define preemptible() 0 | ||
| 124 | 237 | ||
| 125 | #endif /* CONFIG_PREEMPT_COUNT */ | 238 | #endif /* CONFIG_PREEMPT_COUNT */ |
| 126 | 239 | ||
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h deleted file mode 100644 index dbeec4d4a3be..000000000000 --- a/include/linux/preempt_mask.h +++ /dev/null | |||
| @@ -1,117 +0,0 @@ | |||
| 1 | #ifndef LINUX_PREEMPT_MASK_H | ||
| 2 | #define LINUX_PREEMPT_MASK_H | ||
| 3 | |||
| 4 | #include <linux/preempt.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * We put the hardirq and softirq counter into the preemption | ||
| 8 | * counter. The bitmask has the following meaning: | ||
| 9 | * | ||
| 10 | * - bits 0-7 are the preemption count (max preemption depth: 256) | ||
| 11 | * - bits 8-15 are the softirq count (max # of softirqs: 256) | ||
| 12 | * | ||
| 13 | * The hardirq count could in theory be the same as the number of | ||
| 14 | * interrupts in the system, but we run all interrupt handlers with | ||
| 15 | * interrupts disabled, so we cannot have nesting interrupts. Though | ||
| 16 | * there are a few palaeontologic drivers which reenable interrupts in | ||
| 17 | * the handler, so we need more than one bit here. | ||
| 18 | * | ||
| 19 | * PREEMPT_MASK: 0x000000ff | ||
| 20 | * SOFTIRQ_MASK: 0x0000ff00 | ||
| 21 | * HARDIRQ_MASK: 0x000f0000 | ||
| 22 | * NMI_MASK: 0x00100000 | ||
| 23 | * PREEMPT_ACTIVE: 0x00200000 | ||
| 24 | */ | ||
| 25 | #define PREEMPT_BITS 8 | ||
| 26 | #define SOFTIRQ_BITS 8 | ||
| 27 | #define HARDIRQ_BITS 4 | ||
| 28 | #define NMI_BITS 1 | ||
| 29 | |||
| 30 | #define PREEMPT_SHIFT 0 | ||
| 31 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | ||
| 32 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | ||
| 33 | #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) | ||
| 34 | |||
| 35 | #define __IRQ_MASK(x) ((1UL << (x))-1) | ||
| 36 | |||
| 37 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) | ||
| 38 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | ||
| 39 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) | ||
| 40 | #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) | ||
| 41 | |||
| 42 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) | ||
| 43 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) | ||
| 44 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | ||
| 45 | #define NMI_OFFSET (1UL << NMI_SHIFT) | ||
| 46 | |||
| 47 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) | ||
| 48 | |||
| 49 | #define PREEMPT_ACTIVE_BITS 1 | ||
| 50 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) | ||
| 51 | #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) | ||
| 52 | |||
| 53 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) | ||
| 54 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) | ||
| 55 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | ||
| 56 | | NMI_MASK)) | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Are we doing bottom half or hardware interrupt processing? | ||
| 60 | * Are we in a softirq context? Interrupt context? | ||
| 61 | * in_softirq - Are we currently processing softirq or have bh disabled? | ||
| 62 | * in_serving_softirq - Are we currently processing softirq? | ||
| 63 | */ | ||
| 64 | #define in_irq() (hardirq_count()) | ||
| 65 | #define in_softirq() (softirq_count()) | ||
| 66 | #define in_interrupt() (irq_count()) | ||
| 67 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Are we in NMI context? | ||
| 71 | */ | ||
| 72 | #define in_nmi() (preempt_count() & NMI_MASK) | ||
| 73 | |||
| 74 | #if defined(CONFIG_PREEMPT_COUNT) | ||
| 75 | # define PREEMPT_CHECK_OFFSET 1 | ||
| 76 | #else | ||
| 77 | # define PREEMPT_CHECK_OFFSET 0 | ||
| 78 | #endif | ||
| 79 | |||
| 80 | /* | ||
| 81 | * The preempt_count offset needed for things like: | ||
| 82 | * | ||
| 83 | * spin_lock_bh() | ||
| 84 | * | ||
| 85 | * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and | ||
| 86 | * softirqs, such that unlock sequences of: | ||
| 87 | * | ||
| 88 | * spin_unlock(); | ||
| 89 | * local_bh_enable(); | ||
| 90 | * | ||
| 91 | * Work as expected. | ||
| 92 | */ | ||
| 93 | #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET) | ||
| 94 | |||
| 95 | /* | ||
| 96 | * Are we running in atomic context? WARNING: this macro cannot | ||
| 97 | * always detect atomic context; in particular, it cannot know about | ||
| 98 | * held spinlocks in non-preemptible kernels. Thus it should not be | ||
| 99 | * used in the general case to determine whether sleeping is possible. | ||
| 100 | * Do not use in_atomic() in driver code. | ||
| 101 | */ | ||
| 102 | #define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Check whether we were atomic before we did preempt_disable(): | ||
| 106 | * (used by the scheduler, *after* releasing the kernel lock) | ||
| 107 | */ | ||
| 108 | #define in_atomic_preempt_off() \ | ||
| 109 | ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) | ||
| 110 | |||
| 111 | #ifdef CONFIG_PREEMPT_COUNT | ||
| 112 | # define preemptible() (preempt_count() == 0 && !irqs_disabled()) | ||
| 113 | #else | ||
| 114 | # define preemptible() 0 | ||
| 115 | #endif | ||
| 116 | |||
| 117 | #endif /* LINUX_PREEMPT_MASK_H */ | ||
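Although this header is deleted here (its definitions move into linux/preempt.h), the bit layout it documents is easy to sanity-check outside the kernel. This standalone C program uses the same shift arithmetic and prints exactly the mask table from the comment above.

#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x)) - 1)

int main(void)
{
	/* reproduces: 0x000000ff, 0x0000ff00, 0x000f0000, 0x00100000 */
	printf("PREEMPT_MASK: 0x%08lx\n", __IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT);
	printf("SOFTIRQ_MASK: 0x%08lx\n", __IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT);
	printf("HARDIRQ_MASK: 0x%08lx\n", __IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT);
	printf("NMI_MASK:     0x%08lx\n", __IRQ_MASK(NMI_BITS) << NMI_SHIFT);
	return 0;
}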
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index a18b16f1dc0e..17c6b1f84a77 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -29,8 +29,8 @@ | |||
| 29 | */ | 29 | */ |
| 30 | static inline void INIT_LIST_HEAD_RCU(struct list_head *list) | 30 | static inline void INIT_LIST_HEAD_RCU(struct list_head *list) |
| 31 | { | 31 | { |
| 32 | ACCESS_ONCE(list->next) = list; | 32 | WRITE_ONCE(list->next, list); |
| 33 | ACCESS_ONCE(list->prev) = list; | 33 | WRITE_ONCE(list->prev, list); |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | /* | 36 | /* |
| @@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 288 | #define list_first_or_null_rcu(ptr, type, member) \ | 288 | #define list_first_or_null_rcu(ptr, type, member) \ |
| 289 | ({ \ | 289 | ({ \ |
| 290 | struct list_head *__ptr = (ptr); \ | 290 | struct list_head *__ptr = (ptr); \ |
| 291 | struct list_head *__next = ACCESS_ONCE(__ptr->next); \ | 291 | struct list_head *__next = READ_ONCE(__ptr->next); \ |
| 292 | likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ | 292 | likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ |
| 293 | }) | 293 | }) |
| 294 | 294 | ||
| @@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, | |||
| 549 | */ | 549 | */ |
| 550 | #define hlist_for_each_entry_from_rcu(pos, member) \ | 550 | #define hlist_for_each_entry_from_rcu(pos, member) \ |
| 551 | for (; pos; \ | 551 | for (; pos; \ |
| 552 | pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ | 552 | pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ |
| 553 | typeof(*(pos)), member)) | 553 | &(pos)->member)), typeof(*(pos)), member)) |
| 554 | 554 | ||
| 555 | #endif /* __KERNEL__ */ | 555 | #endif /* __KERNEL__ */ |
| 556 | #endif | 556 | #endif |
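A short reader-side sketch, with a hypothetical item type and list, of the list_first_or_null_rcu() pattern whose internals the hunk above converts to READ_ONCE(): the lookup and any use of the entry must both sit inside the RCU read-side critical section.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	int val;
	struct list_head node;
};

static LIST_HEAD(items);	/* hypothetical RCU-protected list */

static int first_val_or_default(void)
{
	struct item *it;
	int ret = -1;

	rcu_read_lock();
	it = list_first_or_null_rcu(&items, struct item, node);
	if (it)
		ret = it->val;	/* only valid inside the read section */
	rcu_read_unlock();
	return ret;
}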
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 573a5afd5ed8..33a056bb886f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -44,6 +44,8 @@ | |||
| 44 | #include <linux/debugobjects.h> | 44 | #include <linux/debugobjects.h> |
| 45 | #include <linux/bug.h> | 45 | #include <linux/bug.h> |
| 46 | #include <linux/compiler.h> | 46 | #include <linux/compiler.h> |
| 47 | #include <linux/ktime.h> | ||
| 48 | |||
| 47 | #include <asm/barrier.h> | 49 | #include <asm/barrier.h> |
| 48 | 50 | ||
| 49 | extern int rcu_expedited; /* for sysctl */ | 51 | extern int rcu_expedited; /* for sysctl */ |
| @@ -292,10 +294,6 @@ void rcu_sched_qs(void); | |||
| 292 | void rcu_bh_qs(void); | 294 | void rcu_bh_qs(void); |
| 293 | void rcu_check_callbacks(int user); | 295 | void rcu_check_callbacks(int user); |
| 294 | struct notifier_block; | 296 | struct notifier_block; |
| 295 | void rcu_idle_enter(void); | ||
| 296 | void rcu_idle_exit(void); | ||
| 297 | void rcu_irq_enter(void); | ||
| 298 | void rcu_irq_exit(void); | ||
| 299 | int rcu_cpu_notify(struct notifier_block *self, | 297 | int rcu_cpu_notify(struct notifier_block *self, |
| 300 | unsigned long action, void *hcpu); | 298 | unsigned long action, void *hcpu); |
| 301 | 299 | ||
| @@ -364,8 +362,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu; | |||
| 364 | #define rcu_note_voluntary_context_switch(t) \ | 362 | #define rcu_note_voluntary_context_switch(t) \ |
| 365 | do { \ | 363 | do { \ |
| 366 | rcu_all_qs(); \ | 364 | rcu_all_qs(); \ |
| 367 | if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ | 365 | if (READ_ONCE((t)->rcu_tasks_holdout)) \ |
| 368 | ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ | 366 | WRITE_ONCE((t)->rcu_tasks_holdout, false); \ |
| 369 | } while (0) | 367 | } while (0) |
| 370 | #else /* #ifdef CONFIG_TASKS_RCU */ | 368 | #else /* #ifdef CONFIG_TASKS_RCU */ |
| 371 | #define TASKS_RCU(x) do { } while (0) | 369 | #define TASKS_RCU(x) do { } while (0) |
| @@ -609,7 +607,7 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 609 | 607 | ||
| 610 | #define __rcu_access_pointer(p, space) \ | 608 | #define __rcu_access_pointer(p, space) \ |
| 611 | ({ \ | 609 | ({ \ |
| 612 | typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ | 610 | typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ |
| 613 | rcu_dereference_sparse(p, space); \ | 611 | rcu_dereference_sparse(p, space); \ |
| 614 | ((typeof(*p) __force __kernel *)(_________p1)); \ | 612 | ((typeof(*p) __force __kernel *)(_________p1)); \ |
| 615 | }) | 613 | }) |
| @@ -628,21 +626,6 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 628 | ((typeof(*p) __force __kernel *)(p)); \ | 626 | ((typeof(*p) __force __kernel *)(p)); \ |
| 629 | }) | 627 | }) |
| 630 | 628 | ||
| 631 | #define __rcu_access_index(p, space) \ | ||
| 632 | ({ \ | ||
| 633 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
| 634 | rcu_dereference_sparse(p, space); \ | ||
| 635 | (_________p1); \ | ||
| 636 | }) | ||
| 637 | #define __rcu_dereference_index_check(p, c) \ | ||
| 638 | ({ \ | ||
| 639 | /* Dependency order vs. p above. */ \ | ||
| 640 | typeof(p) _________p1 = lockless_dereference(p); \ | ||
| 641 | rcu_lockdep_assert(c, \ | ||
| 642 | "suspicious rcu_dereference_index_check() usage"); \ | ||
| 643 | (_________p1); \ | ||
| 644 | }) | ||
| 645 | |||
| 646 | /** | 629 | /** |
| 647 | * RCU_INITIALIZER() - statically initialize an RCU-protected global variable | 630 | * RCU_INITIALIZER() - statically initialize an RCU-protected global variable |
| 648 | * @v: The value to statically initialize with. | 631 | * @v: The value to statically initialize with. |
| @@ -659,7 +642,7 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 659 | */ | 642 | */ |
| 660 | #define lockless_dereference(p) \ | 643 | #define lockless_dereference(p) \ |
| 661 | ({ \ | 644 | ({ \ |
| 662 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | 645 | typeof(p) _________p1 = READ_ONCE(p); \ |
| 663 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ | 646 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
| 664 | (_________p1); \ | 647 | (_________p1); \ |
| 665 | }) | 648 | }) |
| @@ -702,7 +685,7 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 702 | * @p: The pointer to read | 685 | * @p: The pointer to read |
| 703 | * | 686 | * |
| 704 | * Return the value of the specified RCU-protected pointer, but omit the | 687 | * Return the value of the specified RCU-protected pointer, but omit the |
| 705 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | 688 | * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful |
| 706 | * when the value of this pointer is accessed, but the pointer is not | 689 | * when the value of this pointer is accessed, but the pointer is not |
| 707 | * dereferenced, for example, when testing an RCU-protected pointer against | 690 | * dereferenced, for example, when testing an RCU-protected pointer against |
| 708 | * NULL. Although rcu_access_pointer() may also be used in cases where | 691 | * NULL. Although rcu_access_pointer() may also be used in cases where |
| @@ -787,47 +770,12 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 787 | #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) | 770 | #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) |
| 788 | 771 | ||
| 789 | /** | 772 | /** |
| 790 | * rcu_access_index() - fetch RCU index with no dereferencing | ||
| 791 | * @p: The index to read | ||
| 792 | * | ||
| 793 | * Return the value of the specified RCU-protected index, but omit the | ||
| 794 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | ||
| 795 | * when the value of this index is accessed, but the index is not | ||
| 796 | * dereferenced, for example, when testing an RCU-protected index against | ||
| 797 | * -1. Although rcu_access_index() may also be used in cases where | ||
| 798 | * update-side locks prevent the value of the index from changing, you | ||
| 799 | * should instead use rcu_dereference_index_protected() for this use case. | ||
| 800 | */ | ||
| 801 | #define rcu_access_index(p) __rcu_access_index((p), __rcu) | ||
| 802 | |||
| 803 | /** | ||
| 804 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
| 805 | * @p: The pointer to read, prior to dereferencing | ||
| 806 | * @c: The conditions under which the dereference will take place | ||
| 807 | * | ||
| 808 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
| 809 | * This allows rcu_dereference_index_check() to be used on integers, | ||
| 810 | * which can then be used as array indices. Attempting to use | ||
| 811 | * rcu_dereference_check() on an integer will give compiler warnings | ||
| 812 | * because the sparse address-space mechanism relies on dereferencing | ||
| 813 | * the RCU-protected pointer. Dereferencing integers is not something | ||
| 814 | * that even gcc will put up with. | ||
| 815 | * | ||
| 816 | * Note that this function does not implicitly check for RCU read-side | ||
| 817 | * critical sections. If this function gains lots of uses, it might | ||
| 818 | * make sense to provide versions for each flavor of RCU, but it does | ||
| 819 | * not make sense as of early 2010. | ||
| 820 | */ | ||
| 821 | #define rcu_dereference_index_check(p, c) \ | ||
| 822 | __rcu_dereference_index_check((p), (c)) | ||
| 823 | |||
| 824 | /** | ||
| 825 | * rcu_dereference_protected() - fetch RCU pointer when updates prevented | 773 | * rcu_dereference_protected() - fetch RCU pointer when updates prevented |
| 826 | * @p: The pointer to read, prior to dereferencing | 774 | * @p: The pointer to read, prior to dereferencing |
| 827 | * @c: The conditions under which the dereference will take place | 775 | * @c: The conditions under which the dereference will take place |
| 828 | * | 776 | * |
| 829 | * Return the value of the specified RCU-protected pointer, but omit | 777 | * Return the value of the specified RCU-protected pointer, but omit |
| 830 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This | 778 | * both the smp_read_barrier_depends() and the READ_ONCE(). This |
| 831 | * is useful in cases where update-side locks prevent the value of the | 779 | * is useful in cases where update-side locks prevent the value of the |
| 832 | * pointer from changing. Please note that this primitive does -not- | 780 | * pointer from changing. Please note that this primitive does -not- |
| 833 | * prevent the compiler from repeating this reference or combining it | 781 | * prevent the compiler from repeating this reference or combining it |
| @@ -1153,13 +1101,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 1153 | #define kfree_rcu(ptr, rcu_head) \ | 1101 | #define kfree_rcu(ptr, rcu_head) \ |
| 1154 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) | 1102 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) |
| 1155 | 1103 | ||
| 1156 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) | 1104 | #ifdef CONFIG_TINY_RCU |
| 1157 | static inline int rcu_needs_cpu(unsigned long *delta_jiffies) | 1105 | static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) |
| 1158 | { | 1106 | { |
| 1159 | *delta_jiffies = ULONG_MAX; | 1107 | *nextevt = KTIME_MAX; |
| 1160 | return 0; | 1108 | return 0; |
| 1161 | } | 1109 | } |
| 1162 | #endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ | 1110 | #endif /* #ifdef CONFIG_TINY_RCU */ |
| 1163 | 1111 | ||
| 1164 | #if defined(CONFIG_RCU_NOCB_CPU_ALL) | 1112 | #if defined(CONFIG_RCU_NOCB_CPU_ALL) |
| 1165 | static inline bool rcu_is_nocb_cpu(int cpu) { return true; } | 1113 | static inline bool rcu_is_nocb_cpu(int cpu) { return true; } |
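As a sketch of the lockless_dereference() macro updated above (hypothetical struct and pointer names): it is READ_ONCE() plus smp_read_barrier_depends(), and pairs with a publisher that uses rcu_assign_pointer() or smp_store_release().

#include <linux/rcupdate.h>

struct config {
	int threshold;
};

static struct config *cur_config;	/* hypothetical published pointer */

static int read_threshold(void)
{
	/* dependency-ordered load of the pointer and its pointee */
	struct config *c = lockless_dereference(cur_config);

	return c ? c->threshold : 0;
}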
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 937edaeb150d..3df6c1ec4e25 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -159,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void) | |||
| 159 | { | 159 | { |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | static inline void rcu_idle_enter(void) | ||
| 163 | { | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline void rcu_idle_exit(void) | ||
| 167 | { | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline void rcu_irq_enter(void) | ||
| 171 | { | ||
| 172 | } | ||
| 173 | |||
| 174 | static inline void rcu_irq_exit(void) | ||
| 175 | { | ||
| 176 | } | ||
| 177 | |||
| 162 | static inline void exit_rcu(void) | 178 | static inline void exit_rcu(void) |
| 163 | { | 179 | { |
| 164 | } | 180 | } |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d2e583a6aaca..456879143f89 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -31,9 +31,7 @@ | |||
| 31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
| 32 | 32 | ||
| 33 | void rcu_note_context_switch(void); | 33 | void rcu_note_context_switch(void); |
| 34 | #ifndef CONFIG_RCU_NOCB_CPU_ALL | 34 | int rcu_needs_cpu(u64 basem, u64 *nextevt); |
| 35 | int rcu_needs_cpu(unsigned long *delta_jiffies); | ||
| 36 | #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ | ||
| 37 | void rcu_cpu_stall_reset(void); | 35 | void rcu_cpu_stall_reset(void); |
| 38 | 36 | ||
| 39 | /* | 37 | /* |
| @@ -93,6 +91,11 @@ void rcu_force_quiescent_state(void); | |||
| 93 | void rcu_bh_force_quiescent_state(void); | 91 | void rcu_bh_force_quiescent_state(void); |
| 94 | void rcu_sched_force_quiescent_state(void); | 92 | void rcu_sched_force_quiescent_state(void); |
| 95 | 93 | ||
| 94 | void rcu_idle_enter(void); | ||
| 95 | void rcu_idle_exit(void); | ||
| 96 | void rcu_irq_enter(void); | ||
| 97 | void rcu_irq_exit(void); | ||
| 98 | |||
| 96 | void exit_rcu(void); | 99 | void exit_rcu(void); |
| 97 | 100 | ||
| 98 | void rcu_scheduler_starting(void); | 101 | void rcu_scheduler_starting(void); |
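A schematic sketch, not from this patch and much simpler than the real idle loop, of how the hooks declared above bracket a low-power wait so RCU can treat the sleeping CPU as quiescent.

static void idle_wait(void)
{
	rcu_idle_enter();	/* RCU stops expecting quiescent states from us */
	arch_cpu_idle();	/* low-power wait for the next interrupt */
	rcu_idle_exit();	/* back under RCU's watch */
}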
diff --git a/include/linux/sched.h b/include/linux/sched.h index 26a2e6122734..6633e83e608a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -25,7 +25,7 @@ struct sched_param { | |||
| 25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
| 26 | #include <linux/nodemask.h> | 26 | #include <linux/nodemask.h> |
| 27 | #include <linux/mm_types.h> | 27 | #include <linux/mm_types.h> |
| 28 | #include <linux/preempt_mask.h> | 28 | #include <linux/preempt.h> |
| 29 | 29 | ||
| 30 | #include <asm/page.h> | 30 | #include <asm/page.h> |
| 31 | #include <asm/ptrace.h> | 31 | #include <asm/ptrace.h> |
| @@ -132,6 +132,7 @@ struct fs_struct; | |||
| 132 | struct perf_event_context; | 132 | struct perf_event_context; |
| 133 | struct blk_plug; | 133 | struct blk_plug; |
| 134 | struct filename; | 134 | struct filename; |
| 135 | struct nameidata; | ||
| 135 | 136 | ||
| 136 | #define VMACACHE_BITS 2 | 137 | #define VMACACHE_BITS 2 |
| 137 | #define VMACACHE_SIZE (1U << VMACACHE_BITS) | 138 | #define VMACACHE_SIZE (1U << VMACACHE_BITS) |
| @@ -173,7 +174,12 @@ extern unsigned long nr_iowait_cpu(int cpu); | |||
| 173 | extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); | 174 | extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); |
| 174 | 175 | ||
| 175 | extern void calc_global_load(unsigned long ticks); | 176 | extern void calc_global_load(unsigned long ticks); |
| 177 | |||
| 178 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) | ||
| 176 | extern void update_cpu_load_nohz(void); | 179 | extern void update_cpu_load_nohz(void); |
| 180 | #else | ||
| 181 | static inline void update_cpu_load_nohz(void) { } | ||
| 182 | #endif | ||
| 177 | 183 | ||
| 178 | extern unsigned long get_parent_ip(unsigned long addr); | 184 | extern unsigned long get_parent_ip(unsigned long addr); |
| 179 | 185 | ||
| @@ -213,9 +219,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); | |||
| 213 | #define TASK_WAKEKILL 128 | 219 | #define TASK_WAKEKILL 128 |
| 214 | #define TASK_WAKING 256 | 220 | #define TASK_WAKING 256 |
| 215 | #define TASK_PARKED 512 | 221 | #define TASK_PARKED 512 |
| 216 | #define TASK_STATE_MAX 1024 | 222 | #define TASK_NOLOAD 1024 |
| 223 | #define TASK_STATE_MAX 2048 | ||
| 217 | 224 | ||
| 218 | #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP" | 225 | #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN" |
| 219 | 226 | ||
| 220 | extern char ___assert_task_state[1 - 2*!!( | 227 | extern char ___assert_task_state[1 - 2*!!( |
| 221 | sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; | 228 | sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; |
| @@ -225,6 +232,8 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 225 | #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) | 232 | #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) |
| 226 | #define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) | 233 | #define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) |
| 227 | 234 | ||
| 235 | #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) | ||
| 236 | |||
| 228 | /* Convenience macros for the sake of wake_up */ | 237 | /* Convenience macros for the sake of wake_up */ |
| 229 | #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) | 238 | #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) |
| 230 | #define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) | 239 | #define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) |
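A sketch of how the new TASK_IDLE state (TASK_UNINTERRUPTIBLE | TASK_NOLOAD, per the hunk above) is meant to be used, assuming a hypothetical kthread: the thread sleeps uninterruptibly without contributing to the load average.

#include <linux/kthread.h>
#include <linux/sched.h>

static int idle_worker(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_IDLE);	/* D-state, but no loadavg */
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
		/* ... process whatever we were woken for ... */
	}
	return 0;
}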
| @@ -240,7 +249,8 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 240 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) | 249 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
| 241 | #define task_contributes_to_load(task) \ | 250 | #define task_contributes_to_load(task) \ |
| 242 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ | 251 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
| 243 | (task->flags & PF_FROZEN) == 0) | 252 | (task->flags & PF_FROZEN) == 0 && \ |
| 253 | (task->state & TASK_NOLOAD) == 0) | ||
| 244 | 254 | ||
| 245 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | 255 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 246 | 256 | ||
| @@ -252,7 +262,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 252 | #define set_task_state(tsk, state_value) \ | 262 | #define set_task_state(tsk, state_value) \ |
| 253 | do { \ | 263 | do { \ |
| 254 | (tsk)->task_state_change = _THIS_IP_; \ | 264 | (tsk)->task_state_change = _THIS_IP_; \ |
| 255 | set_mb((tsk)->state, (state_value)); \ | 265 | smp_store_mb((tsk)->state, (state_value)); \ |
| 256 | } while (0) | 266 | } while (0) |
| 257 | 267 | ||
| 258 | /* | 268 | /* |
| @@ -274,7 +284,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 274 | #define set_current_state(state_value) \ | 284 | #define set_current_state(state_value) \ |
| 275 | do { \ | 285 | do { \ |
| 276 | current->task_state_change = _THIS_IP_; \ | 286 | current->task_state_change = _THIS_IP_; \ |
| 277 | set_mb(current->state, (state_value)); \ | 287 | smp_store_mb(current->state, (state_value)); \ |
| 278 | } while (0) | 288 | } while (0) |
| 279 | 289 | ||
| 280 | #else | 290 | #else |
| @@ -282,7 +292,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 282 | #define __set_task_state(tsk, state_value) \ | 292 | #define __set_task_state(tsk, state_value) \ |
| 283 | do { (tsk)->state = (state_value); } while (0) | 293 | do { (tsk)->state = (state_value); } while (0) |
| 284 | #define set_task_state(tsk, state_value) \ | 294 | #define set_task_state(tsk, state_value) \ |
| 285 | set_mb((tsk)->state, (state_value)) | 295 | smp_store_mb((tsk)->state, (state_value)) |
| 286 | 296 | ||
| 287 | /* | 297 | /* |
| 288 | * set_current_state() includes a barrier so that the write of current->state | 298 | * set_current_state() includes a barrier so that the write of current->state |
| @@ -298,7 +308,7 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 298 | #define __set_current_state(state_value) \ | 308 | #define __set_current_state(state_value) \ |
| 299 | do { current->state = (state_value); } while (0) | 309 | do { current->state = (state_value); } while (0) |
| 300 | #define set_current_state(state_value) \ | 310 | #define set_current_state(state_value) \ |
| 301 | set_mb(current->state, (state_value)) | 311 | smp_store_mb(current->state, (state_value)) |
| 302 | 312 | ||
| 303 | #endif | 313 | #endif |
| 304 | 314 | ||
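The smp_store_mb() conversion above matters because set_current_state() must order the state store before the subsequent condition load. A minimal wait-loop sketch, with a hypothetical my_flag condition set by the waker:

#include <linux/sched.h>

static int my_flag;	/* hypothetical; the waker sets it, then wakes us */

static void wait_for_flag(void)
{
	set_current_state(TASK_INTERRUPTIBLE);	/* store state + full barrier */
	while (!READ_ONCE(my_flag)) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

The barrier pairs with the waker's ordering in try_to_wake_up(), so either the sleeper sees my_flag set or the waker sees the sleeping state and wakes it; neither side can miss the other.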
| @@ -335,14 +345,10 @@ extern int runqueue_is_locked(int cpu); | |||
| 335 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) | 345 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) |
| 336 | extern void nohz_balance_enter_idle(int cpu); | 346 | extern void nohz_balance_enter_idle(int cpu); |
| 337 | extern void set_cpu_sd_state_idle(void); | 347 | extern void set_cpu_sd_state_idle(void); |
| 338 | extern int get_nohz_timer_target(int pinned); | 348 | extern int get_nohz_timer_target(void); |
| 339 | #else | 349 | #else |
| 340 | static inline void nohz_balance_enter_idle(int cpu) { } | 350 | static inline void nohz_balance_enter_idle(int cpu) { } |
| 341 | static inline void set_cpu_sd_state_idle(void) { } | 351 | static inline void set_cpu_sd_state_idle(void) { } |
| 342 | static inline int get_nohz_timer_target(int pinned) | ||
| 343 | { | ||
| 344 | return smp_processor_id(); | ||
| 345 | } | ||
| 346 | #endif | 352 | #endif |
| 347 | 353 | ||
| 348 | /* | 354 | /* |
| @@ -567,6 +573,23 @@ struct task_cputime { | |||
| 567 | .sum_exec_runtime = 0, \ | 573 | .sum_exec_runtime = 0, \ |
| 568 | } | 574 | } |
| 569 | 575 | ||
| 576 | /* | ||
| 577 | * This is the atomic variant of task_cputime, which can be used for | ||
| 578 | * storing and updating task_cputime statistics without locking. | ||
| 579 | */ | ||
| 580 | struct task_cputime_atomic { | ||
| 581 | atomic64_t utime; | ||
| 582 | atomic64_t stime; | ||
| 583 | atomic64_t sum_exec_runtime; | ||
| 584 | }; | ||
| 585 | |||
| 586 | #define INIT_CPUTIME_ATOMIC \ | ||
| 587 | (struct task_cputime_atomic) { \ | ||
| 588 | .utime = ATOMIC64_INIT(0), \ | ||
| 589 | .stime = ATOMIC64_INIT(0), \ | ||
| 590 | .sum_exec_runtime = ATOMIC64_INIT(0), \ | ||
| 591 | } | ||
| 592 | |||
| 570 | #ifdef CONFIG_PREEMPT_COUNT | 593 | #ifdef CONFIG_PREEMPT_COUNT |
| 571 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) | 594 | #define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) |
| 572 | #else | 595 | #else |
| @@ -584,18 +607,16 @@ struct task_cputime { | |||
| 584 | 607 | ||
| 585 | /** | 608 | /** |
| 586 | * struct thread_group_cputimer - thread group interval timer counts | 609 | * struct thread_group_cputimer - thread group interval timer counts |
| 587 | * @cputime: thread group interval timers. | 610 | * @cputime_atomic: atomic thread group interval timers. |
| 588 | * @running: non-zero when there are timers running and | 611 | * @running: non-zero when there are timers running and |
| 589 | * @cputime receives updates. | 612 | * @cputime receives updates. |
| 590 | * @lock: lock for fields in this struct. | ||
| 591 | * | 613 | * |
| 592 | * This structure contains the version of task_cputime, above, that is | 614 | * This structure contains the version of task_cputime, above, that is |
| 593 | * used for thread group CPU timer calculations. | 615 | * used for thread group CPU timer calculations. |
| 594 | */ | 616 | */ |
| 595 | struct thread_group_cputimer { | 617 | struct thread_group_cputimer { |
| 596 | struct task_cputime cputime; | 618 | struct task_cputime_atomic cputime_atomic; |
| 597 | int running; | 619 | int running; |
| 598 | raw_spinlock_t lock; | ||
| 599 | }; | 620 | }; |
| 600 | 621 | ||
| 601 | #include <linux/rwsem.h> | 622 | #include <linux/rwsem.h> |
| @@ -900,6 +921,50 @@ enum cpu_idle_type { | |||
| 900 | #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) | 921 | #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) |
| 901 | 922 | ||
| 902 | /* | 923 | /* |
| 924 | * Wake-queues are lists of tasks with a pending wakeup, whose | ||
| 925 | * callers have already marked the task as woken internally, | ||
| 926 | * and can thus carry on. A common use case is being able to | ||
| 927 | * do the wakeups once the corresponding user lock has been | ||
| 928 | * released. | ||
| 929 | * | ||
| 930 | * We hold a reference to each task in the list across the wakeup, | ||
| 931 | * thus guaranteeing that the memory is still valid by the time | ||
| 932 | * the actual wakeups are performed in wake_up_q(). | ||
| 933 | * | ||
| 934 | * One per task suffices, because there's never a need for a task to be | ||
| 935 | * in two wake queues simultaneously; it is forbidden to abandon a task | ||
| 936 | * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is | ||
| 937 | * already in a wake queue, the wakeup will happen soon and the second | ||
| 938 | * waker can just skip it. | ||
| 939 | * | ||
| 940 | * The WAKE_Q macro declares and initializes the list head. | ||
| 941 | * wake_up_q() does NOT reinitialize the list; it's expected to be | ||
| 942 | * called near the end of a function, where the fact that the queue is | ||
| 943 | * not used again will be easy to see by inspection. | ||
| 944 | * | ||
| 945 | * Note that this can cause spurious wakeups. schedule() callers | ||
| 946 | * must ensure the call is done inside a loop, confirming that the | ||
| 947 | * wakeup condition has in fact occurred. | ||
| 948 | */ | ||
| 949 | struct wake_q_node { | ||
| 950 | struct wake_q_node *next; | ||
| 951 | }; | ||
| 952 | |||
| 953 | struct wake_q_head { | ||
| 954 | struct wake_q_node *first; | ||
| 955 | struct wake_q_node **lastp; | ||
| 956 | }; | ||
| 957 | |||
| 958 | #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) | ||
| 959 | |||
| 960 | #define WAKE_Q(name) \ | ||
| 961 | struct wake_q_head name = { WAKE_Q_TAIL, &name.first } | ||
| 962 | |||
| 963 | extern void wake_q_add(struct wake_q_head *head, | ||
| 964 | struct task_struct *task); | ||
| 965 | extern void wake_up_q(struct wake_q_head *head); | ||
| 966 | |||
| 967 | /* | ||
| 903 | * sched-domains (multiprocessor balancing) declarations: | 968 | * sched-domains (multiprocessor balancing) declarations: |
| 904 | */ | 969 | */ |
| 905 | #ifdef CONFIG_SMP | 970 | #ifdef CONFIG_SMP |
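A minimal sketch of the wake-queue use case described in the comment above (hypothetical lock and waiter): the wakeup is queued under the lock but performed only after the lock is dropped.

#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock guarding waiters */

static void release_and_wake(struct task_struct *waiter)
{
	WAKE_Q(wq);			/* declares and initializes the head */

	spin_lock(&my_lock);
	wake_q_add(&wq, waiter);	/* mark waiter woken, defer the wakeup */
	spin_unlock(&my_lock);

	wake_up_q(&wq);			/* actual wakeups, lock already dropped */
}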
| @@ -1334,8 +1399,6 @@ struct task_struct { | |||
| 1334 | int rcu_read_lock_nesting; | 1399 | int rcu_read_lock_nesting; |
| 1335 | union rcu_special rcu_read_unlock_special; | 1400 | union rcu_special rcu_read_unlock_special; |
| 1336 | struct list_head rcu_node_entry; | 1401 | struct list_head rcu_node_entry; |
| 1337 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
| 1338 | #ifdef CONFIG_PREEMPT_RCU | ||
| 1339 | struct rcu_node *rcu_blocked_node; | 1402 | struct rcu_node *rcu_blocked_node; |
| 1340 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | 1403 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
| 1341 | #ifdef CONFIG_TASKS_RCU | 1404 | #ifdef CONFIG_TASKS_RCU |
| @@ -1356,9 +1419,6 @@ struct task_struct { | |||
| 1356 | #endif | 1419 | #endif |
| 1357 | 1420 | ||
| 1358 | struct mm_struct *mm, *active_mm; | 1421 | struct mm_struct *mm, *active_mm; |
| 1359 | #ifdef CONFIG_COMPAT_BRK | ||
| 1360 | unsigned brk_randomized:1; | ||
| 1361 | #endif | ||
| 1362 | /* per-thread vma caching */ | 1422 | /* per-thread vma caching */ |
| 1363 | u32 vmacache_seqnum; | 1423 | u32 vmacache_seqnum; |
| 1364 | struct vm_area_struct *vmacache[VMACACHE_SIZE]; | 1424 | struct vm_area_struct *vmacache[VMACACHE_SIZE]; |
| @@ -1369,7 +1429,7 @@ struct task_struct { | |||
| 1369 | int exit_state; | 1429 | int exit_state; |
| 1370 | int exit_code, exit_signal; | 1430 | int exit_code, exit_signal; |
| 1371 | int pdeath_signal; /* The signal sent when the parent dies */ | 1431 | int pdeath_signal; /* The signal sent when the parent dies */ |
| 1372 | unsigned int jobctl; /* JOBCTL_*, siglock protected */ | 1432 | unsigned long jobctl; /* JOBCTL_*, siglock protected */ |
| 1373 | 1433 | ||
| 1374 | /* Used for emulating ABI behavior of previous Linux versions */ | 1434 | /* Used for emulating ABI behavior of previous Linux versions */ |
| 1375 | unsigned int personality; | 1435 | unsigned int personality; |
| @@ -1381,10 +1441,14 @@ struct task_struct { | |||
| 1381 | /* Revert to default priority/policy when forking */ | 1441 | /* Revert to default priority/policy when forking */ |
| 1382 | unsigned sched_reset_on_fork:1; | 1442 | unsigned sched_reset_on_fork:1; |
| 1383 | unsigned sched_contributes_to_load:1; | 1443 | unsigned sched_contributes_to_load:1; |
| 1444 | unsigned sched_migrated:1; | ||
| 1384 | 1445 | ||
| 1385 | #ifdef CONFIG_MEMCG_KMEM | 1446 | #ifdef CONFIG_MEMCG_KMEM |
| 1386 | unsigned memcg_kmem_skip_account:1; | 1447 | unsigned memcg_kmem_skip_account:1; |
| 1387 | #endif | 1448 | #endif |
| 1449 | #ifdef CONFIG_COMPAT_BRK | ||
| 1450 | unsigned brk_randomized:1; | ||
| 1451 | #endif | ||
| 1388 | 1452 | ||
| 1389 | unsigned long atomic_flags; /* Flags needing atomic access. */ | 1453 | unsigned long atomic_flags; /* Flags needing atomic access. */ |
| 1390 | 1454 | ||
| @@ -1461,7 +1525,7 @@ struct task_struct { | |||
| 1461 | it with task_lock()) | 1525 | it with task_lock()) |
| 1462 | - initialized normally by setup_new_exec */ | 1526 | - initialized normally by setup_new_exec */ |
| 1463 | /* file system info */ | 1527 | /* file system info */ |
| 1464 | int link_count, total_link_count; | 1528 | struct nameidata *nameidata; |
| 1465 | #ifdef CONFIG_SYSVIPC | 1529 | #ifdef CONFIG_SYSVIPC |
| 1466 | /* ipc stuff */ | 1530 | /* ipc stuff */ |
| 1467 | struct sysv_sem sysvsem; | 1531 | struct sysv_sem sysvsem; |
| @@ -1511,6 +1575,8 @@ struct task_struct { | |||
| 1511 | /* Protection of the PI data structures: */ | 1575 | /* Protection of the PI data structures: */ |
| 1512 | raw_spinlock_t pi_lock; | 1576 | raw_spinlock_t pi_lock; |
| 1513 | 1577 | ||
| 1578 | struct wake_q_node wake_q; | ||
| 1579 | |||
| 1514 | #ifdef CONFIG_RT_MUTEXES | 1580 | #ifdef CONFIG_RT_MUTEXES |
| 1515 | /* PI waiters blocked on a rt_mutex held by this task */ | 1581 | /* PI waiters blocked on a rt_mutex held by this task */ |
| 1516 | struct rb_root pi_waiters; | 1582 | struct rb_root pi_waiters; |
| @@ -1724,6 +1790,7 @@ struct task_struct { | |||
| 1724 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | 1790 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 1725 | unsigned long task_state_change; | 1791 | unsigned long task_state_change; |
| 1726 | #endif | 1792 | #endif |
| 1793 | int pagefault_disabled; | ||
| 1727 | }; | 1794 | }; |
| 1728 | 1795 | ||
| 1729 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1796 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
| @@ -2077,22 +2144,22 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) | |||
| 2077 | #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ | 2144 | #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ |
| 2078 | #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ | 2145 | #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ |
| 2079 | 2146 | ||
| 2080 | #define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) | 2147 | #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) |
| 2081 | #define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) | 2148 | #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) |
| 2082 | #define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) | 2149 | #define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) |
| 2083 | #define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) | 2150 | #define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT) |
| 2084 | #define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) | 2151 | #define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) |
| 2085 | #define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) | 2152 | #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) |
| 2086 | #define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) | 2153 | #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) |
| 2087 | 2154 | ||
| 2088 | #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) | 2155 | #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) |
| 2089 | #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) | 2156 | #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) |
| 2090 | 2157 | ||
| 2091 | extern bool task_set_jobctl_pending(struct task_struct *task, | 2158 | extern bool task_set_jobctl_pending(struct task_struct *task, |
| 2092 | unsigned int mask); | 2159 | unsigned long mask); |
| 2093 | extern void task_clear_jobctl_trapping(struct task_struct *task); | 2160 | extern void task_clear_jobctl_trapping(struct task_struct *task); |
| 2094 | extern void task_clear_jobctl_pending(struct task_struct *task, | 2161 | extern void task_clear_jobctl_pending(struct task_struct *task, |
| 2095 | unsigned int mask); | 2162 | unsigned long mask); |
| 2096 | 2163 | ||
| 2097 | static inline void rcu_copy_process(struct task_struct *p) | 2164 | static inline void rcu_copy_process(struct task_struct *p) |
| 2098 | { | 2165 | { |
| @@ -2532,6 +2599,9 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, | |||
| 2532 | } | 2599 | } |
| 2533 | #endif | 2600 | #endif |
| 2534 | 2601 | ||
| 2602 | #define tasklist_empty() \ | ||
| 2603 | list_empty(&init_task.tasks) | ||
| 2604 | |||
| 2535 | #define next_task(p) \ | 2605 | #define next_task(p) \ |
| 2536 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) | 2606 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) |
| 2537 | 2607 | ||
| @@ -2962,11 +3032,6 @@ static __always_inline bool need_resched(void) | |||
| 2962 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); | 3032 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); |
| 2963 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); | 3033 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); |
| 2964 | 3034 | ||
| 2965 | static inline void thread_group_cputime_init(struct signal_struct *sig) | ||
| 2966 | { | ||
| 2967 | raw_spin_lock_init(&sig->cputimer.lock); | ||
| 2968 | } | ||
| 2969 | |||
| 2970 | /* | 3035 | /* |
| 2971 | * Reevaluate whether the task has signals pending delivery. | 3036 | * Reevaluate whether the task has signals pending delivery. |
| 2972 | * Wake the task if so. | 3037 | * Wake the task if so. |
| @@ -3080,13 +3145,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm) | |||
| 3080 | static inline unsigned long task_rlimit(const struct task_struct *tsk, | 3145 | static inline unsigned long task_rlimit(const struct task_struct *tsk, |
| 3081 | unsigned int limit) | 3146 | unsigned int limit) |
| 3082 | { | 3147 | { |
| 3083 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); | 3148 | return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); |
| 3084 | } | 3149 | } |
| 3085 | 3150 | ||
| 3086 | static inline unsigned long task_rlimit_max(const struct task_struct *tsk, | 3151 | static inline unsigned long task_rlimit_max(const struct task_struct *tsk, |
| 3087 | unsigned int limit) | 3152 | unsigned int limit) |
| 3088 | { | 3153 | { |
| 3089 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); | 3154 | return READ_ONCE(tsk->signal->rlim[limit].rlim_max); |
| 3090 | } | 3155 | } |
| 3091 | 3156 | ||
| 3092 | static inline unsigned long rlimit(unsigned int limit) | 3157 | static inline unsigned long rlimit(unsigned int limit) |
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 596a0e007c62..c9e4731cf10b 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
| @@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size; | |||
| 57 | extern unsigned int sysctl_sched_migration_cost; | 57 | extern unsigned int sysctl_sched_migration_cost; |
| 58 | extern unsigned int sysctl_sched_nr_migrate; | 58 | extern unsigned int sysctl_sched_nr_migrate; |
| 59 | extern unsigned int sysctl_sched_time_avg; | 59 | extern unsigned int sysctl_sched_time_avg; |
| 60 | extern unsigned int sysctl_timer_migration; | ||
| 61 | extern unsigned int sysctl_sched_shares_window; | 60 | extern unsigned int sysctl_sched_shares_window; |
| 62 | 61 | ||
| 63 | int sched_proc_update_handler(struct ctl_table *table, int write, | 62 | int sched_proc_update_handler(struct ctl_table *table, int write, |
| 64 | void __user *buffer, size_t *length, | 63 | void __user *buffer, size_t *length, |
| 65 | loff_t *ppos); | 64 | loff_t *ppos); |
| 66 | #endif | 65 | #endif |
| 67 | #ifdef CONFIG_SCHED_DEBUG | ||
| 68 | static inline unsigned int get_sysctl_timer_migration(void) | ||
| 69 | { | ||
| 70 | return sysctl_timer_migration; | ||
| 71 | } | ||
| 72 | #else | ||
| 73 | static inline unsigned int get_sysctl_timer_migration(void) | ||
| 74 | { | ||
| 75 | return 1; | ||
| 76 | } | ||
| 77 | #endif | ||
| 78 | 66 | ||
| 79 | /* | 67 | /* |
| 80 | * control realtime throttling: | 68 | * control realtime throttling: |
diff --git a/include/linux/security.h b/include/linux/security.h index 18264ea9e314..52febde52479 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -43,7 +43,6 @@ struct file; | |||
| 43 | struct vfsmount; | 43 | struct vfsmount; |
| 44 | struct path; | 44 | struct path; |
| 45 | struct qstr; | 45 | struct qstr; |
| 46 | struct nameidata; | ||
| 47 | struct iattr; | 46 | struct iattr; |
| 48 | struct fown_struct; | 47 | struct fown_struct; |
| 49 | struct file_operations; | 48 | struct file_operations; |
| @@ -477,7 +476,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
| 477 | * @inode_follow_link: | 476 | * @inode_follow_link: |
| 478 | * Check permission to follow a symbolic link when looking up a pathname. | 477 | * Check permission to follow a symbolic link when looking up a pathname. |
| 479 | * @dentry contains the dentry structure for the link. | 478 | * @dentry contains the dentry structure for the link. |
| 480 | * @nd contains the nameidata structure for the parent directory. | 479 | * @inode contains the inode, which itself is not stable in RCU-walk |
| 480 | * @rcu indicates whether we are in RCU-walk mode. | ||
| 481 | * Return 0 if permission is granted. | 481 | * Return 0 if permission is granted. |
| 482 | * @inode_permission: | 482 | * @inode_permission: |
| 483 | * Check permission before accessing an inode. This hook is called by the | 483 | * Check permission before accessing an inode. This hook is called by the |
| @@ -1553,7 +1553,8 @@ struct security_operations { | |||
| 1553 | int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, | 1553 | int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, |
| 1554 | struct inode *new_dir, struct dentry *new_dentry); | 1554 | struct inode *new_dir, struct dentry *new_dentry); |
| 1555 | int (*inode_readlink) (struct dentry *dentry); | 1555 | int (*inode_readlink) (struct dentry *dentry); |
| 1556 | int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); | 1556 | int (*inode_follow_link) (struct dentry *dentry, struct inode *inode, |
| 1557 | bool rcu); | ||
| 1557 | int (*inode_permission) (struct inode *inode, int mask); | 1558 | int (*inode_permission) (struct inode *inode, int mask); |
| 1558 | int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); | 1559 | int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); |
| 1559 | int (*inode_getattr) (const struct path *path); | 1560 | int (*inode_getattr) (const struct path *path); |
| @@ -1839,7 +1840,8 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1839 | struct inode *new_dir, struct dentry *new_dentry, | 1840 | struct inode *new_dir, struct dentry *new_dentry, |
| 1840 | unsigned int flags); | 1841 | unsigned int flags); |
| 1841 | int security_inode_readlink(struct dentry *dentry); | 1842 | int security_inode_readlink(struct dentry *dentry); |
| 1842 | int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); | 1843 | int security_inode_follow_link(struct dentry *dentry, struct inode *inode, |
| 1844 | bool rcu); | ||
| 1843 | int security_inode_permission(struct inode *inode, int mask); | 1845 | int security_inode_permission(struct inode *inode, int mask); |
| 1844 | int security_inode_setattr(struct dentry *dentry, struct iattr *attr); | 1846 | int security_inode_setattr(struct dentry *dentry, struct iattr *attr); |
| 1845 | int security_inode_getattr(const struct path *path); | 1847 | int security_inode_getattr(const struct path *path); |
| @@ -2242,7 +2244,8 @@ static inline int security_inode_readlink(struct dentry *dentry) | |||
| 2242 | } | 2244 | } |
| 2243 | 2245 | ||
| 2244 | static inline int security_inode_follow_link(struct dentry *dentry, | 2246 | static inline int security_inode_follow_link(struct dentry *dentry, |
| 2245 | struct nameidata *nd) | 2247 | struct inode *inode, |
| 2248 | bool rcu) | ||
| 2246 | { | 2249 | { |
| 2247 | return 0; | 2250 | return 0; |
| 2248 | } | 2251 | } |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 5f68d0a391ce..486e685a226a 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
| @@ -233,6 +233,47 @@ static inline void raw_write_seqcount_end(seqcount_t *s) | |||
| 233 | s->sequence++; | 233 | s->sequence++; |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | /** | ||
| 237 | * raw_write_seqcount_barrier - do a seq write barrier | ||
| 238 | * @s: pointer to seqcount_t | ||
| 239 | * | ||
| 240 | * This can be used to provide an ordering guarantee instead of the | ||
| 241 | * usual consistency guarantee. It is one wmb cheaper, because we can | ||
| 242 | * collapse the two back-to-back wmb()s. | ||
| 243 | * | ||
| 244 | * seqcount_t seq; | ||
| 245 | * bool X = true, Y = false; | ||
| 246 | * | ||
| 247 | * void read(void) | ||
| 248 | * { | ||
| 249 | * bool x, y; | ||
| 250 | * | ||
| 251 | * do { | ||
| 252 | * int s = read_seqcount_begin(&seq); | ||
| 253 | * | ||
| 254 | * x = X; y = Y; | ||
| 255 | * | ||
| 256 | * } while (read_seqcount_retry(&seq, s)); | ||
| 257 | * | ||
| 258 | * BUG_ON(!x && !y); | ||
| 259 | * } | ||
| 260 | * | ||
| 261 | * void write(void) | ||
| 262 | * { | ||
| 263 | * Y = true; | ||
| 264 | * | ||
| 265 | * raw_write_seqcount_barrier(&seq); | ||
| 266 | * | ||
| 267 | * X = false; | ||
| 268 | * } | ||
| 269 | */ | ||
| 270 | static inline void raw_write_seqcount_barrier(seqcount_t *s) | ||
| 271 | { | ||
| 272 | s->sequence++; | ||
| 273 | smp_wmb(); | ||
| 274 | s->sequence++; | ||
| 275 | } | ||
| 276 | |||
| 236 | /* | 277 | /* |
| 237 | * raw_write_seqcount_latch - redirect readers to even/odd copy | 278 | * raw_write_seqcount_latch - redirect readers to even/odd copy |
| 238 | * @s: pointer to seqcount_t | 279 | * @s: pointer to seqcount_t |
| @@ -266,13 +307,13 @@ static inline void write_seqcount_end(seqcount_t *s) | |||
| 266 | } | 307 | } |
| 267 | 308 | ||
| 268 | /** | 309 | /** |
| 269 | * write_seqcount_barrier - invalidate in-progress read-side seq operations | 310 | * write_seqcount_invalidate - invalidate in-progress read-side seq operations |
| 270 | * @s: pointer to seqcount_t | 311 | * @s: pointer to seqcount_t |
| 271 | * | 312 | * |
| 272 | * After write_seqcount_barrier, no read-side seq operations will complete | 313 | * After write_seqcount_invalidate, no read-side seq operations will complete |
| 273 | * successfully and see data older than this. | 314 | * successfully and see data older than this. |
| 274 | */ | 315 | */ |
| 275 | static inline void write_seqcount_barrier(seqcount_t *s) | 316 | static inline void write_seqcount_invalidate(seqcount_t *s) |
| 276 | { | 317 | { |
| 277 | smp_wmb(); | 318 | smp_wmb(); |
| 278 | s->sequence+=2; | 319 | s->sequence+=2; |
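A sketch of the renamed write_seqcount_invalidate() in context, assuming a hypothetical seqcount-guarded value: bumping the sequence by two forces any in-flight reader loop to retry and thus never complete against stale data.

#include <linux/seqlock.h>

static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
static int my_data;	/* hypothetical value guarded by my_seq */

static int read_data(void)
{
	unsigned int seq;
	int v;

	do {
		seq = read_seqcount_begin(&my_seq);
		v = my_data;
	} while (read_seqcount_retry(&my_seq, seq));
	return v;
}

static void invalidate_readers(void)
{
	/* any concurrent read_data() loop above is forced to retry */
	write_seqcount_invalidate(&my_seq);
}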
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3e18379dfa6f..0063b24b4f36 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -120,7 +120,7 @@ do { \ | |||
| 120 | /* | 120 | /* |
| 121 | * Despite its name it doesn't necessarily have to be a full barrier. | 121 | * Despite its name it doesn't necessarily have to be a full barrier. |
| 122 | * It should only guarantee that a STORE before the critical section | 122 | * It should only guarantee that a STORE before the critical section |
| 123 | * can not be reordered with a LOAD inside this section. | 123 | * can not be reordered with LOADs and STOREs inside this section. |
| 124 | * spin_lock() is the one-way barrier, this LOAD can not escape out | 124 | * spin_lock() is the one-way barrier, this LOAD can not escape out |
| 125 | * of the region. So the default implementation simply ensures that | 125 | * of the region. So the default implementation simply ensures that |
| 126 | * a STORE can not move into the critical section, smp_wmb() should | 126 | * a STORE can not move into the critical section, smp_wmb() should |
diff --git a/include/linux/tick.h b/include/linux/tick.h index f8492da57ad3..4191b5623a28 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -134,6 +134,12 @@ static inline bool tick_nohz_full_cpu(int cpu) | |||
| 134 | return cpumask_test_cpu(cpu, tick_nohz_full_mask); | 134 | return cpumask_test_cpu(cpu, tick_nohz_full_mask); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) | ||
| 138 | { | ||
| 139 | if (tick_nohz_full_enabled()) | ||
| 140 | cpumask_or(mask, mask, tick_nohz_full_mask); | ||
| 141 | } | ||
| 142 | |||
| 137 | extern void __tick_nohz_full_check(void); | 143 | extern void __tick_nohz_full_check(void); |
| 138 | extern void tick_nohz_full_kick(void); | 144 | extern void tick_nohz_full_kick(void); |
| 139 | extern void tick_nohz_full_kick_cpu(int cpu); | 145 | extern void tick_nohz_full_kick_cpu(int cpu); |
| @@ -142,6 +148,7 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk); | |||
| 142 | #else | 148 | #else |
| 143 | static inline bool tick_nohz_full_enabled(void) { return false; } | 149 | static inline bool tick_nohz_full_enabled(void) { return false; } |
| 144 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } | 150 | static inline bool tick_nohz_full_cpu(int cpu) { return false; } |
| 151 | static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } | ||
| 145 | static inline void __tick_nohz_full_check(void) { } | 152 | static inline void __tick_nohz_full_check(void) { } |
| 146 | static inline void tick_nohz_full_kick_cpu(int cpu) { } | 153 | static inline void tick_nohz_full_kick_cpu(int cpu) { } |
| 147 | static inline void tick_nohz_full_kick(void) { } | 154 | static inline void tick_nohz_full_kick(void) { } |
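A small sketch of the new tick_nohz_full_add_cpus_to() helper (hypothetical caller): it ORs the nohz_full CPUs into a caller-supplied mask and compiles away to a no-op when the feature is disabled.

#include <linux/tick.h>
#include <linux/cpumask.h>

static void collect_nohz_full_cpus(struct cpumask *mask)
{
	cpumask_clear(mask);
	/* OR in all nohz_full CPUs; no-op without CONFIG_NO_HZ_FULL */
	tick_nohz_full_add_cpus_to(mask);
}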
diff --git a/include/linux/time64.h b/include/linux/time64.h index a3831478d9cf..77b5df2acd2a 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _LINUX_TIME64_H | 2 | #define _LINUX_TIME64_H |
| 3 | 3 | ||
| 4 | #include <uapi/linux/time.h> | 4 | #include <uapi/linux/time.h> |
| 5 | #include <linux/math64.h> | ||
| 5 | 6 | ||
| 6 | typedef __s64 time64_t; | 7 | typedef __s64 time64_t; |
| 7 | 8 | ||
| @@ -28,6 +29,7 @@ struct timespec64 { | |||
| 28 | #define FSEC_PER_SEC 1000000000000000LL | 29 | #define FSEC_PER_SEC 1000000000000000LL |
| 29 | 30 | ||
| 30 | /* Located here for timespec[64]_valid_strict */ | 31 | /* Located here for timespec[64]_valid_strict */ |
| 32 | #define TIME64_MAX ((s64)~((u64)1 << 63)) | ||
| 31 | #define KTIME_MAX ((s64)~((u64)1 << 63)) | 33 | #define KTIME_MAX ((s64)~((u64)1 << 63)) |
| 32 | #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) | 34 | #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
| 33 | 35 | ||
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index fb86963859c7..25247220b4b7 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h | |||
| @@ -49,6 +49,8 @@ struct tk_read_base { | |||
| 49 | * @offs_boot: Offset clock monotonic -> clock boottime | 49 | * @offs_boot: Offset clock monotonic -> clock boottime |
| 50 | * @offs_tai: Offset clock monotonic -> clock tai | 50 | * @offs_tai: Offset clock monotonic -> clock tai |
| 51 | * @tai_offset: The current UTC to TAI offset in seconds | 51 | * @tai_offset: The current UTC to TAI offset in seconds |
| 52 | * @clock_was_set_seq: The sequence number of clock-was-set events | ||
| 53 | * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second | ||
| 52 | * @raw_time: Monotonic raw base time in timespec64 format | 54 | * @raw_time: Monotonic raw base time in timespec64 format |
| 53 | * @cycle_interval: Number of clock cycles in one NTP interval | 55 | * @cycle_interval: Number of clock cycles in one NTP interval |
| 54 | * @xtime_interval: Number of clock shifted nano seconds in one NTP | 56 | * @xtime_interval: Number of clock shifted nano seconds in one NTP |
| @@ -60,6 +62,9 @@ struct tk_read_base { | |||
| 60 | * shifted nano seconds. | 62 | * shifted nano seconds. |
| 61 | * @ntp_error_shift: Shift conversion between clock shifted nano seconds and | 63 | * @ntp_error_shift: Shift conversion between clock shifted nano seconds and |
| 62 | * ntp shifted nano seconds. | 64 | * ntp shifted nano seconds. |
| 65 | * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING) | ||
| 66 | * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING) | ||
| 67 | * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING) | ||
| 63 | * | 68 | * |
| 64 | * Note: For timespec(64) based interfaces wall_to_monotonic is what | 69 | * Note: For timespec(64) based interfaces wall_to_monotonic is what |
| 65 | * we need to add to xtime (or xtime corrected for sub jiffie times) | 70 | * we need to add to xtime (or xtime corrected for sub jiffie times) |
| @@ -85,6 +90,8 @@ struct timekeeper { | |||
| 85 | ktime_t offs_boot; | 90 | ktime_t offs_boot; |
| 86 | ktime_t offs_tai; | 91 | ktime_t offs_tai; |
| 87 | s32 tai_offset; | 92 | s32 tai_offset; |
| 93 | unsigned int clock_was_set_seq; | ||
| 94 | ktime_t next_leap_ktime; | ||
| 88 | struct timespec64 raw_time; | 95 | struct timespec64 raw_time; |
| 89 | 96 | ||
| 90 | /* The following members are for timekeeping internal use */ | 97 | /* The following members are for timekeeping internal use */ |
| @@ -104,6 +111,18 @@ struct timekeeper { | |||
| 104 | s64 ntp_error; | 111 | s64 ntp_error; |
| 105 | u32 ntp_error_shift; | 112 | u32 ntp_error_shift; |
| 106 | u32 ntp_err_mult; | 113 | u32 ntp_err_mult; |
| 114 | #ifdef CONFIG_DEBUG_TIMEKEEPING | ||
| 115 | long last_warning; | ||
| 116 | /* | ||
| 117 | * These simple flag variables are managed | ||
| 118 | * without locks, which is racy, but they are | ||
| 119 | * ok since we don't really care about being | ||
| 120 | * super precise about how many events were | ||
| 121 | * seen, just that a problem was observed. | ||
| 122 | */ | ||
| 123 | int underflow_seen; | ||
| 124 | int overflow_seen; | ||
| 125 | #endif | ||
| 107 | }; | 126 | }; |
| 108 | 127 | ||
| 109 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL | 128 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 99176af216af..3aa72e648650 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
| @@ -163,6 +163,7 @@ extern ktime_t ktime_get(void); | |||
| 163 | extern ktime_t ktime_get_with_offset(enum tk_offsets offs); | 163 | extern ktime_t ktime_get_with_offset(enum tk_offsets offs); |
| 164 | extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); | 164 | extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); |
| 165 | extern ktime_t ktime_get_raw(void); | 165 | extern ktime_t ktime_get_raw(void); |
| 166 | extern u32 ktime_get_resolution_ns(void); | ||
| 166 | 167 | ||
| 167 | /** | 168 | /** |
| 168 | * ktime_get_real - get the real (wall-) time in ktime_t format | 169 | * ktime_get_real - get the real (wall-) time in ktime_t format |
| @@ -266,7 +267,6 @@ extern int persistent_clock_is_local; | |||
| 266 | 267 | ||
| 267 | extern void read_persistent_clock(struct timespec *ts); | 268 | extern void read_persistent_clock(struct timespec *ts); |
| 268 | extern void read_persistent_clock64(struct timespec64 *ts); | 269 | extern void read_persistent_clock64(struct timespec64 *ts); |
| 269 | extern void read_boot_clock(struct timespec *ts); | ||
| 270 | extern void read_boot_clock64(struct timespec64 *ts); | 270 | extern void read_boot_clock64(struct timespec64 *ts); |
| 271 | extern int update_persistent_clock(struct timespec now); | 271 | extern int update_persistent_clock(struct timespec now); |
| 272 | extern int update_persistent_clock64(struct timespec64 now); | 272 | extern int update_persistent_clock64(struct timespec64 now); |
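A small usage sketch for the newly exported ktime_get_resolution_ns(); the caller shown is hypothetical, the helper simply reports the monotonic clock's resolution in nanoseconds:

#include <linux/timekeeping.h>

static void report_clock_resolution(void)
{
	/* Low-resolution systems report roughly one jiffy;
	 * high-resolution systems report the clocksource granularity. */
	pr_info("monotonic clock resolution: %u ns\n",
		ktime_get_resolution_ns());
}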
diff --git a/include/linux/timer.h b/include/linux/timer.h index 8c5a197e1587..61aa61dc410c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
| @@ -14,27 +14,23 @@ struct timer_list { | |||
| 14 | * All fields that change during normal runtime grouped to the | 14 | * All fields that change during normal runtime grouped to the |
| 15 | * same cacheline | 15 | * same cacheline |
| 16 | */ | 16 | */ |
| 17 | struct list_head entry; | 17 | struct hlist_node entry; |
| 18 | unsigned long expires; | 18 | unsigned long expires; |
| 19 | struct tvec_base *base; | 19 | void (*function)(unsigned long); |
| 20 | 20 | unsigned long data; | |
| 21 | void (*function)(unsigned long); | 21 | u32 flags; |
| 22 | unsigned long data; | 22 | int slack; |
| 23 | |||
| 24 | int slack; | ||
| 25 | 23 | ||
| 26 | #ifdef CONFIG_TIMER_STATS | 24 | #ifdef CONFIG_TIMER_STATS |
| 27 | int start_pid; | 25 | int start_pid; |
| 28 | void *start_site; | 26 | void *start_site; |
| 29 | char start_comm[16]; | 27 | char start_comm[16]; |
| 30 | #endif | 28 | #endif |
| 31 | #ifdef CONFIG_LOCKDEP | 29 | #ifdef CONFIG_LOCKDEP |
| 32 | struct lockdep_map lockdep_map; | 30 | struct lockdep_map lockdep_map; |
| 33 | #endif | 31 | #endif |
| 34 | }; | 32 | }; |
| 35 | 33 | ||
| 36 | extern struct tvec_base boot_tvec_bases; | ||
| 37 | |||
| 38 | #ifdef CONFIG_LOCKDEP | 34 | #ifdef CONFIG_LOCKDEP |
| 39 | /* | 35 | /* |
| 40 | * NB: because we have to copy the lockdep_map, setting the lockdep_map key | 36 | * NB: because we have to copy the lockdep_map, setting the lockdep_map key |
| @@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases; | |||
| 49 | #endif | 45 | #endif |
| 50 | 46 | ||
| 51 | /* | 47 | /* |
| 52 | * Note that all tvec_bases are at least 4 byte aligned and lower two bits | ||
| 53 | * of base in timer_list is guaranteed to be zero. Use them for flags. | ||
| 54 | * | ||
| 55 | * A deferrable timer will work normally when the system is busy, but | 48 | * A deferrable timer will work normally when the system is busy, but |
| 56 | * will not cause a CPU to come out of idle just to service it; instead, | 49 | * will not cause a CPU to come out of idle just to service it; instead, |
| 57 | * the timer will be serviced when the CPU eventually wakes up with a | 50 | * the timer will be serviced when the CPU eventually wakes up with a |
| @@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases; | |||
| 65 | * workqueue locking issues. It's not meant for executing random crap | 58 | * workqueue locking issues. It's not meant for executing random crap |
| 66 | * with interrupts disabled. Abuse is monitored! | 59 | * with interrupts disabled. Abuse is monitored! |
| 67 | */ | 60 | */ |
| 68 | #define TIMER_DEFERRABLE 0x1LU | 61 | #define TIMER_CPUMASK 0x0007FFFF |
| 69 | #define TIMER_IRQSAFE 0x2LU | 62 | #define TIMER_MIGRATING 0x00080000 |
| 70 | 63 | #define TIMER_BASEMASK (TIMER_CPUMASK | TIMER_MIGRATING) | |
| 71 | #define TIMER_FLAG_MASK 0x3LU | 64 | #define TIMER_DEFERRABLE 0x00100000 |
| 65 | #define TIMER_IRQSAFE 0x00200000 | ||
| 72 | 66 | ||
| 73 | #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ | 67 | #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ |
| 74 | .entry = { .prev = TIMER_ENTRY_STATIC }, \ | 68 | .entry = { .next = TIMER_ENTRY_STATIC }, \ |
| 75 | .function = (_function), \ | 69 | .function = (_function), \ |
| 76 | .expires = (_expires), \ | 70 | .expires = (_expires), \ |
| 77 | .data = (_data), \ | 71 | .data = (_data), \ |
| 78 | .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \ | 72 | .flags = (_flags), \ |
| 79 | .slack = -1, \ | 73 | .slack = -1, \ |
| 80 | __TIMER_LOCKDEP_MAP_INITIALIZER( \ | 74 | __TIMER_LOCKDEP_MAP_INITIALIZER( \ |
| 81 | __FILE__ ":" __stringify(__LINE__)) \ | 75 | __FILE__ ":" __stringify(__LINE__)) \ |
| @@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, | |||
| 168 | */ | 162 | */ |
| 169 | static inline int timer_pending(const struct timer_list * timer) | 163 | static inline int timer_pending(const struct timer_list * timer) |
| 170 | { | 164 | { |
| 171 | return timer->entry.next != NULL; | 165 | return timer->entry.pprev != NULL; |
| 172 | } | 166 | } |
| 173 | 167 | ||
| 174 | extern void add_timer_on(struct timer_list *timer, int cpu); | 168 | extern void add_timer_on(struct timer_list *timer, int cpu); |
| @@ -188,26 +182,16 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz); | |||
| 188 | #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) | 182 | #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) |
| 189 | 183 | ||
| 190 | /* | 184 | /* |
| 191 | * Return when the next timer-wheel timeout occurs (in absolute jiffies), | ||
| 192 | * locks the timer base and does the comparison against the given | ||
| 193 | * jiffie. | ||
| 194 | */ | ||
| 195 | extern unsigned long get_next_timer_interrupt(unsigned long now); | ||
| 196 | |||
| 197 | /* | ||
| 198 | * Timer-statistics info: | 185 | * Timer-statistics info: |
| 199 | */ | 186 | */ |
| 200 | #ifdef CONFIG_TIMER_STATS | 187 | #ifdef CONFIG_TIMER_STATS |
| 201 | 188 | ||
| 202 | extern int timer_stats_active; | 189 | extern int timer_stats_active; |
| 203 | 190 | ||
| 204 | #define TIMER_STATS_FLAG_DEFERRABLE 0x1 | ||
| 205 | |||
| 206 | extern void init_timer_stats(void); | 191 | extern void init_timer_stats(void); |
| 207 | 192 | ||
| 208 | extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | 193 | extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, |
| 209 | void *timerf, char *comm, | 194 | void *timerf, char *comm, u32 flags); |
| 210 | unsigned int timer_flag); | ||
| 211 | 195 | ||
| 212 | extern void __timer_stats_timer_set_start_info(struct timer_list *timer, | 196 | extern void __timer_stats_timer_set_start_info(struct timer_list *timer, |
| 213 | void *addr); | 197 | void *addr); |
| @@ -254,6 +238,15 @@ extern void run_local_timers(void); | |||
| 254 | struct hrtimer; | 238 | struct hrtimer; |
| 255 | extern enum hrtimer_restart it_real_fn(struct hrtimer *); | 239 | extern enum hrtimer_restart it_real_fn(struct hrtimer *); |
| 256 | 240 | ||
| 241 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) | ||
| 242 | #include <linux/sysctl.h> | ||
| 243 | |||
| 244 | extern unsigned int sysctl_timer_migration; | ||
| 245 | int timer_migration_handler(struct ctl_table *table, int write, | ||
| 246 | void __user *buffer, size_t *lenp, | ||
| 247 | loff_t *ppos); | ||
| 248 | #endif | ||
| 249 | |||
| 257 | unsigned long __round_jiffies(unsigned long j, int cpu); | 250 | unsigned long __round_jiffies(unsigned long j, int cpu); |
| 258 | unsigned long __round_jiffies_relative(unsigned long j, int cpu); | 251 | unsigned long __round_jiffies_relative(unsigned long j, int cpu); |
| 259 | unsigned long round_jiffies(unsigned long j); | 252 | unsigned long round_jiffies(unsigned long j); |
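With the ->base pointer gone, a timer's home CPU and its attributes are packed into the single ->flags word using the masks defined above. A sketch of how the new layout decomposes; the helper names are illustrative and not part of the header:

#include <linux/timer.h>

static inline unsigned int timer_get_cpu(const struct timer_list *timer)
{
	/* Low 19 bits carry the CPU number; TIMER_MIGRATING marks a
	 * timer in flight between per-cpu bases. */
	return timer->flags & TIMER_CPUMASK;
}

static inline bool timer_is_deferrable(const struct timer_list *timer)
{
	return timer->flags & TIMER_DEFERRABLE;
}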
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index a520fd70a59f..7eec17ad7fa1 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h | |||
| @@ -16,10 +16,10 @@ struct timerqueue_head { | |||
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| 18 | 18 | ||
| 19 | extern void timerqueue_add(struct timerqueue_head *head, | 19 | extern bool timerqueue_add(struct timerqueue_head *head, |
| 20 | struct timerqueue_node *node); | 20 | struct timerqueue_node *node); |
| 21 | extern void timerqueue_del(struct timerqueue_head *head, | 21 | extern bool timerqueue_del(struct timerqueue_head *head, |
| 22 | struct timerqueue_node *node); | 22 | struct timerqueue_node *node); |
| 23 | extern struct timerqueue_node *timerqueue_iterate_next( | 23 | extern struct timerqueue_node *timerqueue_iterate_next( |
| 24 | struct timerqueue_node *node); | 24 | struct timerqueue_node *node); |
| 25 | 25 | ||
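The new bool return value tells the caller whether the head of the queue changed. A hedged usage sketch, where the reprogramming comment stands in for whatever a real caller does when its earliest deadline moves:

#include <linux/timerqueue.h>

static void enqueue_deadline(struct timerqueue_head *head,
			     struct timerqueue_node *node, ktime_t expires)
{
	node->expires = expires;
	if (timerqueue_add(head, node)) {
		/* node is now the earliest-expiring entry; a real
		 * caller would reprogram its event source here. */
	}
}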
diff --git a/include/linux/topology.h b/include/linux/topology.h index 909b6e43b694..73ddad1e0fa3 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
| @@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu) | |||
| 191 | #ifndef topology_core_id | 191 | #ifndef topology_core_id |
| 192 | #define topology_core_id(cpu) ((void)(cpu), 0) | 192 | #define topology_core_id(cpu) ((void)(cpu), 0) |
| 193 | #endif | 193 | #endif |
| 194 | #ifndef topology_thread_cpumask | 194 | #ifndef topology_sibling_cpumask |
| 195 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) | 195 | #define topology_sibling_cpumask(cpu) cpumask_of(cpu) |
| 196 | #endif | 196 | #endif |
| 197 | #ifndef topology_core_cpumask | 197 | #ifndef topology_core_cpumask |
| 198 | #define topology_core_cpumask(cpu) cpumask_of(cpu) | 198 | #define topology_core_cpumask(cpu) cpumask_of(cpu) |
| @@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu) | |||
| 201 | #ifdef CONFIG_SCHED_SMT | 201 | #ifdef CONFIG_SCHED_SMT |
| 202 | static inline const struct cpumask *cpu_smt_mask(int cpu) | 202 | static inline const struct cpumask *cpu_smt_mask(int cpu) |
| 203 | { | 203 | { |
| 204 | return topology_thread_cpumask(cpu); | 204 | return topology_sibling_cpumask(cpu); |
| 205 | } | 205 | } |
| 206 | #endif | 206 | #endif |
| 207 | 207 | ||
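The rename from topology_thread_cpumask() to topology_sibling_cpumask() changes no behavior; callers iterate the mask exactly as before. A minimal sketch:

#include <linux/topology.h>

static unsigned int count_smt_siblings(int cpu)
{
	/* On non-SMT architectures the default above degenerates to
	 * cpumask_of(cpu), so this returns 1. */
	return cpumask_weight(topology_sibling_cpumask(cpu));
}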
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index ecd3319dac33..ae572c138607 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
| @@ -1,21 +1,30 @@ | |||
| 1 | #ifndef __LINUX_UACCESS_H__ | 1 | #ifndef __LINUX_UACCESS_H__ |
| 2 | #define __LINUX_UACCESS_H__ | 2 | #define __LINUX_UACCESS_H__ |
| 3 | 3 | ||
| 4 | #include <linux/preempt.h> | 4 | #include <linux/sched.h> |
| 5 | #include <asm/uaccess.h> | 5 | #include <asm/uaccess.h> |
| 6 | 6 | ||
| 7 | static __always_inline void pagefault_disabled_inc(void) | ||
| 8 | { | ||
| 9 | current->pagefault_disabled++; | ||
| 10 | } | ||
| 11 | |||
| 12 | static __always_inline void pagefault_disabled_dec(void) | ||
| 13 | { | ||
| 14 | current->pagefault_disabled--; | ||
| 15 | WARN_ON(current->pagefault_disabled < 0); | ||
| 16 | } | ||
| 17 | |||
| 7 | /* | 18 | /* |
| 8 | * These routines enable/disable the pagefault handler in that | 19 | * These routines enable/disable the pagefault handler. If disabled, it will |
| 9 | * it will not take any locks and go straight to the fixup table. | 20 | * not take any locks and go straight to the fixup table. |
| 10 | * | 21 | * |
| 11 | * They have great resemblance to the preempt_disable/enable calls | 22 | * User access methods will not sleep when called from a pagefault_disabled() |
| 12 | * and in fact they are identical; this is because currently there is | 23 | * environment. |
| 13 | * no other way to make the pagefault handlers do this. So we do | ||
| 14 | * disable preemption but we don't necessarily care about that. | ||
| 15 | */ | 24 | */ |
| 16 | static inline void pagefault_disable(void) | 25 | static inline void pagefault_disable(void) |
| 17 | { | 26 | { |
| 18 | preempt_count_inc(); | 27 | pagefault_disabled_inc(); |
| 19 | /* | 28 | /* |
| 20 | * make sure to have issued the store before a pagefault | 29 | * make sure to have issued the store before a pagefault |
| 21 | * can hit. | 30 | * can hit. |
| @@ -25,18 +34,31 @@ static inline void pagefault_disable(void) | |||
| 25 | 34 | ||
| 26 | static inline void pagefault_enable(void) | 35 | static inline void pagefault_enable(void) |
| 27 | { | 36 | { |
| 28 | #ifndef CONFIG_PREEMPT | ||
| 29 | /* | 37 | /* |
| 30 | * make sure to issue those last loads/stores before enabling | 38 | * make sure to issue those last loads/stores before enabling |
| 31 | * the pagefault handler again. | 39 | * the pagefault handler again. |
| 32 | */ | 40 | */ |
| 33 | barrier(); | 41 | barrier(); |
| 34 | preempt_count_dec(); | 42 | pagefault_disabled_dec(); |
| 35 | #else | ||
| 36 | preempt_enable(); | ||
| 37 | #endif | ||
| 38 | } | 43 | } |
| 39 | 44 | ||
| 45 | /* | ||
| 46 | * Is the pagefault handler disabled? If so, user access methods will not sleep. | ||
| 47 | */ | ||
| 48 | #define pagefault_disabled() (current->pagefault_disabled != 0) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * The pagefault handler is in general disabled by pagefault_disable() or | ||
| 52 | * when in irq context (via in_atomic()). | ||
| 53 | * | ||
| 54 | * This function should only be used by the fault handlers. Other users should | ||
| 55 | * stick to pagefault_disabled(). | ||
| 56 | * Please NEVER use preempt_disable() to disable the fault handler. With | ||
| 57 | * !CONFIG_PREEMPT_COUNT it is a NOP, so the handler would not actually be | ||
| 58 | * disabled; in_atomic() reports different values depending on CONFIG_PREEMPT_COUNT. | ||
| 59 | */ | ||
| 60 | #define faulthandler_disabled() (pagefault_disabled() || in_atomic()) | ||
| 61 | |||
| 40 | #ifndef ARCH_HAS_NOCACHE_UACCESS | 62 | #ifndef ARCH_HAS_NOCACHE_UACCESS |
| 41 | 63 | ||
| 42 | static inline unsigned long __copy_from_user_inatomic_nocache(void *to, | 64 | static inline unsigned long __copy_from_user_inatomic_nocache(void *to, |
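Decoupling pagefault_disable() from the preempt counter lets the two concerns be expressed independently. A sketch of the canonical caller pattern; the function name, destination, and size are placeholders:

#include <linux/uaccess.h>

static unsigned long peek_user(void *dst, const void __user *src,
			       unsigned long size)
{
	unsigned long ret;

	pagefault_disable();	/* faults go straight to the fixup table */
	ret = __copy_from_user_inatomic(dst, src, size);
	pagefault_enable();

	return ret;		/* bytes NOT copied */
}

Arch fault handlers, in turn, are expected to bail out early via the new faulthandler_disabled() predicate rather than relying on in_atomic() alone.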
diff --git a/include/linux/wait.h b/include/linux/wait.h index 2db83349865b..d69ac4ecc88b 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -969,7 +969,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *); | |||
| 969 | * on that signal. | 969 | * on that signal. |
| 970 | */ | 970 | */ |
| 971 | static inline int | 971 | static inline int |
| 972 | wait_on_bit(void *word, int bit, unsigned mode) | 972 | wait_on_bit(unsigned long *word, int bit, unsigned mode) |
| 973 | { | 973 | { |
| 974 | might_sleep(); | 974 | might_sleep(); |
| 975 | if (!test_bit(bit, word)) | 975 | if (!test_bit(bit, word)) |
| @@ -994,7 +994,7 @@ wait_on_bit(void *word, int bit, unsigned mode) | |||
| 994 | * on that signal. | 994 | * on that signal. |
| 995 | */ | 995 | */ |
| 996 | static inline int | 996 | static inline int |
| 997 | wait_on_bit_io(void *word, int bit, unsigned mode) | 997 | wait_on_bit_io(unsigned long *word, int bit, unsigned mode) |
| 998 | { | 998 | { |
| 999 | might_sleep(); | 999 | might_sleep(); |
| 1000 | if (!test_bit(bit, word)) | 1000 | if (!test_bit(bit, word)) |
| @@ -1020,7 +1020,8 @@ wait_on_bit_io(void *word, int bit, unsigned mode) | |||
| 1020 | * received a signal and the mode permitted wakeup on that signal. | 1020 | * received a signal and the mode permitted wakeup on that signal. |
| 1021 | */ | 1021 | */ |
| 1022 | static inline int | 1022 | static inline int |
| 1023 | wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout) | 1023 | wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, |
| 1024 | unsigned long timeout) | ||
| 1024 | { | 1025 | { |
| 1025 | might_sleep(); | 1026 | might_sleep(); |
| 1026 | if (!test_bit(bit, word)) | 1027 | if (!test_bit(bit, word)) |
| @@ -1047,7 +1048,8 @@ wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout) | |||
| 1047 | * on that signal. | 1048 | * on that signal. |
| 1048 | */ | 1049 | */ |
| 1049 | static inline int | 1050 | static inline int |
| 1050 | wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) | 1051 | wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, |
| 1052 | unsigned mode) | ||
| 1051 | { | 1053 | { |
| 1052 | might_sleep(); | 1054 | might_sleep(); |
| 1053 | if (!test_bit(bit, word)) | 1055 | if (!test_bit(bit, word)) |
| @@ -1075,7 +1077,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode | |||
| 1075 | * the @mode allows that signal to wake the process. | 1077 | * the @mode allows that signal to wake the process. |
| 1076 | */ | 1078 | */ |
| 1077 | static inline int | 1079 | static inline int |
| 1078 | wait_on_bit_lock(void *word, int bit, unsigned mode) | 1080 | wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) |
| 1079 | { | 1081 | { |
| 1080 | might_sleep(); | 1082 | might_sleep(); |
| 1081 | if (!test_and_set_bit(bit, word)) | 1083 | if (!test_and_set_bit(bit, word)) |
| @@ -1099,7 +1101,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode) | |||
| 1099 | * the @mode allows that signal to wake the process. | 1101 | * the @mode allows that signal to wake the process. |
| 1100 | */ | 1102 | */ |
| 1101 | static inline int | 1103 | static inline int |
| 1102 | wait_on_bit_lock_io(void *word, int bit, unsigned mode) | 1104 | wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode) |
| 1103 | { | 1105 | { |
| 1104 | might_sleep(); | 1106 | might_sleep(); |
| 1105 | if (!test_and_set_bit(bit, word)) | 1107 | if (!test_and_set_bit(bit, word)) |
| @@ -1125,7 +1127,8 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode) | |||
| 1125 | * the @mode allows that signal to wake the process. | 1127 | * the @mode allows that signal to wake the process. |
| 1126 | */ | 1128 | */ |
| 1127 | static inline int | 1129 | static inline int |
| 1128 | wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) | 1130 | wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action, |
| 1131 | unsigned mode) | ||
| 1129 | { | 1132 | { |
| 1130 | might_sleep(); | 1133 | might_sleep(); |
| 1131 | if (!test_and_set_bit(bit, word)) | 1134 | if (!test_and_set_bit(bit, word)) |
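Tightening the word argument from void * to unsigned long * turns misuse into a compile error instead of a silent aliasing bug. A hedged sketch of a conforming caller; the flag word and bit number are hypothetical:

#include <linux/wait.h>

static unsigned long my_state;	/* bit 0: resource busy */
#define MY_STATE_BUSY	0

static int wait_for_resource(void)
{
	/* Returns 0 once the bit clears, non-zero (typically -EINTR)
	 * if a fatal signal arrives first. */
	return wait_on_bit(&my_state, MY_STATE_BUSY, TASK_KILLABLE);
}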
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 497bc14cdb85..0320bbb7d7b5 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h | |||
| @@ -98,7 +98,8 @@ struct inet_connection_sock { | |||
| 98 | const struct tcp_congestion_ops *icsk_ca_ops; | 98 | const struct tcp_congestion_ops *icsk_ca_ops; |
| 99 | const struct inet_connection_sock_af_ops *icsk_af_ops; | 99 | const struct inet_connection_sock_af_ops *icsk_af_ops; |
| 100 | unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); | 100 | unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); |
| 101 | __u8 icsk_ca_state:7, | 101 | __u8 icsk_ca_state:6, |
| 102 | icsk_ca_setsockopt:1, | ||
| 102 | icsk_ca_dst_locked:1; | 103 | icsk_ca_dst_locked:1; |
| 103 | __u8 icsk_retransmits; | 104 | __u8 icsk_retransmits; |
| 104 | __u8 icsk_pending; | 105 | __u8 icsk_pending; |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 8e3668b44c29..fc57f6b82fc5 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
| @@ -354,7 +354,7 @@ enum ieee80211_rssi_event_data { | |||
| 354 | }; | 354 | }; |
| 355 | 355 | ||
| 356 | /** | 356 | /** |
| 357 | * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT | 357 | * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT |
| 358 | * @data: See &enum ieee80211_rssi_event_data | 358 | * @data: See &enum ieee80211_rssi_event_data |
| 359 | */ | 359 | */ |
| 360 | struct ieee80211_rssi_event { | 360 | struct ieee80211_rssi_event { |
| @@ -388,7 +388,7 @@ enum ieee80211_mlme_event_status { | |||
| 388 | }; | 388 | }; |
| 389 | 389 | ||
| 390 | /** | 390 | /** |
| 391 | * enum ieee80211_mlme_event - data attached to an %MLME_EVENT | 391 | * struct ieee80211_mlme_event - data attached to an %MLME_EVENT |
| 392 | * @data: See &enum ieee80211_mlme_event_data | 392 | * @data: See &enum ieee80211_mlme_event_data |
| 393 | * @status: See &enum ieee80211_mlme_event_status | 393 | * @status: See &enum ieee80211_mlme_event_status |
| 394 | * @reason: the reason code if applicable | 394 | * @reason: the reason code if applicable |
| @@ -401,9 +401,10 @@ struct ieee80211_mlme_event { | |||
| 401 | 401 | ||
| 402 | /** | 402 | /** |
| 403 | * struct ieee80211_event - event to be sent to the driver | 403 | * struct ieee80211_event - event to be sent to the driver |
| 404 | * @type The event itself. See &enum ieee80211_event_type. | 404 | * @type: The event itself. See &enum ieee80211_event_type. |
| 405 | * @rssi: relevant if &type is %RSSI_EVENT | 405 | * @rssi: relevant if &type is %RSSI_EVENT |
| 406 | * @mlme: relevant if &type is %AUTH_EVENT | 406 | * @mlme: relevant if &type is %AUTH_EVENT |
| 407 | * @u: union holding the above two fields | ||
| 407 | */ | 408 | */ |
| 408 | struct ieee80211_event { | 409 | struct ieee80211_event { |
| 409 | enum ieee80211_event_type type; | 410 | enum ieee80211_event_type type; |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index c56a438c3a1e..ce13cf20f625 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr) | |||
| 574 | /* Map v4 address to v4-mapped v6 address */ | 574 | /* Map v4 address to v4-mapped v6 address */ |
| 575 | static inline void sctp_v4_map_v6(union sctp_addr *addr) | 575 | static inline void sctp_v4_map_v6(union sctp_addr *addr) |
| 576 | { | 576 | { |
| 577 | __be16 port; | ||
| 578 | |||
| 579 | port = addr->v4.sin_port; | ||
| 580 | addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; | ||
| 581 | addr->v6.sin6_port = port; | ||
| 577 | addr->v6.sin6_family = AF_INET6; | 582 | addr->v6.sin6_family = AF_INET6; |
| 578 | addr->v6.sin6_flowinfo = 0; | 583 | addr->v6.sin6_flowinfo = 0; |
| 579 | addr->v6.sin6_scope_id = 0; | 584 | addr->v6.sin6_scope_id = 0; |
| 580 | addr->v6.sin6_port = addr->v4.sin_port; | ||
| 581 | addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; | ||
| 582 | addr->v6.sin6_addr.s6_addr32[0] = 0; | 585 | addr->v6.sin6_addr.s6_addr32[0] = 0; |
| 583 | addr->v6.sin6_addr.s6_addr32[1] = 0; | 586 | addr->v6.sin6_addr.s6_addr32[1] = 0; |
| 584 | addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); | 587 | addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); |
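The reordering matters because both views share the storage of union sctp_addr: clearing sin6_flowinfo overwrites the bytes that held sin_addr. A comment-style sketch of the overlap, with offsets assuming the standard sockaddr_in/sockaddr_in6 layouts (not part of the patch itself):

/*
 *   union sctp_addr          byte offsets
 *   ----------------         ------------
 *   v4.sin_port              2..3
 *   v4.sin_addr.s_addr       4..7
 *   v6.sin6_port             2..3
 *   v6.sin6_flowinfo         4..7   <-- aliases v4.sin_addr
 *
 * Hence sin_port and sin_addr must be read out before any v6
 * field sharing their bytes is written.
 */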
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h index 53a18b3635e2..df705908480a 100644 --- a/include/sound/hda_regmap.h +++ b/include/sound/hda_regmap.h | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #include <sound/core.h> | 9 | #include <sound/core.h> |
| 10 | #include <sound/hdaudio.h> | 10 | #include <sound/hdaudio.h> |
| 11 | 11 | ||
| 12 | #define AC_AMP_FAKE_MUTE 0x10 /* fake mute bit set to amp verbs */ | ||
| 13 | |||
| 12 | int snd_hdac_regmap_init(struct hdac_device *codec); | 14 | int snd_hdac_regmap_init(struct hdac_device *codec); |
| 13 | void snd_hdac_regmap_exit(struct hdac_device *codec); | 15 | void snd_hdac_regmap_exit(struct hdac_device *codec); |
| 14 | int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec, | 16 | int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec, |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index d61be7297b2c..5f1225706993 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
| @@ -1,9 +1,7 @@ | |||
| 1 | #ifndef TARGET_CORE_BACKEND_H | 1 | #ifndef TARGET_CORE_BACKEND_H |
| 2 | #define TARGET_CORE_BACKEND_H | 2 | #define TARGET_CORE_BACKEND_H |
| 3 | 3 | ||
| 4 | #define TRANSPORT_PLUGIN_PHBA_PDEV 1 | 4 | #define TRANSPORT_FLAG_PASSTHROUGH 1 |
| 5 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 | ||
| 6 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 | ||
| 7 | 5 | ||
| 8 | struct target_backend_cits { | 6 | struct target_backend_cits { |
| 9 | struct config_item_type tb_dev_cit; | 7 | struct config_item_type tb_dev_cit; |
| @@ -22,7 +20,7 @@ struct se_subsystem_api { | |||
| 22 | char inquiry_rev[4]; | 20 | char inquiry_rev[4]; |
| 23 | struct module *owner; | 21 | struct module *owner; |
| 24 | 22 | ||
| 25 | u8 transport_type; | 23 | u8 transport_flags; |
| 26 | 24 | ||
| 27 | int (*attach_hba)(struct se_hba *, u32); | 25 | int (*attach_hba)(struct se_hba *, u32); |
| 28 | void (*detach_hba)(struct se_hba *); | 26 | void (*detach_hba)(struct se_hba *); |
| @@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32); | |||
| 138 | int se_dev_set_max_sectors(struct se_device *, u32); | 136 | int se_dev_set_max_sectors(struct se_device *, u32); |
| 139 | int se_dev_set_optimal_sectors(struct se_device *, u32); | 137 | int se_dev_set_optimal_sectors(struct se_device *, u32); |
| 140 | int se_dev_set_block_size(struct se_device *, u32); | 138 | int se_dev_set_block_size(struct se_device *, u32); |
| 139 | sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, | ||
| 140 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); | ||
| 141 | 141 | ||
| 142 | #endif /* TARGET_CORE_BACKEND_H */ | 142 | #endif /* TARGET_CORE_BACKEND_H */ |
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h index 25bb04c4209e..b99c01170392 100644 --- a/include/target/target_core_configfs.h +++ b/include/target/target_core_configfs.h | |||
| @@ -40,8 +40,6 @@ struct target_fabric_configfs { | |||
| 40 | struct config_item *tf_fabric; | 40 | struct config_item *tf_fabric; |
| 41 | /* Passed from fabric modules */ | 41 | /* Passed from fabric modules */ |
| 42 | struct config_item_type *tf_fabric_cit; | 42 | struct config_item_type *tf_fabric_cit; |
| 43 | /* Pointer to target core subsystem */ | ||
| 44 | struct configfs_subsystem *tf_subsys; | ||
| 45 | /* Pointer to fabric's struct module */ | 43 | /* Pointer to fabric's struct module */ |
| 46 | struct module *tf_module; | 44 | struct module *tf_module; |
| 47 | struct target_core_fabric_ops tf_ops; | 45 | struct target_core_fabric_ops tf_ops; |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 17c7f5ac7ea0..0f4dc3768587 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
| @@ -4,7 +4,6 @@ | |||
| 4 | struct target_core_fabric_ops { | 4 | struct target_core_fabric_ops { |
| 5 | struct module *module; | 5 | struct module *module; |
| 6 | const char *name; | 6 | const char *name; |
| 7 | struct configfs_subsystem *tf_subsys; | ||
| 8 | char *(*get_fabric_name)(void); | 7 | char *(*get_fabric_name)(void); |
| 9 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | 8 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); |
| 10 | char *(*tpg_get_wwn)(struct se_portal_group *); | 9 | char *(*tpg_get_wwn)(struct se_portal_group *); |
| @@ -109,6 +108,9 @@ struct target_core_fabric_ops { | |||
| 109 | int target_register_template(const struct target_core_fabric_ops *fo); | 108 | int target_register_template(const struct target_core_fabric_ops *fo); |
| 110 | void target_unregister_template(const struct target_core_fabric_ops *fo); | 109 | void target_unregister_template(const struct target_core_fabric_ops *fo); |
| 111 | 110 | ||
| 111 | int target_depend_item(struct config_item *item); | ||
| 112 | void target_undepend_item(struct config_item *item); | ||
| 113 | |||
| 112 | struct se_session *transport_init_session(enum target_prot_op); | 114 | struct se_session *transport_init_session(enum target_prot_op); |
| 113 | int transport_alloc_session_tags(struct se_session *, unsigned int, | 115 | int transport_alloc_session_tags(struct se_session *, unsigned int, |
| 114 | unsigned int); | 116 | unsigned int); |
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 81ea59812117..f7554fd7fc62 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h | |||
| @@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree, | |||
| 140 | TP_ARGS(call_site, ptr) | 140 | TP_ARGS(call_site, ptr) |
| 141 | ); | 141 | ); |
| 142 | 142 | ||
| 143 | DEFINE_EVENT(kmem_free, kmem_cache_free, | 143 | DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, |
| 144 | 144 | ||
| 145 | TP_PROTO(unsigned long call_site, const void *ptr), | 145 | TP_PROTO(unsigned long call_site, const void *ptr), |
| 146 | 146 | ||
| 147 | TP_ARGS(call_site, ptr) | 147 | TP_ARGS(call_site, ptr), |
| 148 | |||
| 149 | /* | ||
| 150 | * This trace can potentially be called from an offlined cpu. | ||
| 151 | * Since tracepoints use RCU and RCU should not be used from | ||
| 152 | * offline cpus, filter such calls out. | ||
| 153 | * While this trace can be called from a preemptible section, | ||
| 154 | * that has no impact on the condition, since tasks can migrate | ||
| 155 | * only from online cpus to other online cpus. Thus it is safe | ||
| 156 | * to use raw_smp_processor_id(). | ||
| 157 | */ | ||
| 158 | TP_CONDITION(cpu_online(raw_smp_processor_id())) | ||
| 148 | ); | 159 | ); |
| 149 | 160 | ||
| 150 | TRACE_EVENT(mm_page_free, | 161 | TRACE_EVENT_CONDITION(mm_page_free, |
| 151 | 162 | ||
| 152 | TP_PROTO(struct page *page, unsigned int order), | 163 | TP_PROTO(struct page *page, unsigned int order), |
| 153 | 164 | ||
| 154 | TP_ARGS(page, order), | 165 | TP_ARGS(page, order), |
| 155 | 166 | ||
| 167 | |||
| 168 | /* | ||
| 169 | * This trace can potentially be called from an offlined cpu. | ||
| 170 | * Since tracepoints use RCU and RCU should not be used from | ||
| 171 | * offline cpus, filter such calls out. | ||
| 172 | * While this trace can be called from a preemptible section, | ||
| 173 | * that has no impact on the condition, since tasks can migrate | ||
| 174 | * only from online cpus to other online cpus. Thus it is safe | ||
| 175 | * to use raw_smp_processor_id(). | ||
| 176 | */ | ||
| 177 | TP_CONDITION(cpu_online(raw_smp_processor_id())), | ||
| 178 | |||
| 156 | TP_STRUCT__entry( | 179 | TP_STRUCT__entry( |
| 157 | __field( unsigned long, pfn ) | 180 | __field( unsigned long, pfn ) |
| 158 | __field( unsigned int, order ) | 181 | __field( unsigned int, order ) |
| @@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, | |||
| 253 | TP_ARGS(page, order, migratetype) | 276 | TP_ARGS(page, order, migratetype) |
| 254 | ); | 277 | ); |
| 255 | 278 | ||
| 256 | DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, | 279 | TRACE_EVENT_CONDITION(mm_page_pcpu_drain, |
| 257 | 280 | ||
| 258 | TP_PROTO(struct page *page, unsigned int order, int migratetype), | 281 | TP_PROTO(struct page *page, unsigned int order, int migratetype), |
| 259 | 282 | ||
| 260 | TP_ARGS(page, order, migratetype), | 283 | TP_ARGS(page, order, migratetype), |
| 261 | 284 | ||
| 285 | /* | ||
| 286 | * This trace can potentially be called from an offlined cpu. | ||
| 287 | * Since tracepoints use RCU and RCU should not be used from | ||
| 288 | * offline cpus, filter such calls out. | ||
| 289 | * While this trace can be called from a preemptible section, | ||
| 290 | * that has no impact on the condition, since tasks can migrate | ||
| 291 | * only from online cpus to other online cpus. Thus it is safe | ||
| 292 | * to use raw_smp_processor_id(). | ||
| 293 | */ | ||
| 294 | TP_CONDITION(cpu_online(raw_smp_processor_id())), | ||
| 295 | |||
| 296 | TP_STRUCT__entry( | ||
| 297 | __field( unsigned long, pfn ) | ||
| 298 | __field( unsigned int, order ) | ||
| 299 | __field( int, migratetype ) | ||
| 300 | ), | ||
| 301 | |||
| 302 | TP_fast_assign( | ||
| 303 | __entry->pfn = page ? page_to_pfn(page) : -1UL; | ||
| 304 | __entry->order = order; | ||
| 305 | __entry->migratetype = migratetype; | ||
| 306 | ), | ||
| 307 | |||
| 262 | TP_printk("page=%p pfn=%lu order=%d migratetype=%d", | 308 | TP_printk("page=%p pfn=%lu order=%d migratetype=%d", |
| 263 | pfn_to_page(__entry->pfn), __entry->pfn, | 309 | pfn_to_page(__entry->pfn), __entry->pfn, |
| 264 | __entry->order, __entry->migratetype) | 310 | __entry->order, __entry->migratetype) |
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 30fedaf3e56a..d57a575fe31f 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
| @@ -147,7 +147,8 @@ TRACE_EVENT(sched_switch, | |||
| 147 | __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", | 147 | __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", |
| 148 | { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, | 148 | { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, |
| 149 | { 16, "Z" }, { 32, "X" }, { 64, "x" }, | 149 | { 16, "Z" }, { 32, "X" }, { 64, "x" }, |
| 150 | { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", | 150 | { 128, "K" }, { 256, "W" }, { 512, "P" }, |
| 151 | { 1024, "N" }) : "R", | ||
| 151 | __entry->prev_state & TASK_STATE_MAX ? "+" : "", | 152 | __entry->prev_state & TASK_STATE_MAX ? "+" : "", |
| 152 | __entry->next_comm, __entry->next_pid, __entry->next_prio) | 153 | __entry->next_comm, __entry->next_pid, __entry->next_prio) |
| 153 | ); | 154 | ); |
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 68c2c2000f02..073b9ac245ba 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h | |||
| @@ -43,15 +43,18 @@ DEFINE_EVENT(timer_class, timer_init, | |||
| 43 | */ | 43 | */ |
| 44 | TRACE_EVENT(timer_start, | 44 | TRACE_EVENT(timer_start, |
| 45 | 45 | ||
| 46 | TP_PROTO(struct timer_list *timer, unsigned long expires), | 46 | TP_PROTO(struct timer_list *timer, |
| 47 | unsigned long expires, | ||
| 48 | unsigned int flags), | ||
| 47 | 49 | ||
| 48 | TP_ARGS(timer, expires), | 50 | TP_ARGS(timer, expires, flags), |
| 49 | 51 | ||
| 50 | TP_STRUCT__entry( | 52 | TP_STRUCT__entry( |
| 51 | __field( void *, timer ) | 53 | __field( void *, timer ) |
| 52 | __field( void *, function ) | 54 | __field( void *, function ) |
| 53 | __field( unsigned long, expires ) | 55 | __field( unsigned long, expires ) |
| 54 | __field( unsigned long, now ) | 56 | __field( unsigned long, now ) |
| 57 | __field( unsigned int, flags ) | ||
| 55 | ), | 58 | ), |
| 56 | 59 | ||
| 57 | TP_fast_assign( | 60 | TP_fast_assign( |
| @@ -59,11 +62,12 @@ TRACE_EVENT(timer_start, | |||
| 59 | __entry->function = timer->function; | 62 | __entry->function = timer->function; |
| 60 | __entry->expires = expires; | 63 | __entry->expires = expires; |
| 61 | __entry->now = jiffies; | 64 | __entry->now = jiffies; |
| 65 | __entry->flags = flags; | ||
| 62 | ), | 66 | ), |
| 63 | 67 | ||
| 64 | TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]", | 68 | TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x", |
| 65 | __entry->timer, __entry->function, __entry->expires, | 69 | __entry->timer, __entry->function, __entry->expires, |
| 66 | (long)__entry->expires - __entry->now) | 70 | (long)__entry->expires - __entry->now, __entry->flags) |
| 67 | ); | 71 | ); |
| 68 | 72 | ||
| 69 | /** | 73 | /** |
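Call sites must grow the extra argument in step with the widened prototype. A one-line sketch of how the timer core would emit the event now, passing the timer's own flags word:

	/* in the timer core, when a timer is (re)armed: */
	trace_timer_start(timer, expires, timer->flags);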
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 880dd7437172..c178d13d6f4c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
| @@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \ | |||
| 250 | DEFINE_WRITEBACK_EVENT(writeback_nowork); | 250 | DEFINE_WRITEBACK_EVENT(writeback_nowork); |
| 251 | DEFINE_WRITEBACK_EVENT(writeback_wake_background); | 251 | DEFINE_WRITEBACK_EVENT(writeback_wake_background); |
| 252 | DEFINE_WRITEBACK_EVENT(writeback_bdi_register); | 252 | DEFINE_WRITEBACK_EVENT(writeback_bdi_register); |
| 253 | DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); | ||
| 254 | 253 | ||
| 255 | DECLARE_EVENT_CLASS(wbc_class, | 254 | DECLARE_EVENT_CLASS(wbc_class, |
| 256 | TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), | 255 | TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), |
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 871e73f99a4d..94d44ab2fda1 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h | |||
| @@ -1038,6 +1038,7 @@ struct drm_radeon_cs { | |||
| 1038 | #define RADEON_INFO_CURRENT_GPU_SCLK 0x22 | 1038 | #define RADEON_INFO_CURRENT_GPU_SCLK 0x22 |
| 1039 | #define RADEON_INFO_CURRENT_GPU_MCLK 0x23 | 1039 | #define RADEON_INFO_CURRENT_GPU_MCLK 0x23 |
| 1040 | #define RADEON_INFO_READ_REG 0x24 | 1040 | #define RADEON_INFO_READ_REG 0x24 |
| 1041 | #define RADEON_INFO_VA_UNMAP_WORKING 0x25 | ||
| 1041 | 1042 | ||
| 1042 | struct drm_radeon_info { | 1043 | struct drm_radeon_info { |
| 1043 | uint32_t request; | 1044 | uint32_t request; |
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 309211b3eb67..d97f84c080da 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h | |||
| @@ -167,6 +167,7 @@ enum perf_branch_sample_type_shift { | |||
| 167 | PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ | 167 | PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ |
| 168 | 168 | ||
| 169 | PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ | 169 | PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ |
| 170 | PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ | ||
| 170 | 171 | ||
| 171 | PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ | 172 | PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ |
| 172 | }; | 173 | }; |
| @@ -186,6 +187,7 @@ enum perf_branch_sample_type { | |||
| 186 | PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, | 187 | PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, |
| 187 | 188 | ||
| 188 | PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, | 189 | PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, |
| 190 | PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, | ||
| 189 | 191 | ||
| 190 | PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, | 192 | PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, |
| 191 | }; | 193 | }; |
| @@ -564,6 +566,10 @@ struct perf_event_mmap_page { | |||
| 564 | #define PERF_RECORD_MISC_GUEST_USER (5 << 0) | 566 | #define PERF_RECORD_MISC_GUEST_USER (5 << 0) |
| 565 | 567 | ||
| 566 | /* | 568 | /* |
| 569 | * Indicates that /proc/PID/maps parsing was truncated by a timeout. | ||
| 570 | */ | ||
| 571 | #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12) | ||
| 572 | /* | ||
| 567 | * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on | 573 | * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on |
| 568 | * different events so can reuse the same bit position. | 574 | * different events so can reuse the same bit position. |
| 569 | */ | 575 | */ |
| @@ -800,6 +806,18 @@ enum perf_event_type { | |||
| 800 | */ | 806 | */ |
| 801 | PERF_RECORD_ITRACE_START = 12, | 807 | PERF_RECORD_ITRACE_START = 12, |
| 802 | 808 | ||
| 809 | /* | ||
| 810 | * Records the number of dropped/lost samples. | ||
| 811 | * | ||
| 812 | * struct { | ||
| 813 | * struct perf_event_header header; | ||
| 814 | * | ||
| 815 | * u64 lost; | ||
| 816 | * struct sample_id sample_id; | ||
| 817 | * }; | ||
| 818 | */ | ||
| 819 | PERF_RECORD_LOST_SAMPLES = 13, | ||
| 820 | |||
| 803 | PERF_RECORD_MAX, /* non-ABI */ | 821 | PERF_RECORD_MAX, /* non-ABI */ |
| 804 | }; | 822 | }; |
| 805 | 823 | ||
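Userspace opts into the new branch filter through perf_event_attr. A hedged sketch; the event choice and the companion USER privilege filter are illustrative, not mandated by the ABI addition:

#include <linux/perf_event.h>

struct perf_event_attr attr = {
	.type		    = PERF_TYPE_HARDWARE,
	.config		    = PERF_COUNT_HW_CPU_CYCLES,
	.sample_type	    = PERF_SAMPLE_BRANCH_STACK,
	/* record only indirect jumps, in user space */
	.branch_sample_type = PERF_SAMPLE_BRANCH_IND_JUMP |
			      PERF_SAMPLE_BRANCH_USER,
};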
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 984169a819ee..d7f1cbc3766c 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 27 | * SUCH DAMAGE. */ | 27 | * SUCH DAMAGE. */ |
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/virtio_types.h> | ||
| 29 | #include <linux/virtio_ids.h> | 30 | #include <linux/virtio_ids.h> |
| 30 | #include <linux/virtio_config.h> | 31 | #include <linux/virtio_config.h> |
| 31 | 32 | ||
