Diffstat (limited to 'include')
 49 files changed, 731 insertions(+), 493 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index c99c64dc5f3d..c33749f95b32 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -33,7 +33,7 @@
  * Atomically reads the value of @v.  Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v)	((v)->counter)
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)
 
 /**
  * atomic_set - set atomic variable
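The new atomic_read() forces a fresh load on every call: casting &(v)->counter to volatile int * tells the compiler it may not cache the value in a register, which matters for code that polls an atomic_t in a loop. A minimal user-space sketch of the same idiom (my_atomic and the values are illustrative, not kernel code):

#include <stdio.h>

struct my_atomic { int counter; };

/* The volatile cast forces a re-read of memory on each use; without it
 * the compiler may hoist the load out of a polling loop. */
#define my_atomic_read(v)  (*(volatile int *)&(v)->counter)

int main(void)
{
        struct my_atomic a = { .counter = 42 };
        printf("%d\n", my_atomic_read(&a));
        return 0;
}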
diff --git a/include/asm-generic/bitops/arch_hweight.h b/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..6a211f40665c
--- /dev/null
+++ b/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
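hweight is the Hamming weight, the number of set bits in a word. The generic __arch_hweightN() helpers simply forward to the out-of-line software routines; an architecture with a popcount instruction can supply its own arch_hweight.h instead. For reference, a standalone sketch of the classic parallel bit count that the software 32-bit routine is built around (illustrative, not the verbatim lib/hweight.c code):

#include <stdio.h>

static unsigned int sw_hweight32(unsigned int w)
{
        /* add bits pairwise, then nibble-wise, then sum the bytes */
        unsigned int res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0f0f0f0f;
        return (res * 0x01010101) >> 24;
}

int main(void)
{
        printf("%u\n", sw_hweight32(0xf0f0));   /* prints 8 */
        return 0;
}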
diff --git a/include/asm-generic/bitops/const_hweight.h b/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..fa2a50b7ee66
--- /dev/null
+++ b/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w)		\
+      (	(!!((w) & (1ULL << 0))) +	\
+	(!!((w) & (1ULL << 1))) +	\
+	(!!((w) & (1ULL << 2))) +	\
+	(!!((w) & (1ULL << 3))) +	\
+	(!!((w) & (1ULL << 4))) +	\
+	(!!((w) & (1ULL << 5))) +	\
+	(!!((w) & (1ULL << 6))) +	\
+	(!!((w) & (1ULL << 7)))	)
+
+#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w)   HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
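The three tiers fit together like this: hweightN() uses __builtin_constant_p() to pick the compile-time __const_hweightN() for constant arguments and the (possibly arch-accelerated) __arch_hweightN() otherwise, while HWEIGHTN() additionally fails the build if the argument is not a compile-time constant. The dispatch pattern in a standalone form (const_popcount/runtime_popcount are illustrative stand-ins):

#include <stdio.h>

#define const_popcount(w)  __builtin_popcount(w)

static unsigned int runtime_popcount(unsigned int w)
{
        unsigned int n = 0;

        while (w) {             /* clear the lowest set bit per step */
                w &= w - 1;
                n++;
        }
        return n;
}

#define popcount(w) (__builtin_constant_p(w) ? const_popcount(w) \
                                             : runtime_popcount(w))

int main(void)
{
        volatile unsigned int x = 0xff;         /* runtime value */

        printf("%u %u\n", popcount(0xf0u), popcount(x));  /* 4 8 */
        return 0;
}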
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h
index fbbc383771da..a94d6519c7ed 100644
--- a/include/asm-generic/bitops/hweight.h
+++ b/include/asm-generic/bitops/hweight.h
@@ -1,11 +1,7 @@
 #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
 #define _ASM_GENERIC_BITOPS_HWEIGHT_H_
 
-#include <asm/types.h>
-
-extern unsigned int hweight32(unsigned int w);
-extern unsigned int hweight16(unsigned int w);
-extern unsigned int hweight8(unsigned int w);
-extern unsigned long hweight64(__u64 w);
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
 
 #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index e694263445f7..69206957b72c 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
 
 	} else
-		dma_sync_single_for_cpu(dev, addr, size, dir);
+		dma_sync_single_for_cpu(dev, addr + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
 
 	} else
-		dma_sync_single_for_device(dev, addr, size, dir);
+		dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
 static inline void
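Both hunks are the same one-line fix: the *_range variants take an offset into the mapping, but the fallback path ignored it and synced from the start of the buffer. A hedged sketch of a caller that depends on the corrected semantics (the function and its parameters are illustrative):

#include <linux/dma-mapping.h>

static void rx_complete(struct device *dev, dma_addr_t buf_dma,
                        size_t hdr_len, size_t payload_len)
{
        /* Only the payload was written by the device; skip the header.
         * With the fix, this really syncs the sub-range starting at
         * buf_dma + hdr_len rather than the start of the buffer. */
        dma_sync_single_range_for_cpu(dev, buf_dma, hdr_len,
                                      payload_len, DMA_FROM_DEVICE);
        /* ... CPU reads the payload here ... */
        dma_sync_single_range_for_device(dev, buf_dma, hdr_len,
                                         payload_len, DMA_FROM_DEVICE);
}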
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e929c27ede22..6b9db917e717 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -789,34 +789,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
 				  bool interruptible);
 
-/**
- * ttm_bo_block_reservation
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Use interruptible sleep when waiting.
- * @no_wait: Don't sleep, but rather return -EBUSY.
- *
- * Block reservation for validation by simply reserving the buffer.
- * This is intended for single buffer use only without eviction,
- * and thus needs no deadlock protection.
- *
- * Returns:
- * -EBUSY: If no_wait == 1 and the buffer is already reserved.
- * -ERESTARTSYS: If interruptible == 1 and the process received a signal
- * while sleeping.
- */
-extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
-				    bool interruptible, bool no_wait);
-
-/**
- * ttm_bo_unblock_reservation
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unblocks reservation leaving lru lists untouched.
- */
-extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
-
 /*
  * ttm_bo_util.c
  */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b926afe8c03e..3da73f5f0ae9 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -116,11 +116,12 @@ extern unsigned long acpi_realmode_flags;
 
 int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
 
 #ifdef CONFIG_X86_IO_APIC
-extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(bus, trigger, polarity) (-1)
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
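acpi_isa_irq_to_gsi() resolves a legacy ISA IRQ to the Global System Interrupt the platform actually routes it to, honouring ACPI interrupt source overrides; note that acpi_get_override_irq() now keys on a GSI rather than a bus IRQ. A hedged usage sketch (the function and the choice of IRQ 4 / ttyS0 are illustrative):

#include <linux/acpi.h>
#include <linux/errno.h>

static int serial_gsi_lookup(void)
{
        u32 gsi;

        /* returns nonzero when no mapping exists */
        if (acpi_isa_irq_to_gsi(4, &gsi))
                return -ENODEV;
        return gsi;
}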
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index b796eab5ca75..fc68053378ce 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -10,6 +10,11 @@
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
@@ -44,31 +49,6 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
-/*
- * Clearly slow versions of the hweightN() functions, their benefit is
- * of course compile time evaluation of constant arguments.
- */
-#define HWEIGHT8(w)			\
-      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
-	(!!((w) & (1ULL << 0))) +	\
-	(!!((w) & (1ULL << 1))) +	\
-	(!!((w) & (1ULL << 2))) +	\
-	(!!((w) & (1ULL << 3))) +	\
-	(!!((w) & (1ULL << 4))) +	\
-	(!!((w) & (1ULL << 5))) +	\
-	(!!((w) & (1ULL << 6))) +	\
-	(!!((w) & (1ULL << 7)))	)
-
-#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
-
-/*
- * Type invariant version that simply casts things to the
- * largest type.
- */
-#define HWEIGHT(w)   HWEIGHT64((u64)(w))
-
 /**
  * rol32 - rotate a 32-bit value left
  * @word: value to rotate
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b8ad1ea99586..8f78073d7caa 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -530,6 +530,7 @@ static inline struct cgroup_subsys_state *task_subsys_state(
 {
 	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
 				     rcu_read_lock_held() ||
+				     lockdep_is_held(&task->alloc_lock) ||
 				     cgroup_lock_is_held());
 }
 
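The extra clause teaches lockdep that holding the task's alloc_lock (taken via task_lock()) is also a legitimate way to dereference task->cgroups. The general rcu_dereference_check() pattern, as a hedged standalone sketch (my_lock and my_shared are illustrative):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static void *my_shared;

static void *my_get(void)
{
        /* every condition that legitimizes the access is listed, so
         * lockdep only complains when none of them holds */
        return rcu_dereference_check(my_shared,
                                     rcu_read_lock_held() ||
                                     lockdep_is_held(&my_lock));
}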
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4de02b10007f..9f15150ce8d6 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -278,6 +278,27 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };
 
+#define cpufreq_freq_attr_ro(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm)	\
+static struct freq_attr _name =			\
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+#define cpufreq_freq_attr_rw(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+#define cpufreq_freq_attr_rw_old(_name)		\
+static struct freq_attr _name##_old =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+
 struct global_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj,
@@ -286,6 +307,15 @@ struct global_attr {
 			 const char *c, size_t count);
 };
 
+#define define_one_global_ro(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name)		\
+static struct global_attr _name =		\
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
 /*********************************************************************
  *                        CPUFREQ 2.6. INTERFACE                    *
 *********************************************************************/
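These helpers collapse the boilerplate of declaring sysfs attributes in cpufreq drivers and governors. A hedged sketch of how a read-only attribute would now be declared (boost_level and its value are illustrative):

static ssize_t show_boost_level(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", 3);         /* illustrative value */
}
cpufreq_freq_attr_ro(boost_level);  /* expands to struct freq_attr boost_level */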
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc4d04b..a73454aec333 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
-				       struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
 extern void cpuset_task_status_allowed(struct seq_file *m,
 					struct task_struct *task);
 
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
-
 extern int cpuset_mem_spread_node(void);
 
 static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 {
 	cpumask_copy(mask, cpu_possible_mask);
 }
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      struct cpumask *mask)
+
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(mask, cpu_possible_mask);
+	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+	return cpumask_any(cpu_active_mask);
 }
 
 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
 {
 }
 
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
-
 static inline int cpuset_mem_spread_node(void)
 {
 	return 0;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 30b93b2a01a4..eebb617c17d8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -186,6 +186,8 @@ d_iput:	no		no		no       yes
 
 #define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */
 
+#define DCACHE_CANT_MOUNT	0x0100
+
 extern spinlock_t dcache_lock;
 extern seqlock_t rename_lock;
 
@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry)
 	return d_unhashed(dentry) && !IS_ROOT(dentry);
 }
 
+static inline int cant_mount(struct dentry *dentry)
+{
+	return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_CANT_MOUNT;
+	spin_unlock(&dentry->d_lock);
+}
+
 static inline struct dentry *dget_parent(struct dentry *dentry)
 {
 	struct dentry *ret;
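DCACHE_CANT_MOUNT marks a dentry that is going away (unlinked or rmdir'ed) so that no new mount can be attached to it. A hedged sketch of the intended call pattern (the function names are illustrative, not the actual fs/ call sites):

static int my_unlink_tail(struct dentry *dentry, int err)
{
        if (!err)
                dont_mount(dentry);     /* no future mounts on this name */
        return err;
}

static int my_mountpoint_check(struct dentry *mountpoint)
{
        return cant_mount(mountpoint) ? -ENOENT : 0;
}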
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8c243aaa86a7..597692f1fc8d 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -20,12 +20,14 @@ struct debug_obj_descr;
  * struct debug_obj - representation of a tracked object
  * @node:	hlist node to link the object into the tracker list
  * @state:	tracked object state
+ * @astate:	current active state
  * @object:	pointer to the real object
  * @descr:	pointer to an object type specific debug description structure
  */
 struct debug_obj {
 	struct hlist_node	node;
 	enum debug_obj_state	state;
+	unsigned int		astate;
 	void			*object;
 	struct debug_obj_descr	*descr;
 };
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
 extern void debug_object_destroy  (void *addr, struct debug_obj_descr *descr);
 extern void debug_object_free     (void *addr, struct debug_obj_descr *descr);
 
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next);
+
 extern void debug_objects_early_init(void);
 extern void debug_objects_mem_init(void);
 #else
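astate lets a subsystem track a finer-grained sub-state of an active object and have the debug core warn on illegal transitions. A hedged usage sketch (the states and descriptor are illustrative):

#define STATE_IDLE      0
#define STATE_QUEUED    1

static struct debug_obj_descr my_descr;

static void my_queue(void *obj)
{
        /* expect IDLE and move to QUEUED; a mismatch (e.g. the object
         * queued twice) triggers a warning */
        debug_object_active_state(obj, &my_descr, STATE_IDLE, STATE_QUEUED);
}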
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 01e6adea07ec..41e46330d9be 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -82,9 +82,13 @@ void clear_ftrace_function(void);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
-# define register_ftrace_function(ops) do { } while (0)
-# define unregister_ftrace_function(ops) do { } while (0)
-# define clear_ftrace_function(ops) do { } while (0)
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_stop(void) { }
 static inline void ftrace_start(void) { }
@@ -237,11 +241,13 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
 #else
-# define skip_trace(ip)				({ 0; })
-# define ftrace_force_update()			({ 0; })
-# define ftrace_set_filter(buf, len, reset)	do { } while (0)
-# define ftrace_disable_daemon()		do { } while (0)
-# define ftrace_enable_daemon()			do { } while (0)
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+}
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
 static inline int register_ftrace_command(struct ftrace_func_command *cmd)
 {
@@ -314,16 +320,16 @@ static inline void __ftrace_enabled_restore(int enabled)
 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
 #else
-# define time_hardirqs_on(a0, a1)	do { } while (0)
-# define time_hardirqs_off(a0, a1)	do { } while (0)
+static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_PREEMPT_TRACER
 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
-# define trace_preempt_on(a0, a1)	do { } while (0)
-# define trace_preempt_off(a0, a1)	do { } while (0)
+static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
@@ -352,6 +358,10 @@ struct ftrace_graph_ret {
 	int depth;
 };
 
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 /* for init task */
@@ -400,10 +410,6 @@ extern char __irqentry_text_end[];
 
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
-
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 				trace_func_graph_ent_t entryfunc);
 
@@ -441,6 +447,13 @@ static inline void unpause_graph_tracing(void)
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+					trace_func_graph_ent_t entryfunc)
+{
+	return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
 static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
 	return -1;
@@ -492,7 +505,9 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 	return tsk->trace & TSK_TRACE_FL_GRAPH;
 }
 
-extern int ftrace_dump_on_oops;
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
@@ -504,18 +519,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);
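The theme of this patch is replacing empty stub macros with static inlines wherever the argument types are available, because an inline stub still type-checks its arguments in configurations where the feature is compiled out; register/unregister_ftrace_function stay macros only because their ops argument must not be evaluated there. An illustrative (non-kernel) before/after:

#ifdef CONFIG_MY_TRACER
extern void my_trace(unsigned long ip);
#else
static inline void my_trace(unsigned long ip) { }   /* arg still checked */
#endif

/* my_trace("bad") now fails to compile in both configurations, whereas
 * the old "#define my_trace(ip) do { } while (0)" silently accepted it
 * whenever the tracer was disabled. */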
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..39e71b0a3bfd 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index c70d27af03f9..a2d6ea49ec56 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -9,9 +9,22 @@ enum {
 };
 
 enum {
-	HW_BREAKPOINT_R = 1,
-	HW_BREAKPOINT_W = 2,
-	HW_BREAKPOINT_X = 4,
+	HW_BREAKPOINT_EMPTY	= 0,
+	HW_BREAKPOINT_R		= 1,
+	HW_BREAKPOINT_W		= 2,
+	HW_BREAKPOINT_RW	= HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+	HW_BREAKPOINT_X		= 4,
+	HW_BREAKPOINT_INVALID	= HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+	TYPE_INST	= 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+	TYPE_DATA	= 0,
+#else
+	TYPE_DATA	= 1,
+#endif
+	TYPE_MAX
 };
 
 #ifdef __KERNEL__
@@ -34,6 +47,12 @@ static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 	attr->sample_period = 1;
 }
 
+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+	hw_breakpoint_init(attr);
+	attr->exclude_kernel = 1;
+}
+
 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
 {
 	return bp->attr.bp_addr;
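ptrace_breakpoint_init() is hw_breakpoint_init() plus exclude_kernel, the right default when a breakpoint is installed on behalf of a traced user task. A hedged sketch of building a 4-byte write watchpoint with it (setup_watchpoint and addr are illustrative):

#include <linux/hw_breakpoint.h>

static void setup_watchpoint(struct perf_event_attr *attr,
                             unsigned long addr)
{
        ptrace_breakpoint_init(attr);   /* defaults + exclude_kernel */
        attr->bp_type = HW_BREAKPOINT_W;
        attr->bp_addr = addr;
        attr->bp_len  = HW_BREAKPOINT_LEN_4;
}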
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index c9bf92cd7653..d94963b379d9 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -79,10 +79,7 @@ enum {
 	IFLA_NET_NS_PID,
 	IFLA_IFALIAS,
 	IFLA_NUM_VF,		/* Number of VFs if device is SR-IOV PF */
-	IFLA_VF_MAC,		/* Hardware queue specific attributes */
-	IFLA_VF_VLAN,
-	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
-	IFLA_VFINFO,
+	IFLA_VFINFO_LIST,
 	__IFLA_MAX
 };
 
@@ -203,6 +200,24 @@ enum macvlan_mode {
 
 /* SR-IOV virtual function management section */
 
+enum {
+	IFLA_VF_INFO_UNSPEC,
+	IFLA_VF_INFO,
+	__IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+	IFLA_VF_UNSPEC,
+	IFLA_VF_MAC,		/* Hardware queue specific attributes */
+	IFLA_VF_VLAN,
+	IFLA_VF_TX_RATE,	/* TX Bandwidth Allocation */
+	__IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
 struct ifla_vf_mac {
 	__u32 vf;
 	__u8 mac[32]; /* MAX_ADDR_LEN */
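The flat per-VF attributes are replaced by a nested layout: IFLA_VFINFO_LIST carries one IFLA_VF_INFO nest per virtual function, which in turn carries IFLA_VF_MAC and friends, so the set of per-VF attributes can grow without renumbering the top-level IFLA_* space. A hedged sketch of filling the nest (dump_vf_info and its arguments are illustrative):

#include <net/netlink.h>
#include <linux/if_link.h>

static int dump_vf_info(struct sk_buff *skb, int num_vfs,
                        struct ifla_vf_mac *ivm)
{
        struct nlattr *vfinfo, *vf;
        int i;

        vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
        if (!vfinfo)
                return -EMSGSIZE;
        for (i = 0; i < num_vfs; i++) {
                vf = nla_nest_start(skb, IFLA_VF_INFO);
                if (!vf)
                        return -EMSGSIZE;
                ivm->vf = i;
                if (nla_put(skb, IFLA_VF_MAC, sizeof(*ivm), ivm))
                        return -EMSGSIZE;
                nla_nest_end(skb, vf);
        }
        nla_nest_end(skb, vfinfo);
        return 0;
}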
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index b1ed1cd8e2a8..7996fc2c9ba9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -49,7 +49,6 @@ extern struct group_info init_groups;
 	{ .first = &init_task.pids[PIDTYPE_PGID].node },		\
 	{ .first = &init_task.pids[PIDTYPE_SID].node },			\
 	},								\
-	.rcu		= RCU_HEAD_INIT,				\
 	.level		= 0,						\
 	.numbers	= { {						\
 		.nr		= 0,					\
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3af4ffd591b9..be22ad83689c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -37,9 +37,9 @@ struct iommu_ops {
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot);
-	void (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		      size_t size);
+		   phys_addr_t paddr, int gfp_order, int prot);
+	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
+		     int gfp_order);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
-extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size, int prot);
-extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-			      size_t size);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+		     phys_addr_t paddr, int gfp_order, int prot);
+extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+		       int gfp_order);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
 extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }
 
-static inline int iommu_map_range(struct iommu_domain *domain,
-				  unsigned long iova, phys_addr_t paddr,
-				  size_t size, int prot)
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, int gfp_order, int prot)
 {
 	return -ENODEV;
 }
 
-static inline void iommu_unmap_range(struct iommu_domain *domain,
-				     unsigned long iova, size_t size)
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			      int gfp_order)
 {
+	return -ENODEV;
 }
 
 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
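The map/unmap interface now expresses sizes as page orders (2^order pages, as in the page allocator) instead of byte counts, and unmap reports back the order it actually unmapped. A hedged sketch of mapping and unmapping a single page (names are illustrative):

#include <linux/iommu.h>

static int map_one_page(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr)
{
        int ret;

        /* order 0 = one page; a 2 MiB superpage would be order 9
         * with 4 KiB base pages */
        ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;
        return iommu_unmap(domain, iova, 0);  /* returns unmapped order */
}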
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9365227dbaf6..9fb1c1299032 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -490,6 +490,13 @@ static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
 static inline int tracing_is_on(void) { return 0; }
 #endif
+
+enum ftrace_dump_mode {
+	DUMP_NONE,
+	DUMP_ALL,
+	DUMP_ORIG,
+};
+
 #ifdef CONFIG_TRACING
 extern void tracing_start(void);
 extern void tracing_stop(void);
@@ -571,7 +578,7 @@ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 extern int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern void ftrace_dump(void);
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
@@ -592,7 +599,7 @@ ftrace_vprintk(const char *fmt, va_list ap)
 {
 	return 0;
 }
-static inline void ftrace_dump(void) { }
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 #endif /* CONFIG_TRACING */
 
 /*
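ftrace_dump() now takes a mode, so callers can choose how much of the ring buffer to dump: DUMP_ALL for every CPU's buffer, DUMP_ORIG for only the CPU that triggered the dump. A hedged sketch of an updated caller (the notifier is illustrative):

static void my_oops_notifier(void)
{
        ftrace_dump(DUMP_ALL);          /* was: ftrace_dump(); */
}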
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 462acaf36f3a..fb19bb92b809 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED	= 1 << 0,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f58e9d836f32..56fde4364e4c 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -474,4 +474,13 @@ struct platform_device_id {
 			__attribute__((aligned(sizeof(kernel_ulong_t))));
 };
 
+struct zorro_device_id {
+	__u32 id;			/* Device ID or ZORRO_WILDCARD */
+	kernel_ulong_t driver_data;	/* Data private to the driver */
+};
+
+#define ZORRO_WILDCARD			(0xffffffff)	/* not official */
+
+#define ZORRO_DEVICE_MODALIAS_FMT	"zorro:i%08X"
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
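With zorro_device_id in mod_devicetable.h, Amiga Zorro drivers can declare match tables that modpost turns into "zorro:i%08X" module aliases. A hedged sketch of such a table (the board ID is illustrative):

static const struct zorro_device_id my_zorro_ids[] = {
        { 0x05000000, 0 },              /* one specific board id */
        { ZORRO_WILDCARD, 0 },          /* or: match anything */
        { 0 }
};
MODULE_DEVICE_TABLE(zorro, my_zorro_ids);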
diff --git a/include/linux/module.h b/include/linux/module.h
index 515d53ae6a79..6914fcad4673 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
 
 	if (likely(module_is_live(module))) {
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 	} else
 		ret = 0;
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8e375440403..3fd5c82e0e18 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -203,8 +203,19 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
-
-				__reserved_1   : 49;
+				/*
+				 * precise_ip:
+				 *
+				 *  0 - SAMPLE_IP can have arbitrary skid
+				 *  1 - SAMPLE_IP must have constant skid
+				 *  2 - SAMPLE_IP requested to have 0 skid
+				 *  3 - SAMPLE_IP must have 0 skid
+				 *
+				 *  See also PERF_RECORD_MISC_EXACT_IP
+				 */
+				precise_ip     :  2, /* skid constraint       */
+
+				__reserved_1   : 47;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -287,11 +298,24 @@ struct perf_event_mmap_page {
 	__u64   data_tail;		/* user-space written tail */
 };
 
-#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
 #define PERF_RECORD_MISC_KERNEL		(1 << 0)
 #define PERF_RECORD_MISC_USER			(2 << 0)
 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
+
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
 
 struct perf_event_header {
 	__u32	type;
@@ -439,6 +463,12 @@ enum perf_callchain_context {
 # include <asm/perf_event.h>
 #endif
 
+struct perf_guest_info_callbacks {
+	int (*is_in_guest) (void);
+	int (*is_user_mode) (void);
+	unsigned long (*get_guest_ip) (void);
+};
+
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 #include <asm/hw_breakpoint.h>
 #endif
@@ -468,6 +498,17 @@ struct perf_raw_record {
 	void				*data;
 };
 
+struct perf_branch_entry {
+	__u64				from;
+	__u64				to;
+	__u64				flags;
+};
+
+struct perf_branch_stack {
+	__u64				nr;
+	struct perf_branch_entry	entries[0];
+};
+
 struct task_struct;
 
 /**
@@ -506,6 +547,8 @@ struct hw_perf_event {
 
 struct perf_event;
 
+#define PERF_EVENT_TXN_STARTED 1
+
 /**
  * struct pmu - generic performance monitoring unit
  */
@@ -516,6 +559,16 @@ struct pmu {
 	void (*stop)			(struct perf_event *event);
 	void (*read)			(struct perf_event *event);
 	void (*unthrottle)		(struct perf_event *event);
+
+	/*
+	 * group events scheduling is treated as a transaction,
+	 * add group events as a whole and perform one schedulability test.
+	 * If test fails, roll back the whole group
+	 */
+
+	void (*start_txn)	(const struct pmu *pmu);
+	void (*cancel_txn)	(const struct pmu *pmu);
+	int  (*commit_txn)	(const struct pmu *pmu);
 };
 
 /**
@@ -571,6 +624,14 @@ enum perf_group_flag {
 	PERF_GROUP_SOFTWARE		= 0x1,
 };
 
+#define SWEVENT_HLIST_BITS		8
+#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
+	struct rcu_head			rcu_head;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -579,6 +640,7 @@ struct perf_event {
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
+	struct hlist_node		hlist_entry;
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
@@ -726,6 +788,9 @@ struct perf_cpu_context {
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
+	struct swevent_hlist		*swevent_hlist;
+	struct mutex			hlist_mutex;
+	int				hlist_refcount;
 
 	/*
 	 * Recursion avoidance:
@@ -769,9 +834,6 @@ extern void perf_disable(void);
 extern void perf_enable(void);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
@@ -902,6 +964,10 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)
 		__perf_event_mmap(vma);
 }
 
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
@@ -971,6 +1037,11 @@ perf_sw_event(u32 event_id, u64 nr, int nmi,
 static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+
 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
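The most structural perf change here is the transaction interface: scheduling an event group becomes start_txn, add the members, then one commit_txn, so the PMU can perform a single schedulability test for the whole group instead of one per event. A hedged sketch of how the core would drive it (event_sched_in is an illustrative helper, not the real function):

static int group_sched_in_sketch(struct perf_event *leader,
                                 const struct pmu *pmu)
{
        struct perf_event *sub;

        pmu->start_txn(pmu);
        if (event_sched_in(leader))
                goto fail;
        list_for_each_entry(sub, &leader->sibling_list, group_entry)
                if (event_sched_in(sub))
                        goto fail;
        if (!pmu->commit_txn(pmu))      /* one schedulability test */
                return 0;
fail:
        pmu->cancel_txn(pmu);           /* roll the whole group back */
        return -EAGAIN;
}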
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 212da17d06af..5417944d3687 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *);
 extern int platform_add_devices(struct platform_device **, int);
 
 extern struct platform_device *platform_device_register_simple(const char *, int id,
-					struct resource *, unsigned int);
+					const struct resource *, unsigned int);
 extern struct platform_device *platform_device_register_data(struct device *,
 					const char *, int, const void *, size_t);
 
 extern struct platform_device *platform_device_alloc(const char *name, int id);
-extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
+extern int platform_device_add_resources(struct platform_device *pdev,
+					 const struct resource *res,
+					 unsigned int num);
 extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size);
 extern int platform_device_add(struct platform_device *pdev);
 extern void platform_device_del(struct platform_device *pdev);
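Constifying the resource pointers lets board files keep their resource tables in rodata. A hedged sketch (device name and addresses are illustrative):

#include <linux/platform_device.h>
#include <linux/ioport.h>

static const struct resource my_res[] = {
        {
                .start = 0xfe000000,
                .end   = 0xfe000fff,
                .flags = IORESOURCE_MEM,
        },
};

static struct platform_device *my_register(void)
{
        return platform_device_register_simple("mydev", -1, my_res,
                                               ARRAY_SIZE(my_res));
}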
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e1fb60729979..4272521e29e9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 07db2feb8572..b653b4aaa8a6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,8 +56,6 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern int rcu_scheduler_active;
-extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void);
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
+static inline void init_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 extern struct lockdep_map rcu_lock_map;
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void);
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
  *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void);
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
  *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
- * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-sched read-side critical section unless it
- * can prove otherwise.  Note that disabling of preemption (including
- * disabling irqs) counts as an RCU-sched read-side critical section.
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.
 *
- * Check rcu_scheduler_active to prevent false positives during boot.
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
 */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void)
| 177 | #ifdef CONFIG_PREEMPT | 184 | #ifdef CONFIG_PREEMPT | 
| 178 | static inline int rcu_read_lock_sched_held(void) | 185 | static inline int rcu_read_lock_sched_held(void) | 
| 179 | { | 186 | { | 
| 180 | return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); | 187 | return preempt_count() != 0 || irqs_disabled(); | 
| 181 | } | 188 | } | 
| 182 | #else /* #ifdef CONFIG_PREEMPT */ | 189 | #else /* #ifdef CONFIG_PREEMPT */ | 
| 183 | static inline int rcu_read_lock_sched_held(void) | 190 | static inline int rcu_read_lock_sched_held(void) | 
| @@ -190,6 +197,17 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 190 | 197 | ||
| 191 | #ifdef CONFIG_PROVE_RCU | 198 | #ifdef CONFIG_PROVE_RCU | 
| 192 | 199 | ||
| 200 | extern int rcu_my_thread_group_empty(void); | ||
| 201 | |||
| 202 | #define __do_rcu_dereference_check(c) \ | ||
| 203 | do { \ | ||
| 204 | static bool __warned; \ | ||
| 205 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | ||
| 206 | __warned = true; \ | ||
| 207 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 208 | } \ | ||
| 209 | } while (0) | ||
| 210 | |||
| 193 | /** | 211 | /** | 
| 194 | * rcu_dereference_check - rcu_dereference with debug checking | 212 | * rcu_dereference_check - rcu_dereference with debug checking | 
| 195 | * @p: The pointer to read, prior to dereferencing | 213 | * @p: The pointer to read, prior to dereferencing | 
| @@ -219,8 +237,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 219 | */ | 237 | */ | 
| 220 | #define rcu_dereference_check(p, c) \ | 238 | #define rcu_dereference_check(p, c) \ | 
| 221 | ({ \ | 239 | ({ \ | 
| 222 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 240 | __do_rcu_dereference_check(c); \ | 
| 223 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 224 | rcu_dereference_raw(p); \ | 241 | rcu_dereference_raw(p); \ | 
| 225 | }) | 242 | }) | 
| 226 | 243 | ||
| @@ -237,8 +254,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 237 | */ | 254 | */ | 
| 238 | #define rcu_dereference_protected(p, c) \ | 255 | #define rcu_dereference_protected(p, c) \ | 
| 239 | ({ \ | 256 | ({ \ | 
| 240 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 257 | __do_rcu_dereference_check(c); \ | 
| 241 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
| 242 | (p); \ | 258 | (p); \ | 
| 243 | }) | 259 | }) | 
| 244 | 260 | ||
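With the __do_rcu_dereference_check() wrapper above, each rcu_dereference_check() call site warns at most once (the static __warned flag) instead of flooding the log. A usage sketch; gp and my_lock are hypothetical:

	static struct foo *gp;
	static DEFINE_SPINLOCK(my_lock);

	/* legal under either rcu_read_lock() or my_lock; lockdep
	 * complains once per call site if neither condition holds */
	struct foo *f = rcu_dereference_check(gp,
					      rcu_read_lock_held() ||
					      lockdep_is_held(&my_lock));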
| diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index a5195875480a..e2e893144a84 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -29,6 +29,10 @@ | |||
| 29 | 29 | ||
| 30 | void rcu_sched_qs(int cpu); | 30 | void rcu_sched_qs(int cpu); | 
| 31 | void rcu_bh_qs(int cpu); | 31 | void rcu_bh_qs(int cpu); | 
| 32 | static inline void rcu_note_context_switch(int cpu) | ||
| 33 | { | ||
| 34 | rcu_sched_qs(cpu); | ||
| 35 | } | ||
| 32 | 36 | ||
| 33 | #define __rcu_read_lock() preempt_disable() | 37 | #define __rcu_read_lock() preempt_disable() | 
| 34 | #define __rcu_read_unlock() preempt_enable() | 38 | #define __rcu_read_unlock() preempt_enable() | 
| @@ -60,8 +64,6 @@ static inline long rcu_batches_completed_bh(void) | |||
| 60 | return 0; | 64 | return 0; | 
| 61 | } | 65 | } | 
| 62 | 66 | ||
| 63 | extern int rcu_expedited_torture_stats(char *page); | ||
| 64 | |||
| 65 | static inline void rcu_force_quiescent_state(void) | 67 | static inline void rcu_force_quiescent_state(void) | 
| 66 | { | 68 | { | 
| 67 | } | 69 | } | 
| @@ -74,7 +76,17 @@ static inline void rcu_sched_force_quiescent_state(void) | |||
| 74 | { | 76 | { | 
| 75 | } | 77 | } | 
| 76 | 78 | ||
| 77 | #define synchronize_rcu synchronize_sched | 79 | extern void synchronize_sched(void); | 
| 80 | |||
| 81 | static inline void synchronize_rcu(void) | ||
| 82 | { | ||
| 83 | synchronize_sched(); | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void synchronize_rcu_bh(void) | ||
| 87 | { | ||
| 88 | synchronize_sched(); | ||
| 89 | } | ||
| 78 | 90 | ||
| 79 | static inline void synchronize_rcu_expedited(void) | 91 | static inline void synchronize_rcu_expedited(void) | 
| 80 | { | 92 | { | 
| @@ -114,4 +126,17 @@ static inline int rcu_preempt_depth(void) | |||
| 114 | return 0; | 126 | return 0; | 
| 115 | } | 127 | } | 
| 116 | 128 | ||
| 129 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
| 130 | |||
| 131 | extern int rcu_scheduler_active __read_mostly; | ||
| 132 | extern void rcu_scheduler_starting(void); | ||
| 133 | |||
| 134 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 135 | |||
| 136 | static inline void rcu_scheduler_starting(void) | ||
| 137 | { | ||
| 138 | } | ||
| 139 | |||
| 140 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
| 141 | |||
| 117 | #endif /* __LINUX_RCUTINY_H */ | 142 | #endif /* __LINUX_RCUTINY_H */ | 
| diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 42cc3a04779e..c0ed1c056f29 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -34,8 +34,8 @@ struct notifier_block; | |||
| 34 | 34 | ||
| 35 | extern void rcu_sched_qs(int cpu); | 35 | extern void rcu_sched_qs(int cpu); | 
| 36 | extern void rcu_bh_qs(int cpu); | 36 | extern void rcu_bh_qs(int cpu); | 
| 37 | extern void rcu_note_context_switch(int cpu); | ||
| 37 | extern int rcu_needs_cpu(int cpu); | 38 | extern int rcu_needs_cpu(int cpu); | 
| 38 | extern int rcu_expedited_torture_stats(char *page); | ||
| 39 | 39 | ||
| 40 | #ifdef CONFIG_TREE_PREEMPT_RCU | 40 | #ifdef CONFIG_TREE_PREEMPT_RCU | 
| 41 | 41 | ||
| @@ -86,6 +86,8 @@ static inline void __rcu_read_unlock_bh(void) | |||
| 86 | 86 | ||
| 87 | extern void call_rcu_sched(struct rcu_head *head, | 87 | extern void call_rcu_sched(struct rcu_head *head, | 
| 88 | void (*func)(struct rcu_head *rcu)); | 88 | void (*func)(struct rcu_head *rcu)); | 
| 89 | extern void synchronize_rcu_bh(void); | ||
| 90 | extern void synchronize_sched(void); | ||
| 89 | extern void synchronize_rcu_expedited(void); | 91 | extern void synchronize_rcu_expedited(void); | 
| 90 | 92 | ||
| 91 | static inline void synchronize_rcu_bh_expedited(void) | 93 | static inline void synchronize_rcu_bh_expedited(void) | 
| @@ -120,4 +122,7 @@ static inline int rcu_blocking_is_gp(void) | |||
| 120 | return num_online_cpus() == 1; | 122 | return num_online_cpus() == 1; | 
| 121 | } | 123 | } | 
| 122 | 124 | ||
| 125 | extern void rcu_scheduler_starting(void); | ||
| 126 | extern int rcu_scheduler_active __read_mostly; | ||
| 127 | |||
| 123 | #endif /* __LINUX_RCUTREE_H */ | 128 | #endif /* __LINUX_RCUTREE_H */ | 
| diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 5fcc31ed5771..25b4f686d918 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
| @@ -120,12 +120,16 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
| 120 | unsigned long length, void *data); | 120 | unsigned long length, void *data); | 
| 121 | 121 | ||
| 122 | struct ring_buffer_event * | 122 | struct ring_buffer_event * | 
| 123 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); | 123 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, | 
| 124 | unsigned long *lost_events); | ||
| 124 | struct ring_buffer_event * | 125 | struct ring_buffer_event * | 
| 125 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); | 126 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | 
| 127 | unsigned long *lost_events); | ||
| 126 | 128 | ||
| 127 | struct ring_buffer_iter * | 129 | struct ring_buffer_iter * | 
| 128 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); | 130 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); | 
| 131 | void ring_buffer_read_prepare_sync(void); | ||
| 132 | void ring_buffer_read_start(struct ring_buffer_iter *iter); | ||
| 129 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); | 133 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); | 
| 130 | 134 | ||
| 131 | struct ring_buffer_event * | 135 | struct ring_buffer_event * | 
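ring_buffer_read_start() is split into prepare/sync/start so a caller holding many per-cpu iterators pays for one synchronization rather than one per cpu. A sketch of the new sequence, assuming buffer is an existing struct ring_buffer *:

	struct ring_buffer_iter *iter[NR_CPUS];
	int cpu;

	for_each_online_cpu(cpu)
		iter[cpu] = ring_buffer_read_prepare(buffer, cpu);

	/* one synchronization covers all prepared iterators */
	ring_buffer_read_prepare_sync();

	for_each_online_cpu(cpu)
		ring_buffer_read_start(iter[cpu]);

	/* ... read events via the iterators ... */

	for_each_online_cpu(cpu)
		ring_buffer_read_finish(iter[cpu]);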
| diff --git a/include/linux/sched.h b/include/linux/sched.h index dad7f668ebf7..b55e988988b5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -99,7 +99,6 @@ struct futex_pi_state; | |||
| 99 | struct robust_list_head; | 99 | struct robust_list_head; | 
| 100 | struct bio_list; | 100 | struct bio_list; | 
| 101 | struct fs_struct; | 101 | struct fs_struct; | 
| 102 | struct bts_context; | ||
| 103 | struct perf_event_context; | 102 | struct perf_event_context; | 
| 104 | 103 | ||
| 105 | /* | 104 | /* | 
| @@ -275,11 +274,17 @@ extern cpumask_var_t nohz_cpu_mask; | |||
| 275 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 
| 276 | extern int select_nohz_load_balancer(int cpu); | 275 | extern int select_nohz_load_balancer(int cpu); | 
| 277 | extern int get_nohz_load_balancer(void); | 276 | extern int get_nohz_load_balancer(void); | 
| 277 | extern int nohz_ratelimit(int cpu); | ||
| 278 | #else | 278 | #else | 
| 279 | static inline int select_nohz_load_balancer(int cpu) | 279 | static inline int select_nohz_load_balancer(int cpu) | 
| 280 | { | 280 | { | 
| 281 | return 0; | 281 | return 0; | 
| 282 | } | 282 | } | 
| 283 | |||
| 284 | static inline int nohz_ratelimit(int cpu) | ||
| 285 | { | ||
| 286 | return 0; | ||
| 287 | } | ||
| 283 | #endif | 288 | #endif | 
| 284 | 289 | ||
| 285 | /* | 290 | /* | 
| @@ -954,6 +959,7 @@ struct sched_domain { | |||
| 954 | char *name; | 959 | char *name; | 
| 955 | #endif | 960 | #endif | 
| 956 | 961 | ||
| 962 | unsigned int span_weight; | ||
| 957 | /* | 963 | /* | 
| 958 | * Span of all CPUs in this domain. | 964 | * Span of all CPUs in this domain. | 
| 959 | * | 965 | * | 
| @@ -1026,12 +1032,17 @@ struct sched_domain; | |||
| 1026 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ | 1032 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ | 
| 1027 | #define WF_FORK 0x02 /* child wakeup after fork */ | 1033 | #define WF_FORK 0x02 /* child wakeup after fork */ | 
| 1028 | 1034 | ||
| 1035 | #define ENQUEUE_WAKEUP 1 | ||
| 1036 | #define ENQUEUE_WAKING 2 | ||
| 1037 | #define ENQUEUE_HEAD 4 | ||
| 1038 | |||
| 1039 | #define DEQUEUE_SLEEP 1 | ||
| 1040 | |||
| 1029 | struct sched_class { | 1041 | struct sched_class { | 
| 1030 | const struct sched_class *next; | 1042 | const struct sched_class *next; | 
| 1031 | 1043 | ||
| 1032 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup, | 1044 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); | 
| 1033 | bool head); | 1045 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); | 
| 1034 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); | ||
| 1035 | void (*yield_task) (struct rq *rq); | 1046 | void (*yield_task) (struct rq *rq); | 
| 1036 | 1047 | ||
| 1037 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); | 1048 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); | 
| @@ -1040,7 +1051,8 @@ struct sched_class { | |||
| 1040 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1051 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 
| 1041 | 1052 | ||
| 1042 | #ifdef CONFIG_SMP | 1053 | #ifdef CONFIG_SMP | 
| 1043 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); | 1054 | int (*select_task_rq)(struct rq *rq, struct task_struct *p, | 
| 1055 | int sd_flag, int flags); | ||
| 1044 | 1056 | ||
| 1045 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1057 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 
| 1046 | void (*post_schedule) (struct rq *this_rq); | 1058 | void (*post_schedule) (struct rq *this_rq); | 
| @@ -1077,36 +1089,8 @@ struct load_weight { | |||
| 1077 | unsigned long weight, inv_weight; | 1089 | unsigned long weight, inv_weight; | 
| 1078 | }; | 1090 | }; | 
| 1079 | 1091 | ||
| 1080 | /* | ||
| 1081 | * CFS stats for a schedulable entity (task, task-group etc) | ||
| 1082 | * | ||
| 1083 | * Current field usage histogram: | ||
| 1084 | * | ||
| 1085 | * 4 se->block_start | ||
| 1086 | * 4 se->run_node | ||
| 1087 | * 4 se->sleep_start | ||
| 1088 | * 6 se->load.weight | ||
| 1089 | */ | ||
| 1090 | struct sched_entity { | ||
| 1091 | struct load_weight load; /* for load-balancing */ | ||
| 1092 | struct rb_node run_node; | ||
| 1093 | struct list_head group_node; | ||
| 1094 | unsigned int on_rq; | ||
| 1095 | |||
| 1096 | u64 exec_start; | ||
| 1097 | u64 sum_exec_runtime; | ||
| 1098 | u64 vruntime; | ||
| 1099 | u64 prev_sum_exec_runtime; | ||
| 1100 | |||
| 1101 | u64 last_wakeup; | ||
| 1102 | u64 avg_overlap; | ||
| 1103 | |||
| 1104 | u64 nr_migrations; | ||
| 1105 | |||
| 1106 | u64 start_runtime; | ||
| 1107 | u64 avg_wakeup; | ||
| 1108 | |||
| 1109 | #ifdef CONFIG_SCHEDSTATS | 1092 | #ifdef CONFIG_SCHEDSTATS | 
| 1093 | struct sched_statistics { | ||
| 1110 | u64 wait_start; | 1094 | u64 wait_start; | 
| 1111 | u64 wait_max; | 1095 | u64 wait_max; | 
| 1112 | u64 wait_count; | 1096 | u64 wait_count; | 
| @@ -1138,6 +1122,24 @@ struct sched_entity { | |||
| 1138 | u64 nr_wakeups_affine_attempts; | 1122 | u64 nr_wakeups_affine_attempts; | 
| 1139 | u64 nr_wakeups_passive; | 1123 | u64 nr_wakeups_passive; | 
| 1140 | u64 nr_wakeups_idle; | 1124 | u64 nr_wakeups_idle; | 
| 1125 | }; | ||
| 1126 | #endif | ||
| 1127 | |||
| 1128 | struct sched_entity { | ||
| 1129 | struct load_weight load; /* for load-balancing */ | ||
| 1130 | struct rb_node run_node; | ||
| 1131 | struct list_head group_node; | ||
| 1132 | unsigned int on_rq; | ||
| 1133 | |||
| 1134 | u64 exec_start; | ||
| 1135 | u64 sum_exec_runtime; | ||
| 1136 | u64 vruntime; | ||
| 1137 | u64 prev_sum_exec_runtime; | ||
| 1138 | |||
| 1139 | u64 nr_migrations; | ||
| 1140 | |||
| 1141 | #ifdef CONFIG_SCHEDSTATS | ||
| 1142 | struct sched_statistics statistics; | ||
| 1141 | #endif | 1143 | #endif | 
| 1142 | 1144 | ||
| 1143 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1145 | #ifdef CONFIG_FAIR_GROUP_SCHED | 
| @@ -1272,12 +1274,6 @@ struct task_struct { | |||
| 1272 | struct list_head ptraced; | 1274 | struct list_head ptraced; | 
| 1273 | struct list_head ptrace_entry; | 1275 | struct list_head ptrace_entry; | 
| 1274 | 1276 | ||
| 1275 | /* | ||
| 1276 | * This is the tracer handle for the ptrace BTS extension. | ||
| 1277 | * This field actually belongs to the ptracer task. | ||
| 1278 | */ | ||
| 1279 | struct bts_context *bts; | ||
| 1280 | |||
| 1281 | /* PID/PID hash table linkage. */ | 1277 | /* PID/PID hash table linkage. */ | 
| 1282 | struct pid_link pids[PIDTYPE_MAX]; | 1278 | struct pid_link pids[PIDTYPE_MAX]; | 
| 1283 | struct list_head thread_group; | 1279 | struct list_head thread_group; | 
| @@ -1497,7 +1493,6 @@ struct task_struct { | |||
| 1497 | /* bitmask of trace recursion */ | 1493 | /* bitmask of trace recursion */ | 
| 1498 | unsigned long trace_recursion; | 1494 | unsigned long trace_recursion; | 
| 1499 | #endif /* CONFIG_TRACING */ | 1495 | #endif /* CONFIG_TRACING */ | 
| 1500 | unsigned long stack_start; | ||
| 1501 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | 1496 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | 
| 1502 | struct memcg_batch_info { | 1497 | struct memcg_batch_info { | 
| 1503 | int do_batch; /* incremented when batch uncharge started */ | 1498 | int do_batch; /* incremented when batch uncharge started */ | 
| @@ -1847,6 +1842,7 @@ extern void sched_clock_idle_sleep_event(void); | |||
| 1847 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 1842 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 
| 1848 | 1843 | ||
| 1849 | #ifdef CONFIG_HOTPLUG_CPU | 1844 | #ifdef CONFIG_HOTPLUG_CPU | 
| 1845 | extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p); | ||
| 1850 | extern void idle_task_exit(void); | 1846 | extern void idle_task_exit(void); | 
| 1851 | #else | 1847 | #else | 
| 1852 | static inline void idle_task_exit(void) {} | 1848 | static inline void idle_task_exit(void) {} | 
| @@ -2123,10 +2119,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
| 2123 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2119 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 
| 2124 | 2120 | ||
| 2125 | #ifdef CONFIG_SMP | 2121 | #ifdef CONFIG_SMP | 
| 2126 | extern void wait_task_context_switch(struct task_struct *p); | ||
| 2127 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2122 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 
| 2128 | #else | 2123 | #else | 
| 2129 | static inline void wait_task_context_switch(struct task_struct *p) {} | ||
| 2130 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2124 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 
| 2131 | long match_state) | 2125 | long match_state) | 
| 2132 | { | 2126 | { | 
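The enqueue/dequeue bool parameters are folded into a flags word, so core-scheduler call sites compose them. A hedged sketch of what a call site looks like after the change (sched_class methods are internal scheduler API, shown only to illustrate the flag encoding):

	/* wakeup that also queues the task at the head of its runqueue */
	p->sched_class->enqueue_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_HEAD);

	/* ordinary sleep-driven dequeue */
	p->sched_class->dequeue_task(rq, p, DEQUEUE_SLEEP);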
| diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5ecb222af9..4d5d2f546dbf 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | #ifndef _LINUX_SRCU_H | 27 | #ifndef _LINUX_SRCU_H | 
| 28 | #define _LINUX_SRCU_H | 28 | #define _LINUX_SRCU_H | 
| 29 | 29 | ||
| 30 | #include <linux/mutex.h> | ||
| 31 | |||
| 30 | struct srcu_struct_array { | 32 | struct srcu_struct_array { | 
| 31 | int c[2]; | 33 | int c[2]; | 
| 32 | }; | 34 | }; | 
| @@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp); | |||
| 84 | /** | 86 | /** | 
| 85 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 87 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 
| 86 | * | 88 | * | 
| 87 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 89 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU | 
| 88 | * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 90 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 
| 89 | * this assumes we are in an SRCU read-side critical section unless it can | 91 | * this assumes we are in an SRCU read-side critical section unless it can | 
| 90 | * prove otherwise. | 92 | * prove otherwise. | 
| 91 | */ | 93 | */ | 
| diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index baba3a23a814..6b524a0d02e4 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h | |||
| @@ -1,13 +1,101 @@ | |||
| 1 | #ifndef _LINUX_STOP_MACHINE | 1 | #ifndef _LINUX_STOP_MACHINE | 
| 2 | #define _LINUX_STOP_MACHINE | 2 | #define _LINUX_STOP_MACHINE | 
| 3 | /* "Bogolock": stop the entire machine, disable interrupts. This is a | 3 | |
| 4 | very heavy lock, which is equivalent to grabbing every spinlock | ||
| 5 | (and more). So the "read" side to such a lock is anything which | ||
| 6 | disables preempt. */ | ||
| 7 | #include <linux/cpu.h> | 4 | #include <linux/cpu.h> | 
| 8 | #include <linux/cpumask.h> | 5 | #include <linux/cpumask.h> | 
| 6 | #include <linux/list.h> | ||
| 9 | #include <asm/system.h> | 7 | #include <asm/system.h> | 
| 10 | 8 | ||
| 9 | /* | ||
| 10 | * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu | ||
| 11 | * monopolization mechanism. The caller can specify a non-sleeping | ||
| 12 | * function to be executed on a single or multiple cpus preempting all | ||
| 13 | * other processes and monopolizing those cpus until it finishes. | ||
| 14 | * | ||
| 15 | * Resources for this mechanism are preallocated when a cpu is brought | ||
| 16 | * up and requests are guaranteed to be served as long as the target | ||
| 17 | * cpus are online. | ||
| 18 | */ | ||
| 19 | typedef int (*cpu_stop_fn_t)(void *arg); | ||
| 20 | |||
| 21 | #ifdef CONFIG_SMP | ||
| 22 | |||
| 23 | struct cpu_stop_work { | ||
| 24 | struct list_head list; /* cpu_stopper->works */ | ||
| 25 | cpu_stop_fn_t fn; | ||
| 26 | void *arg; | ||
| 27 | struct cpu_stop_done *done; | ||
| 28 | }; | ||
| 29 | |||
| 30 | int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); | ||
| 31 | void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, | ||
| 32 | struct cpu_stop_work *work_buf); | ||
| 33 | int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); | ||
| 34 | int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); | ||
| 35 | |||
| 36 | #else /* CONFIG_SMP */ | ||
| 37 | |||
| 38 | #include <linux/workqueue.h> | ||
| 39 | |||
| 40 | struct cpu_stop_work { | ||
| 41 | struct work_struct work; | ||
| 42 | cpu_stop_fn_t fn; | ||
| 43 | void *arg; | ||
| 44 | }; | ||
| 45 | |||
| 46 | static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) | ||
| 47 | { | ||
| 48 | int ret = -ENOENT; | ||
| 49 | preempt_disable(); | ||
| 50 | if (cpu == smp_processor_id()) | ||
| 51 | ret = fn(arg); | ||
| 52 | preempt_enable(); | ||
| 53 | return ret; | ||
| 54 | } | ||
| 55 | |||
| 56 | static void stop_one_cpu_nowait_workfn(struct work_struct *work) | ||
| 57 | { | ||
| 58 | struct cpu_stop_work *stwork = | ||
| 59 | container_of(work, struct cpu_stop_work, work); | ||
| 60 | preempt_disable(); | ||
| 61 | stwork->fn(stwork->arg); | ||
| 62 | preempt_enable(); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline void stop_one_cpu_nowait(unsigned int cpu, | ||
| 66 | cpu_stop_fn_t fn, void *arg, | ||
| 67 | struct cpu_stop_work *work_buf) | ||
| 68 | { | ||
| 69 | if (cpu == smp_processor_id()) { | ||
| 70 | INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); | ||
| 71 | work_buf->fn = fn; | ||
| 72 | work_buf->arg = arg; | ||
| 73 | schedule_work(&work_buf->work); | ||
| 74 | } | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline int stop_cpus(const struct cpumask *cpumask, | ||
| 78 | cpu_stop_fn_t fn, void *arg) | ||
| 79 | { | ||
| 80 | if (cpumask_test_cpu(raw_smp_processor_id(), cpumask)) | ||
| 81 | return stop_one_cpu(raw_smp_processor_id(), fn, arg); | ||
| 82 | return -ENOENT; | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline int try_stop_cpus(const struct cpumask *cpumask, | ||
| 86 | cpu_stop_fn_t fn, void *arg) | ||
| 87 | { | ||
| 88 | return stop_cpus(cpumask, fn, arg); | ||
| 89 | } | ||
| 90 | |||
| 91 | #endif /* CONFIG_SMP */ | ||
| 92 | |||
| 93 | /* | ||
| 94 | * stop_machine "Bogolock": stop the entire machine, disable | ||
| 95 | * interrupts. This is a very heavy lock, which is equivalent to | ||
| 96 | * grabbing every spinlock (and more). So the "read" side to such a | ||
| 97 | * lock is anything which disables preempt. | ||
| 98 | */ | ||
| 11 | #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) | 99 | #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) | 
| 12 | 100 | ||
| 13 | /** | 101 | /** | 
| @@ -36,24 +124,7 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); | |||
| 36 | */ | 124 | */ | 
| 37 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); | 125 | int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); | 
| 38 | 126 | ||
| 39 | /** | 127 | #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 
| 40 | * stop_machine_create: create all stop_machine threads | ||
| 41 | * | ||
| 42 | * Description: This causes all stop_machine threads to be created before | ||
| 43 | * stop_machine actually gets called. This can be used by subsystems that | ||
| 44 | * need a non failing stop_machine infrastructure. | ||
| 45 | */ | ||
| 46 | int stop_machine_create(void); | ||
| 47 | |||
| 48 | /** | ||
| 49 | * stop_machine_destroy: destroy all stop_machine threads | ||
| 50 | * | ||
| 51 | * Description: This causes all stop_machine threads which were created with | ||
| 52 | * stop_machine_create to be destroyed again. | ||
| 53 | */ | ||
| 54 | void stop_machine_destroy(void); | ||
| 55 | |||
| 56 | #else | ||
| 57 | 128 | ||
| 58 | static inline int stop_machine(int (*fn)(void *), void *data, | 129 | static inline int stop_machine(int (*fn)(void *), void *data, | 
| 59 | const struct cpumask *cpus) | 130 | const struct cpumask *cpus) | 
| @@ -65,8 +136,5 @@ static inline int stop_machine(int (*fn)(void *), void *data, | |||
| 65 | return ret; | 136 | return ret; | 
| 66 | } | 137 | } | 
| 67 | 138 | ||
| 68 | static inline int stop_machine_create(void) { return 0; } | 139 | #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 
| 69 | static inline void stop_machine_destroy(void) { } | 140 | #endif /* _LINUX_STOP_MACHINE */ | 
| 70 | |||
| 71 | #endif /* CONFIG_SMP */ | ||
| 72 | #endif /* _LINUX_STOP_MACHINE */ | ||
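The cpu_stop interfaces documented above take a non-sleeping callback that monopolizes the target cpu until it returns. A minimal sketch; the callback and its use are hypothetical:

	static int set_flag(void *arg)
	{
		/* runs on the target cpu at maximum priority; must not sleep */
		*(int *)arg = 1;
		return 0;
	}

	static int poke_cpu(unsigned int cpu)
	{
		int flag = 0;

		/* returns the callback's return value, or -ENOENT if the
		 * target cpu is not online */
		return stop_one_cpu(cpu, set_flag, &flag);
	}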
| diff --git a/include/linux/tick.h b/include/linux/tick.h index d2ae79e21be3..b232ccc0ee29 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -42,6 +42,7 @@ enum tick_nohz_mode { | |||
| 42 | * @idle_waketime: Time when the idle was interrupted | 42 | * @idle_waketime: Time when the idle was interrupted | 
| 43 | * @idle_exittime: Time when the idle state was left | 43 | * @idle_exittime: Time when the idle state was left | 
| 44 | * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped | 44 | * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped | 
| 45 | * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding | ||
| 45 | * @sleep_length: Duration of the current idle sleep | 46 | * @sleep_length: Duration of the current idle sleep | 
| 46 | * @do_timer_lst: CPU was the last one doing do_timer before going idle | 47 | * @do_timer_lst: CPU was the last one doing do_timer before going idle | 
| 47 | */ | 48 | */ | 
| @@ -60,7 +61,7 @@ struct tick_sched { | |||
| 60 | ktime_t idle_waketime; | 61 | ktime_t idle_waketime; | 
| 61 | ktime_t idle_exittime; | 62 | ktime_t idle_exittime; | 
| 62 | ktime_t idle_sleeptime; | 63 | ktime_t idle_sleeptime; | 
| 63 | ktime_t idle_lastupdate; | 64 | ktime_t iowait_sleeptime; | 
| 64 | ktime_t sleep_length; | 65 | ktime_t sleep_length; | 
| 65 | unsigned long last_jiffies; | 66 | unsigned long last_jiffies; | 
| 66 | unsigned long next_jiffies; | 67 | unsigned long next_jiffies; | 
| @@ -124,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle); | |||
| 124 | extern void tick_nohz_restart_sched_tick(void); | 125 | extern void tick_nohz_restart_sched_tick(void); | 
| 125 | extern ktime_t tick_nohz_get_sleep_length(void); | 126 | extern ktime_t tick_nohz_get_sleep_length(void); | 
| 126 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); | 127 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); | 
| 128 | extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); | ||
| 127 | # else | 129 | # else | 
| 128 | static inline void tick_nohz_stop_sched_tick(int inidle) { } | 130 | static inline void tick_nohz_stop_sched_tick(int inidle) { } | 
| 129 | static inline void tick_nohz_restart_sched_tick(void) { } | 131 | static inline void tick_nohz_restart_sched_tick(void) { } | 
| @@ -134,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void) | |||
| 134 | return len; | 136 | return len; | 
| 135 | } | 137 | } | 
| 136 | static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } | 138 | static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } | 
| 139 | static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } | ||
| 137 | # endif /* !NO_HZ */ | 140 | # endif /* !NO_HZ */ | 
| 138 | 141 | ||
| 139 | #endif | 142 | #endif | 
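get_cpu_iowait_time_us() mirrors get_cpu_idle_time_us(), including the (u64)-1 return from the !NO_HZ stubs. A sketch of a reader that tolerates both configurations:

	u64 last_update;
	u64 iowait_us = get_cpu_iowait_time_us(cpu, &last_update);

	if (iowait_us == (u64)-1)
		iowait_us = 0;	/* NO_HZ disabled: no iowait accounting */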
| diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 78b4bd3be496..1d85f9a6a199 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
| @@ -33,6 +33,65 @@ struct tracepoint { | |||
| 33 | * Keep in sync with vmlinux.lds.h. | 33 | * Keep in sync with vmlinux.lds.h. | 
| 34 | */ | 34 | */ | 
| 35 | 35 | ||
| 36 | /* | ||
| 37 | * Connect a probe to a tracepoint. | ||
| 38 | * Internal API, should not be used directly. | ||
| 39 | */ | ||
| 40 | extern int tracepoint_probe_register(const char *name, void *probe); | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Disconnect a probe from a tracepoint. | ||
| 44 | * Internal API, should not be used directly. | ||
| 45 | */ | ||
| 46 | extern int tracepoint_probe_unregister(const char *name, void *probe); | ||
| 47 | |||
| 48 | extern int tracepoint_probe_register_noupdate(const char *name, void *probe); | ||
| 49 | extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); | ||
| 50 | extern void tracepoint_probe_update_all(void); | ||
| 51 | |||
| 52 | struct tracepoint_iter { | ||
| 53 | struct module *module; | ||
| 54 | struct tracepoint *tracepoint; | ||
| 55 | }; | ||
| 56 | |||
| 57 | extern void tracepoint_iter_start(struct tracepoint_iter *iter); | ||
| 58 | extern void tracepoint_iter_next(struct tracepoint_iter *iter); | ||
| 59 | extern void tracepoint_iter_stop(struct tracepoint_iter *iter); | ||
| 60 | extern void tracepoint_iter_reset(struct tracepoint_iter *iter); | ||
| 61 | extern int tracepoint_get_iter_range(struct tracepoint **tracepoint, | ||
| 62 | struct tracepoint *begin, struct tracepoint *end); | ||
| 63 | |||
| 64 | /* | ||
| 65 | * tracepoint_synchronize_unregister must be called between the last tracepoint | ||
| 66 | * probe unregistration and the end of module exit to make sure there is no | ||
| 67 | * caller executing a probe when it is freed. | ||
| 68 | */ | ||
| 69 | static inline void tracepoint_synchronize_unregister(void) | ||
| 70 | { | ||
| 71 | synchronize_sched(); | ||
| 72 | } | ||
| 73 | |||
| 74 | #define PARAMS(args...) args | ||
| 75 | |||
| 76 | #ifdef CONFIG_TRACEPOINTS | ||
| 77 | extern void tracepoint_update_probe_range(struct tracepoint *begin, | ||
| 78 | struct tracepoint *end); | ||
| 79 | #else | ||
| 80 | static inline void tracepoint_update_probe_range(struct tracepoint *begin, | ||
| 81 | struct tracepoint *end) | ||
| 82 | { } | ||
| 83 | #endif /* CONFIG_TRACEPOINTS */ | ||
| 84 | |||
| 85 | #endif /* _LINUX_TRACEPOINT_H */ | ||
| 86 | |||
| 87 | /* | ||
| 88 | * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include | ||
| 89 | * file ifdef protection. | ||
| 90 | * This is due to the way trace events work. If a file includes two | ||
| 91 | * trace event headers under one "CREATE_TRACE_POINTS" the first include | ||
| 92 | * will override the TRACE_EVENT and break the second include. | ||
| 93 | */ | ||
| 94 | |||
| 36 | #ifndef DECLARE_TRACE | 95 | #ifndef DECLARE_TRACE | 
| 37 | 96 | ||
| 38 | #define TP_PROTO(args...) args | 97 | #define TP_PROTO(args...) args | 
| @@ -96,9 +155,6 @@ struct tracepoint { | |||
| 96 | #define EXPORT_TRACEPOINT_SYMBOL(name) \ | 155 | #define EXPORT_TRACEPOINT_SYMBOL(name) \ | 
| 97 | EXPORT_SYMBOL(__tracepoint_##name) | 156 | EXPORT_SYMBOL(__tracepoint_##name) | 
| 98 | 157 | ||
| 99 | extern void tracepoint_update_probe_range(struct tracepoint *begin, | ||
| 100 | struct tracepoint *end); | ||
| 101 | |||
| 102 | #else /* !CONFIG_TRACEPOINTS */ | 158 | #else /* !CONFIG_TRACEPOINTS */ | 
| 103 | #define DECLARE_TRACE(name, proto, args) \ | 159 | #define DECLARE_TRACE(name, proto, args) \ | 
| 104 | static inline void _do_trace_##name(struct tracepoint *tp, proto) \ | 160 | static inline void _do_trace_##name(struct tracepoint *tp, proto) \ | 
| @@ -119,61 +175,9 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin, | |||
| 119 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) | 175 | #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) | 
| 120 | #define EXPORT_TRACEPOINT_SYMBOL(name) | 176 | #define EXPORT_TRACEPOINT_SYMBOL(name) | 
| 121 | 177 | ||
| 122 | static inline void tracepoint_update_probe_range(struct tracepoint *begin, | ||
| 123 | struct tracepoint *end) | ||
| 124 | { } | ||
| 125 | #endif /* CONFIG_TRACEPOINTS */ | 178 | #endif /* CONFIG_TRACEPOINTS */ | 
| 126 | #endif /* DECLARE_TRACE */ | 179 | #endif /* DECLARE_TRACE */ | 
| 127 | 180 | ||
| 128 | /* | ||
| 129 | * Connect a probe to a tracepoint. | ||
| 130 | * Internal API, should not be used directly. | ||
| 131 | */ | ||
| 132 | extern int tracepoint_probe_register(const char *name, void *probe); | ||
| 133 | |||
| 134 | /* | ||
| 135 | * Disconnect a probe from a tracepoint. | ||
| 136 | * Internal API, should not be used directly. | ||
| 137 | */ | ||
| 138 | extern int tracepoint_probe_unregister(const char *name, void *probe); | ||
| 139 | |||
| 140 | extern int tracepoint_probe_register_noupdate(const char *name, void *probe); | ||
| 141 | extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); | ||
| 142 | extern void tracepoint_probe_update_all(void); | ||
| 143 | |||
| 144 | struct tracepoint_iter { | ||
| 145 | struct module *module; | ||
| 146 | struct tracepoint *tracepoint; | ||
| 147 | }; | ||
| 148 | |||
| 149 | extern void tracepoint_iter_start(struct tracepoint_iter *iter); | ||
| 150 | extern void tracepoint_iter_next(struct tracepoint_iter *iter); | ||
| 151 | extern void tracepoint_iter_stop(struct tracepoint_iter *iter); | ||
| 152 | extern void tracepoint_iter_reset(struct tracepoint_iter *iter); | ||
| 153 | extern int tracepoint_get_iter_range(struct tracepoint **tracepoint, | ||
| 154 | struct tracepoint *begin, struct tracepoint *end); | ||
| 155 | |||
| 156 | /* | ||
| 157 | * tracepoint_synchronize_unregister must be called between the last tracepoint | ||
| 158 | * probe unregistration and the end of module exit to make sure there is no | ||
| 159 | * caller executing a probe when it is freed. | ||
| 160 | */ | ||
| 161 | static inline void tracepoint_synchronize_unregister(void) | ||
| 162 | { | ||
| 163 | synchronize_sched(); | ||
| 164 | } | ||
| 165 | |||
| 166 | #define PARAMS(args...) args | ||
| 167 | |||
| 168 | #endif /* _LINUX_TRACEPOINT_H */ | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Note: we keep the TRACE_EVENT outside the include file ifdef protection. | ||
| 172 | * This is due to the way trace events work. If a file includes two | ||
| 173 | * trace event headers under one "CREATE_TRACE_POINTS" the first include | ||
| 174 | * will override the TRACE_EVENT and break the second include. | ||
| 175 | */ | ||
| 176 | |||
| 177 | #ifndef TRACE_EVENT | 181 | #ifndef TRACE_EVENT | 
| 178 | /* | 182 | /* | 
| 179 | * For use with the TRACE_EVENT macro: | 183 | * For use with the TRACE_EVENT macro: | 
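tracepoint_synchronize_unregister(), now hoisted above the DECLARE_TRACE machinery, is the barrier a module must place between its last probe unregistration and freeing its text. A sketch using the napi_poll tracepoint declared later in this commit; probe_napi_poll is a hypothetical probe, and unregister_trace_napi_poll() is the wrapper DECLARE_TRACE generates:

	static void __exit demo_exit(void)
	{
		unregister_trace_napi_poll(probe_napi_poll);

		/* make sure no cpu is still executing the probe before
		 * the module text goes away */
		tracepoint_synchronize_unregister();
	}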
| diff --git a/include/linux/types.h b/include/linux/types.h index c42724f8c802..23d237a075e2 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -188,12 +188,12 @@ typedef u32 phys_addr_t; | |||
| 188 | typedef phys_addr_t resource_size_t; | 188 | typedef phys_addr_t resource_size_t; | 
| 189 | 189 | ||
| 190 | typedef struct { | 190 | typedef struct { | 
| 191 | volatile int counter; | 191 | int counter; | 
| 192 | } atomic_t; | 192 | } atomic_t; | 
| 193 | 193 | ||
| 194 | #ifdef CONFIG_64BIT | 194 | #ifdef CONFIG_64BIT | 
| 195 | typedef struct { | 195 | typedef struct { | 
| 196 | volatile long counter; | 196 | long counter; | 
| 197 | } atomic64_t; | 197 | } atomic64_t; | 
| 198 | #endif | 198 | #endif | 
| 199 | 199 | ||
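Dropping volatile from the atomic_t definition pairs with the asm-generic atomic_read() change in this commit: the fresh load is now forced by a volatile cast in the accessor rather than by the type. Polling loops keep their old behavior, e.g.:

	static atomic_t pending = ATOMIC_INIT(1);

	/* atomic_read() casts through (volatile int *), so each
	 * iteration performs a fresh load even though the counter
	 * field itself is no longer volatile */
	while (atomic_read(&pending))
		cpu_relax();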
| diff --git a/include/linux/wait.h b/include/linux/wait.h index a48e16b77d5e..76d96d035ea0 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -127,12 +127,26 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) | |||
| 127 | /* | 127 | /* | 
| 128 | * Used for wake-one threads: | 128 | * Used for wake-one threads: | 
| 129 | */ | 129 | */ | 
| 130 | static inline void __add_wait_queue_exclusive(wait_queue_head_t *q, | ||
| 131 | wait_queue_t *wait) | ||
| 132 | { | ||
| 133 | wait->flags |= WQ_FLAG_EXCLUSIVE; | ||
| 134 | __add_wait_queue(q, wait); | ||
| 135 | } | ||
| 136 | |||
| 130 | static inline void __add_wait_queue_tail(wait_queue_head_t *head, | 137 | static inline void __add_wait_queue_tail(wait_queue_head_t *head, | 
| 131 | wait_queue_t *new) | 138 | wait_queue_t *new) | 
| 132 | { | 139 | { | 
| 133 | list_add_tail(&new->task_list, &head->task_list); | 140 | list_add_tail(&new->task_list, &head->task_list); | 
| 134 | } | 141 | } | 
| 135 | 142 | ||
| 143 | static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q, | ||
| 144 | wait_queue_t *wait) | ||
| 145 | { | ||
| 146 | wait->flags |= WQ_FLAG_EXCLUSIVE; | ||
| 147 | __add_wait_queue_tail(q, wait); | ||
| 148 | } | ||
| 149 | |||
| 136 | static inline void __remove_wait_queue(wait_queue_head_t *head, | 150 | static inline void __remove_wait_queue(wait_queue_head_t *head, | 
| 137 | wait_queue_t *old) | 151 | wait_queue_t *old) | 
| 138 | { | 152 | { | 
| @@ -404,25 +418,6 @@ do { \ | |||
| 404 | }) | 418 | }) | 
| 405 | 419 | ||
| 406 | /* | 420 | /* | 
| 407 | * Must be called with the spinlock in the wait_queue_head_t held. | ||
| 408 | */ | ||
| 409 | static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q, | ||
| 410 | wait_queue_t * wait) | ||
| 411 | { | ||
| 412 | wait->flags |= WQ_FLAG_EXCLUSIVE; | ||
| 413 | __add_wait_queue_tail(q, wait); | ||
| 414 | } | ||
| 415 | |||
| 416 | /* | ||
| 417 | * Must be called with the spinlock in the wait_queue_head_t held. | ||
| 418 | */ | ||
| 419 | static inline void remove_wait_queue_locked(wait_queue_head_t *q, | ||
| 420 | wait_queue_t * wait) | ||
| 421 | { | ||
| 422 | __remove_wait_queue(q, wait); | ||
| 423 | } | ||
| 424 | |||
| 425 | /* | ||
| 426 | * These are the old interfaces to sleep waiting for an event. | 421 | * These are the old interfaces to sleep waiting for an event. | 
| 427 | * They are racy. DO NOT use them, use the wait_event* interfaces above. | 422 | * They are racy. DO NOT use them, use the wait_event* interfaces above. | 
| 428 | * We plan to remove these interfaces. | 423 | * We plan to remove these interfaces. | 
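The new __add_wait_queue_exclusive()/__add_wait_queue_tail_exclusive() helpers replace the *_locked variants removed below them, and like those they assume the caller already holds the wait-queue spinlock. A sketch; q is a hypothetical wait_queue_head_t *:

	DEFINE_WAIT(wait);

	spin_lock_irq(&q->lock);
	/* wake-one semantics: sets WQ_FLAG_EXCLUSIVE and queues at
	 * the tail, behind any non-exclusive waiters */
	__add_wait_queue_tail_exclusive(q, &wait);
	spin_unlock_irq(&q->lock);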
| diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 913bfc226dda..7bf9db525e9e 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h | |||
| @@ -38,8 +38,6 @@ | |||
| 38 | typedef __u32 zorro_id; | 38 | typedef __u32 zorro_id; | 
| 39 | 39 | ||
| 40 | 40 | ||
| 41 | #define ZORRO_WILDCARD (0xffffffff) /* not official */ | ||
| 42 | |||
| 43 | /* Include the ID list */ | 41 | /* Include the ID list */ | 
| 44 | #include <linux/zorro_ids.h> | 42 | #include <linux/zorro_ids.h> | 
| 45 | 43 | ||
| @@ -116,6 +114,7 @@ struct ConfigDev { | |||
| 116 | 114 | ||
| 117 | #include <linux/init.h> | 115 | #include <linux/init.h> | 
| 118 | #include <linux/ioport.h> | 116 | #include <linux/ioport.h> | 
| 117 | #include <linux/mod_devicetable.h> | ||
| 119 | 118 | ||
| 120 | #include <asm/zorro.h> | 119 | #include <asm/zorro.h> | 
| 121 | 120 | ||
| @@ -142,29 +141,10 @@ struct zorro_dev { | |||
| 142 | * Zorro bus | 141 | * Zorro bus | 
| 143 | */ | 142 | */ | 
| 144 | 143 | ||
| 145 | struct zorro_bus { | ||
| 146 | struct list_head devices; /* list of devices on this bus */ | ||
| 147 | unsigned int num_resources; /* number of resources */ | ||
| 148 | struct resource resources[4]; /* address space routed to this bus */ | ||
| 149 | struct device dev; | ||
| 150 | char name[10]; | ||
| 151 | }; | ||
| 152 | |||
| 153 | extern struct zorro_bus zorro_bus; /* single Zorro bus */ | ||
| 154 | extern struct bus_type zorro_bus_type; | 144 | extern struct bus_type zorro_bus_type; | 
| 155 | 145 | ||
| 156 | 146 | ||
| 157 | /* | 147 | /* | 
| 158 | * Zorro device IDs | ||
| 159 | */ | ||
| 160 | |||
| 161 | struct zorro_device_id { | ||
| 162 | zorro_id id; /* Device ID or ZORRO_WILDCARD */ | ||
| 163 | unsigned long driver_data; /* Data private to the driver */ | ||
| 164 | }; | ||
| 165 | |||
| 166 | |||
| 167 | /* | ||
| 168 | * Zorro device drivers | 148 | * Zorro device drivers | 
| 169 | */ | 149 | */ | 
| 170 | 150 | ||
| diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h index b9da1f5591e7..4aeff96ff7d8 100644 --- a/include/media/saa7146_vv.h +++ b/include/media/saa7146_vv.h | |||
| @@ -188,7 +188,6 @@ void saa7146_buffer_timeout(unsigned long data); | |||
| 188 | void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, | 188 | void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, | 
| 189 | struct saa7146_buf *buf); | 189 | struct saa7146_buf *buf); | 
| 190 | 190 | ||
| 191 | int saa7146_vv_devinit(struct saa7146_dev *dev); | ||
| 192 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); | 191 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); | 
| 193 | int saa7146_vv_release(struct saa7146_dev* dev); | 192 | int saa7146_vv_release(struct saa7146_dev* dev); | 
| 194 | 193 | ||
| diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 851c813adb3a..61d73e37d543 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
| @@ -279,6 +279,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | |||
| 279 | /* 2nd level prototypes */ | 279 | /* 2nd level prototypes */ | 
| 280 | void sctp_generate_t3_rtx_event(unsigned long peer); | 280 | void sctp_generate_t3_rtx_event(unsigned long peer); | 
| 281 | void sctp_generate_heartbeat_event(unsigned long peer); | 281 | void sctp_generate_heartbeat_event(unsigned long peer); | 
| 282 | void sctp_generate_proto_unreach_event(unsigned long peer); | ||
| 282 | 283 | ||
| 283 | void sctp_ootb_pkt_free(struct sctp_packet *); | 284 | void sctp_ootb_pkt_free(struct sctp_packet *); | 
| 284 | 285 | ||
| diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 597f8e27aaf6..219043a67bf7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -1010,6 +1010,9 @@ struct sctp_transport { | |||
| 1010 | /* Heartbeat timer is per destination. */ | 1010 | /* Heartbeat timer is per destination. */ | 
| 1011 | struct timer_list hb_timer; | 1011 | struct timer_list hb_timer; | 
| 1012 | 1012 | ||
| 1013 | /* Timer to handle ICMP proto unreachable events */ | ||
| 1014 | struct timer_list proto_unreach_timer; | ||
| 1015 | |||
| 1013 | /* Since we're using per-destination retransmission timers | 1016 | /* Since we're using per-destination retransmission timers | 
| 1014 | * (see above), we're also using per-destination "transmitted" | 1017 | * (see above), we're also using per-destination "transmitted" | 
| 1015 | * queues. This probably ought to be a private struct | 1018 | * queues. This probably ought to be a private struct | 
| diff --git a/include/net/tcp.h b/include/net/tcp.h index 75be5a28815d..aa04b9a5093b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
| @@ -1197,30 +1197,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk, | |||
| 1197 | extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); | 1197 | extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); | 
| 1198 | extern void tcp_free_md5sig_pool(void); | 1198 | extern void tcp_free_md5sig_pool(void); | 
| 1199 | 1199 | ||
| 1200 | extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu); | 1200 | extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); | 
| 1201 | extern void __tcp_put_md5sig_pool(void); | 1201 | extern void tcp_put_md5sig_pool(void); | 
| 1202 | |||
| 1202 | extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); | 1203 | extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); | 
| 1203 | extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, | 1204 | extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, | 
| 1204 | unsigned header_len); | 1205 | unsigned header_len); | 
| 1205 | extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, | 1206 | extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, | 
| 1206 | struct tcp_md5sig_key *key); | 1207 | struct tcp_md5sig_key *key); | 
| 1207 | 1208 | ||
| 1208 | static inline | ||
| 1209 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | ||
| 1210 | { | ||
| 1211 | int cpu = get_cpu(); | ||
| 1212 | struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu); | ||
| 1213 | if (!ret) | ||
| 1214 | put_cpu(); | ||
| 1215 | return ret; | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | static inline void tcp_put_md5sig_pool(void) | ||
| 1219 | { | ||
| 1220 | __tcp_put_md5sig_pool(); | ||
| 1221 | put_cpu(); | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | /* write queue abstraction */ | 1209 | /* write queue abstraction */ | 
| 1225 | static inline void tcp_write_queue_purge(struct sock *sk) | 1210 | static inline void tcp_write_queue_purge(struct sock *sk) | 
| 1226 | { | 1211 | { | 
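tcp_get_md5sig_pool()/tcp_put_md5sig_pool() move out of line, but the calling convention visible in the removed inlines is unchanged: a successful get pins the caller to its cpu until the matching put. A usage sketch; demo_hash is hypothetical:

	static int demo_hash(struct tcphdr *th)
	{
		struct tcp_md5sig_pool *hp;

		hp = tcp_get_md5sig_pool();	/* pins the caller to this cpu */
		if (!hp)
			return -ENOMEM;

		tcp_md5_hash_header(hp, th);
		/* ... hash the rest of the segment ... */

		tcp_put_md5sig_pool();
		return 0;
	}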
| diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 5acfb1eb4df9..1dfab5401511 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h | |||
| @@ -65,6 +65,10 @@ | |||
| 65 | 65 | ||
| 66 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 66 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 
| 67 | 67 | ||
| 68 | /* Make all open coded DECLARE_TRACE nops */ | ||
| 69 | #undef DECLARE_TRACE | ||
| 70 | #define DECLARE_TRACE(name, proto, args) | ||
| 71 | |||
| 68 | #ifdef CONFIG_EVENT_TRACING | 72 | #ifdef CONFIG_EVENT_TRACING | 
| 69 | #include <trace/ftrace.h> | 73 | #include <trace/ftrace.h> | 
| 70 | #endif | 74 | #endif | 
| @@ -75,6 +79,7 @@ | |||
| 75 | #undef DEFINE_EVENT | 79 | #undef DEFINE_EVENT | 
| 76 | #undef DEFINE_EVENT_PRINT | 80 | #undef DEFINE_EVENT_PRINT | 
| 77 | #undef TRACE_HEADER_MULTI_READ | 81 | #undef TRACE_HEADER_MULTI_READ | 
| 82 | #undef DECLARE_TRACE | ||
| 78 | 83 | ||
| 79 | /* Only undef what we defined in this file */ | 84 | /* Only undef what we defined in this file */ | 
| 80 | #ifdef UNDEF_TRACE_INCLUDE_FILE | 85 | #ifdef UNDEF_TRACE_INCLUDE_FILE | 
| diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h index 5c1dcfc16c60..2821b86de63b 100644 --- a/include/trace/events/lock.h +++ b/include/trace/events/lock.h | |||
| @@ -35,15 +35,15 @@ TRACE_EVENT(lock_acquire, | |||
| 35 | __get_str(name)) | 35 | __get_str(name)) | 
| 36 | ); | 36 | ); | 
| 37 | 37 | ||
| 38 | TRACE_EVENT(lock_release, | 38 | DECLARE_EVENT_CLASS(lock, | 
| 39 | 39 | ||
| 40 | TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), | 40 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | 
| 41 | 41 | ||
| 42 | TP_ARGS(lock, nested, ip), | 42 | TP_ARGS(lock, ip), | 
| 43 | 43 | ||
| 44 | TP_STRUCT__entry( | 44 | TP_STRUCT__entry( | 
| 45 | __string(name, lock->name) | 45 | __string( name, lock->name ) | 
| 46 | __field(void *, lockdep_addr) | 46 | __field( void *, lockdep_addr ) | 
| 47 | ), | 47 | ), | 
| 48 | 48 | ||
| 49 | TP_fast_assign( | 49 | TP_fast_assign( | 
| @@ -51,51 +51,30 @@ TRACE_EVENT(lock_release, | |||
| 51 | __entry->lockdep_addr = lock; | 51 | __entry->lockdep_addr = lock; | 
| 52 | ), | 52 | ), | 
| 53 | 53 | ||
| 54 | TP_printk("%p %s", | 54 | TP_printk("%p %s", __entry->lockdep_addr, __get_str(name)) | 
| 55 | __entry->lockdep_addr, __get_str(name)) | ||
| 56 | ); | 55 | ); | 
| 57 | 56 | ||
| 58 | #ifdef CONFIG_LOCK_STAT | 57 | DEFINE_EVENT(lock, lock_release, | 
| 59 | |||
| 60 | TRACE_EVENT(lock_contended, | ||
| 61 | 58 | ||
| 62 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | 59 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | 
| 63 | 60 | ||
| 64 | TP_ARGS(lock, ip), | 61 | TP_ARGS(lock, ip) | 
| 62 | ); | ||
| 65 | 63 | ||
| 66 | TP_STRUCT__entry( | 64 | #ifdef CONFIG_LOCK_STAT | 
| 67 | __string(name, lock->name) | ||
| 68 | __field(void *, lockdep_addr) | ||
| 69 | ), | ||
| 70 | 65 | ||
| 71 | TP_fast_assign( | 66 | DEFINE_EVENT(lock, lock_contended, | 
| 72 | __assign_str(name, lock->name); | ||
| 73 | __entry->lockdep_addr = lock; | ||
| 74 | ), | ||
| 75 | 67 | ||
| 76 | TP_printk("%p %s", | 68 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | 
| 77 | __entry->lockdep_addr, __get_str(name)) | ||
| 78 | ); | ||
| 79 | 69 | ||
| 80 | TRACE_EVENT(lock_acquired, | 70 | TP_ARGS(lock, ip) | 
| 81 | TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), | 71 | ); | 
| 82 | 72 | ||
| 83 | TP_ARGS(lock, ip, waittime), | 73 | DEFINE_EVENT(lock, lock_acquired, | 
| 84 | 74 | ||
| 85 | TP_STRUCT__entry( | 75 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | 
| 86 | __string(name, lock->name) | ||
| 87 | __field(s64, wait_nsec) | ||
| 88 | __field(void *, lockdep_addr) | ||
| 89 | ), | ||
| 90 | 76 | ||
| 91 | TP_fast_assign( | 77 | TP_ARGS(lock, ip) | 
| 92 | __assign_str(name, lock->name); | ||
| 93 | __entry->wait_nsec = waittime; | ||
| 94 | __entry->lockdep_addr = lock; | ||
| 95 | ), | ||
| 96 | TP_printk("%p %s (%llu ns)", __entry->lockdep_addr, | ||
| 97 | __get_str(name), | ||
| 98 | __entry->wait_nsec) | ||
| 99 | ); | 78 | ); | 
| 100 | 79 | ||
| 101 | #endif | 80 | #endif | 
| diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 4b0f48ba16a6..c7bb2f0482fe 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h | |||
| @@ -51,11 +51,14 @@ TRACE_EVENT(module_free, | |||
| 51 | TP_printk("%s", __get_str(name)) | 51 | TP_printk("%s", __get_str(name)) | 
| 52 | ); | 52 | ); | 
| 53 | 53 | ||
| 54 | #ifdef CONFIG_MODULE_UNLOAD | ||
| 55 | /* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */ | ||
| 56 | |||
| 54 | DECLARE_EVENT_CLASS(module_refcnt, | 57 | DECLARE_EVENT_CLASS(module_refcnt, | 
| 55 | 58 | ||
| 56 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 59 | TP_PROTO(struct module *mod, unsigned long ip), | 
| 57 | 60 | ||
| 58 | TP_ARGS(mod, ip, refcnt), | 61 | TP_ARGS(mod, ip), | 
| 59 | 62 | ||
| 60 | TP_STRUCT__entry( | 63 | TP_STRUCT__entry( | 
| 61 | __field( unsigned long, ip ) | 64 | __field( unsigned long, ip ) | 
| @@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt, | |||
| 65 | 68 | ||
| 66 | TP_fast_assign( | 69 | TP_fast_assign( | 
| 67 | __entry->ip = ip; | 70 | __entry->ip = ip; | 
| 68 | __entry->refcnt = refcnt; | 71 | __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); | 
| 69 | __assign_str(name, mod->name); | 72 | __assign_str(name, mod->name); | 
| 70 | ), | 73 | ), | 
| 71 | 74 | ||
| @@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt, | |||
| 75 | 78 | ||
| 76 | DEFINE_EVENT(module_refcnt, module_get, | 79 | DEFINE_EVENT(module_refcnt, module_get, | 
| 77 | 80 | ||
| 78 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 81 | TP_PROTO(struct module *mod, unsigned long ip), | 
| 79 | 82 | ||
| 80 | TP_ARGS(mod, ip, refcnt) | 83 | TP_ARGS(mod, ip) | 
| 81 | ); | 84 | ); | 
| 82 | 85 | ||
| 83 | DEFINE_EVENT(module_refcnt, module_put, | 86 | DEFINE_EVENT(module_refcnt, module_put, | 
| 84 | 87 | ||
| 85 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 88 | TP_PROTO(struct module *mod, unsigned long ip), | 
| 86 | 89 | ||
| 87 | TP_ARGS(mod, ip, refcnt) | 90 | TP_ARGS(mod, ip) | 
| 88 | ); | 91 | ); | 
| 92 | #endif /* CONFIG_MODULE_UNLOAD */ | ||
| 89 | 93 | ||
| 90 | TRACE_EVENT(module_request, | 94 | TRACE_EVENT(module_request, | 
| 91 | 95 | ||
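With the refcnt argument dropped from the module_refcnt class, call sites shrink to the module and the call address; the count is sampled inside TP_fast_assign from mod->refptr. A hedged sketch of a caller after the change:

	/* in try_module_get()-style code, after adjusting the
	 * per-cpu reference counts */
	trace_module_get(mod, _RET_IP_);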
| diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index a8989c4547e7..188deca2f3c7 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h | |||
| @@ -1,4 +1,7 @@ | |||
| 1 | #ifndef _TRACE_NAPI_H_ | 1 | #undef TRACE_SYSTEM | 
| 2 | #define TRACE_SYSTEM napi | ||
| 3 | |||
| 4 | #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 2 | #define _TRACE_NAPI_H_ | 5 | #define _TRACE_NAPI_H_ | 
| 3 | 6 | ||
| 4 | #include <linux/netdevice.h> | 7 | #include <linux/netdevice.h> | 
| @@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll, | |||
| 8 | TP_PROTO(struct napi_struct *napi), | 11 | TP_PROTO(struct napi_struct *napi), | 
| 9 | TP_ARGS(napi)); | 12 | TP_ARGS(napi)); | 
| 10 | 13 | ||
| 11 | #endif | 14 | #endif /* _TRACE_NAPI_H_ */ | 
| 15 | |||
| 16 | /* This part must be outside protection */ | ||
| 17 | #include <trace/define_trace.h> | ||
| diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index cfceb0b73e20..4f733ecea46e 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
| @@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret, | |||
| 51 | 51 | ||
| 52 | /* | 52 | /* | 
| 53 | * Tracepoint for waiting on task to unschedule: | 53 | * Tracepoint for waiting on task to unschedule: | 
| 54 | * | ||
| 55 | * (NOTE: the 'rq' argument is not used by generic trace events, | ||
| 56 | * but used by the latency tracer plugin. ) | ||
| 57 | */ | 54 | */ | 
| 58 | TRACE_EVENT(sched_wait_task, | 55 | TRACE_EVENT(sched_wait_task, | 
| 59 | 56 | ||
| 60 | TP_PROTO(struct rq *rq, struct task_struct *p), | 57 | TP_PROTO(struct task_struct *p), | 
| 61 | 58 | ||
| 62 | TP_ARGS(rq, p), | 59 | TP_ARGS(p), | 
| 63 | 60 | ||
| 64 | TP_STRUCT__entry( | 61 | TP_STRUCT__entry( | 
| 65 | __array( char, comm, TASK_COMM_LEN ) | 62 | __array( char, comm, TASK_COMM_LEN ) | 
| @@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task, | |||
| 79 | 76 | ||
| 80 | /* | 77 | /* | 
| 81 | * Tracepoint for waking up a task: | 78 | * Tracepoint for waking up a task: | 
| 82 | * | ||
| 83 | * (NOTE: the 'rq' argument is not used by generic trace events, | ||
| 84 | * but used by the latency tracer plugin. ) | ||
| 85 | */ | 79 | */ | 
| 86 | DECLARE_EVENT_CLASS(sched_wakeup_template, | 80 | DECLARE_EVENT_CLASS(sched_wakeup_template, | 
| 87 | 81 | ||
| 88 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | 82 | TP_PROTO(struct task_struct *p, int success), | 
| 89 | 83 | ||
| 90 | TP_ARGS(rq, p, success), | 84 | TP_ARGS(p, success), | 
| 91 | 85 | ||
| 92 | TP_STRUCT__entry( | 86 | TP_STRUCT__entry( | 
| 93 | __array( char, comm, TASK_COMM_LEN ) | 87 | __array( char, comm, TASK_COMM_LEN ) | 
| @@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, | |||
| 111 | ); | 105 | ); | 
| 112 | 106 | ||
| 113 | DEFINE_EVENT(sched_wakeup_template, sched_wakeup, | 107 | DEFINE_EVENT(sched_wakeup_template, sched_wakeup, | 
| 114 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | 108 | TP_PROTO(struct task_struct *p, int success), | 
| 115 | TP_ARGS(rq, p, success)); | 109 | TP_ARGS(p, success)); | 
| 116 | 110 | ||
| 117 | /* | 111 | /* | 
| 118 | * Tracepoint for waking up a new task: | 112 | * Tracepoint for waking up a new task: | 
| 119 | * | ||
| 120 | * (NOTE: the 'rq' argument is not used by generic trace events, | ||
| 121 | * but used by the latency tracer plugin. ) | ||
| 122 | */ | 113 | */ | 
| 123 | DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, | 114 | DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, | 
| 124 | TP_PROTO(struct rq *rq, struct task_struct *p, int success), | 115 | TP_PROTO(struct task_struct *p, int success), | 
| 125 | TP_ARGS(rq, p, success)); | 116 | TP_ARGS(p, success)); | 
| 126 | 117 | ||
| 127 | /* | 118 | /* | 
| 128 | * Tracepoint for task switches, performed by the scheduler: | 119 | * Tracepoint for task switches, performed by the scheduler: | 
| 129 | * | ||
| 130 | * (NOTE: the 'rq' argument is not used by generic trace events, | ||
| 131 | * but used by the latency tracer plugin. ) | ||
| 132 | */ | 120 | */ | 
| 133 | TRACE_EVENT(sched_switch, | 121 | TRACE_EVENT(sched_switch, | 
| 134 | 122 | ||
| 135 | TP_PROTO(struct rq *rq, struct task_struct *prev, | 123 | TP_PROTO(struct task_struct *prev, | 
| 136 | struct task_struct *next), | 124 | struct task_struct *next), | 
| 137 | 125 | ||
| 138 | TP_ARGS(rq, prev, next), | 126 | TP_ARGS(prev, next), | 
| 139 | 127 | ||
| 140 | TP_STRUCT__entry( | 128 | TP_STRUCT__entry( | 
| 141 | __array( char, prev_comm, TASK_COMM_LEN ) | 129 | __array( char, prev_comm, TASK_COMM_LEN ) | 
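Since the rq argument is gone from the prototypes, every trace_sched_* caller in kernel/sched.c must shed it as well (those hunks are outside this include/ diff). A sketch of the expected caller-side change, using sched_switch as the example:

	/* before: the runqueue crossed the tracepoint boundary solely
	 * for the latency tracer plugin */
	trace_sched_switch(rq, prev, next);

	/* after */
	trace_sched_switch(prev, next);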
| diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index a510b75ac304..814566c99d29 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h | |||
| @@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver, | |||
| 100 | __entry->sa_handler, __entry->sa_flags) | 100 | __entry->sa_handler, __entry->sa_flags) | 
| 101 | ); | 101 | ); | 
| 102 | 102 | ||
| 103 | /** | 103 | DECLARE_EVENT_CLASS(signal_queue_overflow, | 
| 104 | * signal_overflow_fail - called when signal queue is overflow | ||
| 105 | * @sig: signal number | ||
| 106 | * @group: signal to process group or not (bool) | ||
| 107 | * @info: pointer to struct siginfo | ||
| 108 | * | ||
| 109 | * Kernel fails to generate 'sig' signal with 'info' siginfo, because | ||
| 110 | * siginfo queue is overflow, and the signal is dropped. | ||
| 111 | * 'group' is not 0 if the signal will be sent to a process group. | ||
| 112 | * 'sig' is always one of RT signals. | ||
| 113 | */ | ||
| 114 | TRACE_EVENT(signal_overflow_fail, | ||
| 115 | 104 | ||
| 116 | TP_PROTO(int sig, int group, struct siginfo *info), | 105 | TP_PROTO(int sig, int group, struct siginfo *info), | 
| 117 | 106 | ||
| @@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail, | |||
| 135 | ); | 124 | ); | 
| 136 | 125 | ||
| 137 | /** | 126 | /** | 
| 127 | * signal_overflow_fail - called when signal queue is overflow | ||
| 128 | * @sig: signal number | ||
| 129 | * @group: signal to process group or not (bool) | ||
| 130 | * @info: pointer to struct siginfo | ||
| 131 | * | ||
| 132 | * The kernel fails to generate the 'sig' signal with 'info' siginfo ||
| 133 | * because the siginfo queue has overflowed, so the signal is dropped. ||
| 134 | * 'group' is not 0 if the signal will be sent to a process group. | ||
| 135 | * 'sig' is always one of the RT signals. ||
| 136 | */ | ||
| 137 | DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail, | ||
| 138 | |||
| 139 | TP_PROTO(int sig, int group, struct siginfo *info), | ||
| 140 | |||
| 141 | TP_ARGS(sig, group, info) | ||
| 142 | ); | ||
| 143 | |||
| 144 | /** | ||
| 138 | * signal_lose_info - called when siginfo is lost | 145 | * signal_lose_info - called when siginfo is lost | 
| 139 | * @sig: signal number | 146 | * @sig: signal number | 
| 140 | * @group: signal to process group or not (bool) | 147 | * @group: signal to process group or not (bool) | 
| @@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail, | |||
| 145 | * 'group' is not 0 if the signal will be sent to a process group. | 152 | * 'group' is not 0 if the signal will be sent to a process group. | 
| 146 | * 'sig' is always one of the non-RT signals. | 153 |
| 147 | */ | 154 | */ | 
| 148 | TRACE_EVENT(signal_lose_info, | 155 | DEFINE_EVENT(signal_queue_overflow, signal_lose_info, | 
| 149 | 156 | ||
| 150 | TP_PROTO(int sig, int group, struct siginfo *info), | 157 | TP_PROTO(int sig, int group, struct siginfo *info), | 
| 151 | 158 | ||
| 152 | TP_ARGS(sig, group, info), | 159 | TP_ARGS(sig, group, info) | 
| 153 | |||
| 154 | TP_STRUCT__entry( | ||
| 155 | __field( int, sig ) | ||
| 156 | __field( int, group ) | ||
| 157 | __field( int, errno ) | ||
| 158 | __field( int, code ) | ||
| 159 | ), | ||
| 160 | |||
| 161 | TP_fast_assign( | ||
| 162 | __entry->sig = sig; | ||
| 163 | __entry->group = group; | ||
| 164 | TP_STORE_SIGINFO(__entry, info); | ||
| 165 | ), | ||
| 166 | |||
| 167 | TP_printk("sig=%d group=%d errno=%d code=%d", | ||
| 168 | __entry->sig, __entry->group, __entry->errno, __entry->code) | ||
| 169 | ); | 160 | ); | 
| 161 | |||
| 170 | #endif /* _TRACE_SIGNAL_H */ | 162 | #endif /* _TRACE_SIGNAL_H */ | 
| 171 | 163 | ||
| 172 | /* This part must be outside protection */ | 164 | /* This part must be outside protection */ | 
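The signal.h change is the usual deduplication pattern: two TRACE_EVENT()s with identical argument lists, field layouts, and printk formats collapse into one DECLARE_EVENT_CLASS() plus two lightweight DEFINE_EVENT()s, so the bulky generated code exists once per class. A minimal sketch of the pattern with hypothetical names:

	DECLARE_EVENT_CLASS(sig_template,	/* one copy of the generated code */

		TP_PROTO(int sig),

		TP_ARGS(sig),

		TP_STRUCT__entry(
			__field(	int,	sig	)
		),

		TP_fast_assign(
			__entry->sig = sig;
		),

		TP_printk("sig=%d", __entry->sig)
	);

	/* each DEFINE_EVENT() binds little more than a name to the class */
	DEFINE_EVENT(sig_template, sig_event_a, TP_PROTO(int sig), TP_ARGS(sig));
	DEFINE_EVENT(sig_template, sig_event_b, TP_PROTO(int sig), TP_ARGS(sig));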
| diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index ea6f9d4a20e9..16253db38d73 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
| @@ -154,9 +154,11 @@ | |||
| 154 | * | 154 | * | 
| 155 | * field = (typeof(field))entry; | 155 | * field = (typeof(field))entry; | 
| 156 | * | 156 | * | 
| 157 | * p = get_cpu_var(ftrace_event_seq); | 157 | * p = &get_cpu_var(ftrace_event_seq); | 
| 158 | * trace_seq_init(p); | 158 | * trace_seq_init(p); | 
| 159 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | 159 | * ret = trace_seq_printf(s, "%s: ", <call>); | 
| 160 | * if (ret) | ||
| 161 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | ||
| 160 | * put_cpu(); | 162 | * put_cpu(); | 
| 161 | * if (!ret) | 163 | * if (!ret) | 
| 162 | * return TRACE_TYPE_PARTIAL_LINE; | 164 | * return TRACE_TYPE_PARTIAL_LINE; | 
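The '&' added to the stage-2 pseudocode tracks the real generated code: get_cpu_var() evaluates to the per-cpu variable itself, not to a pointer, so its address must be taken explicitly. Reduced to the essentials (following the pseudocode above, including its put_cpu() pairing):

	/* ftrace_event_seq is a per-cpu struct trace_seq */
	struct trace_seq *p = &get_cpu_var(ftrace_event_seq);	/* note the '&' */

	trace_seq_init(p);
	/* ... print "<call>: " and then the TP_printk format ... */
	put_cpu();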
| @@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
| 450 | * | 452 | * | 
| 451 | * static void ftrace_raw_event_<call>(proto) | 453 | * static void ftrace_raw_event_<call>(proto) | 
| 452 | * { | 454 | * { | 
| 455 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | ||
| 453 | * struct ring_buffer_event *event; | 456 | * struct ring_buffer_event *event; | 
| 454 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 457 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 
| 455 | * struct ring_buffer *buffer; | 458 | * struct ring_buffer *buffer; | 
| 456 | * unsigned long irq_flags; | 459 | * unsigned long irq_flags; | 
| 460 | * int __data_size; | ||
| 457 | * int pc; | 461 | * int pc; | 
| 458 | * | 462 | * | 
| 459 | * local_save_flags(irq_flags); | 463 | * local_save_flags(irq_flags); | 
| 460 | * pc = preempt_count(); | 464 | * pc = preempt_count(); | 
| 461 | * | 465 | * | 
| 466 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | ||
| 467 | * | ||
| 462 | * event = trace_current_buffer_lock_reserve(&buffer, | 468 | * event = trace_current_buffer_lock_reserve(&buffer, | 
| 463 | * event_<call>.id, | 469 | * event_<call>.id, | 
| 464 | * sizeof(struct ftrace_raw_<call>), | 470 | * sizeof(*entry) + __data_size, | 
| 465 | * irq_flags, pc); | 471 | * irq_flags, pc); | 
| 466 | * if (!event) | 472 | * if (!event) | 
| 467 | * return; | 473 | * return; | 
| 468 | * entry = ring_buffer_event_data(event); | 474 | * entry = ring_buffer_event_data(event); | 
| 469 | * | 475 | * | 
| 470 | * <assign>; <-- Here we assign the entries by the __field and | 476 | * { <assign>; } <-- Here we assign the entries by the __field and | 
| 471 | * __array macros. | 477 | * __array macros. | 
| 472 | * | 478 | * | 
| 473 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); | 479 | * if (!filter_current_check_discard(buffer, event_call, entry, event)) | 
| 480 | * trace_current_buffer_unlock_commit(buffer, | ||
| 481 | * event, irq_flags, pc); | ||
| 474 | * } | 482 | * } | 
| 475 | * | 483 | * | 
| 476 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) | 484 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) | 
| 477 | * { | 485 | * { | 
| 478 | * int ret; | 486 | * return register_trace_<call>(ftrace_raw_event_<call>); | 
| 479 | * | ||
| 480 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | ||
| 481 | * if (!ret) | ||
| 482 | * pr_info("event trace: Could not activate trace point " | ||
| 483 | * "probe to <call>"); | ||
| 484 | * return ret; | ||
| 485 | * } | 487 | * } | 
| 486 | * | 488 | * | 
| 487 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) | 489 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) | 
| @@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
| 493 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | 495 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | 
| 494 | * }; | 496 | * }; | 
| 495 | * | 497 | * | 
| 498 | * static const char print_fmt_<call>[] = <TP_printk>; | ||
| 499 | * | ||
| 496 | * static struct ftrace_event_call __used | 500 | * static struct ftrace_event_call __used | 
| 497 | * __attribute__((__aligned__(4))) | 501 | * __attribute__((__aligned__(4))) | 
| 498 | * __attribute__((section("_ftrace_events"))) event_<call> = { | 502 | * __attribute__((section("_ftrace_events"))) event_<call> = { | 
| @@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
| 501 | * .raw_init = trace_event_raw_init, | 505 | * .raw_init = trace_event_raw_init, | 
| 502 | * .regfunc = ftrace_reg_event_<call>, | 506 | * .regfunc = ftrace_reg_event_<call>, | 
| 503 | * .unregfunc = ftrace_unreg_event_<call>, | 507 | * .unregfunc = ftrace_unreg_event_<call>, | 
| 508 | * .print_fmt = print_fmt_<call>, | ||
| 509 | * .define_fields = ftrace_define_fields_<call>, | ||
| 504 | * } | 510 | * } | 
| 505 | * | 511 | * | 
| 506 | */ | 512 | */ | 
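Everything the stage-3 comment describes is spun out of a single TRACE_EVENT() invocation in the trace header. For a hypothetical event foo_bar the author writes only the block below; the macros then expand it into ftrace_raw_event_foo_bar(), the register/unregister hooks, print_fmt_foo_bar[], and the event_foo_bar descriptor dropped into the _ftrace_events section:

	TRACE_EVENT(foo_bar,

		TP_PROTO(int baz),

		TP_ARGS(baz),

		TP_STRUCT__entry(
			__field(	int,	baz	)
		),

		TP_fast_assign(
			__entry->baz = baz;
		),

		TP_printk("baz=%d", __entry->baz)
	);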
| @@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ | |||
| 569 | return; \ | 575 | return; \ | 
| 570 | entry = ring_buffer_event_data(event); \ | 576 | entry = ring_buffer_event_data(event); \ | 
| 571 | \ | 577 | \ | 
| 572 | \ | ||
| 573 | tstruct \ | 578 | tstruct \ | 
| 574 | \ | 579 | \ | 
| 575 | { assign; } \ | 580 | { assign; } \ | 
| @@ -758,13 +763,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ | |||
| 758 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | 763 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | 
| 759 | static notrace void \ | 764 | static notrace void \ | 
| 760 | perf_trace_templ_##call(struct ftrace_event_call *event_call, \ | 765 | perf_trace_templ_##call(struct ftrace_event_call *event_call, \ | 
| 761 | proto) \ | 766 | struct pt_regs *__regs, proto) \ | 
| 762 | { \ | 767 | { \ | 
| 763 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | 768 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | 
| 764 | struct ftrace_raw_##call *entry; \ | 769 | struct ftrace_raw_##call *entry; \ | 
| 765 | u64 __addr = 0, __count = 1; \ | 770 | u64 __addr = 0, __count = 1; \ | 
| 766 | unsigned long irq_flags; \ | 771 | unsigned long irq_flags; \ | 
| 767 | struct pt_regs *__regs; \ | ||
| 768 | int __entry_size; \ | 772 | int __entry_size; \ | 
| 769 | int __data_size; \ | 773 | int __data_size; \ | 
| 770 | int rctx; \ | 774 | int rctx; \ | 
| @@ -785,20 +789,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \ | |||
| 785 | \ | 789 | \ | 
| 786 | { assign; } \ | 790 | { assign; } \ | 
| 787 | \ | 791 | \ | 
| 788 | __regs = &__get_cpu_var(perf_trace_regs); \ | ||
| 789 | perf_fetch_caller_regs(__regs, 2); \ | ||
| 790 | \ | ||
| 791 | perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ | 792 | perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ | 
| 792 | __count, irq_flags, __regs); \ | 793 | __count, irq_flags, __regs); \ | 
| 793 | } | 794 | } | 
| 794 | 795 | ||
| 795 | #undef DEFINE_EVENT | 796 | #undef DEFINE_EVENT | 
| 796 | #define DEFINE_EVENT(template, call, proto, args) \ | 797 | #define DEFINE_EVENT(template, call, proto, args) \ | 
| 797 | static notrace void perf_trace_##call(proto) \ | 798 | static notrace void perf_trace_##call(proto) \ | 
| 798 | { \ | 799 | { \ | 
| 799 | struct ftrace_event_call *event_call = &event_##call; \ | 800 | struct ftrace_event_call *event_call = &event_##call; \ | 
| 800 | \ | 801 | struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \ | 
| 801 | perf_trace_templ_##template(event_call, args); \ | 802 | \ | 
| 803 | perf_fetch_caller_regs(__regs, 1); \ | ||
| 804 | \ | ||
| 805 | perf_trace_templ_##template(event_call, __regs, args); \ | ||
| 806 | \ | ||
| 807 | put_cpu_var(perf_trace_regs); \ | ||
| 802 | } | 808 | } | 
| 803 | 809 | ||
| 804 | #undef DEFINE_EVENT_PRINT | 810 | #undef DEFINE_EVENT_PRINT | 
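With this refactor the perf path samples pt_regs once, in the thin outer per-event wrapper rather than in the shared template, and the skip count drops from 2 to 1 since one stack frame disappears between perf_fetch_caller_regs() and the tracepoint call site. A sketch of the resulting expansion for a hypothetical event foo_bar:

	static notrace void perf_trace_foo_bar(int baz)
	{
		struct ftrace_event_call *event_call = &event_foo_bar;
		struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);

		/* skip = 1: unwind only this wrapper so the captured IP
		 * points at the tracepoint call site */
		perf_fetch_caller_regs(__regs, 1);

		perf_trace_templ_foo_bar(event_call, __regs, baz);

		put_cpu_var(perf_trace_regs);
	}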
