diff options
Diffstat (limited to 'include')
28 files changed, 214 insertions, 171 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index c99c64dc5f3d..c33749f95b32 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h | |||
@@ -33,7 +33,7 @@ | |||
33 | * Atomically reads the value of @v. Note that the guaranteed | 33 | * Atomically reads the value of @v. Note that the guaranteed |
34 | * useful range of an atomic_t is only 24 bits. | 34 | * useful range of an atomic_t is only 24 bits. |
35 | */ | 35 | */ |
36 | #define atomic_read(v) ((v)->counter) | 36 | #define atomic_read(v) (*(volatile int *)&(v)->counter) |
37 | 37 | ||
38 | /** | 38 | /** |
39 | * atomic_set - set atomic variable | 39 | * atomic_set - set atomic variable |
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index e694263445f7..69206957b72c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h | |||
@@ -131,7 +131,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, | |||
131 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); | 131 | debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); |
132 | 132 | ||
133 | } else | 133 | } else |
134 | dma_sync_single_for_cpu(dev, addr, size, dir); | 134 | dma_sync_single_for_cpu(dev, addr + offset, size, dir); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void dma_sync_single_range_for_device(struct device *dev, | 137 | static inline void dma_sync_single_range_for_device(struct device *dev, |
@@ -148,7 +148,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, | |||
148 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); | 148 | debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); |
149 | 149 | ||
150 | } else | 150 | } else |
151 | dma_sync_single_for_device(dev, addr, size, dir); | 151 | dma_sync_single_for_device(dev, addr + offset, size, dir); |
152 | } | 152 | } |
153 | 153 | ||
154 | static inline void | 154 | static inline void |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e929c27ede22..6b9db917e717 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -789,34 +789,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); | |||
789 | extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, | 789 | extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, |
790 | bool interruptible); | 790 | bool interruptible); |
791 | 791 | ||
792 | /** | ||
793 | * ttm_bo_block_reservation | ||
794 | * | ||
795 | * @bo: A pointer to a struct ttm_buffer_object. | ||
796 | * @interruptible: Use interruptible sleep when waiting. | ||
797 | * @no_wait: Don't sleep, but rather return -EBUSY. | ||
798 | * | ||
799 | * Block reservation for validation by simply reserving the buffer. | ||
800 | * This is intended for single buffer use only without eviction, | ||
801 | * and thus needs no deadlock protection. | ||
802 | * | ||
803 | * Returns: | ||
804 | * -EBUSY: If no_wait == 1 and the buffer is already reserved. | ||
805 | * -ERESTARTSYS: If interruptible == 1 and the process received a signal | ||
806 | * while sleeping. | ||
807 | */ | ||
808 | extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, | ||
809 | bool interruptible, bool no_wait); | ||
810 | |||
811 | /** | ||
812 | * ttm_bo_unblock_reservation | ||
813 | * | ||
814 | * @bo: A pointer to a struct ttm_buffer_object. | ||
815 | * | ||
816 | * Unblocks reservation leaving lru lists untouched. | ||
817 | */ | ||
818 | extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo); | ||
819 | |||
820 | /* | 792 | /* |
821 | * ttm_bo_util.c | 793 | * ttm_bo_util.c |
822 | */ | 794 | */ |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b8ad1ea99586..8f78073d7caa 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -530,6 +530,7 @@ static inline struct cgroup_subsys_state *task_subsys_state( | |||
530 | { | 530 | { |
531 | return rcu_dereference_check(task->cgroups->subsys[subsys_id], | 531 | return rcu_dereference_check(task->cgroups->subsys[subsys_id], |
532 | rcu_read_lock_held() || | 532 | rcu_read_lock_held() || |
533 | lockdep_is_held(&task->alloc_lock) || | ||
533 | cgroup_lock_is_held()); | 534 | cgroup_lock_is_held()); |
534 | } | 535 | } |
535 | 536 | ||
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 30b93b2a01a4..eebb617c17d8 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -186,6 +186,8 @@ d_iput: no no no yes | |||
186 | 186 | ||
187 | #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ | 187 | #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ |
188 | 188 | ||
189 | #define DCACHE_CANT_MOUNT 0x0100 | ||
190 | |||
189 | extern spinlock_t dcache_lock; | 191 | extern spinlock_t dcache_lock; |
190 | extern seqlock_t rename_lock; | 192 | extern seqlock_t rename_lock; |
191 | 193 | ||
@@ -358,6 +360,18 @@ static inline int d_unlinked(struct dentry *dentry) | |||
358 | return d_unhashed(dentry) && !IS_ROOT(dentry); | 360 | return d_unhashed(dentry) && !IS_ROOT(dentry); |
359 | } | 361 | } |
360 | 362 | ||
363 | static inline int cant_mount(struct dentry *dentry) | ||
364 | { | ||
365 | return (dentry->d_flags & DCACHE_CANT_MOUNT); | ||
366 | } | ||
367 | |||
368 | static inline void dont_mount(struct dentry *dentry) | ||
369 | { | ||
370 | spin_lock(&dentry->d_lock); | ||
371 | dentry->d_flags |= DCACHE_CANT_MOUNT; | ||
372 | spin_unlock(&dentry->d_lock); | ||
373 | } | ||
374 | |||
361 | static inline struct dentry *dget_parent(struct dentry *dentry) | 375 | static inline struct dentry *dget_parent(struct dentry *dentry) |
362 | { | 376 | { |
363 | struct dentry *ret; | 377 | struct dentry *ret; |
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 8c243aaa86a7..597692f1fc8d 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h | |||
@@ -20,12 +20,14 @@ struct debug_obj_descr; | |||
20 | * struct debug_obj - representaion of an tracked object | 20 | * struct debug_obj - representaion of an tracked object |
21 | * @node: hlist node to link the object into the tracker list | 21 | * @node: hlist node to link the object into the tracker list |
22 | * @state: tracked object state | 22 | * @state: tracked object state |
23 | * @astate: current active state | ||
23 | * @object: pointer to the real object | 24 | * @object: pointer to the real object |
24 | * @descr: pointer to an object type specific debug description structure | 25 | * @descr: pointer to an object type specific debug description structure |
25 | */ | 26 | */ |
26 | struct debug_obj { | 27 | struct debug_obj { |
27 | struct hlist_node node; | 28 | struct hlist_node node; |
28 | enum debug_obj_state state; | 29 | enum debug_obj_state state; |
30 | unsigned int astate; | ||
29 | void *object; | 31 | void *object; |
30 | struct debug_obj_descr *descr; | 32 | struct debug_obj_descr *descr; |
31 | }; | 33 | }; |
@@ -60,6 +62,15 @@ extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); | |||
60 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); | 62 | extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); |
61 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); | 63 | extern void debug_object_free (void *addr, struct debug_obj_descr *descr); |
62 | 64 | ||
65 | /* | ||
66 | * Active state: | ||
67 | * - Set at 0 upon initialization. | ||
68 | * - Must return to 0 before deactivation. | ||
69 | */ | ||
70 | extern void | ||
71 | debug_object_active_state(void *addr, struct debug_obj_descr *descr, | ||
72 | unsigned int expect, unsigned int next); | ||
73 | |||
63 | extern void debug_objects_early_init(void); | 74 | extern void debug_objects_early_init(void); |
64 | extern void debug_objects_mem_init(void); | 75 | extern void debug_objects_mem_init(void); |
65 | #else | 76 | #else |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index c0f4b364c711..39e71b0a3bfd 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -58,6 +58,7 @@ struct trace_iterator { | |||
58 | /* The below is zeroed out in pipe_read */ | 58 | /* The below is zeroed out in pipe_read */ |
59 | struct trace_seq seq; | 59 | struct trace_seq seq; |
60 | struct trace_entry *ent; | 60 | struct trace_entry *ent; |
61 | unsigned long lost_events; | ||
61 | int leftover; | 62 | int leftover; |
62 | int cpu; | 63 | int cpu; |
63 | u64 ts; | 64 | u64 ts; |
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index c9bf92cd7653..d94963b379d9 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
@@ -79,10 +79,7 @@ enum { | |||
79 | IFLA_NET_NS_PID, | 79 | IFLA_NET_NS_PID, |
80 | IFLA_IFALIAS, | 80 | IFLA_IFALIAS, |
81 | IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ | 81 | IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ |
82 | IFLA_VF_MAC, /* Hardware queue specific attributes */ | 82 | IFLA_VFINFO_LIST, |
83 | IFLA_VF_VLAN, | ||
84 | IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ | ||
85 | IFLA_VFINFO, | ||
86 | __IFLA_MAX | 83 | __IFLA_MAX |
87 | }; | 84 | }; |
88 | 85 | ||
@@ -203,6 +200,24 @@ enum macvlan_mode { | |||
203 | 200 | ||
204 | /* SR-IOV virtual function managment section */ | 201 | /* SR-IOV virtual function managment section */ |
205 | 202 | ||
203 | enum { | ||
204 | IFLA_VF_INFO_UNSPEC, | ||
205 | IFLA_VF_INFO, | ||
206 | __IFLA_VF_INFO_MAX, | ||
207 | }; | ||
208 | |||
209 | #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) | ||
210 | |||
211 | enum { | ||
212 | IFLA_VF_UNSPEC, | ||
213 | IFLA_VF_MAC, /* Hardware queue specific attributes */ | ||
214 | IFLA_VF_VLAN, | ||
215 | IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ | ||
216 | __IFLA_VF_MAX, | ||
217 | }; | ||
218 | |||
219 | #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) | ||
220 | |||
206 | struct ifla_vf_mac { | 221 | struct ifla_vf_mac { |
207 | __u32 vf; | 222 | __u32 vf; |
208 | __u8 mac[32]; /* MAX_ADDR_LEN */ | 223 | __u8 mac[32]; /* MAX_ADDR_LEN */ |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b1ed1cd8e2a8..7996fc2c9ba9 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -49,7 +49,6 @@ extern struct group_info init_groups; | |||
49 | { .first = &init_task.pids[PIDTYPE_PGID].node }, \ | 49 | { .first = &init_task.pids[PIDTYPE_PGID].node }, \ |
50 | { .first = &init_task.pids[PIDTYPE_SID].node }, \ | 50 | { .first = &init_task.pids[PIDTYPE_SID].node }, \ |
51 | }, \ | 51 | }, \ |
52 | .rcu = RCU_HEAD_INIT, \ | ||
53 | .level = 0, \ | 52 | .level = 0, \ |
54 | .numbers = { { \ | 53 | .numbers = { { \ |
55 | .nr = 0, \ | 54 | .nr = 0, \ |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3af4ffd591b9..be22ad83689c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -37,9 +37,9 @@ struct iommu_ops { | |||
37 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); | 37 | int (*attach_dev)(struct iommu_domain *domain, struct device *dev); |
38 | void (*detach_dev)(struct iommu_domain *domain, struct device *dev); | 38 | void (*detach_dev)(struct iommu_domain *domain, struct device *dev); |
39 | int (*map)(struct iommu_domain *domain, unsigned long iova, | 39 | int (*map)(struct iommu_domain *domain, unsigned long iova, |
40 | phys_addr_t paddr, size_t size, int prot); | 40 | phys_addr_t paddr, int gfp_order, int prot); |
41 | void (*unmap)(struct iommu_domain *domain, unsigned long iova, | 41 | int (*unmap)(struct iommu_domain *domain, unsigned long iova, |
42 | size_t size); | 42 | int gfp_order); |
43 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, | 43 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, |
44 | unsigned long iova); | 44 | unsigned long iova); |
45 | int (*domain_has_cap)(struct iommu_domain *domain, | 45 | int (*domain_has_cap)(struct iommu_domain *domain, |
@@ -56,10 +56,10 @@ extern int iommu_attach_device(struct iommu_domain *domain, | |||
56 | struct device *dev); | 56 | struct device *dev); |
57 | extern void iommu_detach_device(struct iommu_domain *domain, | 57 | extern void iommu_detach_device(struct iommu_domain *domain, |
58 | struct device *dev); | 58 | struct device *dev); |
59 | extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, | 59 | extern int iommu_map(struct iommu_domain *domain, unsigned long iova, |
60 | phys_addr_t paddr, size_t size, int prot); | 60 | phys_addr_t paddr, int gfp_order, int prot); |
61 | extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, | 61 | extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
62 | size_t size); | 62 | int gfp_order); |
63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
64 | unsigned long iova); | 64 | unsigned long iova); |
65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | 65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, |
@@ -96,16 +96,16 @@ static inline void iommu_detach_device(struct iommu_domain *domain, | |||
96 | { | 96 | { |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int iommu_map_range(struct iommu_domain *domain, | 99 | static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, |
100 | unsigned long iova, phys_addr_t paddr, | 100 | phys_addr_t paddr, int gfp_order, int prot) |
101 | size_t size, int prot) | ||
102 | { | 101 | { |
103 | return -ENODEV; | 102 | return -ENODEV; |
104 | } | 103 | } |
105 | 104 | ||
106 | static inline void iommu_unmap_range(struct iommu_domain *domain, | 105 | static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, |
107 | unsigned long iova, size_t size) | 106 | int gfp_order) |
108 | { | 107 | { |
108 | return -ENODEV; | ||
109 | } | 109 | } |
110 | 110 | ||
111 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 111 | static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f58e9d836f32..56fde4364e4c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -474,4 +474,13 @@ struct platform_device_id { | |||
474 | __attribute__((aligned(sizeof(kernel_ulong_t)))); | 474 | __attribute__((aligned(sizeof(kernel_ulong_t)))); |
475 | }; | 475 | }; |
476 | 476 | ||
477 | struct zorro_device_id { | ||
478 | __u32 id; /* Device ID or ZORRO_WILDCARD */ | ||
479 | kernel_ulong_t driver_data; /* Data private to the driver */ | ||
480 | }; | ||
481 | |||
482 | #define ZORRO_WILDCARD (0xffffffff) /* not official */ | ||
483 | |||
484 | #define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X" | ||
485 | |||
477 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 486 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 515d53ae6a79..6914fcad4673 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module) | |||
465 | if (module) { | 465 | if (module) { |
466 | preempt_disable(); | 466 | preempt_disable(); |
467 | __this_cpu_inc(module->refptr->incs); | 467 | __this_cpu_inc(module->refptr->incs); |
468 | trace_module_get(module, _THIS_IP_, | 468 | trace_module_get(module, _THIS_IP_); |
469 | __this_cpu_read(module->refptr->incs)); | ||
470 | preempt_enable(); | 469 | preempt_enable(); |
471 | } | 470 | } |
472 | } | 471 | } |
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module) | |||
480 | 479 | ||
481 | if (likely(module_is_live(module))) { | 480 | if (likely(module_is_live(module))) { |
482 | __this_cpu_inc(module->refptr->incs); | 481 | __this_cpu_inc(module->refptr->incs); |
483 | trace_module_get(module, _THIS_IP_, | 482 | trace_module_get(module, _THIS_IP_); |
484 | __this_cpu_read(module->refptr->incs)); | ||
485 | } else | 483 | } else |
486 | ret = 0; | 484 | ret = 0; |
487 | 485 | ||
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 212da17d06af..5417944d3687 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -44,12 +44,14 @@ extern int platform_get_irq_byname(struct platform_device *, const char *); | |||
44 | extern int platform_add_devices(struct platform_device **, int); | 44 | extern int platform_add_devices(struct platform_device **, int); |
45 | 45 | ||
46 | extern struct platform_device *platform_device_register_simple(const char *, int id, | 46 | extern struct platform_device *platform_device_register_simple(const char *, int id, |
47 | struct resource *, unsigned int); | 47 | const struct resource *, unsigned int); |
48 | extern struct platform_device *platform_device_register_data(struct device *, | 48 | extern struct platform_device *platform_device_register_data(struct device *, |
49 | const char *, int, const void *, size_t); | 49 | const char *, int, const void *, size_t); |
50 | 50 | ||
51 | extern struct platform_device *platform_device_alloc(const char *name, int id); | 51 | extern struct platform_device *platform_device_alloc(const char *name, int id); |
52 | extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); | 52 | extern int platform_device_add_resources(struct platform_device *pdev, |
53 | const struct resource *res, | ||
54 | unsigned int num); | ||
53 | extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); | 55 | extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); |
54 | extern int platform_device_add(struct platform_device *pdev); | 56 | extern int platform_device_add(struct platform_device *pdev); |
55 | extern void platform_device_del(struct platform_device *pdev); | 57 | extern void platform_device_del(struct platform_device *pdev); |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 07db2feb8572..b653b4aaa8a6 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -56,8 +56,6 @@ struct rcu_head { | |||
56 | }; | 56 | }; |
57 | 57 | ||
58 | /* Exported common interfaces */ | 58 | /* Exported common interfaces */ |
59 | extern void synchronize_rcu_bh(void); | ||
60 | extern void synchronize_sched(void); | ||
61 | extern void rcu_barrier(void); | 59 | extern void rcu_barrier(void); |
62 | extern void rcu_barrier_bh(void); | 60 | extern void rcu_barrier_bh(void); |
63 | extern void rcu_barrier_sched(void); | 61 | extern void rcu_barrier_sched(void); |
@@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats(char *page); | |||
66 | 64 | ||
67 | /* Internal to kernel */ | 65 | /* Internal to kernel */ |
68 | extern void rcu_init(void); | 66 | extern void rcu_init(void); |
69 | extern int rcu_scheduler_active; | ||
70 | extern void rcu_scheduler_starting(void); | ||
71 | 67 | ||
72 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 68 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
73 | #include <linux/rcutree.h> | 69 | #include <linux/rcutree.h> |
@@ -83,6 +79,14 @@ extern void rcu_scheduler_starting(void); | |||
83 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 79 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
84 | } while (0) | 80 | } while (0) |
85 | 81 | ||
82 | static inline void init_rcu_head_on_stack(struct rcu_head *head) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | ||
87 | { | ||
88 | } | ||
89 | |||
86 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 90 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
87 | 91 | ||
88 | extern struct lockdep_map rcu_lock_map; | 92 | extern struct lockdep_map rcu_lock_map; |
@@ -106,12 +110,13 @@ extern int debug_lockdep_rcu_enabled(void); | |||
106 | /** | 110 | /** |
107 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 111 | * rcu_read_lock_held - might we be in RCU read-side critical section? |
108 | * | 112 | * |
109 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 113 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU |
110 | * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 114 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
111 | * this assumes we are in an RCU read-side critical section unless it can | 115 | * this assumes we are in an RCU read-side critical section unless it can |
112 | * prove otherwise. | 116 | * prove otherwise. |
113 | * | 117 | * |
114 | * Check rcu_scheduler_active to prevent false positives during boot. | 118 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
119 | * and while lockdep is disabled. | ||
115 | */ | 120 | */ |
116 | static inline int rcu_read_lock_held(void) | 121 | static inline int rcu_read_lock_held(void) |
117 | { | 122 | { |
@@ -129,13 +134,15 @@ extern int rcu_read_lock_bh_held(void); | |||
129 | /** | 134 | /** |
130 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 135 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? |
131 | * | 136 | * |
132 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an | 137 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
133 | * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 138 | * RCU-sched read-side critical section. In absence of |
134 | * this assumes we are in an RCU-sched read-side critical section unless it | 139 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
135 | * can prove otherwise. Note that disabling of preemption (including | 140 | * critical section unless it can prove otherwise. Note that disabling |
136 | * disabling irqs) counts as an RCU-sched read-side critical section. | 141 | * of preemption (including disabling irqs) counts as an RCU-sched |
142 | * read-side critical section. | ||
137 | * | 143 | * |
138 | * Check rcu_scheduler_active to prevent false positives during boot. | 144 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
145 | * and while lockdep is disabled. | ||
139 | */ | 146 | */ |
140 | #ifdef CONFIG_PREEMPT | 147 | #ifdef CONFIG_PREEMPT |
141 | static inline int rcu_read_lock_sched_held(void) | 148 | static inline int rcu_read_lock_sched_held(void) |
@@ -177,7 +184,7 @@ static inline int rcu_read_lock_bh_held(void) | |||
177 | #ifdef CONFIG_PREEMPT | 184 | #ifdef CONFIG_PREEMPT |
178 | static inline int rcu_read_lock_sched_held(void) | 185 | static inline int rcu_read_lock_sched_held(void) |
179 | { | 186 | { |
180 | return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); | 187 | return preempt_count() != 0 || irqs_disabled(); |
181 | } | 188 | } |
182 | #else /* #ifdef CONFIG_PREEMPT */ | 189 | #else /* #ifdef CONFIG_PREEMPT */ |
183 | static inline int rcu_read_lock_sched_held(void) | 190 | static inline int rcu_read_lock_sched_held(void) |
@@ -190,6 +197,17 @@ static inline int rcu_read_lock_sched_held(void) | |||
190 | 197 | ||
191 | #ifdef CONFIG_PROVE_RCU | 198 | #ifdef CONFIG_PROVE_RCU |
192 | 199 | ||
200 | extern int rcu_my_thread_group_empty(void); | ||
201 | |||
202 | #define __do_rcu_dereference_check(c) \ | ||
203 | do { \ | ||
204 | static bool __warned; \ | ||
205 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | ||
206 | __warned = true; \ | ||
207 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
208 | } \ | ||
209 | } while (0) | ||
210 | |||
193 | /** | 211 | /** |
194 | * rcu_dereference_check - rcu_dereference with debug checking | 212 | * rcu_dereference_check - rcu_dereference with debug checking |
195 | * @p: The pointer to read, prior to dereferencing | 213 | * @p: The pointer to read, prior to dereferencing |
@@ -219,8 +237,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
219 | */ | 237 | */ |
220 | #define rcu_dereference_check(p, c) \ | 238 | #define rcu_dereference_check(p, c) \ |
221 | ({ \ | 239 | ({ \ |
222 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 240 | __do_rcu_dereference_check(c); \ |
223 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
224 | rcu_dereference_raw(p); \ | 241 | rcu_dereference_raw(p); \ |
225 | }) | 242 | }) |
226 | 243 | ||
@@ -237,8 +254,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
237 | */ | 254 | */ |
238 | #define rcu_dereference_protected(p, c) \ | 255 | #define rcu_dereference_protected(p, c) \ |
239 | ({ \ | 256 | ({ \ |
240 | if (debug_lockdep_rcu_enabled() && !(c)) \ | 257 | __do_rcu_dereference_check(c); \ |
241 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | ||
242 | (p); \ | 258 | (p); \ |
243 | }) | 259 | }) |
244 | 260 | ||
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index a5195875480a..14e5a76b2c06 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -29,6 +29,10 @@ | |||
29 | 29 | ||
30 | void rcu_sched_qs(int cpu); | 30 | void rcu_sched_qs(int cpu); |
31 | void rcu_bh_qs(int cpu); | 31 | void rcu_bh_qs(int cpu); |
32 | static inline void rcu_note_context_switch(int cpu) | ||
33 | { | ||
34 | rcu_sched_qs(cpu); | ||
35 | } | ||
32 | 36 | ||
33 | #define __rcu_read_lock() preempt_disable() | 37 | #define __rcu_read_lock() preempt_disable() |
34 | #define __rcu_read_unlock() preempt_enable() | 38 | #define __rcu_read_unlock() preempt_enable() |
@@ -74,7 +78,17 @@ static inline void rcu_sched_force_quiescent_state(void) | |||
74 | { | 78 | { |
75 | } | 79 | } |
76 | 80 | ||
77 | #define synchronize_rcu synchronize_sched | 81 | extern void synchronize_sched(void); |
82 | |||
83 | static inline void synchronize_rcu(void) | ||
84 | { | ||
85 | synchronize_sched(); | ||
86 | } | ||
87 | |||
88 | static inline void synchronize_rcu_bh(void) | ||
89 | { | ||
90 | synchronize_sched(); | ||
91 | } | ||
78 | 92 | ||
79 | static inline void synchronize_rcu_expedited(void) | 93 | static inline void synchronize_rcu_expedited(void) |
80 | { | 94 | { |
@@ -114,4 +128,17 @@ static inline int rcu_preempt_depth(void) | |||
114 | return 0; | 128 | return 0; |
115 | } | 129 | } |
116 | 130 | ||
131 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
132 | |||
133 | extern int rcu_scheduler_active __read_mostly; | ||
134 | extern void rcu_scheduler_starting(void); | ||
135 | |||
136 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
137 | |||
138 | static inline void rcu_scheduler_starting(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
143 | |||
117 | #endif /* __LINUX_RCUTINY_H */ | 144 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 42cc3a04779e..48282055e83d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -34,6 +34,7 @@ struct notifier_block; | |||
34 | 34 | ||
35 | extern void rcu_sched_qs(int cpu); | 35 | extern void rcu_sched_qs(int cpu); |
36 | extern void rcu_bh_qs(int cpu); | 36 | extern void rcu_bh_qs(int cpu); |
37 | extern void rcu_note_context_switch(int cpu); | ||
37 | extern int rcu_needs_cpu(int cpu); | 38 | extern int rcu_needs_cpu(int cpu); |
38 | extern int rcu_expedited_torture_stats(char *page); | 39 | extern int rcu_expedited_torture_stats(char *page); |
39 | 40 | ||
@@ -86,6 +87,8 @@ static inline void __rcu_read_unlock_bh(void) | |||
86 | 87 | ||
87 | extern void call_rcu_sched(struct rcu_head *head, | 88 | extern void call_rcu_sched(struct rcu_head *head, |
88 | void (*func)(struct rcu_head *rcu)); | 89 | void (*func)(struct rcu_head *rcu)); |
90 | extern void synchronize_rcu_bh(void); | ||
91 | extern void synchronize_sched(void); | ||
89 | extern void synchronize_rcu_expedited(void); | 92 | extern void synchronize_rcu_expedited(void); |
90 | 93 | ||
91 | static inline void synchronize_rcu_bh_expedited(void) | 94 | static inline void synchronize_rcu_bh_expedited(void) |
@@ -120,4 +123,7 @@ static inline int rcu_blocking_is_gp(void) | |||
120 | return num_online_cpus() == 1; | 123 | return num_online_cpus() == 1; |
121 | } | 124 | } |
122 | 125 | ||
126 | extern void rcu_scheduler_starting(void); | ||
127 | extern int rcu_scheduler_active __read_mostly; | ||
128 | |||
123 | #endif /* __LINUX_RCUTREE_H */ | 129 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 5fcc31ed5771..c8297761e414 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -120,9 +120,11 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
120 | unsigned long length, void *data); | 120 | unsigned long length, void *data); |
121 | 121 | ||
122 | struct ring_buffer_event * | 122 | struct ring_buffer_event * |
123 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); | 123 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, |
124 | unsigned long *lost_events); | ||
124 | struct ring_buffer_event * | 125 | struct ring_buffer_event * |
125 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); | 126 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, |
127 | unsigned long *lost_events); | ||
126 | 128 | ||
127 | struct ring_buffer_iter * | 129 | struct ring_buffer_iter * |
128 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); | 130 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index e0447c64af6a..28b71ee133f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1490,7 +1490,6 @@ struct task_struct { | |||
1490 | /* bitmask of trace recursion */ | 1490 | /* bitmask of trace recursion */ |
1491 | unsigned long trace_recursion; | 1491 | unsigned long trace_recursion; |
1492 | #endif /* CONFIG_TRACING */ | 1492 | #endif /* CONFIG_TRACING */ |
1493 | unsigned long stack_start; | ||
1494 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | 1493 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ |
1495 | struct memcg_batch_info { | 1494 | struct memcg_batch_info { |
1496 | int do_batch; /* incremented when batch uncharge started */ | 1495 | int do_batch; /* incremented when batch uncharge started */ |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5ecb222af9..4d5d2f546dbf 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #ifndef _LINUX_SRCU_H | 27 | #ifndef _LINUX_SRCU_H |
28 | #define _LINUX_SRCU_H | 28 | #define _LINUX_SRCU_H |
29 | 29 | ||
30 | #include <linux/mutex.h> | ||
31 | |||
30 | struct srcu_struct_array { | 32 | struct srcu_struct_array { |
31 | int c[2]; | 33 | int c[2]; |
32 | }; | 34 | }; |
@@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_struct *sp); | |||
84 | /** | 86 | /** |
85 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 87 | * srcu_read_lock_held - might we be in SRCU read-side critical section? |
86 | * | 88 | * |
87 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | 89 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU |
88 | * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | 90 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
89 | * this assumes we are in an SRCU read-side critical section unless it can | 91 | * this assumes we are in an SRCU read-side critical section unless it can |
90 | * prove otherwise. | 92 | * prove otherwise. |
91 | */ | 93 | */ |
diff --git a/include/linux/types.h b/include/linux/types.h index c42724f8c802..23d237a075e2 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -188,12 +188,12 @@ typedef u32 phys_addr_t; | |||
188 | typedef phys_addr_t resource_size_t; | 188 | typedef phys_addr_t resource_size_t; |
189 | 189 | ||
190 | typedef struct { | 190 | typedef struct { |
191 | volatile int counter; | 191 | int counter; |
192 | } atomic_t; | 192 | } atomic_t; |
193 | 193 | ||
194 | #ifdef CONFIG_64BIT | 194 | #ifdef CONFIG_64BIT |
195 | typedef struct { | 195 | typedef struct { |
196 | volatile long counter; | 196 | long counter; |
197 | } atomic64_t; | 197 | } atomic64_t; |
198 | #endif | 198 | #endif |
199 | 199 | ||
diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 913bfc226dda..7bf9db525e9e 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h | |||
@@ -38,8 +38,6 @@ | |||
38 | typedef __u32 zorro_id; | 38 | typedef __u32 zorro_id; |
39 | 39 | ||
40 | 40 | ||
41 | #define ZORRO_WILDCARD (0xffffffff) /* not official */ | ||
42 | |||
43 | /* Include the ID list */ | 41 | /* Include the ID list */ |
44 | #include <linux/zorro_ids.h> | 42 | #include <linux/zorro_ids.h> |
45 | 43 | ||
@@ -116,6 +114,7 @@ struct ConfigDev { | |||
116 | 114 | ||
117 | #include <linux/init.h> | 115 | #include <linux/init.h> |
118 | #include <linux/ioport.h> | 116 | #include <linux/ioport.h> |
117 | #include <linux/mod_devicetable.h> | ||
119 | 118 | ||
120 | #include <asm/zorro.h> | 119 | #include <asm/zorro.h> |
121 | 120 | ||
@@ -142,29 +141,10 @@ struct zorro_dev { | |||
142 | * Zorro bus | 141 | * Zorro bus |
143 | */ | 142 | */ |
144 | 143 | ||
145 | struct zorro_bus { | ||
146 | struct list_head devices; /* list of devices on this bus */ | ||
147 | unsigned int num_resources; /* number of resources */ | ||
148 | struct resource resources[4]; /* address space routed to this bus */ | ||
149 | struct device dev; | ||
150 | char name[10]; | ||
151 | }; | ||
152 | |||
153 | extern struct zorro_bus zorro_bus; /* single Zorro bus */ | ||
154 | extern struct bus_type zorro_bus_type; | 144 | extern struct bus_type zorro_bus_type; |
155 | 145 | ||
156 | 146 | ||
157 | /* | 147 | /* |
158 | * Zorro device IDs | ||
159 | */ | ||
160 | |||
161 | struct zorro_device_id { | ||
162 | zorro_id id; /* Device ID or ZORRO_WILDCARD */ | ||
163 | unsigned long driver_data; /* Data private to the driver */ | ||
164 | }; | ||
165 | |||
166 | |||
167 | /* | ||
168 | * Zorro device drivers | 148 | * Zorro device drivers |
169 | */ | 149 | */ |
170 | 150 | ||
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h index b9da1f5591e7..4aeff96ff7d8 100644 --- a/include/media/saa7146_vv.h +++ b/include/media/saa7146_vv.h | |||
@@ -188,7 +188,6 @@ void saa7146_buffer_timeout(unsigned long data); | |||
188 | void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, | 188 | void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, |
189 | struct saa7146_buf *buf); | 189 | struct saa7146_buf *buf); |
190 | 190 | ||
191 | int saa7146_vv_devinit(struct saa7146_dev *dev); | ||
192 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); | 191 | int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv); |
193 | int saa7146_vv_release(struct saa7146_dev* dev); | 192 | int saa7146_vv_release(struct saa7146_dev* dev); |
194 | 193 | ||
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 851c813adb3a..61d73e37d543 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -279,6 +279,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | |||
279 | /* 2nd level prototypes */ | 279 | /* 2nd level prototypes */ |
280 | void sctp_generate_t3_rtx_event(unsigned long peer); | 280 | void sctp_generate_t3_rtx_event(unsigned long peer); |
281 | void sctp_generate_heartbeat_event(unsigned long peer); | 281 | void sctp_generate_heartbeat_event(unsigned long peer); |
282 | void sctp_generate_proto_unreach_event(unsigned long peer); | ||
282 | 283 | ||
283 | void sctp_ootb_pkt_free(struct sctp_packet *); | 284 | void sctp_ootb_pkt_free(struct sctp_packet *); |
284 | 285 | ||
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 597f8e27aaf6..219043a67bf7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1010,6 +1010,9 @@ struct sctp_transport { | |||
1010 | /* Heartbeat timer is per destination. */ | 1010 | /* Heartbeat timer is per destination. */ |
1011 | struct timer_list hb_timer; | 1011 | struct timer_list hb_timer; |
1012 | 1012 | ||
1013 | /* Timer to handle ICMP proto unreachable events */ | ||
1014 | struct timer_list proto_unreach_timer; | ||
1015 | |||
1013 | /* Since we're using per-destination retransmission timers | 1016 | /* Since we're using per-destination retransmission timers |
1014 | * (see above), we're also using per-destination "transmitted" | 1017 | * (see above), we're also using per-destination "transmitted" |
1015 | * queues. This probably ought to be a private struct | 1018 | * queues. This probably ought to be a private struct |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 75be5a28815d..aa04b9a5093b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1197,30 +1197,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk, | |||
1197 | extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); | 1197 | extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); |
1198 | extern void tcp_free_md5sig_pool(void); | 1198 | extern void tcp_free_md5sig_pool(void); |
1199 | 1199 | ||
1200 | extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu); | 1200 | extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); |
1201 | extern void __tcp_put_md5sig_pool(void); | 1201 | extern void tcp_put_md5sig_pool(void); |
1202 | |||
1202 | extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); | 1203 | extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); |
1203 | extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, | 1204 | extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, |
1204 | unsigned header_len); | 1205 | unsigned header_len); |
1205 | extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, | 1206 | extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, |
1206 | struct tcp_md5sig_key *key); | 1207 | struct tcp_md5sig_key *key); |
1207 | 1208 | ||
1208 | static inline | ||
1209 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | ||
1210 | { | ||
1211 | int cpu = get_cpu(); | ||
1212 | struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu); | ||
1213 | if (!ret) | ||
1214 | put_cpu(); | ||
1215 | return ret; | ||
1216 | } | ||
1217 | |||
1218 | static inline void tcp_put_md5sig_pool(void) | ||
1219 | { | ||
1220 | __tcp_put_md5sig_pool(); | ||
1221 | put_cpu(); | ||
1222 | } | ||
1223 | |||
1224 | /* write queue abstraction */ | 1209 | /* write queue abstraction */ |
1225 | static inline void tcp_write_queue_purge(struct sock *sk) | 1210 | static inline void tcp_write_queue_purge(struct sock *sk) |
1226 | { | 1211 | { |
diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 4b0f48ba16a6..c7bb2f0482fe 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h | |||
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free, | |||
51 | TP_printk("%s", __get_str(name)) | 51 | TP_printk("%s", __get_str(name)) |
52 | ); | 52 | ); |
53 | 53 | ||
54 | #ifdef CONFIG_MODULE_UNLOAD | ||
55 | /* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */ | ||
56 | |||
54 | DECLARE_EVENT_CLASS(module_refcnt, | 57 | DECLARE_EVENT_CLASS(module_refcnt, |
55 | 58 | ||
56 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 59 | TP_PROTO(struct module *mod, unsigned long ip), |
57 | 60 | ||
58 | TP_ARGS(mod, ip, refcnt), | 61 | TP_ARGS(mod, ip), |
59 | 62 | ||
60 | TP_STRUCT__entry( | 63 | TP_STRUCT__entry( |
61 | __field( unsigned long, ip ) | 64 | __field( unsigned long, ip ) |
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt, | |||
65 | 68 | ||
66 | TP_fast_assign( | 69 | TP_fast_assign( |
67 | __entry->ip = ip; | 70 | __entry->ip = ip; |
68 | __entry->refcnt = refcnt; | 71 | __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs); |
69 | __assign_str(name, mod->name); | 72 | __assign_str(name, mod->name); |
70 | ), | 73 | ), |
71 | 74 | ||
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt, | |||
75 | 78 | ||
76 | DEFINE_EVENT(module_refcnt, module_get, | 79 | DEFINE_EVENT(module_refcnt, module_get, |
77 | 80 | ||
78 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 81 | TP_PROTO(struct module *mod, unsigned long ip), |
79 | 82 | ||
80 | TP_ARGS(mod, ip, refcnt) | 83 | TP_ARGS(mod, ip) |
81 | ); | 84 | ); |
82 | 85 | ||
83 | DEFINE_EVENT(module_refcnt, module_put, | 86 | DEFINE_EVENT(module_refcnt, module_put, |
84 | 87 | ||
85 | TP_PROTO(struct module *mod, unsigned long ip, int refcnt), | 88 | TP_PROTO(struct module *mod, unsigned long ip), |
86 | 89 | ||
87 | TP_ARGS(mod, ip, refcnt) | 90 | TP_ARGS(mod, ip) |
88 | ); | 91 | ); |
92 | #endif /* CONFIG_MODULE_UNLOAD */ | ||
89 | 93 | ||
90 | TRACE_EVENT(module_request, | 94 | TRACE_EVENT(module_request, |
91 | 95 | ||
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index a510b75ac304..814566c99d29 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h | |||
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver, | |||
100 | __entry->sa_handler, __entry->sa_flags) | 100 | __entry->sa_handler, __entry->sa_flags) |
101 | ); | 101 | ); |
102 | 102 | ||
103 | /** | 103 | DECLARE_EVENT_CLASS(signal_queue_overflow, |
104 | * signal_overflow_fail - called when the signal queue overflows | ||
105 | * @sig: signal number | ||
106 | * @group: signal to process group or not (bool) | ||
107 | * @info: pointer to struct siginfo | ||
108 | * | ||
109 | * Kernel fails to generate 'sig' signal with 'info' siginfo, because | ||
110 | * siginfo queue overflows, and the signal is dropped. | ||
111 | * 'group' is not 0 if the signal will be sent to a process group. | ||
112 | * 'sig' is always one of RT signals. | ||
113 | */ | ||
114 | TRACE_EVENT(signal_overflow_fail, | ||
115 | 104 | ||
116 | TP_PROTO(int sig, int group, struct siginfo *info), | 105 | TP_PROTO(int sig, int group, struct siginfo *info), |
117 | 106 | ||
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail, | |||
135 | ); | 124 | ); |
136 | 125 | ||
137 | /** | 126 | /** |
127 | * signal_overflow_fail - called when signal queue is overflow | ||
128 | * @sig: signal number | ||
129 | * @group: signal to process group or not (bool) | ||
130 | * @info: pointer to struct siginfo | ||
131 | * | ||
132 | * Kernel fails to generate 'sig' signal with 'info' siginfo, because | ||
133 | * siginfo queue overflows, and the signal is dropped. | ||
134 | * 'group' is not 0 if the signal will be sent to a process group. | ||
135 | * 'sig' is always one of RT signals. | ||
136 | */ | ||
137 | DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail, | ||
138 | |||
139 | TP_PROTO(int sig, int group, struct siginfo *info), | ||
140 | |||
141 | TP_ARGS(sig, group, info) | ||
142 | ); | ||
143 | |||
144 | /** | ||
138 | * signal_lose_info - called when siginfo is lost | 145 | * signal_lose_info - called when siginfo is lost |
139 | * @sig: signal number | 146 | * @sig: signal number |
140 | * @group: signal to process group or not (bool) | 147 | * @group: signal to process group or not (bool) |
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail, | |||
145 | * 'group' is not 0 if the signal will be sent to a process group. | 152 | * 'group' is not 0 if the signal will be sent to a process group. |
146 | * 'sig' is always one of non-RT signals. | 153 | * 'sig' is always one of non-RT signals. |
147 | */ | 154 | */ |
148 | TRACE_EVENT(signal_lose_info, | 155 | DEFINE_EVENT(signal_queue_overflow, signal_lose_info, |
149 | 156 | ||
150 | TP_PROTO(int sig, int group, struct siginfo *info), | 157 | TP_PROTO(int sig, int group, struct siginfo *info), |
151 | 158 | ||
152 | TP_ARGS(sig, group, info), | 159 | TP_ARGS(sig, group, info) |
153 | |||
154 | TP_STRUCT__entry( | ||
155 | __field( int, sig ) | ||
156 | __field( int, group ) | ||
157 | __field( int, errno ) | ||
158 | __field( int, code ) | ||
159 | ), | ||
160 | |||
161 | TP_fast_assign( | ||
162 | __entry->sig = sig; | ||
163 | __entry->group = group; | ||
164 | TP_STORE_SIGINFO(__entry, info); | ||
165 | ), | ||
166 | |||
167 | TP_printk("sig=%d group=%d errno=%d code=%d", | ||
168 | __entry->sig, __entry->group, __entry->errno, __entry->code) | ||
169 | ); | 160 | ); |
161 | |||
170 | #endif /* _TRACE_SIGNAL_H */ | 162 | #endif /* _TRACE_SIGNAL_H */ |
171 | 163 | ||
172 | /* This part must be outside protection */ | 164 | /* This part must be outside protection */ |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 882c64832ffe..16253db38d73 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -154,9 +154,11 @@ | |||
154 | * | 154 | * |
155 | * field = (typeof(field))entry; | 155 | * field = (typeof(field))entry; |
156 | * | 156 | * |
157 | * p = get_cpu_var(ftrace_event_seq); | 157 | * p = &get_cpu_var(ftrace_event_seq); |
158 | * trace_seq_init(p); | 158 | * trace_seq_init(p); |
159 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | 159 | * ret = trace_seq_printf(s, "%s: ", <call>); |
160 | * if (ret) | ||
161 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | ||
160 | * put_cpu(); | 162 | * put_cpu(); |
161 | * if (!ret) | 163 | * if (!ret) |
162 | * return TRACE_TYPE_PARTIAL_LINE; | 164 | * return TRACE_TYPE_PARTIAL_LINE; |
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
450 | * | 452 | * |
451 | * static void ftrace_raw_event_<call>(proto) | 453 | * static void ftrace_raw_event_<call>(proto) |
452 | * { | 454 | * { |
455 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | ||
453 | * struct ring_buffer_event *event; | 456 | * struct ring_buffer_event *event; |
454 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | 457 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 |
455 | * struct ring_buffer *buffer; | 458 | * struct ring_buffer *buffer; |
456 | * unsigned long irq_flags; | 459 | * unsigned long irq_flags; |
460 | * int __data_size; | ||
457 | * int pc; | 461 | * int pc; |
458 | * | 462 | * |
459 | * local_save_flags(irq_flags); | 463 | * local_save_flags(irq_flags); |
460 | * pc = preempt_count(); | 464 | * pc = preempt_count(); |
461 | * | 465 | * |
466 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | ||
467 | * | ||
462 | * event = trace_current_buffer_lock_reserve(&buffer, | 468 | * event = trace_current_buffer_lock_reserve(&buffer, |
463 | * event_<call>.id, | 469 | * event_<call>.id, |
464 | * sizeof(struct ftrace_raw_<call>), | 470 | * sizeof(*entry) + __data_size, |
465 | * irq_flags, pc); | 471 | * irq_flags, pc); |
466 | * if (!event) | 472 | * if (!event) |
467 | * return; | 473 | * return; |
468 | * entry = ring_buffer_event_data(event); | 474 | * entry = ring_buffer_event_data(event); |
469 | * | 475 | * |
470 | * <assign>; <-- Here we assign the entries by the __field and | 476 | * { <assign>; } <-- Here we assign the entries by the __field and |
471 | * __array macros. | 477 | * __array macros. |
472 | * | 478 | * |
473 | * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); | 479 | * if (!filter_current_check_discard(buffer, event_call, entry, event)) |
480 | * trace_current_buffer_unlock_commit(buffer, | ||
481 | * event, irq_flags, pc); | ||
474 | * } | 482 | * } |
475 | * | 483 | * |
476 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) | 484 | * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused) |
477 | * { | 485 | * { |
478 | * int ret; | 486 | * return register_trace_<call>(ftrace_raw_event_<call>); |
479 | * | ||
480 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | ||
481 | * if (!ret) | ||
482 | * pr_info("event trace: Could not activate trace point " | ||
483 | * "probe to <call>"); | ||
484 | * return ret; | ||
485 | * } | 487 | * } |
486 | * | 488 | * |
487 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) | 489 | * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused) |
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
493 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | 495 | * .trace = ftrace_raw_output_<call>, <-- stage 2 |
494 | * }; | 496 | * }; |
495 | * | 497 | * |
498 | * static const char print_fmt_<call>[] = <TP_printk>; | ||
499 | * | ||
496 | * static struct ftrace_event_call __used | 500 | * static struct ftrace_event_call __used |
497 | * __attribute__((__aligned__(4))) | 501 | * __attribute__((__aligned__(4))) |
498 | * __attribute__((section("_ftrace_events"))) event_<call> = { | 502 | * __attribute__((section("_ftrace_events"))) event_<call> = { |
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ | |||
501 | * .raw_init = trace_event_raw_init, | 505 | * .raw_init = trace_event_raw_init, |
502 | * .regfunc = ftrace_reg_event_<call>, | 506 | * .regfunc = ftrace_reg_event_<call>, |
503 | * .unregfunc = ftrace_unreg_event_<call>, | 507 | * .unregfunc = ftrace_unreg_event_<call>, |
508 | * .print_fmt = print_fmt_<call>, | ||
509 | * .define_fields = ftrace_define_fields_<call>, | ||
504 | * } | 510 | * } |
505 | * | 511 | * |
506 | */ | 512 | */ |
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ | |||
569 | return; \ | 575 | return; \ |
570 | entry = ring_buffer_event_data(event); \ | 576 | entry = ring_buffer_event_data(event); \ |
571 | \ | 577 | \ |
572 | \ | ||
573 | tstruct \ | 578 | tstruct \ |
574 | \ | 579 | \ |
575 | { assign; } \ | 580 | { assign; } \ |