Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blkdev.h                   | 10
-rw-r--r--  include/linux/bootmem.h                  |  2
-rw-r--r--  include/linux/ceph/libceph.h             |  6
-rw-r--r--  include/linux/cnt32_to_63.h              | 20
-rw-r--r--  include/linux/kthread.h                  | 45
-rw-r--r--  include/linux/netlink.h                  |  2
-rw-r--r--  include/linux/perf_event.h               |  1
-rw-r--r--  include/linux/sched.h                    |  2
-rw-r--r--  include/linux/taskstats.h                |  3
-rw-r--r--  include/linux/unaligned/packed_struct.h  |  6
10 files changed, 75 insertions(+), 22 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd10c4f..36ab42c9bb99 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
@@ -805,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
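
Note: with this change the per-queue QUEUE_FLAG_CLUSTER bit is gone and segment clustering is described by queue_limits.cluster, queried through the new blk_queue_cluster() helper, presumably so stacking drivers pick it up through the limits machinery instead of a hand-copied flag. A minimal sketch of how merging code might consult it; example_max_segment_len() is a hypothetical helper, not part of this patch:

#include <linux/blkdev.h>

static unsigned int example_max_segment_len(struct request_queue *q)
{
	/* When clustering is allowed, adjacent bio_vecs may be merged into
	 * one segment up to the queue's max_segment_size; otherwise each
	 * bio_vec stands alone. */
	if (blk_queue_cluster(q))
		return queue_max_segment_size(q);
	return PAGE_SIZE;
}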
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 266ab9291232..499dfe982a0e 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -105,6 +105,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_align(x, align) \
+	__alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages(x) \
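
Note: alloc_bootmem_align() is the same panicking boot-time allocator as alloc_bootmem(), but with a caller-supplied alignment instead of SMP_CACHE_BYTES. A hedged usage sketch; the 4 KiB size and 64-byte alignment are made up for illustration:

#include <linux/bootmem.h>

static void __init example_setup_table(void)
{
	/* returns zeroed memory and panics on failure, like the rest of
	 * the alloc_bootmem() family */
	void *table = alloc_bootmem_align(4096, 64);

	/* ... hand 'table' to the hypothetical early consumer ... */
	(void)table;
}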
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 9e76d35670d2..72c72bfccb88 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -227,8 +227,10 @@ extern int ceph_open_session(struct ceph_client *client);
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
 
 extern struct page **ceph_get_direct_page_vector(const char __user *data,
-						 int num_pages);
-extern void ceph_put_page_vector(struct page **pages, int num_pages);
+						 int num_pages,
+						 bool write_page);
+extern void ceph_put_page_vector(struct page **pages, int num_pages,
+				 bool dirty);
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
 extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
 extern int ceph_copy_user_to_page_vector(struct page **pages,
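
Note: both page-vector helpers grow a flag. Presumably write_page tells ceph_get_direct_page_vector() whether the pinned user pages will be written to (i.e. a read into the buffer), and dirty tells ceph_put_page_vector() to mark them dirty before unpinning. A sketch of a direct-read caller under that assumption; example_direct_read() is hypothetical and error handling is trimmed:

#include <linux/err.h>
#include <linux/ceph/libceph.h>

static int example_direct_read(const char __user *buf, int num_pages)
{
	struct page **pages;

	/* write_page = true: data from the wire lands in the user buffer */
	pages = ceph_get_direct_page_vector(buf, num_pages, true);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... issue the read into 'pages' ... */

	/* dirty = true: the page contents changed, so mark them dirty
	 * before dropping the references */
	ceph_put_page_vector(pages, num_pages, true);
	return 0;
}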
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 7605fdd1eb65..e3d8bf26e5eb 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -61,13 +61,31 @@ union cnt32_to_63 {
  *
  * 2) this code must not be preempted for a duration longer than the
  *    32-bit counter half period minus the longest period between two
- *    calls to this code.
+ *    calls to this code;
  *
  * Those requirements ensure proper update to the state bit in memory.
  * This is usually not a problem in practice, but if it is then a kernel
  * timer should be scheduled to manage for this code to be executed often
  * enough.
  *
+ * And finally:
+ *
+ * 3) the cnt_lo argument must be seen as a globally incrementing value,
+ *    meaning that it should be a direct reference to the counter data which
+ *    can be evaluated according to a specific ordering within the macro,
+ *    and not the result of a previous evaluation stored in a variable.
+ *
+ * For example, this is wrong:
+ *
+ *	u32 partial = get_hw_count();
+ *	u64 full = cnt32_to_63(partial);
+ *	return full;
+ *
+ * This is fine:
+ *
+ *	u64 full = cnt32_to_63(get_hw_count());
+ *	return full;
+ *
  * Note that the top bit (bit 63) in the returned value should be considered
  * as garbage.  It is not cleared here because callers are likely to use a
  * multiplier on the returned value which can get rid of the top bit
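
Note: the new rule 3 is the substance of this hunk: the low 32 bits must be read inside the macro invocation so their evaluation is ordered against the stored state bit. A sketch of a conforming clock read, where read_hw_counter() stands in for a hypothetical free-running 32-bit counter:

#include <linux/cnt32_to_63.h>

static u32 read_hw_counter(void);	/* hypothetical register read */

static u64 example_sched_clock(void)
{
	/* the counter is sampled inside the macro, not cached in a local
	 * variable first, as rule 3 requires */
	u64 cycles = cnt32_to_63(read_hw_counter());

	/* bit 63 is documented as garbage; mask it off here since this
	 * sketch does not multiply it away */
	return cycles & ~(1ULL << 63);
}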
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 685ea65eb803..ce0775aa64c3 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -81,16 +81,41 @@ struct kthread_work {
 #define DEFINE_KTHREAD_WORK(work, fn)					\
 	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
-static inline void init_kthread_worker(struct kthread_worker *worker)
-{
-	*worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker);
-}
-
-static inline void init_kthread_work(struct kthread_work *work,
-				     kthread_work_func_t fn)
-{
-	*work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn);
-}
+/*
+ * kthread_worker.lock and kthread_work.done need their own lockdep class
+ * keys if they are defined on stack with lockdep enabled.  Use the
+ * following macros when defining them on stack.
+ */
+#ifdef CONFIG_LOCKDEP
+# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
+	({ init_kthread_worker(&worker); worker; })
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
+	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
+# define KTHREAD_WORK_INIT_ONSTACK(work, fn)				\
+	({ init_kthread_work((&work), fn); work; })
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)				\
+	struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
+#else
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
+# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
+#endif
+
+extern void __init_kthread_worker(struct kthread_worker *worker,
+			const char *name, struct lock_class_key *key);
+
+#define init_kthread_worker(worker)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		__init_kthread_worker((worker), "("#worker")->lock", &__key); \
+	} while (0)
+
+#define init_kthread_work(work, fn)					\
+	do {								\
+		memset((work), 0, sizeof(struct kthread_work));		\
+		INIT_LIST_HEAD(&(work)->node);				\
+		(work)->func = (fn);					\
+		init_waitqueue_head(&(work)->done);			\
+	} while (0)
 
 int kthread_worker_fn(void *worker_ptr);
 
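
Note: init_kthread_worker()/init_kthread_work() become macros so each worker gets its own lockdep class key via __init_kthread_worker(), which is what makes on-stack instances safe under lockdep. A usage sketch of the new *_ONSTACK definitions; example_fn() and the "example" thread name are made up:

#include <linux/kthread.h>

static void example_fn(struct kthread_work *work)
{
	/* runs in the worker thread */
}

static void example_run_once(void)
{
	DEFINE_KTHREAD_WORKER_ONSTACK(worker);
	DEFINE_KTHREAD_WORK_ONSTACK(work, example_fn);
	struct task_struct *task;

	task = kthread_run(kthread_worker_fn, &worker, "example");
	if (IS_ERR(task))
		return;

	queue_kthread_work(&worker, &work);
	flush_kthread_work(&work);	/* wait for example_fn() to finish */
	kthread_stop(task);
}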
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 123566912d73..e2b9e63afa68 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -70,7 +70,7 @@ struct nlmsghdr {
    Check NLM_F_EXCL
  */
 
-#define NLMSG_ALIGNTO	4
+#define NLMSG_ALIGNTO	4U
 #define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
 #define NLMSG_HDRLEN	 ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
 #define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
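
Note: making NLMSG_ALIGNTO unsigned makes NLMSG_ALIGN() and the macros built on it compute in unsigned arithmetic, presumably to avoid signed/unsigned mixing when the aligned length is compared against unsigned buffer sizes. Callers are unaffected; the usual iteration pattern stays the same:

#include <linux/netlink.h>

static void example_walk(struct nlmsghdr *nlh, int len)
{
	/* NLMSG_OK()/NLMSG_NEXT() now see an unsigned alignment constant */
	while (NLMSG_OK(nlh, len)) {
		/* ... dispatch on nlh->nlmsg_type ... */
		nlh = NLMSG_NEXT(nlh, len);
	}
}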
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index de2c41758e29..4f1279e105ee 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -887,6 +887,7 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct list_head		rotation_list;
 	int				jiffies_interval;
+	struct pmu			*active_pmu;
 };
 
 struct perf_output_handle {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c79e921a68b..223874538b33 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -143,7 +143,7 @@ extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
-extern void calc_global_load(void);
+extern void calc_global_load(unsigned long ticks);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
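
Note: calc_global_load() now takes the number of ticks being accounted, so a caller that folds several ticks at once (for instance after a NO_HZ idle stretch) can report them in one call. A hedged sketch of a caller; example_account_ticks() is a hypothetical wrapper, not the scheduler's own tick path:

#include <linux/sched.h>

/* account 'ticks' elapsed timer ticks toward the load average */
static void example_account_ticks(unsigned long ticks)
{
	calc_global_load(ticks);
}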
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index 341dddb55090..2466e550a41d 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -33,7 +33,7 @@
  */
 
 
-#define TASKSTATS_VERSION	7
+#define TASKSTATS_VERSION	8
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -188,6 +188,7 @@ enum {
 	TASKSTATS_TYPE_STATS,		/* taskstats structure */
 	TASKSTATS_TYPE_AGGR_PID,	/* contains pid + stats */
 	TASKSTATS_TYPE_AGGR_TGID,	/* contains tgid + stats */
+	TASKSTATS_TYPE_NULL,		/* contains nothing */
 	__TASKSTATS_TYPE_MAX,
 };
 
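
Note: TASKSTATS_VERSION is bumped to 8 and a TASKSTATS_TYPE_NULL attribute type is added; per its comment it carries no payload, so reply parsers should simply skip it. A hedged sketch of the dispatch a consumer might use; example_handle_attr() is hypothetical:

#include <linux/taskstats.h>

static void example_handle_attr(int type, const void *payload)
{
	switch (type) {
	case TASKSTATS_TYPE_NULL:
		break;			/* contains nothing; ignore */
	case TASKSTATS_TYPE_PID:
	case TASKSTATS_TYPE_TGID:
		break;			/* u32 pid/tgid */
	case TASKSTATS_TYPE_STATS:
		break;			/* payload points at a struct taskstats */
	default:
		break;			/* unknown types: skip for forward compat */
	}
}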
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index 2498bb9fe002..c9a6abd972a1 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -3,9 +3,9 @@
 
 #include <linux/kernel.h>
 
-struct __una_u16 { u16 x __attribute__((packed)); };
-struct __una_u32 { u32 x __attribute__((packed)); };
-struct __una_u64 { u64 x __attribute__((packed)); };
+struct __una_u16 { u16 x; } __attribute__((packed));
+struct __una_u32 { u32 x; } __attribute__((packed));
+struct __una_u64 { u64 x; } __attribute__((packed));
 
 static inline u16 __get_unaligned_cpu16(const void *p)
 {
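
Note: the packed attribute moves from the lone member to the wrapper struct itself; each struct is still just a single packed field used to force an access the compiler knows may be misaligned. Usage through the accessors is unchanged, e.g. a load from a deliberately misaligned offset; example_read_u32_at() is hypothetical:

#include <linux/unaligned/packed_struct.h>

static u32 example_read_u32_at(const u8 *buf)
{
	/* buf + 1 is not 4-byte aligned; the packed wrapper struct makes
	 * the compiler emit an access that tolerates that */
	return __get_unaligned_cpu32(buf + 1);
}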