Diffstat (limited to 'include/linux')
60 files changed, 880 insertions, 229 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e531783e5d78..95ac82340c3b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -313,6 +313,7 @@ unifdef-y += ptrace.h
 unifdef-y += qnx4_fs.h
 unifdef-y += quota.h
 unifdef-y += random.h
+unifdef-y += irqnr.h
 unifdef-y += reboot.h
 unifdef-y += reiserfs_fs.h
 unifdef-y += reiserfs_xattr.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index f6b8cf99b596..b16a957030f8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -5,6 +5,7 @@
 #include <linux/workqueue.h>
 #include <linux/aio_abi.h>
 #include <linux/uio.h>
+#include <linux/rcupdate.h>
 
 #include <asm/atomic.h>
 
@@ -183,7 +184,7 @@ struct kioctx {
 
 	/* This needs improving */
 	unsigned long		user_id;
-	struct kioctx		*next;
+	struct hlist_node	list;
 
 	wait_queue_head_t	wait;
 
@@ -199,6 +200,8 @@ struct kioctx {
 	struct aio_ring_info	ring_info;
 
 	struct delayed_work	wq;
+
+	struct rcu_head		rcu_head;
 };
 
 /* prototypes */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 6a642098e5c3..18462c5b8fff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -90,10 +90,11 @@ struct bio {
 
 	unsigned int		bi_comp_cpu;	/* completion CPU */
 
+	atomic_t		bi_cnt;		/* pin count */
+
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
 
 	bio_end_io_t		*bi_end_io;
-	atomic_t		bi_cnt;		/* pin count */
 
 	void			*bi_private;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -101,6 +102,13 @@ struct bio {
 #endif
 
 	bio_destructor_t	*bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec		bi_inline_vecs[0];
 };
 
 /*
@@ -117,6 +125,7 @@ struct bio {
 #define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
 #define BIO_NULL_MAPPED 9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	11	/* Make BIO Quiet */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }
 
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
@@ -332,7 +346,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
@@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
@@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
  */
 #define BIO_POOL_SIZE 2
 #define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)
 
 struct bio_set {
+	struct kmem_cache *bio_slab;
+	unsigned int front_pad;
+
 	mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	mempool_t *bio_integrity_pool;
 #endif
-	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+	mempool_t *bvec_pool;
 };
 
 struct biovec_slab {
@@ -411,6 +430,7 @@ struct biovec_slab {
 };
 
 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 /*
  * a small number of entries is fine, not going to be performance critical.
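Example (not part of the diff): a minimal sketch of how a completion callback might use the bio_has_allocated_vec() helper and BIO_QUIET flag added above; the handler and its printing policy are hypothetical.

    #include <linux/bio.h>

    /* Hypothetical end_io handler; bio_end_io_t is (struct bio *, int). */
    static void example_end_io(struct bio *bio, int error)
    {
            /* BIO_QUIET asks the stack not to print error noise for this bio. */
            if (error && !bio_flagged(bio, BIO_QUIET))
                    printk(KERN_ERR "example: I/O error %d\n", error);

            /* Inline vecs live inside the bio; only external arrays are separate. */
            if (bio_has_allocated_vec(bio))
                    pr_debug("example: bio uses an external bio_vec array\n");

            bio_put(bio);
    }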
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 031a315c0509..7035cec583b6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -313,7 +312,7 @@ struct request_queue
 	 */
 	struct list_head	queue_head;
 	struct request		*last_merge;
-	elevator_t		*elevator;
+	struct elevator_queue	*elevator;
 
 	/*
 	 * the queue request freelist, one for reads and one for writes
@@ -449,6 +448,7 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
 #define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +522,32 @@ enum {
 	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
 	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
 	 */
-	QUEUE_ORDERED_NONE	= 0x00,
-	QUEUE_ORDERED_DRAIN	= 0x01,
-	QUEUE_ORDERED_TAG	= 0x02,
-
-	QUEUE_ORDERED_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_POSTFLUSH	= 0x20,
-	QUEUE_ORDERED_FUA	= 0x40,
-
-	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_BY_DRAIN		= 0x01,
+	QUEUE_ORDERED_BY_TAG		= 0x02,
+	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_BAR		= 0x20,
+	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
+	QUEUE_ORDERED_DO_FUA		= 0x80,
+
+	QUEUE_ORDERED_NONE		= 0x00,
+
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
+
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
 
 	/*
 	 * Ordered operation sequence
@@ -585,7 +595,6 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
@@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
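Example (not part of the diff): the reorganized QUEUE_ORDERED_* values are now compositions of QUEUE_ORDERED_BY_* and QUEUE_ORDERED_DO_* bits, so individual ordering steps can be tested directly; the helper below is only an illustration.

    #include <linux/blkdev.h>

    /* Illustrative helper: does this ordered mode include a pre-flush step? */
    static inline bool ordered_wants_preflush(unsigned ordered)
    {
            return (ordered & QUEUE_ORDERED_DO_PREFLUSH) != 0;
    }

    /*
     * ordered_wants_preflush(QUEUE_ORDERED_DRAIN_FLUSH) -> true
     * ordered_wants_preflush(QUEUE_ORDERED_DRAIN)       -> false (drain + barrier only)
     */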
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d44..27b1bcffe408 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
 extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
 extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 3ce64b90118c..8605f8a74df9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -35,6 +35,7 @@ enum bh_state_bits {
 	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
+	BH_Quiet,	/* Buffer Error Prinks to be quiet */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
diff --git a/include/linux/console.h b/include/linux/console.h
index 248e6e3b9b73..a67a90cf8268 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty);
 #define VESA_HSYNC_SUSPEND      2
 #define VESA_POWERDOWN          3
 
+#ifdef CONFIG_VGA_CONSOLE
+extern bool vgacon_text_force(void);
+#endif
+
 #endif /* _LINUX_CONSOLE_H */
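Example (not part of the diff): vgacon_text_force() is the sort of hook a modesetting driver can consult to bail out when the user forced a text console; the driver name below is made up.

    #include <linux/console.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>

    static int __init exampledrm_init(void)
    {
    #ifdef CONFIG_VGA_CONSOLE
            if (vgacon_text_force()) {
                    printk(KERN_INFO "exampledrm: text console forced, not loading\n");
                    return -EINVAL;
            }
    #endif
            return 0;
    }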
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb9..096476f1fb35 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
 ({								\
 	int __ret = 0;						\
 								\
-	if (unlikely(c)) {					\
+	if (!oops_in_progress && unlikely(c)) {			\
 		if (debug_locks_off() && !debug_locks_silent)	\
 			WARN_ON(1);				\
 		__ret = 1;					\
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 2bfda178f274..34161907b2f8 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -47,6 +47,7 @@ extern int dmi_name_in_vendors(const char *str);
 extern int dmi_name_in_serial(const char *str);
 extern int dmi_available;
 extern int dmi_walk(void (*decode)(const struct dmi_header *));
+extern bool dmi_match(enum dmi_field f, const char *str);
 
 #else
 
@@ -61,6 +62,8 @@ static inline int dmi_name_in_serial(const char *s) { return 0; }
 #define dmi_available 0
 static inline int dmi_walk(void (*decode)(const struct dmi_header *))
 	{ return -1; }
+static inline bool dmi_match(enum dmi_field f, const char *str)
+	{ return false; }
 
 #endif
 
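Example (not part of the diff): a hedged sketch of keying a platform quirk off the new dmi_match(); the vendor string is purely illustrative.

    #include <linux/dmi.h>
    #include <linux/kernel.h>

    static int __init example_quirk_init(void)
    {
            /* dmi_match() compares one DMI field against an exact string. */
            if (dmi_match(DMI_SYS_VENDOR, "Example Computer Inc."))
                    printk(KERN_INFO "example: applying vendor-specific quirk\n");
            return 0;
    }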
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 92f6f634e3e6..7a204256b155 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
 typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
 {
@@ -62,8 +62,8 @@ struct elevator_ops
 
 struct elv_fs_entry {
 	struct attribute attr;
-	ssize_t (*show)(elevator_t *, char *);
-	ssize_t (*store)(elevator_t *, const char *, size_t);
+	ssize_t (*show)(struct elevator_queue *, char *);
+	ssize_t (*store)(struct elevator_queue *, const char *, size_t);
 };
 
 /*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 32368c4f0326..06ca9b21dad2 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 
 #endif /* CONFIG_FAULT_INJECTION */
 
+#ifdef CONFIG_FAILSLAB
+extern bool should_failslab(size_t size, gfp_t gfpflags);
+#else
+static inline bool should_failslab(size_t size, gfp_t gfpflags)
+{
+	return false;
+}
+#endif /* CONFIG_FAILSLAB */
+
 #endif /* _LINUX_FAULT_INJECT_H */
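Example (not part of the diff): how an allocation path might consult should_failslab(); this is a simplified stand-in for the real slab hook, not the actual allocator code.

    #include <linux/fault-inject.h>
    #include <linux/slab.h>

    static void *example_alloc(struct kmem_cache *cache, size_t size, gfp_t flags)
    {
            /* Fault injection: pretend the allocation failed. */
            if (should_failslab(size, flags))
                    return NULL;

            return kmem_cache_alloc(cache, flags);
    }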
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 195a8cb2a749..001ded4845b4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -82,6 +82,14 @@ extern int dir_notify_enable;
    (specialy hack for floppy.c) */
 #define FMODE_WRITE_IOCTL	((__force fmode_t)128)
 
+/*
+ * Don't update ctime and mtime.
+ *
+ * Currently a special hack for the XFS open_by_handle ioctl, but we'll
+ * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
+ */
+#define FMODE_NOCMTIME		((__force fmode_t)2048)
+
 #define RW_MASK		1
 #define RWA_MASK	2
 #define READ 0
@@ -1877,7 +1885,9 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
 
 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
 
+extern struct inode * inode_init_always(struct super_block *, struct inode *);
 extern void inode_init_once(struct inode *);
+extern void inode_add_to_lists(struct super_block *, struct inode *);
 extern void iput(struct inode *);
 extern struct inode * igrab(struct inode *);
 extern ino_t iunique(struct super_block *, ino_t);
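Example (not part of the diff): the intended effect of FMODE_NOCMTIME is that write-side paths skip the usual timestamp update; a hedged sketch of such a check, with a hypothetical helper name.

    #include <linux/fs.h>

    static void example_post_write(struct file *file)
    {
            /* Respect FMODE_NOCMTIME (e.g. set by the XFS open_by_handle ioctl). */
            if (!(file->f_mode & FMODE_NOCMTIME))
                    file_update_time(file);
    }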
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec3..3bf5bb5a34f9 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET	10
 
 #define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME	256
+#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
 	} both;
 };
 
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
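Example (not part of the diff): FUTEX_CMD_MASK now strips both modifier bits, so decoding an op word looks like this (the helper name is ours):

    #include <linux/futex.h>

    static inline int example_futex_cmd(int op)
    {
            /* Removes FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME, leaving the command. */
            return op & FUTEX_CMD_MASK;
    }

    /* example_futex_cmd(FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME) == FUTEX_WAIT_BITSET */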
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 3df7742ce246..16948eaecae3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
+	struct hd_struct *last_lookup;
 	struct hd_struct *part[];
 };
 
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 89a56d79e4c6..f83288347dda 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
  */
 #define __irq_enter()					\
 	do {						\
-		rcu_irq_enter();			\
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
@@ -154,7 +157,6 @@ extern void irq_enter(void);
 		trace_hardirq_exit();			\
 		account_system_vtime(current);		\
 		sub_preempt_count(HARDIRQ_OFFSET);	\
-		rcu_irq_exit();				\
 	} while (0)
 
 /*
@@ -166,11 +168,14 @@ extern void irq_exit(void);
 	do {						\
 		ftrace_nmi_enter();			\
 		lockdep_off();				\
+		rcu_nmi_enter();			\
 		__irq_enter();				\
 	} while (0)
+
 #define nmi_exit()					\
 	do {						\
 		__irq_exit();				\
+		rcu_nmi_exit();				\
 		lockdep_on();				\
 		ftrace_nmi_exit();			\
 	} while (0)
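Example (not part of the diff): the usual nmi_enter()/nmi_exit() bracketing, which after this change also informs RCU via rcu_nmi_enter()/rcu_nmi_exit(); the handler body is hypothetical.

    #include <linux/hardirq.h>

    static void example_nmi_handler(void)
    {
            nmi_enter();
            /* ... inspect hardware state; sleeping is not allowed here ... */
            nmi_exit();
    }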
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3eba43878dcb..bd37078c2d7d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -43,26 +43,6 @@ enum hrtimer_restart {
 };
 
 /*
- * hrtimer callback modes:
- *
- *	HRTIMER_CB_SOFTIRQ:		Callback must run in softirq context
- *	HRTIMER_CB_IRQSAFE_PERCPU:	Callback must run in hardirq context
- *					Special mode for tick emulation and
- *					scheduler timer. Such timers are per
- *					cpu and not allowed to be migrated on
- *					cpu unplug.
- *	HRTIMER_CB_IRQSAFE_UNLOCKED:	Callback should run in hardirq context
- *					with timer->base lock unlocked
- *					used for timers which call wakeup to
- *					avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
-	HRTIMER_CB_SOFTIRQ,
-	HRTIMER_CB_IRQSAFE_PERCPU,
-	HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
-/*
  * Values to track state of the timer
  *
  * Possible states:
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
  * 0x00		inactive
  * 0x01		enqueued into rbtree
  * 0x02		callback function running
- * 0x04		callback pending (high resolution mode)
  *
  * Special cases:
  * 0x03		callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_INACTIVE	0x00
 #define HRTIMER_STATE_ENQUEUED	0x01
 #define HRTIMER_STATE_CALLBACK	0x02
-#define HRTIMER_STATE_PENDING	0x04
-#define HRTIMER_STATE_MIGRATE	0x08
+#define HRTIMER_STATE_MIGRATE	0x04
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
  * @function:	timer expiry callback function
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
- * @cb_mode:	high resolution timer feature to select the callback execution
- *		mode
 * @cb_entry:	list head to enqueue an expired timer into the callback list
 * @start_site:	timer statistics field to store the site where the timer
 *		was started
@@ -129,7 +105,6 @@ struct hrtimer {
 	struct hrtimer_clock_base	*base;
 	unsigned long			state;
 	struct list_head		cb_entry;
-	enum hrtimer_cb_mode		cb_mode;
 #ifdef CONFIG_TIMER_STATS
 	int				start_pid;
 	void				*start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
 * @check_clocks:	Indictator, when set evaluate time source and clock
 *			event devices whether high resolution mode can be
 *			activated.
- * @cb_pending:		Expired timers are moved from the rbtree to this
- *			list in the timer interrupt. The list is processed
- *			in the softirq.
 * @nr_events:		Total number of timer interrupt events
 */
 struct hrtimer_cpu_base {
 	spinlock_t			lock;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
-	struct list_head		cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
 	int				hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
 */
 static inline int hrtimer_is_queued(struct hrtimer *timer)
 {
-	return timer->state &
-		(HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+	return timer->state & HRTIMER_STATE_ENQUEUED;
 }
 
 /*
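Example (not part of the diff): with the callback-mode machinery gone, setting up an hrtimer no longer fills in cb_mode; a minimal sketch with hypothetical names.

    #include <linux/hrtimer.h>

    static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
    {
            return HRTIMER_NORESTART;
    }

    static void example_timer_setup(struct hrtimer *timer)
    {
            hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            timer->function = example_timer_fn;
            /* note: no timer->cb_mode assignment anymore */
    }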
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 010fb26a1579..e99c56de7f56 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -122,8 +122,6 @@ struct ide_io_ports {
 #define MAX_DRIVES	2	/* per interface; 2 assumed by lots of code */
 #define SECTOR_SIZE	512
 
-#define IDE_LARGE_SEEK(b1,b2,t)	(((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
-
 /*
  * Timeouts for various operations:
  */
@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *);
 enum {		ide_unknown,	ide_generic,	ide_pci,
 		ide_cmd640,	ide_dtc2278,	ide_ali14xx,
 		ide_qd65xx,	ide_umc8672,	ide_ht6560b,
-		ide_rz1000,	ide_trm290,
-		ide_cmd646,	ide_cy82c693,	ide_4drives,
-		ide_pmac,	ide_acorn,
+		ide_4drives,	ide_pmac,	ide_acorn,
 		ide_au1xxx,	ide_palm3710
 };
 
@@ -496,8 +492,6 @@ enum {
 	 * when more than one interrupt is needed.
 	 */
 	IDE_AFLAG_LIMIT_NFRAMES		= (1 << 7),
-	/* Seeking in progress. */
-	IDE_AFLAG_SEEKING		= (1 << 8),
 	/* Saved TOC information is current. */
 	IDE_AFLAG_TOC_VALID		= (1 << 9),
 	/* We think that the drive door is locked. */
@@ -845,8 +839,6 @@ typedef struct hwif_s {
 	unsigned	extra_ports;	/* number of extra dma ports */
 
 	unsigned	present    : 1;	/* this interface exists */
-	unsigned	serialized : 1;	/* serialized all channel operation */
-	unsigned	sharing_irq: 1;	/* 1 = sharing irq with another hwif */
 	unsigned	sg_mapped  : 1;	/* sg_table and sg_nents are ready */
 
 	struct device		gendev;
@@ -909,6 +901,8 @@ typedef struct hwgroup_s {
 
 	int req_gen;
 	int req_gen_timer;
+
+	spinlock_t lock;
 } ide_hwgroup_t;
 
 typedef struct ide_driver_s ide_driver_t;
@@ -1122,6 +1116,14 @@ enum {
 	IDE_PM_COMPLETED,
 };
 
+int generic_ide_suspend(struct device *, pm_message_t);
+int generic_ide_resume(struct device *);
+
+void ide_complete_power_step(ide_drive_t *, struct request *);
+ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
+void ide_complete_pm_request(ide_drive_t *, struct request *);
+void ide_check_pm_state(ide_drive_t *, struct request *);
+
 /*
  * Subdrivers support.
  *
@@ -1376,8 +1378,8 @@ enum {
 	IDE_HFLAG_LEGACY_IRQS		= (1 << 21),
 	/* force use of legacy IRQs */
 	IDE_HFLAG_FORCE_LEGACY_IRQS	= (1 << 22),
-	/* limit LBA48 requests to 256 sectors */
-	IDE_HFLAG_RQSIZE_256		= (1 << 23),
+	/* host is TRM290 */
+	IDE_HFLAG_TRM290		= (1 << 23),
 	/* use 32-bit I/O ops */
 	IDE_HFLAG_IO_32BIT		= (1 << 24),
 	/* unmask IRQs */
@@ -1415,6 +1417,9 @@ struct ide_port_info {
 
 	ide_pci_enablebit_t	enablebits[2];
 	hwif_chipset_t		chipset;
+
+	u16			max_sectors; /* if < than the default one */
+
 	u32			host_flags;
 	u8			pio_mask;
 	u8			swdma_mask;
@@ -1610,13 +1615,13 @@ extern struct mutex ide_cfg_mtx;
 /*
  * Structure locking:
  *
- * ide_cfg_mtx and ide_lock together protect changes to
- * ide_hwif_t->{next,hwgroup}
+ * ide_cfg_mtx and hwgroup->lock together protect changes to
+ * ide_hwif_t->next
  * ide_drive_t->next
 *
- * ide_hwgroup_t->busy: ide_lock
- * ide_hwgroup_t->hwif: ide_lock
- * ide_hwif_t->mate: constant, no locking
+ * ide_hwgroup_t->busy: hwgroup->lock
+ * ide_hwgroup_t->hwif: hwgroup->lock
+ * ide_hwif_t->{hwgroup,mate}: constant, no locking
 * ide_drive_t->hwif: constant, no locking
 */
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 48e63934fabe..dfaee6bd265b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/irqnr.h>
+
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -251,9 +253,6 @@ enum
 	BLOCK_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
-	HRTIMER_SOFTIRQ,
-#endif
 	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
diff --git a/include/linux/irq.h b/include/linux/irq.h
index ab70fd604d3a..5845bdc1ac09 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -130,9 +130,14 @@ struct irq_chip {
 	const char	*typename;
 };
 
+struct timer_rand_state;
+struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  * @irq:		interrupt number for this descriptor
+ * @timer_rand_state:	pointer to timer rand state struct
+ * @kstat_irqs:		irq stats per cpu
+ * @irq_2_iommu:	iommu with this irq
  * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
  * @chip:		low level interrupt hardware access
  * @msi_desc:		MSI descriptor
@@ -144,8 +149,8 @@ struct irq_chip {
  * @depth:		disable-depth, for nested irq_disable() calls
  * @wake_depth:		enable depth, for multiple set_irq_wake() callers
  * @irq_count:		stats field to detect stalled irqs
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @last_unhandled:	aging timer for unhandled count
+ * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
  * @affinity:		IRQ affinity on SMP
  * @cpu:		cpu index useful for balancing
@@ -155,6 +160,13 @@ struct irq_chip {
  */
 struct irq_desc {
 	unsigned int		irq;
+#ifdef CONFIG_SPARSE_IRQ
+	struct timer_rand_state *timer_rand_state;
+	unsigned int            *kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu      *irq_2_iommu;
+# endif
+#endif
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;
 	struct msi_desc		*msi_desc;
@@ -166,8 +178,8 @@ struct irq_desc {
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
 	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned int		irqs_unhandled;
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
+	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_t		affinity;
@@ -182,12 +194,51 @@ struct irq_desc {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+				 struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
 
+#ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < nr_irqs) ? irq_desc + irq : NULL;
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	return irq_to_desc(irq);
+}
+
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc)			\
+	for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
+static inline struct irq_desc *
+irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+	return irq_to_desc(irq);
+#else
+	return desc;
+#endif
 }
 
 /*
@@ -381,6 +432,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
 #define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
 
+#define get_irq_desc_chip(desc)		((desc)->chip)
+#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
+#define get_irq_desc_data(desc)		((desc)->handler_data)
+#define get_irq_desc_msi(desc)		((desc)->msi_desc)
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
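Example (not part of the diff): iterating the (possibly sparse) descriptor space and using the new desc-based accessors; the dump function is illustrative only.

    #include <linux/irq.h>

    static void example_dump_irq_chips(void)
    {
            struct irq_desc *desc;
            int irq;

            for_each_irq_desc(irq, desc) {
                    if (!desc)
                            continue;	/* hole in a sparse irq space */
                    printk(KERN_DEBUG "irq %d: chip %s\n", irq,
                           get_irq_desc_chip(desc) ? get_irq_desc_chip(desc)->name : "none");
            }
    }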
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 452c280c8115..95d2b74641f5 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -1,24 +1,38 @@
 #ifndef _LINUX_IRQNR_H
 #define _LINUX_IRQNR_H
 
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
 #ifndef CONFIG_GENERIC_HARDIRQS
 #include <asm/irq.h>
 # define nr_irqs		NR_IRQS
 
 # define for_each_irq_desc(irq, desc)		\
 	for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc)	\
+	for (irq = nr_irqs - 1; irq >= 0; irq--)
 #else
+
 extern int nr_irqs;
 
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
 # define for_each_irq_desc(irq, desc)		\
 	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
-
-# define for_each_irq_desc_reverse(irq, desc)			\
-	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
-	     irq >= 0; irq--, desc--)
+# define for_each_irq_desc_reverse(irq, desc)			\
+	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
+	     irq >= 0; irq--, desc--)
+#endif
 #endif
 
 #define for_each_irq_nr(irq)			\
 	for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index abb6ac639e8e..1a9cf78bfce5 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void)
 	 ((long)(a) - (long)(b) >= 0))
 #define time_before_eq(a,b)	time_after_eq(b,a)
 
+/*
+ * Calculate whether a is in the range of [b, c].
+ */
 #define time_in_range(a,b,c) \
 	(time_after_eq(a,b) && \
 	 time_before_eq(a,c))
 
+/*
+ * Calculate whether a is in the range of [b, c).
+ */
+#define time_in_range_open(a,b,c) \
+	(time_after_eq(a,b) && \
+	 time_before(a,c))
+
 /* Same as above, but does so with platform independent 64bit types.
  * These must be used when utilizing jiffies_64 (i.e. return value of
  * get_jiffies_64() */
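Example (not part of the diff): the difference between the closed and half-open range tests; the helper is illustrative.

    #include <linux/jiffies.h>

    /* True while 'jiffies' lies in [start, end): the end tick itself is excluded. */
    static inline int example_window_active(unsigned long start, unsigned long end)
    {
            return time_in_range_open(jiffies, start, end);
    }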
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6002ae76785c..ca9ff6411dfa 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
 		(__x < 0) ? -__x : __x;		\
 	})
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+	might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
 extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
 struct pid;
 extern struct pid *session_of_pgrp(struct pid *pgrp);
 
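Example (not part of the diff): a routine that is about to touch user memory can annotate itself with might_fault(); the wrapper below is hypothetical.

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static long example_copy_out(void __user *dst, const void *src, size_t len)
    {
            might_fault();	/* may sleep/fault; also lock-checked under PROVE_LOCKING */
            return copy_to_user(dst, src, len) ? -EFAULT : 0;
    }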
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 4a145caeee07..4ee4b3d2316f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
 	struct cpu_usage_stat	cpustat;
-	unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+	unsigned int irqs[NR_IRQS];
+#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+	(kstat_this_cpu.irqs[irq])
+
 struct irq_desc;
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 {
 	kstat_this_cpu.irqs[irq]++;
 }
+#endif
+
 
+#ifndef CONFIG_SPARSE_IRQ
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        return kstat_cpu(cpu).irqs[irq];
 }
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
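Example (not part of the diff): totalling one IRQ's count over all CPUs with kstat_irqs_cpu(), which keeps a single call site working with and without CONFIG_SPARSE_IRQ; the helper mirrors what a /proc consumer typically does.

    #include <linux/kernel_stat.h>
    #include <linux/cpumask.h>

    static unsigned int example_total_irqs(unsigned int irq)
    {
            unsigned int sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += kstat_irqs_cpu(irq, cpu);
            return sum;
    }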
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index e7217dc58f39..a53407a4165c 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -54,9 +54,13 @@ struct lguest_vqconfig {
 /* Write command first word is a request. */
 enum lguest_req
 {
-	LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */
+	LHREQ_INITIALIZE, /* + base, pfnlimit, start */
 	LHREQ_GETDMA, /* No longer used */
 	LHREQ_IRQ, /* + irq */
 	LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
 };
+
+/* The alignment to use between consumer and producer parts of vring.
+ * x86 pagesize for historical reasons. */
+#define LGUEST_VRING_ALIGN	4096
 #endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h index ed3f26eb5df1..3449de597eff 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -213,10 +213,11 @@ enum { | |||
213 | ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ | 213 | ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ |
214 | ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ | 214 | ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ |
215 | ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ | 215 | ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ |
216 | ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */ | ||
217 | ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ | 216 | ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ |
218 | ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ | 217 | ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ |
219 | ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ | 218 | ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ |
219 | ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ | ||
220 | ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ | ||
220 | 221 | ||
221 | ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ | 222 | ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ |
222 | ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ | 223 | ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ |
@@ -1285,26 +1286,62 @@ static inline int ata_link_active(struct ata_link *link) | |||
1285 | return ata_tag_valid(link->active_tag) || link->sactive; | 1286 | return ata_tag_valid(link->active_tag) || link->sactive; |
1286 | } | 1287 | } |
1287 | 1288 | ||
1288 | extern struct ata_link *__ata_port_next_link(struct ata_port *ap, | 1289 | /* |
1289 | struct ata_link *link, | 1290 | * Iterators |
1290 | bool dev_only); | 1291 | * |
1292 | * ATA_LITER_* constants are used to select link iteration mode and | ||
1293 | * ATA_DITER_* device iteration mode. | ||
1294 | * | ||
1295 | * For a custom iteration directly using ata_{link|dev}_next(), if | ||
1296 | * @link or @dev, respectively, is NULL, the first element is | ||
1297 | * returned. @dev and @link can be any valid device or link and the | ||
1298 | * next element according to the iteration mode will be returned. | ||
1299 | * After the last element, NULL is returned. | ||
1300 | */ | ||
1301 | enum ata_link_iter_mode { | ||
1302 | ATA_LITER_EDGE, /* if present, PMP links only; otherwise, | ||
1303 | * host link. no slave link */ | ||
1304 | ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ | ||
1305 | ATA_LITER_PMP_FIRST, /* PMP links followed by host link, | ||
1306 | * slave link still comes after host link */ | ||
1307 | }; | ||
1291 | 1308 | ||
1292 | #define __ata_port_for_each_link(link, ap) \ | 1309 | enum ata_dev_iter_mode { |
1293 | for ((link) = __ata_port_next_link((ap), NULL, false); (link); \ | 1310 | ATA_DITER_ENABLED, |
1294 | (link) = __ata_port_next_link((ap), (link), false)) | 1311 | ATA_DITER_ENABLED_REVERSE, |
1312 | ATA_DITER_ALL, | ||
1313 | ATA_DITER_ALL_REVERSE, | ||
1314 | }; | ||
1295 | 1315 | ||
1296 | #define ata_port_for_each_link(link, ap) \ | 1316 | extern struct ata_link *ata_link_next(struct ata_link *link, |
1297 | for ((link) = __ata_port_next_link((ap), NULL, true); (link); \ | 1317 | struct ata_port *ap, |
1298 | (link) = __ata_port_next_link((ap), (link), true)) | 1318 | enum ata_link_iter_mode mode); |
1299 | 1319 | ||
1300 | #define ata_link_for_each_dev(dev, link) \ | 1320 | extern struct ata_device *ata_dev_next(struct ata_device *dev, |
1301 | for ((dev) = (link)->device; \ | 1321 | struct ata_link *link, |
1302 | (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \ | 1322 | enum ata_dev_iter_mode mode); |
1303 | (dev)++) | 1323 | |
1324 | /* | ||
1325 | * Shortcut notation for iterations | ||
1326 | * | ||
1327 | * ata_for_each_link() iterates over each link of @ap according to | ||
1328 | * @mode. @link points to the current link in the loop. @link is | ||
1329 | * NULL after loop termination. ata_for_each_dev() works the same way | ||
1330 | * except that it iterates over each device of @link. | ||
1331 | * | ||
1332 | * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be | ||
1333 | * specified when using the following shorthand notations. Only the | ||
1334 | * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be | ||
1335 | * specified. This not only increases brevity but also makes it | ||
1336 | * impossible to use ATA_LITER_* for device iteration or vice-versa. | ||
1337 | */ | ||
1338 | #define ata_for_each_link(link, ap, mode) \ | ||
1339 | for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ | ||
1340 | (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) | ||
1304 | 1341 | ||
1305 | #define ata_link_for_each_dev_reverse(dev, link) \ | 1342 | #define ata_for_each_dev(dev, link, mode) \ |
1306 | for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ | 1343 | for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ |
1307 | (dev) >= (link)->device || ((dev) = NULL); (dev)--) | 1344 | (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) |
1308 | 1345 | ||
1309 | /** | 1346 | /** |
1310 | * ata_ncq_enabled - Test whether NCQ is enabled | 1347 | * ata_ncq_enabled - Test whether NCQ is enabled |
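As a usage illustration of the new link/device iterators above, here is a minimal sketch, not taken from this patch; the helper name and the message are assumptions. Note how the ATA_LITER_/ATA_DITER_ prefixes are omitted, as the shorthand macros paste them in:

static void my_dump_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* EDGE expands to ATA_LITER_EDGE, ENABLED to ATA_DITER_ENABLED */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED)
			ata_dev_printk(dev, KERN_INFO, "device is enabled\n");
	}
}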
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index e5872dc994c0..fbc48f898521 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h | |||
@@ -41,6 +41,7 @@ struct nlmclnt_initdata { | |||
41 | size_t addrlen; | 41 | size_t addrlen; |
42 | unsigned short protocol; | 42 | unsigned short protocol; |
43 | u32 nfs_version; | 43 | u32 nfs_version; |
44 | int noresvport; | ||
44 | }; | 45 | }; |
45 | 46 | ||
46 | /* | 47 | /* |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index b56d5aa9b194..23da3fa69efa 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -49,6 +49,7 @@ struct nlm_host { | |||
49 | unsigned short h_proto; /* transport proto */ | 49 | unsigned short h_proto; /* transport proto */ |
50 | unsigned short h_reclaiming : 1, | 50 | unsigned short h_reclaiming : 1, |
51 | h_server : 1, /* server side, not client side */ | 51 | h_server : 1, /* server side, not client side */ |
52 | h_noresvport : 1, | ||
52 | h_inuse : 1; | 53 | h_inuse : 1; |
53 | wait_queue_head_t h_gracewait; /* wait while reclaiming */ | 54 | wait_queue_head_t h_gracewait; /* wait while reclaiming */ |
54 | struct rw_semaphore h_rwsem; /* Reboot recovery lock */ | 55 | struct rw_semaphore h_rwsem; /* Reboot recovery lock */ |
@@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, | |||
220 | const size_t salen, | 221 | const size_t salen, |
221 | const unsigned short protocol, | 222 | const unsigned short protocol, |
222 | const u32 version, | 223 | const u32 version, |
223 | const char *hostname); | 224 | const char *hostname, |
225 | int noresvport); | ||
224 | struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, | 226 | struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, |
225 | const char *hostname, | 227 | const char *hostname, |
226 | const size_t hostname_len); | 228 | const size_t hostname_len); |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 29aec6e10020..23bf02fb124f 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -73,6 +73,8 @@ struct lock_class_key { | |||
73 | struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; | 73 | struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | #define LOCKSTAT_POINTS 4 | ||
77 | |||
76 | /* | 78 | /* |
77 | * The lock-class itself: | 79 | * The lock-class itself: |
78 | */ | 80 | */ |
@@ -119,7 +121,8 @@ struct lock_class { | |||
119 | int name_version; | 121 | int name_version; |
120 | 122 | ||
121 | #ifdef CONFIG_LOCK_STAT | 123 | #ifdef CONFIG_LOCK_STAT |
122 | unsigned long contention_point[4]; | 124 | unsigned long contention_point[LOCKSTAT_POINTS]; |
125 | unsigned long contending_point[LOCKSTAT_POINTS]; | ||
123 | #endif | 126 | #endif |
124 | }; | 127 | }; |
125 | 128 | ||
@@ -144,6 +147,7 @@ enum bounce_type { | |||
144 | 147 | ||
145 | struct lock_class_stats { | 148 | struct lock_class_stats { |
146 | unsigned long contention_point[4]; | 149 | unsigned long contention_point[4]; |
150 | unsigned long contending_point[4]; | ||
147 | struct lock_time read_waittime; | 151 | struct lock_time read_waittime; |
148 | struct lock_time write_waittime; | 152 | struct lock_time write_waittime; |
149 | struct lock_time read_holdtime; | 153 | struct lock_time read_holdtime; |
@@ -165,6 +169,7 @@ struct lockdep_map { | |||
165 | const char *name; | 169 | const char *name; |
166 | #ifdef CONFIG_LOCK_STAT | 170 | #ifdef CONFIG_LOCK_STAT |
167 | int cpu; | 171 | int cpu; |
172 | unsigned long ip; | ||
168 | #endif | 173 | #endif |
169 | }; | 174 | }; |
170 | 175 | ||
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
309 | extern void lock_release(struct lockdep_map *lock, int nested, | 314 | extern void lock_release(struct lockdep_map *lock, int nested, |
310 | unsigned long ip); | 315 | unsigned long ip); |
311 | 316 | ||
312 | extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, | 317 | extern void lock_set_class(struct lockdep_map *lock, const char *name, |
313 | unsigned long ip); | 318 | struct lock_class_key *key, unsigned int subclass, |
319 | unsigned long ip); | ||
320 | |||
321 | static inline void lock_set_subclass(struct lockdep_map *lock, | ||
322 | unsigned int subclass, unsigned long ip) | ||
323 | { | ||
324 | lock_set_class(lock, lock->name, lock->key, subclass, ip); | ||
325 | } | ||
314 | 326 | ||
315 | # define INIT_LOCKDEP .lockdep_recursion = 0, | 327 | # define INIT_LOCKDEP .lockdep_recursion = 0, |
316 | 328 | ||
@@ -328,6 +340,7 @@ static inline void lockdep_on(void) | |||
328 | 340 | ||
329 | # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) | 341 | # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) |
330 | # define lock_release(l, n, i) do { } while (0) | 342 | # define lock_release(l, n, i) do { } while (0) |
343 | # define lock_set_class(l, n, k, s, i) do { } while (0) | ||
331 | # define lock_set_subclass(l, s, i) do { } while (0) | 344 | # define lock_set_subclass(l, s, i) do { } while (0) |
332 | # define lockdep_init() do { } while (0) | 345 | # define lockdep_init() do { } while (0) |
333 | # define lockdep_info() do { } while (0) | 346 | # define lockdep_info() do { } while (0) |
@@ -356,7 +369,7 @@ struct lock_class_key { }; | |||
356 | #ifdef CONFIG_LOCK_STAT | 369 | #ifdef CONFIG_LOCK_STAT |
357 | 370 | ||
358 | extern void lock_contended(struct lockdep_map *lock, unsigned long ip); | 371 | extern void lock_contended(struct lockdep_map *lock, unsigned long ip); |
359 | extern void lock_acquired(struct lockdep_map *lock); | 372 | extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); |
360 | 373 | ||
361 | #define LOCK_CONTENDED(_lock, try, lock) \ | 374 | #define LOCK_CONTENDED(_lock, try, lock) \ |
362 | do { \ | 375 | do { \ |
@@ -364,20 +377,20 @@ do { \ | |||
364 | lock_contended(&(_lock)->dep_map, _RET_IP_); \ | 377 | lock_contended(&(_lock)->dep_map, _RET_IP_); \ |
365 | lock(_lock); \ | 378 | lock(_lock); \ |
366 | } \ | 379 | } \ |
367 | lock_acquired(&(_lock)->dep_map); \ | 380 | lock_acquired(&(_lock)->dep_map, _RET_IP_); \ |
368 | } while (0) | 381 | } while (0) |
369 | 382 | ||
370 | #else /* CONFIG_LOCK_STAT */ | 383 | #else /* CONFIG_LOCK_STAT */ |
371 | 384 | ||
372 | #define lock_contended(lockdep_map, ip) do {} while (0) | 385 | #define lock_contended(lockdep_map, ip) do {} while (0) |
373 | #define lock_acquired(lockdep_map) do {} while (0) | 386 | #define lock_acquired(lockdep_map, ip) do {} while (0) |
374 | 387 | ||
375 | #define LOCK_CONTENDED(_lock, try, lock) \ | 388 | #define LOCK_CONTENDED(_lock, try, lock) \ |
376 | lock(_lock) | 389 | lock(_lock) |
377 | 390 | ||
378 | #endif /* CONFIG_LOCK_STAT */ | 391 | #endif /* CONFIG_LOCK_STAT */ |
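To make the LOCK_CONTENDED contract concrete, here is a purely hypothetical primitive (my_lock and its helpers are not part of this patch): the trylock is attempted first, a failure is recorded via lock_contended(), the blocking path runs, and lock_acquired() now also records the caller's return address.

struct my_lock {
	spinlock_t		slock;
	struct lockdep_map	dep_map;	/* set up via lockdep_init_map() at init time */
};

static int my_trylock(struct my_lock *l)
{
	return spin_trylock(&l->slock);
}

static void my_lock_slowpath(struct my_lock *l)
{
	spin_lock(&l->slock);
}

static void my_lock(struct my_lock *l)
{
	/* the unlock path would pair this with lock_release() */
	lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
	LOCK_CONTENDED(l, my_trylock, my_lock_slowpath);
}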
379 | 392 | ||
380 | #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) | 393 | #ifdef CONFIG_GENERIC_HARDIRQS |
381 | extern void early_init_irq_lock_class(void); | 394 | extern void early_init_irq_lock_class(void); |
382 | #else | 395 | #else |
383 | static inline void early_init_irq_lock_class(void) | 396 | static inline void early_init_irq_lock_class(void) |
@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr) | |||
481 | # define lock_map_release(l) do { } while (0) | 494 | # define lock_map_release(l) do { } while (0) |
482 | #endif | 495 | #endif |
483 | 496 | ||
497 | #ifdef CONFIG_PROVE_LOCKING | ||
498 | # define might_lock(lock) \ | ||
499 | do { \ | ||
500 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ | ||
501 | lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ | ||
502 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ | ||
503 | } while (0) | ||
504 | # define might_lock_read(lock) \ | ||
505 | do { \ | ||
506 | typecheck(struct lockdep_map *, &(lock)->dep_map); \ | ||
507 | lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ | ||
508 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ | ||
509 | } while (0) | ||
510 | #else | ||
511 | # define might_lock(lock) do { } while (0) | ||
512 | # define might_lock_read(lock) do { } while (0) | ||
513 | #endif | ||
514 | |||
484 | #endif /* __LINUX_LOCKDEP_H */ | 515 | #endif /* __LINUX_LOCKDEP_H */ |
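A hedged usage sketch for the new might_lock()/might_lock_read() annotations (the surrounding device structure and fields are invented for illustration): they tell lockdep that a path may take a lock even when the fast path does not, so ordering violations are reported deterministically rather than only when the slow branch happens to run.

static void my_update_stats(struct my_dev *dev, bool need_exclusive)
{
	/* declare the possible acquisition up front ... */
	might_lock(&dev->mutex);

	if (need_exclusive) {
		/* ... even though it only happens on this branch */
		mutex_lock(&dev->mutex);
		dev->slow_count++;
		mutex_unlock(&dev->mutex);
	} else {
		atomic_inc(&dev->fast_count);
	}
}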
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fe825471d5aa..9cfc9b627fdd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -232,8 +232,9 @@ struct mm_struct { | |||
232 | struct core_state *core_state; /* coredumping support */ | 232 | struct core_state *core_state; /* coredumping support */ |
233 | 233 | ||
234 | /* aio bits */ | 234 | /* aio bits */ |
235 | rwlock_t ioctx_list_lock; /* aio lock */ | 235 | spinlock_t ioctx_lock; |
236 | struct kioctx *ioctx_list; | 236 | struct hlist_head ioctx_list; |
237 | |||
237 | #ifdef CONFIG_MM_OWNER | 238 | #ifdef CONFIG_MM_OWNER |
238 | /* | 239 | /* |
239 | * "owner" points to a task that is regarded as the canonical | 240 | * "owner" points to a task that is regarded as the canonical |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 8f2939227207..d2b8a1e8ca11 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -10,8 +10,11 @@ struct msi_msg { | |||
10 | }; | 10 | }; |
11 | 11 | ||
12 | /* Helper functions */ | 12 | /* Helper functions */ |
13 | struct irq_desc; | ||
13 | extern void mask_msi_irq(unsigned int irq); | 14 | extern void mask_msi_irq(unsigned int irq); |
14 | extern void unmask_msi_irq(unsigned int irq); | 15 | extern void unmask_msi_irq(unsigned int irq); |
16 | extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | ||
17 | extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | ||
15 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); | 18 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); |
16 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); | 19 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); |
17 | 20 | ||
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index bc6da10ceee0..7a0e5c4f8072 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); | |||
144 | /* | 144 | /* |
145 | * NOTE: mutex_trylock() follows the spin_trylock() convention, | 145 | * NOTE: mutex_trylock() follows the spin_trylock() convention, |
146 | * not the down_trylock() convention! | 146 | * not the down_trylock() convention! |
147 | * | ||
148 | * Returns 1 if the mutex has been acquired successfully, and 0 on contention. | ||
147 | */ | 149 | */ |
148 | extern int mutex_trylock(struct mutex *lock); | 150 | extern int mutex_trylock(struct mutex *lock); |
149 | extern void mutex_unlock(struct mutex *lock); | 151 | extern void mutex_unlock(struct mutex *lock); |
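The return-value note above matters in practice; a small sketch follows (the device structure, work item and do_deferred_work() are assumptions):

static void my_try_fast_path(struct my_dev *dev)
{
	if (mutex_trylock(&dev->lock)) {
		/* 1 == acquired; the opposite of down_trylock() */
		do_deferred_work(dev);
		mutex_unlock(&dev->lock);
	} else {
		/* 0 == contended; retry later rather than blocking here */
		schedule_delayed_work(&dev->retry_work, HZ);
	}
}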
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 4eaa8347a0d9..db867b04ac3c 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -83,7 +83,7 @@ struct nfs_open_context { | |||
83 | struct rpc_cred *cred; | 83 | struct rpc_cred *cred; |
84 | struct nfs4_state *state; | 84 | struct nfs4_state *state; |
85 | fl_owner_t lockowner; | 85 | fl_owner_t lockowner; |
86 | int mode; | 86 | fmode_t mode; |
87 | 87 | ||
88 | unsigned long flags; | 88 | unsigned long flags; |
89 | #define NFS_CONTEXT_ERROR_WRITE (0) | 89 | #define NFS_CONTEXT_ERROR_WRITE (0) |
@@ -130,7 +130,10 @@ struct nfs_inode { | |||
130 | * | 130 | * |
131 | * We need to revalidate the cached attrs for this inode if | 131 | * We need to revalidate the cached attrs for this inode if |
132 | * | 132 | * |
133 | * jiffies - read_cache_jiffies > attrtimeo | 133 | * jiffies - read_cache_jiffies >= attrtimeo |
134 | * | ||
135 | * Please note the comparison is greater than or equal | ||
136 | * so that zero timeout values can be specified. | ||
134 | */ | 137 | */ |
135 | unsigned long read_cache_jiffies; | 138 | unsigned long read_cache_jiffies; |
136 | unsigned long attrtimeo; | 139 | unsigned long attrtimeo; |
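The revalidation rule in the comment above can be expressed as a wrap-safe jiffies test; a minimal sketch, where the helper name is an assumption and not the function NFS actually uses:

static inline int my_nfs_attrs_expired(const struct nfs_inode *nfsi)
{
	/* ">=" semantics: attrtimeo == 0 forces revalidation every time */
	return time_after_eq(jiffies,
			     nfsi->read_cache_jiffies + nfsi->attrtimeo);
}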
@@ -180,7 +183,7 @@ struct nfs_inode { | |||
180 | /* NFSv4 state */ | 183 | /* NFSv4 state */ |
181 | struct list_head open_states; | 184 | struct list_head open_states; |
182 | struct nfs_delegation *delegation; | 185 | struct nfs_delegation *delegation; |
183 | int delegation_state; | 186 | fmode_t delegation_state; |
184 | struct rw_semaphore rwsem; | 187 | struct rw_semaphore rwsem; |
185 | #endif /* CONFIG_NFS_V4*/ | 188 | #endif /* CONFIG_NFS_V4*/ |
186 | struct inode vfs_inode; | 189 | struct inode vfs_inode; |
@@ -342,7 +345,7 @@ extern int nfs_setattr(struct dentry *, struct iattr *); | |||
342 | extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); | 345 | extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); |
343 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); | 346 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); |
344 | extern void put_nfs_open_context(struct nfs_open_context *ctx); | 347 | extern void put_nfs_open_context(struct nfs_open_context *ctx); |
345 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode); | 348 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); |
346 | extern u64 nfs_compat_user_ino64(u64 fileid); | 349 | extern u64 nfs_compat_user_ino64(u64 fileid); |
347 | extern void nfs_fattr_init(struct nfs_fattr *fattr); | 350 | extern void nfs_fattr_init(struct nfs_fattr *fattr); |
348 | 351 | ||
@@ -533,12 +536,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) | |||
533 | #endif /* CONFIG_NFS_V3_ACL */ | 536 | #endif /* CONFIG_NFS_V3_ACL */ |
534 | 537 | ||
535 | /* | 538 | /* |
536 | * linux/fs/mount_clnt.c | ||
537 | */ | ||
538 | extern int nfs_mount(struct sockaddr *, size_t, char *, char *, | ||
539 | int, int, struct nfs_fh *); | ||
540 | |||
541 | /* | ||
542 | * inline functions | 539 | * inline functions |
543 | */ | 540 | */ |
544 | 541 | ||
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 4e477ae58699..9bb81aec91cf 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -42,12 +42,6 @@ struct nfs_client { | |||
42 | struct rb_root cl_openowner_id; | 42 | struct rb_root cl_openowner_id; |
43 | struct rb_root cl_lockowner_id; | 43 | struct rb_root cl_lockowner_id; |
44 | 44 | ||
45 | /* | ||
46 | * The following rwsem ensures exclusive access to the server | ||
47 | * while we recover the state following a lease expiration. | ||
48 | */ | ||
49 | struct rw_semaphore cl_sem; | ||
50 | |||
51 | struct list_head cl_delegations; | 45 | struct list_head cl_delegations; |
52 | struct rb_root cl_state_owners; | 46 | struct rb_root cl_state_owners; |
53 | spinlock_t cl_lock; | 47 | spinlock_t cl_lock; |
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 6549a06ac16e..4499016e6d0d 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h | |||
@@ -45,7 +45,7 @@ struct nfs_mount_data { | |||
45 | char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ | 45 | char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ |
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* bits in the flags field */ | 48 | /* bits in the flags field visible to user space */ |
49 | 49 | ||
50 | #define NFS_MOUNT_SOFT 0x0001 /* 1 */ | 50 | #define NFS_MOUNT_SOFT 0x0001 /* 1 */ |
51 | #define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ | 51 | #define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ |
@@ -68,5 +68,6 @@ struct nfs_mount_data { | |||
68 | /* The following are for internal use only */ | 68 | /* The following are for internal use only */ |
69 | #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 | 69 | #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 |
70 | #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 | 70 | #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 |
71 | #define NFS_MOUNT_NORESVPORT 0x40000 | ||
71 | 72 | ||
72 | #endif | 73 | #endif |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index c1c31acb8a2b..a550b528319f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -120,13 +120,14 @@ struct nfs_openargs { | |||
120 | const struct nfs_fh * fh; | 120 | const struct nfs_fh * fh; |
121 | struct nfs_seqid * seqid; | 121 | struct nfs_seqid * seqid; |
122 | int open_flags; | 122 | int open_flags; |
123 | fmode_t fmode; | ||
123 | __u64 clientid; | 124 | __u64 clientid; |
124 | __u64 id; | 125 | __u64 id; |
125 | union { | 126 | union { |
126 | struct iattr * attrs; /* UNCHECKED, GUARDED */ | 127 | struct iattr * attrs; /* UNCHECKED, GUARDED */ |
127 | nfs4_verifier verifier; /* EXCLUSIVE */ | 128 | nfs4_verifier verifier; /* EXCLUSIVE */ |
128 | nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ | 129 | nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ |
129 | int delegation_type; /* CLAIM_PREVIOUS */ | 130 | fmode_t delegation_type; /* CLAIM_PREVIOUS */ |
130 | } u; | 131 | } u; |
131 | const struct qstr * name; | 132 | const struct qstr * name; |
132 | const struct nfs_server *server; /* Needed for ID mapping */ | 133 | const struct nfs_server *server; /* Needed for ID mapping */ |
@@ -143,7 +144,7 @@ struct nfs_openres { | |||
143 | struct nfs_fattr * dir_attr; | 144 | struct nfs_fattr * dir_attr; |
144 | struct nfs_seqid * seqid; | 145 | struct nfs_seqid * seqid; |
145 | const struct nfs_server *server; | 146 | const struct nfs_server *server; |
146 | int delegation_type; | 147 | fmode_t delegation_type; |
147 | nfs4_stateid delegation; | 148 | nfs4_stateid delegation; |
148 | __u32 do_recall; | 149 | __u32 do_recall; |
149 | __u64 maxsize; | 150 | __u64 maxsize; |
@@ -171,7 +172,7 @@ struct nfs_closeargs { | |||
171 | struct nfs_fh * fh; | 172 | struct nfs_fh * fh; |
172 | nfs4_stateid * stateid; | 173 | nfs4_stateid * stateid; |
173 | struct nfs_seqid * seqid; | 174 | struct nfs_seqid * seqid; |
174 | int open_flags; | 175 | fmode_t fmode; |
175 | const u32 * bitmask; | 176 | const u32 * bitmask; |
176 | }; | 177 | }; |
177 | 178 | ||
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index d0fe2e378452..128298c0362d 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h | |||
@@ -124,6 +124,8 @@ struct nfs4_client { | |||
124 | nfs4_verifier cl_verifier; /* generated by client */ | 124 | nfs4_verifier cl_verifier; /* generated by client */ |
125 | time_t cl_time; /* time of last lease renewal */ | 125 | time_t cl_time; /* time of last lease renewal */ |
126 | __be32 cl_addr; /* client ipaddress */ | 126 | __be32 cl_addr; /* client ipaddress */ |
127 | u32 cl_flavor; /* setclientid pseudoflavor */ | ||
128 | char *cl_principal; /* setclientid principal name */ | ||
127 | struct svc_cred cl_cred; /* setclientid principal */ | 129 | struct svc_cred cl_cred; /* setclientid principal */ |
128 | clientid_t cl_clientid; /* generated by server */ | 130 | clientid_t cl_clientid; /* generated by server */ |
129 | nfs4_verifier cl_confirm; /* generated by server */ | 131 | nfs4_verifier cl_confirm; /* generated by server */ |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index a8efcfeea732..3d327b67d7e2 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type; | |||
26 | 26 | ||
27 | /* | 27 | /* |
28 | * An of_platform_driver driver is attached to a basic of_device on | 28 | * An of_platform_driver driver is attached to a basic of_device on |
29 | * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS | 29 | * the "platform bus" (of_platform_bus_type). |
30 | * busses on sparc). | ||
31 | */ | 30 | */ |
32 | struct of_platform_driver | 31 | struct of_platform_driver |
33 | { | 32 | { |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 5231861f357d..1ce9fe572e51 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops); | |||
86 | void oprofile_arch_exit(void); | 86 | void oprofile_arch_exit(void); |
87 | 87 | ||
88 | /** | 88 | /** |
89 | * Add a sample. This may be called from any context. Pass | 89 | * Add a sample. This may be called from any context. |
90 | * smp_processor_id() as cpu. | ||
91 | */ | 90 | */ |
92 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); | 91 | void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); |
93 | 92 | ||
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index a7c721355549..4f71bf4e628c 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h | |||
@@ -45,7 +45,11 @@ struct k_itimer { | |||
45 | int it_requeue_pending; /* waiting to requeue this timer */ | 45 | int it_requeue_pending; /* waiting to requeue this timer */ |
46 | #define REQUEUE_PENDING 1 | 46 | #define REQUEUE_PENDING 1 |
47 | int it_sigev_notify; /* notify word of sigevent struct */ | 47 | int it_sigev_notify; /* notify word of sigevent struct */ |
48 | struct task_struct *it_process; /* process to send signal to */ | 48 | struct signal_struct *it_signal; |
49 | union { | ||
50 | struct pid *it_pid; /* pid of process to send signal to */ | ||
51 | struct task_struct *it_process; /* for clock_nanosleep */ | ||
52 | }; | ||
49 | struct sigqueue *sigq; /* signal queue entry. */ | 53 | struct sigqueue *sigq; /* signal queue entry. */ |
50 | union { | 54 | union { |
51 | struct { | 55 | struct { |
diff --git a/include/linux/random.h b/include/linux/random.h index 36f125c0c603..adbf3bd3c6b3 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #define _LINUX_RANDOM_H | 8 | #define _LINUX_RANDOM_H |
9 | 9 | ||
10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
11 | #include <linux/irqnr.h> | ||
11 | 12 | ||
12 | /* ioctl()'s for the random number generator */ | 13 | /* ioctl()'s for the random number generator */ |
13 | 14 | ||
@@ -44,6 +45,56 @@ struct rand_pool_info { | |||
44 | 45 | ||
45 | extern void rand_initialize_irq(int irq); | 46 | extern void rand_initialize_irq(int irq); |
46 | 47 | ||
48 | struct timer_rand_state; | ||
49 | #ifndef CONFIG_SPARSE_IRQ | ||
50 | |||
51 | extern struct timer_rand_state *irq_timer_state[]; | ||
52 | |||
53 | static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq) | ||
54 | { | ||
55 | if (irq >= nr_irqs) | ||
56 | return NULL; | ||
57 | |||
58 | return irq_timer_state[irq]; | ||
59 | } | ||
60 | |||
61 | static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) | ||
62 | { | ||
63 | if (irq >= nr_irqs) | ||
64 | return; | ||
65 | |||
66 | irq_timer_state[irq] = state; | ||
67 | } | ||
68 | |||
69 | #else | ||
70 | |||
71 | #include <linux/irq.h> | ||
72 | static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq) | ||
73 | { | ||
74 | struct irq_desc *desc; | ||
75 | |||
76 | desc = irq_to_desc(irq); | ||
77 | |||
78 | if (!desc) | ||
79 | return NULL; | ||
80 | |||
81 | return desc->timer_rand_state; | ||
82 | } | ||
83 | |||
84 | static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) | ||
85 | { | ||
86 | struct irq_desc *desc; | ||
87 | |||
88 | desc = irq_to_desc(irq); | ||
89 | |||
90 | if (!desc) | ||
91 | return; | ||
92 | |||
93 | desc->timer_rand_state = state; | ||
94 | } | ||
95 | #endif | ||
96 | |||
97 | |||
47 | extern void add_input_randomness(unsigned int type, unsigned int code, | 98 | extern void add_input_randomness(unsigned int type, unsigned int code, |
48 | unsigned int value); | 99 | unsigned int value); |
49 | extern void add_interrupt_randomness(int irq); | 100 | extern void add_interrupt_randomness(int irq); |
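Sketch of how the accessors above might be used when wiring up per-IRQ entropy state; the function is hypothetical and assumes the full definition of struct timer_rand_state is visible, as it is inside drivers/char/random.c:

static void my_init_irq_entropy(unsigned int irq)
{
	struct timer_rand_state *state;

	if (get_timer_rand_state(irq))
		return;		/* already initialized or irq out of range */

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		set_timer_rand_state(irq, state);
}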
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 5f89b62e6983..301dda829e37 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h | |||
@@ -41,7 +41,7 @@ | |||
41 | #include <linux/seqlock.h> | 41 | #include <linux/seqlock.h> |
42 | 42 | ||
43 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 43 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
44 | #define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ | 44 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */ |
45 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ | 45 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ |
46 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 46 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
47 | 47 | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 895dc9c1088c..1168fbcea8d4 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -52,11 +52,15 @@ struct rcu_head { | |||
52 | void (*func)(struct rcu_head *head); | 52 | void (*func)(struct rcu_head *head); |
53 | }; | 53 | }; |
54 | 54 | ||
55 | #ifdef CONFIG_CLASSIC_RCU | 55 | #if defined(CONFIG_CLASSIC_RCU) |
56 | #include <linux/rcuclassic.h> | 56 | #include <linux/rcuclassic.h> |
57 | #else /* #ifdef CONFIG_CLASSIC_RCU */ | 57 | #elif defined(CONFIG_TREE_RCU) |
58 | #include <linux/rcutree.h> | ||
59 | #elif defined(CONFIG_PREEMPT_RCU) | ||
58 | #include <linux/rcupreempt.h> | 60 | #include <linux/rcupreempt.h> |
59 | #endif /* #else #ifdef CONFIG_CLASSIC_RCU */ | 61 | #else |
62 | #error "Unknown RCU implementation specified to kernel configuration" | ||
63 | #endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ | ||
60 | 64 | ||
61 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | 65 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } |
62 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | 66 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h new file mode 100644 index 000000000000..d4368b7975c3 --- /dev/null +++ b/include/linux/rcutree.h | |||
@@ -0,0 +1,329 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2008 | ||
19 | * | ||
20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | ||
21 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm | ||
22 | * | ||
23 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | ||
24 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
25 | * | ||
26 | * For detailed explanation of Read-Copy Update mechanism see - | ||
27 | * Documentation/RCU | ||
28 | */ | ||
29 | |||
30 | #ifndef __LINUX_RCUTREE_H | ||
31 | #define __LINUX_RCUTREE_H | ||
32 | |||
33 | #include <linux/cache.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/threads.h> | ||
36 | #include <linux/percpu.h> | ||
37 | #include <linux/cpumask.h> | ||
38 | #include <linux/seqlock.h> | ||
39 | |||
40 | /* | ||
41 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. | ||
42 | * In theory, it should be possible to add more levels straightforwardly. | ||
43 | * In practice, this has not been tested, so there is probably some | ||
44 | * bug somewhere. | ||
45 | */ | ||
46 | #define MAX_RCU_LVLS 3 | ||
47 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | ||
48 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | ||
49 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | ||
50 | |||
51 | #if NR_CPUS <= RCU_FANOUT | ||
52 | # define NUM_RCU_LVLS 1 | ||
53 | # define NUM_RCU_LVL_0 1 | ||
54 | # define NUM_RCU_LVL_1 (NR_CPUS) | ||
55 | # define NUM_RCU_LVL_2 0 | ||
56 | # define NUM_RCU_LVL_3 0 | ||
57 | #elif NR_CPUS <= RCU_FANOUT_SQ | ||
58 | # define NUM_RCU_LVLS 2 | ||
59 | # define NUM_RCU_LVL_0 1 | ||
60 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) | ||
61 | # define NUM_RCU_LVL_2 (NR_CPUS) | ||
62 | # define NUM_RCU_LVL_3 0 | ||
63 | #elif NR_CPUS <= RCU_FANOUT_CUBE | ||
64 | # define NUM_RCU_LVLS 3 | ||
65 | # define NUM_RCU_LVL_0 1 | ||
66 | # define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) | ||
67 | # define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) | ||
68 | # define NUM_RCU_LVL_3 NR_CPUS | ||
69 | #else | ||
70 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | ||
71 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | ||
72 | |||
73 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) | ||
74 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | ||
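A worked example of the shape calculation, assuming CONFIG_RCU_FANOUT=64 and NR_CPUS=128: RCU_FANOUT_SQ is 4096, so the two-level branch is taken, giving NUM_RCU_LVL_0 = 1, NUM_RCU_LVL_1 = (128 + 63) / 64 = 2 and NUM_RCU_LVL_2 = 128. RCU_SUM is then 131 and NUM_RCU_NODES = 131 - 128 = 3: one root rcu_node fanning out to two leaf rcu_nodes, each covering up to 64 CPUs.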
75 | |||
76 | /* | ||
77 | * Dynticks per-CPU state. | ||
78 | */ | ||
79 | struct rcu_dynticks { | ||
80 | int dynticks_nesting; /* Track nesting level, sort of. */ | ||
81 | int dynticks; /* Even value for dynticks-idle, else odd. */ | ||
82 | int dynticks_nmi; /* Even value for either dynticks-idle or */ | ||
83 | /* not in nmi handler, else odd. So this */ | ||
84 | /* remains even for nmi from irq handler. */ | ||
85 | }; | ||
86 | |||
87 | /* | ||
88 | * Definition for node within the RCU grace-period-detection hierarchy. | ||
89 | */ | ||
90 | struct rcu_node { | ||
91 | spinlock_t lock; | ||
92 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | ||
93 | /* order for current grace period to proceed.*/ | ||
94 | unsigned long qsmaskinit; | ||
95 | /* Per-GP initialization for qsmask. */ | ||
96 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | ||
97 | int grplo; /* lowest-numbered CPU or group here. */ | ||
98 | int grphi; /* highest-numbered CPU or group here. */ | ||
99 | u8 grpnum; /* CPU/group number for next level up. */ | ||
100 | u8 level; /* root is at level 0. */ | ||
101 | struct rcu_node *parent; | ||
102 | } ____cacheline_internodealigned_in_smp; | ||
103 | |||
104 | /* Index values for nxttail array in struct rcu_data. */ | ||
105 | #define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ | ||
106 | #define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ | ||
107 | #define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ | ||
108 | #define RCU_NEXT_TAIL 3 | ||
109 | #define RCU_NEXT_SIZE 4 | ||
110 | |||
111 | /* Per-CPU data for read-copy update. */ | ||
112 | struct rcu_data { | ||
113 | /* 1) quiescent-state and grace-period handling : */ | ||
114 | long completed; /* Track rsp->completed gp number */ | ||
115 | /* in order to detect GP end. */ | ||
116 | long gpnum; /* Highest gp number that this CPU */ | ||
117 | /* is aware of having started. */ | ||
118 | long passed_quiesc_completed; | ||
119 | /* Value of completed at time of qs. */ | ||
120 | bool passed_quiesc; /* User-mode/idle loop etc. */ | ||
121 | bool qs_pending; /* Core waits for quiesc state. */ | ||
122 | bool beenonline; /* CPU online at least once. */ | ||
123 | struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ | ||
124 | unsigned long grpmask; /* Mask to apply to leaf qsmask. */ | ||
125 | |||
126 | /* 2) batch handling */ | ||
127 | /* | ||
128 | * If nxtlist is not NULL, it is partitioned as follows. | ||
129 | * Any of the partitions might be empty, in which case the | ||
130 | * pointer to that partition will be equal to the pointer for | ||
131 | * the following partition. When the list is empty, all of | ||
132 | * the nxttail elements point to nxtlist, which is NULL. | ||
133 | * | ||
134 | * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): | ||
135 | * Entries that might have arrived after current GP ended | ||
136 | * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): | ||
137 | * Entries known to have arrived before current GP ended | ||
138 | * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): | ||
139 | * Entries that batch # <= ->completed - 1: waiting for current GP | ||
140 | * [nxtlist, *nxttail[RCU_DONE_TAIL]): | ||
141 | * Entries that batch # <= ->completed | ||
142 | * The grace period for these entries has completed, and | ||
143 | * the other grace-period-completed entries may be moved | ||
144 | * here temporarily in rcu_process_callbacks(). | ||
145 | */ | ||
146 | struct rcu_head *nxtlist; | ||
147 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; | ||
148 | long qlen; /* # of queued callbacks */ | ||
149 | long blimit; /* Upper limit on a processed batch */ | ||
150 | |||
151 | #ifdef CONFIG_NO_HZ | ||
152 | /* 3) dynticks interface. */ | ||
153 | struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ | ||
154 | int dynticks_snap; /* Per-GP tracking for dynticks. */ | ||
155 | int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ | ||
156 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
157 | |||
158 | /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ | ||
159 | #ifdef CONFIG_NO_HZ | ||
160 | unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ | ||
161 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
162 | unsigned long offline_fqs; /* Kicked due to being offline. */ | ||
163 | unsigned long resched_ipi; /* Sent a resched IPI. */ | ||
164 | |||
165 | /* 5) state to allow this CPU to force_quiescent_state on others */ | ||
166 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | ||
167 | long n_rcu_pending_force_qs; /* when to force quiescent states. */ | ||
168 | |||
169 | int cpu; | ||
170 | }; | ||
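To make the nxtlist/nxttail layout described in the struct comment concrete, a minimal enqueue sketch (the helper is an assumption, not the code this header accompanies): a newly posted callback always lands in the RCU_NEXT segment.

static void my_rcu_enqueue(struct rcu_data *rdp, struct rcu_head *head,
			   void (*func)(struct rcu_head *))
{
	head->func = func;
	head->next = NULL;
	*rdp->nxttail[RCU_NEXT_TAIL] = head;	/* append to RCU_NEXT segment */
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
	rdp->qlen++;
}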
171 | |||
172 | /* Values for signaled field in struct rcu_state. */ | ||
173 | #define RCU_GP_INIT 0 /* Grace period being initialized. */ | ||
174 | #define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ | ||
175 | #define RCU_FORCE_QS 2 /* Need to force quiescent state. */ | ||
176 | #ifdef CONFIG_NO_HZ | ||
177 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | ||
178 | #else /* #ifdef CONFIG_NO_HZ */ | ||
179 | #define RCU_SIGNAL_INIT RCU_FORCE_QS | ||
180 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
181 | |||
182 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | ||
183 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
184 | #define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */ | ||
185 | #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */ | ||
186 | #define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ | ||
187 | /* to take at least one */ | ||
188 | /* scheduling clock irq */ | ||
189 | /* before ratting on them. */ | ||
190 | |||
191 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
192 | |||
193 | /* | ||
194 | * RCU global state, including node hierarchy. This hierarchy is | ||
195 | * represented in "heap" form in a dense array. The root (first level) | ||
196 | * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second | ||
197 | * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), | ||
198 | * and the third level in ->node[m+1] and following (->node[m+1] referenced | ||
199 | * by ->level[2]). The number of levels is determined by the number of | ||
200 | * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" | ||
201 | * consisting of a single rcu_node. | ||
202 | */ | ||
203 | struct rcu_state { | ||
204 | struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ | ||
205 | struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ | ||
206 | u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ | ||
207 | u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ | ||
208 | struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ | ||
209 | |||
210 | /* The following fields are guarded by the root rcu_node's lock. */ | ||
211 | |||
212 | u8 signaled ____cacheline_internodealigned_in_smp; | ||
213 | /* Force QS state. */ | ||
214 | long gpnum; /* Current gp number. */ | ||
215 | long completed; /* # of last completed gp. */ | ||
216 | spinlock_t onofflock; /* exclude on/offline and */ | ||
217 | /* starting new GP. */ | ||
218 | spinlock_t fqslock; /* Only one task forcing */ | ||
219 | /* quiescent states. */ | ||
220 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | ||
221 | /* force_quiescent_state(). */ | ||
222 | unsigned long n_force_qs; /* Number of calls to */ | ||
223 | /* force_quiescent_state(). */ | ||
224 | unsigned long n_force_qs_lh; /* ~Number of calls leaving */ | ||
225 | /* due to lock unavailable. */ | ||
226 | unsigned long n_force_qs_ngp; /* Number of calls leaving */ | ||
227 | /* due to no GP active. */ | ||
228 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
229 | unsigned long gp_start; /* Time at which GP started, */ | ||
230 | /* but in jiffies. */ | ||
231 | unsigned long jiffies_stall; /* Time at which to check */ | ||
232 | /* for CPU stalls. */ | ||
233 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
234 | #ifdef CONFIG_NO_HZ | ||
235 | long dynticks_completed; /* Value of completed @ snap. */ | ||
236 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
237 | }; | ||
238 | |||
239 | extern struct rcu_state rcu_state; | ||
240 | DECLARE_PER_CPU(struct rcu_data, rcu_data); | ||
241 | |||
242 | extern struct rcu_state rcu_bh_state; | ||
243 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
244 | |||
245 | /* | ||
246 | * Increment the quiescent state counter. | ||
247 | * The counter is a bit degenerated: We do not need to know | ||
248 | * how many quiescent states passed, just if there was at least | ||
249 | * one since the start of the grace period. Thus just a flag. | ||
250 | */ | ||
251 | static inline void rcu_qsctr_inc(int cpu) | ||
252 | { | ||
253 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
254 | rdp->passed_quiesc = 1; | ||
255 | rdp->passed_quiesc_completed = rdp->completed; | ||
256 | } | ||
257 | static inline void rcu_bh_qsctr_inc(int cpu) | ||
258 | { | ||
259 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | ||
260 | rdp->passed_quiesc = 1; | ||
261 | rdp->passed_quiesc_completed = rdp->completed; | ||
262 | } | ||
263 | |||
264 | extern int rcu_pending(int cpu); | ||
265 | extern int rcu_needs_cpu(int cpu); | ||
266 | |||
267 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
268 | extern struct lockdep_map rcu_lock_map; | ||
269 | # define rcu_read_acquire() \ | ||
270 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
271 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
272 | #else | ||
273 | # define rcu_read_acquire() do { } while (0) | ||
274 | # define rcu_read_release() do { } while (0) | ||
275 | #endif | ||
276 | |||
277 | static inline void __rcu_read_lock(void) | ||
278 | { | ||
279 | preempt_disable(); | ||
280 | __acquire(RCU); | ||
281 | rcu_read_acquire(); | ||
282 | } | ||
283 | static inline void __rcu_read_unlock(void) | ||
284 | { | ||
285 | rcu_read_release(); | ||
286 | __release(RCU); | ||
287 | preempt_enable(); | ||
288 | } | ||
289 | static inline void __rcu_read_lock_bh(void) | ||
290 | { | ||
291 | local_bh_disable(); | ||
292 | __acquire(RCU_BH); | ||
293 | rcu_read_acquire(); | ||
294 | } | ||
295 | static inline void __rcu_read_unlock_bh(void) | ||
296 | { | ||
297 | rcu_read_release(); | ||
298 | __release(RCU_BH); | ||
299 | local_bh_enable(); | ||
300 | } | ||
301 | |||
302 | #define __synchronize_sched() synchronize_rcu() | ||
303 | |||
304 | #define call_rcu_sched(head, func) call_rcu(head, func) | ||
305 | |||
306 | static inline void rcu_init_sched(void) | ||
307 | { | ||
308 | } | ||
309 | |||
310 | extern void __rcu_init(void); | ||
311 | extern void rcu_check_callbacks(int cpu, int user); | ||
312 | extern void rcu_restart_cpu(int cpu); | ||
313 | |||
314 | extern long rcu_batches_completed(void); | ||
315 | extern long rcu_batches_completed_bh(void); | ||
316 | |||
317 | #ifdef CONFIG_NO_HZ | ||
318 | void rcu_enter_nohz(void); | ||
319 | void rcu_exit_nohz(void); | ||
320 | #else /* CONFIG_NO_HZ */ | ||
321 | static inline void rcu_enter_nohz(void) | ||
322 | { | ||
323 | } | ||
324 | static inline void rcu_exit_nohz(void) | ||
325 | { | ||
326 | } | ||
327 | #endif /* CONFIG_NO_HZ */ | ||
328 | |||
329 | #endif /* __LINUX_RCUTREE_H */ | ||
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index d363467c8f13..b3b359660082 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -118,6 +118,8 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); | |||
118 | 118 | ||
119 | unsigned long ring_buffer_entries(struct ring_buffer *buffer); | 119 | unsigned long ring_buffer_entries(struct ring_buffer *buffer); |
120 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | 120 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); |
121 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | ||
122 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | ||
121 | 123 | ||
122 | u64 ring_buffer_time_stamp(int cpu); | 124 | u64 ring_buffer_time_stamp(int cpu); |
123 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); | 125 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4e4f1277f3bf..feb3b939ec4b 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -158,6 +158,8 @@ | |||
158 | /* SH-SCI */ | 158 | /* SH-SCI */ |
159 | #define PORT_SCIFA 83 | 159 | #define PORT_SCIFA 83 |
160 | 160 | ||
161 | #define PORT_S3C6400 84 | ||
162 | |||
161 | #ifdef __KERNEL__ | 163 | #ifdef __KERNEL__ |
162 | 164 | ||
163 | #include <linux/compiler.h> | 165 | #include <linux/compiler.h> |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 000da12b5cf0..f96d13c281e8 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |||
253 | * request comes from. | 253 | * request comes from. |
254 | */ | 254 | */ |
255 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) | 255 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) |
256 | extern void *__kmalloc_track_caller(size_t, gfp_t, void*); | 256 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); |
257 | #define kmalloc_track_caller(size, flags) \ | 257 | #define kmalloc_track_caller(size, flags) \ |
258 | __kmalloc_track_caller(size, flags, __builtin_return_address(0)) | 258 | __kmalloc_track_caller(size, flags, _RET_IP_) |
259 | #else | 259 | #else |
260 | #define kmalloc_track_caller(size, flags) \ | 260 | #define kmalloc_track_caller(size, flags) \ |
261 | __kmalloc(size, flags) | 261 | __kmalloc(size, flags) |
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*); | |||
271 | * allocation request comes from. | 271 | * allocation request comes from. |
272 | */ | 272 | */ |
273 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) | 273 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) |
274 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); | 274 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); |
275 | #define kmalloc_node_track_caller(size, flags, node) \ | 275 | #define kmalloc_node_track_caller(size, flags, node) \ |
276 | __kmalloc_node_track_caller(size, flags, node, \ | 276 | __kmalloc_node_track_caller(size, flags, node, \ |
277 | __builtin_return_address(0)) | 277 | _RET_IP_) |
278 | #else | 278 | #else |
279 | #define kmalloc_node_track_caller(size, flags, node) \ | 279 | #define kmalloc_node_track_caller(size, flags, node) \ |
280 | __kmalloc_node(size, flags, node) | 280 | __kmalloc_node(size, flags, node) |
@@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); | |||
285 | #define kmalloc_node_track_caller(size, flags, node) \ | 285 | #define kmalloc_node_track_caller(size, flags, node) \ |
286 | kmalloc_track_caller(size, flags) | 286 | kmalloc_track_caller(size, flags) |
287 | 287 | ||
288 | #endif /* DEBUG_SLAB */ | 288 | #endif /* CONFIG_NUMA */ |
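A short usage sketch for the _RET_IP_-based tracking above (the wrapper is invented for illustration): a wrapper allocator passes its caller's address through, so slab debugging attributes the allocation to the real call site rather than to the wrapper itself.

void *my_zalloc(size_t size, gfp_t flags)
{
	/* attributed to my_zalloc()'s caller via _RET_IP_ */
	return kmalloc_track_caller(size, flags | __GFP_ZERO);
}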
289 | 289 | ||
290 | /* | 290 | /* |
291 | * Shortcuts | 291 | * Shortcuts |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6f0ee1b84a4f..c39a21040dcb 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -58,6 +58,7 @@ struct rpc_clnt { | |||
58 | struct rpc_timeout cl_timeout_default; | 58 | struct rpc_timeout cl_timeout_default; |
59 | struct rpc_program * cl_program; | 59 | struct rpc_program * cl_program; |
60 | char cl_inline_name[32]; | 60 | char cl_inline_name[32]; |
61 | char *cl_principal; /* target to authenticate to */ | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | /* | 64 | /* |
@@ -108,6 +109,7 @@ struct rpc_create_args { | |||
108 | u32 version; | 109 | u32 version; |
109 | rpc_authflavor_t authflavor; | 110 | rpc_authflavor_t authflavor; |
110 | unsigned long flags; | 111 | unsigned long flags; |
112 | char *client_name; | ||
111 | }; | 113 | }; |
112 | 114 | ||
113 | /* Values for "flags" field */ | 115 | /* Values for "flags" field */ |
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 51b977a4ca20..cea764c2359f 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h | |||
@@ -15,6 +15,7 @@ struct rpc_pipe_ops { | |||
15 | ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); | 15 | ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); |
16 | ssize_t (*downcall)(struct file *, const char __user *, size_t); | 16 | ssize_t (*downcall)(struct file *, const char __user *, size_t); |
17 | void (*release_pipe)(struct inode *); | 17 | void (*release_pipe)(struct inode *); |
18 | int (*open_pipe)(struct inode *); | ||
18 | void (*destroy_msg)(struct rpc_pipe_msg *); | 19 | void (*destroy_msg)(struct rpc_pipe_msg *); |
19 | }; | 20 | }; |
20 | 21 | ||
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index c9165d9771a8..ca7d725861fc 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h | |||
@@ -20,6 +20,7 @@ int gss_svc_init(void); | |||
20 | void gss_svc_shutdown(void); | 20 | void gss_svc_shutdown(void); |
21 | int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); | 21 | int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); |
22 | u32 svcauth_gss_flavor(struct auth_domain *dom); | 22 | u32 svcauth_gss_flavor(struct auth_domain *dom); |
23 | char *svc_gss_principal(struct svc_rqst *); | ||
23 | 24 | ||
24 | #endif /* __KERNEL__ */ | 25 | #endif /* __KERNEL__ */ |
25 | #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ | 26 | #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ |
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index e4057d729f03..49e1eb454465 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
@@ -37,21 +37,6 @@ struct xdr_netobj { | |||
37 | typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); | 37 | typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * We're still requiring the BKL in the xdr code until it's been | ||
41 | * more carefully audited, at which point this wrapper will become | ||
42 | * unnecessary. | ||
43 | */ | ||
44 | static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj) | ||
45 | { | ||
46 | int ret; | ||
47 | |||
48 | lock_kernel(); | ||
49 | ret = xdrproc(rqstp, data, obj); | ||
50 | unlock_kernel(); | ||
51 | return ret; | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Basic structure for transmission/reception of a client XDR message. | 40 | * Basic structure for transmission/reception of a client XDR message. |
56 | * Features a header (for a linear buffer containing RPC headers | 41 | * Features a header (for a linear buffer containing RPC headers |
57 | * and the data payload for short messages), and then an array of | 42 | * and the data payload for short messages), and then an array of |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 4d80a118d538..11fc71d50c1e 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -76,8 +76,7 @@ struct rpc_rqst { | |||
76 | struct list_head rq_list; | 76 | struct list_head rq_list; |
77 | 77 | ||
78 | __u32 * rq_buffer; /* XDR encode buffer */ | 78 | __u32 * rq_buffer; /* XDR encode buffer */ |
79 | size_t rq_bufsize, | 79 | size_t rq_callsize, |
80 | rq_callsize, | ||
81 | rq_rcvsize; | 80 | rq_rcvsize; |
82 | 81 | ||
83 | struct xdr_buf rq_private_buf; /* The receive buffer | 82 | struct xdr_buf rq_private_buf; /* The receive buffer |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b18ec5533e8c..325af1de0351 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -7,9 +7,31 @@ struct device; | |||
7 | struct dma_attrs; | 7 | struct dma_attrs; |
8 | struct scatterlist; | 8 | struct scatterlist; |
9 | 9 | ||
10 | /* | ||
11 | * Maximum allowable number of contiguous slabs to map, | ||
12 | * must be a power of 2. What is the appropriate value ? | ||
13 | * The complexity of {map,unmap}_single is linearly dependent on this value. | ||
14 | */ | ||
15 | #define IO_TLB_SEGSIZE 128 | ||
16 | |||
17 | |||
18 | /* | ||
19 | * log of the size of each IO TLB slab. The number of slabs is command line | ||
20 | * controllable. | ||
21 | */ | ||
22 | #define IO_TLB_SHIFT 11 | ||
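For concreteness: with IO_TLB_SHIFT = 11 each slab spans 1 << 11 = 2048 bytes, and a single mapping may use at most IO_TLB_SEGSIZE = 128 contiguous slabs, i.e. up to 256 KiB per mapping.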
23 | |||
10 | extern void | 24 | extern void |
11 | swiotlb_init(void); | 25 | swiotlb_init(void); |
12 | 26 | ||
27 | extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); | ||
28 | extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); | ||
29 | |||
30 | extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address); | ||
31 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); | ||
32 | |||
33 | extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); | ||
34 | |||
13 | extern void | 35 | extern void |
14 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 36 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
15 | dma_addr_t *dma_handle, gfp_t flags); | 37 | dma_addr_t *dma_handle, gfp_t flags); |
diff --git a/include/linux/timex.h b/include/linux/timex.h index 9007313b5b71..998a55d80acf 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h | |||
@@ -53,47 +53,11 @@ | |||
53 | #ifndef _LINUX_TIMEX_H | 53 | #ifndef _LINUX_TIMEX_H |
54 | #define _LINUX_TIMEX_H | 54 | #define _LINUX_TIMEX_H |
55 | 55 | ||
56 | #include <linux/compiler.h> | ||
57 | #include <linux/time.h> | 56 | #include <linux/time.h> |
58 | 57 | ||
59 | #include <asm/param.h> | ||
60 | |||
61 | #define NTP_API 4 /* NTP API version */ | 58 | #define NTP_API 4 /* NTP API version */ |
62 | 59 | ||
63 | /* | 60 | /* |
64 | * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen | ||
65 | * for a slightly underdamped convergence characteristic. SHIFT_KH | ||
66 | * establishes the damping of the FLL and is chosen by wisdom and black | ||
67 | * art. | ||
68 | * | ||
69 | * MAXTC establishes the maximum time constant of the PLL. With the | ||
70 | * SHIFT_KG and SHIFT_KF values given and a time constant range from | ||
71 | * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, | ||
72 | * respectively. | ||
73 | */ | ||
74 | #define SHIFT_PLL 4 /* PLL frequency factor (shift) */ | ||
75 | #define SHIFT_FLL 2 /* FLL frequency factor (shift) */ | ||
76 | #define MAXTC 10 /* maximum time constant (shift) */ | ||
77 | |||
78 | /* | ||
79 | * SHIFT_USEC defines the scaling (shift) of the time_freq and | ||
80 | * time_tolerance variables, which represent the current frequency | ||
81 | * offset and maximum frequency tolerance. | ||
82 | */ | ||
83 | #define SHIFT_USEC 16 /* frequency offset scale (shift) */ | ||
84 | #define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) | ||
85 | #define PPM_SCALE_INV_SHIFT 19 | ||
86 | #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ | ||
87 | PPM_SCALE + 1) | ||
88 | |||
89 | #define MAXPHASE 500000000l /* max phase error (ns) */ | ||
90 | #define MAXFREQ 500000 /* max frequency error (ns/s) */ | ||
91 | #define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) | ||
92 | #define MINSEC 256 /* min interval between updates (s) */ | ||
93 | #define MAXSEC 2048 /* max interval between updates (s) */ | ||
94 | #define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ | ||
95 | |||
96 | /* | ||
97 | * syscall interface - used (mainly by NTP daemon) | 61 | * syscall interface - used (mainly by NTP daemon) |
98 | * to discipline kernel clock oscillator | 62 | * to discipline kernel clock oscillator |
99 | */ | 63 | */ |
@@ -199,9 +163,46 @@ struct timex { | |||
199 | #define TIME_BAD TIME_ERROR /* bw compat */ | 163 | #define TIME_BAD TIME_ERROR /* bw compat */ |
200 | 164 | ||
201 | #ifdef __KERNEL__ | 165 | #ifdef __KERNEL__ |
166 | #include <linux/compiler.h> | ||
167 | #include <linux/types.h> | ||
168 | #include <linux/param.h> | ||
169 | |||
202 | #include <asm/timex.h> | 170 | #include <asm/timex.h> |
203 | 171 | ||
204 | /* | 172 | /* |
173 | * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen | ||
174 | * for a slightly underdamped convergence characteristic. SHIFT_KH | ||
175 | * establishes the damping of the FLL and is chosen by wisdom and black | ||
176 | * art. | ||
177 | * | ||
178 | * MAXTC establishes the maximum time constant of the PLL. With the | ||
179 | * SHIFT_KG and SHIFT_KF values given and a time constant range from | ||
180 | * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, | ||
181 | * respectively. | ||
182 | */ | ||
183 | #define SHIFT_PLL 4 /* PLL frequency factor (shift) */ | ||
184 | #define SHIFT_FLL 2 /* FLL frequency factor (shift) */ | ||
185 | #define MAXTC 10 /* maximum time constant (shift) */ | ||
186 | |||
187 | /* | ||
188 | * SHIFT_USEC defines the scaling (shift) of the time_freq and | ||
189 | * time_tolerance variables, which represent the current frequency | ||
190 | * offset and maximum frequency tolerance. | ||
191 | */ | ||
192 | #define SHIFT_USEC 16 /* frequency offset scale (shift) */ | ||
193 | #define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) | ||
194 | #define PPM_SCALE_INV_SHIFT 19 | ||
195 | #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ | ||
196 | PPM_SCALE + 1) | ||
197 | |||
198 | #define MAXPHASE 500000000l /* max phase error (ns) */ | ||
199 | #define MAXFREQ 500000 /* max frequency error (ns/s) */ | ||
200 | #define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) | ||
201 | #define MINSEC 256 /* min interval between updates (s) */ | ||
202 | #define MAXSEC 2048 /* max interval between updates (s) */ | ||
203 | #define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ | ||
204 | |||
205 | /* | ||
205 | * kernel variables | 206 | * kernel variables |
206 | * Note: maximum error = NTP synch distance = dispersion + delay / 2; | 207 | * Note: maximum error = NTP synch distance = dispersion + delay / 2; |
207 | * estimated error = NTP dispersion. | 208 | * estimated error = NTP dispersion. |
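Note on the constants moved under #ifdef __KERNEL__ above: PPM_SCALE, PPM_SCALE_INV and MAXFREQ_SCALED exist to convert between the scaled-ppm frequency userspace passes in struct timex and the kernel's NTP_SCALE_SHIFT-scaled ns/s representation. A minimal kernel-side sketch of that conversion follows; the helper names are illustrative only, but the arithmetic mirrors what kernel/time/ntp.c does with these macros.

#include <linux/kernel.h>
#include <linux/timex.h>

/* txc->freq from userspace is ppm with a 16-bit binary fraction (SHIFT_USEC) */
static inline s64 ntp_freq_from_scaled_ppm(long scaled_ppm)
{
	s64 freq = (s64)scaled_ppm * PPM_SCALE;	/* now ns/s << NTP_SCALE_SHIFT */

	freq = min(freq, MAXFREQ_SCALED);
	return max(freq, -MAXFREQ_SCALED);
}

static inline long ntp_freq_to_scaled_ppm(s64 time_freq)
{
	/* multiply by the precomputed reciprocal instead of dividing;
	 * relies on arithmetic right shift for negative values (gcc) */
	return (long)(((time_freq >> PPM_SCALE_INV_SHIFT) * PPM_SCALE_INV) >>
		      NTP_SCALE_SHIFT);
}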
diff --git a/include/linux/types.h b/include/linux/types.h index 1d98330b1f2c..121f349cb7ec 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -135,19 +135,14 @@ typedef __s64 int64_t; | |||
135 | * | 135 | * |
136 | * Linux always considers sectors to be 512 bytes long independently | 136 | * Linux always considers sectors to be 512 bytes long independently |
137 | * of the devices real block size. | 137 | * of the devices real block size. |
138 | * | ||
139 | * blkcnt_t is the type of the inode's block count. | ||
138 | */ | 140 | */ |
139 | #ifdef CONFIG_LBD | 141 | #ifdef CONFIG_LBD |
140 | typedef u64 sector_t; | 142 | typedef u64 sector_t; |
141 | #else | ||
142 | typedef unsigned long sector_t; | ||
143 | #endif | ||
144 | |||
145 | /* | ||
146 | * The type of the inode's block count. | ||
147 | */ | ||
148 | #ifdef CONFIG_LSF | ||
149 | typedef u64 blkcnt_t; | 143 | typedef u64 blkcnt_t; |
150 | #else | 144 | #else |
145 | typedef unsigned long sector_t; | ||
151 | typedef unsigned long blkcnt_t; | 146 | typedef unsigned long blkcnt_t; |
152 | #endif | 147 | #endif |
153 | 148 | ||
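The types.h change above folds blkcnt_t under the same CONFIG_LBD switch as sector_t; both are u64 with CONFIG_LBD and unsigned long without it. Either way a sector_t always counts 512-byte units, so byte offsets are a fixed shift, as in this small sketch (helper name is illustrative):

#include <linux/types.h>

/* sector_t counts 512-byte units regardless of the device's real block
 * size, so the byte offset is simply the sector number shifted by 9. */
static inline u64 sector_to_bytes(sector_t sect)
{
	return (u64)sect << 9;
}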
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index fec6decfb983..6b58367d145e 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, | |||
78 | \ | 78 | \ |
79 | set_fs(KERNEL_DS); \ | 79 | set_fs(KERNEL_DS); \ |
80 | pagefault_disable(); \ | 80 | pagefault_disable(); \ |
81 | ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ | 81 | ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ |
82 | pagefault_enable(); \ | 82 | pagefault_enable(); \ |
83 | set_fs(old_fs); \ | 83 | set_fs(old_fs); \ |
84 | ret; \ | 84 | ret; \ |
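The macro body changed above appears to be probe_kernel_address() in include/linux/uaccess.h (an assumption from the context shown); it reads a value from a possibly-unmapped kernel address without oopsing, now via __copy_from_user_inatomic(). A hedged usage sketch, with an illustrative caller name:

#include <linux/errno.h>
#include <linux/uaccess.h>

static int read_word_safely(const unsigned long *addr, unsigned long *val)
{
	unsigned long tmp;

	if (probe_kernel_address(addr, tmp))
		return -EFAULT;	/* non-zero return means the access faulted */

	*val = tmp;
	return 0;
}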
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 4669d7e72e75..1f126e30766c 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
@@ -293,6 +293,7 @@ struct v4l2_pix_format { | |||
293 | #define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ | 293 | #define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ |
294 | #define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ | 294 | #define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ |
295 | #define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ | 295 | #define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ |
296 | #define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */ | ||
296 | #define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ | 297 | #define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ |
297 | #define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ | 298 | #define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ |
298 | #define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ | 299 | #define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ |
@@ -304,6 +305,8 @@ struct v4l2_pix_format { | |||
304 | /* two planes -- one Y, one Cr + Cb interleaved */ | 305 | /* two planes -- one Y, one Cr + Cb interleaved */ |
305 | #define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ | 306 | #define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ |
306 | #define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ | 307 | #define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ |
308 | #define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */ | ||
309 | #define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ | ||
307 | 310 | ||
308 | /* The following formats are not defined in the V4L2 specification */ | 311 | /* The following formats are not defined in the V4L2 specification */ |
309 | #define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ | 312 | #define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ |
@@ -1050,7 +1053,7 @@ enum v4l2_mpeg_video_bitrate_mode { | |||
1050 | #define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) | 1053 | #define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) |
1051 | #define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) | 1054 | #define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) |
1052 | 1055 | ||
1053 | /* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */ | 1056 | /* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ |
1054 | #define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) | 1057 | #define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) |
1055 | #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) | 1058 | #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) |
1056 | enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { | 1059 | enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { |
@@ -1117,6 +1120,12 @@ enum v4l2_exposure_auto_type { | |||
1117 | #define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) | 1120 | #define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) |
1118 | #define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) | 1121 | #define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) |
1119 | 1122 | ||
1123 | #define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13) | ||
1124 | #define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14) | ||
1125 | #define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15) | ||
1126 | |||
1127 | #define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16) | ||
1128 | |||
1120 | /* | 1129 | /* |
1121 | * T U N I N G | 1130 | * T U N I N G |
1122 | */ | 1131 | */ |
@@ -1369,6 +1378,7 @@ struct v4l2_streamparm { | |||
1369 | #define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */ | 1378 | #define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */ |
1370 | #define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */ | 1379 | #define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */ |
1371 | #define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */ | 1380 | #define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */ |
1381 | #define V4L2_CHIP_MATCH_AC97 3 /* Match against ancillary AC97 chip */ | ||
1372 | 1382 | ||
1373 | struct v4l2_register { | 1383 | struct v4l2_register { |
1374 | __u32 match_type; /* Match type */ | 1384 | __u32 match_type; /* Match type */ |
@@ -1458,6 +1468,8 @@ struct v4l2_chip_ident { | |||
1458 | #define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) | 1468 | #define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) |
1459 | #endif | 1469 | #endif |
1460 | #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) | 1470 | #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) |
1471 | /* Reminder: when adding new ioctls please add support for them to | ||
1472 | drivers/media/video/v4l2-compat-ioctl32.c as well! */ | ||
1461 | 1473 | ||
1462 | #ifdef __OLD_VIDIOC_ | 1474 | #ifdef __OLD_VIDIOC_ |
1463 | /* for compatibility, will go away some day */ | 1475 | /* for compatibility, will go away some day */ |
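The videodev2.h additions above introduce new camera-class controls (zoom and privacy). From userspace they are driven through the standard control ioctls; a minimal sketch using VIDIOC_S_CTRL (device path and function name are illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Set the new absolute-zoom control on a V4L2 capture device. */
int set_absolute_zoom(const char *dev, int zoom)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_ZOOM_ABSOLUTE,
		.value	= zoom,
	};
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}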
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index c30c7bfbf39b..8726ff77763e 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h | |||
@@ -10,6 +10,9 @@ | |||
10 | /* The feature bitmap for virtio balloon */ | 10 | /* The feature bitmap for virtio balloon */ |
11 | #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ | 11 | #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ |
12 | 12 | ||
13 | /* Size of a PFN in the balloon interface. */ | ||
14 | #define VIRTIO_BALLOON_PFN_SHIFT 12 | ||
15 | |||
13 | struct virtio_balloon_config | 16 | struct virtio_balloon_config |
14 | { | 17 | { |
15 | /* Number of pages host wants Guest to give up. */ | 18 | /* Number of pages host wants Guest to give up. */ |
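VIRTIO_BALLOON_PFN_SHIFT fixes the balloon's page-frame unit at 4K regardless of the guest's PAGE_SHIFT. A hedged sketch of the conversion a balloon driver performs (it mirrors drivers/virtio/virtio_balloon.c; the helper name here is illustrative):

#include <linux/mm.h>
#include <linux/virtio_balloon.h>

static inline u32 page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	/* convert from the guest's native page size to 4K balloon pages */
	return pfn << (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
}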
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index 19a0da0dba41..7615ffcdd555 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h | |||
@@ -7,6 +7,17 @@ | |||
7 | /* The ID for virtio console */ | 7 | /* The ID for virtio console */ |
8 | #define VIRTIO_ID_CONSOLE 3 | 8 | #define VIRTIO_ID_CONSOLE 3 |
9 | 9 | ||
10 | /* Feature bits */ | ||
11 | #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ | ||
12 | |||
13 | struct virtio_console_config { | ||
14 | /* columns of the screen */ | ||
15 | __u16 cols; | ||
16 | /* rows of the screen */ | ||
17 | __u16 rows; | ||
18 | } __attribute__((packed)); | ||
19 | |||
20 | |||
10 | #ifdef __KERNEL__ | 21 | #ifdef __KERNEL__ |
11 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); | 22 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); |
12 | #endif /* __KERNEL__ */ | 23 | #endif /* __KERNEL__ */ |
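With the VIRTIO_CONSOLE_F_SIZE feature bit and config struct added above, a guest driver can query the host-advertised geometry through the generic config accessor. A hedged driver-side sketch (the surrounding function and fallback values are illustrative):

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_console.h>

static void fetch_console_size(struct virtio_device *vdev, u16 *cols, u16 *rows)
{
	if (!virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
		*cols = 80;	/* host gave no size; fall back to 80x25 */
		*rows = 25;
		return;
	}

	vdev->config->get(vdev, offsetof(struct virtio_console_config, cols),
			  cols, sizeof(*cols));
	vdev->config->get(vdev, offsetof(struct virtio_console_config, rows),
			  rows, sizeof(*rows));
}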
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index cdef35742932..cd0fd5d181a6 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h | |||
@@ -53,4 +53,12 @@ | |||
53 | 53 | ||
54 | /* Virtio ABI version, this must match exactly */ | 54 | /* Virtio ABI version, this must match exactly */ |
55 | #define VIRTIO_PCI_ABI_VERSION 0 | 55 | #define VIRTIO_PCI_ABI_VERSION 0 |
56 | |||
57 | /* How many bits to shift physical queue address written to QUEUE_PFN. | ||
58 | * 12 is historical, and due to x86 page size. */ | ||
59 | #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 | ||
60 | |||
61 | /* The alignment to use between consumer and producer parts of vring. | ||
62 | * x86 pagesize again. */ | ||
63 | #define VIRTIO_PCI_VRING_ALIGN 4096 | ||
56 | #endif | 64 | #endif |
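The two new virtio_pci.h constants work together: the vring is laid out with VIRTIO_PCI_VRING_ALIGN alignment, and its location is reported to the device as a 4K frame number via VIRTIO_PCI_QUEUE_ADDR_SHIFT. A hedged sketch of the register write (it mirrors the virtio_pci driver; the function and variable names are illustrative):

#include <linux/io.h>
#include <linux/virtio_pci.h>

static void tell_device_about_queue(void __iomem *ioaddr, void *queue_pages)
{
	/* queue_pages must be physically contiguous and
	 * VIRTIO_PCI_VRING_ALIGN-aligned */
	iowrite32(virt_to_phys(queue_pages) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  ioaddr + VIRTIO_PCI_QUEUE_PFN);
}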
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index c4a598fb3826..71e03722fb59 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -83,7 +83,7 @@ struct vring { | |||
83 | * __u16 avail_idx; | 83 | * __u16 avail_idx; |
84 | * __u16 available[num]; | 84 | * __u16 available[num]; |
85 | * | 85 | * |
86 | * // Padding to the next page boundary. | 86 | * // Padding to the next align boundary. |
87 | * char pad[]; | 87 | * char pad[]; |
88 | * | 88 | * |
89 | * // A ring of used descriptor heads with free-running index. | 89 | * // A ring of used descriptor heads with free-running index. |
@@ -93,19 +93,19 @@ struct vring { | |||
93 | * }; | 93 | * }; |
94 | */ | 94 | */ |
95 | static inline void vring_init(struct vring *vr, unsigned int num, void *p, | 95 | static inline void vring_init(struct vring *vr, unsigned int num, void *p, |
96 | unsigned long pagesize) | 96 | unsigned long align) |
97 | { | 97 | { |
98 | vr->num = num; | 98 | vr->num = num; |
99 | vr->desc = p; | 99 | vr->desc = p; |
100 | vr->avail = p + num*sizeof(struct vring_desc); | 100 | vr->avail = p + num*sizeof(struct vring_desc); |
101 | vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1) | 101 | vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1) |
102 | & ~(pagesize - 1)); | 102 | & ~(align - 1)); |
103 | } | 103 | } |
104 | 104 | ||
105 | static inline unsigned vring_size(unsigned int num, unsigned long pagesize) | 105 | static inline unsigned vring_size(unsigned int num, unsigned long align) |
106 | { | 106 | { |
107 | return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) | 107 | return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) |
108 | + pagesize - 1) & ~(pagesize - 1)) | 108 | + align - 1) & ~(align - 1)) |
109 | + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; | 109 | + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; |
110 | } | 110 | } |
111 | 111 | ||
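With vring_init() and vring_size() now taking an explicit alignment instead of assuming the page size, a transport sizes the ring, allocates that much zeroed memory, and lays the ring out over it. A hedged usage sketch, picking VIRTIO_PCI_VRING_ALIGN as the alignment (function name is illustrative):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

static int setup_ring(struct vring *vr, unsigned int num)
{
	void *pages;

	/* get_order() rounds the byte size up to whole pages */
	pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(vring_size(num, VIRTIO_PCI_VRING_ALIGN)));
	if (!pages)
		return -ENOMEM;

	vring_init(vr, num, pages, VIRTIO_PCI_VRING_ALIGN);
	return 0;
}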
@@ -115,6 +115,7 @@ struct virtio_device; | |||
115 | struct virtqueue; | 115 | struct virtqueue; |
116 | 116 | ||
117 | struct virtqueue *vring_new_virtqueue(unsigned int num, | 117 | struct virtqueue *vring_new_virtqueue(unsigned int num, |
118 | unsigned int vring_align, | ||
118 | struct virtio_device *vdev, | 119 | struct virtio_device *vdev, |
119 | void *pages, | 120 | void *pages, |
120 | void (*notify)(struct virtqueue *vq), | 121 | void (*notify)(struct virtqueue *vq), |