Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/Kbuild           |    1
-rw-r--r--  include/linux/aio.h            |    5
-rw-r--r--  include/linux/bio.h            |   26
-rw-r--r--  include/linux/blkdev.h         |   52
-rw-r--r--  include/linux/bottom_half.h    |    1
-rw-r--r--  include/linux/buffer_head.h    |    1
-rw-r--r--  include/linux/debug_locks.h    |    2
-rw-r--r--  include/linux/elevator.h       |    8
-rw-r--r--  include/linux/futex.h          |    5
-rw-r--r--  include/linux/genhd.h          |    1
-rw-r--r--  include/linux/hardirq.h        |   13
-rw-r--r--  include/linux/hrtimer.h        |   34
-rw-r--r--  include/linux/interrupt.h      |    5
-rw-r--r--  include/linux/irq.h            |   62
-rw-r--r--  include/linux/irqnr.h          |   26
-rw-r--r--  include/linux/kernel.h         |   11
-rw-r--r--  include/linux/kernel_stat.h    |   14
-rw-r--r--  include/linux/lockdep.h        |   45
-rw-r--r--  include/linux/mm_types.h       |    5
-rw-r--r--  include/linux/msi.h            |    3
-rw-r--r--  include/linux/mutex.h          |    2
-rw-r--r--  include/linux/posix-timers.h   |    6
-rw-r--r--  include/linux/random.h         |   51
-rw-r--r--  include/linux/rcuclassic.h     |    2
-rw-r--r--  include/linux/rcupdate.h       |   10
-rw-r--r--  include/linux/rcutree.h        |  329
-rw-r--r--  include/linux/swiotlb.h        |   22
-rw-r--r--  include/linux/timex.h          |   73
-rw-r--r--  include/linux/types.h          |   11
-rw-r--r--  include/linux/uaccess.h        |    2
30 files changed, 687 insertions, 141 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e531783e5d78..95ac82340c3b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -313,6 +313,7 @@ unifdef-y += ptrace.h
 unifdef-y += qnx4_fs.h
 unifdef-y += quota.h
 unifdef-y += random.h
+unifdef-y += irqnr.h
 unifdef-y += reboot.h
 unifdef-y += reiserfs_fs.h
 unifdef-y += reiserfs_xattr.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index f6b8cf99b596..b16a957030f8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -5,6 +5,7 @@
 #include <linux/workqueue.h>
 #include <linux/aio_abi.h>
 #include <linux/uio.h>
+#include <linux/rcupdate.h>
 
 #include <asm/atomic.h>
 
@@ -183,7 +184,7 @@ struct kioctx {
 
 	/* This needs improving */
 	unsigned long		user_id;
-	struct kioctx		*next;
+	struct hlist_node	list;
 
 	wait_queue_head_t	wait;
 
@@ -199,6 +200,8 @@ struct kioctx {
 	struct aio_ring_info	ring_info;
 
 	struct delayed_work	wq;
+
+	struct rcu_head		rcu_head;
 };
 
 /* prototypes */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 6a642098e5c3..18462c5b8fff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -90,10 +90,11 @@ struct bio {
 
 	unsigned int		bi_comp_cpu;	/* completion CPU */
 
+	atomic_t		bi_cnt;		/* pin count */
+
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
 
 	bio_end_io_t		*bi_end_io;
-	atomic_t		bi_cnt;		/* pin count */
 
 	void			*bi_private;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -101,6 +102,13 @@ struct bio {
 #endif
 
 	bio_destructor_t	*bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec		bi_inline_vecs[0];
 };
 
 /*
@@ -117,6 +125,7 @@ struct bio {
 #define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
 #define BIO_NULL_MAPPED 9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	11	/* Make BIO Quiet */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }
 
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
@@ -332,7 +346,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
@@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
@@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
  */
 #define BIO_POOL_SIZE 2
 #define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)
 
 struct bio_set {
+	struct kmem_cache	*bio_slab;
+	unsigned int		front_pad;
+
 	mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	mempool_t *bio_integrity_pool;
 #endif
-	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+	mempool_t *bvec_pool;
 };
 
 struct biovec_slab {
@@ -411,6 +430,7 @@ struct biovec_slab {
 };
 
 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 /*
  * a small number of entries is fine, not going to be performance critical.
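A side note on the helpers this bio.h diff introduces: bio_has_allocated_vec() is exactly the test a release path needs before handing vecs back through the new bvec_free_bs(). A minimal sketch of that pairing, under stated assumptions (put_my_bio_vecs() is a hypothetical caller; BIO_POOL_IDX() is assumed to be the existing pool-index accessor in this header):

    /*
     * Hypothetical release helper: only externally allocated vec lists go
     * back through bvec_free_bs(); inline vecs are freed with the bio.
     */
    static void put_my_bio_vecs(struct bio *bio, struct bio_set *bs)
    {
            if (bio_has_allocated_vec(bio))
                    bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
            /* else: vecs live in bi_inline_vecs[], inside the bio itself */
    }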
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 031a315c0509..7035cec583b6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -313,7 +312,7 @@ struct request_queue
 	 */
 	struct list_head	queue_head;
 	struct request		*last_merge;
-	elevator_t		*elevator;
+	struct elevator_queue	*elevator;
 
 	/*
 	 * the queue request freelist, one for reads and one for writes
@@ -449,6 +448,7 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +522,32 @@ enum {
 	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
 	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
 	 */
-	QUEUE_ORDERED_NONE	= 0x00,
-	QUEUE_ORDERED_DRAIN	= 0x01,
-	QUEUE_ORDERED_TAG	= 0x02,
-
-	QUEUE_ORDERED_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_POSTFLUSH	= 0x20,
-	QUEUE_ORDERED_FUA	= 0x40,
-
-	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_BY_DRAIN		= 0x01,
+	QUEUE_ORDERED_BY_TAG		= 0x02,
+	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_BAR		= 0x20,
+	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
+	QUEUE_ORDERED_DO_FUA		= 0x80,
+
+	QUEUE_ORDERED_NONE		= 0x00,
+
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
+
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
 
 	/*
 	 * Ordered operation sequence
@@ -585,7 +595,6 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
 
@@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
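Worth spelling out how the reworked QUEUE_ORDERED_* values in this blkdev.h diff compose: a mode is "how to order" (QUEUE_ORDERED_BY_DRAIN or _BY_TAG) OR-ed with "what to do" bits (DO_PREFLUSH, DO_BAR, DO_POSTFLUSH, DO_FUA), so code can mask for individual steps instead of matching whole modes. A minimal sketch (wants_preflush() is hypothetical; the constants are from the enum above):

    /* e.g. QUEUE_ORDERED_DRAIN_FUA = BY_DRAIN | DO_BAR | DO_PREFLUSH | DO_FUA
     *                              = 0x01     | 0x20   | 0x10        | 0x80  */
    static inline int wants_preflush(unsigned ordered)
    {
            return ordered & QUEUE_ORDERED_DO_PREFLUSH;
    }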
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d44..27b1bcffe408 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
 extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
 extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 3ce64b90118c..8605f8a74df9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -35,6 +35,7 @@ enum bh_state_bits {
 	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
+	BH_Quiet,	/* Buffer Error Prinks to be quiet */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb9..096476f1fb35 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
 ({								\
 	int __ret = 0;						\
 								\
-	if (unlikely(c)) {					\
+	if (!oops_in_progress && unlikely(c)) {			\
 		if (debug_locks_off() && !debug_locks_silent)	\
 			WARN_ON(1);				\
 		__ret = 1;					\
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 92f6f634e3e6..7a204256b155 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
 typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
 {
@@ -62,8 +62,8 @@ struct elevator_ops
 
 struct elv_fs_entry {
 	struct attribute attr;
-	ssize_t (*show)(elevator_t *, char *);
-	ssize_t (*store)(elevator_t *, const char *, size_t);
+	ssize_t (*show)(struct elevator_queue *, char *);
+	ssize_t (*store)(struct elevator_queue *, const char *, size_t);
 };
 
 /*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec3..3bf5bb5a34f9 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET	10
 
 #define FUTEX_PRIVATE_FLAG	128
-#define FUTEX_CMD_MASK		~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME	256
+#define FUTEX_CMD_MASK		~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE	(FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE	(FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
 	} both;
 };
 
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
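Since FUTEX_CLOCK_REALTIME is an op-word flag bit like FUTEX_PRIVATE_FLAG, the widened FUTEX_CMD_MASK above is what lets callers recover the base command. A hedged sketch of that decoding (decode_futex_op() is illustrative, not the kernel's parser):

    static void decode_futex_op(int op, int *cmd, int *use_realtime)
    {
            *cmd = op & FUTEX_CMD_MASK;                    /* strip both flag bits */
            *use_realtime = !!(op & FUTEX_CLOCK_REALTIME); /* timeout clock choice */
    }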
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 3df7742ce246..16948eaecae3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
+	struct hd_struct *last_lookup;
 	struct hd_struct *part[];
 };
131 132
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 89a56d79e4c6..f83288347dda 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
  */
 #define __irq_enter()					\
 	do {						\
-		rcu_irq_enter();			\
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
@@ -154,7 +157,6 @@ extern void irq_enter(void);
 		trace_hardirq_exit();			\
 		account_system_vtime(current);		\
 		sub_preempt_count(HARDIRQ_OFFSET);	\
-		rcu_irq_exit();				\
 	} while (0)
 
 /*
@@ -166,11 +168,14 @@ extern void irq_exit(void);
 	do {					\
 		ftrace_nmi_enter();		\
 		lockdep_off();			\
+		rcu_nmi_enter();		\
 		__irq_enter();			\
 	} while (0)
+
 #define nmi_exit()				\
 	do {					\
 		__irq_exit();			\
+		rcu_nmi_exit();			\
 		lockdep_on();			\
 		ftrace_nmi_exit();		\
 	} while (0)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3eba43878dcb..bd37078c2d7d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -43,26 +43,6 @@ enum hrtimer_restart {
 };
 
 /*
- * hrtimer callback modes:
- *
- *	HRTIMER_CB_SOFTIRQ:		Callback must run in softirq context
- *	HRTIMER_CB_IRQSAFE_PERCPU:	Callback must run in hardirq context
- *					Special mode for tick emulation and
- *					scheduler timer. Such timers are per
- *					cpu and not allowed to be migrated on
- *					cpu unplug.
- *	HRTIMER_CB_IRQSAFE_UNLOCKED:	Callback should run in hardirq context
- *					with timer->base lock unlocked
- *					used for timers which call wakeup to
- *					avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
-	HRTIMER_CB_SOFTIRQ,
-	HRTIMER_CB_IRQSAFE_PERCPU,
-	HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
-/*
  * Values to track state of the timer
  *
  * Possible states:
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
  * 0x00		inactive
  * 0x01		enqueued into rbtree
  * 0x02		callback function running
- * 0x04		callback pending (high resolution mode)
  *
  * Special cases:
  * 0x03		callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_INACTIVE	0x00
 #define HRTIMER_STATE_ENQUEUED	0x01
 #define HRTIMER_STATE_CALLBACK	0x02
-#define HRTIMER_STATE_PENDING	0x04
-#define HRTIMER_STATE_MIGRATE	0x08
+#define HRTIMER_STATE_MIGRATE	0x04
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
  * @function:	timer expiry callback function
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
- * @cb_mode:	high resolution timer feature to select the callback execution
- *		mode
  * @cb_entry:	list head to enqueue an expired timer into the callback list
  * @start_site:	timer statistics field to store the site where the timer
  *		was started
@@ -129,7 +105,6 @@ struct hrtimer {
 	struct hrtimer_clock_base	*base;
 	unsigned long			state;
 	struct list_head		cb_entry;
-	enum hrtimer_cb_mode		cb_mode;
 #ifdef CONFIG_TIMER_STATS
 	int				start_pid;
 	void				*start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
  * @check_clocks:	Indictator, when set evaluate time source and clock
  *			event devices whether high resolution mode can be
  *			activated.
- * @cb_pending:		Expired timers are moved from the rbtree to this
- *			list in the timer interrupt. The list is processed
- *			in the softirq.
  * @nr_events:		Total number of timer interrupt events
  */
 struct hrtimer_cpu_base {
 	spinlock_t			lock;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
-	struct list_head		cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
 	int				hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
  */
 static inline int hrtimer_is_queued(struct hrtimer *timer)
 {
-	return timer->state &
-		(HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+	return timer->state & HRTIMER_STATE_ENQUEUED;
 }
 
 /*
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f58a0cf8929a..be3c484b5242 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/irqnr.h>
+
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -251,9 +253,6 @@ enum
 	BLOCK_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
-	HRTIMER_SOFTIRQ,
-#endif
 	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3dddfa703ebd..98564dc64476 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -129,9 +129,14 @@ struct irq_chip {
 	const char	*typename;
 };
 
+struct timer_rand_state;
+struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  * @irq:		interrupt number for this descriptor
+ * @timer_rand_state:	pointer to timer rand state struct
+ * @kstat_irqs:		irq stats per cpu
+ * @irq_2_iommu:	iommu with this irq
  * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
  * @chip:		low level interrupt hardware access
  * @msi_desc:		MSI descriptor
@@ -143,8 +148,8 @@ struct irq_chip {
  * @depth:		disable-depth, for nested irq_disable() calls
  * @wake_depth:		enable depth, for multiple set_irq_wake() callers
  * @irq_count:		stats field to detect stalled irqs
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @last_unhandled:	aging timer for unhandled count
+ * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
  * @affinity:		IRQ affinity on SMP
  * @cpu:		cpu index useful for balancing
@@ -154,6 +159,13 @@ struct irq_chip {
  */
 struct irq_desc {
 	unsigned int		irq;
+#ifdef CONFIG_SPARSE_IRQ
+	struct timer_rand_state *timer_rand_state;
+	unsigned int		*kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu	*irq_2_iommu;
+# endif
+#endif
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;
 	struct msi_desc		*msi_desc;
@@ -165,8 +177,8 @@ struct irq_desc {
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
 	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned int		irqs_unhandled;
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
+	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_t		affinity;
@@ -181,12 +193,51 @@ struct irq_desc {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+				 struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
 
+#ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < nr_irqs) ? irq_desc + irq : NULL;
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	return irq_to_desc(irq);
+}
+
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc)		\
+	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc)	\
+	for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
+static inline struct irq_desc *
+irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+	return irq_to_desc(irq);
+#else
+	return desc;
+#endif
 }
 
 /*
@@ -380,6 +431,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
 #define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
 
+#define get_irq_desc_chip(desc)		((desc)->chip)
+#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
+#define get_irq_desc_data(desc)		((desc)->handler_data)
+#define get_irq_desc_msi(desc)		((desc)->msi_desc)
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
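One consequence of the CONFIG_SPARSE_IRQ split above: the sparse for_each_irq_desc() re-derives desc via irq_to_desc() on every step, so iteration can observe NULL holes in the descriptor space. A minimal sketch of a defensive walk (count_active() is hypothetical, and it assumes the usual irq_desc ->action field):

    static unsigned int count_active(void)
    {
            unsigned int irq, n = 0;
            struct irq_desc *desc;

            for_each_irq_desc(irq, desc) {
                    if (desc && desc->action)   /* desc may be NULL when sparse */
                            n++;
            }
            return n;
    }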
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 452c280c8115..95d2b74641f5 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -1,24 +1,38 @@
 #ifndef _LINUX_IRQNR_H
 #define _LINUX_IRQNR_H
 
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
 #ifndef CONFIG_GENERIC_HARDIRQS
 #include <asm/irq.h>
 # define nr_irqs		NR_IRQS
 
 # define for_each_irq_desc(irq, desc)		\
 	for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc)	\
+	for (irq = nr_irqs - 1; irq >= 0; irq--)
 #else
+
 extern int nr_irqs;
 
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
 # define for_each_irq_desc(irq, desc)		\
 	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
-
-# define for_each_irq_desc_reverse(irq, desc)		\
-	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
-	     irq >= 0; irq--, desc--)
+# define for_each_irq_desc_reverse(irq, desc)	\
+	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
+	     irq >= 0; irq--, desc--)
+#endif
 #endif
 
 #define for_each_irq_nr(irq)			\
 	for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 6002ae76785c..ca9ff6411dfa 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
 		(__x < 0) ? -__x : __x;		\
 	})
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+	might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
 extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
 struct pid;
 extern struct pid *session_of_pgrp(struct pid *pgrp);
 
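The might_fault() hook added above is an annotation: under CONFIG_PROVE_LOCKING it becomes a real function so lockdep can check that the caller may legally take a page fault, and otherwise it decays to might_sleep(). Illustrative use (do_copy_out() is a hypothetical helper, not a kernel function):

    static long do_copy_out(void __user *dst, const void *src, size_t len)
    {
            might_fault();          /* we may sleep servicing a fault */
            return copy_to_user(dst, src, len);
    }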
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 4a145caeee07..4ee4b3d2316f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
 	struct cpu_usage_stat	cpustat;
-	unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+	unsigned int irqs[NR_IRQS];
+#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+	(kstat_this_cpu.irqs[irq])
+
 struct irq_desc;
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 {
 	kstat_this_cpu.irqs[irq]++;
 }
+#endif
+
 
+#ifndef CONFIG_SPARSE_IRQ
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	return kstat_cpu(cpu).irqs[irq];
 }
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
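Either way the accessor keeps the same shape for readers, so a summation loop is unchanged whether the counters live in struct kernel_stat or hang off the sparse irq_desc. Sketch (sum_irq() is hypothetical):

    static unsigned int sum_irq(unsigned int irq)
    {
            unsigned int cpu, sum = 0;

            for_each_possible_cpu(cpu)
                    sum += kstat_irqs_cpu(irq, cpu);
            return sum;
    }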
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 29aec6e10020..23bf02fb124f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
 	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
+#define LOCKSTAT_POINTS		4
+
 /*
  * The lock-class itself:
  */
@@ -119,7 +121,8 @@ struct lock_class {
 	int				name_version;
 
 #ifdef CONFIG_LOCK_STAT
-	unsigned long			contention_point[4];
+	unsigned long			contention_point[LOCKSTAT_POINTS];
+	unsigned long			contending_point[LOCKSTAT_POINTS];
 #endif
 };
 
@@ -144,6 +147,7 @@ enum bounce_type {
 
 struct lock_class_stats {
 	unsigned long			contention_point[4];
+	unsigned long			contending_point[4];
 	struct lock_time		read_waittime;
 	struct lock_time		write_waittime;
 	struct lock_time		read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
 	const char			*name;
 #ifdef CONFIG_LOCK_STAT
 	int				cpu;
+	unsigned long			ip;
 #endif
 };
 
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
 			 unsigned long ip);
 
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
-			      unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+			   struct lock_class_key *key, unsigned int subclass,
+			   unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+		unsigned int subclass, unsigned long ip)
+{
+	lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
@@ -328,6 +340,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
@@ -356,7 +369,7 @@ struct lock_class_key { };
 #ifdef CONFIG_LOCK_STAT
 
 extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
 
 #define LOCK_CONTENDED(_lock, try, lock)			\
 do {								\
@@ -364,20 +377,20 @@ do { \
 		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
 		lock(_lock);					\
 	}							\
-	lock_acquired(&(_lock)->dep_map);			\
+	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
 } while (0)
 
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
 
 #define LOCK_CONTENDED(_lock, try, lock) \
 	lock(_lock)
 
 #endif /* CONFIG_LOCK_STAT */
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+#ifdef CONFIG_GENERIC_HARDIRQS
 extern void early_init_irq_lock_class(void);
 #else
 static inline void early_init_irq_lock_class(void)
@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define lock_map_release(l)			do { } while (0)
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock)						\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
+	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+} while (0)
+# define might_lock_read(lock)						\
+do {									\
+	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
+	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
+	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
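The new might_lock()/might_lock_read() annotations teach lockdep about a dependency even on paths that only conditionally take the lock; the acquire/release pair is recorded but never blocks. An illustrative caller (my_obj and my_update() are hypothetical):

    static int my_update(struct my_obj *obj, int val, bool fast)
    {
            might_lock(&obj->lock);         /* dependency recorded up front */
            if (fast)
                    return obj->cached;     /* this path never locks */

            mutex_lock(&obj->lock);
            obj->cached = val;
            mutex_unlock(&obj->lock);
            return val;
    }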
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fe825471d5aa..9cfc9b627fdd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -232,8 +232,9 @@ struct mm_struct {
 	struct core_state *core_state; /* coredumping support */
 
 	/* aio bits */
-	rwlock_t		ioctx_list_lock;	/* aio lock */
-	struct kioctx		*ioctx_list;
+	spinlock_t		ioctx_lock;
+	struct hlist_head	ioctx_list;
+
 #ifdef CONFIG_MM_OWNER
 	/*
 	 * "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8f2939227207..d2b8a1e8ca11 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,8 +10,11 @@ struct msi_msg {
 };
 
 /* Helper functions */
+struct irq_desc;
 extern void mask_msi_irq(unsigned int irq);
 extern void unmask_msi_irq(unsigned int irq);
+extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bc6da10ceee0..7a0e5c4f8072 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 /*
  * NOTE: mutex_trylock() follows the spin_trylock() convention,
  *       not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
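The convention the new comment pins down, in sketch form (try_fast_path() is hypothetical): nonzero means acquired, matching spin_trylock() and unlike down_trylock():

    static int try_fast_path(struct mutex *m)
    {
            if (!mutex_trylock(m))
                    return -EBUSY;  /* 0 = contended, somebody else holds it */

            /* ... critical section ... */
            mutex_unlock(m);
            return 0;
    }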
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index a7c721355549..4f71bf4e628c 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -45,7 +45,11 @@ struct k_itimer {
45 int it_requeue_pending; /* waiting to requeue this timer */ 45 int it_requeue_pending; /* waiting to requeue this timer */
46#define REQUEUE_PENDING 1 46#define REQUEUE_PENDING 1
47 int it_sigev_notify; /* notify word of sigevent struct */ 47 int it_sigev_notify; /* notify word of sigevent struct */
48 struct task_struct *it_process; /* process to send signal to */ 48 struct signal_struct *it_signal;
49 union {
50 struct pid *it_pid; /* pid of process to send signal to */
51 struct task_struct *it_process; /* for clock_nanosleep */
52 };
49 struct sigqueue *sigq; /* signal queue entry. */ 53 struct sigqueue *sigq; /* signal queue entry. */
50 union { 54 union {
51 struct { 55 struct {
diff --git a/include/linux/random.h b/include/linux/random.h
index 36f125c0c603..adbf3bd3c6b3 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,6 +8,7 @@
 #define _LINUX_RANDOM_H
 
 #include <linux/ioctl.h>
+#include <linux/irqnr.h>
 
 /* ioctl()'s for the random number generator */
 
@@ -44,6 +45,56 @@ struct rand_pool_info {
 
 extern void rand_initialize_irq(int irq);
 
+struct timer_rand_state;
+#ifndef CONFIG_SPARSE_IRQ
+
+extern struct timer_rand_state *irq_timer_state[];
+
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+	if (irq >= nr_irqs)
+		return NULL;
+
+	return irq_timer_state[irq];
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+	if (irq >= nr_irqs)
+		return;
+
+	irq_timer_state[irq] = state;
+}
+
+#else
+
+#include <linux/irq.h>
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc)
+		return NULL;
+
+	return desc->timer_rand_state;
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc)
+		return;
+
+	desc->timer_rand_state = state;
+}
+#endif
+
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
 				 unsigned int value);
 extern void add_interrupt_randomness(int irq);
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 5f89b62e6983..301dda829e37 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
 #include <linux/seqlock.h>
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ) /* for rcp->jiffies_stall */
 #define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 895dc9c1088c..1168fbcea8d4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
52 void (*func)(struct rcu_head *head); 52 void (*func)(struct rcu_head *head);
53}; 53};
54 54
55#ifdef CONFIG_CLASSIC_RCU 55#if defined(CONFIG_CLASSIC_RCU)
56#include <linux/rcuclassic.h> 56#include <linux/rcuclassic.h>
57#else /* #ifdef CONFIG_CLASSIC_RCU */ 57#elif defined(CONFIG_TREE_RCU)
58#include <linux/rcutree.h>
59#elif defined(CONFIG_PREEMPT_RCU)
58#include <linux/rcupreempt.h> 60#include <linux/rcupreempt.h>
59#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ 61#else
62#error "Unknown RCU implementation specified to kernel configuration"
63#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
60 64
61#define RCU_HEAD_INIT { .next = NULL, .func = NULL } 65#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
62#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT 66#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 000000000000..d4368b7975c3
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,329 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *	Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT	(CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ	(RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE	(RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+# define NUM_RCU_LVLS	1
+# define NUM_RCU_LVL_0	1
+# define NUM_RCU_LVL_1	(NR_CPUS)
+# define NUM_RCU_LVL_2	0
+# define NUM_RCU_LVL_3	0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+# define NUM_RCU_LVLS	2
+# define NUM_RCU_LVL_0	1
+# define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_2	(NR_CPUS)
+# define NUM_RCU_LVL_3	0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+# define NUM_RCU_LVLS	3
+# define NUM_RCU_LVL_0	1
+# define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2	(((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_3	NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+	int dynticks_nesting;	/* Track nesting level, sort of. */
+	int dynticks;		/* Even value for dynticks-idle, else odd. */
+	int dynticks_nmi;	/* Even value for either dynticks-idle or */
+				/*  not in nmi handler, else odd.  So this */
+				/*  remains even for nmi from irq handler. */
+};
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+	spinlock_t lock;
+	unsigned long qsmask;	/* CPUs or groups that need to switch in */
+				/*  order for current grace period to proceed.*/
+	unsigned long qsmaskinit;
+				/* Per-GP initialization for qsmask. */
+	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
+	int	grplo;		/* lowest-numbered CPU or group here. */
+	int	grphi;		/* highest-numbered CPU or group here. */
+	u8	grpnum;		/* CPU/group number for next level up. */
+	u8	level;		/* root is at level 0. */
+	struct rcu_node *parent;
+} ____cacheline_internodealigned_in_smp;
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL		3
+#define RCU_NEXT_SIZE		4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+	/* 1) quiescent-state and grace-period handling : */
+	long		completed;	/* Track rsp->completed gp number */
+					/*  in order to detect GP end. */
+	long		gpnum;		/* Highest gp number that this CPU */
+					/*  is aware of having started. */
+	long		passed_quiesc_completed;
+					/* Value of completed at time of qs. */
+	bool		passed_quiesc;	/* User-mode/idle loop etc. */
+	bool		qs_pending;	/* Core waits for quiesc state. */
+	bool		beenonline;	/* CPU online at least once. */
+	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
+	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
+
+	/* 2) batch handling */
+	/*
+	 * If nxtlist is not NULL, it is partitioned as follows.
+	 * Any of the partitions might be empty, in which case the
+	 * pointer to that partition will be equal to the pointer for
+	 * the following partition.  When the list is empty, all of
+	 * the nxttail elements point to nxtlist, which is NULL.
+	 *
+	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+	 *	Entries that might have arrived after current GP ended
+	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+	 *	Entries known to have arrived before current GP ended
+	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+	 *	Entries that batch # <= ->completed - 1: waiting for current GP
+	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+	 *	Entries that batch # <= ->completed
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	struct rcu_head *nxtlist;
+	struct rcu_head **nxttail[RCU_NEXT_SIZE];
+	long		qlen;		/* # of queued callbacks */
+	long		blimit;		/* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+	/* 3) dynticks interface. */
+	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
+	int dynticks_snap;		/* Per-GP tracking for dynticks. */
+	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+	unsigned long offline_fqs;	/* Kicked due to being offline. */
+	unsigned long resched_ipi;	/* Sent a resched IPI. */
+
+	/* 5) state to allow this CPU to force_quiescent_state on others */
+	long n_rcu_pending;		/* rcu_pending() calls since boot. */
+	long n_rcu_pending_force_qs;	/* when to force quiescent states. */
+
+	int cpu;
+};
171
172/* Values for signaled field in struct rcu_state. */
173#define RCU_GP_INIT 0 /* Grace period being initialized. */
174#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
175#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
176#ifdef CONFIG_NO_HZ
177#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
178#else /* #ifdef CONFIG_NO_HZ */
179#define RCU_SIGNAL_INIT RCU_FORCE_QS
180#endif /* #else #ifdef CONFIG_NO_HZ */

#define RCU_JIFFIES_TILL_FORCE_QS	3	/* for rsp->jiffies_force_qs */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ)  /* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)  /* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
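
A note on units, since the macro names say SECONDS: these expressions
evaluate to jiffies, so the real-time value is independent of HZ.

	/* Illustration only: at HZ == 1000, RCU_SECONDS_TILL_STALL_CHECK */
	/* is 10 * 1000 == 10000 jiffies, i.e. ten seconds of real time. */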

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	signaled ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	long	gpnum;				/* Current gp number. */
	long	completed;			/* # of last completed gp. */
	spinlock_t onofflock;			/* exclude on/offline and */
						/*  starting new GP. */
	spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
#ifdef CONFIG_NO_HZ
	long dynticks_completed;		/* Value of completed @ snap. */
#endif /* #ifdef CONFIG_NO_HZ */
};
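
To make the dense-array layout concrete, a worked illustration (not part
of the patch): with CONFIG_RCU_FANOUT = 64 and NR_CPUS = 128 there are
two levels.  The root is ->node[0] (referenced by ->level[0]); the two
leaves are ->node[1] and ->node[2] (referenced by ->level[1]), each
covering up to 64 CPUs.

	/* Illustration only: leaf count for NR_CPUS = 128, fanout = 64. */
	#define EXAMPLE_NUM_LEAVES	DIV_ROUND_UP(128, 64)	/* == 2 */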

extern struct rcu_state rcu_state;
DECLARE_PER_CPU(struct rcu_data, rcu_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent-state counter.  The counter is somewhat
 * degenerate: we do not need to know how many quiescent states have
 * passed, only whether at least one has occurred since the start of
 * the grace period.  Thus it is just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}
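
A hypothetical caller of these helpers, to show where the flag gets set
(the hook name below is illustrative; the real call sites are the
scheduler and rcu_check_callbacks()):

	static inline void example_note_context_switch(int cpu)
	{
		rcu_qsctr_inc(cpu);	/* Context switch is a quiescent state. */
		rcu_bh_qsctr_inc(cpu);	/* It also quiesces RCU-bh readers. */
	}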

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	\
			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

static inline void __rcu_read_lock(void)
{
	preempt_disable();
	__acquire(RCU);
	rcu_read_acquire();
}
static inline void __rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	preempt_enable();
}
static inline void __rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_read_acquire();
}
static inline void __rcu_read_unlock_bh(void)
{
	rcu_read_release();
	__release(RCU_BH);
	local_bh_enable();
}
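
A sketch of the read side built on these helpers (illustrative only:
struct example_data is hypothetical, and readers normally go through
rcu_read_lock()/rcu_read_unlock() from rcupdate.h, which map onto the
helpers above):

	struct example_data { int val; };

	static inline int example_read_val(struct example_data **gpp)
	{
		struct example_data *p;
		int val;

		rcu_read_lock();		/* Disables preemption, tells lockdep. */
		p = rcu_dereference(*gpp);	/* Fetch RCU-protected pointer. */
		val = p ? p->val : -1;
		rcu_read_unlock();
		return val;
	}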

#define __synchronize_sched() synchronize_rcu()

#define call_rcu_sched(head, func) call_rcu(head, func)

static inline void rcu_init_sched(void)
{
}

extern void __rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);

extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

#ifdef CONFIG_NO_HZ
void rcu_enter_nohz(void);
void rcu_exit_nohz(void);
#else /* CONFIG_NO_HZ */
static inline void rcu_enter_nohz(void)
{
}
static inline void rcu_exit_nohz(void)
{
}
#endif /* CONFIG_NO_HZ */

#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b18ec5533e8c..325af1de0351 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,9 +7,31 @@ struct device;
 struct dma_attrs;
 struct scatterlist;
 
+/*
+ * Maximum allowable number of contiguous slabs to map;
+ * must be a power of 2.  What is the appropriate value?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE	128
+
+
+/*
+ * log of the size of each IO TLB slab.  The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
 extern void
 swiotlb_init(void);
 
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
+extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);
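
The two constants added above fix the bounce-buffer geometry; checking
the arithmetic with illustrative defines (not from the patch): each slab
is 1 << IO_TLB_SHIFT bytes, and one contiguous mapping is capped at
IO_TLB_SEGSIZE slabs.

	#define EXAMPLE_IO_TLB_SLAB_SIZE	(1UL << 11)	     /* 2 KiB per slab */
	#define EXAMPLE_MAX_CONTIG_MAPPING	(128 * (1UL << 11))  /* 256 KiB total */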
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9007313b5b71..998a55d80acf 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -53,47 +53,11 @@
 #ifndef _LINUX_TIMEX_H
 #define _LINUX_TIMEX_H
 
-#include <linux/compiler.h>
 #include <linux/time.h>
 
-#include <asm/param.h>
-
 #define NTP_API		4	/* NTP API version */
 
 /*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic.  SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
- *
- * MAXTC establishes the maximum time constant of the PLL.  With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
- */
-#define SHIFT_PLL	4	/* PLL frequency factor (shift) */
-#define SHIFT_FLL	2	/* FLL frequency factor (shift) */
-#define MAXTC		10	/* maximum time constant (shift) */
-
-/*
- * SHIFT_USEC defines the scaling (shift) of the time_freq and
- * time_tolerance variables, which represent the current frequency
- * offset and maximum frequency tolerance.
- */
-#define SHIFT_USEC	16	/* frequency offset scale (shift) */
-#define PPM_SCALE	(NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT	19
-#define PPM_SCALE_INV	((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
-			 PPM_SCALE + 1)
-
-#define MAXPHASE	500000000l	/* max phase error (ns) */
-#define MAXFREQ	500000		/* max frequency error (ns/s) */
-#define MAXFREQ_SCALED	((s64)MAXFREQ << NTP_SCALE_SHIFT)
-#define MINSEC	256		/* min interval between updates (s) */
-#define MAXSEC	2048		/* max interval between updates (s) */
-#define NTP_PHASE_LIMIT	((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-
-/*
  * syscall interface - used (mainly by NTP daemon)
  * to discipline kernel clock oscillator
  */
@@ -199,9 +163,46 @@ struct timex {
 #define TIME_BAD	TIME_ERROR	/* bw compat */
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
+
 #include <asm/timex.h>
 
 /*
+ * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+ * for a slightly underdamped convergence characteristic.  SHIFT_KH
+ * establishes the damping of the FLL and is chosen by wisdom and black
+ * art.
+ *
+ * MAXTC establishes the maximum time constant of the PLL.  With the
+ * SHIFT_KG and SHIFT_KF values given and a time constant range from
+ * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+ * respectively.
+ */
+#define SHIFT_PLL	4	/* PLL frequency factor (shift) */
+#define SHIFT_FLL	2	/* FLL frequency factor (shift) */
+#define MAXTC		10	/* maximum time constant (shift) */
+
+/*
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ */
+#define SHIFT_USEC	16	/* frequency offset scale (shift) */
+#define PPM_SCALE	(NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT	19
+#define PPM_SCALE_INV	((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+			 PPM_SCALE + 1)
+
+#define MAXPHASE	500000000l	/* max phase error (ns) */
+#define MAXFREQ	500000		/* max frequency error (ns/s) */
+#define MAXFREQ_SCALED	((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC	256		/* min interval between updates (s) */
+#define MAXSEC	2048		/* max interval between updates (s) */
+#define NTP_PHASE_LIMIT	((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
+
+/*
  * kernel variables
  * Note: maximum error = NTP synch distance = dispersion + delay / 2;
  * estimated error = NTP dispersion.
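
The PPM_SCALE family moved above converts adjtimex(2)'s frequency
values, which carry a 16-bit fractional part per SHIFT_USEC, into the
kernel's internal NTP_SCALE_SHIFT-scaled representation.  A sketch of
that conversion (the helper name is illustrative; the actual use is in
the ntp code):

	static inline s64 example_freq_to_internal(long user_freq)
	{
		/* Scaled ppm in; ns/s shifted by NTP_SCALE_SHIFT out. */
		return (s64)user_freq * PPM_SCALE;
	}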
diff --git a/include/linux/types.h b/include/linux/types.h
index 1d98330b1f2c..121f349cb7ec 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,19 +135,14 @@ typedef __s64 int64_t;
  *
  * Linux always considers sectors to be 512 bytes long independently
  * of the devices real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
  */
 #ifdef CONFIG_LBD
 typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
-
-/*
- * The type of the inode's block count.
- */
-#ifdef CONFIG_LSF
 typedef u64 blkcnt_t;
 #else
+typedef unsigned long sector_t;
 typedef unsigned long blkcnt_t;
 #endif
 
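
Since the hunk above keys both typedefs to a single option (CONFIG_LBD),
sector_t and blkcnt_t are now always the same width.  An illustration of
the invariant stated in the comment (hypothetical helper):

	static inline u64 example_sector_to_bytes(sector_t s)
	{
		return (u64)s << 9;	/* Sectors are always 512 bytes. */
	}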
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index fec6decfb983..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
 		\
 	set_fs(KERNEL_DS);			\
 	pagefault_disable();			\
-	ret = __get_user(retval, (__force typeof(retval) __user *)(addr));	\
+	ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));	\
 	pagefault_enable();			\
 	set_fs(old_fs);				\
 	ret;					\
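
This hunk swaps __get_user() for __copy_from_user_inatomic() in what the
surrounding context suggests is the probe_kernel_address() macro:
copying sizeof(retval) bytes lets a possibly-bad kernel pointer be
sampled safely even for objects larger than __get_user() handles.  A
hypothetical use (struct example_wide is illustrative):

	struct example_wide { unsigned long a, b; };	/* wider than one word */

	static inline int example_probe(struct example_wide *addr)
	{
		struct example_wide val;

		/* Returns 0 on success, -EFAULT if addr faults. */
		return probe_kernel_address(addr, val);
	}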