Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bio.h                  13
-rw-r--r--  include/linux/blk-mq.h                2
-rw-r--r--  include/linux/blkdev.h                3
-rw-r--r--  include/linux/cpufreq.h               4
-rw-r--r--  include/linux/elevator.h              3
-rw-r--r--  include/linux/fs.h                    6
-rw-r--r--  include/linux/kernfs.h                2
-rw-r--r--  include/linux/mlx4/device.h           4
-rw-r--r--  include/linux/mutex.h                 4
-rw-r--r--  include/linux/nmi.h                  12
-rw-r--r--  include/linux/of_mdio.h               8
-rw-r--r--  include/linux/osq_lock.h             27
-rw-r--r--  include/linux/page-flags.h            3
-rw-r--r--  include/linux/percpu-defs.h           4
-rw-r--r--  include/linux/phy.h                   9
-rw-r--r--  include/linux/profile.h               1
-rw-r--r--  include/linux/ptrace.h                3
-rw-r--r--  include/linux/rcupdate.h             46
-rw-r--r--  include/linux/regulator/consumer.h    5
-rw-r--r--  include/linux/rwsem-spinlock.h        8
-rw-r--r--  include/linux/rwsem.h                34
-rw-r--r--  include/linux/sched.h                 8
-rw-r--r--  include/linux/socket.h                4
-rw-r--r--  include/linux/suspend.h               2
-rw-r--r--  include/linux/uio.h                  19
-rw-r--r--  include/linux/usb_usual.h             4
26 files changed, 144 insertions(+), 94 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a645769f020..d2633ee099d9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
@@ -644,10 +653,6 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-
-
-#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
-
 #define bip_for_each_vec(bvl, bip, iter) \
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
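
Note: the new bvec_gap_to_prev() helper reports whether appending a bio_vec at the given offset would leave a hole in the scatter-gather list — either the new vec does not start at offset 0, or the previous vec does not end on a page boundary. A minimal sketch of how a merge path could consult it together with the QUEUE_FLAG_SG_GAPS bit added below in blkdev.h (the helper name can_append_bvec is invented for illustration):

static bool can_append_bvec(struct request_queue *q,
			    struct bio_vec *prev, unsigned int offset)
{
	/* queues that cannot DMA across SG gaps must refuse the merge */
	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
	    bvec_gap_to_prev(prev, offset))
		return false;
	return true;
}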
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a002cf191427..eb726b9c5762 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx {
 	unsigned int		nr_ctx;
 	struct blk_mq_ctx	**ctxs;
 
-	unsigned int		wait_index;
+	atomic_t		wait_index;
 
 	struct blk_mq_tags	*tags;
 
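
Note: wait_index becomes an atomic_t because multiple CPUs waiting for tags bump it concurrently to spread themselves over the per-hardware-queue wait queues. A sketch of the idea (function name invented; the real tag code lives in block/blk-mq-tag.c):

static unsigned int next_wait_index(atomic_t *wait_index, unsigned int nr_queues)
{
	/* atomic_inc_return() hands each waiter a distinct slot; a plain
	 * unsigned int increment could be lost under concurrency and
	 * funnel every waiter onto the same queue */
	return (unsigned int)atomic_inc_return(wait_index) % nr_queues;
}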
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 31e11051f1ba..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -512,6 +512,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -920,7 +921,7 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 					  sector_t offset)
 {
 	if (!q->limits.chunk_sectors)
-		return q->limits.max_hw_sectors;
+		return q->limits.max_sectors;
 
 	return q->limits.chunk_sectors -
 			(offset & (q->limits.chunk_sectors - 1));
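
Note: blk_max_size_offset() relies on chunk_sectors being a power of two so the room left in the current chunk reduces to a mask. Worked example: with chunk_sectors = 256 and an I/O starting at sector 1000, 1000 & 255 = 232, so at most 256 - 232 = 24 sectors fit before the chunk boundary. The hunk also caps the unchunked case at max_sectors (the user-visible limit) instead of max_hw_sectors (the hardware ceiling), so configured queue limits are honored.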
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *********************************************************************/
 
 /* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID	~0
-#define CPUFREQ_TABLE_END	~1
+#define CPUFREQ_ENTRY_INVALID	~0u
+#define CPUFREQ_TABLE_END	~1u
 /* Special Values of .flags field */
 #define CPUFREQ_BOOST_FREQ	(1 << 0)
 
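
Note: the added 'u' suffixes matter because the .frequency field these sentinels are compared against is an unsigned int. A small illustration of the difference once a wider type is involved:

	/* ~0 is a signed int (-1); ~0u is an unsigned int (0xffffffff) */
	u64 wide_signed   = ~0;		/* sign-extends to 0xffffffffffffffff */
	u64 wide_unsigned = ~0u;	/* zero-extends to 0x00000000ffffffff */

Comparing an unsigned frequency against the signed form can also draw -Wsign-compare warnings; the unsigned sentinels keep the comparisons clean.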
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4ff262e2bf37..45a91474487d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
-extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
			   struct bio *bio, gfp_t gfp_mask);
@@ -144,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int __init elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 338e6f758c6d..e11d60cc867b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1921,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 
 static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
+	/*
+	 * Since this check is lockless, we must ensure that any refcounts
+	 * taken are done before checking inode->i_flock. Otherwise, we could
+	 * end up racing with tasks trying to set a new lease on this file.
+	 */
+	smp_mb();
 	if (inode->i_flock)
 		return __break_lease(inode, mode, FL_DELEG);
 	return 0;
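
Note: the smp_mb() makes the lockless i_flock check safe against a concurrent lease setter. The delegation breaker's reference-count store must be visible before its i_flock load, so at least one side always sees the other. Schematically (a sketch of the store-buffering pattern, not the literal call sites):

	breaker					lease setter
	-------					------------
	take reference to inode			inode->i_flock = lease;
	smp_mb();				barrier/unlock on that side
	if (inode->i_flock)			recheck for new references
		__break_lease(...);

Without the barrier, the i_flock load could be reordered before the reference store and both tasks could miss each other.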
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 17aa1cce6f8e..30faf797c2c3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -91,6 +91,7 @@ struct kernfs_elem_attr {
 	const struct kernfs_ops	*ops;
 	struct kernfs_open_node	*open;
 	loff_t			size;
+	struct kernfs_node	*notify_next;	/* for kernfs_notify() */
 };
 
 /*
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
 			       struct kernfs_root *root, unsigned long magic,
 			       bool *new_sb_created, const void *ns);
 void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
 
 void kernfs_init(void);
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bbd064c..35b51e7af886 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@ struct mlx4_cq {
 	u32			cons_index;
 
 	u16			irq;
-	bool			irq_affinity_change;
-
 	__be32		       *set_ci_db;
 	__be32		       *arm_db;
 	int			arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 		   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_queue	*osq;	/* Spinner MCS lock */
+	struct optimistic_spin_queue	osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
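
Note: because struct optimistic_spin_queue is now just an atomic_t (see the new osq_lock.h below), it is embedded in the mutex by value; the forward declaration and pointer are gone, and initialization switches from NULL-ing a pointer to osq_lock_init(). A sketch of the resulting init sequence (illustrative, following the usual __mutex_init() shape):

static void mutex_init_sketch(struct mutex *lock)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);	/* was: lock->osq = NULL */
#endif
}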
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 6a45fb583ff1..447775ee2c4b 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-	arch_trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace(true);
 
 	return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace(false);
+	return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
 	return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
			    void __user *, size_t *, loff_t *);
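
Note: trigger_allbutself_cpu_backtrace() exists so the softlockup watchdog can dump every other CPU's stack while the stuck CPU prints its own, gated by the new sysctl. A sketch of the consumer (the wrapper name is illustrative; the real logic lives in kernel/watchdog.c):

static void report_softlockup_sketch(void)
{
	dump_stack();	/* the CPU that detected the lockup */
	if (sysctl_softlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();	/* everyone else */
}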
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c9493d55a..d449018d0726 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
-				   struct phy_device *phydev);
-
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
 	return NULL;
 }
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
-					  struct phy_device *phydev)
-{
-}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+	/*
+	 * Stores an encoded value of the CPU # of the tail node in the queue.
+	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+	 */
+	atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
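
Note: storing an encoded CPU number instead of a node pointer is what shrinks the OSQ to a single atomic_t. Since OSQ_UNLOCKED_VAL (0) means "queue empty", the queueing code must bias real CPU numbers so CPU 0 stays representable. A sketch of the assumed encoding (the actual helpers live in the locking core, not this header):

static inline int osq_encode_cpu_sketch(int cpu_nr)
{
	return cpu_nr + 1;	/* 0 stays reserved for "empty" */
}

static inline int osq_decode_cpu_sketch(int encoded_val)
{
	return encoded_val - 1;
}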
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3c545b48aeab..8304959ad336 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
 	ClearPageHead(page);
 }
 #endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
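
Note: PG_head_mask lets code test the compound-head bit with plain bit arithmetic on a flags value it has already sampled, rather than re-reading the page via PageHead(). A hedged illustration:

static inline bool flags_mark_head_sketch(unsigned long flags)
{
	/* consistent check against a snapshot of page->flags */
	return flags & PG_head_mask;
}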
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a5fc7d01aad6..dec01d6c3f80 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -146,10 +146,10 @@
  * Declaration/definition used for per-CPU variables that must be read mostly.
  */
 #define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
-	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
-	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
+	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 /*
  * Intermodule exports for per-CPU variables. sparse forgets about
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 864ddafad8cc..68041446c450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -536,6 +536,15 @@ struct phy_driver {
 	/* See set_wol, but for checking whether Wake on LAN is enabled. */
 	void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
 
+	/*
+	 * Called to inform a PHY device driver when the core is about to
+	 * change the link state. This callback is supposed to be used as
+	 * fixup hook for drivers that need to take action when the link
+	 * state changes. Drivers are by no means allowed to mess with the
+	 * PHY device structure in their implementations.
+	 */
+	void (*link_change_notify)(struct phy_device *dev);
+
 	struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
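
Note: link_change_notify() gives PHY drivers a pre-link-change fixup point; per the comment they may poke their own hardware but must not modify the phy_device itself. A hypothetical driver hook (driver name and behavior invented for illustration):

static void mydrv_link_change_notify(struct phy_device *dev)
{
	/* e.g. apply an erratum workaround before the state machine
	 * transitions; read-only with respect to *dev */
	if (dev->state == PHY_NOLINK)
		pr_debug("mydrv: link going down\n");
}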
diff --git a/include/linux/profile.h b/include/linux/profile.h
index aaad3861beb8..b537a25ffa17 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly;
 int profile_init(void);
 int profile_setup(char *str);
 void profile_tick(int type);
+int setup_profiling_timer(unsigned int multiplier);
 
 /*
  * Add multiple profiler hits to a given address:
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 077904c8b70d..cc79eff4a1ad 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
  * calling arch_ptrace_stop() when it would be superfluous. For example,
  * if the thread has not been back to user mode since the last stop, the
  * thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
  */
 #define arch_ptrace_stop_needed(code, info)	(0)
 #endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
 /*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count. We might even clobber some other CPU's attempt
- * to zero its counter. This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-	return raw_cpu_inc_return(rcu_cond_resched_count) >=
-	       RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiescent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-	if (unlikely(rcu_should_resched()))
-		rcu_resched();
-}
-
-/*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index a2d9d81038d1..14ec18d5e18b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers,
 {
 }
 
+static inline int regulator_can_change_voltage(struct regulator *regulator)
+{
+	return 0;
+}
+
 static inline int regulator_set_voltage(struct regulator *regulator,
					 int min_uV, int max_uV)
 {
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
 #ifdef __KERNEL__
 /*
  * the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
  * - if wait_list is not empty, then there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-	__s32			activity;
+	__s32			count;
 	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
 
-struct optimistic_spin_queue;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
 	long count;
-	raw_spinlock_t wait_lock;
 	struct list_head wait_list;
-#ifdef CONFIG_SMP
+	raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 	/*
 	 * Write owner. Used as a speculative check to see
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
-	struct optimistic_spin_queue *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name)			\
-	{ RWSEM_UNLOCKED_VALUE,				\
-	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
-	  LIST_HEAD_INIT((name).wait_list),		\
-	  NULL, /* owner */				\
-	  NULL /* mcs lock */				\
-	  __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
 #else
-#define __RWSEM_INITIALIZER(name)			\
-	{ RWSEM_UNLOCKED_VALUE,				\
-	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
-	  LIST_HEAD_INIT((name).wait_list)		\
-	  __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name)				\
+	{ .count = RWSEM_UNLOCKED_VALUE,			\
+	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
+	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
+	  __RWSEM_OPT_INIT(name)				\
+	  __RWSEM_DEP_MAP_INIT(name) }
+
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
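
Note: with the designated-initializer rewrite, member order in the struct no longer has to match the initializer, and the optimistic-spinning fields are spliced in via __RWSEM_OPT_INIT (which deliberately starts with a comma). Expanded by hand, DECLARE_RWSEM(foo) with CONFIG_RWSEM_SPIN_ON_OWNER (lockdep omitted) comes out to roughly:

struct rw_semaphore foo = {
	.count     = RWSEM_UNLOCKED_VALUE,
	.wait_list = LIST_HEAD_INIT(foo.wait_list),
	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(foo.wait_lock),
	.osq       = OSQ_LOCK_UNLOCKED,
	.owner     = NULL,
};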
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..0376b054a0d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@ enum cpu_idle_type {
 #define SD_NUMA			0x4000	/* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
 	return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
 	return SD_NUMA;
 }
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP		0x01
 
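
Note: a const qualifier on a by-value return type is meaningless and, with -Wignored-qualifiers, noisy; dropping it also keeps the topology callbacks assignable to the typedef without a type mismatch. Illustration:

/* matches the corrected typedef: int (*sched_domain_flags_f)(void) */
static int numa_flags_sketch(void)
{
	return SD_NUMA;
}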
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8e98297f1388..ec538fc287a6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,8 +305,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE	1
 
-extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-			       int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
					   struct iovec *iov,
					   int offset,
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
 			      unsigned long nr_segs);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-			     int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index f76994b9396c..519064e0c943 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
+extern bool hibernation_available(void);
 asmlinkage int swsusp_save(void);
 extern struct pbe *restore_pblist;
 #else /* CONFIG_HIBERNATION */
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {}
 static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
+static inline bool hibernation_available(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
 /* Hibernation and suspend events */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e2231e47cec1..09a7cffc224e 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
 	return i->count;
 }
 
-static inline void iov_iter_truncate(struct iov_iter *i, size_t count)
+/*
+ * Cap the iov_iter by given limit; note that the second argument is
+ * *not* the new size - it's upper limit for such. Passing it a value
+ * greater than the amount of data in iov_iter is fine - it'll just do
+ * nothing in that case.
+ */
+static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
 {
+	/*
+	 * count doesn't have to fit in size_t - comparison extends both
+	 * operands to u64 here and any value that would be truncated by
+	 * conversion in assignment is by definition greater than all
+	 * values of size_t, including old i->count.
+	 */
 	if (i->count > count)
 		i->count = count;
 }
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
-
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+			int offset, int len);
+int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+		      int offset, int len);
 
 #endif
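
Note: widening the iov_iter_truncate() parameter to u64 matters on 32-bit, where size_t is 32 bits and callers commonly pass limits derived from loff_t. Worked example (sketch):

	loff_t limit = 0x100000000ULL;	/* 4 GiB */
	/* old size_t prototype: (size_t)limit == 0, iterator wrongly
	 * truncated to nothing;
	 * new u64 prototype: 4 GiB > i->count, correctly a no-op */
	iov_iter_truncate(&iter, limit);

The memcpy_*iovecend() declarations moved here from linux/socket.h change no behavior; they now simply live with the other iovec helpers.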
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 1a64b26046ed..9b7de1b46437 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -70,7 +70,9 @@
 		US_FLAG(NEEDS_CAP16,	0x00400000)		\
 		/* cannot handle READ_CAPACITY_10 */		\
 		US_FLAG(IGNORE_UAS,	0x00800000)		\
-		/* Device advertises UAS but it is broken */
+		/* Device advertises UAS but it is broken */	\
+		US_FLAG(BROKEN_FUA,	0x01000000)		\
+		/* Cannot handle FUA in WRITE or READ CDBs */	\
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
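
Note: US_FL_BROKEN_FUA marks devices that choke on the Force Unit Access bit. A sketch of the assumed consumer wiring (the flag would be translated for the SCSI layer so FUA is never set in WRITE CDBs; the paired scsi_device field is an assumption of this sketch):

static void slave_configure_sketch(struct scsi_device *sdev,
				   unsigned long fflags)
{
	if (fflags & US_FL_BROKEN_FUA)
		sdev->broken_fua = 1;	/* assumed scsi_device flag */
}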