author     Ingo Molnar <mingo@kernel.org>    2014-07-17 05:45:29 -0400
committer  Ingo Molnar <mingo@kernel.org>    2014-07-17 05:45:29 -0400
commit     b5e4111f027c4be85dbe97e090530d03c55c4cf4
tree       11e0a37cb59314f4e9a7b2810124a4a7a33140e5  /include/linux
parent     72d5305dcb3637913c2c37e847a4de9028e49244
parent     9de8033f1bbcce5ed23fe5da9ca1a5060207f7ed
Merge branch 'locking/urgent' into locking/core, before applying larger changes and to refresh the branch with fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux')
 include/linux/bio.h                |   13
 include/linux/blk-mq.h             |    2
 include/linux/blkdev.h             |    3
 include/linux/elevator.h           |    3
 include/linux/fs.h                 |    6
 include/linux/kernfs.h             |    2
 include/linux/mutex.h              |    4
 include/linux/nmi.h                |   12
 include/linux/osq_lock.h           |   27
 include/linux/page-flags.h         |    3
 include/linux/percpu-defs.h        |    4
 include/linux/phy.h                |    9
 include/linux/profile.h            |    1
 include/linux/ptrace.h             |    3
 include/linux/regulator/consumer.h |    5
 include/linux/rwsem-spinlock.h     |    8
 include/linux/rwsem.h              |   34
 include/linux/socket.h             |    4
 include/linux/suspend.h            |    2
 include/linux/uio.h                |   19
 include/linux/usb_usual.h          |    4
 21 files changed, 126 insertions(+), 42 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a645769f020..d2633ee099d9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
 	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
@@ -644,10 +653,6 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-
-
-#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
-
 #define bip_for_each_vec(bvl, bip, iter)	\
 	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
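The new bvec_gap_to_prev() helper only looks at page-offset alignment: the next segment must start at offset 0 and the previous one must end exactly on a page boundary, otherwise a gap would appear in the scatter-gather list. Below is a minimal, self-contained sketch of that check (a userspace re-implementation with invented bio_vec values and PAGE_SIZE assumed to be 4096), not code from the patch:

```c
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Only the two fields the gap check actually reads. */
struct bio_vec {
	unsigned int bv_offset;
	unsigned int bv_len;
};

/* Same logic as the new helper: a gap exists unless the new vec starts at
 * page offset 0 and the previous vec ends exactly on a page boundary. */
static bool bvec_gap_to_prev(const struct bio_vec *bprv, unsigned int offset)
{
	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
}

int main(void)
{
	struct bio_vec full_page = { .bv_offset = 0,   .bv_len = 4096 };
	struct bio_vec partial   = { .bv_offset = 512, .bv_len = 1024 };

	printf("%d\n", bvec_gap_to_prev(&full_page, 0)); /* 0: no gap */
	printf("%d\n", bvec_gap_to_prev(&partial, 0));   /* 1: prev ends mid-page */
	printf("%d\n", bvec_gap_to_prev(&full_page, 8)); /* 1: next starts mid-page */
	return 0;
}
```

Queues that cannot tolerate such gaps advertise it via the new QUEUE_FLAG_SG_GAPS flag added to blkdev.h below.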
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a002cf191427..eb726b9c5762 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx {
 	unsigned int		nr_ctx;
 	struct blk_mq_ctx	**ctxs;
 
-	unsigned int		wait_index;
+	atomic_t		wait_index;
 
 	struct blk_mq_tags	*tags;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 31e11051f1ba..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -512,6 +512,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -920,7 +921,7 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 					       sector_t offset)
 {
 	if (!q->limits.chunk_sectors)
-		return q->limits.max_hw_sectors;
+		return q->limits.max_sectors;
 
 	return q->limits.chunk_sectors -
 			(offset & (q->limits.chunk_sectors - 1));
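The blk_max_size_offset() change only swaps the fallback limit (max_sectors instead of max_hw_sectors) when no chunk size is set; the chunk arithmetic itself is untouched. A worked example of that arithmetic with invented numbers, assuming chunk_sectors is a power of two as the mask requires:

```c
#include <stdio.h>

/* Worked example of the blk_max_size_offset() chunk arithmetic with made-up
 * values: 128 KiB chunks (256 sectors) and an I/O starting at sector 300. */
int main(void)
{
	unsigned int chunk_sectors = 256;   /* q->limits.chunk_sectors */
	unsigned long long offset = 300;    /* starting sector of the request */
	unsigned int max = chunk_sectors - (offset & (chunk_sectors - 1));

	/* 300 & 255 = 44, so 212 sectors remain before the chunk boundary. */
	printf("max = %u sectors\n", max);
	return 0;
}
```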
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4ff262e2bf37..45a91474487d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
-extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
 			   struct bio *bio, gfp_t gfp_mask);
@@ -144,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int __init elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 338e6f758c6d..e11d60cc867b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1921,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 
 static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
+	/*
+	 * Since this check is lockless, we must ensure that any refcounts
+	 * taken are done before checking inode->i_flock. Otherwise, we could
+	 * end up racing with tasks trying to set a new lease on this file.
+	 */
+	smp_mb();
 	if (inode->i_flock)
 		return __break_lease(inode, mode, FL_DELEG);
 	return 0;
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 17aa1cce6f8e..30faf797c2c3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -91,6 +91,7 @@ struct kernfs_elem_attr {
 	const struct kernfs_ops	*ops;
 	struct kernfs_open_node	*open;
 	loff_t			size;
+	struct kernfs_node	*notify_next;	/* for kernfs_notify() */
 };
 
 /*
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
 			       struct kernfs_root *root, unsigned long magic,
 			       bool *new_sb_created, const void *ns);
 void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
 
 void kernfs_init(void);
 
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  *   - detects multi-task circular deadlocks and prints out all affected
  *     locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_queue	*osq;	/* Spinner MCS lock */
+	struct optimistic_spin_queue	osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 6a45fb583ff1..447775ee2c4b 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-	arch_trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace(true);
 
 	return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	arch_trigger_all_cpu_backtrace(false);
+	return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
 	return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+	return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
 			   void __user *, size_t *, loff_t *);
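The new trigger_allbutself_cpu_backtrace() variant is meant to pair with the sysctl_softlockup_all_cpu_backtrace knob exported in the same patch. A hedged sketch of how a lockup report could use it (helper name invented, not the actual watchdog code):

```c
#include <linux/nmi.h>
#include <linux/printk.h>

/* Hypothetical reporting helper: dump the stuck CPU's own stack, then, if
 * the new sysctl is enabled, NMI-backtrace every *other* CPU so the current
 * one is not dumped twice. */
static void report_soft_lockup(void)
{
	dump_stack();
	if (sysctl_softlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();
}
```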
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+	/*
+	 * Stores an encoded value of the CPU # of the tail node in the queue.
+	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+	 */
+	atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
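With this header the OSQ becomes a small value type (a single atomic_t encoding the tail CPU), so sleeping locks can embed it directly rather than carry a pointer, which is exactly what the mutex.h hunk above and the rwsem.h hunks below switch to. An illustrative embedding (type and function names invented):

```c
#include <linux/atomic.h>
#include <linux/osq_lock.h>

/* Hypothetical sleeping lock that embeds the per-lock OSQ by value. */
struct my_sleeping_lock {
	atomic_t			count;
	struct optimistic_spin_queue	osq;	/* queue of optimistic spinners */
};

static inline void my_sleeping_lock_init(struct my_sleeping_lock *lock)
{
	atomic_set(&lock->count, 1);
	osq_lock_init(&lock->osq);	/* tail = OSQ_UNLOCKED_VAL: empty queue */
}
```

Static initializers use OSQ_LOCK_UNLOCKED instead, as the reworked __RWSEM_INITIALIZER below shows.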
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3c545b48aeab..8304959ad336 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
 	ClearPageHead(page);
 }
 #endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
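PG_head_mask gives callers a plain bit mask for the head-page flag. A small hedged illustration of testing it against a raw flags word (assuming a configuration where PG_head exists; helper name invented):

```c
/* Hypothetical predicate: does a raw page->flags value have the head bit set?
 * Assumes a kernel configuration in which PG_head (and thus PG_head_mask)
 * is defined. */
static inline int flags_have_head_bit(unsigned long flags)
{
	return !!(flags & PG_head_mask);
}
```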
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a5fc7d01aad6..dec01d6c3f80 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -146,10 +146,10 @@
  * Declaration/definition used for per-CPU variables that must be read mostly.
  */
 #define DECLARE_PER_CPU_READ_MOSTLY(type, name)			\
-	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
-	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
+	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 /*
  * Intermodule exports for per-CPU variables. sparse forgets about
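The hunk only renames the linker section the READ_MOSTLY macros emit into (..readmostly becomes ..read_mostly); how they are used does not change. For reference, a hypothetical read-mostly per-CPU variable (all names invented):

```c
#include <linux/percpu.h>
#include <linux/percpu-defs.h>

/* Declaration (normally in a header) and definition of a per-CPU variable
 * that is written rarely but read on hot paths. */
DECLARE_PER_CPU_READ_MOSTLY(int, my_fast_path_enabled);
DEFINE_PER_CPU_READ_MOSTLY(int, my_fast_path_enabled);

static inline int my_fast_path_active(void)
{
	return this_cpu_read(my_fast_path_enabled);
}
```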
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 864ddafad8cc..68041446c450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -536,6 +536,15 @@ struct phy_driver {
 	/* See set_wol, but for checking whether Wake on LAN is enabled. */
 	void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
 
+	/*
+	 * Called to inform a PHY device driver when the core is about to
+	 * change the link state. This callback is supposed to be used as
+	 * fixup hook for drivers that need to take action when the link
+	 * state changes. Drivers are by no means allowed to mess with the
+	 * PHY device structure in their implementations.
+	 */
+	void (*link_change_notify)(struct phy_device *dev);
+
 	struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
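A hypothetical driver-side use of the new link_change_notify callback; per the comment it may observe the pending link change but must not modify the phy_device. The driver name and PHY ID below are invented:

```c
#include <linux/phy.h>
#include <linux/printk.h>

/* Invented example driver: log when the core is about to change the link
 * state; the phy_device is treated as read-only here. */
static void example_link_change_notify(struct phy_device *dev)
{
	pr_info("example-phy: link change pending (link=%d, speed=%d)\n",
		dev->link, dev->speed);
}

static struct phy_driver example_phy_driver = {
	.phy_id			= 0x12345678,	/* made-up PHY ID */
	.phy_id_mask		= 0xffffffff,
	.name			= "Example PHY",
	.link_change_notify	= example_link_change_notify,
};
```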
diff --git a/include/linux/profile.h b/include/linux/profile.h
index aaad3861beb8..b537a25ffa17 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly;
 int profile_init(void);
 int profile_setup(char *str);
 void profile_tick(int type);
+int setup_profiling_timer(unsigned int multiplier);
 
 /*
  * Add multiple profiler hits to a given address:
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 077904c8b70d..cc79eff4a1ad 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
  * calling arch_ptrace_stop() when it would be superfluous.  For example,
  * if the thread has not been back to user mode since the last stop, the
  * thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
  */
 #define arch_ptrace_stop_needed(code, info)	(0)
 #endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index a2d9d81038d1..14ec18d5e18b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers,
 {
 }
 
+static inline int regulator_can_change_voltage(struct regulator *regulator)
+{
+	return 0;
+}
+
 static inline int regulator_set_voltage(struct regulator *regulator,
 					int min_uV, int max_uV)
 {
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
 #ifdef __KERNEL__
 /*
  * the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
  * - if wait_list is not empty, then there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-	__s32			activity;
+	__s32			count;
 	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
 
-struct optimistic_spin_queue;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
 	long			count;
-	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#ifdef CONFIG_SMP
+	raw_spinlock_t		wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 	/*
 	 * Write owner. Used as a speculative check to see
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
-	struct optimistic_spin_queue *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name)			\
-	{ RWSEM_UNLOCKED_VALUE,				\
-	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
-	  LIST_HEAD_INIT((name).wait_list),		\
-	  NULL, /* owner */				\
-	  NULL /* mcs lock */				\
-	  __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
 #else
-#define __RWSEM_INITIALIZER(name)			\
-	{ RWSEM_UNLOCKED_VALUE,				\
-	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
-	  LIST_HEAD_INIT((name).wait_list)		\
-	  __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name)					\
+	{ .count = RWSEM_UNLOCKED_VALUE,				\
+	  .wait_list = LIST_HEAD_INIT((name).wait_list),		\
+	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)		\
+	  __RWSEM_OPT_INIT(name)					\
+	  __RWSEM_DEP_MAP_INIT(name) }
+
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
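With __RWSEM_OPT_INIT() appended and designated initializers throughout, the initializer no longer depends on the field order that the struct hunk above just changed. Roughly what DECLARE_RWSEM(my_sem) expands to with CONFIG_RWSEM_SPIN_ON_OWNER=y and lockdep off (expansion written out by hand for illustration, not part of the patch):

```c
/* Hand-expanded form of DECLARE_RWSEM(my_sem) after this patch, assuming
 * CONFIG_RWSEM_SPIN_ON_OWNER=y and CONFIG_DEBUG_LOCK_ALLOC=n. */
struct rw_semaphore my_sem = {
	.count		= RWSEM_UNLOCKED_VALUE,
	.wait_list	= LIST_HEAD_INIT(my_sem.wait_list),
	.wait_lock	= __RAW_SPIN_LOCK_UNLOCKED(my_sem.wait_lock),
	.osq		= OSQ_LOCK_UNLOCKED,	/* empty spinner queue */
	.owner		= NULL,			/* no writer owns it yet */
};
```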
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8e98297f1388..ec538fc287a6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,8 +305,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE	1
 
-extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-			       int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
 					  struct iovec *iov,
 					  int offset,
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
 			       unsigned long nr_segs);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-			     int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index f76994b9396c..519064e0c943 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
+extern bool hibernation_available(void);
 asmlinkage int swsusp_save(void);
 extern struct pbe *restore_pblist;
 #else /* CONFIG_HIBERNATION */
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {}
 static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
+static inline bool hibernation_available(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
 /* Hibernation and suspend events */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e2231e47cec1..09a7cffc224e 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
 	return i->count;
 }
 
-static inline void iov_iter_truncate(struct iov_iter *i, size_t count)
+/*
+ * Cap the iov_iter by given limit; note that the second argument is
+ * *not* the new size - it's upper limit for such.  Passing it a value
+ * greater than the amount of data in iov_iter is fine - it'll just do
+ * nothing in that case.
+ */
+static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
 {
+	/*
+	 * count doesn't have to fit in size_t - comparison extends both
+	 * operands to u64 here and any value that would be truncated by
+	 * conversion in assignment is by definition greater than all
+	 * values of size_t, including old i->count.
+	 */
 	if (i->count > count)
 		i->count = count;
 }
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
-
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+			int offset, int len);
+int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+		      int offset, int len);
 
 #endif
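Widening the limit to u64 lets callers pass 64-bit byte counts (for example ones derived from loff_t file sizes) without truncation on 32-bit builds. A hedged usage sketch, with the helper name invented:

```c
#include <linux/fs.h>
#include <linux/uio.h>

/* Invented helper: cap an iov_iter so a read cannot run past end-of-file.
 * The remaining length is computed in loff_t (64-bit) and passed straight
 * to iov_iter_truncate(), which now takes a u64 upper bound. */
static void cap_iter_to_eof(struct iov_iter *iter, loff_t pos, loff_t i_size)
{
	if (pos >= i_size)
		iov_iter_truncate(iter, 0);		/* nothing left to read */
	else
		iov_iter_truncate(iter, i_size - pos);	/* may exceed count: no-op then */
}
```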
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 1a64b26046ed..9b7de1b46437 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -70,7 +70,9 @@
 		US_FLAG(NEEDS_CAP16,	0x00400000)			\
 		/* cannot handle READ_CAPACITY_10 */			\
 		US_FLAG(IGNORE_UAS,	0x00800000)			\
-		/* Device advertises UAS but it is broken */
+		/* Device advertises UAS but it is broken */		\
+		US_FLAG(BROKEN_FUA,	0x01000000)			\
+		/* Cannot handle FUA in WRITE or READ CDBs */		\
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
