diff options
| author | David S. Miller <davem@davemloft.net> | 2013-09-05 14:58:52 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2013-09-05 14:58:52 -0400 |
| commit | 06c54055bebf919249aa1eb68312887c3cfe77b4 (patch) | |
| tree | 223a49c09e5d26516ed0161b8a52d08454ae028e /include/linux | |
| parent | 1a5bbfc3d6b700178b75743a2ba1fd2e58a8f36f (diff) | |
| parent | e2e5c4c07caf810d7849658dca42f598b3938e21 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
net/bridge/br_multicast.c
net/ipv6/sit.c
The conflicts were minor:
1) sit.c changes overlap with change to ip_tunnel_xmit() signature.
2) br_multicast.c had an overlap between computing max_delay using
msecs_to_jiffies and turning MLDV2_MRC() into an inline function
with a name using lowercase instead of uppercase letters.
3) stmmac had two overlapping changes, one which conditionally allocated
and hooked up a dma_cfg based upon the presence of the pbl OF property,
and another one handling store-and-forward DMA mode. The latter of
which should not go into the new of_find_property() basic block.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/dcache.h | 20 | ||||
| -rw-r--r-- | include/linux/lockref.h | 71 | ||||
| -rw-r--r-- | include/linux/nsproxy.h | 6 | ||||
| -rw-r--r-- | include/linux/regmap.h | 1 | ||||
| -rw-r--r-- | include/linux/wait.h | 57 |
5 files changed, 144 insertions, 11 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b90337c9d468..efdc94434c30 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/seqlock.h> | 9 | #include <linux/seqlock.h> |
| 10 | #include <linux/cache.h> | 10 | #include <linux/cache.h> |
| 11 | #include <linux/rcupdate.h> | 11 | #include <linux/rcupdate.h> |
| 12 | #include <linux/lockref.h> | ||
| 12 | 13 | ||
| 13 | struct nameidata; | 14 | struct nameidata; |
| 14 | struct path; | 15 | struct path; |
| @@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int); | |||
| 100 | # endif | 101 | # endif |
| 101 | #endif | 102 | #endif |
| 102 | 103 | ||
| 104 | #define d_lock d_lockref.lock | ||
| 105 | |||
| 103 | struct dentry { | 106 | struct dentry { |
| 104 | /* RCU lookup touched fields */ | 107 | /* RCU lookup touched fields */ |
| 105 | unsigned int d_flags; /* protected by d_lock */ | 108 | unsigned int d_flags; /* protected by d_lock */ |
| @@ -112,8 +115,7 @@ struct dentry { | |||
| 112 | unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ | 115 | unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ |
| 113 | 116 | ||
| 114 | /* Ref lookup also touches following */ | 117 | /* Ref lookup also touches following */ |
| 115 | unsigned int d_count; /* protected by d_lock */ | 118 | struct lockref d_lockref; /* per-dentry lock and refcount */ |
| 116 | spinlock_t d_lock; /* per dentry lock */ | ||
| 117 | const struct dentry_operations *d_op; | 119 | const struct dentry_operations *d_op; |
| 118 | struct super_block *d_sb; /* The root of the dentry tree */ | 120 | struct super_block *d_sb; /* The root of the dentry tree */ |
| 119 | unsigned long d_time; /* used by d_revalidate */ | 121 | unsigned long d_time; /* used by d_revalidate */ |
| @@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) | |||
| 318 | assert_spin_locked(&dentry->d_lock); | 320 | assert_spin_locked(&dentry->d_lock); |
| 319 | if (!read_seqcount_retry(&dentry->d_seq, seq)) { | 321 | if (!read_seqcount_retry(&dentry->d_seq, seq)) { |
| 320 | ret = 1; | 322 | ret = 1; |
| 321 | dentry->d_count++; | 323 | dentry->d_lockref.count++; |
| 322 | } | 324 | } |
| 323 | 325 | ||
| 324 | return ret; | 326 | return ret; |
| @@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) | |||
| 326 | 328 | ||
| 327 | static inline unsigned d_count(const struct dentry *dentry) | 329 | static inline unsigned d_count(const struct dentry *dentry) |
| 328 | { | 330 | { |
| 329 | return dentry->d_count; | 331 | return dentry->d_lockref.count; |
| 330 | } | 332 | } |
| 331 | 333 | ||
| 332 | /* validate "insecure" dentry pointer */ | 334 | /* validate "insecure" dentry pointer */ |
| @@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *); | |||
| 336 | * helper function for dentry_operations.d_dname() members | 338 | * helper function for dentry_operations.d_dname() members |
| 337 | */ | 339 | */ |
| 338 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | 340 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); |
| 341 | extern char *simple_dname(struct dentry *, char *, int); | ||
| 339 | 342 | ||
| 340 | extern char *__d_path(const struct path *, const struct path *, char *, int); | 343 | extern char *__d_path(const struct path *, const struct path *, char *, int); |
| 341 | extern char *d_absolute_path(const struct path *, char *, int); | 344 | extern char *d_absolute_path(const struct path *, char *, int); |
| @@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int); | |||
| 356 | static inline struct dentry *dget_dlock(struct dentry *dentry) | 359 | static inline struct dentry *dget_dlock(struct dentry *dentry) |
| 357 | { | 360 | { |
| 358 | if (dentry) | 361 | if (dentry) |
| 359 | dentry->d_count++; | 362 | dentry->d_lockref.count++; |
| 360 | return dentry; | 363 | return dentry; |
| 361 | } | 364 | } |
| 362 | 365 | ||
| 363 | static inline struct dentry *dget(struct dentry *dentry) | 366 | static inline struct dentry *dget(struct dentry *dentry) |
| 364 | { | 367 | { |
| 365 | if (dentry) { | 368 | if (dentry) |
| 366 | spin_lock(&dentry->d_lock); | 369 | lockref_get(&dentry->d_lockref); |
| 367 | dget_dlock(dentry); | ||
| 368 | spin_unlock(&dentry->d_lock); | ||
| 369 | } | ||
| 370 | return dentry; | 370 | return dentry; |
| 371 | } | 371 | } |
| 372 | 372 | ||
diff --git a/include/linux/lockref.h b/include/linux/lockref.h new file mode 100644 index 000000000000..01233e01627a --- /dev/null +++ b/include/linux/lockref.h | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | #ifndef __LINUX_LOCKREF_H | ||
| 2 | #define __LINUX_LOCKREF_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Locked reference counts. | ||
| 6 | * | ||
| 7 | * These are different from just plain atomic refcounts in that they | ||
| 8 | * are atomic with respect to the spinlock that goes with them. In | ||
| 9 | * particular, there can be implementations that don't actually get | ||
| 10 | * the spinlock for the common decrement/increment operations, but they | ||
| 11 | * still have to check that the operation is done semantically as if | ||
| 12 | * the spinlock had been taken (using a cmpxchg operation that covers | ||
| 13 | * both the lock and the count word, or using memory transactions, for | ||
| 14 | * example). | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/spinlock.h> | ||
| 18 | |||
| 19 | struct lockref { | ||
| 20 | spinlock_t lock; | ||
| 21 | unsigned int count; | ||
| 22 | }; | ||
| 23 | |||
| 24 | /** | ||
| 25 | * lockref_get - Increments reference count unconditionally | ||
| 26 | * @lockcnt: pointer to lockref structure | ||
| 27 | * | ||
| 28 | * This operation is only valid if you already hold a reference | ||
| 29 | * to the object, so you know the count cannot be zero. | ||
| 30 | */ | ||
| 31 | static inline void lockref_get(struct lockref *lockref) | ||
| 32 | { | ||
| 33 | spin_lock(&lockref->lock); | ||
| 34 | lockref->count++; | ||
| 35 | spin_unlock(&lockref->lock); | ||
| 36 | } | ||
| 37 | |||
| 38 | /** | ||
| 39 | * lockref_get_not_zero - Increments count unless the count is 0 | ||
| 40 | * @lockcnt: pointer to lockref structure | ||
| 41 | * Return: 1 if count updated successfully or 0 if count is 0 | ||
| 42 | */ | ||
| 43 | static inline int lockref_get_not_zero(struct lockref *lockref) | ||
| 44 | { | ||
| 45 | int retval = 0; | ||
| 46 | |||
| 47 | spin_lock(&lockref->lock); | ||
| 48 | if (lockref->count) { | ||
| 49 | lockref->count++; | ||
| 50 | retval = 1; | ||
| 51 | } | ||
| 52 | spin_unlock(&lockref->lock); | ||
| 53 | return retval; | ||
| 54 | } | ||
| 55 | |||
| 56 | /** | ||
| 57 | * lockref_put_or_lock - decrements count unless count <= 1 before decrement | ||
| 58 | * @lockcnt: pointer to lockref structure | ||
| 59 | * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken | ||
| 60 | */ | ||
| 61 | static inline int lockref_put_or_lock(struct lockref *lockref) | ||
| 62 | { | ||
| 63 | spin_lock(&lockref->lock); | ||
| 64 | if (lockref->count <= 1) | ||
| 65 | return 0; | ||
| 66 | lockref->count--; | ||
| 67 | spin_unlock(&lockref->lock); | ||
| 68 | return 1; | ||
| 69 | } | ||
| 70 | |||
| 71 | #endif /* __LINUX_LOCKREF_H */ | ||
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 10e5947491c7..b4ec59d159ac 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h | |||
| @@ -14,6 +14,10 @@ struct fs_struct; | |||
| 14 | * A structure to contain pointers to all per-process | 14 | * A structure to contain pointers to all per-process |
| 15 | * namespaces - fs (mount), uts, network, sysvipc, etc. | 15 | * namespaces - fs (mount), uts, network, sysvipc, etc. |
| 16 | * | 16 | * |
| 17 | * The pid namespace is an exception -- it's accessed using | ||
| 18 | * task_active_pid_ns. The pid namespace here is the | ||
| 19 | * namespace that children will use. | ||
| 20 | * | ||
| 17 | * 'count' is the number of tasks holding a reference. | 21 | * 'count' is the number of tasks holding a reference. |
| 18 | * The count for each namespace, then, will be the number | 22 | * The count for each namespace, then, will be the number |
| 19 | * of nsproxies pointing to it, not the number of tasks. | 23 | * of nsproxies pointing to it, not the number of tasks. |
| @@ -27,7 +31,7 @@ struct nsproxy { | |||
| 27 | struct uts_namespace *uts_ns; | 31 | struct uts_namespace *uts_ns; |
| 28 | struct ipc_namespace *ipc_ns; | 32 | struct ipc_namespace *ipc_ns; |
| 29 | struct mnt_namespace *mnt_ns; | 33 | struct mnt_namespace *mnt_ns; |
| 30 | struct pid_namespace *pid_ns; | 34 | struct pid_namespace *pid_ns_for_children; |
| 31 | struct net *net_ns; | 35 | struct net *net_ns; |
| 32 | }; | 36 | }; |
| 33 | extern struct nsproxy init_nsproxy; | 37 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 580a5320cc96..6d91fcb4c5cb 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 17 | #include <linux/rbtree.h> | 17 | #include <linux/rbtree.h> |
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/bug.h> | ||
| 19 | 20 | ||
| 20 | struct module; | 21 | struct module; |
| 21 | struct device; | 22 | struct device; |
diff --git a/include/linux/wait.h b/include/linux/wait.h index f487a4750b7f..a67fc1635592 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -811,6 +811,63 @@ do { \ | |||
| 811 | __ret; \ | 811 | __ret; \ |
| 812 | }) | 812 | }) |
| 813 | 813 | ||
| 814 | #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ | ||
| 815 | lock, ret) \ | ||
| 816 | do { \ | ||
| 817 | DEFINE_WAIT(__wait); \ | ||
| 818 | \ | ||
| 819 | for (;;) { \ | ||
| 820 | prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ | ||
| 821 | if (condition) \ | ||
| 822 | break; \ | ||
| 823 | if (signal_pending(current)) { \ | ||
| 824 | ret = -ERESTARTSYS; \ | ||
| 825 | break; \ | ||
| 826 | } \ | ||
| 827 | spin_unlock_irq(&lock); \ | ||
| 828 | ret = schedule_timeout(ret); \ | ||
| 829 | spin_lock_irq(&lock); \ | ||
| 830 | if (!ret) \ | ||
| 831 | break; \ | ||
| 832 | } \ | ||
| 833 | finish_wait(&wq, &__wait); \ | ||
| 834 | } while (0) | ||
| 835 | |||
| 836 | /** | ||
| 837 | * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. | ||
| 838 | * The condition is checked under the lock. This is expected | ||
| 839 | * to be called with the lock taken. | ||
| 840 | * @wq: the waitqueue to wait on | ||
| 841 | * @condition: a C expression for the event to wait for | ||
| 842 | * @lock: a locked spinlock_t, which will be released before schedule() | ||
| 843 | * and reacquired afterwards. | ||
| 844 | * @timeout: timeout, in jiffies | ||
| 845 | * | ||
| 846 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | ||
| 847 | * @condition evaluates to true or signal is received. The @condition is | ||
| 848 | * checked each time the waitqueue @wq is woken up. | ||
| 849 | * | ||
| 850 | * wake_up() has to be called after changing any variable that could | ||
| 851 | * change the result of the wait condition. | ||
| 852 | * | ||
| 853 | * This is supposed to be called while holding the lock. The lock is | ||
| 854 | * dropped before going to sleep and is reacquired afterwards. | ||
| 855 | * | ||
| 856 | * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it | ||
| 857 | * was interrupted by a signal, and the remaining jiffies otherwise | ||
| 858 | * if the condition evaluated to true before the timeout elapsed. | ||
| 859 | */ | ||
| 860 | #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ | ||
| 861 | timeout) \ | ||
| 862 | ({ \ | ||
| 863 | int __ret = timeout; \ | ||
| 864 | \ | ||
| 865 | if (!(condition)) \ | ||
| 866 | __wait_event_interruptible_lock_irq_timeout( \ | ||
| 867 | wq, condition, lock, __ret); \ | ||
| 868 | __ret; \ | ||
| 869 | }) | ||
| 870 | |||
| 814 | 871 | ||
| 815 | /* | 872 | /* |
| 816 | * These are the old interfaces to sleep waiting for an event. | 873 | * These are the old interfaces to sleep waiting for an event. |
