Diffstat (limited to 'include')
-rw-r--r--  include/linux/dcache.h          | 20
-rw-r--r--  include/linux/lockref.h         | 71
-rw-r--r--  include/linux/nsproxy.h         |  6
-rw-r--r--  include/linux/regmap.h          |  1
-rw-r--r--  include/linux/wait.h            | 57
-rw-r--r--  include/net/busy_poll.h         |  1
-rw-r--r--  include/net/genetlink.h         | 20
-rw-r--r--  include/net/route.h             |  8
-rw-r--r--  include/net/xfrm.h              |  6
-rw-r--r--  include/uapi/linux/cm4000_cs.h  |  1
-rw-r--r--  include/uapi/linux/icmpv6.h     |  2
11 files changed, 180 insertions, 13 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..efdc94434c30 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
 #include <linux/seqlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
+#include <linux/lockref.h>
 
 struct nameidata;
 struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
 # endif
 #endif
 
+#define d_lock	d_lockref.lock
+
 struct dentry {
 	/* RCU lookup touched fields */
 	unsigned int d_flags;		/* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */
 
 	/* Ref lookup also touches following */
-	unsigned int d_count;		/* protected by d_lock */
-	spinlock_t d_lock;		/* per dentry lock */
+	struct lockref d_lockref;	/* per-dentry lock and refcount */
 	const struct dentry_operations *d_op;
 	struct super_block *d_sb;	/* The root of the dentry tree */
 	unsigned long d_time;		/* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 	assert_spin_locked(&dentry->d_lock);
 	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
 		ret = 1;
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	}
 
 	return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 
 static inline unsigned d_count(const struct dentry *dentry)
 {
-	return dentry->d_count;
+	return dentry->d_lockref.count;
 }
 
 /* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
  * helper function for dentry_operations.d_dname() members
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
 
 extern char *__d_path(const struct path *, const struct path *, char *, int);
 extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
 static inline struct dentry *dget_dlock(struct dentry *dentry)
 {
 	if (dentry)
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	return dentry;
 }
 
 static inline struct dentry *dget(struct dentry *dentry)
 {
-	if (dentry) {
-		spin_lock(&dentry->d_lock);
-		dget_dlock(dentry);
-		spin_unlock(&dentry->d_lock);
-	}
+	if (dentry)
+		lockref_get(&dentry->d_lockref);
 	return dentry;
 }
 
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them.  In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+	spinlock_t lock;
+	unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockcnt: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval = 0;
+
+	spin_lock(&lockref->lock);
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (lockref->count <= 1)
+		return 0;
+	lockref->count--;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
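
The lockref API added above is meant to be embedded in a refcounted object, as the dcache.h change does with d_lockref. A minimal usage sketch, not part of this diff, using a hypothetical my_obj type and helpers to illustrate the calling convention (lockref_get() when a reference is already held, lockref_put_or_lock() as the fast path of a put, with the caller finishing the last-reference teardown under the lock it is left holding):

#include <linux/lockref.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical refcounted object */
	struct lockref ref;		/* lock and refcount kept together */
	/* ... payload ... */
};

/* Only valid when the caller already holds a reference. */
static void my_obj_get(struct my_obj *obj)
{
	lockref_get(&obj->ref);
}

static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;			/* count was > 1, decremented, done */
	/* count was <= 1 and the lock is now held: drop the last reference. */
	obj->ref.count--;
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}
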
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947491c7..b4ec59d159ac 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
  * A structure to contain pointers to all per-process
  * namespaces - fs (mount), uts, network, sysvipc, etc.
  *
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns.  The pid namespace here is the
+ * namespace that children will use.
+ *
  * 'count' is the number of tasks holding a reference.
  * The count for each namespace, then, will be the number
  * of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
 	struct uts_namespace *uts_ns;
 	struct ipc_namespace *ipc_ns;
 	struct mnt_namespace *mnt_ns;
-	struct pid_namespace *pid_ns;
+	struct pid_namespace *pid_ns_for_children;
 	struct net *net_ns;
 };
 extern struct nsproxy init_nsproxy;
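
For illustration only (not part of this diff): after the rename, the namespace a task itself belongs to is still reached through task_active_pid_ns(), while nsproxy only carries the namespace its future children will be created in; the two differ after unshare(CLONE_NEWPID). A hypothetical sketch:

#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void show_pid_ns_split(struct task_struct *tsk)
{
	/* The pid namespace tsk itself is a member of. */
	struct pid_namespace *active = task_active_pid_ns(tsk);
	/* The pid namespace tsk's future children will be created in;
	 * dereferencing tsk->nsproxy without locking is only safe for
	 * tsk == current. */
	struct pid_namespace *for_children = tsk->nsproxy->pid_ns_for_children;

	pr_info("active=%p for_children=%p\n", active, for_children);
}
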
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 580a5320cc96..6d91fcb4c5cb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/err.h>
+#include <linux/bug.h>
 
 struct module;
 struct device;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
 	__ret;								\
 })
 
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						    lock, ret)		\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		spin_unlock_irq(&lock);					\
+		ret = schedule_timeout(ret);				\
+		spin_lock_irq(&lock);					\
+		if (!ret)						\
+			break;						\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *	The condition is checked under the lock. This is expected
+ *	to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *	  and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
+						  timeout)		\
+({									\
+	int __ret = timeout;						\
+									\
+	if (!(condition))						\
+		__wait_event_interruptible_lock_irq_timeout(		\
+					wq, condition, lock, __ret);	\
+	__ret;								\
+})
+
 
 /*
  * These are the old interfaces to sleep waiting for an event.
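
A usage sketch for the new macro, not part of this diff; the my_dev structure and its fields are hypothetical, and the lock must already be held (taken with spin_lock_irq) when the macro is invoked:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_dev {				/* hypothetical driver state */
	spinlock_t		lock;
	wait_queue_head_t	wait;
	int			queued;	/* outstanding requests, protected by lock */
};

static int my_dev_drain(struct my_dev *dev)
{
	int ret;

	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wait,
							dev->queued == 0,
							dev->lock, 10 * HZ);
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* condition true, ret = jiffies remaining */
}
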
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 8a358a2c97e6..829627d7b846 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		/* local bh are disabled so it is ok to use _BH */
 		NET_ADD_STATS_BH(sock_net(sk),
 				 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		cpu_relax();
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93024a47e0e2..8e0b6c856a13 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -61,6 +61,7 @@ struct genl_family {
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
 	struct list_head	mcast_groups;	/* private */
+	struct module		*module;
 };
 
 /**
@@ -121,9 +122,24 @@ struct genl_ops {
 	struct list_head	ops_list;
 };
 
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
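
Callers are unchanged by this: because genl_register_family() is now a static inline expanded in the calling module, THIS_MODULE resolves to that module and is recorded in family->module. A hypothetical sketch of a module registering a family under this API (the family name and init function are illustrative only):

#include <linux/init.h>
#include <linux/module.h>
#include <net/genetlink.h>

static struct genl_family my_genl_family = {	/* hypothetical family */
	.id		= GENL_ID_GENERATE,
	.name		= "my_family",
	.version	= 1,
	.maxattr	= 0,
};

static int __init my_genl_init(void)
{
	/* The inline wrapper sets my_genl_family.module = THIS_MODULE
	 * before calling __genl_register_family(). */
	return genl_register_family(&my_genl_family);
}
module_init(my_genl_init);
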
diff --git a/include/net/route.h b/include/net/route.h
index 2ea40c1b5e00..afdeeb5bec25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
 	return hoplimit;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
 #endif	/* _ROUTE_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 89d3d8ae204e..e253bf0cc7ef 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
 						  struct sk_buff *skb);
 	int			(*transport_finish)(struct sk_buff *skb,
 						    int async);
+	void			(*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 extern int xfrm_output_resume(struct sk_buff *skb, int err);
 extern int xfrm_output(struct sk_buff *skb);
 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
 extern int xfrm4_extract_header(struct sk_buff *skb);
 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 extern int xfrm6_extract_header(struct sk_buff *skb);
 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 				 u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h
index bc51f77db918..1217f751a1bc 100644
--- a/include/uapi/linux/cm4000_cs.h
+++ b/include/uapi/linux/cm4000_cs.h
@@ -2,6 +2,7 @@
 #define _UAPI_CM4000_H_
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 #define MAX_ATR 33
 
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index e0133c73c304..590beda78ea0 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@ struct icmp6hdr {
 #define ICMPV6_NOT_NEIGHBOUR		2
 #define ICMPV6_ADDR_UNREACH		3
 #define ICMPV6_PORT_UNREACH		4
+#define ICMPV6_POLICY_FAIL		5
+#define ICMPV6_REJECT_ROUTE		6
 
 /*
  * Codes for Time Exceeded