Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/netdevice.h                      |  4
-rw-r--r--  include/linux/netfilter/nfnetlink_conntrack.h  |  1
-rw-r--r--  include/linux/netfilter/x_tables.h             | 73
-rw-r--r--  include/linux/wait.h                           |  6
4 files changed, 75 insertions(+), 9 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2e7783f4a755..5a96a1a406e9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -104,7 +104,7 @@ struct wireless_dev;
 # else
 # define LL_MAX_HEADER 96
 # endif
-#elif defined(CONFIG_TR)
+#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
 # define LL_MAX_HEADER 48
 #else
 # define LL_MAX_HEADER 32
@@ -500,7 +500,7 @@ struct netdev_queue {
  *
  * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
  *	This function is called when the Media Access Control address
- *	needs to be changed. If not this interface is not defined, the
+ *	needs to be changed. If this interface is not defined, the
  *	mac address can not be changed.
  *
  * int (*ndo_validate_addr)(struct net_device *dev);
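
A hedged illustration, not part of this patch: the contract documented
above means a driver's ndo_set_mac_address callback typically validates
the incoming sockaddr and copies it into dev->dev_addr. The function
name below is hypothetical; is_valid_ether_addr() comes from
<linux/etherdevice.h>.

	static int example_set_mac_address(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;

		/* Refuse zero/multicast addresses before committing. */
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return 0;
	}
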
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 29fe9ea1d346..1a865e48b8eb 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -100,6 +100,7 @@ enum ctattr_protoinfo_tcp {
 enum ctattr_protoinfo_dccp {
 	CTA_PROTOINFO_DCCP_UNSPEC,
 	CTA_PROTOINFO_DCCP_STATE,
+	CTA_PROTOINFO_DCCP_ROLE,
 	__CTA_PROTOINFO_DCCP_MAX,
 };
 #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
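
A hedged sketch of how a ctnetlink dump path might emit the new
attribute (the field access is illustrative, not taken from this patch;
NLA_PUT_U8 was the netlink put helper of this era and branches to
nla_put_failure on overflow):

	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
		   ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
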
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 7b1a652066c0..1b2e43502ef7 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -354,9 +354,6 @@ struct xt_table
 	/* What hooks you will enter on */
 	unsigned int valid_hooks;
 
-	/* Lock for the curtain */
-	struct mutex lock;
-
 	/* Man behind the curtain... */
 	struct xt_table_info *private;
 
@@ -434,8 +431,74 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);
 
 extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
 extern void xt_free_table_info(struct xt_table_info *info);
-extern void xt_table_entry_swap_rcu(struct xt_table_info *old,
-				    struct xt_table_info *new);
+
+/*
+ * Per-CPU spinlock associated with per-cpu table entries, and
+ * with a counter for the "reading" side that allows a recursive
+ * reader to avoid taking the lock and deadlocking.
+ *
+ * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
+ * It needs to ensure that the rules are not being changed while the packet
+ * is being processed. In some cases, the read lock will be acquired
+ * twice on the same CPU; this is okay because of the count.
+ *
+ * "writing" is used when reading counters.
+ * During replace any readers that are using the old tables have to complete
+ * before freeing the old table. This is handled by the write locking
+ * necessary for reading the counters.
+ */
+struct xt_info_lock {
+	spinlock_t lock;
+	unsigned char readers;
+};
+DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
+
+/*
+ * Note: we need to ensure that preemption is disabled before acquiring
+ * the per-cpu-variable, so we do it as a two step process rather than
+ * using "spin_lock_bh()".
+ *
+ * We _also_ need to disable bottom half processing before updating our
+ * nesting count, to make sure that the only kind of re-entrancy is this
+ * code being called by itself: since the count+lock is not an atomic
+ * operation, we can allow no races.
+ *
+ * _Only_ that special combination of being per-cpu and never getting
+ * re-entered asynchronously means that the count is safe.
+ */
+static inline void xt_info_rdlock_bh(void)
+{
+	struct xt_info_lock *lock;
+
+	local_bh_disable();
+	lock = &__get_cpu_var(xt_info_locks);
+	if (!lock->readers++)
+		spin_lock(&lock->lock);
+}
+
+static inline void xt_info_rdunlock_bh(void)
+{
+	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
+
+	if (!--lock->readers)
+		spin_unlock(&lock->lock);
+	local_bh_enable();
+}
+
+/*
+ * The "writer" side needs to get exclusive access to the lock,
+ * regardless of readers. This must be called with bottom half
+ * processing (and thus also preemption) disabled.
+ */
+static inline void xt_info_wrlock(unsigned int cpu)
+{
+	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+}
+
+static inline void xt_info_wrunlock(unsigned int cpu)
+{
+	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+}
 
 /*
  * This helper is performance critical and must be inlined
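
To make the pairing concrete, a hedged usage sketch, not part of the
patch, assuming it mirrors the ipt_do_table() and get_counters() call
sites this interface was written for:

	/* Packet path: runs per-cpu and may nest on the same CPU,
	 * which the readers count absorbs. */
	xt_info_rdlock_bh();
	/* ... traverse the rule table, bump this CPU's counters ... */
	xt_info_rdunlock_bh();

	/* Counter collection: exclude readers one CPU at a time.
	 * xt_info_wrlock() requires bottom halves already disabled. */
	unsigned int cpu;

	local_bh_disable();
	for_each_possible_cpu(cpu) {
		xt_info_wrlock(cpu);
		/* ... snapshot that CPU's counters ... */
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
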
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 5d631c17eaee..bc024632f365 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -440,13 +440,15 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
-#define DEFINE_WAIT(name)						\
+#define DEFINE_WAIT_FUNC(name, function)				\
 	wait_queue_t name = {						\
 		.private	= current,				\
-		.func		= autoremove_wake_function,		\
+		.func		= function,				\
 		.task_list	= LIST_HEAD_INIT((name).task_list),	\
 	}
 
+#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
+
 #define DEFINE_WAIT_BIT(name, word, bit)				\
 	struct wait_bit_queue name = {					\
 		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
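
A hedged usage sketch for the new macro (the wake function and wait
routine below are hypothetical): DEFINE_WAIT_FUNC lets a waiter install
its own callback while keeping the usual prepare/finish pattern, and
DEFINE_WAIT is now just the autoremove_wake_function special case.

	/* Custom wake function: could filter on 'key' before deferring
	 * to the default dequeue-and-wake behaviour. */
	static int example_wake_function(wait_queue_t *wait, unsigned mode,
					 int sync, void *key)
	{
		return autoremove_wake_function(wait, mode, sync, key);
	}

	static void example_wait(wait_queue_head_t *wq)
	{
		DEFINE_WAIT_FUNC(wait, example_wake_function);

		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (!signal_pending(current))
			schedule();
		finish_wait(wq, &wait);
	}
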