 Documentation/connector/connector.txt |   44
 arch/cris/arch-v32/kernel/smp.c       |    2
 arch/sh/kernel/smp.c                  |    3
 include/linux/cpumask.h               |   12
 net/bridge/netfilter/ebtables.c       |   27
 net/ipv4/netfilter/arp_tables.c       |   14
 net/ipv4/netfilter/ip_tables.c        |   17
 net/ipv4/tcp_output.c                 |   12
 net/ipv6/netfilter/ip6_tables.c       |   16
 net/sched/Kconfig                     |    4
 10 files changed, 119 insertions(+), 32 deletions(-)
diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt
index 54a0a14bfb..57a314b14c 100644
--- a/Documentation/connector/connector.txt
+++ b/Documentation/connector/connector.txt
@@ -131,3 +131,47 @@ Netlink itself is not reliable protocol, that means that messages can
 be lost due to memory pressure or process' receiving queue overflowed,
 so caller is warned must be prepared. That is why struct cn_msg [main
 connector's message header] contains u32 seq and u32 ack fields.
+
+/*****************************************/
+Userspace usage.
+/*****************************************/
+2.6.14 has a new netlink socket implementation, which by default does not
+allow to send data to netlink groups other than 1.
+So, if to use netlink socket (for example using connector)
+with different group number userspace application must subscribe to
+that group. It can be achieved by following pseudocode:
+
+s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
+
+l_local.nl_family = AF_NETLINK;
+l_local.nl_groups = 12345;
+l_local.nl_pid = 0;
+
+if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
+        perror("bind");
+        close(s);
+        return -1;
+}
+
+{
+        int on = l_local.nl_groups;
+        setsockopt(s, 270, 1, &on, sizeof(on));
+}
+
+Where 270 above is SOL_NETLINK, and 1 is a NETLINK_ADD_MEMBERSHIP socket
+option. To drop multicast subscription one should call above socket option
+with NETLINK_DROP_MEMBERSHIP parameter which is defined as 0.
+
+2.6.14 netlink code only allows to select a group which is less or equal to
+the maximum group number, which is used at netlink_kernel_create() time.
+In case of connector it is CN_NETLINK_USERS + 0xf, so if you want to use
+group number 12345, you must increment CN_NETLINK_USERS to that number.
+Additional 0xf numbers are allocated to be used by non-in-kernel users.
+
+Due to this limitation, group 0xffffffff does not work now, so one can
+not use add/remove connector's group notifications, but as far as I know,
+only cn_test.c test module used it.
+
+Some work in netlink area is still being done, so things can be changed in
+2.6.15 timeframe, if it will happen, documentation will be updated for that
+kernel.
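
The pseudocode in the documentation hunk above expands into the following self-contained userspace sketch (an illustration added for this write-up, not part of the patch). The symbolic constants stand for the raw values the text mentions: SOL_NETLINK is 270, NETLINK_ADD_MEMBERSHIP is 1, and the connector protocol NETLINK_CONNECTOR is 11; the group number 12345 is simply the example value used in the text.

    /* Sketch of the subscription sequence described above; error handling
     * and the actual recv() loop for connector messages are kept minimal. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>

    #ifndef NETLINK_CONNECTOR
    #define NETLINK_CONNECTOR       11
    #endif
    #ifndef SOL_NETLINK
    #define SOL_NETLINK             270
    #endif
    #ifndef NETLINK_ADD_MEMBERSHIP
    #define NETLINK_ADD_MEMBERSHIP  1
    #endif

    int main(void)
    {
            struct sockaddr_nl l_local;
            int s, on;

            s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
            if (s == -1) {
                    perror("socket");
                    return -1;
            }

            memset(&l_local, 0, sizeof(l_local));
            l_local.nl_family = AF_NETLINK;
            l_local.nl_groups = 12345;      /* example group from the text */
            l_local.nl_pid = 0;

            if (bind(s, (struct sockaddr *)&l_local, sizeof(l_local)) == -1) {
                    perror("bind");
                    close(s);
                    return -1;
            }

            /* join the multicast group; NETLINK_DROP_MEMBERSHIP (0) leaves it */
            on = l_local.nl_groups;
            if (setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                           &on, sizeof(on)) == -1) {
                    perror("setsockopt");
                    close(s);
                    return -1;
            }

            /* ... recv() connector messages here ... */
            close(s);
            return 0;
    }
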
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 2c5cae04a9..957f551ba5 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 
 #define IPI_SCHEDULE 1
 #define IPI_CALL 2
@@ -28,6 +29,7 @@ spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
 /* CPU masks */
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
 volatile int cpu_now_booting = 0;
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 56a39d69e0..5ecefc0289 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/sched.h>
+#include <linux/module.h>
 
 #include <asm/atomic.h>
 #include <asm/processor.h>
@@ -39,6 +40,8 @@ struct sh_cpuinfo cpu_data[NR_CPUS];
 extern void per_cpu_trap_init(void);
 
 cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
+
 cpumask_t cpu_online_map;
 static atomic_t cpus_booted = ATOMIC_INIT(0);
 
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b15826f6e3..fe9778301d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -392,4 +392,16 @@ extern cpumask_t cpu_present_map;
 #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
 #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
 
+/* Find the highest possible smp_processor_id() */
+static inline unsigned int highest_possible_processor_id(void)
+{
+        unsigned int cpu, highest = 0;
+
+        for_each_cpu_mask(cpu, cpu_possible_map)
+                highest = cpu;
+
+        return highest;
+}
+
+
 #endif /* __LINUX_CPUMASK_H */
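
The helper added above is what the netfilter changes below use to size their allocations. On machines with discontiguous CPU numbering, num_possible_cpus() only counts the possible CPUs; it says nothing about the largest CPU id, so an array indexed by raw CPU id must have highest_possible_processor_id() + 1 slots. A minimal userspace model of the arithmetic (the possible set {0, 2, 6} is hypothetical, chosen only to make the gap visible):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical sparse cpu_possible_map: only ids 0, 2 and 6 exist */
            int possible[] = { 0, 2, 6 };
            int ncpus = sizeof(possible) / sizeof(possible[0]); /* num_possible_cpus() == 3 */
            int highest = possible[ncpus - 1];  /* highest_possible_processor_id() == 6 */

            /*
             * An array with only ncpus (3) entries overflows as soon as it is
             * indexed by cpu id 6; highest + 1 (7) entries is the safe size.
             */
            printf("possible cpus: %d, slots needed when indexing by cpu id: %d\n",
                   ncpus, highest + 1);
            return 0;
    }
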
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c4540144f0..f8ffbf6e23 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -26,6 +26,7 @@
 #include <linux/spinlock.h>
 #include <asm/uaccess.h>
 #include <linux/smp.h>
+#include <linux/cpumask.h>
 #include <net/sock.h>
 /* needed for logical [in,out]-dev filtering */
 #include "../br_private.h"
@@ -823,10 +824,11 @@ static int translate_table(struct ebt_replace *repl,
         /* this will get free'd in do_replace()/ebt_register_table()
            if an error occurs */
         newinfo->chainstack = (struct ebt_chainstack **)
-           vmalloc(num_possible_cpus() * sizeof(struct ebt_chainstack));
+           vmalloc((highest_possible_processor_id()+1)
+                        * sizeof(struct ebt_chainstack));
         if (!newinfo->chainstack)
                 return -ENOMEM;
-        for (i = 0; i < num_possible_cpus(); i++) {
+        for_each_cpu(i) {
                 newinfo->chainstack[i] =
                    vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
                 if (!newinfo->chainstack[i]) {
@@ -895,9 +897,12 @@ static void get_counters(struct ebt_counter *oldcounters,
 
         /* counters of cpu 0 */
         memcpy(counters, oldcounters,
-           sizeof(struct ebt_counter) * nentries);
+               sizeof(struct ebt_counter) * nentries);
+
         /* add other counters to those of cpu 0 */
-        for (cpu = 1; cpu < num_possible_cpus(); cpu++) {
+        for_each_cpu(cpu) {
+                if (cpu == 0)
+                        continue;
                 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
                 for (i = 0; i < nentries; i++) {
                         counters[i].pcnt += counter_base[i].pcnt;
@@ -929,7 +934,8 @@ static int do_replace(void __user *user, unsigned int len)
                 BUGPRINT("Entries_size never zero\n");
                 return -EINVAL;
         }
-        countersize = COUNTER_OFFSET(tmp.nentries) * num_possible_cpus();
+        countersize = COUNTER_OFFSET(tmp.nentries) *
+                                        (highest_possible_processor_id()+1);
         newinfo = (struct ebt_table_info *)
            vmalloc(sizeof(struct ebt_table_info) + countersize);
         if (!newinfo)
@@ -1022,7 +1028,7 @@ static int do_replace(void __user *user, unsigned int len)
 
         vfree(table->entries);
         if (table->chainstack) {
-                for (i = 0; i < num_possible_cpus(); i++)
+                for_each_cpu(i)
                         vfree(table->chainstack[i]);
                 vfree(table->chainstack);
         }
@@ -1040,7 +1046,7 @@ free_counterstmp:
         vfree(counterstmp);
         /* can be initialized in translate_table() */
         if (newinfo->chainstack) {
-                for (i = 0; i < num_possible_cpus(); i++)
+                for_each_cpu(i)
                         vfree(newinfo->chainstack[i]);
                 vfree(newinfo->chainstack);
         }
@@ -1132,7 +1138,8 @@ int ebt_register_table(struct ebt_table *table)
                 return -EINVAL;
         }
 
-        countersize = COUNTER_OFFSET(table->table->nentries) * num_possible_cpus();
+        countersize = COUNTER_OFFSET(table->table->nentries) *
+                                        (highest_possible_processor_id()+1);
         newinfo = (struct ebt_table_info *)
            vmalloc(sizeof(struct ebt_table_info) + countersize);
         ret = -ENOMEM;
@@ -1186,7 +1193,7 @@ free_unlock:
         up(&ebt_mutex);
 free_chainstack:
         if (newinfo->chainstack) {
-                for (i = 0; i < num_possible_cpus(); i++)
+                for_each_cpu(i)
                         vfree(newinfo->chainstack[i]);
                 vfree(newinfo->chainstack);
         }
@@ -1209,7 +1216,7 @@ void ebt_unregister_table(struct ebt_table *table)
         up(&ebt_mutex);
         vfree(table->private->entries);
         if (table->private->chainstack) {
-                for (i = 0; i < num_possible_cpus(); i++)
+                for_each_cpu(i)
                         vfree(table->private->chainstack[i]);
                 vfree(table->private->chainstack);
         }
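
The same conversion pattern repeats in the arp_tables, ip_tables and ip6_tables hunks that follow: per-CPU copies are sized by the highest possible CPU id, iteration uses for_each_cpu(), and cpu 0 is skipped where its copy is the reference. As a runnable model of the counter-summation idiom (hypothetical data, not the real ebt/ipt structures):

    #include <stdio.h>

    #define NENTRIES 2
    #define NSLOTS   7              /* highest possible cpu id (6) + 1 */

    int main(void)
    {
            /* hypothetical per-cpu counters, indexed [cpu][entry]; ids 1, 3-5 unused */
            unsigned long pcnt[NSLOTS][NENTRIES] = {
                    [0] = { 10, 20 },       /* cpu 0: base copy */
                    [2] = {  1,  2 },
                    [6] = {  5,  0 },
            };
            int possible[] = { 0, 2, 6 };   /* stands in for cpu_possible_map */
            unsigned long sum[NENTRIES];
            unsigned int k, i, cpu;

            for (i = 0; i < NENTRIES; i++)  /* counters of cpu 0 */
                    sum[i] = pcnt[0][i];

            for (k = 0; k < 3; k++) {       /* stands in for for_each_cpu(cpu) */
                    cpu = possible[k];
                    if (cpu == 0)
                            continue;       /* cpu 0 already copied above */
                    for (i = 0; i < NENTRIES; i++)
                            sum[i] += pcnt[cpu][i];
            }

            printf("entry0=%lu entry1=%lu\n", sum[0], sum[1]);  /* 16 and 22 */
            return 0;
    }
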
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fa16342566..a7969286e6 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -716,8 +716,10 @@ static int translate_table(const char *name,
         }
 
         /* And one copy for every other CPU */
-        for (i = 1; i < num_possible_cpus(); i++) {
-                memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+        for_each_cpu(i) {
+                if (i == 0)
+                        continue;
+                memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
                        newinfo->entries,
                        SMP_ALIGN(newinfo->size));
         }
@@ -767,7 +769,7 @@ static void get_counters(const struct arpt_table_info *t,
         unsigned int cpu;
         unsigned int i;
 
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+        for_each_cpu(cpu) {
                 i = 0;
                 ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
                                    t->size,
@@ -885,7 +887,8 @@ static int do_replace(void __user *user, unsigned int len)
                 return -ENOMEM;
 
         newinfo = vmalloc(sizeof(struct arpt_table_info)
-                          + SMP_ALIGN(tmp.size) * num_possible_cpus());
+                          + SMP_ALIGN(tmp.size) *
+                                (highest_possible_processor_id()+1));
         if (!newinfo)
                 return -ENOMEM;
 
@@ -1158,7 +1161,8 @@ int arpt_register_table(struct arpt_table *table,
                 = { 0, 0, 0, { 0 }, { 0 }, { } };
 
         newinfo = vmalloc(sizeof(struct arpt_table_info)
-                          + SMP_ALIGN(repl->size) * num_possible_cpus());
+                          + SMP_ALIGN(repl->size) *
+                                (highest_possible_processor_id()+1));
         if (!newinfo) {
                 ret = -ENOMEM;
                 return ret;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index eef99a1b5d..75c27e92f6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -27,6 +27,7 @@
 #include <asm/semaphore.h>
 #include <linux/proc_fs.h>
 #include <linux/err.h>
+#include <linux/cpumask.h>
 
 #include <linux/netfilter_ipv4/ip_tables.h>
 
@@ -921,8 +922,10 @@ translate_table(const char *name,
         }
 
         /* And one copy for every other CPU */
-        for (i = 1; i < num_possible_cpus(); i++) {
-                memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+        for_each_cpu(i) {
+                if (i == 0)
+                        continue;
+                memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
                        newinfo->entries,
                        SMP_ALIGN(newinfo->size));
         }
@@ -943,7 +946,7 @@ replace_table(struct ipt_table *table,
         struct ipt_entry *table_base;
         unsigned int i;
 
-        for (i = 0; i < num_possible_cpus(); i++) {
+        for_each_cpu(i) {
                 table_base =
                         (void *)newinfo->entries
                         + TABLE_OFFSET(newinfo, i);
@@ -990,7 +993,7 @@ get_counters(const struct ipt_table_info *t,
         unsigned int cpu;
         unsigned int i;
 
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+        for_each_cpu(cpu) {
                 i = 0;
                 IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
                                   t->size,
@@ -1128,7 +1131,8 @@ do_replace(void __user *user, unsigned int len)
                 return -ENOMEM;
 
         newinfo = vmalloc(sizeof(struct ipt_table_info)
-                          + SMP_ALIGN(tmp.size) * num_possible_cpus());
+                          + SMP_ALIGN(tmp.size) *
+                                (highest_possible_processor_id()+1));
         if (!newinfo)
                 return -ENOMEM;
 
@@ -1458,7 +1462,8 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
                 = { 0, 0, 0, { 0 }, { 0 }, { } };
 
         newinfo = vmalloc(sizeof(struct ipt_table_info)
-                          + SMP_ALIGN(repl->size) * num_possible_cpus());
+                          + SMP_ALIGN(repl->size) *
+                                (highest_possible_processor_id()+1));
         if (!newinfo)
                 return -ENOMEM;
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f37a50e55b..7114031fdc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -436,11 +436,13 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
         u16 flags;
 
         if (unlikely(len >= skb->len)) {
-                printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
-                       "end_seq=%u, skb->len=%u.\n", len, mss_now,
-                       TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-                       skb->len);
-                WARN_ON(1);
+                if (net_ratelimit()) {
+                        printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
+                               "end_seq=%u, skb->len=%u.\n", len, mss_now,
+                               TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
+                               skb->len);
+                        WARN_ON(1);
+                }
                 return 0;
         }
 
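
The tcp_output.c hunk only wraps an existing diagnostic in net_ratelimit(), which returns nonzero when printing is currently allowed, so a stream of bad segments cannot flood the log (the kernel's actual rate is tunable through the net.core message_cost/message_burst sysctls). A rough userspace model of that kind of guard, for illustration only and not the kernel's implementation:

    #include <stdio.h>
    #include <time.h>

    /* crude stand-in for net_ratelimit(): allow at most one message per second */
    static int my_ratelimit(void)
    {
            static time_t last;
            time_t now = time(NULL);

            if (now == last)
                    return 0;       /* suppressed */
            last = now;
            return 1;               /* allowed to print */
    }

    int main(void)
    {
            int i, printed = 0;

            for (i = 0; i < 1000000; i++)
                    if (my_ratelimit()) {
                            fprintf(stderr, "debug message %d\n", i);
                            printed++;
                    }

            printf("printed %d of 1000000 candidate messages\n", printed);
            return 0;
    }
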
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 2da514b16d..b03e90649e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -28,6 +28,7 @@
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
 #include <linux/proc_fs.h>
+#include <linux/cpumask.h>
 
 #include <linux/netfilter_ipv6/ip6_tables.h>
 
@@ -950,8 +951,10 @@ translate_table(const char *name,
         }
 
         /* And one copy for every other CPU */
-        for (i = 1; i < num_possible_cpus(); i++) {
-                memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+        for_each_cpu(i) {
+                if (i == 0)
+                        continue;
+                memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
                        newinfo->entries,
                        SMP_ALIGN(newinfo->size));
         }
@@ -973,6 +976,7 @@ replace_table(struct ip6t_table *table,
         unsigned int i;
 
         for (i = 0; i < num_possible_cpus(); i++) {
+        for_each_cpu(i) {
                 table_base =
                         (void *)newinfo->entries
                         + TABLE_OFFSET(newinfo, i);
@@ -1019,7 +1023,7 @@ get_counters(const struct ip6t_table_info *t,
         unsigned int cpu;
         unsigned int i;
 
-        for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+        for_each_cpu(cpu) {
                 i = 0;
                 IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
                                    t->size,
@@ -1153,7 +1157,8 @@ do_replace(void __user *user, unsigned int len)
                 return -ENOMEM;
 
         newinfo = vmalloc(sizeof(struct ip6t_table_info)
-                          + SMP_ALIGN(tmp.size) * num_possible_cpus());
+                          + SMP_ALIGN(tmp.size) *
+                                (highest_possible_processor_id()+1));
         if (!newinfo)
                 return -ENOMEM;
 
@@ -1467,7 +1472,8 @@ int ip6t_register_table(struct ip6t_table *table,
                 = { 0, 0, 0, { 0 }, { 0 }, { } };
 
         newinfo = vmalloc(sizeof(struct ip6t_table_info)
-                          + SMP_ALIGN(repl->size) * num_possible_cpus());
+                          + SMP_ALIGN(repl->size) *
+                                (highest_possible_processor_id()+1));
         if (!newinfo)
                 return -ENOMEM;
 
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 45d3bc0812..81510da317 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -72,9 +72,11 @@ config NET_SCH_CLK_GETTIMEOFDAY
           Choose this if you need a high resolution clock source but can't use
           the CPU's cycle counter.
 
+# don't allow on SMP x86 because they can have unsynchronized TSCs.
+# gettimeofday is a good alternative
 config NET_SCH_CLK_CPU
         bool "CPU cycle counter"
-        depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64
+        depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64
         help
           Say Y here if you want to use the CPU's cycle counter as clock source.
           This is a cheap and high resolution clock source, but on some
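
The new Kconfig comment is the whole rationale for the dependency change: on SMP x86 systems of this era the per-CPU TSCs are not guaranteed to be synchronized, so cycle-counter timestamps taken on different CPUs cannot be compared, and gettimeofday is the safer packet-scheduler clock. For illustration only, a minimal x86-specific read of the counter in question (assumes gcc or clang providing __rdtsc()):

    #include <stdio.h>
    #include <stdint.h>
    #include <x86intrin.h>          /* __rdtsc(); gcc/clang on x86 */

    int main(void)
    {
            /*
             * Two reads on the same CPU give a meaningful delta; readings
             * taken on different CPUs do not when the TSCs are unsynchronized,
             * which is why the option is now disabled for SMP x86.
             */
            uint64_t a = __rdtsc();
            uint64_t b = __rdtsc();

            printf("tsc delta on this CPU: %llu cycles\n",
                   (unsigned long long)(b - a));
            return 0;
    }
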