-rw-r--r--  CREDITS | 2
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/i386/Kconfig | 1
-rw-r--r--  arch/i386/kernel/cpu/amd.c | 8
-rw-r--r--  arch/i386/kernel/cpu/centaur.c | 8
-rw-r--r--  arch/i386/kernel/cpu/common.c | 11
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c | 18
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 1
-rw-r--r--  arch/i386/kernel/cpu/nexgen.c | 8
-rw-r--r--  arch/i386/kernel/cpu/rise.c | 8
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c | 10
-rw-r--r--  arch/i386/kernel/cpu/umc.c | 8
-rw-r--r--  arch/i386/kernel/nmi.c | 2
-rw-r--r--  arch/i386/kernel/process.c | 6
-rw-r--r--  arch/i386/kernel/traps.c | 9
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 3
-rw-r--r--  block/elevator.c | 3
-rw-r--r--  block/ll_rw_blk.c | 40
-rw-r--r--  drivers/block/Kconfig | 8
-rw-r--r--  drivers/block/pktcdvd.c | 58
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  fs/file.c | 3
-rw-r--r--  fs/fuse/dev.c | 40
-rw-r--r--  fs/jbd/transaction.c | 10
-rw-r--r--  fs/namei.c | 37
-rw-r--r--  include/asm-i386/system.h | 2
-rw-r--r--  include/asm-x86_64/numa.h | 7
-rw-r--r--  include/asm-x86_64/system.h | 2
-rw-r--r--  include/linux/jbd.h | 4
-rw-r--r--  include/linux/netfilter_ipv4/ipt_connbytes.h | 4
-rw-r--r--  include/linux/netfilter_ipv4/ipt_policy.h | 22
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_policy.h | 22
-rw-r--r--  include/linux/pktcdvd.h | 8
-rw-r--r--  include/linux/reiserfs_acl.h | 6
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 15
-rw-r--r--  kernel/intermodule.c | 3
-rw-r--r--  kernel/sched.c | 16
-rw-r--r--  mm/hugetlb.c | 9
-rw-r--r--  mm/page_alloc.c | 10
-rw-r--r--  mm/slab.c | 169
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 10
-rw-r--r--  net/bridge/netfilter/ebtables.c | 7
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/utils.c | 4
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 7
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_tftp.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 7
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 26
-rw-r--r--  net/ipv4/netfilter/ipt_policy.c | 11
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 7
-rw-r--r--  net/ipv6/netfilter/ip6t_policy.c | 7
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nfnetlink_log.c | 20
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 3
-rw-r--r--  net/socket.c | 2
-rw-r--r--  scripts/kconfig/Makefile | 12
-rw-r--r--  security/selinux/Kconfig | 2
-rw-r--r--  security/selinux/Makefile | 4
-rw-r--r--  security/selinux/hooks.c | 21
67 files changed, 499 insertions(+), 281 deletions(-)
diff --git a/CREDITS b/CREDITS
index 8e577ce4abeb..6957ef4efab3 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3101,7 +3101,7 @@ S: Minto, NSW, 2566
 S: Australia
 
 N: Stephen Smalley
-E: sds@epoch.ncsc.mil
+E: sds@tycho.nsa.gov
 D: portions of the Linux Security Module (LSM) framework and security modules
 
 N: Chris Smith
diff --git a/MAINTAINERS b/MAINTAINERS
index b6cbac5dbfd5..11d44daa6025 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2298,7 +2298,7 @@ S: Supported
 
 SELINUX SECURITY MODULE
 P:	Stephen Smalley
-M:	sds@epoch.ncsc.mil
+M:	sds@tycho.nsa.gov
 P:	James Morris
 M:	jmorris@namei.org
 L:	linux-kernel@vger.kernel.org (kernel issues)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d86c865a7cd2..0afec8566e7b 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -442,6 +442,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
 	bool "64GB"
+	depends on X86_CMPXCHG64
 	help
 	  Select this if you have a 32-bit processor and more than 4
 	  gigabytes of physical RAM.
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a4e91a..0810f81f2a05 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
 }
 
 //early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_AMD] = NULL;
+	return 0;
+}
+
+late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 0dd92a23d622..f52669ecb93f 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -470,3 +470,11 @@ int __init centaur_init_cpu(void)
 }
 
 //early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+	return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26ec2b6..7eb9213734a3 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
 
 static struct cpu_dev default_cpu = {
 	.c_init	= default_init,
+	.c_vendor = "Unknown",
 };
 static struct cpu_dev * this_cpu = &default_cpu;
 
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
+	static int printed;
 
154 for (i = 0; i < X86_VENDOR_NUM; i++) { 156 for (i = 0; i < X86_VENDOR_NUM; i++) {
155 if (cpu_devs[i]) { 157 if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 				c->x86_vendor = i;
 				if (!early)
 					this_cpu = cpu_devs[i];
-				break;
+				return;
 			}
 		}
 	}
+	if (!printed) {
+		printed++;
+		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+		printk(KERN_ERR "CPU: Your system may be unstable.\n");
+	}
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
 }
 
 
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 75015975d038..00f2e058797c 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
 /*
  * Handle National Semiconductor branded processors
  */
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
 {
 	/* There may be GX1 processors in the wild that are branded
 	 * NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
 
 //early_arch_initcall(cyrix_init_cpu);
 
+static int __init cyrix_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_CYRIX] = NULL;
+	return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
 static struct cpu_dev nsc_cpu_dev __initdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
 }
 
 //early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_NSC] = NULL;
+	return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index af591c73345f..ffe58cee0c48 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -152,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
 	return 0;
 }
 
+/* will only be called once; __init is safe here */
 static int __init find_num_cache_leaves(void)
 {
 	unsigned int eax, ebx, ecx, edx;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a260a5c..ad87fa58058d 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
 }
 
 //early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+	return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425628ca..d08d5a2811c8 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
 }
 
 //early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_RISE] = NULL;
+	return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc426380366b..bdbeb77f4e22 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
 {
 	u32 xlvl;
 	generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
 }
 
 //early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+	return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad559d5..2cd988f6dc55 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
 }
 
 //early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+	cpu_devs[X86_VENDOR_UMC] = NULL;
+	return 0;
+}
+
+late_initcall(umc_exit_cpu);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1cb..63f39a7e2c96 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
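[Annotation, not part of the patch: this commit converts several open-coded
"for (cpu = 0; cpu < NR_CPUS; cpu++)" loops (here, and later in ll_rw_blk.c,
scsi.c, file.c and sched.c) to for_each_cpu(), which in this kernel walks only
the CPUs set in cpu_possible_map. A minimal sketch of the difference, with
init_percpu_data() as a hypothetical stand-in for the per-CPU setup done at
each call site:

	int cpu;

	/* old: touches per-CPU slots for CPUs that can never exist */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		init_percpu_data(cpu);

	/* new: visits only possible CPUs */
	for_each_cpu(cpu)
		init_percpu_data(cpu);
]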
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2185377fdde1..0480454ebffa 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,8 +297,10 @@ void show_regs(struct pt_regs * regs)
 
 	if (user_mode(regs))
 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-	printk(" EFLAGS: %08lx %s (%s)\n",
-	       regs->eflags, print_tainted(), system_utsname.release);
+	printk(" EFLAGS: %08lx %s (%s %.*s)\n",
+	       regs->eflags, print_tainted(), system_utsname.release,
+	       (int)strcspn(system_utsname.version, " "),
+	       system_utsname.version);
 	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
 		regs->eax,regs->ebx,regs->ecx,regs->edx);
 	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0aaebf3e1cfa..b814dbdcc91e 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -166,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task,
 		stack = (unsigned long*)context->previous_esp;
 		if (!stack)
 			break;
-		printk(KERN_EMERG " =======================\n");
+		printk(log_lvl);
+		printk(" =======================\n");
 	}
 }
 
@@ -239,9 +240,11 @@ void show_registers(struct pt_regs *regs)
 	}
 	print_modules();
 	printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
-		"EFLAGS: %08lx (%s) \n",
+		"EFLAGS: %08lx (%s %.*s) \n",
 		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-		print_tainted(), regs->eflags, system_utsname.release);
+		print_tainted(), regs->eflags, system_utsname.release,
+		(int)strcspn(system_utsname.version, " "),
+		system_utsname.version);
 	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
 	printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
 		regs->eax, regs->ebx, regs->ecx, regs->edx);
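[Annotation, not part of the patch: the "%.*s" plus strcspn() idiom added in
process.c and traps.c prints system_utsname.version only up to its first
space, so oops output gains the "#<buildnr>" tag without the full build date.
A self-contained user-space sketch of the same idiom:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char version[] = "#1 SMP Mon Jan 16 2006";
		/* the int argument gives the precision: stop at the first space */
		printf("(%.*s)\n", (int)strcspn(version, " "), version);
		return 0;	/* prints "(#1)" */
	}
]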
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 67e4e28f4df8..a28756ef7cef 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -59,7 +59,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
-#include <asm/numa.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -891,7 +890,6 @@ do_rest:
 	if (boot_error) {
 		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
 		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
 		cpu_clear(cpu, cpu_present_map);
 		cpu_clear(cpu, cpu_possible_map);
 		x86_cpu_to_apicid[cpu] = BAD_APICID;
@@ -1189,7 +1187,6 @@ void remove_cpu_from_maps(void)
 	cpu_clear(cpu, cpu_callout_map);
 	cpu_clear(cpu, cpu_callin_map);
 	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
-	clear_node_cpumask(cpu);
 }
 
 int __cpu_disable(void)
diff --git a/block/elevator.c b/block/elevator.c
index 96a61e029ce5..2fc269f69726 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -323,7 +323,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	/*
 	 * toggle ordered color
 	 */
-	q->ordcolor ^= 1;
+	if (blk_barrier_rq(rq))
+		q->ordcolor ^= 1;
 
 	/*
 	 * barriers implicitly indicate back insertion
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07efd2da..ee5ed98db4cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 
 int blk_do_ordered(request_queue_t *q, struct request **rqp)
 {
-	struct request *rq = *rqp, *allowed_rq;
+	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
 		}
 	}
 
+	/*
+	 * Ordered sequence in progress
+	 */
+
+	/* Special requests are not subject to ordering rules. */
+	if (!blk_fs_request(rq) &&
+	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+		return 1;
+
 	if (q->ordered & QUEUE_ORDERED_TAG) {
+		/* Ordered by tag. Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
-		return 1;
-	}
-
-	switch (blk_ordered_cur_seq(q)) {
-	case QUEUE_ORDSEQ_PREFLUSH:
-		allowed_rq = &q->pre_flush_rq;
-		break;
-	case QUEUE_ORDSEQ_BAR:
-		allowed_rq = &q->bar_rq;
-		break;
-	case QUEUE_ORDSEQ_POSTFLUSH:
-		allowed_rq = &q->post_flush_rq;
-		break;
-	default:
-		allowed_rq = NULL;
-		break;
+	} else {
+		/* Ordered by draining. Wait for turn. */
+		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+			*rqp = NULL;
 	}
 
-	if (rq != allowed_rq &&
-	    (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
-	     rq == &q->post_flush_rq))
-		*rqp = NULL;
-
 	return 1;
 }
 
@@ -3453,7 +3447,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 139cbba76180..8b1331677407 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -433,12 +433,12 @@ config CDROM_PKTCDVD_BUFFERS
 	  This controls the maximum number of active concurrent packets. More
 	  concurrent packets can increase write performance, but also require
 	  more memory. Each concurrent packet will require approximately 64Kb
-	  of non-swappable kernel memory, memory which will be allocated at
-	  pktsetup time.
+	  of non-swappable kernel memory, memory which will be allocated when
+	  a disc is opened for writing.
 
 config CDROM_PKTCDVD_WCACHE
-	bool "Enable write caching"
-	depends on CDROM_PKTCDVD
+	bool "Enable write caching (EXPERIMENTAL)"
+	depends on CDROM_PKTCDVD && EXPERIMENTAL
 	help
 	  If enabled, write caching will be set for the CD-R/W device. For now
 	  this option is dangerous unless the CD-RW media is known good, as we
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 93affeeef7bd..4e7dbcc425ff 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -43,8 +43,6 @@
  *
  *************************************************************************/
 
-#define VERSION_CODE	"v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
-
 #include <linux/pktcdvd.h>
 #include <linux/config.h>
 #include <linux/module.h>
@@ -131,7 +129,7 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
 /*
  * Allocate a packet_data struct
  */
-static struct packet_data *pkt_alloc_packet_data(void)
+static struct packet_data *pkt_alloc_packet_data(int frames)
 {
 	int i;
 	struct packet_data *pkt;
@@ -140,11 +138,12 @@ static struct packet_data *pkt_alloc_packet_data(void)
 	if (!pkt)
 		goto no_pkt;
 
-	pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
+	pkt->frames = frames;
+	pkt->w_bio = pkt_bio_alloc(frames);
 	if (!pkt->w_bio)
 		goto no_bio;
 
-	for (i = 0; i < PAGES_PER_PACKET; i++) {
+	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
 		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
 		if (!pkt->pages[i])
 			goto no_page;
@@ -152,7 +151,7 @@ static struct packet_data *pkt_alloc_packet_data(void)
 
 	spin_lock_init(&pkt->lock);
 
-	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+	for (i = 0; i < frames; i++) {
 		struct bio *bio = pkt_bio_alloc(1);
 		if (!bio)
 			goto no_rd_bio;
@@ -162,14 +161,14 @@ static struct packet_data *pkt_alloc_packet_data(void)
 	return pkt;
 
 no_rd_bio:
-	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+	for (i = 0; i < frames; i++) {
 		struct bio *bio = pkt->r_bios[i];
 		if (bio)
 			bio_put(bio);
 	}
 
 no_page:
-	for (i = 0; i < PAGES_PER_PACKET; i++)
+	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
 		if (pkt->pages[i])
 			__free_page(pkt->pages[i]);
 	bio_put(pkt->w_bio);
@@ -186,12 +185,12 @@ static void pkt_free_packet_data(struct packet_data *pkt)
 {
 	int i;
 
-	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+	for (i = 0; i < pkt->frames; i++) {
 		struct bio *bio = pkt->r_bios[i];
 		if (bio)
 			bio_put(bio);
 	}
-	for (i = 0; i < PAGES_PER_PACKET; i++)
+	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
 		__free_page(pkt->pages[i]);
 	bio_put(pkt->w_bio);
 	kfree(pkt);
@@ -206,17 +205,17 @@ static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
 	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
 		pkt_free_packet_data(pkt);
 	}
+	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
 }
 
 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
 {
 	struct packet_data *pkt;
 
-	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
-	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
-	spin_lock_init(&pd->cdrw.active_list_lock);
+	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
 	while (nr_packets > 0) {
-		pkt = pkt_alloc_packet_data();
+		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
 		if (!pkt) {
 			pkt_shrink_pktlist(pd);
 			return 0;
@@ -951,7 +950,7 @@ try_next_bio:
 
 	pd->current_sector = zone + pd->settings.size;
 	pkt->sector = zone;
-	pkt->frames = pd->settings.size >> 2;
+	BUG_ON(pkt->frames != pd->settings.size >> 2);
 	pkt->write_size = 0;
 
 	/*
@@ -1639,7 +1638,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
 	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
 	if (pd->settings.size == 0) {
 		printk("pktcdvd: detected zero packet size!\n");
-		pd->settings.size = 128;
+		return -ENXIO;
 	}
 	if (pd->settings.size > PACKET_MAX_SECTORS) {
 		printk("pktcdvd: packet size is too big\n");
@@ -1987,8 +1986,14 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
 	if ((ret = pkt_set_segment_merging(pd, q)))
 		goto out_unclaim;
 
-	if (write)
+	if (write) {
+		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+			printk("pktcdvd: not enough memory for buffers\n");
+			ret = -ENOMEM;
+			goto out_unclaim;
+		}
 		printk("pktcdvd: %lukB available on disc\n", lba << 1);
+	}
 
 	return 0;
 
@@ -2014,6 +2019,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
 	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
 	bd_release(pd->bdev);
 	blkdev_put(pd->bdev);
+
+	pkt_shrink_pktlist(pd);
 }
 
 static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
@@ -2379,12 +2386,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	/* This is safe, since we have a reference from open(). */
 	__module_get(THIS_MODULE);
 
-	if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
-		printk("pktcdvd: not enough memory for buffers\n");
-		ret = -ENOMEM;
-		goto out_mem;
-	}
-
 	pd->bdev = bdev;
 	set_blocksize(bdev, CD_FRAMESIZE);
 
@@ -2395,7 +2396,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	if (IS_ERR(pd->cdrw.thread)) {
 		printk("pktcdvd: can't start kernel thread\n");
 		ret = -ENOMEM;
-		goto out_thread;
+		goto out_mem;
 	}
 
 	proc = create_proc_entry(pd->name, 0, pkt_proc);
@@ -2406,8 +2407,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
 	return 0;
 
-out_thread:
-	pkt_shrink_pktlist(pd);
 out_mem:
 	blkdev_put(bdev);
 	/* This is safe: open() is still holding a reference. */
@@ -2503,6 +2502,10 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
 		goto out_mem;
 	pd->disk = disk;
 
+	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+	spin_lock_init(&pd->cdrw.active_list_lock);
+
 	spin_lock_init(&pd->lock);
 	spin_lock_init(&pd->iosched.lock);
 	sprintf(pd->name, "pktcdvd%d", idx);
@@ -2567,8 +2570,6 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
 
 	blkdev_put(pd->bdev);
 
-	pkt_shrink_pktlist(pd);
-
 	remove_proc_entry(pd->name, pkt_proc);
 	DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
 
@@ -2678,7 +2679,6 @@ static int __init pkt_init(void)
 
 	pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
 
-	DPRINTK("pktcdvd: %s\n", VERSION_CODE);
 	return 0;
 
 out:
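[Annotation, not part of the patch: pd->settings.size is the packet size in
512-byte sectors, so "pd->settings.size >> 2" converts sectors into 2048-byte
CD frames, and frames / FRAMES_PER_PAGE gives the pages backing one packet.
A sketch of the arithmetic for a 64kB packet on an assumed 4kB page:

	enum { CD_FRAME = 2048, PAGE_SZ = 4096 };

	unsigned int size   = 128;		/* 128 sectors = 64kB packet */
	unsigned int frames = size >> 2;	/* 128 * 512 / 2048 = 32 frames */
	unsigned int pages  = frames / (PAGE_SZ / CD_FRAME);	/* 32 / 2 = 16 */
]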
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index f9e5a23697a1..c08ddac3717d 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -732,7 +732,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid); 732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt)); 733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
734 734
735 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT)) 735 if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
736 status = SUCCESS; 736 status = SUCCESS;
737 737
738 return status; 738 return status;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 245ca99a641e..c551bb84dbfb 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
 	if (error)
 		goto cleanup_sysctl;
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
 	devfs_mk_dir("scsi");
diff --git a/fs/file.c b/fs/file.c
index fd066b261c75..cea7cbea11d0 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
 	int i;
-	/* Really early - can't use for_each_cpu */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		fdtable_defer_list_init(i);
 }
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 4526da8907c6..f556a0d5c0d3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -120,9 +120,9 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
120 return do_get_request(fc); 120 return do_get_request(fc);
121} 121}
122 122
123/* Must be called with fuse_lock held */
123static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req) 124static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
124{ 125{
125 spin_lock(&fuse_lock);
126 if (req->preallocated) { 126 if (req->preallocated) {
127 atomic_dec(&fc->num_waiting); 127 atomic_dec(&fc->num_waiting);
128 list_add(&req->list, &fc->unused_list); 128 list_add(&req->list, &fc->unused_list);
@@ -134,11 +134,19 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 		fc->outstanding_debt--;
 	else
 		up(&fc->outstanding_sem);
-	spin_unlock(&fuse_lock);
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
+	if (atomic_dec_and_test(&req->count)) {
+		spin_lock(&fuse_lock);
+		fuse_putback_request(fc, req);
+		spin_unlock(&fuse_lock);
+	}
+}
+
+static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
+{
 	if (atomic_dec_and_test(&req->count))
 		fuse_putback_request(fc, req);
 }
@@ -163,26 +171,36 @@ void fuse_release_background(struct fuse_req *req)
  * still waiting), the 'end' callback is called if given, else the
  * reference to the request is released
  *
+ * Releasing extra reference for foreground requests must be done
+ * within the same locked region as setting state to finished. This
+ * is because fuse_reset_request() may be called after request is
+ * finished and it must be the sole possessor. If request is
+ * interrupted and put in the background, it will return with an error
+ * and hence never be reset and reused.
+ *
  * Called with fuse_lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-	req->end = NULL;
 	list_del(&req->list);
 	req->state = FUSE_REQ_FINISHED;
-	spin_unlock(&fuse_lock);
-	if (req->background) {
+	if (!req->background) {
+		wake_up(&req->waitq);
+		fuse_put_request_locked(fc, req);
+		spin_unlock(&fuse_lock);
+	} else {
+		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+		req->end = NULL;
+		spin_unlock(&fuse_lock);
 		down_read(&fc->sbput_sem);
 		if (fc->mounted)
 			fuse_release_background(req);
 		up_read(&fc->sbput_sem);
+		if (end)
+			end(fc, req);
+		else
+			fuse_put_request(fc, req);
 	}
-	wake_up(&req->waitq);
-	if (end)
-		end(fc, req);
-	else
-		fuse_put_request(fc, req);
 }
 
 /*
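[Annotation, not part of the patch: for foreground requests the rework above
drops the extra reference via fuse_put_request_locked() inside the same
fuse_lock critical section that sets FUSE_REQ_FINISHED, so no other path can
observe the finished state and reuse the request before the refcount settles.
A sketch of that locked dec-and-test pattern, with release() as a hypothetical
helper:

	spin_lock(&lock);
	obj->state = FINISHED;		/* publish completion... */
	wake_up(&obj->waitq);
	if (atomic_dec_and_test(&obj->count))
		release(obj);		/* ...and drop our ref under the same lock */
	spin_unlock(&lock);
]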
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 429f4b263cf1..ca917973c2c0 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1308,6 +1308,7 @@ int journal_stop(handle_t *handle)
1308 transaction_t *transaction = handle->h_transaction; 1308 transaction_t *transaction = handle->h_transaction;
1309 journal_t *journal = transaction->t_journal; 1309 journal_t *journal = transaction->t_journal;
1310 int old_handle_count, err; 1310 int old_handle_count, err;
1311 pid_t pid;
1311 1312
1312 J_ASSERT(transaction->t_updates > 0); 1313 J_ASSERT(transaction->t_updates > 0);
1313 J_ASSERT(journal_current_handle() == handle); 1314 J_ASSERT(journal_current_handle() == handle);
@@ -1333,8 +1334,15 @@ int journal_stop(handle_t *handle)
 	 * It doesn't cost much - we're about to run a commit and sleep
 	 * on IO anyway. Speeds up many-threaded, many-dir operations
 	 * by 30x or more...
+	 *
+	 * But don't do this if this process was the most recent one to
+	 * perform a synchronous write. We do this to detect the case where a
+	 * single process is doing a stream of sync writes. No point in waiting
+	 * for joiners in that case.
 	 */
-	if (handle->h_sync) {
+	pid = current->pid;
+	if (handle->h_sync && journal->j_last_sync_writer != pid) {
+		journal->j_last_sync_writer = pid;
 		do {
 			old_handle_count = transaction->t_handle_count;
 			schedule_timeout_uninterruptible(1);
diff --git a/fs/namei.c b/fs/namei.c
index 7ac9fb4acb2c..faf61c35308c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -790,7 +790,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
 
 	inode = nd->dentry->d_inode;
 	if (nd->depth)
-		lookup_flags = LOOKUP_FOLLOW;
+		lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
 
 	/* At this point we know we have a real path component. */
 	for(;;) {
@@ -885,7 +885,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
 last_with_slashes:
 	lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 last_component:
-	nd->flags &= ~LOOKUP_CONTINUE;
+	/* Clear LOOKUP_CONTINUE iff it was previously unset */
+	nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
 	if (lookup_flags & LOOKUP_PARENT)
 		goto lookup_parent;
 	if (this.name[0] == '.') switch (this.len) {
@@ -1069,6 +1070,8 @@ static int fastcall do_path_lookup(int dfd, const char *name,
 			 unsigned int flags, struct nameidata *nd)
 {
 	int retval = 0;
+	int fput_needed;
+	struct file *file;
 
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags;
@@ -1090,29 +1093,22 @@ static int fastcall do_path_lookup(int dfd, const char *name,
 		nd->mnt = mntget(current->fs->pwdmnt);
 		nd->dentry = dget(current->fs->pwd);
 	} else {
-		struct file *file;
-		int fput_needed;
 		struct dentry *dentry;
 
 		file = fget_light(dfd, &fput_needed);
-		if (!file) {
-			retval = -EBADF;
-			goto out_fail;
-		}
+		retval = -EBADF;
+		if (!file)
+			goto unlock_fail;
 
 		dentry = file->f_dentry;
 
-		if (!S_ISDIR(dentry->d_inode->i_mode)) {
-			retval = -ENOTDIR;
-			fput_light(file, fput_needed);
-			goto out_fail;
-		}
+		retval = -ENOTDIR;
+		if (!S_ISDIR(dentry->d_inode->i_mode))
+			goto fput_unlock_fail;
 
 		retval = file_permission(file, MAY_EXEC);
-		if (retval) {
-			fput_light(file, fput_needed);
-			goto out_fail;
-		}
+		if (retval)
+			goto fput_unlock_fail;
 
 		nd->mnt = mntget(file->f_vfsmnt);
 		nd->dentry = dget(dentry);
@@ -1126,7 +1122,12 @@ out:
 	if (unlikely(current->audit_context
 		     && nd && nd->dentry && nd->dentry->d_inode))
 		audit_inode(name, nd->dentry->d_inode, flags);
-out_fail:
+	return retval;
+
+fput_unlock_fail:
+	fput_light(file, fput_needed);
+unlock_fail:
+	read_unlock(&current->fs->lock);
 	return retval;
 }
 
1132 1133
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 36a92ed6a9d0..399145a247f2 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -507,7 +507,7 @@ struct alt_instr {
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
 #define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index dffe276ca2df..34e434ce3268 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -22,15 +22,8 @@ extern void numa_set_node(int cpu, int node);
22extern unsigned char apicid_to_node[256]; 22extern unsigned char apicid_to_node[256];
23#ifdef CONFIG_NUMA 23#ifdef CONFIG_NUMA
24extern void __init init_cpu_to_node(void); 24extern void __init init_cpu_to_node(void);
25
26static inline void clear_node_cpumask(int cpu)
27{
28 clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
29}
30
31#else 25#else
32#define init_cpu_to_node() do {} while (0) 26#define init_cpu_to_node() do {} while (0)
33#define clear_node_cpumask(cpu) do {} while (0)
34#endif 27#endif
35 28
36#define NUMA_NO_NODE 0xff 29#define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index a73f0c789d8b..b7f66034ae7a 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -327,7 +327,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define wmb()	asm volatile("" ::: "memory")
 #endif
 #define read_barrier_depends()	do {} while(0)
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
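[Annotation, not part of the patch: xchg() returns the previous value, but
set_mb() only wants the implied barrier semantics, so both set_mb hunks cast
the result to (void) to record that the value is deliberately discarded and to
quiet "value computed is not used" warnings. The idiom in miniature:

	/* sketch; xchg_sketch() stands in for the arch xchg() primitive */
	extern long xchg_sketch(long *ptr, long val);

	#define set_mb_sketch(var, value) \
		do { (void) xchg_sketch(&(var), (value)); } while (0)
]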
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 558cb4c26ec9..751bb3849467 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -23,6 +23,7 @@
 #define jfs_debug jbd_debug
 #else
 
+#include <linux/types.h>
 #include <linux/buffer_head.h>
 #include <linux/journal-head.h>
 #include <linux/stddef.h>
@@ -618,6 +619,7 @@ struct transaction_s
  * @j_wbuf: array of buffer_heads for journal_commit_transaction
  * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
  *	number that will fit in j_blocksize
+ * @j_last_sync_writer: most recent pid which did a synchronous write
  * @j_private: An opaque pointer to fs-private information.
  */
 
@@ -807,6 +809,8 @@ struct journal_s
 	struct buffer_head	**j_wbuf;
 	int			j_wbufsize;
 
+	pid_t			j_last_sync_writer;
+
 	/*
 	 * An opaque pointer to fs-private information. ext3 puts its
 	 * superblock pointer here
diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h b/include/linux/netfilter_ipv4/ipt_connbytes.h
index b04dfa3083c9..f63e6ee91113 100644
--- a/include/linux/netfilter_ipv4/ipt_connbytes.h
+++ b/include/linux/netfilter_ipv4/ipt_connbytes.h
@@ -1,10 +1,10 @@
 #ifndef _IPT_CONNBYTES_H
 #define _IPT_CONNBYTES_H
 
-#include <net/netfilter/xt_connbytes.h>
+#include <linux/netfilter/xt_connbytes.h>
 #define ipt_connbytes_what xt_connbytes_what
 
-#define IPT_CONNBYTES_PKTS	XT_CONNBYTES_PACKETS
+#define IPT_CONNBYTES_PKTS	XT_CONNBYTES_PKTS
 #define IPT_CONNBYTES_BYTES	XT_CONNBYTES_BYTES
 #define IPT_CONNBYTES_AVGPKT	XT_CONNBYTES_AVGPKT
 
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index 7fd1bec453f1..a3f6eff39d33 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -27,16 +27,22 @@ struct ipt_policy_spec
27 reqid:1; 27 reqid:1;
28}; 28};
29 29
30union ipt_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
30struct ipt_policy_elem 36struct ipt_policy_elem
31{ 37{
32 u_int32_t saddr; 38 union ipt_policy_addr saddr;
33 u_int32_t smask; 39 union ipt_policy_addr smask;
34 u_int32_t daddr; 40 union ipt_policy_addr daddr;
35 u_int32_t dmask; 41 union ipt_policy_addr dmask;
36 u_int32_t spi; 42 u_int32_t spi;
37 u_int32_t reqid; 43 u_int32_t reqid;
38 u_int8_t proto; 44 u_int8_t proto;
39 u_int8_t mode; 45 u_int8_t mode;
40 46
41 struct ipt_policy_spec match; 47 struct ipt_policy_spec match;
42 struct ipt_policy_spec invert; 48 struct ipt_policy_spec invert;
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 5a93afcd2ff1..671bd818300f 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -27,16 +27,22 @@ struct ip6t_policy_spec
27 reqid:1; 27 reqid:1;
28}; 28};
29 29
30union ip6t_policy_addr
31{
32 struct in_addr a4;
33 struct in6_addr a6;
34};
35
30struct ip6t_policy_elem 36struct ip6t_policy_elem
31{ 37{
32 struct in6_addr saddr; 38 union ip6t_policy_addr saddr;
33 struct in6_addr smask; 39 union ip6t_policy_addr smask;
34 struct in6_addr daddr; 40 union ip6t_policy_addr daddr;
35 struct in6_addr dmask; 41 union ip6t_policy_addr dmask;
36 u_int32_t spi; 42 u_int32_t spi;
37 u_int32_t reqid; 43 u_int32_t reqid;
38 u_int8_t proto; 44 u_int8_t proto;
39 u_int8_t mode; 45 u_int8_t mode;
40 46
41 struct ip6t_policy_spec match; 47 struct ip6t_policy_spec match;
42 struct ip6t_policy_spec invert; 48 struct ip6t_policy_spec invert;
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 2c177e4c8f22..8a94c717c266 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -114,7 +114,7 @@ struct pkt_ctrl_command {
114 114
115struct packet_settings 115struct packet_settings
116{ 116{
117 __u8 size; /* packet size in (512 byte) sectors */ 117 __u32 size; /* packet size in (512 byte) sectors */
118 __u8 fp; /* fixed packets */ 118 __u8 fp; /* fixed packets */
119 __u8 link_loss; /* the rest is specified 119 __u8 link_loss; /* the rest is specified
120 * as per Mt Fuji */ 120 * as per Mt Fuji */
@@ -169,8 +169,8 @@ struct packet_iosched
 #if (PAGE_SIZE % CD_FRAMESIZE) != 0
 #error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
 #endif
-#define PACKET_MAX_SIZE		32
-#define PAGES_PER_PACKET	(PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE)
+#define PACKET_MAX_SIZE		128
+#define FRAMES_PER_PAGE		(PAGE_SIZE / CD_FRAMESIZE)
 #define PACKET_MAX_SECTORS	(PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
 
 enum packet_data_state {
@@ -219,7 +219,7 @@ struct packet_data
 	atomic_t		io_errors;	/* Number of read/write errors during IO */
 
 	struct bio		*r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
-	struct page		*pages[PAGES_PER_PACKET];
+	struct page		*pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
 
 	int			cache_valid;	/* If non-zero, the data for the zone defined */
 						/* by the sector variable is completely cached */
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index 0a3605099c44..806ec5b06707 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -58,9 +58,13 @@ extern struct reiserfs_xattr_handler posix_acl_default_handler;
 extern struct reiserfs_xattr_handler posix_acl_access_handler;
 #else
 
-#define reiserfs_get_acl NULL
 #define reiserfs_cache_default_acl(inode) 0
 
+static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
+{
+	return NULL;
+}
+
 static inline int reiserfs_xattr_posix_acl_init(void)
 {
 	return 0;
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 67856eb93b43..dac43b15a5b0 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -88,12 +88,6 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
 extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
-static inline struct nf_conntrack_l3proto *
-__nf_ct_l3proto_find(u_int16_t l3proto)
-{
-	return nf_ct_l3protos[l3proto];
-}
-
 extern struct nf_conntrack_l3proto *
 nf_ct_l3proto_find_get(u_int16_t l3proto);
 
@@ -103,4 +97,13 @@ extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
+
+static inline struct nf_conntrack_l3proto *
+__nf_ct_l3proto_find(u_int16_t l3proto)
+{
+	if (unlikely(l3proto >= AF_MAX))
+		return &nf_conntrack_generic_l3proto;
+	return nf_ct_l3protos[l3proto];
+}
+
 #endif /*_NF_CONNTRACK_L3PROTO_H*/
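[Annotation, not part of the patch: __nf_ct_l3proto_find() moves below the
extern declaration of nf_conntrack_generic_l3proto that it now needs, and the
added bounds check makes an out-of-range layer-3 protocol number fall back to
the generic handler instead of indexing past nf_ct_l3protos[AF_MAX]. The
guard-then-index pattern in a self-contained sketch:

	#define TABLE_MAX 32

	static int table[TABLE_MAX];
	static int generic_entry;

	static inline int *lookup(unsigned int idx)
	{
		if (idx >= TABLE_MAX)		/* clamp to the safe fallback */
			return &generic_entry;
		return &table[idx];
	}
]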
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
index 0cbe633420fb..55b1e5b85db9 100644
--- a/kernel/intermodule.c
+++ b/kernel/intermodule.c
@@ -179,3 +179,6 @@ EXPORT_SYMBOL(inter_module_register);
 EXPORT_SYMBOL(inter_module_unregister);
 EXPORT_SYMBOL(inter_module_get_request);
 EXPORT_SYMBOL(inter_module_put);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/sched.c b/kernel/sched.c
index f77f23f8f479..bc38804e40dd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5551,13 +5551,15 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
 		-1
 #endif
 	);
-	printk("migration_cost=");
-	for (distance = 0; distance <= max_distance; distance++) {
-		if (distance)
-			printk(",");
-		printk("%ld", (long)migration_cost[distance] / 1000);
+	if (system_state == SYSTEM_BOOTING) {
+		printk("migration_cost=");
+		for (distance = 0; distance <= max_distance; distance++) {
+			if (distance)
+				printk(",");
+			printk("%ld", (long)migration_cost[distance] / 1000);
+		}
+		printk("\n");
 	}
-	printk("\n");
 	j1 = jiffies;
 	if (migration_debug)
 		printk("migration: %ld seconds\n", (j1-j0)/HZ);
@@ -6109,7 +6111,7 @@ void __init sched_init(void)
 	runqueue_t *rq;
 	int i, j, k;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		prio_array_t *array;
 
 		rq = cpu_rq(i);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b21d78c941b5..ceb3ebb3c399 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -444,6 +444,15 @@ retry:
 	page = alloc_huge_page(vma, address);
 	if (!page) {
 		hugetlb_put_quota(mapping);
+		/*
+		 * No huge pages available. So this is an OOM
+		 * condition but we do not want to trigger the OOM
+		 * killer, so we return VM_FAULT_SIGBUS.
+		 *
+		 * A program using hugepages may fault with Bus Error
+		 * because no huge pages are available in the cpuset, per
+		 * memory policy or because all are in use!
+		 */
 		goto out;
 	}
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4202d9..dde04ff4be31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	int cpu = 0;
 
-	memset(ret, 0, sizeof(*ret));
+	memset(ret, 0, nr * sizeof(unsigned long));
 	cpus_and(*cpumask, *cpumask, cpu_online_map);
 
 	cpu = first_cpu(*cpumask);
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
+		if (!cpu_isset(cpu, *cpumask))
+			continue;
+
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 
 		cpu = next_cpu(cpu, *cpumask);
 
-		if (cpu < NR_CPUS)
+		if (likely(cpu < NR_CPUS))
 			prefetch(&per_cpu(page_states, cpu));
 
 		out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-	boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
  * Dynamically allocate memory for the
diff --git a/mm/slab.c b/mm/slab.c
index 71370256a7eb..9cc049a942c6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -294,6 +294,7 @@ struct kmem_list3 {
294 unsigned long next_reap; 294 unsigned long next_reap;
295 int free_touched; 295 int free_touched;
296 unsigned int free_limit; 296 unsigned int free_limit;
297 unsigned int colour_next; /* Per-node cache coloring */
297 spinlock_t list_lock; 298 spinlock_t list_lock;
298 struct array_cache *shared; /* shared per node */ 299 struct array_cache *shared; /* shared per node */
299 struct array_cache **alien; /* on other nodes */ 300 struct array_cache **alien; /* on other nodes */
@@ -344,6 +345,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 	INIT_LIST_HEAD(&parent->slabs_free);
 	parent->shared = NULL;
 	parent->alien = NULL;
+	parent->colour_next = 0;
 	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
@@ -390,7 +392,6 @@ struct kmem_cache {
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	unsigned int colour_next;	/* cache colouring */
 	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
 	unsigned int dflags;		/* dynamic flags */
@@ -883,14 +884,14 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
883 } 884 }
884} 885}
885 886
886static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3) 887static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
887{ 888{
888 int i = 0; 889 int i = 0;
889 struct array_cache *ac; 890 struct array_cache *ac;
890 unsigned long flags; 891 unsigned long flags;
891 892
892 for_each_online_node(i) { 893 for_each_online_node(i) {
893 ac = l3->alien[i]; 894 ac = alien[i];
894 if (ac) { 895 if (ac) {
895 spin_lock_irqsave(&ac->lock, flags); 896 spin_lock_irqsave(&ac->lock, flags);
896 __drain_alien_cache(cachep, ac, i); 897 __drain_alien_cache(cachep, ac, i);
@@ -900,8 +901,11 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
900} 901}
901#else 902#else
902#define alloc_alien_cache(node, limit) do { } while (0) 903#define alloc_alien_cache(node, limit) do { } while (0)
903#define free_alien_cache(ac_ptr) do { } while (0) 904#define drain_alien_cache(cachep, alien) do { } while (0)
904#define drain_alien_cache(cachep, l3) do { } while (0) 905
906static inline void free_alien_cache(struct array_cache **ac_ptr)
907{
908}
905#endif 909#endif
906 910
907static int __devinit cpuup_callback(struct notifier_block *nfb, 911static int __devinit cpuup_callback(struct notifier_block *nfb,
@@ -935,6 +939,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
935 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 939 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
936 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 940 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
937 941
942 /*
943 * The l3s don't come and go as CPUs come and
944 * go. cache_chain_mutex is sufficient
945 * protection here.
946 */
938 cachep->nodelists[node] = l3; 947 cachep->nodelists[node] = l3;
939 } 948 }
940 949
@@ -949,26 +958,47 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
949 & array caches */ 958 & array caches */
950 list_for_each_entry(cachep, &cache_chain, next) { 959 list_for_each_entry(cachep, &cache_chain, next) {
951 struct array_cache *nc; 960 struct array_cache *nc;
961 struct array_cache *shared;
962 struct array_cache **alien;
952 963
953 nc = alloc_arraycache(node, cachep->limit, 964 nc = alloc_arraycache(node, cachep->limit,
954 cachep->batchcount); 965 cachep->batchcount);
955 if (!nc) 966 if (!nc)
956 goto bad; 967 goto bad;
968 shared = alloc_arraycache(node,
969 cachep->shared * cachep->batchcount,
970 0xbaadf00d);
971 if (!shared)
972 goto bad;
973#ifdef CONFIG_NUMA
974 alien = alloc_alien_cache(node, cachep->limit);
975 if (!alien)
976 goto bad;
977#endif
957 cachep->array[cpu] = nc; 978 cachep->array[cpu] = nc;
958 979
959 l3 = cachep->nodelists[node]; 980 l3 = cachep->nodelists[node];
960 BUG_ON(!l3); 981 BUG_ON(!l3);
961 if (!l3->shared) {
962 if (!(nc = alloc_arraycache(node,
963 cachep->shared *
964 cachep->batchcount,
965 0xbaadf00d)))
966 goto bad;
967 982
968 /* we are serialised from CPU_DEAD or 983 spin_lock_irq(&l3->list_lock);
969 CPU_UP_CANCELLED by the cpucontrol lock */ 984 if (!l3->shared) {
970 l3->shared = nc; 985 /*
986 * We are serialised from CPU_DEAD or
987 * CPU_UP_CANCELLED by the cpucontrol lock
988 */
989 l3->shared = shared;
990 shared = NULL;
991 }
992#ifdef CONFIG_NUMA
993 if (!l3->alien) {
994 l3->alien = alien;
995 alien = NULL;
971 } 996 }
997#endif
998 spin_unlock_irq(&l3->list_lock);
999
1000 kfree(shared);
1001 free_alien_cache(alien);
972 } 1002 }
973 mutex_unlock(&cache_chain_mutex); 1003 mutex_unlock(&cache_chain_mutex);
974 break; 1004 break;
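The CPU_UP_PREPARE rework above allocates nc, shared and alien before taking l3->list_lock, installs whichever slots are still empty under the lock, and frees the unused spares afterwards, keeping possibly-sleeping allocations out of the critical section. A minimal pthread sketch of that allocate-outside, install-under-lock pattern (struct node and install_shared are illustrative names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        pthread_mutex_t lock;
        void *shared;   /* installed once, by whoever gets there first */
};

static void install_shared(struct node *n, size_t size)
{
        void *new = malloc(size);       /* allocate before taking the lock */

        if (!new)
                return;
        pthread_mutex_lock(&n->lock);
        if (!n->shared) {               /* still empty: install ours */
                n->shared = new;
                new = NULL;
        }
        pthread_mutex_unlock(&n->lock);
        free(new);      /* NULL if we won the race; the spare otherwise */
}

int main(void)
{
        struct node n = { PTHREAD_MUTEX_INITIALIZER, NULL };

        install_shared(&n, 128);
        install_shared(&n, 128);        /* second call frees its spare */
        printf("shared=%p\n", n.shared);
        free(n.shared);
        return 0;
}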
@@ -977,25 +1007,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
977 break; 1007 break;
978#ifdef CONFIG_HOTPLUG_CPU 1008#ifdef CONFIG_HOTPLUG_CPU
979 case CPU_DEAD: 1009 case CPU_DEAD:
1010 /*
1011 * Even if all the cpus of a node are down, we don't free the
1012 * kmem_list3 of any cache. This to avoid a race between
1013 * cpu_down, and a kmalloc allocation from another cpu for
1014 * memory from the node of the cpu going down. The list3
1015 * structure is usually allocated from kmem_cache_create() and
1016 * gets destroyed at kmem_cache_destroy().
1017 */
980 /* fall thru */ 1018 /* fall thru */
981 case CPU_UP_CANCELED: 1019 case CPU_UP_CANCELED:
982 mutex_lock(&cache_chain_mutex); 1020 mutex_lock(&cache_chain_mutex);
983 1021
984 list_for_each_entry(cachep, &cache_chain, next) { 1022 list_for_each_entry(cachep, &cache_chain, next) {
985 struct array_cache *nc; 1023 struct array_cache *nc;
1024 struct array_cache *shared;
1025 struct array_cache **alien;
986 cpumask_t mask; 1026 cpumask_t mask;
987 1027
988 mask = node_to_cpumask(node); 1028 mask = node_to_cpumask(node);
989 spin_lock_irq(&cachep->spinlock);
990 /* cpu is dead; no one can alloc from it. */ 1029 /* cpu is dead; no one can alloc from it. */
991 nc = cachep->array[cpu]; 1030 nc = cachep->array[cpu];
992 cachep->array[cpu] = NULL; 1031 cachep->array[cpu] = NULL;
993 l3 = cachep->nodelists[node]; 1032 l3 = cachep->nodelists[node];
994 1033
995 if (!l3) 1034 if (!l3)
996 goto unlock_cache; 1035 goto free_array_cache;
997 1036
998 spin_lock(&l3->list_lock); 1037 spin_lock_irq(&l3->list_lock);
999 1038
1000 /* Free limit for this kmem_list3 */ 1039 /* Free limit for this kmem_list3 */
1001 l3->free_limit -= cachep->batchcount; 1040 l3->free_limit -= cachep->batchcount;
@@ -1003,34 +1042,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
1003 free_block(cachep, nc->entry, nc->avail, node); 1042 free_block(cachep, nc->entry, nc->avail, node);
1004 1043
1005 if (!cpus_empty(mask)) { 1044 if (!cpus_empty(mask)) {
1006 spin_unlock(&l3->list_lock); 1045 spin_unlock_irq(&l3->list_lock);
1007 goto unlock_cache; 1046 goto free_array_cache;
1008 } 1047 }
1009 1048
1010 if (l3->shared) { 1049 shared = l3->shared;
1050 if (shared) {
1011 free_block(cachep, l3->shared->entry, 1051 free_block(cachep, l3->shared->entry,
1012 l3->shared->avail, node); 1052 l3->shared->avail, node);
1013 kfree(l3->shared);
1014 l3->shared = NULL; 1053 l3->shared = NULL;
1015 } 1054 }
1016 if (l3->alien) {
1017 drain_alien_cache(cachep, l3);
1018 free_alien_cache(l3->alien);
1019 l3->alien = NULL;
1020 }
1021 1055
1022 /* free slabs belonging to this node */ 1056 alien = l3->alien;
1023 if (__node_shrink(cachep, node)) { 1057 l3->alien = NULL;
1024 cachep->nodelists[node] = NULL; 1058
1025 spin_unlock(&l3->list_lock); 1059 spin_unlock_irq(&l3->list_lock);
1026 kfree(l3); 1060
1027 } else { 1061 kfree(shared);
1028 spin_unlock(&l3->list_lock); 1062 if (alien) {
1063 drain_alien_cache(cachep, alien);
1064 free_alien_cache(alien);
1029 } 1065 }
1030 unlock_cache: 1066free_array_cache:
1031 spin_unlock_irq(&cachep->spinlock);
1032 kfree(nc); 1067 kfree(nc);
1033 } 1068 }
1069 /*
1070 * In the previous loop, all the objects were freed to
1071 * the respective cache's slabs, now we can go ahead and
1072 * shrink each nodelist to its limit.
1073 */
1074 list_for_each_entry(cachep, &cache_chain, next) {
1075 l3 = cachep->nodelists[node];
1076 if (!l3)
1077 continue;
1078 spin_lock_irq(&l3->list_lock);
1079 /* free slabs belonging to this node */
1080 __node_shrink(cachep, node);
1081 spin_unlock_irq(&l3->list_lock);
1082 }
1034 mutex_unlock(&cache_chain_mutex); 1083 mutex_unlock(&cache_chain_mutex);
1035 break; 1084 break;
1036#endif 1085#endif
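The reworked CPU_DEAD path detaches l3->shared and l3->alien while holding list_lock and only kfree()s and drains them after dropping it, then shrinks the nodelists in a separate pass. A minimal pthread sketch of detach-under-lock, free-outside-lock (struct node and teardown are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct node {
        pthread_mutex_t lock;
        void *shared;
        void *alien;
};

static void teardown(struct node *n)
{
        void *shared, *alien;

        pthread_mutex_lock(&n->lock);
        shared = n->shared;             /* detach under the lock... */
        n->shared = NULL;
        alien = n->alien;
        n->alien = NULL;
        pthread_mutex_unlock(&n->lock);

        free(shared);                   /* ...free outside it */
        free(alien);
}

int main(void)
{
        struct node n = { PTHREAD_MUTEX_INITIALIZER,
                          malloc(32), malloc(32) };
        teardown(&n);
        return 0;
}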
@@ -1119,7 +1168,6 @@ void __init kmem_cache_init(void)
1119 BUG(); 1168 BUG();
1120 1169
1121 cache_cache.colour = left_over / cache_cache.colour_off; 1170 cache_cache.colour = left_over / cache_cache.colour_off;
1122 cache_cache.colour_next = 0;
1123 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + 1171 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1124 sizeof(struct slab), cache_line_size()); 1172 sizeof(struct slab), cache_line_size());
1125 1173
@@ -2011,18 +2059,16 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
2011 2059
2012 smp_call_function_all_cpus(do_drain, cachep); 2060 smp_call_function_all_cpus(do_drain, cachep);
2013 check_irq_on(); 2061 check_irq_on();
2014 spin_lock_irq(&cachep->spinlock);
2015 for_each_online_node(node) { 2062 for_each_online_node(node) {
2016 l3 = cachep->nodelists[node]; 2063 l3 = cachep->nodelists[node];
2017 if (l3) { 2064 if (l3) {
2018 spin_lock(&l3->list_lock); 2065 spin_lock_irq(&l3->list_lock);
2019 drain_array_locked(cachep, l3->shared, 1, node); 2066 drain_array_locked(cachep, l3->shared, 1, node);
2020 spin_unlock(&l3->list_lock); 2067 spin_unlock_irq(&l3->list_lock);
2021 if (l3->alien) 2068 if (l3->alien)
2022 drain_alien_cache(cachep, l3); 2069 drain_alien_cache(cachep, l3->alien);
2023 } 2070 }
2024 } 2071 }
2025 spin_unlock_irq(&cachep->spinlock);
2026} 2072}
2027 2073
2028static int __node_shrink(struct kmem_cache *cachep, int node) 2074static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2324,20 +2370,20 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2324 */ 2370 */
2325 ctor_flags |= SLAB_CTOR_ATOMIC; 2371 ctor_flags |= SLAB_CTOR_ATOMIC;
2326 2372
2327 /* About to mess with non-constant members - lock. */ 2373 /* Take the l3 list lock to change the colour_next on this node */
2328 check_irq_off(); 2374 check_irq_off();
2329 spin_lock(&cachep->spinlock); 2375 l3 = cachep->nodelists[nodeid];
2376 spin_lock(&l3->list_lock);
2330 2377
2331 /* Get colour for the slab, and calc the next value. */ 2378 /* Get colour for the slab, and calc the next value. */
2332 offset = cachep->colour_next; 2379 offset = l3->colour_next;
2333 cachep->colour_next++; 2380 l3->colour_next++;
2334 if (cachep->colour_next >= cachep->colour) 2381 if (l3->colour_next >= cachep->colour)
2335 cachep->colour_next = 0; 2382 l3->colour_next = 0;
2336 offset *= cachep->colour_off; 2383 spin_unlock(&l3->list_lock);
2337 2384
2338 spin_unlock(&cachep->spinlock); 2385 offset *= cachep->colour_off;
2339 2386
2340 check_irq_off();
2341 if (local_flags & __GFP_WAIT) 2387 if (local_flags & __GFP_WAIT)
2342 local_irq_enable(); 2388 local_irq_enable();
2343 2389
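With colour_next moved into the per-node kmem_list3, the colour for a new slab is advanced under that node's list_lock rather than the global cachep->spinlock, so concurrent growth on different nodes no longer serialises. A minimal pthread sketch of a per-node round-robin counter (next_colour and the node table are illustrative):

#include <pthread.h>
#include <stdio.h>

#define NODES 2

struct node {
        pthread_mutex_t lock;
        unsigned int colour_next;       /* per-node, not global */
};

static struct node nodes[NODES] = {
        { PTHREAD_MUTEX_INITIALIZER, 0 },
        { PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* Pick the colour offset for a new slab on this node. */
static unsigned int next_colour(int nid, unsigned int range)
{
        struct node *n = &nodes[nid];
        unsigned int c;

        pthread_mutex_lock(&n->lock);   /* only this node's growers contend */
        c = n->colour_next++;
        if (n->colour_next >= range)
                n->colour_next = 0;
        pthread_mutex_unlock(&n->lock);
        return c;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("node0=%u node1=%u\n",
                       next_colour(0, 3), next_colour(1, 3));
        return 0;
}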
@@ -2367,7 +2413,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
2367 if (local_flags & __GFP_WAIT) 2413 if (local_flags & __GFP_WAIT)
2368 local_irq_disable(); 2414 local_irq_disable();
2369 check_irq_off(); 2415 check_irq_off();
2370 l3 = cachep->nodelists[nodeid];
2371 spin_lock(&l3->list_lock); 2416 spin_lock(&l3->list_lock);
2372 2417
2373 /* Make slab active. */ 2418 /* Make slab active. */
@@ -2725,6 +2770,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
2725 BUG_ON(!l3); 2770 BUG_ON(!l3);
2726 2771
2727 retry: 2772 retry:
2773 check_irq_off();
2728 spin_lock(&l3->list_lock); 2774 spin_lock(&l3->list_lock);
2729 entry = l3->slabs_partial.next; 2775 entry = l3->slabs_partial.next;
2730 if (entry == &l3->slabs_partial) { 2776 if (entry == &l3->slabs_partial) {
@@ -3304,11 +3350,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount
3304 smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); 3350 smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
3305 3351
3306 check_irq_on(); 3352 check_irq_on();
3307 spin_lock_irq(&cachep->spinlock); 3353 spin_lock(&cachep->spinlock);
3308 cachep->batchcount = batchcount; 3354 cachep->batchcount = batchcount;
3309 cachep->limit = limit; 3355 cachep->limit = limit;
3310 cachep->shared = shared; 3356 cachep->shared = shared;
3311 spin_unlock_irq(&cachep->spinlock); 3357 spin_unlock(&cachep->spinlock);
3312 3358
3313 for_each_online_cpu(i) { 3359 for_each_online_cpu(i) {
3314 struct array_cache *ccold = new.new[i]; 3360 struct array_cache *ccold = new.new[i];
@@ -3440,7 +3486,7 @@ static void cache_reap(void *unused)
3440 3486
3441 l3 = searchp->nodelists[numa_node_id()]; 3487 l3 = searchp->nodelists[numa_node_id()];
3442 if (l3->alien) 3488 if (l3->alien)
3443 drain_alien_cache(searchp, l3); 3489 drain_alien_cache(searchp, l3->alien);
3444 spin_lock_irq(&l3->list_lock); 3490 spin_lock_irq(&l3->list_lock);
3445 3491
3446 drain_array_locked(searchp, cpu_cache_get(searchp), 0, 3492 drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3564,8 +3610,7 @@ static int s_show(struct seq_file *m, void *p)
3564 int node; 3610 int node;
3565 struct kmem_list3 *l3; 3611 struct kmem_list3 *l3;
3566 3612
3567 check_irq_on(); 3613 spin_lock(&cachep->spinlock);
3568 spin_lock_irq(&cachep->spinlock);
3569 active_objs = 0; 3614 active_objs = 0;
3570 num_slabs = 0; 3615 num_slabs = 0;
3571 for_each_online_node(node) { 3616 for_each_online_node(node) {
@@ -3573,7 +3618,8 @@ static int s_show(struct seq_file *m, void *p)
3573 if (!l3) 3618 if (!l3)
3574 continue; 3619 continue;
3575 3620
3576 spin_lock(&l3->list_lock); 3621 check_irq_on();
3622 spin_lock_irq(&l3->list_lock);
3577 3623
3578 list_for_each(q, &l3->slabs_full) { 3624 list_for_each(q, &l3->slabs_full) {
3579 slabp = list_entry(q, struct slab, list); 3625 slabp = list_entry(q, struct slab, list);
@@ -3598,9 +3644,10 @@ static int s_show(struct seq_file *m, void *p)
3598 num_slabs++; 3644 num_slabs++;
3599 } 3645 }
3600 free_objects += l3->free_objects; 3646 free_objects += l3->free_objects;
3601 shared_avail += l3->shared->avail; 3647 if (l3->shared)
3648 shared_avail += l3->shared->avail;
3602 3649
3603 spin_unlock(&l3->list_lock); 3650 spin_unlock_irq(&l3->list_lock);
3604 } 3651 }
3605 num_slabs += active_slabs; 3652 num_slabs += active_slabs;
3606 num_objs = num_slabs * cachep->num; 3653 num_objs = num_slabs * cachep->num;
@@ -3644,7 +3691,7 @@ static int s_show(struct seq_file *m, void *p)
3644 } 3691 }
3645#endif 3692#endif
3646 seq_putc(m, '\n'); 3693 seq_putc(m, '\n');
3647 spin_unlock_irq(&cachep->spinlock); 3694 spin_unlock(&cachep->spinlock);
3648 return 0; 3695 return 0;
3649} 3696}
3650 3697
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce617b3dbbb8..802baf755ef4 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -46,7 +46,7 @@
46#define PRINTR(format, args...) do { if (net_ratelimit()) \ 46#define PRINTR(format, args...) do { if (net_ratelimit()) \
47 printk(format , ## args); } while (0) 47 printk(format , ## args); } while (0)
48 48
49static unsigned int nlbufsiz = 4096; 49static unsigned int nlbufsiz = NLMSG_GOODSIZE;
50module_param(nlbufsiz, uint, 0600); 50module_param(nlbufsiz, uint, 0600);
51MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " 51MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
52 "(defaults to 4096)"); 52 "(defaults to 4096)");
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data)
98static struct sk_buff *ulog_alloc_skb(unsigned int size) 98static struct sk_buff *ulog_alloc_skb(unsigned int size)
99{ 99{
100 struct sk_buff *skb; 100 struct sk_buff *skb;
101 unsigned int n;
101 102
102 skb = alloc_skb(nlbufsiz, GFP_ATOMIC); 103 n = max(size, nlbufsiz);
104 skb = alloc_skb(n, GFP_ATOMIC);
103 if (!skb) { 105 if (!skb) {
104 PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer " 106 PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
105 "of size %ub!\n", nlbufsiz); 107 "of size %ub!\n", n);
106 if (size < nlbufsiz) { 108 if (n > size) {
107 /* try to allocate only as much as we need for 109 /* try to allocate only as much as we need for
108 * current packet */ 110 * current packet */
109 skb = alloc_skb(size, GFP_ATOMIC); 111 skb = alloc_skb(size, GFP_ATOMIC);
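The ulog_alloc_skb() change (the same fix recurs below in ipt_ULOG and nfnetlink_log) allocates max(size, nlbufsiz) so an oversized packet can never exceed a small user-configured buffer, and retries with the exact packet size only when the larger allocation fails. A minimal userspace sketch of preferred-size-with-fallback allocation (alloc_buf is an illustrative name):

#include <stdio.h>
#include <stdlib.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Allocate a buffer of the preferred batching size, but never smaller
 * than the current message; fall back to the exact message size only
 * when the bigger allocation fails. */
static void *alloc_buf(size_t size, size_t bufsiz, size_t *got)
{
        size_t n = max(size, bufsiz);
        void *p = malloc(n);

        if (!p && n > size) {   /* retry with just enough for this message */
                n = size;
                p = malloc(n);
        }
        if (p)
                *got = n;
        return p;
}

int main(void)
{
        size_t got;
        void *p = alloc_buf(9000, 4096, &got); /* jumbo frame vs. 4k buffer */

        if (p)
                printf("allocated %zu bytes\n", got);
        free(p);
        return 0;
}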
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 00729b3604f8..cbd4020cc84d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len)
934 BUGPRINT("Entries_size never zero\n"); 934 BUGPRINT("Entries_size never zero\n");
935 return -EINVAL; 935 return -EINVAL;
936 } 936 }
937 /* overflow check */
938 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
939 SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
940 return -ENOMEM;
941 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
942 return -ENOMEM;
943
937 countersize = COUNTER_OFFSET(tmp.nentries) * 944 countersize = COUNTER_OFFSET(tmp.nentries) *
938 (highest_possible_processor_id()+1); 945 (highest_possible_processor_id()+1);
939 newinfo = (struct ebt_table_info *) 946 newinfo = (struct ebt_table_info *)
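The overflow check above (the same guard is added to arp_tables, ip_tables and ip6_tables below) rejects user-supplied counts before they are multiplied into an allocation size, dividing INT_MAX down by each factor instead of performing the overflowing multiplication just to test it. A minimal, simplified sketch of the idiom (alloc_counters is illustrative):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct counter { unsigned long packets, bytes; };

/* Size a per-CPU counter table, refusing counts whose product would
 * overflow int. */
static void *alloc_counters(unsigned int nentries, unsigned int ncpus)
{
        if (nentries >= INT_MAX / ncpus / sizeof(struct counter))
                return NULL;    /* -ENOMEM in the kernel */
        return calloc(nentries * (size_t)ncpus, sizeof(struct counter));
}

int main(void)
{
        void *a = alloc_counters(1024, 4);              /* fine */
        void *b = alloc_counters(UINT_MAX / 2, 4);      /* rejected */

        printf("%s, %s\n", a ? "ok" : "rejected", b ? "ok" : "rejected");
        free(a);
        free(b);
        return 0;
}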
diff --git a/net/core/dev.c b/net/core/dev.c
index ffb82073056e..2afb0de95329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
3237 * Initialise the packet receive queues. 3237 * Initialise the packet receive queues.
3238 */ 3238 */
3239 3239
3240 for (i = 0; i < NR_CPUS; i++) { 3240 for_each_cpu(i) {
3241 struct softnet_data *queue; 3241 struct softnet_data *queue;
3242 3242
3243 queue = &per_cpu(softnet_data, i); 3243 queue = &per_cpu(softnet_data, i);
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf8673..fdc4f38bc46c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
121{ 121{
122 int i; 122 int i;
123 123
124 for (i = 0; i < NR_CPUS; i++) { 124 for_each_cpu(i) {
125 struct nrnd_state *state = &per_cpu(net_rand_state,i); 125 struct nrnd_state *state = &per_cpu(net_rand_state,i);
126 __net_srandom(state, i+jiffies); 126 __net_srandom(state, i+jiffies);
127 } 127 }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
133 unsigned long seed[NR_CPUS]; 133 unsigned long seed[NR_CPUS];
134 134
135 get_random_bytes(seed, sizeof(seed)); 135 get_random_bytes(seed, sizeof(seed));
136 for (i = 0; i < NR_CPUS; i++) { 136 for_each_cpu(i) {
137 struct nrnd_state *state = &per_cpu(net_rand_state,i); 137 struct nrnd_state *state = &per_cpu(net_rand_state,i);
138 __net_srandom(state, seed[i]); 138 __net_srandom(state, seed[i]);
139 } 139 }
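Here and in net/core/dev.c above (and in ipv4/ipv6 proc.c and net/socket.c below), 0..NR_CPUS loops become for_each_cpu(), visiting only CPUs present in the possible-CPU mask, which matters when NR_CPUS is much larger than the actual machine. A minimal userspace sketch of walking only the set bits of a mask (for_each_set is an illustrative stand-in, not the kernel macro):

#include <stdio.h>

#define MAX_CPUS 32

/* Walk only the CPUs whose bit is set, like for_each_cpu(). */
#define for_each_set(i, mask) \
        for ((i) = 0; (i) < MAX_CPUS; (i)++) \
                if ((mask) & (1u << (i)))

int main(void)
{
        unsigned int possible = 0x0000000f;     /* CPUs 0-3 exist */
        unsigned long inuse[MAX_CPUS] = { [0] = 3, [1] = 1, [2] = 4, [3] = 1 };
        unsigned long sum = 0;
        int cpu;

        for_each_set(cpu, possible)
                sum += inuse[cpu];      /* skips 28 never-set-up slots */
        printf("in use: %lu\n", sum);   /* 9 */
        return 0;
}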
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 6bc0887b0834..4d1c40972a4b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -524,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
524 iph->tos; 524 iph->tos;
525 525
526 if (ip_options_echo(&icmp_param.replyopts, skb_in)) 526 if (ip_options_echo(&icmp_param.replyopts, skb_in))
527 goto ende; 527 goto out_unlock;
528 528
529 529
530 /* 530 /*
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index afe3d8f8177d..dd1048be8a01 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len)
807 if (len != sizeof(tmp) + tmp.size) 807 if (len != sizeof(tmp) + tmp.size)
808 return -ENOPROTOOPT; 808 return -ENOPROTOOPT;
809 809
810 /* overflow check */
811 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
812 SMP_CACHE_BYTES)
813 return -ENOMEM;
814 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
815 return -ENOMEM;
816
810 newinfo = xt_alloc_table_info(tmp.size); 817 newinfo = xt_alloc_table_info(tmp.size);
811 if (!newinfo) 818 if (!newinfo)
812 return -ENOMEM; 819 return -ENOMEM;
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index c9ebbe0d2d9c..e0b5926c76f9 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1216 1216
1217 b = skb->tail; 1217 b = skb->tail;
1218 1218
1219 type |= NFNL_SUBSYS_CTNETLINK << 8; 1219 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1220 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); 1220 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1221 nfmsg = NLMSG_DATA(nlh); 1221 nfmsg = NLMSG_DATA(nlh);
1222 1222
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
1567}; 1567};
1568 1568
1569MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 1569MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
1570MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
1570 1571
1571static int __init ctnetlink_init(void) 1572static int __init ctnetlink_init(void)
1572{ 1573{
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index d3c5a371f993..4ba4463cec28 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb,
71 71
72 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; 72 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
73 exp->mask.src.ip = 0xffffffff; 73 exp->mask.src.ip = 0xffffffff;
74 exp->mask.src.u.udp.port = 0;
74 exp->mask.dst.ip = 0xffffffff; 75 exp->mask.dst.ip = 0xffffffff;
75 exp->mask.dst.u.udp.port = 0xffff; 76 exp->mask.dst.u.udp.port = 0xffff;
76 exp->mask.dst.protonum = 0xff; 77 exp->mask.dst.protonum = 0xff;
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ad438fb185b8..92c54999a19d 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -209,8 +209,8 @@ ip_nat_in(unsigned int hooknum,
209 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { 209 && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
210 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 210 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
211 211
212 if (ct->tuplehash[dir].tuple.src.ip != 212 if (ct->tuplehash[dir].tuple.dst.ip !=
213 ct->tuplehash[!dir].tuple.dst.ip) { 213 ct->tuplehash[!dir].tuple.src.ip) {
214 dst_release((*pskb)->dst); 214 dst_release((*pskb)->dst);
215 (*pskb)->dst = NULL; 215 (*pskb)->dst = NULL;
216 } 216 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2371b2062c2d..16f47c675fef 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len)
921 if (len != sizeof(tmp) + tmp.size) 921 if (len != sizeof(tmp) + tmp.size)
922 return -ENOPROTOOPT; 922 return -ENOPROTOOPT;
923 923
924 /* overflow check */
925 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
926 SMP_CACHE_BYTES)
927 return -ENOMEM;
928 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
929 return -ENOMEM;
930
924 newinfo = xt_alloc_table_info(tmp.size); 931 newinfo = xt_alloc_table_info(tmp.size);
925 if (!newinfo) 932 if (!newinfo)
926 return -ENOMEM; 933 return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 641dbc477650..180a9ea57b69 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -35,6 +35,10 @@
35 * each nlgroup you are using, so the total kernel memory usage increases 35 * each nlgroup you are using, so the total kernel memory usage increases
36 * by that factor. 36 * by that factor.
37 * 37 *
38 * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
39 * nlbufsiz is used with alloc_skb, which adds another
40 * sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead.
41 *
38 * flushtimeout: 42 * flushtimeout:
39 * Specify, after how many hundredths of a second the queue should be 43 * Specify, after how many hundredths of a second the queue should be
40 * flushed even if it is not full yet. 44 * flushed even if it is not full yet.
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
76 80
77#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) 81#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
78 82
79static unsigned int nlbufsiz = 4096; 83static unsigned int nlbufsiz = NLMSG_GOODSIZE;
80module_param(nlbufsiz, uint, 0400); 84module_param(nlbufsiz, uint, 0400);
81MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); 85MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
82 86
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data)
143static struct sk_buff *ulog_alloc_skb(unsigned int size) 147static struct sk_buff *ulog_alloc_skb(unsigned int size)
144{ 148{
145 struct sk_buff *skb; 149 struct sk_buff *skb;
150 unsigned int n;
146 151
147 /* alloc skb which should be big enough for a whole 152 /* alloc skb which should be big enough for a whole
148 * multipart message. WARNING: has to be <= 131000 153 * multipart message. WARNING: has to be <= 131000
149 * due to slab allocator restrictions */ 154 * due to slab allocator restrictions */
150 155
151 skb = alloc_skb(nlbufsiz, GFP_ATOMIC); 156 n = max(size, nlbufsiz);
157 skb = alloc_skb(n, GFP_ATOMIC);
152 if (!skb) { 158 if (!skb) {
153 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", 159 PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
154 nlbufsiz);
155 160
156 /* try to allocate only as much as we need for 161 if (n > size) {
157 * current packet */ 162 /* try to allocate only as much as we need for
163 * current packet */
158 164
159 skb = alloc_skb(size, GFP_ATOMIC); 165 skb = alloc_skb(size, GFP_ATOMIC);
160 if (!skb) 166 if (!skb)
161 PRINTR("ipt_ULOG: can't even allocate %ub\n", size); 167 PRINTR("ipt_ULOG: can't even allocate %ub\n",
168 size);
169 }
162 } 170 }
163 171
164 return skb; 172 return skb;
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
index 18ca8258a1c5..5a7a265280f9 100644
--- a/net/ipv4/netfilter/ipt_policy.c
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL");
26static inline int 26static inline int
27match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e) 27match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
28{ 28{
29#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) 29#define MATCH_ADDR(x,y,z) (!e->match.x || \
30 ((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \
31 ^ e->invert.x))
32#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
30 33
31 return MATCH(saddr, x->props.saddr.a4 & e->smask) && 34 return MATCH_ADDR(saddr, smask, x->props.saddr.a4) &&
32 MATCH(daddr, x->id.daddr.a4 & e->dmask) && 35 MATCH_ADDR(daddr, dmask, x->id.daddr.a4) &&
33 MATCH(proto, x->id.proto) && 36 MATCH(proto, x->id.proto) &&
34 MATCH(mode, x->props.mode) && 37 MATCH(mode, x->props.mode) &&
35 MATCH(spi, x->id.spi) && 38 MATCH(spi, x->id.spi) &&
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
89 return 0; 92 return 0;
90 } 93 }
91 94
92 return strict ? 1 : 0; 95 return strict ? i == info->len : 0;
93} 96}
94 97
95static int match(const struct sk_buff *skb, 98static int match(const struct sk_buff *skb,
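The rewritten MATCH_ADDR applies the rule's netmask before comparing addresses, and strict-mode match_policy_out() now succeeds only when every configured element matched (i == info->len) instead of returning 1 unconditionally. A minimal sketch of the masked compare, keeping the patch's convention that the rule address is stored pre-masked (match_addr is illustrative; addresses shown as host-order integers for brevity):

#include <stdio.h>
#include <stdint.h>

/* Compare under the mask, then XOR with the inversion flag,
 * like the fixed MATCH_ADDR(). The rule address is pre-masked. */
static int match_addr(uint32_t addr, uint32_t rule, uint32_t mask, int invert)
{
        return ((addr & mask) == rule) ^ invert;
}

int main(void)
{
        uint32_t net = 0xc0a80100;      /* 192.168.1.0/24 */
        uint32_t m = 0xffffff00;

        printf("%d\n", match_addr(0xc0a80142, net, m, 0)); /* 1: in subnet */
        printf("%d\n", match_addr(0xc0a80242, net, m, 0)); /* 0: outside */
        printf("%d\n", match_addr(0xc0a80242, net, m, 1)); /* 1: inverted */
        return 0;
}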
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc333a7..1b167c4bb3be 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
49 int res = 0; 49 int res = 0;
50 int cpu; 50 int cpu;
51 51
52 for (cpu = 0; cpu < NR_CPUS; cpu++) 52 for_each_cpu(cpu)
53 res += proto->stats[cpu].inuse; 53 res += proto->stats[cpu].inuse;
54 54
55 return res; 55 return res;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 847068fd3367..74ff56c322f4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len)
978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
979 return -EFAULT; 979 return -EFAULT;
980 980
981 /* overflow check */
982 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
983 SMP_CACHE_BYTES)
984 return -ENOMEM;
985 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
986 return -ENOMEM;
987
981 newinfo = xt_alloc_table_info(tmp.size); 988 newinfo = xt_alloc_table_info(tmp.size);
982 if (!newinfo) 989 if (!newinfo)
983 return -ENOMEM; 990 return -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
index afe1cc4c18a5..3d39ec924041 100644
--- a/net/ipv6/netfilter/ip6t_policy.c
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
26static inline int 26static inline int
27match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e) 27match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
28{ 28{
29#define MATCH_ADDR(x,y,z) (!e->match.x || \ 29#define MATCH_ADDR(x,y,z) (!e->match.x || \
30 ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x) 30 ((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \
31 ^ e->invert.x))
31#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) 32#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
32 33
33 return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) && 34 return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
91 return 0; 92 return 0;
92 } 93 }
93 94
94 return strict ? 1 : 0; 95 return strict ? i == info->len : 0;
95} 96}
96 97
97static int match(const struct sk_buff *skb, 98static int match(const struct sk_buff *skb,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e75d70e..4238b1ed8860 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
38 int res = 0; 38 int res = 0;
39 int cpu; 39 int cpu;
40 40
41 for (cpu=0; cpu<NR_CPUS; cpu++) 41 for_each_cpu(cpu)
42 res += proto->stats[cpu].inuse; 42 res += proto->stats[cpu].inuse;
43 43
44 return res; 44 return res;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 62bb509f05d4..0ce337a1d974 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
188struct nf_conntrack_protocol * 188struct nf_conntrack_protocol *
189__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) 189__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
190{ 190{
191 if (unlikely(nf_ct_protos[l3proto] == NULL)) 191 if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
192 return &nf_conntrack_generic_protocol; 192 return &nf_conntrack_generic_protocol;
193 193
194 return nf_ct_protos[l3proto][protocol]; 194 return nf_ct_protos[l3proto][protocol];
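__nf_ct_proto_find() now range-checks l3proto before indexing nf_ct_protos[], returning the generic protocol for anything out of bounds. A minimal sketch of the guard-plus-default table lookup (proto_find and the table are illustrative):

#include <stdio.h>

#define AF_MAX 4                /* illustrative table bound */

static const char *generic = "generic";
static const char *table[AF_MAX] = { [2] = "ipv4" };

/* Return a registered handler, or the generic fallback for anything
 * out of range or unregistered -- never index past the table. */
static const char *proto_find(unsigned int family)
{
        if (family >= AF_MAX || !table[family])
                return generic;
        return table[family];
}

int main(void)
{
        printf("%s %s %s\n", proto_find(2), proto_find(3), proto_find(99));
        /* ipv4 generic generic */
        return 0;
}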
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index ab0c920f0d30..6f210f399762 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -657,8 +657,6 @@ static int __init init(void)
657 /* FIXME should be configurable whether IPv4 and IPv6 FTP connections 657 /* FIXME should be configurable whether IPv4 and IPv6 FTP connections
658 are tracked or not - YK */ 658 are tracked or not - YK */
659 for (i = 0; i < ports_c; i++) { 659 for (i = 0; i < ports_c; i++) {
660 memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
661
662 ftp[i][0].tuple.src.l3num = PF_INET; 660 ftp[i][0].tuple.src.l3num = PF_INET;
663 ftp[i][1].tuple.src.l3num = PF_INET6; 661 ftp[i][1].tuple.src.l3num = PF_INET6;
664 for (j = 0; j < 2; j++) { 662 for (j = 0; j < 2; j++) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 73ab16bc7d40..9ff3463037e1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
1232 1232
1233 b = skb->tail; 1233 b = skb->tail;
1234 1234
1235 type |= NFNL_SUBSYS_CTNETLINK << 8; 1235 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1236 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); 1236 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
1237 nfmsg = NLMSG_DATA(nlh); 1237 nfmsg = NLMSG_DATA(nlh);
1238 1238
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
1589}; 1589};
1590 1590
1591MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 1591MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
1592MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
1592 1593
1593static int __init ctnetlink_init(void) 1594static int __init ctnetlink_init(void)
1594{ 1595{
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e10512e229b6..3b3c781b40c0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -37,7 +37,7 @@
37#include "../bridge/br_private.h" 37#include "../bridge/br_private.h"
38#endif 38#endif
39 39
40#define NFULNL_NLBUFSIZ_DEFAULT 4096 40#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ 41#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 42#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
43 43
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
314 unsigned int pkt_size) 314 unsigned int pkt_size)
315{ 315{
316 struct sk_buff *skb; 316 struct sk_buff *skb;
317 unsigned int n;
317 318
318 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size); 319 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
319 320
320 /* alloc skb which should be big enough for a whole multipart 321 /* alloc skb which should be big enough for a whole multipart
321 * message. WARNING: has to be <= 128k due to slab restrictions */ 322 * message. WARNING: has to be <= 128k due to slab restrictions */
322 323
323 skb = alloc_skb(inst_size, GFP_ATOMIC); 324 n = max(inst_size, pkt_size);
325 skb = alloc_skb(n, GFP_ATOMIC);
324 if (!skb) { 326 if (!skb) {
325 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", 327 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
326 inst_size); 328 inst_size);
327 329
328 /* try to allocate only as much as we need for current 330 if (n > pkt_size) {
329 * packet */ 331 /* try to allocate only as much as we need for current
332 * packet */
330 333
331 skb = alloc_skb(pkt_size, GFP_ATOMIC); 334 skb = alloc_skb(pkt_size, GFP_ATOMIC);
332 if (!skb) 335 if (!skb)
333 PRINTR("nfnetlink_log: can't even alloc %u bytes\n", 336 PRINTR("nfnetlink_log: can't even alloc %u "
334 pkt_size); 337 "bytes\n", pkt_size);
338 }
335 } 339 }
336 340
337 return skb; 341 return skb;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 18ed9c5d209c..cac38b2e147a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
825 } 825 }
826 826
827 if (nfqa[NFQA_MARK-1]) 827 if (nfqa[NFQA_MARK-1])
828 skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1])); 828 entry->skb->nfmark = ntohl(*(u_int32_t *)
829 NFA_DATA(nfqa[NFQA_MARK-1]));
829 830
830 issue_verdict(entry, verdict); 831 issue_verdict(entry, verdict);
831 instance_put(queue); 832 instance_put(queue);
diff --git a/net/socket.c b/net/socket.c
index b38a263853c3..a00851f981db 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
2078 int cpu; 2078 int cpu;
2079 int counter = 0; 2079 int counter = 0;
2080 2080
2081 for (cpu = 0; cpu < NR_CPUS; cpu++) 2081 for_each_cpu(cpu)
2082 counter += per_cpu(sockets_in_use, cpu); 2082 counter += per_cpu(sockets_in_use, cpu);
2083 2083
2084 /* It can be negative, by the way. 8) */ 2084 /* It can be negative, by the way. 8) */
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 5760e057ecba..d64aae85c378 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -123,7 +123,17 @@ KBUILD_HAVE_NLS := $(shell \
123 then echo yes ; \ 123 then echo yes ; \
124 else echo no ; fi) 124 else echo no ; fi)
125ifeq ($(KBUILD_HAVE_NLS),no) 125ifeq ($(KBUILD_HAVE_NLS),no)
126HOSTCFLAGS += -DKBUILD_NO_NLS 126 HOSTCFLAGS += -DKBUILD_NO_NLS
127else
128 KBUILD_NEED_LINTL := $(shell \
129 if echo -e "\#include <libintl.h>\nint main(int a, char** b) { gettext(\"\"); return 0; }\n" | \
130 $(HOSTCC) $(HOSTCFLAGS) -x c - -o /dev/null> /dev/null 2>&1 ; \
131 then echo no ; \
132 else echo yes ; fi)
133 ifeq ($(KBUILD_NEED_LINTL),yes)
134 HOSTLOADLIBES_conf += -lintl
135 HOSTLOADLIBES_mconf += -lintl
136 endif
127endif 137endif
128 138
129# generated files seem to need this to find local include files 139# generated files seem to need this to find local include files
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index b59582b92283..502f78f13f5f 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -1,6 +1,6 @@
1config SECURITY_SELINUX 1config SECURITY_SELINUX
2 bool "NSA SELinux Support" 2 bool "NSA SELinux Support"
3 depends on SECURITY && NET && INET 3 depends on SECURITY_NETWORK && NET && INET
4 default n 4 default n
5 help 5 help
6 This selects NSA Security-Enhanced Linux (SELinux). 6 This selects NSA Security-Enhanced Linux (SELinux).
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 06d54d9d20a5..688c0a267b62 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -4,9 +4,7 @@
4 4
5obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/ 5obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
6 6
7selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o 7selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o
8
9selinux-$(CONFIG_SECURITY_NETWORK) += netif.o
10 8
11selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o 9selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
12 10
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4ae834d89bce..b7773bf68efa 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -232,7 +232,6 @@ static void superblock_free_security(struct super_block *sb)
232 kfree(sbsec); 232 kfree(sbsec);
233} 233}
234 234
235#ifdef CONFIG_SECURITY_NETWORK
236static int sk_alloc_security(struct sock *sk, int family, gfp_t priority) 235static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
237{ 236{
238 struct sk_security_struct *ssec; 237 struct sk_security_struct *ssec;
@@ -261,7 +260,6 @@ static void sk_free_security(struct sock *sk)
261 sk->sk_security = NULL; 260 sk->sk_security = NULL;
262 kfree(ssec); 261 kfree(ssec);
263} 262}
264#endif /* CONFIG_SECURITY_NETWORK */
265 263
266/* The security server must be initialized before 264/* The security server must be initialized before
267 any labeling or access decisions can be provided. */ 265 any labeling or access decisions can be provided. */
@@ -2736,8 +2734,6 @@ static void selinux_task_to_inode(struct task_struct *p,
2736 return; 2734 return;
2737} 2735}
2738 2736
2739#ifdef CONFIG_SECURITY_NETWORK
2740
2741/* Returns error only if unable to parse addresses */ 2737/* Returns error only if unable to parse addresses */
2742static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad) 2738static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
2743{ 2739{
@@ -3556,15 +3552,6 @@ static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
3556 3552
3557#endif /* CONFIG_NETFILTER */ 3553#endif /* CONFIG_NETFILTER */
3558 3554
3559#else
3560
3561static inline int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
3562{
3563 return 0;
3564}
3565
3566#endif /* CONFIG_SECURITY_NETWORK */
3567
3568static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) 3555static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
3569{ 3556{
3570 struct task_security_struct *tsec; 3557 struct task_security_struct *tsec;
@@ -4340,7 +4327,6 @@ static struct security_operations selinux_ops = {
4340 .getprocattr = selinux_getprocattr, 4327 .getprocattr = selinux_getprocattr,
4341 .setprocattr = selinux_setprocattr, 4328 .setprocattr = selinux_setprocattr,
4342 4329
4343#ifdef CONFIG_SECURITY_NETWORK
4344 .unix_stream_connect = selinux_socket_unix_stream_connect, 4330 .unix_stream_connect = selinux_socket_unix_stream_connect,
4345 .unix_may_send = selinux_socket_unix_may_send, 4331 .unix_may_send = selinux_socket_unix_may_send,
4346 4332
@@ -4362,7 +4348,6 @@ static struct security_operations selinux_ops = {
4362 .sk_alloc_security = selinux_sk_alloc_security, 4348 .sk_alloc_security = selinux_sk_alloc_security,
4363 .sk_free_security = selinux_sk_free_security, 4349 .sk_free_security = selinux_sk_free_security,
4364 .sk_getsid = selinux_sk_getsid_security, 4350 .sk_getsid = selinux_sk_getsid_security,
4365#endif
4366 4351
4367#ifdef CONFIG_SECURITY_NETWORK_XFRM 4352#ifdef CONFIG_SECURITY_NETWORK_XFRM
4368 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, 4353 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
@@ -4440,7 +4425,7 @@ next_sb:
4440 all processes and objects when they are created. */ 4425 all processes and objects when they are created. */
4441security_initcall(selinux_init); 4426security_initcall(selinux_init);
4442 4427
4443#if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_NETFILTER) 4428#if defined(CONFIG_NETFILTER)
4444 4429
4445static struct nf_hook_ops selinux_ipv4_op = { 4430static struct nf_hook_ops selinux_ipv4_op = {
4446 .hook = selinux_ipv4_postroute_last, 4431 .hook = selinux_ipv4_postroute_last,
@@ -4501,13 +4486,13 @@ static void selinux_nf_ip_exit(void)
4501} 4486}
4502#endif 4487#endif
4503 4488
4504#else /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */ 4489#else /* CONFIG_NETFILTER */
4505 4490
4506#ifdef CONFIG_SECURITY_SELINUX_DISABLE 4491#ifdef CONFIG_SECURITY_SELINUX_DISABLE
4507#define selinux_nf_ip_exit() 4492#define selinux_nf_ip_exit()
4508#endif 4493#endif
4509 4494
4510#endif /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */ 4495#endif /* CONFIG_NETFILTER */
4511 4496
4512#ifdef CONFIG_SECURITY_SELINUX_DISABLE 4497#ifdef CONFIG_SECURITY_SELINUX_DISABLE
4513int selinux_disable(void) 4498int selinux_disable(void)