-rw-r--r--  Documentation/hw_random.txt          | 59
-rw-r--r--  Documentation/kernel-parameters.txt  |  4
-rw-r--r--  Makefile                             |  2
-rw-r--r--  drivers/char/hw_random/Kconfig       |  9
-rw-r--r--  drivers/mtd/devices/block2mtd.c      |  1
-rw-r--r--  kernel/acct.c                        | 23
-rw-r--r--  kernel/marker.c                      | 31
-rw-r--r--  kernel/printk.c                      | 83
-rw-r--r--  mm/bootmem.c                         | 25
-rw-r--r--  mm/vmscan.c                          | 27
-rw-r--r--  scripts/Makefile.modpost             |  6
-rw-r--r--  scripts/mod/file2alias.c             |  4
-rw-r--r--  scripts/mod/modpost.c                |  5
-rw-r--r--  scripts/mod/modpost.h                |  1
-rw-r--r--  security/smack/smackfs.c             | 35
15 files changed, 172 insertions(+), 143 deletions(-)
diff --git a/Documentation/hw_random.txt b/Documentation/hw_random.txt
index bb58c36b5845..690f52550c80 100644
--- a/Documentation/hw_random.txt
+++ b/Documentation/hw_random.txt
@@ -1,33 +1,26 @@
-	Hardware driver for Intel/AMD/VIA Random Number Generators (RNG)
-	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
-	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
-
 Introduction:
 
-	The hw_random device driver is software that makes use of a
+	The hw_random framework is software that makes use of a
 	special hardware feature on your CPU or motherboard,
-	a Random Number Generator (RNG).
+	a Random Number Generator (RNG).  The software has two parts:
+	a core providing the /dev/hw_random character device and its
+	sysfs support, plus a hardware-specific driver that plugs
+	into that core.
 
-	In order to make effective use of this device driver, you
+	To make the most effective use of these mechanisms, you
 	should download the support software as well.  Download the
 	latest version of the "rng-tools" package from the
 	hw_random driver's official Web site:
 
 		http://sourceforge.net/projects/gkernel/
 
-About the Intel RNG hardware, from the firmware hub datasheet:
-
-	The Firmware Hub integrates a Random Number Generator (RNG)
-	using thermal noise generated from inherently random quantum
-	mechanical properties of silicon.  When not generating new random
-	bits the RNG circuitry will enter a low power state.  Intel will
-	provide a binary software driver to give third party software
-	access to our RNG for use as a security feature. At this time,
-	the RNG is only to be used with a system in an OS-present state.
+	Those tools use /dev/hw_random to fill the kernel entropy pool,
+	which is used internally and exported by the /dev/urandom and
+	/dev/random special files.
 
 Theory of operation:
 
-	Character driver.  Using the standard open()
+	CHARACTER DEVICE.  Using the standard open()
 	and read() system calls, you can read random data from
 	the hardware RNG device.  This data is NOT CHECKED by any
 	fitness tests, and could potentially be bogus (if the
@@ -36,9 +29,37 @@ Theory of operation:
 	a security-conscious person would run fitness tests on the
 	data before assuming it is truly random.
 
-	/dev/hwrandom is char device major 10, minor 183.
+	The rng-tools package uses such tests in "rngd", and lets you
+	run them by hand with a "rngtest" utility.
+
+	/dev/hw_random is char device major 10, minor 183.
+
+	CLASS DEVICE.  There is a /sys/class/misc/hw_random node with
+	two unique attributes, "rng_available" and "rng_current".  The
+	"rng_available" attribute lists the hardware-specific drivers
+	available, while "rng_current" lists the one which is currently
+	connected to /dev/hw_random.  If your system has more than one
+	RNG available, you may change the one used by writing a name from
+	the list in "rng_available" into "rng_current".
+
+==========================================================================
+
+	Hardware driver for Intel/AMD/VIA Random Number Generators (RNG)
+	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+
+
+About the Intel RNG hardware, from the firmware hub datasheet:
+
+	The Firmware Hub integrates a Random Number Generator (RNG)
+	using thermal noise generated from inherently random quantum
+	mechanical properties of silicon.  When not generating new random
+	bits the RNG circuitry will enter a low power state.  Intel will
+	provide a binary software driver to give third party software
+	access to our RNG for use as a security feature. At this time,
+	the RNG is only to be used with a system in an OS-present state.
 
-Driver notes:
+Intel RNG Driver notes:
 
 	* FIXME: support poll(2)
 
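
The rewritten documentation above describes /dev/hw_random and its sysfs attributes in prose only. As a rough user-space illustration (not part of this patch; error handling kept minimal), pulling a few raw bytes from the device looks something like this (rngd from rng-tools is the normal consumer of the interface):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd = open("/dev/hw_random", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hw_random");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* raw, unchecked RNG output */
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}

The active and available backends mentioned above are plain sysfs attributes, so /sys/class/misc/hw_random/rng_current and rng_available can be read and written with ordinary file I/O as well.
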
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 650b0d8aa89b..508e2a2c9864 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1130,6 +1130,10 @@ and is between 256 and 4096 characters. It is defined in the file
 	memmap=nn[KMG]$ss[KMG]
 			[KNL,ACPI] Mark specific memory as reserved.
 			Region of memory to be used, from ss to ss+nn.
+			Example: Exclude memory from 0x18690000-0x1869ffff
+			         memmap=64K$0x18690000
+			         or
+			         memmap=0x10000$0x18690000
 
 	meye.*=		[HW] Set MotionEye Camera parameters
 			See Documentation/video4linux/meye.txt.
diff --git a/Makefile b/Makefile
index 7a4d34b91e03..1c4c8f87f6c5 100644
--- a/Makefile
+++ b/Makefile
@@ -189,7 +189,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 # Alternatively CROSS_COMPILE can be set in the environment.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-
+export KBUILD_BUILDHOST := $(SUBARCH)
 ARCH		?= $(SUBARCH)
 CROSS_COMPILE	?=
 
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 6bbd4fa50f3b..8d6c2089d2a8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -9,7 +9,14 @@ config HW_RANDOM
 	  Hardware Random Number Generator Core infrastructure.
 
 	  To compile this driver as a module, choose M here: the
-	  module will be called rng-core.
+	  module will be called rng-core.  This provides a device
+	  that's usually called /dev/hw_random, and which exposes one
+	  of possibly several hardware random number generators.
+
+	  These hardware random number generators do not feed directly
+	  into the kernel's random number generator.  That is usually
+	  handled by the "rngd" daemon.  Documentation/hw_random.txt
+	  has more information.
 
 	  If unsure, say Y.
 
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index eeaaa9dce6ef..ad1880c67518 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -408,7 +408,6 @@ static int block2mtd_setup2(const char *val)
 	if (token[1]) {
 		ret = parse_num(&erase_size, token[1]);
 		if (ret) {
-			kfree(name);
 			parse_err("illegal erase size");
 		}
 	}
diff --git a/kernel/acct.c b/kernel/acct.c
index 521dfa53cb99..91e1cfd734d2 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -58,6 +58,7 @@
 #include <asm/uaccess.h>
 #include <asm/div64.h>
 #include <linux/blkdev.h> /* sector_div */
+#include <linux/pid_namespace.h>
 
 /*
  * These constants control the amount of freespace that suspend and
@@ -74,7 +75,7 @@ int acct_parm[3] = {4, 2, 30};
 /*
  * External references and all of the globals.
  */
-static void do_acct_process(struct file *);
+static void do_acct_process(struct pid_namespace *ns, struct file *);
 
 /*
  * This structure is used so that all the data protected by lock
@@ -86,6 +87,7 @@ struct acct_glbs {
 	volatile int active;
 	volatile int needcheck;
 	struct file *file;
+	struct pid_namespace *ns;
 	struct timer_list timer;
 };
 
@@ -175,9 +177,11 @@ out:
 static void acct_file_reopen(struct file *file)
 {
 	struct file *old_acct = NULL;
+	struct pid_namespace *old_ns = NULL;
 
 	if (acct_globals.file) {
 		old_acct = acct_globals.file;
+		old_ns = acct_globals.ns;
 		del_timer(&acct_globals.timer);
 		acct_globals.active = 0;
 		acct_globals.needcheck = 0;
@@ -185,6 +189,7 @@ static void acct_file_reopen(struct file *file)
 	}
 	if (file) {
 		acct_globals.file = file;
+		acct_globals.ns = get_pid_ns(task_active_pid_ns(current));
 		acct_globals.needcheck = 0;
 		acct_globals.active = 1;
 		/* It's been deleted if it was used before so this is safe */
@@ -196,8 +201,9 @@ static void acct_file_reopen(struct file *file)
 	if (old_acct) {
 		mnt_unpin(old_acct->f_path.mnt);
 		spin_unlock(&acct_globals.lock);
-		do_acct_process(old_acct);
+		do_acct_process(old_ns, old_acct);
 		filp_close(old_acct, NULL);
+		put_pid_ns(old_ns);
 		spin_lock(&acct_globals.lock);
 	}
 }
@@ -419,7 +425,7 @@ static u32 encode_float(u64 value)
 /*
  * do_acct_process does all actual work. Caller holds the reference to file.
  */
-static void do_acct_process(struct file *file)
+static void do_acct_process(struct pid_namespace *ns, struct file *file)
 {
 	struct pacct_struct *pacct = &current->signal->pacct;
 	acct_t ac;
@@ -481,8 +487,10 @@ static void do_acct_process(struct file *file)
 	ac.ac_gid16 = current->gid;
 #endif
 #if ACCT_VERSION==3
-	ac.ac_pid = current->tgid;
-	ac.ac_ppid = current->real_parent->tgid;
+	ac.ac_pid = task_tgid_nr_ns(current, ns);
+	rcu_read_lock();
+	ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns);
+	rcu_read_unlock();
 #endif
 
 	spin_lock_irq(&current->sighand->siglock);
@@ -578,6 +586,7 @@ void acct_collect(long exitcode, int group_dead)
 void acct_process(void)
 {
 	struct file *file = NULL;
+	struct pid_namespace *ns;
 
 	/*
 	 * accelerate the common fastpath:
@@ -592,8 +601,10 @@ void acct_process(void)
 		return;
 	}
 	get_file(file);
+	ns = get_pid_ns(acct_globals.ns);
 	spin_unlock(&acct_globals.lock);
 
-	do_acct_process(file);
+	do_acct_process(ns, file);
 	fput(file);
+	put_pid_ns(ns);
 }
diff --git a/kernel/marker.c b/kernel/marker.c
index 48a4ea5afffd..041c33e3e95c 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -104,18 +104,18 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
 	char ptype;
 
 	/*
-	 * disabling preemption to make sure the teardown of the callbacks can
-	 * be done correctly when they are in modules and they insure RCU read
-	 * coherency.
+	 * preempt_disable does two things : disabling preemption to make sure
+	 * the teardown of the callbacks can be done correctly when they are in
+	 * modules and they insure RCU read coherency.
 	 */
 	preempt_disable();
-	ptype = ACCESS_ONCE(mdata->ptype);
+	ptype = mdata->ptype;
 	if (likely(!ptype)) {
 		marker_probe_func *func;
 		/* Must read the ptype before ptr. They are not data dependant,
 		 * so we put an explicit smp_rmb() here. */
 		smp_rmb();
-		func = ACCESS_ONCE(mdata->single.func);
+		func = mdata->single.func;
 		/* Must read the ptr before private data. They are not data
 		 * dependant, so we put an explicit smp_rmb() here. */
 		smp_rmb();
@@ -133,7 +133,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
 	 * in the fast path, so put the explicit barrier here.
 	 */
 	smp_read_barrier_depends();
-	multi = ACCESS_ONCE(mdata->multi);
+	multi = mdata->multi;
 	for (i = 0; multi[i].func; i++) {
 		va_start(args, fmt);
 		multi[i].func(multi[i].probe_private, call_private, fmt,
@@ -161,13 +161,13 @@ void marker_probe_cb_noarg(const struct marker *mdata,
 	char ptype;
 
 	preempt_disable();
-	ptype = ACCESS_ONCE(mdata->ptype);
+	ptype = mdata->ptype;
 	if (likely(!ptype)) {
 		marker_probe_func *func;
 		/* Must read the ptype before ptr. They are not data dependant,
 		 * so we put an explicit smp_rmb() here. */
 		smp_rmb();
-		func = ACCESS_ONCE(mdata->single.func);
+		func = mdata->single.func;
 		/* Must read the ptr before private data. They are not data
 		 * dependant, so we put an explicit smp_rmb() here. */
 		smp_rmb();
@@ -183,7 +183,7 @@ void marker_probe_cb_noarg(const struct marker *mdata,
 	 * in the fast path, so put the explicit barrier here.
 	 */
 	smp_read_barrier_depends();
-	multi = ACCESS_ONCE(mdata->multi);
+	multi = mdata->multi;
 	for (i = 0; multi[i].func; i++)
 		multi[i].func(multi[i].probe_private, call_private, fmt,
 				&args);
@@ -551,9 +551,9 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
 
 /*
  * Disable a marker and its probe callback.
- * Note: only after a synchronize_sched() issued after setting elem->call to the
- * empty function insures that the original callback is not used anymore. This
- * insured by preemption disabling around the call site.
+ * Note: only waiting an RCU period after setting elem->call to the empty
+ * function insures that the original callback is not used anymore. This insured
+ * by preempt_disable around the call site.
 */
 static void disable_marker(struct marker *elem)
 {
@@ -565,8 +565,8 @@ static void disable_marker(struct marker *elem)
 	elem->ptype = 0;	/* single probe */
 	/*
 	 * Leave the private data and id there, because removal is racy and
-	 * should be done only after a synchronize_sched(). These are never used
-	 * until the next initialization anyway.
+	 * should be done only after an RCU period. These are never used until
+	 * the next initialization anyway.
 	 */
 }
 
@@ -601,9 +601,6 @@ void marker_update_probe_range(struct marker *begin,
 
 /*
  * Update probes, removing the faulty probes.
- * Issues a synchronize_sched() when no reference to the module passed
- * as parameter is found in the probes so the probe module can be
- * safely unloaded from now on.
 *
  * Internal callback only changed before the first probe is connected to it.
  * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
diff --git a/kernel/printk.c b/kernel/printk.c
index 9adc2a473e6e..c46a20a19a15 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -616,6 +616,40 @@ asmlinkage int printk(const char *fmt, ...)
 /* cpu currently holding logbuf_lock */
 static volatile unsigned int printk_cpu = UINT_MAX;
 
+/*
+ * Can we actually use the console at this time on this cpu?
+ *
+ * Console drivers may assume that per-cpu resources have
+ * been allocated. So unless they're explicitly marked as
+ * being able to cope (CON_ANYTIME) don't call them until
+ * this CPU is officially up.
+ */
+static inline int can_use_console(unsigned int cpu)
+{
+	return cpu_online(cpu) || have_callable_console();
+}
+
+/*
+ * Try to get console ownership to actually show the kernel
+ * messages from a 'printk'. Return true (and with the
+ * console_semaphore held, and 'console_locked' set) if it
+ * is successful, false otherwise.
+ *
+ * This gets called with the 'logbuf_lock' spinlock held and
+ * interrupts disabled. It should return with 'lockbuf_lock'
+ * released but interrupts still disabled.
+ */
+static int acquire_console_semaphore_for_printk(unsigned int cpu)
+{
+	int retval = 0;
+
+	if (can_use_console(cpu))
+		retval = !try_acquire_console_sem();
+	printk_cpu = UINT_MAX;
+	spin_unlock(&logbuf_lock);
+	return retval;
+}
+
 const char printk_recursion_bug_msg [] =
 	KERN_CRIT "BUG: recent printk recursion!\n";
 static int printk_recursion_bug;
@@ -725,43 +759,22 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 		log_level_unknown = 1;
 	}
 
-	if (!down_trylock(&console_sem)) {
-		/*
-		 * We own the drivers.  We can drop the spinlock and
-		 * let release_console_sem() print the text, maybe ...
-		 */
-		console_locked = 1;
-		printk_cpu = UINT_MAX;
-		spin_unlock(&logbuf_lock);
+	/*
+	 * Try to acquire and then immediately release the
+	 * console semaphore. The release will do all the
+	 * actual magic (print out buffers, wake up klogd,
+	 * etc).
+	 *
+	 * The acquire_console_semaphore_for_printk() function
+	 * will release 'logbuf_lock' regardless of whether it
+	 * actually gets the semaphore or not.
+	 */
+	if (acquire_console_semaphore_for_printk(this_cpu))
+		release_console_sem();
 
-		/*
-		 * Console drivers may assume that per-cpu resources have
-		 * been allocated. So unless they're explicitly marked as
-		 * being able to cope (CON_ANYTIME) don't call them until
-		 * this CPU is officially up.
-		 */
-		if (cpu_online(smp_processor_id()) || have_callable_console()) {
-			console_may_schedule = 0;
-			release_console_sem();
-		} else {
-			/* Release by hand to avoid flushing the buffer. */
-			console_locked = 0;
-			up(&console_sem);
-		}
-		lockdep_on();
-		raw_local_irq_restore(flags);
-	} else {
-		/*
-		 * Someone else owns the drivers.  We drop the spinlock, which
-		 * allows the semaphore holder to proceed and to call the
-		 * console drivers with the output which we just produced.
-		 */
-		printk_cpu = UINT_MAX;
-		spin_unlock(&logbuf_lock);
-		lockdep_on();
+	lockdep_on();
 out_restore_irqs:
 	raw_local_irq_restore(flags);
-	}
 
 	preempt_enable();
 	return printed_len;
diff --git a/mm/bootmem.c b/mm/bootmem.c
index f6ff4337b424..2ccea700968f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -125,6 +125,7 @@ static int __init reserve_bootmem_core(bootmem_data_t *bdata,
 	BUG_ON(!size);
 	BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
 	BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
+	BUG_ON(addr < bdata->node_boot_start);
 
 	sidx = PFN_DOWN(addr - bdata->node_boot_start);
 	eidx = PFN_UP(addr + size - bdata->node_boot_start);
@@ -156,21 +157,31 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
 	unsigned long sidx, eidx;
 	unsigned long i;
 
+	BUG_ON(!size);
+
+	/* out range */
+	if (addr + size < bdata->node_boot_start ||
+	    PFN_DOWN(addr) > bdata->node_low_pfn)
+		return;
 	/*
 	 * round down end of usable mem, partially free pages are
 	 * considered reserved.
 	 */
-	BUG_ON(!size);
-	BUG_ON(PFN_DOWN(addr + size) > bdata->node_low_pfn);
 
-	if (addr < bdata->last_success)
+	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
 		bdata->last_success = addr;
 
 	/*
-	 * Round up the beginning of the address.
+	 * Round up to index to the range.
 	 */
-	sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
+		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+	else
+		sidx = 0;
+
 	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
+	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
 
 	for (i = sidx; i < eidx; i++) {
 		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
@@ -421,7 +432,9 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
 
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
-	free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
+	bootmem_data_t *bdata;
+	list_for_each_entry(bdata, &bdata_list, list)
+		free_bootmem_core(bdata, addr, size);
 }
 
 unsigned long __init free_all_bootmem(void)
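
The sidx/eidx clamping introduced above is what lets the reworked free_bootmem() hand the same address range to every node's bitmap: pages that fall outside a node are simply skipped instead of tripping the old BUG_ONs. A stand-alone sketch of that index arithmetic, using made-up values and assuming 4 KiB pages (not part of this patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	/* hypothetical node: memory starts at 16 MiB, ends at pfn 0x8000 */
	unsigned long node_boot_start = 0x1000000;
	unsigned long node_low_pfn = 0x8000;
	/* range to free: starts 4 KiB below the node, ends inside it */
	unsigned long addr = 0xfff000, size = 0x5000;
	unsigned long sidx, eidx;

	if (PFN_UP(addr) > PFN_DOWN(node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(node_boot_start);
	else
		sidx = 0;	/* range begins before this node */

	eidx = PFN_DOWN(addr + size - node_boot_start);
	if (eidx > node_low_pfn - PFN_DOWN(node_boot_start))
		eidx = node_low_pfn - PFN_DOWN(node_boot_start);

	/* prints "clearing bitmap bits [0, 4)" for these values */
	printf("clearing bitmap bits [%lu, %lu)\n", sidx, eidx);
	return 0;
}

Only the four pages of the range that actually lie inside the node get their bitmap bits cleared; the page below node_boot_start is ignored.
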
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 45711585684e..4046434046e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,13 +70,6 @@ struct scan_control {
 
 	int order;
 
-	/*
-	 * Pages that have (or should have) IO pending.  If we run into
-	 * a lot of these, we're better off waiting a little for IO to
-	 * finish rather than scanning more pages in the VM.
-	 */
-	int nr_io_pages;
-
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -512,10 +505,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
 			wait_on_page_writeback(page);
-		else {
-			sc->nr_io_pages++;
+		else
 			goto keep_locked;
-		}
 	}
 
 	referenced = page_referenced(page, 1, sc->mem_cgroup);
@@ -554,10 +545,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageDirty(page)) {
 			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
 				goto keep_locked;
-			if (!may_enter_fs) {
-				sc->nr_io_pages++;
+			if (!may_enter_fs)
 				goto keep_locked;
-			}
 			if (!sc->may_writepage)
 				goto keep_locked;
 
@@ -568,10 +557,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			case PAGE_ACTIVATE:
 				goto activate_locked;
 			case PAGE_SUCCESS:
-				if (PageWriteback(page) || PageDirty(page)) {
-					sc->nr_io_pages++;
+				if (PageWriteback(page) || PageDirty(page))
 					goto keep;
-				}
 				/*
 				 * A synchronous write - probably a ramdisk.  Go
 				 * ahead and try to reclaim the page.
@@ -1344,7 +1331,6 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
-		sc->nr_io_pages = 0;
 		if (!priority)
 			disable_swap_token();
 		nr_reclaimed += shrink_zones(priority, zones, sc);
@@ -1379,8 +1365,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 		}
 
 		/* Take a nap, wait for some writeback to complete */
-		if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
-				sc->nr_io_pages > sc->swap_cluster_max)
+		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
 			congestion_wait(WRITE, HZ/10);
 	}
 	/* top priority shrink_caches still had more to do? don't OOM, then */
@@ -1514,7 +1499,6 @@ loop_again:
 		if (!priority)
 			disable_swap_token();
 
-		sc.nr_io_pages = 0;
 		all_zones_ok = 1;
 
 		/*
@@ -1607,8 +1591,7 @@ loop_again:
 		 * OK, kswapd is getting into trouble.  Take a nap, then take
 		 * another pass across the zones.
 		 */
-		if (total_scanned && priority < DEF_PRIORITY - 2 &&
-				sc.nr_io_pages > sc.swap_cluster_max)
+		if (total_scanned && priority < DEF_PRIORITY - 2)
 			congestion_wait(WRITE, HZ/10);
 
 		/*
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index cfc004e04417..2d20640854b7 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -58,6 +58,9 @@ modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
 # Stop after building .o files if NOFINAL is set. Makes compile tests quicker
 _modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
 
+ifneq ($(KBUILD_BUILDHOST),$(ARCH))
+cross_build := 1
+endif
 
 # Step 2), invoke modpost
 #  Includes step 3,4
@@ -70,7 +73,8 @@ modpost = scripts/mod/modpost \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \
  $(if $(CONFIG_MARKERS),-M $(markersfile))	 \
- $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
+ $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w) \
+ $(if $(cross_build),-c)
 
 quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
       cmd_modpost = $(modpost) -s
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 9ddf944cce29..348d8687b7c9 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -51,11 +51,13 @@ do { \
 		sprintf(str + strlen(str), "*");	\
 } while(0)
 
+unsigned int cross_build = 0;
 /**
  * Check that sizeof(device_id type) are consistent with size of section
  * in .o file. If in-consistent then userspace and kernel does not agree
  * on actual size which is a bug.
  * Also verify that the final entry in the table is all zeros.
+ * Ignore both checks if build host differ from target host and size differs.
  **/
 static void device_id_check(const char *modname, const char *device_id,
 			    unsigned long size, unsigned long id_size,
@@ -64,6 +66,8 @@ static void device_id_check(const char *modname, const char *device_id,
 	int i;
 
 	if (size % id_size || size < id_size) {
+		if (cross_build != 0)
+			return;
 		fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
 		      "of the size of section __mod_%s_device_table=%lu.\n"
 		      "Fix definition of struct %s_device_id "
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 695b5d657cf5..110cf243fa4e 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2026,7 +2026,7 @@ int main(int argc, char **argv)
 	int opt;
 	int err;
 
-	while ((opt = getopt(argc, argv, "i:I:msSo:awM:K:")) != -1) {
+	while ((opt = getopt(argc, argv, "i:I:cmsSo:awM:K:")) != -1) {
 		switch (opt) {
 		case 'i':
 			kernel_read = optarg;
@@ -2035,6 +2035,9 @@
 			module_read = optarg;
 			external_module = 1;
 			break;
+		case 'c':
+			cross_build = 1;
+			break;
 		case 'm':
 			modversions = 1;
 			break;
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 565c5872407e..09f58e33d227 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -135,6 +135,7 @@ struct elf_info {
 };
 
 /* file2alias.c */
+extern unsigned int cross_build;
 void handle_moddevtable(struct module *mod, struct elf_info *info,
 			Elf_Sym *sym, const char *symname);
 void add_moddevtable(struct buffer *buf, struct module *mod);
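
The cross_build plumbing in the scripts above (KBUILD_BUILDHOST exported from the top-level Makefile, the -c flag passed by Makefile.modpost, and the early return in device_id_check()) exists because a device-ID table compiled for the target can legitimately have a different element size than the same structure compiled for the build host. A hypothetical illustration (struct fake_device_id is invented for this sketch, not a real kernel structure):

#include <stdio.h>

struct fake_device_id {
	unsigned short vendor;
	unsigned short device;
	unsigned long driver_data;	/* 4 bytes on a 32-bit ABI, 8 on a 64-bit one */
};

int main(void)
{
	/* typically 8 when built for a 32-bit target, 16 on a 64-bit build host */
	printf("sizeof(struct fake_device_id) = %zu\n",
	       sizeof(struct fake_device_id));
	return 0;
}

Because of that mismatch, a strict "section size must be a multiple of the host's sizeof()" check produces spurious fatals when cross-building, which is the case the new -c option waives.
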
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index afe7c9b0732a..cfae8afcc262 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -74,11 +74,6 @@ struct smk_list_entry *smack_list;
 #define SEQ_READ_FINISHED 1
 
 /*
- * Disable concurrent writing open() operations
- */
-static struct semaphore smack_write_sem;
-
-/*
  * Values for parsing cipso rules
  * SMK_DIGITLEN: Length of a digit field in a rule.
  * SMK_CIPSOMIN: Minimum possible cipso rule length.
@@ -168,32 +163,7 @@ static struct seq_operations load_seq_ops = {
  */
 static int smk_open_load(struct inode *inode, struct file *file)
 {
-	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-		return seq_open(file, &load_seq_ops);
-
-	if (down_interruptible(&smack_write_sem))
-		return -ERESTARTSYS;
-
-	return 0;
-}
-
-/**
- * smk_release_load - release() for /smack/load
- * @inode: inode structure representing file
- * @file: "load" file pointer
- *
- * For a reading session, use the seq_file release
- * implementation.
- * Otherwise, we are at the end of a writing session so
- * clean everything up.
- */
-static int smk_release_load(struct inode *inode, struct file *file)
-{
-	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-		return seq_release(inode, file);
-
-	up(&smack_write_sem);
-	return 0;
+	return seq_open(file, &load_seq_ops);
 }
 
 /**
@@ -341,7 +311,7 @@ static const struct file_operations smk_load_ops = {
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.write		= smk_write_load,
-	.release	= smk_release_load,
+	.release	= seq_release,
 };
 
 /**
@@ -1011,7 +981,6 @@ static int __init init_smk_fs(void)
 		}
 	}
 
-	sema_init(&smack_write_sem, 1);
 	smk_cipso_doi();
 	smk_unlbl_ambient(NULL);
 