-rw-r--r--  arch/mips/bcm47xx/prom.c               |  8
-rw-r--r--  arch/mips/mm/highmem.c                 |  1
-rw-r--r--  arch/parisc/Kconfig                    |  1
-rw-r--r--  arch/x86/include/asm/processor.h       |  2
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c        | 30
-rw-r--r--  arch/x86/kernel/ptrace.c               |  7
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c  |  2
-rw-r--r--  drivers/platform/x86/acer-wmi.c        |  2
-rw-r--r--  drivers/video/efifb.c                  | 11
-rw-r--r--  fs/exec.c                              |  1
-rw-r--r--  kernel/perf_event.c                    | 11
-rw-r--r--  kernel/sys.c                           |  2
-rw-r--r--  lib/idr.c                              |  4
-rw-r--r--  mm/oom_kill.c                          |  2
-rw-r--r--  tools/perf/util/probe-event.c          |  3
15 files changed, 50 insertions, 37 deletions
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index c51405e57921..29d3cbf9555f 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
 			break;
 	}
 
+	/* Ignoring the last page when ddr size is 128M. Cached
+	 * accesses to last page is causing the processor to prefetch
+	 * using address above 128M stepping out of the ddr address
+	 * space.
+	 */
+	if (mem == 0x8000000)
+		mem -= 0x1000;
+
 	add_memory_region(0, mem, BOOT_MEM_RAM);
 }
 
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f4..127d732474bf 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 524d9352f17e..f388dc68f605 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,7 +18,6 @@ config PARISC
 	select BUG
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
-	select HAVE_ARCH_TRACEHOOK
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fc801bab1b3b..b753ea59703a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -450,6 +450,8 @@ struct thread_struct {
 	struct perf_event	*ptrace_bps[HBP_NUM];
 	/* Debug status used for traps, single steps, etc... */
 	unsigned long		debugreg6;
+	/* Keep track of the exact dr7 value set by the user */
+	unsigned long		ptrace_dr7;
 	/* Fault info: */
 	unsigned long		cr2;
 	unsigned long		trap_no;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 05d5fec64a94..bb6006e3e295 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
 	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
 
-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	/*
-	 * For kernel-addresses, either the address or symbol name can be
-	 * specified.
-	 */
-	if (info->name)
-		info->address = (unsigned long)
-			kallsyms_lookup_name(info->name);
-	if (info->address)
-		return 0;
-
-	return -EINVAL;
-}
-
 int arch_bp_generic_fields(int x86_len, int x86_type,
 			   int *gen_len, int *gen_type)
 {
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
 		return ret;
 	}
 
-	ret = arch_store_info(bp);
-
-	if (ret < 0)
-		return ret;
+	/*
+	 * For kernel-addresses, either the address or symbol name can be
+	 * specified.
+	 */
+	if (info->name)
+		info->address = (unsigned long)
+			kallsyms_lookup_name(info->name);
 	/*
 	 * Check that the low-order bits of the address are appropriate
 	 * for the alignment implied by len.
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 017d937639fe..0c1033d61e59 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 	} else if (n == 6) {
 		val = thread->debugreg6;
 	} else if (n == 7) {
-		val = ptrace_get_dr7(thread->ptrace_bps);
+		val = thread->ptrace_dr7;
 	}
 	return val;
 }
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
 			return rc;
 	}
 	/* All that's left is DR7 */
-	if (n == 7)
+	if (n == 7) {
 		rc = ptrace_write_dr7(tsk, val);
+		if (!rc)
+			thread->ptrace_dr7 = val;
+	}
 
 ret_path:
 	return rc;
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fac19d1..b314a999aabe 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@
 
 #define DRV_NAME "cs5535-clockevt"
 
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
 
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 07d14dfdf0b4..226b3e93498c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
 	acer_backlight_device = bd;
 
 	bd->props.power = FB_BLANK_UNBLANK;
-	bd->props.brightness = max_brightness;
+	bd->props.brightness = read_brightness(bd);
 	bd->props.max_brightness = max_brightness;
 	backlight_update_status(bd);
 	return 0;
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index eb12182b2059..d25df51bb0d2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
 	return 0;
 }
 
+static void efifb_destroy(struct fb_info *info)
+{
+	if (info->screen_base)
+		iounmap(info->screen_base);
+	release_mem_region(info->aperture_base, info->aperture_size);
+	framebuffer_release(info);
+}
+
 static struct fb_ops efifb_ops = {
 	.owner		= THIS_MODULE,
+	.fb_destroy	= efifb_destroy,
 	.fb_setcolreg	= efifb_setcolreg,
 	.fb_fillrect	= cfb_fillrect,
 	.fb_copyarea	= cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
 	info->par = NULL;
 
 	info->aperture_base = efifb_fix.smem_start;
-	info->aperture_size = size_total;
+	info->aperture_size = size_remap;
 
 	info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
 	if (!info->screen_base) {
diff --git a/fs/exec.c b/fs/exec.c
index e95c692ef0e4..cce6bbdbdbb1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -637,7 +637,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
 	 * will align it up.
 	 */
 	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
-	rlim_stack = min(rlim_stack, stack_size);
 #ifdef CONFIG_STACK_GROWSUP
 	if (stack_size + stack_expand > rlim_stack)
 		stack_base = vma->vm_start + rlim_stack;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2b19297742cb..2ae7409bf38f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
 	task_event->event_id.tid = perf_event_tid(event, task);
 	task_event->event_id.ptid = perf_event_tid(event, current);
 
-	task_event->event_id.time = perf_clock();
-
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	if (!ctx)
-		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+		ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
 	put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
 			/* .ppid */
 			/* .tid */
 			/* .ptid */
+			.time = perf_clock(),
 		},
 	};
 
@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..18bde979f346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	if (which > PRIO_USER || which < PRIO_PROCESS)
 		return -EINVAL;
 
+	rcu_read_lock();
 	read_lock(&tasklist_lock);
 	switch (which) {
 		case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	}
 out_unlock:
 	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	return retval;
 }
diff --git a/lib/idr.c b/lib/idr.c
index 1cac726c44bc..0dc782216d4b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
 			/* if already at the top layer, we need to grow */
-			if (!(p = pa[l])) {
+			if (id >= 1 << (idp->layers * IDR_BITS)) {
 				*starting_id = id;
 				return IDR_NEED_TO_GROW;
 			}
+			p = pa[l];
+			BUG_ON(!p);
 
 			/* If we need to go up one layer, continue the
 			 * loop; otherwise, restart from the top.
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52481b1c1e5..237050478f28 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	list_for_each_entry(c, &p->children, sibling) {
 		if (c->mm == p->mm)
 			continue;
+		if (mem && !task_in_mem_cgroup(c, mem))
+			continue;
 		if (!oom_kill_task(c))
 			return 0;
 	}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 29465d440043..fde17b090a47 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -272,6 +272,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 	int ret;
 
 	pp->probes[0] = buf = zalloc(MAX_CMDLEN);
+	pp->found = 1;
 	if (!buf)
 		die("Failed to allocate memory by zalloc.");
 	if (pp->offset) {
@@ -294,6 +295,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 error:
 		free(pp->probes[0]);
 		pp->probes[0] = NULL;
+		pp->found = 0;
 	}
 	return ret;
 }
@@ -455,6 +457,7 @@ void show_perf_probe_events(void)
 	struct strlist *rawlist;
 	struct str_node *ent;
 
+	memset(&pp, 0, sizeof(pp));
 	fd = open_kprobe_events(O_RDONLY, 0);
 	rawlist = get_trace_kprobe_event_rawlist(fd);
 	close(fd);