author     Linus Torvalds <torvalds@linux-foundation.org>   2010-11-16 12:27:13 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-11-16 12:27:13 -0500
commit     2ebc8ec86fe0f3f3acf9ba9b41a368f819e7807e (patch)
tree       c0b3d4f14ae6dd98318d78fcf7d16f47954e06a4 /arch
parent     df6e61d4ca268dc8706db38222fde9f04701566c (diff)
parent     89480801a17a3069f45169d40b828c8e511aa005 (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6:
[S390] kprobes: Fix the return address of multiple kretprobes
[S390] kprobes: disable interrupts throughout
[S390] ftrace: build without frame pointers on s390
[S390] mm: add devmem_is_allowed() for STRICT_DEVMEM checking
[S390] vmlogrdr: purge after recording is switched off
[S390] cio: fix incorrect ccw_device_init_count
[S390] tape: add medium state notifications
[S390] fix get_user_pages_fast
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig.debug       | 12
-rw-r--r--  arch/s390/include/asm/page.h  |  5
-rw-r--r--  arch/s390/kernel/kprobes.c    | 70
-rw-r--r--  arch/s390/mm/gup.c            |  7
4 files changed, 73 insertions(+), 21 deletions(-)
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 45e0c6199f36..05221b13ffb1 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,6 +6,18 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
+config STRICT_DEVMEM
+        def_bool y
+        prompt "Filter access to /dev/mem"
+        ---help---
+          This option restricts access to /dev/mem.  If this option is
+          disabled, you allow userspace access to all memory, including
+          kernel and userspace memory. Accidental memory access is likely
+          to be disastrous.
+          Memory access is required for experts who want to debug the kernel.
+
+          If you are unsure, say Y.
+
 config DEBUG_STRICT_USER_COPY_CHECKS
         bool "Strict user copy size checks"
         ---help---
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a8729ea7e9ac..3c987e9ec8d6 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -130,6 +130,11 @@ struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+        return 0;
+}
+
 #define HAVE_ARCH_FREE_PAGE
 #define HAVE_ARCH_ALLOC_PAGE
 
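Note: the s390 devmem_is_allowed() above unconditionally returns 0, so with the new CONFIG_STRICT_DEVMEM default every /dev/mem access on s390 is refused. The following is a rough, illustrative userspace sketch of how such an architecture hook is typically consulted for every page of a requested range; it is not the actual /dev/mem driver code, and range_is_allowed_sketch() is an invented name.

/*
 * Illustrative sketch only (userspace, invented names): a STRICT_DEVMEM-style
 * filter vets every page of a requested range through the architecture hook.
 * The stub mirrors the s390 inline added in the hunk above, which always
 * returns 0, so every page would be rejected when the option is enabled.
 */
#include <stdio.h>

static int devmem_is_allowed(unsigned long pfn)
{
        (void)pfn;
        return 0;
}

static int range_is_allowed_sketch(unsigned long pfn, unsigned long npages)
{
        while (npages-- > 0) {
                if (!devmem_is_allowed(pfn))
                        return 0;       /* deny the whole access */
                pfn++;
        }
        return 1;
}

int main(void)
{
        printf("access allowed: %d\n", range_is_allowed_sketch(0x100, 4));
        return 0;
}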
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d60fc4398516..2564793ec2b6 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -30,6 +30,7 @@
 #include <asm/sections.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/hardirq.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -212,7 +213,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
         /* Set the PER control regs, turns on single step for this address */
         __ctl_load(kprobe_per_regs, 9, 11);
         regs->psw.mask |= PSW_MASK_PER;
-        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 }
 
 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -239,7 +240,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
         __get_cpu_var(current_kprobe) = p;
         /* Save the interrupt and per flags */
         kcb->kprobe_saved_imask = regs->psw.mask &
-                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
         /* Save the control regs that govern PER */
         __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
 }
@@ -316,8 +317,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                 return 1;
 
 ss_probe:
-        if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
-                local_irq_disable();
         prepare_singlestep(p, regs);
         kcb->kprobe_status = KPROBE_HIT_SS;
         return 1;
@@ -350,6 +349,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
         struct hlist_node *node, *tmp;
         unsigned long flags, orig_ret_address = 0;
         unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+        kprobe_opcode_t *correct_ret_addr = NULL;
 
         INIT_HLIST_HEAD(&empty_rp);
         kretprobe_hash_lock(current, &head, &flags);
@@ -372,10 +372,32 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
                         /* another task is sharing our hash bucket */
                         continue;
 
-                if (ri->rp && ri->rp->handler)
-                        ri->rp->handler(ri, regs);
+                orig_ret_address = (unsigned long)ri->ret_addr;
+
+                if (orig_ret_address != trampoline_address)
+                        /*
+                         * This is the real return address. Any other
+                         * instances associated with this task are for
+                         * other calls deeper on the call stack
+                         */
+                        break;
+        }
+
+        kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+        correct_ret_addr = ri->ret_addr;
+        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+                if (ri->task != current)
+                        /* another task is sharing our hash bucket */
+                        continue;
 
                 orig_ret_address = (unsigned long)ri->ret_addr;
+
+                if (ri->rp && ri->rp->handler) {
+                        ri->ret_addr = correct_ret_addr;
+                        ri->rp->handler(ri, regs);
+                }
+
                 recycle_rp_inst(ri, &empty_rp);
 
                 if (orig_ret_address != trampoline_address) {
@@ -387,7 +409,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
                         break;
                 }
         }
-        kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
         regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
 
         reset_current_kprobe();
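Note on the kretprobe change above: the per-task instance list is now walked twice. The first pass only locates the real (non-trampoline) return address and asserts that it was found; the second pass runs the user handlers with ri->ret_addr temporarily pointed at that corrected address, so a handler fired for a nested kretprobe no longer sees the trampoline address. Below is a minimal userspace sketch of the same two-pass pattern; all names, addresses, and the two-element array are invented for illustration.

/*
 * Two-pass sketch (userspace, illustrative only): pass 1 finds the corrected
 * return address, pass 2 invokes the handlers with it.
 */
#include <stdio.h>

#define TRAMPOLINE ((void *)0x1000)

struct instance {
        void *ret_addr;
        void (*handler)(void *ret_addr);
};

static void report(void *ret_addr)
{
        printf("handler sees return address %p\n", ret_addr);
}

int main(void)
{
        struct instance pending[] = {
                { TRAMPOLINE,     report },   /* nested hit: address not yet known */
                { (void *)0x2000, report },   /* real return address for this hit  */
        };
        void *correct_ret_addr = NULL;
        unsigned int i;

        /* Pass 1: the first instance that does not point back at the
         * trampoline holds the real return address. */
        for (i = 0; i < 2; i++) {
                correct_ret_addr = pending[i].ret_addr;
                if (correct_ret_addr != TRAMPOLINE)
                        break;
        }

        /* Pass 2: run the handlers, giving each one the corrected address
         * before the instance would be recycled. */
        for (i = 0; i < 2; i++) {
                pending[i].ret_addr = correct_ret_addr;
                pending[i].handler(pending[i].ret_addr);
        }
        return 0;
}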
@@ -465,8 +487,6 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
                 goto out;
         }
         reset_current_kprobe();
-        if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
-                local_irq_enable();
 out:
         preempt_enable_no_resched();
 
@@ -482,7 +502,7 @@ out:
         return 1;
 }
 
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 {
         struct kprobe *cur = kprobe_running();
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -508,8 +528,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                         restore_previous_kprobe(kcb);
                 else {
                         reset_current_kprobe();
-                        if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
-                                local_irq_enable();
                 }
                 preempt_enable_no_resched();
                 break;
@@ -553,6 +571,18 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
         return 0;
 }
 
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+        int ret;
+
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_disable();
+        ret = kprobe_trap_handler(regs, trapnr);
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+        return ret;
+}
+
 /*
  * Wrapper routine to for handling exceptions.
  */
@@ -560,8 +590,12 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                 unsigned long val, void *data)
 {
         struct die_args *args = (struct die_args *)data;
+        struct pt_regs *regs = args->regs;
         int ret = NOTIFY_DONE;
 
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_disable();
+
         switch (val) {
         case DIE_BPT:
                 if (kprobe_handler(args->regs))
@@ -572,16 +606,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                 ret = NOTIFY_STOP;
                 break;
         case DIE_TRAP:
-                /* kprobe_running() needs smp_processor_id() */
-                preempt_disable();
-                if (kprobe_running() &&
-                    kprobe_fault_handler(args->regs, args->trapnr))
+                if (!preemptible() && kprobe_running() &&
+                    kprobe_trap_handler(args->regs, args->trapnr))
                         ret = NOTIFY_STOP;
-                preempt_enable();
                 break;
         default:
                 break;
         }
+
+        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
+                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
+
         return ret;
 }
 
@@ -595,6 +630,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
         /* setup return addr to the jprobe handler routine */
         regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 
         /* r14 is the function return address */
         kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
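Note on the interrupt rework in this file: rather than enabling or disabling interrupts at scattered points inside the handlers, the entry points (kprobe_exceptions_notify() and the new kprobe_fault_handler() wrapper) now disable I/O and external interrupts for the whole handling sequence whenever the interrupted PSW had them enabled, and afterwards restore that saved mask with the PER bit cleared. A compact userspace sketch of this disable/handle/restore bracket follows; the PSW_MASK_* bit values and all function names are invented for illustration, only the pattern mirrors the diff above.

/*
 * Disable/handle/restore bracket sketch (userspace, invented values).
 */
#include <stdio.h>

#define PSW_MASK_PER 0x4UL      /* invented bit positions, not the real PSW layout */
#define PSW_MASK_IO  0x2UL
#define PSW_MASK_EXT 0x1UL

static unsigned long current_mask;      /* stands in for the live interrupt state */

static void irq_disable(void)            { current_mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); }
static void irq_restore(unsigned long m) { current_mask = m; }
static int  handle_trap(void)            { return 1; /* pretend the kprobe handled it */ }

static int fault_handler_sketch(unsigned long psw_mask)
{
        int ret;

        /* Only touch the interrupt state if the interrupted context had
         * I/O or external interrupts enabled in its PSW ... */
        if (psw_mask & (PSW_MASK_IO | PSW_MASK_EXT))
                irq_disable();
        ret = handle_trap();
        /* ... and restore that saved state (minus PER) afterwards instead of
         * unconditionally re-enabling, as the old code did. */
        if (psw_mask & (PSW_MASK_IO | PSW_MASK_EXT))
                irq_restore(psw_mask & ~PSW_MASK_PER);
        return ret;
}

int main(void)
{
        int handled;

        current_mask = PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_PER;
        handled = fault_handler_sketch(current_mask);
        printf("handled=%d, mask after=%#lx\n", handled, current_mask);
        return 0;
}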
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 38e641cdd977..45b405ca2567 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,18 +20,17 @@
 static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                 unsigned long end, int write, struct page **pages, int *nr)
 {
-        unsigned long mask, result;
+        unsigned long mask;
         pte_t *ptep, pte;
         struct page *page;
 
-        result = write ? 0 : _PAGE_RO;
-        mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
+        mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
         ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
         do {
                 pte = *ptep;
                 barrier();
-                if ((pte_val(pte) & mask) != result)
+                if ((pte_val(pte) & mask) != 0)
                         return 0;
                 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                 page = pte_page(pte);
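Note on the gup_pte_range() fix: with the old check a read lookup required _PAGE_RO to be set (rejecting ordinary writable pages), while a write lookup never tested _PAGE_RO at all; folding the write requirement into a single mask and comparing against zero gives the intended behaviour. A small worked example with illustrative flag values (not the real s390 definitions):

/*
 * Contrast of the old and new checks, userspace demo with invented bits.
 */
#include <stdio.h>

#define _PAGE_RO      0x200UL   /* illustrative values only */
#define _PAGE_INVALID 0x400UL
#define _PAGE_SPECIAL 0x004UL

static int old_check(unsigned long pte, int write)
{
        unsigned long result = write ? 0 : _PAGE_RO;
        unsigned long mask = result | _PAGE_INVALID | _PAGE_SPECIAL;

        return (pte & mask) == result;  /* 1 = fast path may take the page */
}

static int new_check(unsigned long pte, int write)
{
        unsigned long mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

        return (pte & mask) == 0;
}

int main(void)
{
        unsigned long writable_pte = 0x1000UL;            /* RO clear */
        unsigned long readonly_pte = 0x1000UL | _PAGE_RO; /* RO set   */

        /* Read lookup of a writable page: old code wrongly rejects it. */
        printf("read  writable: old=%d new=%d\n",
               old_check(writable_pte, 0), new_check(writable_pte, 0));
        /* Write lookup of a read-only page: old code wrongly accepts it. */
        printf("write readonly: old=%d new=%d\n",
               old_check(readonly_pte, 1), new_check(readonly_pte, 1));
        return 0;
}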