author     Markus Metzger <markus.t.metzger@intel.com>   2009-04-24 03:51:43 -0400
committer  Ingo Molnar <mingo@elte.hu>                    2009-04-24 04:18:52 -0400
commit     1cb81b143fa8f0e4629f10690862e2e52ca792ff (patch)
tree       667b9677f8ad1211ca3d094bedabe47a3d4f5ba9
parent     7e0bfad24d85de7cf2202a7b0ce51de11a077b21 (diff)
x86, bts, mm: clean up buffer allocation
The current mm interface is asymmetric: one function allocates a locked
buffer, while the other only refunds the locked-memory accounting.
Change this to have two functions for accounting and refunding locked
memory, respectively; and do the actual buffer allocation in ptrace.
[ Impact: refactor BTS buffer allocation code ]
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090424095143.A30265@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/ptrace.c | 39
-rw-r--r--  include/linux/mm.h       |  6
-rw-r--r--  mm/mlock.c               | 36
3 files changed, 47 insertions, 34 deletions
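The new interface splits the work: account_locked_memory() charges the given mm against RLIMIT_AS and RLIMIT_MEMLOCK, refund_locked_memory() undoes that charge, and the buffer itself is now allocated by the caller (ptrace). The sketch below illustrates that call pattern for a generic consumer; it is not part of the commit, and the wrapper name alloc_accounted_buffer() is hypothetical.

	/*
	 * Minimal sketch (not from this commit) of the account/allocate/refund
	 * pattern the new interface expects; alloc_bts_buffer() in the ptrace
	 * hunk below follows the same steps.
	 */
	static void *alloc_accounted_buffer(size_t size)
	{
		void *buffer;
		int err;

		/* Charge the caller's mm against RLIMIT_AS and RLIMIT_MEMLOCK. */
		err = account_locked_memory(current->mm, current->signal->rlim, size);
		if (err < 0)
			return NULL;

		/* The actual allocation is the caller's responsibility now. */
		buffer = kzalloc(size, GFP_KERNEL);
		if (!buffer)
			/* Allocation failed: undo the accounting. */
			refund_locked_memory(current->mm, size);

		return buffer;
	}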
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index d5252ae6c520..09ecbde91c13 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -617,17 +617,28 @@ struct bts_context {
 	struct work_struct work;
 };
 
-static inline void alloc_bts_buffer(struct bts_context *context,
-				    unsigned int size)
+static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
 {
-	void *buffer;
+	void *buffer = NULL;
+	int err = -ENOMEM;
 
-	buffer = alloc_locked_buffer(size);
-	if (buffer) {
-		context->buffer = buffer;
-		context->size = size;
-		context->mm = get_task_mm(current);
-	}
+	err = account_locked_memory(current->mm, current->signal->rlim, size);
+	if (err < 0)
+		return err;
+
+	buffer = kzalloc(size, GFP_KERNEL);
+	if (!buffer)
+		goto out_refund;
+
+	context->buffer = buffer;
+	context->size = size;
+	context->mm = get_task_mm(current);
+
+	return 0;
+
+ out_refund:
+	refund_locked_memory(current->mm, size);
+	return err;
 }
 
 static inline void free_bts_buffer(struct bts_context *context)
@@ -638,7 +649,7 @@ static inline void free_bts_buffer(struct bts_context *context)
 	kfree(context->buffer);
 	context->buffer = NULL;
 
-	refund_locked_buffer_memory(context->mm, context->size);
+	refund_locked_memory(context->mm, context->size);
 	context->size = 0;
 
 	mmput(context->mm);
@@ -786,13 +797,15 @@ static int ptrace_bts_config(struct task_struct *child,
 	context->tracer = NULL;
 
 	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
+		int err;
+
 		free_bts_buffer(context);
 		if (!cfg.size)
 			return 0;
 
-		alloc_bts_buffer(context, cfg.size);
-		if (!context->buffer)
-			return -ENOMEM;
+		err = alloc_bts_buffer(context, cfg.size);
+		if (err < 0)
+			return err;
 	}
 
 	if (cfg.flags & PTRACE_BTS_O_TRACE)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a3963ba23a6d..009eabd3c21c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,6 +19,7 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
+struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1319,7 +1320,8 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern void *alloc_locked_buffer(size_t size);
-extern void refund_locked_buffer_memory(struct mm_struct *mm, size_t size);
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+				 size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/mm/mlock.c b/mm/mlock.c
index 28be15ead9c1..ac130433c7d3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -629,38 +629,36 @@ void user_shm_unlock(size_t size, struct user_struct *user)
 	free_uid(user);
 }
 
-void *alloc_locked_buffer(size_t size)
+int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+			  size_t size)
 {
-	unsigned long rlim, vm, pgsz;
-	void *buffer = NULL;
+	unsigned long lim, vm, pgsz;
+	int error = -ENOMEM;
 
 	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	down_write(&current->mm->mmap_sem);
-
-	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-	vm = current->mm->total_vm + pgsz;
-	if (rlim < vm)
-		goto out;
+	down_write(&mm->mmap_sem);
 
-	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-	vm = current->mm->locked_vm + pgsz;
-	if (rlim < vm)
+	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	vm = mm->total_vm + pgsz;
+	if (lim < vm)
 		goto out;
 
-	buffer = kzalloc(size, GFP_KERNEL);
-	if (!buffer)
+	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	vm = mm->locked_vm + pgsz;
+	if (lim < vm)
 		goto out;
 
-	current->mm->total_vm += pgsz;
-	current->mm->locked_vm += pgsz;
+	mm->total_vm += pgsz;
+	mm->locked_vm += pgsz;
 
+	error = 0;
 out:
-	up_write(&current->mm->mmap_sem);
-	return buffer;
+	up_write(&mm->mmap_sem);
+	return error;
 }
 
-void refund_locked_buffer_memory(struct mm_struct *mm, size_t size)
+void refund_locked_memory(struct mm_struct *mm, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 