From 163b2f0ba93e9298b3d5fff2337d860c3872ec60 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 25 Jun 2009 02:49:03 +0900
Subject: sh64: Hook up page fault events for software perf counters.

sh64 can use these as well, so tie them up there as well.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index fcbb6e135cef..3ce40ea34824 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2000, 2001 Paolo Alberelli
  * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -115,6 +116,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	/* Not an IO address, so reenable interrupts */
 	local_irq_enable();
 
+	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -195,10 +198,16 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+
+	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-	else
+		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				     regs, address);
+	} else {
 		tsk->min_flt++;
+		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				     regs, address);
+	}
 
 	/* If we get here, the page fault has been handled.  Do the TLB refill
 	   now from the newly-setup PTE, to avoid having to fault again right
--
cgit v1.2.2

From 9cef7492696a416663b4edb953a4eade8517ebeb Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 29 Jul 2009 00:12:17 +0900
Subject: sh: update_mmu_cache() consolidation.

This splits out a separate __update_cache()/__update_tlb() for
update_mmu_cache() to wrap in to. This lets us share the common
__update_cache() bits while keeping special __update_tlb() handling
broken out.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 3ce40ea34824..f2e44e9ffb75 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -329,22 +329,6 @@ do_sigbus:
 	goto no_context;
 }
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	/*
-	 * This appears to get called once for every pte entry that gets
-	 * established => I don't think it's efficient to try refilling the
-	 * TLBs with the pages - some may not get accessed even. Also, for
-	 * executable pages, it is impossible to determine reliably here which
-	 * TLB they should be mapped into (or both even).
-	 *
-	 * So, just do nothing here and handle faults on demand. In the
-	 * TLBMISS handling case, the refill is now done anyway after the pte
-	 * has been fixed up, so that deals with most useful cases.
-	 */
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
@@ -482,3 +466,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	/* FIXME: Optimize this later.. */
 	flush_tlb_all();
 }
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
+{
+}
--
cgit v1.2.2
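Note: the point of the split above is that update_mmu_cache() itself can
become a generic wrapper shared by all SH parts, with only the two helpers
left per-CPU (and both empty on sh64, where faults are handled on demand).
A minimal sketch of the assumed wrapper shape -- not taken from this patch,
and the exact location in the tree may differ:

  /* Sketch only: assumed shape of the shared wrapper, not part of this diff. */
  void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t pte)
  {
	/* Cache maintenance common to all SH variants... */
	__update_cache(vma, address, pte);
	/* ...while the TLB preload stays CPU-specific; a no-op on sh64. */
	__update_tlb(vma, address, pte);
  }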
From c7914834ef3b8a396b7e82ea34ac07cdcfe6f868 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 4 Aug 2009 17:14:39 +0900
Subject: sh: Tidy up NEFF-based sign extension for SH-5.

This consolidates all of the NEFF-based sign extension for SH-5.
In the future the other SH code will need to make use of this as
well, so make it generic in preparation for more 32/64 consolidation.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index f2e44e9ffb75..fa5a95a062d0 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -337,7 +337,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	/*
 	 * Sign-extend based on neff.
 	 */
-	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
+	lpage = neff_sign_extend(page);
 	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
 	match |= lpage;
 
--
cgit v1.2.2
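Note: neff_sign_extend() replaces the open-coded ternary removed above.
Assuming NEFF is the number of implemented effective-address bits (32 on
current parts), the helper presumably reduces to something like the
following sketch, mirroring the expression it replaces:

  /* Sketch only: assumed definition, based on the open-coded version above. */
  #define NEFF		32
  #define NEFF_SIGN	(1LL << (NEFF - 1))
  #define NEFF_MASK	(-1LL << NEFF)

  static inline unsigned long long neff_sign_extend(unsigned long val)
  {
	unsigned long long extended = val;

	/* An address with bit NEFF-1 set must have the bits above it set too. */
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
  }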
From 65305ae816ca17a38340aef0ccc92d0c127acccf Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sun, 16 Aug 2009 00:53:56 +0900
Subject: sh: Convert cache disabled SH-5 over to new cache interface.

The caches enabled case needs more work, but is presently broken
regardless, so this can be done incrementally.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index fa5a95a062d0..2dcc48528f7a 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -470,8 +470,3 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
-
-void __update_cache(struct vm_area_struct *vma,
-		    unsigned long address, pte_t pte)
-{
-}
--
cgit v1.2.2

From cdd6c482c9ff9c55475ee7392ec8f672eddb7be6 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 21 Sep 2009 12:02:48 +0200
Subject: perf: Do the big rename: Performance Counters -> Performance Events

Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and less
appropriate.

All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)

The word 'event' is also a bit shorter than 'counter' - which makes it
slightly more convenient to write/handle as well.

Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be used by
anyone who has pending perfcounters patches - it converts a Linux kernel
tree over to the new naming. We tried to time this change to the point
in time where the amount of pending patches is the smallest: the end of
the merge window.

Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal with
  hardware registers - and these sed scripts are a bit over-eager in
  renaming them. I've undone some of that, but in case there's something
  left where 'counter' would be better than 'event' we can undo that on
  an individual basis instead of touching an otherwise nicely automated
  patch. )

Suggested-by: Stephane Eranian
Acked-by: Peter Zijlstra
Acked-by: Paul Mackerras
Reviewed-by: Arjan van de Ven
Cc: Mike Galbraith
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Steven Rostedt
Cc: Benjamin Herrenschmidt
Cc: David Howells
Cc: Kyle McMartin
Cc: Martin Schwidefsky
Cc: "David S. Miller"
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Cc:
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/sh/mm/tlbflush_64.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 2dcc48528f7a..de0b0e881823 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -20,7 +20,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	/* Not an IO address, so reenable interrupts */
 	local_irq_enable();
 
-	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -201,11 +201,11 @@ survive:
 
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
 	} else {
 		tsk->min_flt++;
-		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
 	}
 
--
cgit v1.2.2
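Note: only the call sites above change here; the argument list carries over
from perf_swcounter_event() unchanged. For readers puzzling over the
literals in those calls, the declaration at this point is presumably along
these lines:

  /* Sketch only: presumed declaration from <linux/perf_event.h> of this era. */
  extern void perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr);

  /*
   * As used in the fault path above: count one event (nr = 1), not in NMI
   * context (nmi = 0), with the trapping registers and the faulting address
   * attached for sampling.
   */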
From 24ef7fc4dcc57afa0c33166c25bfe7676ffd4296 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Thu, 19 Nov 2009 21:11:05 +0000
Subject: sh: Acquire some more page flags for SH-5.

We need some more page flags to hook up _PAGE_WIRED (and eventually
other things). So use the unused PTE bits above the PPN field as no
implementations use these for anything currently.

Now that we have _PAGE_WIRED let's provide the SH-5 functions for wiring
up TLB entries.

Signed-off-by: Matt Fleming
---
 arch/sh/mm/tlbflush_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index de0b0e881823..706da1d3a67a 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -36,7 +36,7 @@ extern void die(const char *,struct pt_regs *,long);
 
 static inline void print_prots(pgprot_t prot)
 {
-	printk("prot is 0x%08lx\n",pgprot_val(prot));
+	printk("prot is 0x%016llx\n",pgprot_val(prot));
 
 	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
 	       PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
--
cgit v1.2.2
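Note: the wider printk format matches the wider type; with flag bits above
the PPN field now in use, the SH-5 pgprot/pte value is handled as a 64-bit
quantity, which "%08lx" would misprint and truncate. A small illustration
of that assumption (the helper name is invented, not from the patch):

  /* Sketch only: illustrates the format-width assumption, not kernel code. */
  static void print_prot_example(pgprot_t prot)
  {
	unsigned long long val = pgprot_val(prot);	/* 64-bit on SH-5 */

	printk("prot is 0x%08lx\n", (unsigned long)val);	/* old: drops the high bits */
	printk("prot is 0x%016llx\n", val);			/* new: all 64 bits, zero padded */
  }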
From 6b6b18e62cfba44ce7b6489c7100f12b199232d7 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Thu, 22 Apr 2010 16:06:26 +0000
Subject: sh: invoke oom-killer from page fault

As explained in commit 1c0fe6e3bd, we want to call the architecture
independent oom killer when getting an unexplained OOM from
handle_mm_fault, rather than simply killing current.

Cc: linux-sh@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Signed-off-by: Nick Piggin
Acked-by: David Rientjes
Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 706da1d3a67a..25bd64c0977c 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -294,22 +294,11 @@ no_context:
  * us unable to handle the page fault gracefully.
  */
 out_of_memory:
-	if (is_global_init(current)) {
-		panic("INIT out of memory\n");
-		yield();
-		goto survive;
-	}
-	printk("fault:Out of memory\n");
 	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	printk("fault:Do sigbus\n");
--
cgit v1.2.2

From 364b97d9e2fec32b7c125f67e5a9e5f1cd0e6a37 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 26 Apr 2010 16:15:17 +0900
Subject: sh: Kill off dangling goto labels from oom-killer rework.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 25bd64c0977c..03db41cc1268 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -189,7 +189,6 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-survive:
 	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
--
cgit v1.2.2

From 59615ecdb516cf218c3699b02d87d9827dc3e0c7 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 2 Jul 2010 15:44:09 +0900
Subject: sh: Provide a global TLB flush for U/I-TLB clear.

This provides a sledgehammer approach for clearing the TLBs, only to be
used in cases where we know we will never want to use the mappings
again and have no interest in preserving state. This also destroys
wired entries.

The primary use for this is when we are either entering or exiting the
kernel completely, in the latter case as a precursor for CPU reset by
MMU.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlbflush_64.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/sh/mm/tlbflush_64.c')

diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 03db41cc1268..7f5810f5dfdc 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -455,6 +455,11 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_tlb_all();
 }
 
+void __flush_tlb_global(void)
+{
+	flush_tlb_all();
+}
+
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
--
cgit v1.2.2
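Note: on sh64 nothing is wired into the TLB here, so the global flush can
simply defer to flush_tlb_all(); the distinction only matters once wired
entries exist, since this path deliberately destroys those as well. A
hypothetical caller on the shutdown path, purely for illustration (the
function name is invented, not from this series):

  /* Sketch only: hypothetical caller, not part of this series. */
  static void example_shutdown_mmu(void)
  {
	/*
	 * We are leaving the kernel for good (e.g. ahead of a CPU reset
	 * by MMU), so no mapping, wired or otherwise, needs to survive.
	 */
	__flush_tlb_global();
  }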