From 9974458e2f9a11dbd2f4bd14fab5a79af4907b41 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 15 Jun 2009 21:45:16 +1000 Subject: perf_counter: Make set_perf_counter_pending() declaration common At present, every architecture that supports perf_counters has to declare set_perf_counter_pending() in its arch-specific headers. This consolidates the declarations into a single declaration in one common place, include/linux/perf_counter.h. On powerpc, we continue to provide a static inline definition of set_perf_counter_pending() in the powerpc hw_irq.h. Also, this removes from the x86 perf_counter.h the unused null definitions of {test,clear}_perf_counter_pending. Reported-by: Mike Frysinger Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: benh@kernel.crashing.org LKML-Reference: <18998.13388.920691.523227@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/hw_irq.h | 1 - arch/powerpc/include/asm/perf_counter.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 53512374e1c9..1974cf191b03 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -163,7 +163,6 @@ static inline unsigned long test_perf_counter_pending(void) return 0; } -static inline void set_perf_counter_pending(void) {} static inline void clear_perf_counter_pending(void) {} #endif /* CONFIG_PERF_COUNTERS */ diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index cc7c887705b8..b398a84edced 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -10,6 +10,8 @@ */ #include +#include + #define MAX_HWCOUNTERS 8 #define MAX_EVENT_ALTERNATIVES 8 #define MAX_LIMITED_HWCOUNTERS 2 -- cgit v1.2.2 From cab888e678d0986ebce95464d3842a6aeca1e3d8 Mon Sep 17 00:00:00 2001 From: Nate Case Date: Wed, 10 Jun 2009 15:37:28 -0500 Subject: powerpc/fsl-booke: Enable L1 cache on e500v1/e500v2/e500mc CPUs Some boot loaders may not enable L1 instruction/data cache. Check if data and instruction caches are enabled, and enable them if needed. Signed-off-by: Nate Case Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/reg_booke.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 601ddbc46002..6bcf364cbb2f 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -389,12 +389,14 @@ #define ICCR_CACHE 1 /* Cacheable */ /* Bit definitions for L1CSR0. */ +#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ #define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */ #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ #define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */ #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ /* Bit definitions for L1CSR1. 
*/ +#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */ #define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */ #define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */ #define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */ -- cgit v1.2.2 From e86b4998f00b51f60b4baab9bbef5e07c9407614 Mon Sep 17 00:00:00 2001 From: "mware@internode.on.net" Date: Wed, 10 Jun 2009 17:01:19 +0000 Subject: powerpc/fsl: Increase the number of possible localbus banks Currently the fsl,*lbc devices support 8 banks (i.e. OR and BR registers). This is adequate for most pq2 and pq3 processors, but not the MPC8280, which has 12 banks. Signed-Off-By: Mark Ware Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/fsl_lbc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 63a4f779f531..1b5a21041f9b 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -95,8 +95,8 @@ struct fsl_lbc_bank { }; struct fsl_lbc_regs { - struct fsl_lbc_bank bank[8]; - u8 res0[0x28]; + struct fsl_lbc_bank bank[12]; + u8 res0[0x8]; __be32 mar; /**< UPM Address Register */ u8 res1[0x4]; __be32 mamr; /**< UPMA Mode Register */ -- cgit v1.2.2 From 9317726de42a157c377f7fe9110a63260800582f Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Tue, 26 May 2009 05:21:41 +0000 Subject: powerpc: Introduce macro spin_event_timeout() The macro spin_event_timeout() takes a condition and a timeout value (in microseconds) as parameters. It spins until either the condition is true or the timeout expires, and returns the result of the condition when the loop terminates. The primary purpose of this macro is to poll on a hardware register until a status bit changes. The timeout ensures that the loop still terminates if the bit doesn't change as expected. This macro makes it easier for driver developers to perform this kind of operation properly. Signed-off-by: Timur Tabi Acked-by: Geoff Thorpe Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/delay.h | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h index f9200a65c632..1e2eb41fa057 100644 --- a/arch/powerpc/include/asm/delay.h +++ b/arch/powerpc/include/asm/delay.h @@ -2,8 +2,11 @@ #define _ASM_POWERPC_DELAY_H #ifdef __KERNEL__ +#include + /* * Copyright 1996, Paul Mackerras. + * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -30,5 +33,38 @@ extern void udelay(unsigned long usecs); #define mdelay(n) udelay((n) * 1000) #endif +/** + * spin_event_timeout - spin until a condition becomes true or a timeout elapses + * @condition: a C expression to evaluate + * @timeout: timeout, in microseconds + * @delay: the number of microseconds to delay between each evaluation of + * @condition + * + * The process spins until the condition evaluates to true (non-zero) or the + * timeout elapses. The return value of this macro is the value of + * @condition when the loop terminates. This allows you to determine the cause + * of the loop's termination. If the return value is zero, then you know a + * timeout has occurred. 
+ * + * The primary purpose of this macro is to poll on a hardware register + * until a status bit changes. The timeout ensures that the loop still + * terminates even if the bit never changes. The delay is for devices that + * need a delay in between successive reads. + * + * gcc will optimize out the if-statement if @delay is a constant. + */ +#define spin_event_timeout(condition, timeout, delay) \ +({ \ + typeof(condition) __ret; \ + unsigned long __loops = tb_ticks_per_usec * timeout; \ + unsigned long __start = get_tbl(); \ + while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \ + if (delay) \ + udelay(delay); \ + else \ + cpu_relax(); \ + __ret; \ +}) + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_DELAY_H */ -- cgit v1.2.2 From 2fae0a524b193e200b71778407ad29b22417056a Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 14 Jun 2009 16:16:10 +0000 Subject: powerpc: Add memory clobber to mtspr() Without this clobber, mtspr can be re-ordered by gcc vs. surrounding memory accesses. While this might be OK in some cases, it's not in others, and I'm not confident that all callers get it right (in fact, I'm sure some of them don't). So for now, let's make mtspr() itself contain a memory clobber until we can audit and fix everything, at which point we can remove it if we think it's worth doing so. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/reg.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a3c28e46947c..1170267736d3 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -755,7 +755,8 @@ #define mfspr(rn) ({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) -#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) +#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ + : "memory") #ifdef __powerpc64__ #ifdef CONFIG_PPC_CELL -- cgit v1.2.2 From 87c441e54dfcf9f45593ecaf68e7e18ea53d5e13 Mon Sep 17 00:00:00 2001 From: Wolfgang Denk Date: Wed, 17 Jun 2009 00:30:22 -0600 Subject: powerpc/5xxx: Add common mpc5xxx_get_bus_frequency() function So far, MPC512x used mpc512x_find_ips_freq() to get the bus frequency, while MPC52xx used mpc52xx_find_ipb_freq(). Despite the different clock names (IPS vs. IPB), the code was identical. Use common code for both processor families. Signed-off-by: Wolfgang Denk Signed-off-by: Grant Likely --- arch/powerpc/include/asm/mpc512x.h | 22 ---------------------- arch/powerpc/include/asm/mpc52xx.h | 2 +- arch/powerpc/include/asm/mpc5xxx.h | 22 ++++++++++++++++++++++ 3 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 arch/powerpc/include/asm/mpc512x.h create mode 100644 arch/powerpc/include/asm/mpc5xxx.h (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/mpc512x.h b/arch/powerpc/include/asm/mpc512x.h deleted file mode 100644 index c48a1658eeac..000000000000 --- a/arch/powerpc/include/asm/mpc512x.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 
- * - * Author: John Rigby, , Friday Apr 13 2007 - * - * Description: - * MPC5121 Prototypes and definitions - * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ - -#ifndef __ASM_POWERPC_MPC512x_H__ -#define __ASM_POWERPC_MPC512x_H__ - -extern unsigned long mpc512x_find_ips_freq(struct device_node *node); - -#endif /* __ASM_POWERPC_MPC512x_H__ */ - diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index 52e049cd9e68..1b4f697abbdd 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -16,6 +16,7 @@ #ifndef __ASSEMBLY__ #include #include +#include #endif /* __ASSEMBLY__ */ #include @@ -268,7 +269,6 @@ struct mpc52xx_intr { #ifndef __ASSEMBLY__ /* mpc52xx_common.c */ -extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); extern void mpc5200_setup_xlb_arbiter(void); extern void mpc52xx_declare_of_platform_devices(void); extern void mpc52xx_map_common_devices(void); diff --git a/arch/powerpc/include/asm/mpc5xxx.h b/arch/powerpc/include/asm/mpc5xxx.h new file mode 100644 index 000000000000..5ce9c5fa434a --- /dev/null +++ b/arch/powerpc/include/asm/mpc5xxx.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: John Rigby, , Friday Apr 13 2007 + * + * Description: + * MPC5xxx Prototypes and definitions + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef __ASM_POWERPC_MPC5xxx_H__ +#define __ASM_POWERPC_MPC5xxx_H__ + +extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node); + +#endif /* __ASM_POWERPC_MPC5xxx_H__ */ + -- cgit v1.2.2 From a6c140969b4685f9b9f6773c0760f55ca66d1825 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 17 Jun 2009 16:33:34 -0400 Subject: Delete pcibios_select_root This function was only used by pci_claim_resource(), and the last commit deleted that use. Signed-off-by: Matthew Wilcox Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pci.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index ba17d5d90a49..d9483c504d2d 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -195,19 +195,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource *pcibios_select_root(struct pci_dev *pdev, - struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - extern void pcibios_claim_one_bus(struct pci_bus *b); extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); -- cgit v1.2.2 From 105988c015943e77092a6568bc5fb7e386df6ccd Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 17 Jun 2009 21:50:04 +1000 Subject: perf_counter: powerpc: Enable use of software counters on 32-bit powerpc This enables the perf_counter subsystem on 32-bit powerpc. 
Since we don't have any support for hardware counters on 32-bit powerpc yet, only software counters can be used. Besides selecting HAVE_PERF_COUNTERS for 32-bit powerpc as well as 64-bit, the main thing this does is add an implementation of set_perf_counter_pending(). This needs to arrange for perf_counter_do_pending() to be called when interrupts are enabled. Rather than add code to local_irq_restore as 64-bit does, the 32-bit set_perf_counter_pending() generates an interrupt by setting the decrementer to 1 so that a decrementer interrupt will become pending in 1 or 2 timebase ticks (if a decrementer interrupt isn't already pending). When interrupts are enabled, timer_interrupt() will be called, and some new code in there calls perf_counter_do_pending(). We use a per-cpu array of flags to indicate whether we need to call perf_counter_do_pending() or not. This introduces a couple of new Kconfig symbols: PPC_HAVE_PMU_SUPPORT, which is selected by processor families for which we have hardware PMU support (currently only PPC64), and PPC_PERF_CTRS, which enables the powerpc-specific perf_counter back-end. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: linuxppc-dev@ozlabs.org Cc: benh@kernel.crashing.org LKML-Reference: <19000.55404.103840.393470@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/hw_irq.h | 5 ++++- arch/powerpc/include/asm/perf_counter.h | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 10a642df014e..867ab8ed69b3 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -131,6 +131,8 @@ static inline int irqs_disabled_flags(unsigned long flags) struct irq_chip; #ifdef CONFIG_PERF_COUNTERS + +#ifdef CONFIG_PPC64 static inline unsigned long test_perf_counter_pending(void) { unsigned long x; @@ -154,8 +156,9 @@ static inline void clear_perf_counter_pending(void) "r" (0), "i" (offsetof(struct paca_struct, perf_counter_pending))); } +#endif /* CONFIG_PPC64 */ -#else +#else /* CONFIG_PERF_COUNTERS */ static inline unsigned long test_perf_counter_pending(void) { diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index b398a84edced..2c2d9f643df0 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -57,10 +57,16 @@ extern struct power_pmu *ppmu; struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) - extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +/* + * Only override the default definitions in include/linux/perf_counter.h + * if we have hardware PMU support. + */ +#ifdef CONFIG_PPC_PERF_CTRS +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif + /* * The power_pmu.get_constraint function returns a 64-bit value and * a 64-bit mask that express the constraints between this event and -- cgit v1.2.2 From 448d64f8f4c147db466c549550767cc515a4d34c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 17 Jun 2009 21:51:13 +1000 Subject: perf_counter: powerpc: Use unsigned long for register and constraint values This changes the powerpc perf_counter back-end to use unsigned long types for hardware register values and for the value/mask pairs used in checking whether a given set of events fit within the hardware constraints. 
This is in preparation for adding support for the PMU on some 32-bit powerpc processors. On 32-bit processors the hardware registers are only 32 bits wide, and the PMU structure is generally simpler, so 32 bits should be ample for expressing the hardware constraints. On 64-bit processors, unsigned long is 64 bits wide, so using unsigned long vs. u64 (unsigned long long) makes no actual difference. This makes some other very minor changes: adjusting whitespace to line things up in initialized structures, and simplifying some code in hw_perf_disable(). Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: linuxppc-dev@ozlabs.org Cc: benh@kernel.crashing.org LKML-Reference: <19000.55473.26174.331511@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 35 +++++++++++++++++---------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 2c2d9f643df0..2ceb0fefa93a 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -21,21 +21,22 @@ * describe the PMU on a particular POWER-family CPU. */ struct power_pmu { - int n_counter; - int max_alternatives; - u64 add_fields; - u64 test_adder; - int (*compute_mmcr)(u64 events[], int n_ev, - unsigned int hwc[], u64 mmcr[]); - int (*get_constraint)(u64 event, u64 *mskp, u64 *valp); - int (*get_alternatives)(u64 event, unsigned int flags, - u64 alt[]); - void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); - int (*limited_pmc_event)(u64 event); - u32 flags; - int n_generic; - int *generic_events; - int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + int n_counter; + int max_alternatives; + unsigned long add_fields; + unsigned long test_adder; + int (*compute_mmcr)(u64 events[], int n_ev, + unsigned int hwc[], unsigned long mmcr[]); + int (*get_constraint)(u64 event, unsigned long *mskp, + unsigned long *valp); + int (*get_alternatives)(u64 event, unsigned int flags, + u64 alt[]); + void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); + int (*limited_pmc_event)(u64 event); + u32 flags; + int n_generic; + int *generic_events; + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; }; @@ -68,8 +69,8 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); #endif /* - * The power_pmu.get_constraint function returns a 64-bit value and - * a 64-bit mask that express the constraints between this event and + * The power_pmu.get_constraint function returns a 32/64-bit value and + * a 32/64-bit mask that express the constraints between this event and * other events. * * The value and mask are divided up into (non-overlapping) bitfields -- cgit v1.2.2 From 079b3c569c87819e7a19d9b9f51d4746fc47bf9a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 17 Jun 2009 21:52:09 +1000 Subject: perf_counter: powerpc: Change how processor-specific back-ends get selected At present, the powerpc generic (processor-independent) perf_counter code has list of processor back-end modules, and at initialization, it looks at the PVR (processor version register) and has a switch statement to select a suitable processor-specific back-end. This is going to become inconvenient as we add more processor-specific back-ends, so this inverts the order: now each back-end checks whether it applies to the current processor, and registers itself if so. 
Furthermore, instead of looking at the PVR, back-ends now check the cur_cpu_spec->oprofile_cpu_type string and match on that. Lastly, each back-end now specifies a name for itself so the core can print a nice message when a back-end registers itself. This doesn't provide any support for unregistering back-ends, but that wouldn't be hard to do and would allow back-ends to be modules. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: linuxppc-dev@ozlabs.org Cc: benh@kernel.crashing.org LKML-Reference: <19000.55529.762227.518531@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 2ceb0fefa93a..8ccd4e155768 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -21,6 +21,7 @@ * describe the PMU on a particular POWER-family CPU. */ struct power_pmu { + const char *name; int n_counter; int max_alternatives; unsigned long add_fields; @@ -41,8 +42,6 @@ struct power_pmu { [PERF_COUNT_HW_CACHE_RESULT_MAX]; }; -extern struct power_pmu *ppmu; - /* * Values for power_pmu.flags */ @@ -56,6 +55,8 @@ extern struct power_pmu *ppmu; #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ +extern int register_power_pmu(struct power_pmu *); + struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -- cgit v1.2.2 From 5e10cf587a7b1aa34099cbfc244e6f04e96dc835 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 19 May 2009 00:52:20 -0500 Subject: powerpc/cpm1: Remove IMAP_ADDR We no longer use IMAP_ADDR for anything, so kill it off. Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/cpm1.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index 2ff798744c1d..7685ffde8821 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -598,8 +598,6 @@ typedef struct risc_timer_pram { #define CICR_IEN ((uint)0x00000080) /* Int. enable */ #define CICR_SPS ((uint)0x00000001) /* SCC Spread */ -#define IMAP_ADDR (get_immrbase()) - #define CPM_PIN_INPUT 0 #define CPM_PIN_OUTPUT 1 #define CPM_PIN_PRIMARY 0 -- cgit v1.2.2 From 194002b274e9169a04beb1b23dcc132159bb566c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 22 Jun 2009 16:35:24 +0200 Subject: perf_counter, x86: Add mmap counter read support Update the mmap control page with the information needed to use the userspace RDPMC instruction for self-monitoring. Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 8ccd4e155768..0ea0639fcf75 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -61,6 +61,8 @@ struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +#define PERF_COUNTER_INDEX_OFFSET 1 + /* * Only override the default definitions in include/linux/perf_counter.h * if we have hardware PMU support. 
-- cgit v1.2.2 From 6f0b1c6094b3e8eeeb13f8f16c1b2ef452a6f519 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 22 Jun 2009 23:13:48 +0000 Subject: powerpc: Swiotlb breaks pseries Turning on SWIOTLB selects or enables PPC_NEED_DMA_SYNC_OPS, which means we get the non-empty versions of dma_sync_* in asm/dma-mapping.h. On my pseries machine the dma_ops have no such routines and we die with a null pointer dereference. This patch gets it booting; is there a more elegant way to do it? Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/dma-mapping.h | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 3d9e887c3c0c..b44aaabdd1a6 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, + + if (dma_ops->sync_single_range_for_cpu) + dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0, size, direction); } @@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_device(dev, dma_handle, + + if (dma_ops->sync_single_range_for_device) + dma_ops->sync_single_range_for_device(dev, dma_handle, 0, size, direction); } @@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); + + if (dma_ops->sync_sg_for_cpu) + dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction); } static inline void dma_sync_sg_for_device(struct device *dev, @@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_sg_for_device(dev, sgl, nents, direction); + + if (dma_ops->sync_sg_for_device) + dma_ops->sync_sg_for_device(dev, sgl, nents, direction); } static inline void dma_sync_single_range_for_cpu(struct device *dev, @@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_cpu(dev, dma_handle, + + if (dma_ops->sync_single_range_for_cpu) + dma_ops->sync_single_range_for_cpu(dev, dma_handle, offset, size, direction); } @@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev, struct dma_mapping_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); - dma_ops->sync_single_range_for_device(dev, dma_handle, offset, + + if (dma_ops->sync_single_range_for_device) + dma_ops->sync_single_range_for_device(dev, dma_handle, offset, size, direction); } #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */ -- cgit v1.2.2 From 850f6ac316cf84bba63fdb775c897834eccbfaa3 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 18 Jun 2009 19:25:00 +0000 Subject: powerpc/mm: Make k(un)map_atomic out of line Those functions are way too big to be inline; besides, kmap_atomic() wants to call debug_kmap_atomic(), which isn't exported for modules and causes module link failures. 
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/highmem.h | 57 +++----------------------------------- 1 file changed, 4 insertions(+), 53 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index 684a73f4324f..a74c4ee6c020 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h @@ -22,9 +22,7 @@ #ifdef __KERNEL__ -#include #include -#include #include #include #include @@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table; extern void *kmap_high(struct page *page); extern void kunmap_high(struct page *page); +extern void *kmap_atomic_prot(struct page *page, enum km_type type, + pgprot_t prot); +extern void kunmap_atomic(void *kvaddr, enum km_type type); static inline void *kmap(struct page *page) { @@ -79,62 +80,11 @@ static inline void kunmap(struct page *page) kunmap_high(page); } -/* - * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap - * gives a more generic (and caching) interface. But kmap_atomic can - * be used in IRQ contexts, so in some (very limited) cases we need - * it. - */ -static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) -{ - unsigned int idx; - unsigned long vaddr; - - /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - - debug_kmap_atomic(type); - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -#ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(kmap_pte-idx))); -#endif - __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); - local_flush_tlb_page(NULL, vaddr); - - return (void*) vaddr; -} - static inline void *kmap_atomic(struct page *page, enum km_type type) { return kmap_atomic_prot(page, type, kmap_prot); } -static inline void kunmap_atomic(void *kvaddr, enum km_type type) -{ -#ifdef CONFIG_DEBUG_HIGHMEM - unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; - enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); - - if (vaddr < __fix_to_virt(FIX_KMAP_END)) { - pagefault_enable(); - return; - } - - BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); - - /* - * force other mappings to Oops if they'll try to access - * this pte without first remap it - */ - pte_clear(&init_mm, vaddr, kmap_pte-idx); - local_flush_tlb_page(NULL, vaddr); -#endif - pagefault_enable(); -} - static inline struct page *kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long) ptr; @@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr) return pte_page(*pte); } + #define flush_cache_kmaps() flush_cache_all() #endif /* __KERNEL__ */ -- cgit v1.2.2 From 5d38902c483881645ba16058cffaa478b81e5cfa Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 17 Jun 2009 17:43:59 +0000 Subject: powerpc: Add irqtrace support for 32-bit powerpc Based on initial work from: Dale Farnsworth Add the low-level irq tracing hooks for 32-bit powerpc needed to enable full lockdep functionality. The approach taken to deal with the code in entry_32.S is that we don't trace all the transitions of MSR:EE when we just turn it off to peek at TI_FLAGS without races; we only trace when we are calling into C code or returning from exceptions with a state that has changed from what lockdep thinks. 
There's a little bugger though: If we take an exception that keeps interrupts enabled (such as an alignment exception) while interrupts are enabled, we will call trace_hardirqs_on() on the way back spuriously. Not a big deal, but to get rid of it would require remembering in pt_regs that the exception was one of the types that keep interrupts enabled, which we don't know at this stage. (Well, we could test all cases for regs->trap but that sucks too much). Signed-off-by: Benjamin Herrenschmidt Tested-by: Kumar Gala --- arch/powerpc/include/asm/hw_irq.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 867ab8ed69b3..8b505eaaa38a 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags) #if defined(CONFIG_BOOKE) #define SET_MSR_EE(x) mtmsr(x) -#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") +#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") #else #define SET_MSR_EE(x) mtmsr(x) -#define local_irq_restore(flags) mtmsr(flags) +#define raw_local_irq_restore(flags) mtmsr(flags) #endif -static inline void local_irq_disable(void) +static inline void raw_local_irq_disable(void) { #ifdef CONFIG_BOOKE __asm__ __volatile__("wrteei 0": : :"memory"); @@ -86,7 +86,7 @@ static inline void local_irq_disable(void) #endif } -static inline void local_irq_enable(void) +static inline void raw_local_irq_enable(void) { #ifdef CONFIG_BOOKE __asm__ __volatile__("wrteei 1": : :"memory"); @@ -98,7 +98,7 @@ static inline void local_irq_enable(void) #endif } -static inline void local_irq_save_ptr(unsigned long *flags) +static inline void raw_local_irq_save_ptr(unsigned long *flags) { unsigned long msr; msr = mfmsr(); @@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags) #endif } -#define local_save_flags(flags) ((flags) = mfmsr()) -#define local_irq_save(flags) local_irq_save_ptr(&flags) -#define irqs_disabled() ((mfmsr() & MSR_EE) == 0) +#define raw_local_save_flags(flags) ((flags) = mfmsr()) +#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) +#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) +#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) -#define hard_irq_enable() local_irq_enable() -#define hard_irq_disable() local_irq_disable() +#define hard_irq_disable() raw_local_irq_disable() static inline int irqs_disabled_flags(unsigned long flags) { -- cgit v1.2.2 From f97bb36f705da0a86b3ea77bfeee3415fee0b025 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 16 Jun 2009 16:42:49 +0000 Subject: powerpc/rtas: Turn rtas lock into a raw spinlock RTAS currently uses a normal spinlock. However, it can be called from contexts where this is not necessarily a good idea. For example, it can be called while syncing timebases, with the core timebase being frozen. Unfortunately, that will deadlock in case of lock contention when spinlock debugging is enabled, as the spin lock debugging code will try to use __delay() which ... relies on the timebase being enabled. RTAS can also be used in some low-level IRQ handling code paths, so it may as well be a raw spinlock for -rt's sake. 
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/rtas.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 01c12339b304..0af42d20b692 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - spinlock_t lock; + raw_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; -- cgit v1.2.2 From c4007a2fbf5f82b7e694c22b5929c87e38415a56 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 16 Jun 2009 16:42:50 +0000 Subject: powerpc: Use one common impl. of RTAS timebase sync and use raw spinlock Several platforms use their own copy of what is essentially the same code, using RTAS to synchronize the timebases when bringing up new CPUs. This moves it all into a single common implementation and additionally turns the spinlock into a raw spinlock since the former can rely on the timebase not being frozen when spinlock debugging is enabled, and finally masks interrupts while the timebase is disabled. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/rtas.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 0af42d20b692..168fce726201 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg) (devfn << 8) | (reg & 0xff); } +extern void __cpuinit rtas_give_timebase(void); +extern void __cpuinit rtas_take_timebase(void); + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ -- cgit v1.2.2 From 6c16a74d423f584ed80815ee7b944f5b578dd37a Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 15 Jun 2009 16:53:43 +0000 Subject: powerpc/mm: Fix potential access to freed pages when using hugetlbfs When using 64k page sizes, our PTE pages are split into two halves, the second half containing the "extension" used to keep track of individual 4k pages when not using HW 64k pages. However, our page tables used for hugetlb have a slightly different format and don't carry that "second half". Our code that batches PTEs to be invalidated unconditionally reads the "second half" (to put it into the batch), which means that when called to invalidate hugetlb PTEs, it will access unrelated memory. It breaks when CONFIG_DEBUG_PAGEALLOC is enabled. This fixes it by only accessing the second half when the _PAGE_COMBO bit is set in the first half, which indicates that we are dealing with a "combo" page which represents 16x4k subpages. Anything else shouldn't have this bit set and thus shouldn't require loading from the second half. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pte-hash64-64k.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index e05d26fa372f..82b72207c51c 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -47,7 +47,8 @@ * generic accessors and iterators here */ #define __real_pte(e,p) ((real_pte_t) { \ - (e), pte_val(*((p) + PTRS_PER_PTE)) }) + (e), ((e) & _PAGE_COMBO) ? 
\ + (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) #define __rpte_to_pte(r) ((r).pte) -- cgit v1.2.2 From ad9064d5e22a6a24f828dad63c4775c4d7280bd4 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Mon, 29 Jun 2009 13:40:51 +0000 Subject: powerpc: Fix spin_event_timeout() to be robust over context switches The current implementation of spin_event_timeout() can be interrupted by an IRQ or context switch after testing the condition, but before checking the timeout. This can cause the loop to report a timeout when the condition actually became true in the middle. This patch adds one final check of the condition upon exit of the loop if the last test of the condition was still false. Signed-off-by: Grant Likely Acked-by: Timur Tabi Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/delay.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h index 1e2eb41fa057..52e4d54da2a9 100644 --- a/arch/powerpc/include/asm/delay.h +++ b/arch/powerpc/include/asm/delay.h @@ -63,6 +63,8 @@ extern void udelay(unsigned long usecs); udelay(delay); \ else \ cpu_relax(); \ + if (!__ret) \ + __ret = (condition); \ __ret; \ }) -- cgit v1.2.2 From c99e6efe1ba04561e7d93a81f0be07e37427e835 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 10 Jul 2009 14:57:56 +0200 Subject: sched: INIT_PREEMPT_COUNT Pull the initial preempt_count value into a single definition site. Maintainers for: alpha, ia64 and m68k, please have a look, your arch code is funny. The header magic is a bit odd but, similar to the KERNEL_DS one, CPP waits to expand these macros until the INIT_THREAD_INFO macro itself is expanded, which is in arch/*/kernel/init_task.c where we've already included sched.h so we're good. Cc: tony.luck@intel.com Cc: rth@twiddle.net Cc: geert@linux-m68k.org Signed-off-by: Peter Zijlstra Acked-by: Matt Mackall Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/thread_info.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 9aba5a38a7c4..c8b329255678 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -46,15 +46,13 @@ struct thread_info { /* * macros/functions for gaining access to the thread information structure - * - * preempt_count needs to be 1 initially, until the scheduler is functional. */ #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .exec_domain = &default_exec_domain, \ .cpu = 0, \ - .preempt_count = 1, \ + .preempt_count = INIT_PREEMPT_COUNT, \ .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ -- cgit v1.2.2 From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 22 Jul 2009 15:44:28 +1000 Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb() Upcoming patches to support the new 64-bit "BookE" powerpc architecture will need to have the virtual address corresponding to the PTE page when freeing it, due to the way the HW table walker works. 
Basically, the TLB can be loaded with "large" pages that cover the whole virtual space (well, sort-of, half of it actually) represented by a PTE page, and which contain an "indirect" bit indicating that this TLB entry RPN points to an array of PTEs from which the TLB can then create direct entries. Thus, in order to invalidate those when PTE pages are deleted, we need the virtual address to pass to tlbilx or tlbivax instructions. The old trick of sticking it somewhere in the PTE page struct page sucks too much; the address is almost readily available in all call sites, and almost everybody implements these as macros, so we may as well add the argument everywhere. I added it to the pmd and pud variants for consistency. Signed-off-by: Benjamin Herrenschmidt Acked-by: David Howells [MN10300 & FRV] Acked-by: Nick Piggin Acked-by: Martin Schwidefsky [s390] Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pgalloc-32.h | 2 +- arch/powerpc/include/asm/pgalloc-64.h | 4 ++-- arch/powerpc/include/asm/pgalloc.h | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h index 0815eb40acae..c9500d666a1d 100644 --- a/arch/powerpc/include/asm/pgalloc-32.h +++ b/arch/powerpc/include/asm/pgalloc-32.h @@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); */ /* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ #define pmd_free(mm, x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) +#define __pmd_free_tlb(tlb,x,a) do { } while (0) /* #define pgd_populate(mm, pmd, pte) BUG() */ #ifndef CONFIG_BOOKE diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index afda2bdd860f..e6f069c4f713 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf) kmem_cache_free(pgtable_cache[cachenum], p); } -#define __pmd_free_tlb(tlb, pmd) \ +#define __pmd_free_tlb(tlb, pmd,addr) \ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) #ifndef CONFIG_PPC_64K_PAGES -#define __pud_free_tlb(tlb, pud) \ +#define __pud_free_tlb(tlb, pud, addr) \ pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) #endif /* CONFIG_PPC_64K_PAGES */ diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index 5d8480265a77..1730e5e298d6 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); #ifdef CONFIG_SMP -#define __pte_free_tlb(tlb,ptepage) \ +#define __pte_free_tlb(tlb,ptepage,address) \ do { \ pgtable_page_dtor(ptepage); \ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ - PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ + PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ } while (0) #else -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte)) #endif -- cgit v1.2.2 From c428dcc9b9f967945992a2f8529e8c50a31d7913 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 17 Jun 2009 15:04:19 +1000 Subject: KVM: Make KVM_PAGES_PER_HPAGE unsigned long to avoid build error on powerpc Eliminates this build error: arch/powerpc/kvm/../../../virt/kvm/kvm_main.c:1178: 
error: integer overflow in expression Signed-off-by: Stephen Rothwell Signed-off-by: Avi Kivity --- arch/powerpc/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index dfdf13c9fefd..fddc3ed715fa 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -34,7 +34,7 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 /* We don't currently support large pages. */ -#define KVM_PAGES_PER_HPAGE (1<<31) +#define KVM_PAGES_PER_HPAGE (1UL << 31) struct kvm; struct kvm_run; -- cgit v1.2.2
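Editor's note: the following is a minimal, standalone userspace sketch (not part of any patch above; the macro names are invented for illustration) of the overflow the final commit fixes. On an LP64 target, 1 << 31 is evaluated in 32-bit signed int arithmetic and shifts into the sign bit before any widening, while 1UL << 31 is evaluated in unsigned long and stays well-defined.

#include <stdio.h>

#define PAGES_PER_HPAGE_BAD	(1 << 31)	/* hypothetical: int arithmetic, shifts into the sign bit */
#define PAGES_PER_HPAGE_GOOD	(1UL << 31)	/* hypothetical: unsigned long arithmetic, well-defined */

int main(void)
{
	/* The int version is INT_MIN and sign-extends when converted to a
	 * wider unsigned type, which is what made the KVM expression at
	 * kvm_main.c:1178 overflow. */
	unsigned long bad = PAGES_PER_HPAGE_BAD;
	unsigned long good = PAGES_PER_HPAGE_GOOD;

	printf("bad  = %#lx\n", bad);	/* 0xffffffff80000000 on LP64 */
	printf("good = %#lx\n", good);	/* 0x80000000 */
	return 0;
}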