Diffstat (limited to 'arch/powerpc')
49 files changed, 1240 insertions, 491 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 155d571f5e26..8a54eb8e3768 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
| @@ -672,6 +672,9 @@ config ZONE_DMA | |||
| 672 | bool | 672 | bool |
| 673 | default y | 673 | default y |
| 674 | 674 | ||
| 675 | config NEED_DMA_MAP_STATE | ||
| 676 | def_bool (PPC64 || NOT_COHERENT_CACHE) | ||
| 677 | |||
| 675 | config GENERIC_ISA_DMA | 678 | config GENERIC_ISA_DMA |
| 676 | bool | 679 | bool |
| 677 | depends on PPC64 || POWER4 || 6xx && !CPM2 | 680 | depends on PPC64 || POWER4 || 6xx && !CPM2 |
diff --git a/arch/powerpc/boot/dts/gef_ppc9a.dts b/arch/powerpc/boot/dts/gef_ppc9a.dts
index 977f260d5e64..83f4b79dff85 100644
--- a/arch/powerpc/boot/dts/gef_ppc9a.dts
+++ b/arch/powerpc/boot/dts/gef_ppc9a.dts
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * GE Fanuc PPC9A Device Tree Source | 2 | * GE PPC9A Device Tree Source |
| 3 | * | 3 | * |
| 4 | * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 4 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
diff --git a/arch/powerpc/boot/dts/gef_sbc310.dts b/arch/powerpc/boot/dts/gef_sbc310.dts
index 8e4efff3bda1..fc3a331dd392 100644
--- a/arch/powerpc/boot/dts/gef_sbc310.dts
+++ b/arch/powerpc/boot/dts/gef_sbc310.dts
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * GE Fanuc SBC310 Device Tree Source | 2 | * GE SBC310 Device Tree Source |
| 3 | * | 3 | * |
| 4 | * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 4 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index bb7060078fb4..c0671cc98125 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * GE Fanuc SBC610 Device Tree Source | 2 | * GE SBC610 Device Tree Source |
| 3 | * | 3 | * |
| 4 | * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 4 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts
index 65b8b4f27efe..d8b5d12fb663 100644
--- a/arch/powerpc/boot/dts/kmeter1.dts
+++ b/arch/powerpc/boot/dts/kmeter1.dts
| @@ -490,7 +490,7 @@ | |||
| 490 | compatible = "cfi-flash"; | 490 | compatible = "cfi-flash"; |
| 491 | /* | 491 | /* |
| 492 | * The Intel P30 chip has 2 non-identical chips on | 492 | * The Intel P30 chip has 2 non-identical chips on |
| 493 | * one die, so we need to define 2 seperate regions | 493 | * one die, so we need to define 2 separate regions |
| 494 | * that are scanned by physmap_of independantly. | 494 | * that are scanned by physmap_of independantly. |
| 495 | */ | 495 | */ |
| 496 | reg = <0 0x00000000 0x02000000 | 496 | reg = <0 0x00000000 0x02000000 |
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 4774c2f92232..396d21a80058 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
| @@ -7,7 +7,8 @@ | |||
| 7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
| 8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
| 9 | 9 | ||
| 10 | #define COMPAT_USER_HZ 100 | 10 | #define COMPAT_USER_HZ 100 |
| 11 | #define COMPAT_UTS_MACHINE "ppc\0\0" | ||
| 11 | 12 | ||
| 12 | typedef u32 compat_size_t; | 13 | typedef u32 compat_size_t; |
| 13 | typedef s32 compat_ssize_t; | 14 | typedef s32 compat_ssize_t; |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 80a973bb9e71..c85ef230135b 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
| @@ -127,9 +127,6 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
| 127 | return dma_ops->dma_supported(dev, mask); | 127 | return dma_ops->dma_supported(dev, mask); |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | /* We have our own implementation of pci_set_dma_mask() */ | ||
| 131 | #define HAVE_ARCH_PCI_SET_DMA_MASK | ||
| 132 | |||
| 133 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | 130 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) |
| 134 | { | 131 | { |
| 135 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | 132 | struct dma_map_ops *dma_ops = get_dma_ops(dev); |
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index d8a693109c82..a011603d4079 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
| @@ -14,6 +14,9 @@ | |||
| 14 | #define _ASM_POWERPC_PACA_H | 14 | #define _ASM_POWERPC_PACA_H |
| 15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
| 16 | 16 | ||
| 17 | #ifdef CONFIG_PPC64 | ||
| 18 | |||
| 19 | #include <linux/init.h> | ||
| 17 | #include <asm/types.h> | 20 | #include <asm/types.h> |
| 18 | #include <asm/lppaca.h> | 21 | #include <asm/lppaca.h> |
| 19 | #include <asm/mmu.h> | 22 | #include <asm/mmu.h> |
| @@ -145,8 +148,19 @@ struct paca_struct { | |||
| 145 | #endif | 148 | #endif |
| 146 | }; | 149 | }; |
| 147 | 150 | ||
| 148 | extern struct paca_struct paca[]; | 151 | extern struct paca_struct *paca; |
| 149 | extern void initialise_pacas(void); | 152 | extern __initdata struct paca_struct boot_paca; |
| 153 | extern void initialise_paca(struct paca_struct *new_paca, int cpu); | ||
| 154 | |||
| 155 | extern void allocate_pacas(void); | ||
| 156 | extern void free_unused_pacas(void); | ||
| 157 | |||
| 158 | #else /* CONFIG_PPC64 */ | ||
| 159 | |||
| 160 | static inline void allocate_pacas(void) { }; | ||
| 161 | static inline void free_unused_pacas(void) { }; | ||
| 162 | |||
| 163 | #endif /* CONFIG_PPC64 */ | ||
| 150 | 164 | ||
| 151 | #endif /* __KERNEL__ */ | 165 | #endif /* __KERNEL__ */ |
| 152 | #endif /* _ASM_POWERPC_PACA_H */ | 166 | #endif /* _ASM_POWERPC_PACA_H */ |
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index b5ea626eea2d..a20a9ad2258b 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
| @@ -141,38 +141,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus, | |||
| 141 | 141 | ||
| 142 | #define HAVE_PCI_LEGACY 1 | 142 | #define HAVE_PCI_LEGACY 1 |
| 143 | 143 | ||
| 144 | #if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE) | ||
| 145 | /* | ||
| 146 | * For 64-bit kernels, pci_unmap_{single,page} is not a nop. | ||
| 147 | * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and | ||
| 148 | * so on are not nops. | ||
| 149 | * and thus... | ||
| 150 | */ | ||
| 151 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
| 152 | dma_addr_t ADDR_NAME; | ||
| 153 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
| 154 | __u32 LEN_NAME; | ||
| 155 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
| 156 | ((PTR)->ADDR_NAME) | ||
| 157 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
| 158 | (((PTR)->ADDR_NAME) = (VAL)) | ||
| 159 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
| 160 | ((PTR)->LEN_NAME) | ||
| 161 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
| 162 | (((PTR)->LEN_NAME) = (VAL)) | ||
| 163 | |||
| 164 | #else /* 32-bit && coherent */ | ||
| 165 | |||
| 166 | /* pci_unmap_{page,single} is a nop so... */ | ||
| 167 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) | ||
| 168 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) | ||
| 169 | #define pci_unmap_addr(PTR, ADDR_NAME) (0) | ||
| 170 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) | ||
| 171 | #define pci_unmap_len(PTR, LEN_NAME) (0) | ||
| 172 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) | ||
| 173 | |||
| 174 | #endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */ | ||
| 175 | |||
| 176 | #ifdef CONFIG_PPC64 | 144 | #ifdef CONFIG_PPC64 |
| 177 | 145 | ||
| 178 | /* The PCI address space does not equal the physical memory address | 146 | /* The PCI address space does not equal the physical memory address |
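The block removed here is the driver-visible DMA unmap-state API; together with the new NEED_DMA_MAP_STATE symbol added to Kconfig at the top of this series, these macros are now expected to come from the generic definitions keyed on CONFIG_NEED_DMA_MAP_STATE rather than an arch-private copy. As a reminder of what that state tracks, below is a minimal driver-style sketch using the same macros; struct my_rx_buffer and the helper names are invented for illustration and are not part of the patch.

#include <linux/pci.h>

struct my_rx_buffer {				/* illustrative only */
	void *data;
	DECLARE_PCI_UNMAP_ADDR(mapping)		/* dma_addr_t, or nothing */
	DECLARE_PCI_UNMAP_LEN(len)		/* __u32, or nothing */
};

static void my_map_buffer(struct pci_dev *pdev, struct my_rx_buffer *buf,
			  size_t size)
{
	/* Mapping-error checking omitted to keep the sketch short. */
	dma_addr_t dma = pci_map_single(pdev, buf->data, size,
					PCI_DMA_FROMDEVICE);

	/* Stash the mapping so it can be undone later; on 32-bit
	 * cache-coherent configurations these stores compile away. */
	pci_unmap_addr_set(buf, mapping, dma);
	pci_unmap_len_set(buf, len, size);
}

static void my_unmap_buffer(struct pci_dev *pdev, struct my_rx_buffer *buf)
{
	pci_unmap_single(pdev, pci_unmap_addr(buf, mapping),
			 pci_unmap_len(buf, len), PCI_DMA_FROMDEVICE);
}

Declaring NEED_DMA_MAP_STATE in Kconfig keeps exactly this "store nothing unless PPC64 or a non-coherent cache" behaviour without each architecture open-coding the #ifdef in its pci.h.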
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 3288ce3997e0..e6d4ce69b126 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
| @@ -1,110 +1,23 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Performance event support - PowerPC-specific definitions. | 2 | * Performance event support - hardware-specific disambiguation |
| 3 | * | 3 | * |
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | 4 | * For now this is a compile-time decision, but eventually it should be |
| 5 | * runtime. This would allow multiplatform perf event support for e300 (fsl | ||
| 6 | * embedded perf counters) plus server/classic, and would accommodate | ||
| 7 | * devices other than the core which provide their own performance counters. | ||
| 8 | * | ||
| 9 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
| 5 | * | 10 | * |
| 6 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
| 8 | * as published by the Free Software Foundation; either version | 13 | * as published by the Free Software Foundation; either version |
| 9 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
| 10 | */ | 15 | */ |
| 11 | #include <linux/types.h> | ||
| 12 | |||
| 13 | #include <asm/hw_irq.h> | ||
| 14 | |||
| 15 | #define MAX_HWEVENTS 8 | ||
| 16 | #define MAX_EVENT_ALTERNATIVES 8 | ||
| 17 | #define MAX_LIMITED_HWCOUNTERS 2 | ||
| 18 | |||
| 19 | /* | ||
| 20 | * This struct provides the constants and functions needed to | ||
| 21 | * describe the PMU on a particular POWER-family CPU. | ||
| 22 | */ | ||
| 23 | struct power_pmu { | ||
| 24 | const char *name; | ||
| 25 | int n_counter; | ||
| 26 | int max_alternatives; | ||
| 27 | unsigned long add_fields; | ||
| 28 | unsigned long test_adder; | ||
| 29 | int (*compute_mmcr)(u64 events[], int n_ev, | ||
| 30 | unsigned int hwc[], unsigned long mmcr[]); | ||
| 31 | int (*get_constraint)(u64 event_id, unsigned long *mskp, | ||
| 32 | unsigned long *valp); | ||
| 33 | int (*get_alternatives)(u64 event_id, unsigned int flags, | ||
| 34 | u64 alt[]); | ||
| 35 | void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); | ||
| 36 | int (*limited_pmc_event)(u64 event_id); | ||
| 37 | u32 flags; | ||
| 38 | int n_generic; | ||
| 39 | int *generic_events; | ||
| 40 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | ||
| 41 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 42 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 43 | }; | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Values for power_pmu.flags | ||
| 47 | */ | ||
| 48 | #define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ | ||
| 49 | #define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ | ||
| 50 | |||
| 51 | /* | ||
| 52 | * Values for flags to get_alternatives() | ||
| 53 | */ | ||
| 54 | #define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ | ||
| 55 | #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ | ||
| 56 | #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ | ||
| 57 | |||
| 58 | extern int register_power_pmu(struct power_pmu *); | ||
| 59 | 16 | ||
| 60 | struct pt_regs; | ||
| 61 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | ||
| 62 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | ||
| 63 | |||
| 64 | #define PERF_EVENT_INDEX_OFFSET 1 | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Only override the default definitions in include/linux/perf_event.h | ||
| 68 | * if we have hardware PMU support. | ||
| 69 | */ | ||
| 70 | #ifdef CONFIG_PPC_PERF_CTRS | 17 | #ifdef CONFIG_PPC_PERF_CTRS |
| 71 | #define perf_misc_flags(regs) perf_misc_flags(regs) | 18 | #include <asm/perf_event_server.h> |
| 72 | #endif | 19 | #endif |
| 73 | 20 | ||
| 74 | /* | 21 | #ifdef CONFIG_FSL_EMB_PERF_EVENT |
| 75 | * The power_pmu.get_constraint function returns a 32/64-bit value and | 22 | #include <asm/perf_event_fsl_emb.h> |
| 76 | * a 32/64-bit mask that express the constraints between this event_id and | 23 | #endif |
| 77 | * other events. | ||
| 78 | * | ||
| 79 | * The value and mask are divided up into (non-overlapping) bitfields | ||
| 80 | * of three different types: | ||
| 81 | * | ||
| 82 | * Select field: this expresses the constraint that some set of bits | ||
| 83 | * in MMCR* needs to be set to a specific value for this event_id. For a | ||
| 84 | * select field, the mask contains 1s in every bit of the field, and | ||
| 85 | * the value contains a unique value for each possible setting of the | ||
| 86 | * MMCR* bits. The constraint checking code will ensure that two events | ||
| 87 | * that set the same field in their masks have the same value in their | ||
| 88 | * value dwords. | ||
| 89 | * | ||
| 90 | * Add field: this expresses the constraint that there can be at most | ||
| 91 | * N events in a particular class. A field of k bits can be used for | ||
| 92 | * N <= 2^(k-1) - 1. The mask has the most significant bit of the field | ||
| 93 | * set (and the other bits 0), and the value has only the least significant | ||
| 94 | * bit of the field set. In addition, the 'add_fields' and 'test_adder' | ||
| 95 | * in the struct power_pmu for this processor come into play. The | ||
| 96 | * add_fields value contains 1 in the LSB of the field, and the | ||
| 97 | * test_adder contains 2^(k-1) - 1 - N in the field. | ||
| 98 | * | ||
| 99 | * NAND field: this expresses the constraint that you may not have events | ||
| 100 | * in all of a set of classes. (For example, on PPC970, you can't select | ||
| 101 | * events from the FPU, ISU and IDU simultaneously, although any two are | ||
| 102 | * possible.) For N classes, the field is N+1 bits wide, and each class | ||
| 103 | * is assigned one bit from the least-significant N bits. The mask has | ||
| 104 | * only the most-significant bit set, and the value has only the bit | ||
| 105 | * for the event_id's class set. The test_adder has the least significant | ||
| 106 | * bit set in the field. | ||
| 107 | * | ||
| 108 | * If an event_id is not subject to the constraint expressed by a particular | ||
| 109 | * field, then it will have 0 in both the mask and value for that field. | ||
| 110 | */ | ||
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
new file mode 100644
index 000000000000..718a9fa94e68
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
| @@ -0,0 +1,50 @@ | |||
| 1 | /* | ||
| 2 | * Performance event support - Freescale embedded specific definitions. | ||
| 3 | * | ||
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
| 5 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * as published by the Free Software Foundation; either version | ||
| 10 | * 2 of the License, or (at your option) any later version. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/types.h> | ||
| 14 | #include <asm/hw_irq.h> | ||
| 15 | |||
| 16 | #define MAX_HWEVENTS 4 | ||
| 17 | |||
| 18 | /* event flags */ | ||
| 19 | #define FSL_EMB_EVENT_VALID 1 | ||
| 20 | #define FSL_EMB_EVENT_RESTRICTED 2 | ||
| 21 | |||
| 22 | /* upper half of event flags is PMLCb */ | ||
| 23 | #define FSL_EMB_EVENT_THRESHMUL 0x0000070000000000ULL | ||
| 24 | #define FSL_EMB_EVENT_THRESH 0x0000003f00000000ULL | ||
| 25 | |||
| 26 | struct fsl_emb_pmu { | ||
| 27 | const char *name; | ||
| 28 | int n_counter; /* total number of counters */ | ||
| 29 | |||
| 30 | /* | ||
| 31 | * The number of contiguous counters starting at zero that | ||
| 32 | * can hold restricted events, or zero if there are no | ||
| 33 | * restricted events. | ||
| 34 | * | ||
| 35 | * This isn't a very flexible method of expressing constraints, | ||
| 36 | * but it's very simple and is adequate for existing chips. | ||
| 37 | */ | ||
| 38 | int n_restricted; | ||
| 39 | |||
| 40 | /* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */ | ||
| 41 | u64 (*xlate_event)(u64 event_id); | ||
| 42 | |||
| 43 | int n_generic; | ||
| 44 | int *generic_events; | ||
| 45 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | ||
| 46 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 47 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 48 | }; | ||
| 49 | |||
| 50 | int register_fsl_emb_pmu(struct fsl_emb_pmu *); | ||
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
new file mode 100644
index 000000000000..8f1df1208d23
--- /dev/null
+++ b/arch/powerpc/include/asm/perf_event_server.h
| @@ -0,0 +1,110 @@ | |||
| 1 | /* | ||
| 2 | * Performance event support - PowerPC classic/server specific definitions. | ||
| 3 | * | ||
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public License | ||
| 8 | * as published by the Free Software Foundation; either version | ||
| 9 | * 2 of the License, or (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/types.h> | ||
| 13 | #include <asm/hw_irq.h> | ||
| 14 | |||
| 15 | #define MAX_HWEVENTS 8 | ||
| 16 | #define MAX_EVENT_ALTERNATIVES 8 | ||
| 17 | #define MAX_LIMITED_HWCOUNTERS 2 | ||
| 18 | |||
| 19 | /* | ||
| 20 | * This struct provides the constants and functions needed to | ||
| 21 | * describe the PMU on a particular POWER-family CPU. | ||
| 22 | */ | ||
| 23 | struct power_pmu { | ||
| 24 | const char *name; | ||
| 25 | int n_counter; | ||
| 26 | int max_alternatives; | ||
| 27 | unsigned long add_fields; | ||
| 28 | unsigned long test_adder; | ||
| 29 | int (*compute_mmcr)(u64 events[], int n_ev, | ||
| 30 | unsigned int hwc[], unsigned long mmcr[]); | ||
| 31 | int (*get_constraint)(u64 event_id, unsigned long *mskp, | ||
| 32 | unsigned long *valp); | ||
| 33 | int (*get_alternatives)(u64 event_id, unsigned int flags, | ||
| 34 | u64 alt[]); | ||
| 35 | void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); | ||
| 36 | int (*limited_pmc_event)(u64 event_id); | ||
| 37 | u32 flags; | ||
| 38 | int n_generic; | ||
| 39 | int *generic_events; | ||
| 40 | int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] | ||
| 41 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 42 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 43 | }; | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Values for power_pmu.flags | ||
| 47 | */ | ||
| 48 | #define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ | ||
| 49 | #define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ | ||
| 50 | |||
| 51 | /* | ||
| 52 | * Values for flags to get_alternatives() | ||
| 53 | */ | ||
| 54 | #define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ | ||
| 55 | #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ | ||
| 56 | #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ | ||
| 57 | |||
| 58 | extern int register_power_pmu(struct power_pmu *); | ||
| 59 | |||
| 60 | struct pt_regs; | ||
| 61 | extern unsigned long perf_misc_flags(struct pt_regs *regs); | ||
| 62 | extern unsigned long perf_instruction_pointer(struct pt_regs *regs); | ||
| 63 | |||
| 64 | #define PERF_EVENT_INDEX_OFFSET 1 | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Only override the default definitions in include/linux/perf_event.h | ||
| 68 | * if we have hardware PMU support. | ||
| 69 | */ | ||
| 70 | #ifdef CONFIG_PPC_PERF_CTRS | ||
| 71 | #define perf_misc_flags(regs) perf_misc_flags(regs) | ||
| 72 | #endif | ||
| 73 | |||
| 74 | /* | ||
| 75 | * The power_pmu.get_constraint function returns a 32/64-bit value and | ||
| 76 | * a 32/64-bit mask that express the constraints between this event_id and | ||
| 77 | * other events. | ||
| 78 | * | ||
| 79 | * The value and mask are divided up into (non-overlapping) bitfields | ||
| 80 | * of three different types: | ||
| 81 | * | ||
| 82 | * Select field: this expresses the constraint that some set of bits | ||
| 83 | * in MMCR* needs to be set to a specific value for this event_id. For a | ||
| 84 | * select field, the mask contains 1s in every bit of the field, and | ||
| 85 | * the value contains a unique value for each possible setting of the | ||
| 86 | * MMCR* bits. The constraint checking code will ensure that two events | ||
| 87 | * that set the same field in their masks have the same value in their | ||
| 88 | * value dwords. | ||
| 89 | * | ||
| 90 | * Add field: this expresses the constraint that there can be at most | ||
| 91 | * N events in a particular class. A field of k bits can be used for | ||
| 92 | * N <= 2^(k-1) - 1. The mask has the most significant bit of the field | ||
| 93 | * set (and the other bits 0), and the value has only the least significant | ||
| 94 | * bit of the field set. In addition, the 'add_fields' and 'test_adder' | ||
| 95 | * in the struct power_pmu for this processor come into play. The | ||
| 96 | * add_fields value contains 1 in the LSB of the field, and the | ||
| 97 | * test_adder contains 2^(k-1) - 1 - N in the field. | ||
| 98 | * | ||
| 99 | * NAND field: this expresses the constraint that you may not have events | ||
| 100 | * in all of a set of classes. (For example, on PPC970, you can't select | ||
| 101 | * events from the FPU, ISU and IDU simultaneously, although any two are | ||
| 102 | * possible.) For N classes, the field is N+1 bits wide, and each class | ||
| 103 | * is assigned one bit from the least-significant N bits. The mask has | ||
| 104 | * only the most-significant bit set, and the value has only the bit | ||
| 105 | * for the event_id's class set. The test_adder has the least significant | ||
| 106 | * bit set in the field. | ||
| 107 | * | ||
| 108 | * If an event_id is not subject to the constraint expressed by a particular | ||
| 109 | * field, then it will have 0 in both the mask and value for that field. | ||
| 110 | */ | ||
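The add-field rule in the comment above is easier to follow with concrete numbers. Below is a small standalone sketch (plain userspace C with invented names, not the kernel's scheduling code) for a 3-bit field that allows at most two events of a class:

#include <stdio.h>

#define FIELD_K		3		/* field width in bits */
#define FIELD_N		2		/* at most two events of this class */
#define EVENT_VALUE	0x1UL		/* each event sets the LSB of the field */
#define FIELD_MSB	(1UL << (FIELD_K - 1))			/* 0x4 */
#define TEST_ADDER	((1UL << (FIELD_K - 1)) - 1 - FIELD_N)	/* 4 - 1 - 2 = 1 */

/* Returns 1 if n events of this class can be scheduled together. */
static int add_field_ok(unsigned long n)
{
	unsigned long sum = n * EVENT_VALUE;	/* the values of n events add up */

	/* Adding test_adder makes the carry into the field's MSB appear
	 * exactly when n exceeds N; that MSB is what the mask checks. */
	return ((sum + TEST_ADDER) & FIELD_MSB) == 0;
}

int main(void)
{
	unsigned long n;

	for (n = 0; n <= 3; n++)
		printf("%lu event(s): %s\n", n,
		       add_field_ok(n) ? "ok" : "over limit");
	return 0;
}

Running it reports "ok" for zero, one and two events and "over limit" for three, matching the N <= 2^(k-1) - 1 bound quoted in the comment.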
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index b45108126562..9e2d84c06b74 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
| @@ -137,15 +137,8 @@ do { \ | |||
| 137 | } while (0) | 137 | } while (0) |
| 138 | #endif /* __powerpc64__ */ | 138 | #endif /* __powerpc64__ */ |
| 139 | 139 | ||
| 140 | /* | ||
| 141 | * These are defined as per linux/ptrace.h, which see. | ||
| 142 | */ | ||
| 143 | #define arch_has_single_step() (1) | 140 | #define arch_has_single_step() (1) |
| 144 | #define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) | 141 | #define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) |
| 145 | extern void user_enable_single_step(struct task_struct *); | ||
| 146 | extern void user_enable_block_step(struct task_struct *); | ||
| 147 | extern void user_disable_single_step(struct task_struct *); | ||
| 148 | |||
| 149 | #define ARCH_HAS_USER_SINGLE_STEP_INFO | 142 | #define ARCH_HAS_USER_SINGLE_STEP_INFO |
| 150 | 143 | ||
| 151 | #endif /* __ASSEMBLY__ */ | 144 | #endif /* __ASSEMBLY__ */ |
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 8808d307fe7e..414d434a66d0 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
| @@ -421,8 +421,8 @@ | |||
| 421 | /* Bit definitions related to the DBCR2. */ | 421 | /* Bit definitions related to the DBCR2. */ |
| 422 | #define DBCR2_DAC1US 0xC0000000 /* Data Addr Cmp 1 Sup/User */ | 422 | #define DBCR2_DAC1US 0xC0000000 /* Data Addr Cmp 1 Sup/User */ |
| 423 | #define DBCR2_DAC1ER 0x30000000 /* Data Addr Cmp 1 Eff/Real */ | 423 | #define DBCR2_DAC1ER 0x30000000 /* Data Addr Cmp 1 Eff/Real */ |
| 424 | #define DBCR2_DAC2US 0x00000000 /* Data Addr Cmp 2 Sup/User */ | 424 | #define DBCR2_DAC2US 0x0C000000 /* Data Addr Cmp 2 Sup/User */ |
| 425 | #define DBCR2_DAC2ER 0x00000000 /* Data Addr Cmp 2 Eff/Real */ | 425 | #define DBCR2_DAC2ER 0x03000000 /* Data Addr Cmp 2 Eff/Real */ |
| 426 | #define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */ | 426 | #define DBCR2_DAC12M 0x00800000 /* DAC 1-2 range enable */ |
| 427 | #define DBCR2_DAC12MM 0x00400000 /* DAC 1-2 Mask mode*/ | 427 | #define DBCR2_DAC12MM 0x00400000 /* DAC 1-2 Mask mode*/ |
| 428 | #define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */ | 428 | #define DBCR2_DAC12MX 0x00C00000 /* DAC 1-2 range eXclusive */ |
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index 0de404dfee8b..77bb71cfd991 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
| @@ -31,7 +31,7 @@ | |||
| 31 | #define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */ | 31 | #define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */ |
| 32 | #define PMLCA_CE 0x04000000 /* Condition Enable */ | 32 | #define PMLCA_CE 0x04000000 /* Condition Enable */ |
| 33 | 33 | ||
| 34 | #define PMLCA_EVENT_MASK 0x007f0000 /* Event field */ | 34 | #define PMLCA_EVENT_MASK 0x00ff0000 /* Event field */ |
| 35 | #define PMLCA_EVENT_SHIFT 16 | 35 | #define PMLCA_EVENT_SHIFT 16 |
| 36 | 36 | ||
| 37 | #define PMRN_PMLCB0 0x110 /* PM Local Control B0 */ | 37 | #define PMRN_PMLCB0 0x110 /* PM Local Control B0 */ |
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index eb8eb400c664..4084e567d28e 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
| @@ -7,7 +7,6 @@ | |||
| 7 | #include <linux/types.h> | 7 | #include <linux/types.h> |
| 8 | #include <asm/signal.h> | 8 | #include <asm/signal.h> |
| 9 | 9 | ||
| 10 | struct new_utsname; | ||
| 11 | struct pt_regs; | 10 | struct pt_regs; |
| 12 | struct rtas_args; | 11 | struct rtas_args; |
| 13 | struct sigaction; | 12 | struct sigaction; |
| @@ -35,12 +34,9 @@ asmlinkage long sys_pipe2(int __user *fildes, int flags); | |||
| 35 | asmlinkage long sys_rt_sigaction(int sig, | 34 | asmlinkage long sys_rt_sigaction(int sig, |
| 36 | const struct sigaction __user *act, | 35 | const struct sigaction __user *act, |
| 37 | struct sigaction __user *oact, size_t sigsetsize); | 36 | struct sigaction __user *oact, size_t sigsetsize); |
| 38 | asmlinkage int sys_ipc(uint call, int first, unsigned long second, | ||
| 39 | long third, void __user *ptr, long fifth); | ||
| 40 | asmlinkage long ppc64_personality(unsigned long personality); | 37 | asmlinkage long ppc64_personality(unsigned long personality); |
| 41 | asmlinkage int ppc_rtas(struct rtas_args __user *uargs); | 38 | asmlinkage int ppc_rtas(struct rtas_args __user *uargs); |
| 42 | asmlinkage time_t sys64_time(time_t __user * tloc); | 39 | asmlinkage time_t sys64_time(time_t __user * tloc); |
| 43 | asmlinkage long ppc_newuname(struct new_utsname __user * name); | ||
| 44 | 40 | ||
| 45 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, | 41 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, |
| 46 | size_t sigsetsize); | 42 | size_t sigsetsize); |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 07d2d19ab5e9..a5ee345b6a5c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
| @@ -125,7 +125,7 @@ SYSCALL_SPU(fsync) | |||
| 125 | SYS32ONLY(sigreturn) | 125 | SYS32ONLY(sigreturn) |
| 126 | PPC_SYS(clone) | 126 | PPC_SYS(clone) |
| 127 | COMPAT_SYS_SPU(setdomainname) | 127 | COMPAT_SYS_SPU(setdomainname) |
| 128 | PPC_SYS_SPU(newuname) | 128 | SYSCALL_SPU(newuname) |
| 129 | SYSCALL(ni_syscall) | 129 | SYSCALL(ni_syscall) |
| 130 | COMPAT_SYS_SPU(adjtimex) | 130 | COMPAT_SYS_SPU(adjtimex) |
| 131 | SYSCALL_SPU(mprotect) | 131 | SYSCALL_SPU(mprotect) |
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index f6ca76176766..f0a10266e7f7 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
| @@ -364,6 +364,7 @@ | |||
| 364 | #define __ARCH_WANT_STAT64 | 364 | #define __ARCH_WANT_STAT64 |
| 365 | #define __ARCH_WANT_SYS_ALARM | 365 | #define __ARCH_WANT_SYS_ALARM |
| 366 | #define __ARCH_WANT_SYS_GETHOSTNAME | 366 | #define __ARCH_WANT_SYS_GETHOSTNAME |
| 367 | #define __ARCH_WANT_SYS_IPC | ||
| 367 | #define __ARCH_WANT_SYS_PAUSE | 368 | #define __ARCH_WANT_SYS_PAUSE |
| 368 | #define __ARCH_WANT_SYS_SGETMASK | 369 | #define __ARCH_WANT_SYS_SGETMASK |
| 369 | #define __ARCH_WANT_SYS_SIGNAL | 370 | #define __ARCH_WANT_SYS_SIGNAL |
| @@ -376,6 +377,7 @@ | |||
| 376 | #define __ARCH_WANT_SYS_LLSEEK | 377 | #define __ARCH_WANT_SYS_LLSEEK |
| 377 | #define __ARCH_WANT_SYS_NICE | 378 | #define __ARCH_WANT_SYS_NICE |
| 378 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | 379 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT |
| 380 | #define __ARCH_WANT_SYS_OLD_UNAME | ||
| 379 | #define __ARCH_WANT_SYS_OLDUMOUNT | 381 | #define __ARCH_WANT_SYS_OLDUMOUNT |
| 380 | #define __ARCH_WANT_SYS_SIGPENDING | 382 | #define __ARCH_WANT_SYS_SIGPENDING |
| 381 | #define __ARCH_WANT_SYS_SIGPROCMASK | 383 | #define __ARCH_WANT_SYS_SIGPROCMASK |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c002b0410219..877326320e74 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
| @@ -98,11 +98,16 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o | |||
| 98 | 98 | ||
| 99 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 99 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
| 100 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 100 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
| 101 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o | 101 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o |
| 102 | |||
| 103 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o | ||
| 102 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ | 104 | obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ |
| 103 | power5+-pmu.o power6-pmu.o power7-pmu.o | 105 | power5+-pmu.o power6-pmu.o power7-pmu.o |
| 104 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o | 106 | obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o |
| 105 | 107 | ||
| 108 | obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o | ||
| 109 | obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o | ||
| 110 | |||
| 106 | obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o | 111 | obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o |
| 107 | 112 | ||
| 108 | ifneq ($(CONFIG_PPC_INDIRECT_IO),y) | 113 | ifneq ($(CONFIG_PPC_INDIRECT_IO),y) |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2fc82bac3bbc..8af4949434b2 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
| @@ -1808,7 +1808,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
| 1808 | .icache_bsize = 64, | 1808 | .icache_bsize = 64, |
| 1809 | .dcache_bsize = 64, | 1809 | .dcache_bsize = 64, |
| 1810 | .num_pmcs = 4, | 1810 | .num_pmcs = 4, |
| 1811 | .oprofile_cpu_type = "ppc/e500", /* xxx - galak, e500mc? */ | 1811 | .oprofile_cpu_type = "ppc/e500mc", |
| 1812 | .oprofile_type = PPC_OPROFILE_FSL_EMB, | 1812 | .oprofile_type = PPC_OPROFILE_FSL_EMB, |
| 1813 | .cpu_setup = __setup_cpu_e500mc, | 1813 | .cpu_setup = __setup_cpu_e500mc, |
| 1814 | .machine_check = machine_check_e500, | 1814 | .machine_check = machine_check_e500, |
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c
new file mode 100644
index 000000000000..7c07de0d8943
--- /dev/null
+++ b/arch/powerpc/kernel/e500-pmu.c
| @@ -0,0 +1,129 @@ | |||
| 1 | /* | ||
| 2 | * Performance counter support for e500 family processors. | ||
| 3 | * | ||
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
| 5 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * as published by the Free Software Foundation; either version | ||
| 10 | * 2 of the License, or (at your option) any later version. | ||
| 11 | */ | ||
| 12 | #include <linux/string.h> | ||
| 13 | #include <linux/perf_event.h> | ||
| 14 | #include <asm/reg.h> | ||
| 15 | #include <asm/cputable.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Map of generic hardware event types to hardware events | ||
| 19 | * Zero if unsupported | ||
| 20 | */ | ||
| 21 | static int e500_generic_events[] = { | ||
| 22 | [PERF_COUNT_HW_CPU_CYCLES] = 1, | ||
| 23 | [PERF_COUNT_HW_INSTRUCTIONS] = 2, | ||
| 24 | [PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */ | ||
| 25 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12, | ||
| 26 | [PERF_COUNT_HW_BRANCH_MISSES] = 15, | ||
| 27 | }; | ||
| 28 | |||
| 29 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Table of generalized cache-related events. | ||
| 33 | * 0 means not supported, -1 means nonsensical, other values | ||
| 34 | * are event codes. | ||
| 35 | */ | ||
| 36 | static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | ||
| 37 | /* | ||
| 38 | * D-cache misses are not split into read/write/prefetch; | ||
| 39 | * use raw event 41. | ||
| 40 | */ | ||
| 41 | [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
| 42 | [C(OP_READ)] = { 27, 0 }, | ||
| 43 | [C(OP_WRITE)] = { 28, 0 }, | ||
| 44 | [C(OP_PREFETCH)] = { 29, 0 }, | ||
| 45 | }, | ||
| 46 | [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
| 47 | [C(OP_READ)] = { 2, 60 }, | ||
| 48 | [C(OP_WRITE)] = { -1, -1 }, | ||
| 49 | [C(OP_PREFETCH)] = { 0, 0 }, | ||
| 50 | }, | ||
| 51 | /* | ||
| 52 | * Assuming LL means L2, it's not a good match for this model. | ||
| 53 | * It allocates only on L1 castout or explicit prefetch, and | ||
| 54 | * does not have separate read/write events (but it does have | ||
| 55 | * separate instruction/data events). | ||
| 56 | */ | ||
| 57 | [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
| 58 | [C(OP_READ)] = { 0, 0 }, | ||
| 59 | [C(OP_WRITE)] = { 0, 0 }, | ||
| 60 | [C(OP_PREFETCH)] = { 0, 0 }, | ||
| 61 | }, | ||
| 62 | /* | ||
| 63 | * There are data/instruction MMU misses, but that's a miss on | ||
| 64 | * the chip's internal level-one TLB which is probably not | ||
| 65 | * what the user wants. Instead, unified level-two TLB misses | ||
| 66 | * are reported here. | ||
| 67 | */ | ||
| 68 | [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
| 69 | [C(OP_READ)] = { 26, 66 }, | ||
| 70 | [C(OP_WRITE)] = { -1, -1 }, | ||
| 71 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
| 72 | }, | ||
| 73 | [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ | ||
| 74 | [C(OP_READ)] = { 12, 15 }, | ||
| 75 | [C(OP_WRITE)] = { -1, -1 }, | ||
| 76 | [C(OP_PREFETCH)] = { -1, -1 }, | ||
| 77 | }, | ||
| 78 | }; | ||
| 79 | |||
| 80 | static int num_events = 128; | ||
| 81 | |||
| 82 | /* Upper half of event id is PMLCb, for threshold events */ | ||
| 83 | static u64 e500_xlate_event(u64 event_id) | ||
| 84 | { | ||
| 85 | u32 event_low = (u32)event_id; | ||
| 86 | u64 ret; | ||
| 87 | |||
| 88 | if (event_low >= num_events) | ||
| 89 | return 0; | ||
| 90 | |||
| 91 | ret = FSL_EMB_EVENT_VALID; | ||
| 92 | |||
| 93 | if (event_low >= 76 && event_low <= 81) { | ||
| 94 | ret |= FSL_EMB_EVENT_RESTRICTED; | ||
| 95 | ret |= event_id & | ||
| 96 | (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH); | ||
| 97 | } else if (event_id & | ||
| 98 | (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) { | ||
| 99 | /* Threshold requested on non-threshold event */ | ||
| 100 | return 0; | ||
| 101 | } | ||
| 102 | |||
| 103 | return ret; | ||
| 104 | } | ||
| 105 | |||
| 106 | static struct fsl_emb_pmu e500_pmu = { | ||
| 107 | .name = "e500 family", | ||
| 108 | .n_counter = 4, | ||
| 109 | .n_restricted = 2, | ||
| 110 | .xlate_event = e500_xlate_event, | ||
| 111 | .n_generic = ARRAY_SIZE(e500_generic_events), | ||
| 112 | .generic_events = e500_generic_events, | ||
| 113 | .cache_events = &e500_cache_events, | ||
| 114 | }; | ||
| 115 | |||
| 116 | static int init_e500_pmu(void) | ||
| 117 | { | ||
| 118 | if (!cur_cpu_spec->oprofile_cpu_type) | ||
| 119 | return -ENODEV; | ||
| 120 | |||
| 121 | if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc")) | ||
| 122 | num_events = 256; | ||
| 123 | else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500")) | ||
| 124 | return -ENODEV; | ||
| 125 | |||
| 126 | return register_fsl_emb_pmu(&e500_pmu); | ||
| 127 | } | ||
| 128 | |||
| 129 | arch_initcall(init_e500_pmu); | ||
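The raw event numbers used in the tables above reach this driver through perf_event_attr.config with type PERF_TYPE_RAW, where they are validated by e500_xlate_event(). A minimal userspace sketch of counting one such raw event follows; the choice of event 27 (used above as the L1 data read-access code) is only an example, and error handling is kept to the essentials.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 27;		/* raw e500 event, see e500_cache_events above */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* workload under measurement goes here */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	printf("raw event 27 count: %llu\n", count);
	return 0;
}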
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 925807488022..bed9a29ee383 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
| @@ -219,7 +219,8 @@ generic_secondary_common_init: | |||
| 219 | * physical cpu id in r24, we need to search the pacas to find | 219 | * physical cpu id in r24, we need to search the pacas to find |
| 220 | * which logical id maps to our physical one. | 220 | * which logical id maps to our physical one. |
| 221 | */ | 221 | */ |
| 222 | LOAD_REG_ADDR(r13, paca) /* Get base vaddr of paca array */ | 222 | LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ |
| 223 | ld r13,0(r13) /* Get base vaddr of paca array */ | ||
| 223 | li r5,0 /* logical cpu id */ | 224 | li r5,0 /* logical cpu id */ |
| 224 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | 225 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ |
| 225 | cmpw r6,r24 /* Compare to our id */ | 226 | cmpw r6,r24 /* Compare to our id */ |
| @@ -536,7 +537,8 @@ _GLOBAL(pmac_secondary_start) | |||
| 536 | mtmsrd r3 /* RI on */ | 537 | mtmsrd r3 /* RI on */ |
| 537 | 538 | ||
| 538 | /* Set up a paca value for this processor. */ | 539 | /* Set up a paca value for this processor. */ |
| 539 | LOAD_REG_ADDR(r4,paca) /* Get base vaddr of paca array */ | 540 | LOAD_REG_ADDR(r4,paca) /* Load paca pointer */ |
| 541 | ld r4,0(r4) /* Get base vaddr of paca array */ | ||
| 540 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ | 542 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ |
| 541 | add r13,r13,r4 /* for this processor. */ | 543 | add r13,r13,r4 /* for this processor. */ |
| 542 | mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ | 544 | mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ |
| @@ -615,6 +617,17 @@ _GLOBAL(start_secondary_prolog) | |||
| 615 | std r3,0(r1) /* Zero the stack frame pointer */ | 617 | std r3,0(r1) /* Zero the stack frame pointer */ |
| 616 | bl .start_secondary | 618 | bl .start_secondary |
| 617 | b . | 619 | b . |
| 620 | /* | ||
| 621 | * Reset stack pointer and call start_secondary | ||
| 622 | * to continue with online operation when woken up | ||
| 623 | * from cede in cpu offline. | ||
| 624 | */ | ||
| 625 | _GLOBAL(start_secondary_resume) | ||
| 626 | ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ | ||
| 627 | li r3,0 | ||
| 628 | std r3,0(r1) /* Zero the stack frame pointer */ | ||
| 629 | bl .start_secondary | ||
| 630 | b . | ||
| 618 | #endif | 631 | #endif |
| 619 | 632 | ||
| 620 | /* | 633 | /* |
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 9ddfaef1a184..035ada5443ee 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
| @@ -469,7 +469,7 @@ static int __init serial_dev_init(void) | |||
| 469 | return -ENODEV; | 469 | return -ENODEV; |
| 470 | 470 | ||
| 471 | /* | 471 | /* |
| 472 | * Before we register the platfrom serial devices, we need | 472 | * Before we register the platform serial devices, we need |
| 473 | * to fixup their interrupts and their IO ports. | 473 | * to fixup their interrupts and their IO ports. |
| 474 | */ | 474 | */ |
| 475 | DBG("Fixing serial ports interrupts and IO ports ...\n"); | 475 | DBG("Fixing serial ports interrupts and IO ports ...\n"); |
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index d16b1ea55d44..0c40c6f476fe 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
| @@ -9,11 +9,15 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/threads.h> | 10 | #include <linux/threads.h> |
| 11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
| 12 | #include <linux/lmb.h> | ||
| 12 | 13 | ||
| 14 | #include <asm/firmware.h> | ||
| 13 | #include <asm/lppaca.h> | 15 | #include <asm/lppaca.h> |
| 14 | #include <asm/paca.h> | 16 | #include <asm/paca.h> |
| 15 | #include <asm/sections.h> | 17 | #include <asm/sections.h> |
| 16 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
| 19 | #include <asm/iseries/lpar_map.h> | ||
| 20 | #include <asm/iseries/hv_types.h> | ||
| 17 | 21 | ||
| 18 | /* This symbol is provided by the linker - let it fill in the paca | 22 | /* This symbol is provided by the linker - let it fill in the paca |
| 19 | * field correctly */ | 23 | * field correctly */ |
| @@ -70,37 +74,82 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = { | |||
| 70 | * processors. The processor VPD array needs one entry per physical | 74 | * processors. The processor VPD array needs one entry per physical |
| 71 | * processor (not thread). | 75 | * processor (not thread). |
| 72 | */ | 76 | */ |
| 73 | struct paca_struct paca[NR_CPUS]; | 77 | struct paca_struct *paca; |
| 74 | EXPORT_SYMBOL(paca); | 78 | EXPORT_SYMBOL(paca); |
| 75 | 79 | ||
| 76 | void __init initialise_pacas(void) | 80 | struct paca_struct boot_paca; |
| 77 | { | ||
| 78 | int cpu; | ||
| 79 | 81 | ||
| 80 | /* The TOC register (GPR2) points 32kB into the TOC, so that 64kB | 82 | void __init initialise_paca(struct paca_struct *new_paca, int cpu) |
| 81 | * of the TOC can be addressed using a single machine instruction. | 83 | { |
| 82 | */ | 84 | /* The TOC register (GPR2) points 32kB into the TOC, so that 64kB |
| 85 | * of the TOC can be addressed using a single machine instruction. | ||
| 86 | */ | ||
| 83 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; | 87 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; |
| 84 | 88 | ||
| 85 | /* Can't use for_each_*_cpu, as they aren't functional yet */ | ||
| 86 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
| 87 | struct paca_struct *new_paca = &paca[cpu]; | ||
| 88 | |||
| 89 | #ifdef CONFIG_PPC_BOOK3S | 89 | #ifdef CONFIG_PPC_BOOK3S |
| 90 | new_paca->lppaca_ptr = &lppaca[cpu]; | 90 | new_paca->lppaca_ptr = &lppaca[cpu]; |
| 91 | #else | 91 | #else |
| 92 | new_paca->kernel_pgd = swapper_pg_dir; | 92 | new_paca->kernel_pgd = swapper_pg_dir; |
| 93 | #endif | 93 | #endif |
| 94 | new_paca->lock_token = 0x8000; | 94 | new_paca->lock_token = 0x8000; |
| 95 | new_paca->paca_index = cpu; | 95 | new_paca->paca_index = cpu; |
| 96 | new_paca->kernel_toc = kernel_toc; | 96 | new_paca->kernel_toc = kernel_toc; |
| 97 | new_paca->kernelbase = (unsigned long) _stext; | 97 | new_paca->kernelbase = (unsigned long) _stext; |
| 98 | new_paca->kernel_msr = MSR_KERNEL; | 98 | new_paca->kernel_msr = MSR_KERNEL; |
| 99 | new_paca->hw_cpu_id = 0xffff; | 99 | new_paca->hw_cpu_id = 0xffff; |
| 100 | new_paca->__current = &init_task; | 100 | new_paca->__current = &init_task; |
| 101 | #ifdef CONFIG_PPC_STD_MMU_64 | 101 | #ifdef CONFIG_PPC_STD_MMU_64 |
| 102 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; | 102 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; |
| 103 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 103 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
| 104 | } | ||
| 105 | |||
| 106 | static int __initdata paca_size; | ||
| 107 | |||
| 108 | void __init allocate_pacas(void) | ||
| 109 | { | ||
| 110 | int nr_cpus, cpu, limit; | ||
| 111 | |||
| 112 | /* | ||
| 113 | * We can't take SLB misses on the paca, and we want to access them | ||
| 114 | * in real mode, so allocate them within the RMA and also within | ||
| 115 | * the first segment. On iSeries they must be within the area mapped | ||
| 116 | * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. | ||
| 117 | */ | ||
| 118 | limit = min(0x10000000ULL, lmb.rmo_size); | ||
| 119 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | ||
| 120 | limit = min(limit, HvPagesToMap * HVPAGESIZE); | ||
| 121 | |||
| 122 | nr_cpus = NR_CPUS; | ||
| 123 | /* On iSeries we know we can never have more than 64 cpus */ | ||
| 124 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | ||
| 125 | nr_cpus = min(64, nr_cpus); | ||
| 126 | |||
| 127 | paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus); | ||
| 128 | |||
| 129 | paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit)); | ||
| 130 | memset(paca, 0, paca_size); | ||
| 131 | |||
| 132 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", | ||
| 133 | paca_size, nr_cpus, paca); | ||
| 134 | |||
| 135 | /* Can't use for_each_*_cpu, as they aren't functional yet */ | ||
| 136 | for (cpu = 0; cpu < nr_cpus; cpu++) | ||
| 137 | initialise_paca(&paca[cpu], cpu); | ||
| 138 | } | ||
| 139 | |||
| 140 | void __init free_unused_pacas(void) | ||
| 141 | { | ||
| 142 | int new_size; | ||
| 143 | |||
| 144 | new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); | ||
| 145 | |||
| 146 | if (new_size >= paca_size) | ||
| 147 | return; | ||
| 148 | |||
| 149 | lmb_free(__pa(paca) + new_size, paca_size - new_size); | ||
| 150 | |||
| 151 | printk(KERN_DEBUG "Freed %u bytes for unused pacas\n", | ||
| 152 | paca_size - new_size); | ||
| 104 | 153 | ||
| 105 | } | 154 | paca_size = new_size; |
| 106 | } | 155 | } |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2597f9545d8a..f3c42ce516e7 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
| @@ -63,21 +63,6 @@ struct dma_map_ops *get_pci_dma_ops(void) | |||
| 63 | } | 63 | } |
| 64 | EXPORT_SYMBOL(get_pci_dma_ops); | 64 | EXPORT_SYMBOL(get_pci_dma_ops); |
| 65 | 65 | ||
| 66 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask) | ||
| 67 | { | ||
| 68 | return dma_set_mask(&dev->dev, mask); | ||
| 69 | } | ||
| 70 | |||
| 71 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | ||
| 72 | { | ||
| 73 | int rc; | ||
| 74 | |||
| 75 | rc = dma_set_mask(&dev->dev, mask); | ||
| 76 | dev->dev.coherent_dma_mask = dev->dma_mask; | ||
| 77 | |||
| 78 | return rc; | ||
| 79 | } | ||
| 80 | |||
| 81 | struct pci_controller *pcibios_alloc_controller(struct device_node *dev) | 66 | struct pci_controller *pcibios_alloc_controller(struct device_node *dev) |
| 82 | { | 67 | { |
| 83 | struct pci_controller *phb; | 68 | struct pci_controller *phb; |
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index b6cf8f1f4d35..5120bd44f69a 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
| @@ -1164,10 +1164,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
| 1164 | * Finally record data if requested. | 1164 | * Finally record data if requested. |
| 1165 | */ | 1165 | */ |
| 1166 | if (record) { | 1166 | if (record) { |
| 1167 | struct perf_sample_data data = { | 1167 | struct perf_sample_data data; |
| 1168 | .addr = ~0ULL, | 1168 | |
| 1169 | .period = event->hw.last_period, | 1169 | perf_sample_data_init(&data, ~0ULL); |
| 1170 | }; | 1170 | data.period = event->hw.last_period; |
| 1171 | 1171 | ||
| 1172 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1172 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
| 1173 | perf_get_data_addr(regs, &data.addr); | 1173 | perf_get_data_addr(regs, &data.addr); |
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
new file mode 100644
index 000000000000..369872f6cf78
--- /dev/null
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
| @@ -0,0 +1,654 @@ | |||
| 1 | /* | ||
| 2 | * Performance event support - Freescale Embedded Performance Monitor | ||
| 3 | * | ||
| 4 | * Copyright 2008-2009 Paul Mackerras, IBM Corporation. | ||
| 5 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License | ||
| 9 | * as published by the Free Software Foundation; either version | ||
| 10 | * 2 of the License, or (at your option) any later version. | ||
| 11 | */ | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/sched.h> | ||
| 14 | #include <linux/perf_event.h> | ||
| 15 | #include <linux/percpu.h> | ||
| 16 | #include <linux/hardirq.h> | ||
| 17 | #include <asm/reg_fsl_emb.h> | ||
| 18 | #include <asm/pmc.h> | ||
| 19 | #include <asm/machdep.h> | ||
| 20 | #include <asm/firmware.h> | ||
| 21 | #include <asm/ptrace.h> | ||
| 22 | |||
| 23 | struct cpu_hw_events { | ||
| 24 | int n_events; | ||
| 25 | int disabled; | ||
| 26 | u8 pmcs_enabled; | ||
| 27 | struct perf_event *event[MAX_HWEVENTS]; | ||
| 28 | }; | ||
| 29 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
| 30 | |||
| 31 | static struct fsl_emb_pmu *ppmu; | ||
| 32 | |||
| 33 | /* Number of perf_events counting hardware events */ | ||
| 34 | static atomic_t num_events; | ||
| 35 | /* Used to avoid races in calling reserve/release_pmc_hardware */ | ||
| 36 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
| 37 | |||
| 38 | /* | ||
| 39 | * If interrupts were soft-disabled when a PMU interrupt occurs, treat | ||
| 40 | * it as an NMI. | ||
| 41 | */ | ||
| 42 | static inline int perf_intr_is_nmi(struct pt_regs *regs) | ||
| 43 | { | ||
| 44 | #ifdef __powerpc64__ | ||
| 45 | return !regs->softe; | ||
| 46 | #else | ||
| 47 | return 0; | ||
| 48 | #endif | ||
| 49 | } | ||
| 50 | |||
| 51 | static void perf_event_interrupt(struct pt_regs *regs); | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Read one performance monitor counter (PMC). | ||
| 55 | */ | ||
| 56 | static unsigned long read_pmc(int idx) | ||
| 57 | { | ||
| 58 | unsigned long val; | ||
| 59 | |||
| 60 | switch (idx) { | ||
| 61 | case 0: | ||
| 62 | val = mfpmr(PMRN_PMC0); | ||
| 63 | break; | ||
| 64 | case 1: | ||
| 65 | val = mfpmr(PMRN_PMC1); | ||
| 66 | break; | ||
| 67 | case 2: | ||
| 68 | val = mfpmr(PMRN_PMC2); | ||
| 69 | break; | ||
| 70 | case 3: | ||
| 71 | val = mfpmr(PMRN_PMC3); | ||
| 72 | break; | ||
| 73 | default: | ||
| 74 | printk(KERN_ERR "oops trying to read PMC%d\n", idx); | ||
| 75 | val = 0; | ||
| 76 | } | ||
| 77 | return val; | ||
| 78 | } | ||
| 79 | |||
| 80 | /* | ||
| 81 | * Write one PMC. | ||
| 82 | */ | ||
| 83 | static void write_pmc(int idx, unsigned long val) | ||
| 84 | { | ||
| 85 | switch (idx) { | ||
| 86 | case 0: | ||
| 87 | mtpmr(PMRN_PMC0, val); | ||
| 88 | break; | ||
| 89 | case 1: | ||
| 90 | mtpmr(PMRN_PMC1, val); | ||
| 91 | break; | ||
| 92 | case 2: | ||
| 93 | mtpmr(PMRN_PMC2, val); | ||
| 94 | break; | ||
| 95 | case 3: | ||
| 96 | mtpmr(PMRN_PMC3, val); | ||
| 97 | break; | ||
| 98 | default: | ||
| 99 | printk(KERN_ERR "oops trying to write PMC%d\n", idx); | ||
| 100 | } | ||
| 101 | |||
| 102 | isync(); | ||
| 103 | } | ||
| 104 | |||
| 105 | /* | ||
| 106 | * Write one local control A register | ||
| 107 | */ | ||
| 108 | static void write_pmlca(int idx, unsigned long val) | ||
| 109 | { | ||
| 110 | switch (idx) { | ||
| 111 | case 0: | ||
| 112 | mtpmr(PMRN_PMLCA0, val); | ||
| 113 | break; | ||
| 114 | case 1: | ||
| 115 | mtpmr(PMRN_PMLCA1, val); | ||
| 116 | break; | ||
| 117 | case 2: | ||
| 118 | mtpmr(PMRN_PMLCA2, val); | ||
| 119 | break; | ||
| 120 | case 3: | ||
| 121 | mtpmr(PMRN_PMLCA3, val); | ||
| 122 | break; | ||
| 123 | default: | ||
| 124 | printk(KERN_ERR "oops trying to write PMLCA%d\n", idx); | ||
| 125 | } | ||
| 126 | |||
| 127 | isync(); | ||
| 128 | } | ||
| 129 | |||
| 130 | /* | ||
| 131 | * Write one local control B register | ||
| 132 | */ | ||
| 133 | static void write_pmlcb(int idx, unsigned long val) | ||
| 134 | { | ||
| 135 | switch (idx) { | ||
| 136 | case 0: | ||
| 137 | mtpmr(PMRN_PMLCB0, val); | ||
| 138 | break; | ||
| 139 | case 1: | ||
| 140 | mtpmr(PMRN_PMLCB1, val); | ||
| 141 | break; | ||
| 142 | case 2: | ||
| 143 | mtpmr(PMRN_PMLCB2, val); | ||
| 144 | break; | ||
| 145 | case 3: | ||
| 146 | mtpmr(PMRN_PMLCB3, val); | ||
| 147 | break; | ||
| 148 | default: | ||
| 149 | printk(KERN_ERR "oops trying to write PMLCB%d\n", idx); | ||
| 150 | } | ||
| 151 | |||
| 152 | isync(); | ||
| 153 | } | ||
| 154 | |||
| 155 | static void fsl_emb_pmu_read(struct perf_event *event) | ||
| 156 | { | ||
| 157 | s64 val, delta, prev; | ||
| 158 | |||
| 159 | /* | ||
| 160 | * Performance monitor interrupts come even when interrupts | ||
| 161 | * are soft-disabled, as long as interrupts are hard-enabled. | ||
| 162 | * Therefore we treat them like NMIs. | ||
| 163 | */ | ||
| 164 | do { | ||
| 165 | prev = atomic64_read(&event->hw.prev_count); | ||
| 166 | barrier(); | ||
| 167 | val = read_pmc(event->hw.idx); | ||
| 168 | } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); | ||
| 169 | |||
| 170 | /* The counters are only 32 bits wide */ | ||
| 171 | delta = (val - prev) & 0xfffffffful; | ||
| 172 | atomic64_add(delta, &event->count); | ||
| 173 | atomic64_sub(delta, &event->hw.period_left); | ||
| 174 | } | ||
| 175 | |||
| 176 | /* | ||
| 177 | * Disable all events to prevent PMU interrupts and to allow | ||
| 178 | * events to be added or removed. | ||
| 179 | */ | ||
| 180 | void hw_perf_disable(void) | ||
| 181 | { | ||
| 182 | struct cpu_hw_events *cpuhw; | ||
| 183 | unsigned long flags; | ||
| 184 | |||
| 185 | local_irq_save(flags); | ||
| 186 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 187 | |||
| 188 | if (!cpuhw->disabled) { | ||
| 189 | cpuhw->disabled = 1; | ||
| 190 | |||
| 191 | /* | ||
| 192 | * Check if we ever enabled the PMU on this cpu. | ||
| 193 | */ | ||
| 194 | if (!cpuhw->pmcs_enabled) { | ||
| 195 | ppc_enable_pmcs(); | ||
| 196 | cpuhw->pmcs_enabled = 1; | ||
| 197 | } | ||
| 198 | |||
| 199 | if (atomic_read(&num_events)) { | ||
| 200 | /* | ||
| 201 | * Set the 'freeze all counters' bit, and disable | ||
| 202 | * interrupts. The barrier is to make sure the | ||
| 203 | * mtpmr has been executed and the PMU has frozen | ||
| 204 | * the events before we return. | ||
| 205 | */ | ||
| 206 | |||
| 207 | mtpmr(PMRN_PMGC0, PMGC0_FAC); | ||
| 208 | isync(); | ||
| 209 | } | ||
| 210 | } | ||
| 211 | local_irq_restore(flags); | ||
| 212 | } | ||
| 213 | |||
| 214 | /* | ||
| 215 | * Re-enable all events if disable == 0. | ||
| 216 | * If we were previously disabled and events were added, then | ||
| 217 | * put the new config on the PMU. | ||
| 218 | */ | ||
| 219 | void hw_perf_enable(void) | ||
| 220 | { | ||
| 221 | struct cpu_hw_events *cpuhw; | ||
| 222 | unsigned long flags; | ||
| 223 | |||
| 224 | local_irq_save(flags); | ||
| 225 | cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 226 | if (!cpuhw->disabled) | ||
| 227 | goto out; | ||
| 228 | |||
| 229 | cpuhw->disabled = 0; | ||
| 230 | ppc_set_pmu_inuse(cpuhw->n_events != 0); | ||
| 231 | |||
| 232 | if (cpuhw->n_events > 0) { | ||
| 233 | mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); | ||
| 234 | isync(); | ||
| 235 | } | ||
| 236 | |||
| 237 | out: | ||
| 238 | local_irq_restore(flags); | ||
| 239 | } | ||
| 240 | |||
| 241 | static int collect_events(struct perf_event *group, int max_count, | ||
| 242 | struct perf_event *ctrs[]) | ||
| 243 | { | ||
| 244 | int n = 0; | ||
| 245 | struct perf_event *event; | ||
| 246 | |||
| 247 | if (!is_software_event(group)) { | ||
| 248 | if (n >= max_count) | ||
| 249 | return -1; | ||
| 250 | ctrs[n] = group; | ||
| 251 | n++; | ||
| 252 | } | ||
| 253 | list_for_each_entry(event, &group->sibling_list, group_entry) { | ||
| 254 | if (!is_software_event(event) && | ||
| 255 | event->state != PERF_EVENT_STATE_OFF) { | ||
| 256 | if (n >= max_count) | ||
| 257 | return -1; | ||
| 258 | ctrs[n] = event; | ||
| 259 | n++; | ||
| 260 | } | ||
| 261 | } | ||
| 262 | return n; | ||
| 263 | } | ||
| 264 | |||
| 265 | /* perf must be disabled, context locked on entry */ | ||
| 266 | static int fsl_emb_pmu_enable(struct perf_event *event) | ||
| 267 | { | ||
| 268 | struct cpu_hw_events *cpuhw; | ||
| 269 | int ret = -EAGAIN; | ||
| 270 | int num_counters = ppmu->n_counter; | ||
| 271 | u64 val; | ||
| 272 | int i; | ||
| 273 | |||
| 274 | cpuhw = &get_cpu_var(cpu_hw_events); | ||
| 275 | |||
| 276 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) | ||
| 277 | num_counters = ppmu->n_restricted; | ||
| 278 | |||
| 279 | /* | ||
| 280 | * Allocate counters from top-down, so that restricted-capable | ||
| 281 | * counters are kept free as long as possible. | ||
| 282 | */ | ||
| 283 | for (i = num_counters - 1; i >= 0; i--) { | ||
| 284 | if (cpuhw->event[i]) | ||
| 285 | continue; | ||
| 286 | |||
| 287 | break; | ||
| 288 | } | ||
| 289 | |||
| 290 | if (i < 0) | ||
| 291 | goto out; | ||
| 292 | |||
| 293 | event->hw.idx = i; | ||
| 294 | cpuhw->event[i] = event; | ||
| 295 | ++cpuhw->n_events; | ||
| 296 | |||
| 297 | val = 0; | ||
| 298 | if (event->hw.sample_period) { | ||
| 299 | s64 left = atomic64_read(&event->hw.period_left); | ||
| 300 | if (left < 0x80000000L) | ||
| 301 | val = 0x80000000L - left; | ||
| 302 | } | ||
| 303 | atomic64_set(&event->hw.prev_count, val); | ||
| 304 | write_pmc(i, val); | ||
| 305 | perf_event_update_userpage(event); | ||
| 306 | |||
| 307 | write_pmlcb(i, event->hw.config >> 32); | ||
| 308 | write_pmlca(i, event->hw.config_base); | ||
| 309 | |||
| 310 | ret = 0; | ||
| 311 | out: | ||
| 312 | put_cpu_var(cpu_hw_events); | ||
| 313 | return ret; | ||
| 314 | } | ||
| 315 | |||
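Restricted events may only run on the first n_restricted counters, so the allocator above searches from the highest-numbered free counter downward, keeping the restricted-capable low slots free for as long as possible. A small runnable sketch of that policy; the array model and the constants are invented for illustration.

/* Top-down counter allocation, sketched in user space. */
#include <stdio.h>

#define N_COUNTER     4
#define N_RESTRICTED  2

static const void *slot[N_COUNTER];     /* stands in for cpuhw->event[] */

static int alloc_counter(const void *ev, int restricted)
{
        int limit = restricted ? N_RESTRICTED : N_COUNTER;
        int i;

        for (i = limit - 1; i >= 0; i--) {
                if (!slot[i]) {
                        slot[i] = ev;
                        return i;       /* becomes event->hw.idx */
                }
        }
        return -1;                      /* -EAGAIN in the real code */
}

int main(void)
{
        int a = alloc_counter("cycles", 0);
        int b = alloc_counter("restricted-ev", 1);

        printf("unrestricted event -> PMC%d, restricted event -> PMC%d\n", a, b);
        return 0;
}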
| 316 | /* perf must be disabled, context locked on entry */ | ||
| 317 | static void fsl_emb_pmu_disable(struct perf_event *event) | ||
| 318 | { | ||
| 319 | struct cpu_hw_events *cpuhw; | ||
| 320 | int i = event->hw.idx; | ||
| 321 | |||
| 322 | if (i < 0) | ||
| 323 | goto out; | ||
| 324 | |||
| 325 | fsl_emb_pmu_read(event); | ||
| 326 | |||
| 327 | cpuhw = &get_cpu_var(cpu_hw_events); | ||
| 328 | |||
| 329 | WARN_ON(event != cpuhw->event[event->hw.idx]); | ||
| 330 | |||
| 331 | write_pmlca(i, 0); | ||
| 332 | write_pmlcb(i, 0); | ||
| 333 | write_pmc(i, 0); | ||
| 334 | |||
| 335 | cpuhw->event[i] = NULL; | ||
| 336 | event->hw.idx = -1; | ||
| 337 | |||
| 338 | /* | ||
| 339 | * TODO: if at least one restricted event exists, and we | ||
| 340 | * just freed up a non-restricted-capable counter, and | ||
| 341 | * there is a restricted-capable counter occupied by | ||
| 342 | * a non-restricted event, migrate that event to the | ||
| 343 | * vacated counter. | ||
| 344 | */ | ||
| 345 | |||
| 346 | cpuhw->n_events--; | ||
| 347 | |||
| 348 | out: | ||
| 349 | put_cpu_var(cpu_hw_events); | ||
| 350 | } | ||
| 351 | |||
| 352 | /* | ||
| 353 | * Re-enable interrupts on an event after they were throttled | ||
| 354 | * because they were coming too fast. | ||
| 355 | * | ||
| 356 | * Context is locked on entry, but perf is not disabled. | ||
| 357 | */ | ||
| 358 | static void fsl_emb_pmu_unthrottle(struct perf_event *event) | ||
| 359 | { | ||
| 360 | s64 val, left; | ||
| 361 | unsigned long flags; | ||
| 362 | |||
| 363 | if (event->hw.idx < 0 || !event->hw.sample_period) | ||
| 364 | return; | ||
| 365 | local_irq_save(flags); | ||
| 366 | perf_disable(); | ||
| 367 | fsl_emb_pmu_read(event); | ||
| 368 | left = event->hw.sample_period; | ||
| 369 | event->hw.last_period = left; | ||
| 370 | val = 0; | ||
| 371 | if (left < 0x80000000L) | ||
| 372 | val = 0x80000000L - left; | ||
| 373 | write_pmc(event->hw.idx, val); | ||
| 374 | atomic64_set(&event->hw.prev_count, val); | ||
| 375 | atomic64_set(&event->hw.period_left, left); | ||
| 376 | perf_event_update_userpage(event); | ||
| 377 | perf_enable(); | ||
| 378 | local_irq_restore(flags); | ||
| 379 | } | ||
| 380 | |||
| 381 | static struct pmu fsl_emb_pmu = { | ||
| 382 | .enable = fsl_emb_pmu_enable, | ||
| 383 | .disable = fsl_emb_pmu_disable, | ||
| 384 | .read = fsl_emb_pmu_read, | ||
| 385 | .unthrottle = fsl_emb_pmu_unthrottle, | ||
| 386 | }; | ||
| 387 | |||
| 388 | /* | ||
| 389 | * Release the PMU if this is the last perf_event. | ||
| 390 | */ | ||
| 391 | static void hw_perf_event_destroy(struct perf_event *event) | ||
| 392 | { | ||
| 393 | if (!atomic_add_unless(&num_events, -1, 1)) { | ||
| 394 | mutex_lock(&pmc_reserve_mutex); | ||
| 395 | if (atomic_dec_return(&num_events) == 0) | ||
| 396 | release_pmc_hardware(); | ||
| 397 | mutex_unlock(&pmc_reserve_mutex); | ||
| 398 | } | ||
| 399 | } | ||
| 400 | |||
| 401 | /* | ||
| 402 | * Translate a generic cache event_id config to a raw event_id code. | ||
| 403 | */ | ||
| 404 | static int hw_perf_cache_event(u64 config, u64 *eventp) | ||
| 405 | { | ||
| 406 | unsigned long type, op, result; | ||
| 407 | int ev; | ||
| 408 | |||
| 409 | if (!ppmu->cache_events) | ||
| 410 | return -EINVAL; | ||
| 411 | |||
| 412 | /* unpack config */ | ||
| 413 | type = config & 0xff; | ||
| 414 | op = (config >> 8) & 0xff; | ||
| 415 | result = (config >> 16) & 0xff; | ||
| 416 | |||
| 417 | if (type >= PERF_COUNT_HW_CACHE_MAX || | ||
| 418 | op >= PERF_COUNT_HW_CACHE_OP_MAX || | ||
| 419 | result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
| 420 | return -EINVAL; | ||
| 421 | |||
| 422 | ev = (*ppmu->cache_events)[type][op][result]; | ||
| 423 | if (ev == 0) | ||
| 424 | return -EOPNOTSUPP; | ||
| 425 | if (ev == -1) | ||
| 426 | return -EINVAL; | ||
| 427 | *eventp = ev; | ||
| 428 | return 0; | ||
| 429 | } | ||
| 430 | |||
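Generic cache events arrive packed in attr.config as three bytes: cache type in bits 0-7, operation in bits 8-15 and result (access/miss) in bits 16-23, which is exactly what the unpacking above undoes before indexing the back end's cache_events table. A user-space sketch of the encode/decode follows; the byte layout is the perf ABI, but the tiny lookup table and its raw code 41 are made up.

/* Pack/unpack of PERF_TYPE_HW_CACHE configs, as a runnable toy. */
#include <stdio.h>
#include <stdint.h>

enum { CACHE_L1D, CACHE_MAX };
enum { OP_READ, OP_WRITE, OP_MAX };
enum { RES_ACCESS, RES_MISS, RES_MAX };

static const int cache_events[CACHE_MAX][OP_MAX][RES_MAX] = {
        [CACHE_L1D][OP_READ][RES_MISS] = 41,    /* hypothetical raw code */
};

static int cache_event_to_raw(uint64_t config, uint64_t *raw)
{
        unsigned int type   = config & 0xff;
        unsigned int op     = (config >> 8) & 0xff;
        unsigned int result = (config >> 16) & 0xff;
        int ev;

        if (type >= CACHE_MAX || op >= OP_MAX || result >= RES_MAX)
                return -1;                      /* -EINVAL */
        ev = cache_events[type][op][result];
        if (ev == 0)
                return -2;                      /* -EOPNOTSUPP */
        *raw = ev;
        return 0;
}

int main(void)
{
        uint64_t config = CACHE_L1D | (OP_READ << 8) | (RES_MISS << 16);
        uint64_t raw;

        if (cache_event_to_raw(config, &raw) == 0)
                printf("config 0x%llx -> raw event %llu\n",
                       (unsigned long long)config, (unsigned long long)raw);
        return 0;
}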
| 431 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
| 432 | { | ||
| 433 | u64 ev; | ||
| 434 | struct perf_event *events[MAX_HWEVENTS]; | ||
| 435 | int n; | ||
| 436 | int err; | ||
| 437 | int num_restricted; | ||
| 438 | int i; | ||
| 439 | |||
| 440 | switch (event->attr.type) { | ||
| 441 | case PERF_TYPE_HARDWARE: | ||
| 442 | ev = event->attr.config; | ||
| 443 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | ||
| 444 | return ERR_PTR(-EOPNOTSUPP); | ||
| 445 | ev = ppmu->generic_events[ev]; | ||
| 446 | break; | ||
| 447 | |||
| 448 | case PERF_TYPE_HW_CACHE: | ||
| 449 | err = hw_perf_cache_event(event->attr.config, &ev); | ||
| 450 | if (err) | ||
| 451 | return ERR_PTR(err); | ||
| 452 | break; | ||
| 453 | |||
| 454 | case PERF_TYPE_RAW: | ||
| 455 | ev = event->attr.config; | ||
| 456 | break; | ||
| 457 | |||
| 458 | default: | ||
| 459 | return ERR_PTR(-EINVAL); | ||
| 460 | } | ||
| 461 | |||
| 462 | event->hw.config = ppmu->xlate_event(ev); | ||
| 463 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) | ||
| 464 | return ERR_PTR(-EINVAL); | ||
| 465 | |||
| 466 | /* | ||
| 467 | * If this is in a group, check if it can go on with all the | ||
| 468 | * other hardware events in the group. We assume the event | ||
| 469 | * hasn't been linked into its leader's sibling list at this point. | ||
| 470 | */ | ||
| 471 | n = 0; | ||
| 472 | if (event->group_leader != event) { | ||
| 473 | n = collect_events(event->group_leader, | ||
| 474 | ppmu->n_counter - 1, events); | ||
| 475 | if (n < 0) | ||
| 476 | return ERR_PTR(-EINVAL); | ||
| 477 | } | ||
| 478 | |||
| 479 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { | ||
| 480 | num_restricted = 0; | ||
| 481 | for (i = 0; i < n; i++) { | ||
| 482 | if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED) | ||
| 483 | num_restricted++; | ||
| 484 | } | ||
| 485 | |||
| 486 | if (num_restricted >= ppmu->n_restricted) | ||
| 487 | return ERR_PTR(-EINVAL); | ||
| 488 | } | ||
| 489 | |||
| 490 | event->hw.idx = -1; | ||
| 491 | |||
| 492 | event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | | ||
| 493 | (u32)((ev << 16) & PMLCA_EVENT_MASK); | ||
| 494 | |||
| 495 | if (event->attr.exclude_user) | ||
| 496 | event->hw.config_base |= PMLCA_FCU; | ||
| 497 | if (event->attr.exclude_kernel) | ||
| 498 | event->hw.config_base |= PMLCA_FCS; | ||
| 499 | if (event->attr.exclude_idle) | ||
| 500 | return ERR_PTR(-ENOTSUPP); | ||
| 501 | |||
| 502 | event->hw.last_period = event->hw.sample_period; | ||
| 503 | atomic64_set(&event->hw.period_left, event->hw.last_period); | ||
| 504 | |||
| 505 | /* | ||
| 506 | * See if we need to reserve the PMU. | ||
| 507 | * If no events are currently in use, then we have to take a | ||
| 508 | * mutex to ensure that we don't race with another task doing | ||
| 509 | * reserve_pmc_hardware or release_pmc_hardware. | ||
| 510 | */ | ||
| 511 | err = 0; | ||
| 512 | if (!atomic_inc_not_zero(&num_events)) { | ||
| 513 | mutex_lock(&pmc_reserve_mutex); | ||
| 514 | if (atomic_read(&num_events) == 0 && | ||
| 515 | reserve_pmc_hardware(perf_event_interrupt)) | ||
| 516 | err = -EBUSY; | ||
| 517 | else | ||
| 518 | atomic_inc(&num_events); | ||
| 519 | mutex_unlock(&pmc_reserve_mutex); | ||
| 520 | |||
| 521 | mtpmr(PMRN_PMGC0, PMGC0_FAC); | ||
| 522 | isync(); | ||
| 523 | } | ||
| 524 | event->destroy = hw_perf_event_destroy; | ||
| 525 | |||
| 526 | if (err) | ||
| 527 | return ERR_PTR(err); | ||
| 528 | return &fsl_emb_pmu; | ||
| 529 | } | ||
| 530 | |||
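None of this is visible from user space: an ordinary perf_event_open() exercises exactly these paths. PERF_TYPE_HARDWARE goes through the generic_events table, PERF_TYPE_RAW is handed to xlate_event() unchanged, and exclude_user/exclude_kernel end up as the PMLCA_FCU/PMLCA_FCS freeze bits in config_base. A minimal counting example against the perf syscall ABI of this era (perf_event_open has no libc wrapper, hence the syscall() helper); error handling is trimmed.

/*
 * Count retired instructions for a small busy loop.
 * Build: cc -o perfdemo perfdemo.c
 */
#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        long long count;
        volatile int i;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;        /* becomes PMLCA_FCS on this PMU */

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (i = 0; i < 1000000; i++)
                ;                       /* measured workload */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("instructions: %lld\n", count);
        close(fd);
        return 0;
}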
| 531 | /* | ||
| 532 | * A counter has overflowed; update its count and record | ||
| 533 | * things if requested. Note that interrupts are hard-disabled | ||
| 534 | * here so there is no possibility of being interrupted. | ||
| 535 | */ | ||
| 536 | static void record_and_restart(struct perf_event *event, unsigned long val, | ||
| 537 | struct pt_regs *regs, int nmi) | ||
| 538 | { | ||
| 539 | u64 period = event->hw.sample_period; | ||
| 540 | s64 prev, delta, left; | ||
| 541 | int record = 0; | ||
| 542 | |||
| 543 | /* we don't have to worry about interrupts here */ | ||
| 544 | prev = atomic64_read(&event->hw.prev_count); | ||
| 545 | delta = (val - prev) & 0xfffffffful; | ||
| 546 | atomic64_add(delta, &event->count); | ||
| 547 | |||
| 548 | /* | ||
| 549 | * See if the total period for this event has expired, | ||
| 550 | * and update for the next period. | ||
| 551 | */ | ||
| 552 | val = 0; | ||
| 553 | left = atomic64_read(&event->hw.period_left) - delta; | ||
| 554 | if (period) { | ||
| 555 | if (left <= 0) { | ||
| 556 | left += period; | ||
| 557 | if (left <= 0) | ||
| 558 | left = period; | ||
| 559 | record = 1; | ||
| 560 | } | ||
| 561 | if (left < 0x80000000LL) | ||
| 562 | val = 0x80000000LL - left; | ||
| 563 | } | ||
| 564 | |||
| 565 | /* | ||
| 566 | * Finally record data if requested. | ||
| 567 | */ | ||
| 568 | if (record) { | ||
| 569 | struct perf_sample_data data = { | ||
| 570 | .period = event->hw.last_period, | ||
| 571 | }; | ||
| 572 | |||
| 573 | if (perf_event_overflow(event, nmi, &data, regs)) { | ||
| 574 | /* | ||
| 575 | * Interrupts are coming too fast - throttle them | ||
| 576 | * by setting the event to 0, so it will be | ||
| 577 | * at least 2^30 cycles until the next interrupt | ||
| 578 | * (assuming each event counts at most 2 counts | ||
| 579 | * per cycle). | ||
| 580 | */ | ||
| 581 | val = 0; | ||
| 582 | left = ~0ULL >> 1; | ||
| 583 | } | ||
| 584 | } | ||
| 585 | |||
| 586 | write_pmc(event->hw.idx, val); | ||
| 587 | atomic64_set(&event->hw.prev_count, val); | ||
| 588 | atomic64_set(&event->hw.period_left, left); | ||
| 589 | perf_event_update_userpage(event); | ||
| 590 | } | ||
| 591 | |||
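The counters count up and raise their interrupt when bit 31 becomes set, so "sample again after left more events" is implemented by programming the counter to 0x80000000 - left, or to 0 when left is 2^31 or more (intermediate overflows are then absorbed by the wrap-safe delta arithmetic). A standalone sketch of the period bookkeeping performed above; no hardware access, and the values are chosen only to show one overflow.

/* Period accounting for an up-counting 32-bit PMC that interrupts on bit 31. */
#include <stdio.h>
#include <stdint.h>

struct sample_state {
        int64_t period;         /* requested sampling period       */
        int64_t left;           /* events remaining in this period */
};

/* Returns the value to write back into the PMC; sets *record when a
 * sample should be emitted. */
static uint32_t on_overflow(struct sample_state *s, int64_t delta, int *record)
{
        int64_t left = s->left - delta;
        uint32_t val = 0;

        *record = 0;
        if (s->period) {
                if (left <= 0) {
                        left += s->period;
                        if (left <= 0)
                                left = s->period;
                        *record = 1;
                }
                if (left < 0x80000000LL)
                        val = 0x80000000LL - left;  /* overflow after 'left' events */
        }
        s->left = left;
        return val;
}

int main(void)
{
        struct sample_state s = { .period = 100000, .left = 100000 };
        int record;
        uint32_t val = on_overflow(&s, 100123, &record);

        printf("record=%d next PMC value=0x%x (left=%lld)\n",
               record, val, (long long)s.left);
        return 0;
}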
| 592 | static void perf_event_interrupt(struct pt_regs *regs) | ||
| 593 | { | ||
| 594 | int i; | ||
| 595 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
| 596 | struct perf_event *event; | ||
| 597 | unsigned long val; | ||
| 598 | int found = 0; | ||
| 599 | int nmi; | ||
| 600 | |||
| 601 | nmi = perf_intr_is_nmi(regs); | ||
| 602 | if (nmi) | ||
| 603 | nmi_enter(); | ||
| 604 | else | ||
| 605 | irq_enter(); | ||
| 606 | |||
| 607 | for (i = 0; i < ppmu->n_counter; ++i) { | ||
| 608 | event = cpuhw->event[i]; | ||
| 609 | |||
| 610 | val = read_pmc(i); | ||
| 611 | if ((int)val < 0) { | ||
| 612 | if (event) { | ||
| 613 | /* event has overflowed */ | ||
| 614 | found = 1; | ||
| 615 | record_and_restart(event, val, regs, nmi); | ||
| 616 | } else { | ||
| 617 | /* | ||
| 618 | * Disabled counter is negative, | ||
| 619 | * reset it just in case. | ||
| 620 | */ | ||
| 621 | write_pmc(i, 0); | ||
| 622 | } | ||
| 623 | } | ||
| 624 | } | ||
| 625 | |||
| 626 | /* PMM will keep counters frozen until we return from the interrupt. */ | ||
| 627 | mtmsr(mfmsr() | MSR_PMM); | ||
| 628 | mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); | ||
| 629 | isync(); | ||
| 630 | |||
| 631 | if (nmi) | ||
| 632 | nmi_exit(); | ||
| 633 | else | ||
| 634 | irq_exit(); | ||
| 635 | } | ||
| 636 | |||
| 637 | void hw_perf_event_setup(int cpu) | ||
| 638 | { | ||
| 639 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | ||
| 640 | |||
| 641 | memset(cpuhw, 0, sizeof(*cpuhw)); | ||
| 642 | } | ||
| 643 | |||
| 644 | int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) | ||
| 645 | { | ||
| 646 | if (ppmu) | ||
| 647 | return -EBUSY; /* something's already registered */ | ||
| 648 | |||
| 649 | ppmu = pmu; | ||
| 650 | pr_info("%s performance monitor hardware support registered\n", | ||
| 651 | pmu->name); | ||
| 652 | |||
| 653 | return 0; | ||
| 654 | } | ||
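A back end (for example the e500 support selected by FSL_EMB_PERF_EVENT_E500 in the Kconfig hunk further down) fills in the counter counts and translation callbacks and registers them once at boot; register_fsl_emb_pmu() refuses a second registration. Below is a runnable toy of that single-back-end registration pattern; the struct fields are modelled loosely on how ppmu is used above and are not the real struct fsl_emb_pmu layout.

/* "Only one back end may register" pattern, as a user-space toy. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

struct toy_pmu {
        const char *name;
        int n_counter;
        int n_restricted;
        uint64_t (*xlate_event)(uint64_t ev);
};

static struct toy_pmu *ppmu;

static int register_toy_pmu(struct toy_pmu *pmu)
{
        if (ppmu)
                return -EBUSY;          /* something's already registered */
        ppmu = pmu;
        printf("%s performance monitor hardware support registered\n",
               pmu->name);
        return 0;
}

static uint64_t e500_xlate(uint64_t ev) { return ev; }  /* placeholder */

static struct toy_pmu e500_pmu = {
        .name = "e500", .n_counter = 4, .n_restricted = 2,
        .xlate_event = e500_xlate,
};

int main(void)
{
        register_toy_pmu(&e500_pmu);
        return register_toy_pmu(&e500_pmu) == -EBUSY ? 0 : 1;
}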
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 43238b2054b6..05131d634e73 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
| @@ -43,6 +43,7 @@ | |||
| 43 | #include <asm/smp.h> | 43 | #include <asm/smp.h> |
| 44 | #include <asm/system.h> | 44 | #include <asm/system.h> |
| 45 | #include <asm/mmu.h> | 45 | #include <asm/mmu.h> |
| 46 | #include <asm/paca.h> | ||
| 46 | #include <asm/pgtable.h> | 47 | #include <asm/pgtable.h> |
| 47 | #include <asm/pci.h> | 48 | #include <asm/pci.h> |
| 48 | #include <asm/iommu.h> | 49 | #include <asm/iommu.h> |
| @@ -721,6 +722,8 @@ void __init early_init_devtree(void *params) | |||
| 721 | * FIXME .. and the initrd too? */ | 722 | * FIXME .. and the initrd too? */ |
| 722 | move_device_tree(); | 723 | move_device_tree(); |
| 723 | 724 | ||
| 725 | allocate_pacas(); | ||
| 726 | |||
| 724 | DBG("Scanning CPUs ...\n"); | 727 | DBG("Scanning CPUs ...\n"); |
| 725 | 728 | ||
| 726 | /* Retrieve CPU related information from the flat tree | 729 | /* Retrieve CPU related information from the flat tree |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index d9b05866615f..ed2cfe17d25e 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
| @@ -940,7 +940,7 @@ static int del_instruction_bp(struct task_struct *child, int slot) | |||
| 940 | { | 940 | { |
| 941 | switch (slot) { | 941 | switch (slot) { |
| 942 | case 1: | 942 | case 1: |
| 943 | if (child->thread.iac1 == 0) | 943 | if ((child->thread.dbcr0 & DBCR0_IAC1) == 0) |
| 944 | return -ENOENT; | 944 | return -ENOENT; |
| 945 | 945 | ||
| 946 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) { | 946 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) { |
| @@ -952,7 +952,7 @@ static int del_instruction_bp(struct task_struct *child, int slot) | |||
| 952 | child->thread.dbcr0 &= ~DBCR0_IAC1; | 952 | child->thread.dbcr0 &= ~DBCR0_IAC1; |
| 953 | break; | 953 | break; |
| 954 | case 2: | 954 | case 2: |
| 955 | if (child->thread.iac2 == 0) | 955 | if ((child->thread.dbcr0 & DBCR0_IAC2) == 0) |
| 956 | return -ENOENT; | 956 | return -ENOENT; |
| 957 | 957 | ||
| 958 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) | 958 | if (dbcr_iac_range(child) & DBCR_IAC12MODE) |
| @@ -963,7 +963,7 @@ static int del_instruction_bp(struct task_struct *child, int slot) | |||
| 963 | break; | 963 | break; |
| 964 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 964 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
| 965 | case 3: | 965 | case 3: |
| 966 | if (child->thread.iac3 == 0) | 966 | if ((child->thread.dbcr0 & DBCR0_IAC3) == 0) |
| 967 | return -ENOENT; | 967 | return -ENOENT; |
| 968 | 968 | ||
| 969 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) { | 969 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) { |
| @@ -975,7 +975,7 @@ static int del_instruction_bp(struct task_struct *child, int slot) | |||
| 975 | child->thread.dbcr0 &= ~DBCR0_IAC3; | 975 | child->thread.dbcr0 &= ~DBCR0_IAC3; |
| 976 | break; | 976 | break; |
| 977 | case 4: | 977 | case 4: |
| 978 | if (child->thread.iac4 == 0) | 978 | if ((child->thread.dbcr0 & DBCR0_IAC4) == 0) |
| 979 | return -ENOENT; | 979 | return -ENOENT; |
| 980 | 980 | ||
| 981 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) | 981 | if (dbcr_iac_range(child) & DBCR_IAC34MODE) |
| @@ -1054,7 +1054,7 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) | |||
| 1054 | static int del_dac(struct task_struct *child, int slot) | 1054 | static int del_dac(struct task_struct *child, int slot) |
| 1055 | { | 1055 | { |
| 1056 | if (slot == 1) { | 1056 | if (slot == 1) { |
| 1057 | if (child->thread.dac1 == 0) | 1057 | if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) |
| 1058 | return -ENOENT; | 1058 | return -ENOENT; |
| 1059 | 1059 | ||
| 1060 | child->thread.dac1 = 0; | 1060 | child->thread.dac1 = 0; |
| @@ -1070,7 +1070,7 @@ static int del_dac(struct task_struct *child, int slot) | |||
| 1070 | child->thread.dvc1 = 0; | 1070 | child->thread.dvc1 = 0; |
| 1071 | #endif | 1071 | #endif |
| 1072 | } else if (slot == 2) { | 1072 | } else if (slot == 2) { |
| 1073 | if (child->thread.dac1 == 0) | 1073 | if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) |
| 1074 | return -ENOENT; | 1074 | return -ENOENT; |
| 1075 | 1075 | ||
| 1076 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE | 1076 | #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE |
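The ptrace fix above stops using "address register == 0" as a proxy for "slot unused": 0 is a perfectly legal breakpoint address, so slot occupancy has to be judged from the enable bits in DBCR0 (or the read/write bits for the data-address slots). A toy showing why the old test loses a breakpoint set at address 0; the flag value is invented for the sketch and is not the real DBCR0 layout.

/* Track breakpoint slots by an enable bit, not by "address == 0". */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define SLOT1_ENABLED 0x1u              /* illustrative, not DBCR0_IAC1 */

struct dbg_state {
        uint32_t dbcr0;                 /* control/enable bits */
        uint32_t iac1;                  /* breakpoint address  */
};

static int del_bp(struct dbg_state *d)
{
        if (!(d->dbcr0 & SLOT1_ENABLED))        /* not: if (d->iac1 == 0) */
                return -ENOENT;
        d->iac1 = 0;
        d->dbcr0 &= ~SLOT1_ENABLED;
        return 0;
}

int main(void)
{
        /* Breakpoint legitimately set at address 0. */
        struct dbg_state d = { .dbcr0 = SLOT1_ENABLED, .iac1 = 0 };

        printf("delete: %d (0 = found and removed)\n", del_bp(&d));
        return 0;
}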
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 03dd6a248198..48f0a008b20b 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/lmb.h> | 36 | #include <linux/lmb.h> |
| 37 | #include <linux/of_platform.h> | 37 | #include <linux/of_platform.h> |
| 38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
| 39 | #include <asm/paca.h> | ||
| 39 | #include <asm/prom.h> | 40 | #include <asm/prom.h> |
| 40 | #include <asm/processor.h> | 41 | #include <asm/processor.h> |
| 41 | #include <asm/vdso_datapage.h> | 42 | #include <asm/vdso_datapage.h> |
| @@ -493,6 +494,8 @@ void __init smp_setup_cpu_maps(void) | |||
| 493 | * here will have to be reworked | 494 | * here will have to be reworked |
| 494 | */ | 495 | */ |
| 495 | cpu_init_thread_core_maps(nthreads); | 496 | cpu_init_thread_core_maps(nthreads); |
| 497 | |||
| 498 | free_unused_pacas(); | ||
| 496 | } | 499 | } |
| 497 | #endif /* CONFIG_SMP */ | 500 | #endif /* CONFIG_SMP */ |
| 498 | 501 | ||
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6568406b2a30..63547394048c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
| @@ -144,9 +144,9 @@ early_param("smt-enabled", early_smt_enabled); | |||
| 144 | #endif /* CONFIG_SMP */ | 144 | #endif /* CONFIG_SMP */ |
| 145 | 145 | ||
| 146 | /* Put the paca pointer into r13 and SPRG_PACA */ | 146 | /* Put the paca pointer into r13 and SPRG_PACA */ |
| 147 | void __init setup_paca(int cpu) | 147 | static void __init setup_paca(struct paca_struct *new_paca) |
| 148 | { | 148 | { |
| 149 | local_paca = &paca[cpu]; | 149 | local_paca = new_paca; |
| 150 | mtspr(SPRN_SPRG_PACA, local_paca); | 150 | mtspr(SPRN_SPRG_PACA, local_paca); |
| 151 | #ifdef CONFIG_PPC_BOOK3E | 151 | #ifdef CONFIG_PPC_BOOK3E |
| 152 | mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); | 152 | mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); |
| @@ -176,14 +176,12 @@ void __init early_setup(unsigned long dt_ptr) | |||
| 176 | { | 176 | { |
| 177 | /* -------- printk is _NOT_ safe to use here ! ------- */ | 177 | /* -------- printk is _NOT_ safe to use here ! ------- */ |
| 178 | 178 | ||
| 179 | /* Fill in any unititialised pacas */ | ||
| 180 | initialise_pacas(); | ||
| 181 | |||
| 182 | /* Identify CPU type */ | 179 | /* Identify CPU type */ |
| 183 | identify_cpu(0, mfspr(SPRN_PVR)); | 180 | identify_cpu(0, mfspr(SPRN_PVR)); |
| 184 | 181 | ||
| 185 | /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ | 182 | /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ |
| 186 | setup_paca(0); | 183 | initialise_paca(&boot_paca, 0); |
| 184 | setup_paca(&boot_paca); | ||
| 187 | 185 | ||
| 188 | /* Initialize lockdep early or else spinlocks will blow */ | 186 | /* Initialize lockdep early or else spinlocks will blow */ |
| 189 | lockdep_init(); | 187 | lockdep_init(); |
| @@ -203,7 +201,7 @@ void __init early_setup(unsigned long dt_ptr) | |||
| 203 | early_init_devtree(__va(dt_ptr)); | 201 | early_init_devtree(__va(dt_ptr)); |
| 204 | 202 | ||
| 205 | /* Now we know the logical id of our boot cpu, setup the paca. */ | 203 | /* Now we know the logical id of our boot cpu, setup the paca. */ |
| 206 | setup_paca(boot_cpuid); | 204 | setup_paca(&paca[boot_cpuid]); |
| 207 | 205 | ||
| 208 | /* Fix up paca fields required for the boot cpu */ | 206 | /* Fix up paca fields required for the boot cpu */ |
| 209 | get_paca()->cpu_start = 1; | 207 | get_paca()->cpu_start = 1; |
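Taken together with the allocate_pacas() call added to early_init_devtree() and the free_unused_pacas() call in smp_setup_cpu_maps() above, the boot CPU now runs on a statically allocated boot_paca until the flattened device tree has been parsed; the paca array is then sized for the CPUs actually present and setup_paca() switches over to the final entry. Below is a user-space toy of that "static bootstrap, then right-sized array" pattern; the structure and names exist only for the sketch.

/* Bootstrap on a static structure, then move to a right-sized array. */
#include <stdio.h>
#include <stdlib.h>

struct toy_paca { int cpu_id; int cpu_start; };

static struct toy_paca boot_paca;       /* always available          */
static struct toy_paca *paca;           /* allocated once nr known   */
static struct toy_paca *local_paca;     /* stands in for r13         */

int main(void)
{
        int nr_cpus, boot_cpuid = 0, i;

        local_paca = &boot_paca;        /* setup_paca(&boot_paca)    */
        boot_paca.cpu_id = 0;

        nr_cpus = 4;                    /* "parsed" from device tree */
        paca = calloc(nr_cpus, sizeof(*paca));  /* allocate_pacas()  */
        for (i = 0; i < nr_cpus; i++)
                paca[i].cpu_id = i;

        local_paca = &paca[boot_cpuid]; /* setup_paca(&paca[boot_cpuid]) */
        local_paca->cpu_start = 1;

        printf("boot cpu now on paca[%d]\n", local_paca->cpu_id);
        free(paca);
        return 0;
}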
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 3370e62e43d4..f2496f2faecc 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c | |||
| @@ -42,100 +42,6 @@ | |||
| 42 | #include <asm/time.h> | 42 | #include <asm/time.h> |
| 43 | #include <asm/unistd.h> | 43 | #include <asm/unistd.h> |
| 44 | 44 | ||
| 45 | /* | ||
| 46 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
| 47 | * | ||
| 48 | * This is really horribly ugly. | ||
| 49 | */ | ||
| 50 | int sys_ipc(uint call, int first, unsigned long second, long third, | ||
| 51 | void __user *ptr, long fifth) | ||
| 52 | { | ||
| 53 | int version, ret; | ||
| 54 | |||
| 55 | version = call >> 16; /* hack for backward compatibility */ | ||
| 56 | call &= 0xffff; | ||
| 57 | |||
| 58 | ret = -ENOSYS; | ||
| 59 | switch (call) { | ||
| 60 | case SEMOP: | ||
| 61 | ret = sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
| 62 | (unsigned)second, NULL); | ||
| 63 | break; | ||
| 64 | case SEMTIMEDOP: | ||
| 65 | ret = sys_semtimedop(first, (struct sembuf __user *)ptr, | ||
| 66 | (unsigned)second, | ||
| 67 | (const struct timespec __user *) fifth); | ||
| 68 | break; | ||
| 69 | case SEMGET: | ||
| 70 | ret = sys_semget (first, (int)second, third); | ||
| 71 | break; | ||
| 72 | case SEMCTL: { | ||
| 73 | union semun fourth; | ||
| 74 | |||
| 75 | ret = -EINVAL; | ||
| 76 | if (!ptr) | ||
| 77 | break; | ||
| 78 | if ((ret = get_user(fourth.__pad, (void __user * __user *)ptr))) | ||
| 79 | break; | ||
| 80 | ret = sys_semctl(first, (int)second, third, fourth); | ||
| 81 | break; | ||
| 82 | } | ||
| 83 | case MSGSND: | ||
| 84 | ret = sys_msgsnd(first, (struct msgbuf __user *)ptr, | ||
| 85 | (size_t)second, third); | ||
| 86 | break; | ||
| 87 | case MSGRCV: | ||
| 88 | switch (version) { | ||
| 89 | case 0: { | ||
| 90 | struct ipc_kludge tmp; | ||
| 91 | |||
| 92 | ret = -EINVAL; | ||
| 93 | if (!ptr) | ||
| 94 | break; | ||
| 95 | if ((ret = copy_from_user(&tmp, | ||
| 96 | (struct ipc_kludge __user *) ptr, | ||
| 97 | sizeof (tmp)) ? -EFAULT : 0)) | ||
| 98 | break; | ||
| 99 | ret = sys_msgrcv(first, tmp.msgp, (size_t) second, | ||
| 100 | tmp.msgtyp, third); | ||
| 101 | break; | ||
| 102 | } | ||
| 103 | default: | ||
| 104 | ret = sys_msgrcv (first, (struct msgbuf __user *) ptr, | ||
| 105 | (size_t)second, fifth, third); | ||
| 106 | break; | ||
| 107 | } | ||
| 108 | break; | ||
| 109 | case MSGGET: | ||
| 110 | ret = sys_msgget((key_t)first, (int)second); | ||
| 111 | break; | ||
| 112 | case MSGCTL: | ||
| 113 | ret = sys_msgctl(first, (int)second, | ||
| 114 | (struct msqid_ds __user *)ptr); | ||
| 115 | break; | ||
| 116 | case SHMAT: { | ||
| 117 | ulong raddr; | ||
| 118 | ret = do_shmat(first, (char __user *)ptr, (int)second, &raddr); | ||
| 119 | if (ret) | ||
| 120 | break; | ||
| 121 | ret = put_user(raddr, (ulong __user *) third); | ||
| 122 | break; | ||
| 123 | } | ||
| 124 | case SHMDT: | ||
| 125 | ret = sys_shmdt((char __user *)ptr); | ||
| 126 | break; | ||
| 127 | case SHMGET: | ||
| 128 | ret = sys_shmget(first, (size_t)second, third); | ||
| 129 | break; | ||
| 130 | case SHMCTL: | ||
| 131 | ret = sys_shmctl(first, (int)second, | ||
| 132 | (struct shmid_ds __user *)ptr); | ||
| 133 | break; | ||
| 134 | } | ||
| 135 | |||
| 136 | return ret; | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline unsigned long do_mmap2(unsigned long addr, size_t len, | 45 | static inline unsigned long do_mmap2(unsigned long addr, size_t len, |
| 140 | unsigned long prot, unsigned long flags, | 46 | unsigned long prot, unsigned long flags, |
| 141 | unsigned long fd, unsigned long off, int shift) | 47 | unsigned long fd, unsigned long off, int shift) |
| @@ -210,76 +116,6 @@ long ppc64_personality(unsigned long personality) | |||
| 210 | } | 116 | } |
| 211 | #endif | 117 | #endif |
| 212 | 118 | ||
| 213 | #ifdef CONFIG_PPC64 | ||
| 214 | #define OVERRIDE_MACHINE (personality(current->personality) == PER_LINUX32) | ||
| 215 | #else | ||
| 216 | #define OVERRIDE_MACHINE 0 | ||
| 217 | #endif | ||
| 218 | |||
| 219 | static inline int override_machine(char __user *mach) | ||
| 220 | { | ||
| 221 | if (OVERRIDE_MACHINE) { | ||
| 222 | /* change ppc64 to ppc */ | ||
| 223 | if (__put_user(0, mach+3) || __put_user(0, mach+4)) | ||
| 224 | return -EFAULT; | ||
| 225 | } | ||
| 226 | return 0; | ||
| 227 | } | ||
| 228 | |||
| 229 | long ppc_newuname(struct new_utsname __user * name) | ||
| 230 | { | ||
| 231 | int err = 0; | ||
| 232 | |||
| 233 | down_read(&uts_sem); | ||
| 234 | if (copy_to_user(name, utsname(), sizeof(*name))) | ||
| 235 | err = -EFAULT; | ||
| 236 | up_read(&uts_sem); | ||
| 237 | if (!err) | ||
| 238 | err = override_machine(name->machine); | ||
| 239 | return err; | ||
| 240 | } | ||
| 241 | |||
| 242 | int sys_uname(struct old_utsname __user *name) | ||
| 243 | { | ||
| 244 | int err = 0; | ||
| 245 | |||
| 246 | down_read(&uts_sem); | ||
| 247 | if (copy_to_user(name, utsname(), sizeof(*name))) | ||
| 248 | err = -EFAULT; | ||
| 249 | up_read(&uts_sem); | ||
| 250 | if (!err) | ||
| 251 | err = override_machine(name->machine); | ||
| 252 | return err; | ||
| 253 | } | ||
| 254 | |||
| 255 | int sys_olduname(struct oldold_utsname __user *name) | ||
| 256 | { | ||
| 257 | int error; | ||
| 258 | |||
| 259 | if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname))) | ||
| 260 | return -EFAULT; | ||
| 261 | |||
| 262 | down_read(&uts_sem); | ||
| 263 | error = __copy_to_user(&name->sysname, &utsname()->sysname, | ||
| 264 | __OLD_UTS_LEN); | ||
| 265 | error |= __put_user(0, name->sysname + __OLD_UTS_LEN); | ||
| 266 | error |= __copy_to_user(&name->nodename, &utsname()->nodename, | ||
| 267 | __OLD_UTS_LEN); | ||
| 268 | error |= __put_user(0, name->nodename + __OLD_UTS_LEN); | ||
| 269 | error |= __copy_to_user(&name->release, &utsname()->release, | ||
| 270 | __OLD_UTS_LEN); | ||
| 271 | error |= __put_user(0, name->release + __OLD_UTS_LEN); | ||
| 272 | error |= __copy_to_user(&name->version, &utsname()->version, | ||
| 273 | __OLD_UTS_LEN); | ||
| 274 | error |= __put_user(0, name->version + __OLD_UTS_LEN); | ||
| 275 | error |= __copy_to_user(&name->machine, &utsname()->machine, | ||
| 276 | __OLD_UTS_LEN); | ||
| 277 | error |= override_machine(name->machine); | ||
| 278 | up_read(&uts_sem); | ||
| 279 | |||
| 280 | return error? -EFAULT: 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, | 119 | long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, |
| 284 | u32 len_high, u32 len_low) | 120 | u32 len_high, u32 len_low) |
| 285 | { | 121 | { |
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 4ec900af332f..b1dbd9ee87cc 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | #include "mmu_decl.h" | 47 | #include "mmu_decl.h" |
| 48 | 48 | ||
| 49 | #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) | 49 | #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) |
| 50 | /* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */ | 50 | /* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */ |
| 51 | #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) | 51 | #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) |
| 52 | #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" | 52 | #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" |
| 53 | #endif | 53 | #endif |
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 9d962d7c72c1..d4a09f8705b5 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | 24 | ||
| 25 | #include "pq2.h" | 25 | #include "pq2.h" |
| 26 | 26 | ||
| 27 | static DEFINE_SPINLOCK(pci_pic_lock); | 27 | static DEFINE_RAW_SPINLOCK(pci_pic_lock); |
| 28 | 28 | ||
| 29 | struct pq2ads_pci_pic { | 29 | struct pq2ads_pci_pic { |
| 30 | struct device_node *node; | 30 | struct device_node *node; |
| @@ -45,12 +45,12 @@ static void pq2ads_pci_mask_irq(unsigned int virq) | |||
| 45 | 45 | ||
| 46 | if (irq != -1) { | 46 | if (irq != -1) { |
| 47 | unsigned long flags; | 47 | unsigned long flags; |
| 48 | spin_lock_irqsave(&pci_pic_lock, flags); | 48 | raw_spin_lock_irqsave(&pci_pic_lock, flags); |
| 49 | 49 | ||
| 50 | setbits32(&priv->regs->mask, 1 << irq); | 50 | setbits32(&priv->regs->mask, 1 << irq); |
| 51 | mb(); | 51 | mb(); |
| 52 | 52 | ||
| 53 | spin_unlock_irqrestore(&pci_pic_lock, flags); | 53 | raw_spin_unlock_irqrestore(&pci_pic_lock, flags); |
| 54 | } | 54 | } |
| 55 | } | 55 | } |
| 56 | 56 | ||
| @@ -62,9 +62,9 @@ static void pq2ads_pci_unmask_irq(unsigned int virq) | |||
| 62 | if (irq != -1) { | 62 | if (irq != -1) { |
| 63 | unsigned long flags; | 63 | unsigned long flags; |
| 64 | 64 | ||
| 65 | spin_lock_irqsave(&pci_pic_lock, flags); | 65 | raw_spin_lock_irqsave(&pci_pic_lock, flags); |
| 66 | clrbits32(&priv->regs->mask, 1 << irq); | 66 | clrbits32(&priv->regs->mask, 1 << irq); |
| 67 | spin_unlock_irqrestore(&pci_pic_lock, flags); | 67 | raw_spin_unlock_irqrestore(&pci_pic_lock, flags); |
| 68 | } | 68 | } |
| 69 | } | 69 | } |
| 70 | 70 | ||
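This file and the interrupt-controller files that follow convert their locks from spin_lock_irqsave() to raw_spin_lock_irqsave(): since the 2.6.33 spinlock split, an ordinary spinlock may become a sleeping lock on PREEMPT_RT, which is not tolerable in irq_chip mask/ack/unmask callbacks, so locks taken in that context must be raw_spinlock_t. Below is a minimal module-style sketch of the pattern; the "register" is just a variable and the module is illustrative, not one of the drivers touched here.

/* Minimal illustration of the raw spinlock pattern used in these PICs. */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static u32 demo_mask;

static void demo_mask_irq(unsigned int hwirq)
{
        unsigned long flags;

        /* Safe in irq_chip callbacks even on PREEMPT_RT: a raw spinlock
         * never turns into a sleeping lock. */
        raw_spin_lock_irqsave(&demo_lock, flags);
        demo_mask &= ~(1 << hwirq);
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}

static int __init demo_init(void)
{
        demo_mask = 0xffff;
        demo_mask_irq(3);
        pr_info("demo mask now 0x%x\n", demo_mask);
        return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");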
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 42e87f08aa01..d48527ffc425 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c | |||
| @@ -50,7 +50,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = { | |||
| 50 | 50 | ||
| 51 | #define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | 51 | #define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) |
| 52 | 52 | ||
| 53 | static DEFINE_SPINLOCK(socrates_fpga_pic_lock); | 53 | static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); |
| 54 | 54 | ||
| 55 | static void __iomem *socrates_fpga_pic_iobase; | 55 | static void __iomem *socrates_fpga_pic_iobase; |
| 56 | static struct irq_host *socrates_fpga_pic_irq_host; | 56 | static struct irq_host *socrates_fpga_pic_irq_host; |
| @@ -80,9 +80,9 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) | |||
| 80 | if (i == 3) | 80 | if (i == 3) |
| 81 | return NO_IRQ; | 81 | return NO_IRQ; |
| 82 | 82 | ||
| 83 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 83 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 84 | cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i)); | 84 | cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i)); |
| 85 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 85 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 86 | for (i = SOCRATES_FPGA_NUM_IRQS - 1; i >= 0; i--) { | 86 | for (i = SOCRATES_FPGA_NUM_IRQS - 1; i >= 0; i--) { |
| 87 | if (cause >> (i + 16)) | 87 | if (cause >> (i + 16)) |
| 88 | break; | 88 | break; |
| @@ -116,12 +116,12 @@ static void socrates_fpga_pic_ack(unsigned int virq) | |||
| 116 | hwirq = socrates_fpga_irq_to_hw(virq); | 116 | hwirq = socrates_fpga_irq_to_hw(virq); |
| 117 | 117 | ||
| 118 | irq_line = fpga_irqs[hwirq].irq_line; | 118 | irq_line = fpga_irqs[hwirq].irq_line; |
| 119 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 119 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 120 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 120 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
| 121 | & SOCRATES_FPGA_IRQ_MASK; | 121 | & SOCRATES_FPGA_IRQ_MASK; |
| 122 | mask |= (1 << (hwirq + 16)); | 122 | mask |= (1 << (hwirq + 16)); |
| 123 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); | 123 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); |
| 124 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 124 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static void socrates_fpga_pic_mask(unsigned int virq) | 127 | static void socrates_fpga_pic_mask(unsigned int virq) |
| @@ -134,12 +134,12 @@ static void socrates_fpga_pic_mask(unsigned int virq) | |||
| 134 | hwirq = socrates_fpga_irq_to_hw(virq); | 134 | hwirq = socrates_fpga_irq_to_hw(virq); |
| 135 | 135 | ||
| 136 | irq_line = fpga_irqs[hwirq].irq_line; | 136 | irq_line = fpga_irqs[hwirq].irq_line; |
| 137 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 137 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 138 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 138 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
| 139 | & SOCRATES_FPGA_IRQ_MASK; | 139 | & SOCRATES_FPGA_IRQ_MASK; |
| 140 | mask &= ~(1 << hwirq); | 140 | mask &= ~(1 << hwirq); |
| 141 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); | 141 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); |
| 142 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 142 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | static void socrates_fpga_pic_mask_ack(unsigned int virq) | 145 | static void socrates_fpga_pic_mask_ack(unsigned int virq) |
| @@ -152,13 +152,13 @@ static void socrates_fpga_pic_mask_ack(unsigned int virq) | |||
| 152 | hwirq = socrates_fpga_irq_to_hw(virq); | 152 | hwirq = socrates_fpga_irq_to_hw(virq); |
| 153 | 153 | ||
| 154 | irq_line = fpga_irqs[hwirq].irq_line; | 154 | irq_line = fpga_irqs[hwirq].irq_line; |
| 155 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 155 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 156 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 156 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
| 157 | & SOCRATES_FPGA_IRQ_MASK; | 157 | & SOCRATES_FPGA_IRQ_MASK; |
| 158 | mask &= ~(1 << hwirq); | 158 | mask &= ~(1 << hwirq); |
| 159 | mask |= (1 << (hwirq + 16)); | 159 | mask |= (1 << (hwirq + 16)); |
| 160 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); | 160 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); |
| 161 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 161 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static void socrates_fpga_pic_unmask(unsigned int virq) | 164 | static void socrates_fpga_pic_unmask(unsigned int virq) |
| @@ -171,12 +171,12 @@ static void socrates_fpga_pic_unmask(unsigned int virq) | |||
| 171 | hwirq = socrates_fpga_irq_to_hw(virq); | 171 | hwirq = socrates_fpga_irq_to_hw(virq); |
| 172 | 172 | ||
| 173 | irq_line = fpga_irqs[hwirq].irq_line; | 173 | irq_line = fpga_irqs[hwirq].irq_line; |
| 174 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 174 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 175 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 175 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
| 176 | & SOCRATES_FPGA_IRQ_MASK; | 176 | & SOCRATES_FPGA_IRQ_MASK; |
| 177 | mask |= (1 << hwirq); | 177 | mask |= (1 << hwirq); |
| 178 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); | 178 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); |
| 179 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 179 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | static void socrates_fpga_pic_eoi(unsigned int virq) | 182 | static void socrates_fpga_pic_eoi(unsigned int virq) |
| @@ -189,12 +189,12 @@ static void socrates_fpga_pic_eoi(unsigned int virq) | |||
| 189 | hwirq = socrates_fpga_irq_to_hw(virq); | 189 | hwirq = socrates_fpga_irq_to_hw(virq); |
| 190 | 190 | ||
| 191 | irq_line = fpga_irqs[hwirq].irq_line; | 191 | irq_line = fpga_irqs[hwirq].irq_line; |
| 192 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 192 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 193 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 193 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
| 194 | & SOCRATES_FPGA_IRQ_MASK; | 194 | & SOCRATES_FPGA_IRQ_MASK; |
| 195 | mask |= (1 << (hwirq + 16)); | 195 | mask |= (1 << (hwirq + 16)); |
| 196 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); | 196 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); |
| 197 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 197 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | static int socrates_fpga_pic_set_type(unsigned int virq, | 200 | static int socrates_fpga_pic_set_type(unsigned int virq, |
| @@ -220,14 +220,14 @@ static int socrates_fpga_pic_set_type(unsigned int virq, | |||
| 220 | default: | 220 | default: |
| 221 | return -EINVAL; | 221 | return -EINVAL; |
| 222 | } | 222 | } |
| 223 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 223 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 224 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQCFG); | 224 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQCFG); |
| 225 | if (polarity) | 225 | if (polarity) |
| 226 | mask |= (1 << hwirq); | 226 | mask |= (1 << hwirq); |
| 227 | else | 227 | else |
| 228 | mask &= ~(1 << hwirq); | 228 | mask &= ~(1 << hwirq); |
| 229 | socrates_fpga_pic_write(FPGA_PIC_IRQCFG, mask); | 229 | socrates_fpga_pic_write(FPGA_PIC_IRQCFG, mask); |
| 230 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 230 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 231 | return 0; | 231 | return 0; |
| 232 | } | 232 | } |
| 233 | 233 | ||
| @@ -314,14 +314,14 @@ void socrates_fpga_pic_init(struct device_node *pic) | |||
| 314 | 314 | ||
| 315 | socrates_fpga_pic_iobase = of_iomap(pic, 0); | 315 | socrates_fpga_pic_iobase = of_iomap(pic, 0); |
| 316 | 316 | ||
| 317 | spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 317 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
| 318 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(0), | 318 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(0), |
| 319 | SOCRATES_FPGA_IRQ_MASK << 16); | 319 | SOCRATES_FPGA_IRQ_MASK << 16); |
| 320 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(1), | 320 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(1), |
| 321 | SOCRATES_FPGA_IRQ_MASK << 16); | 321 | SOCRATES_FPGA_IRQ_MASK << 16); |
| 322 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(2), | 322 | socrates_fpga_pic_write(FPGA_PIC_IRQMASK(2), |
| 323 | SOCRATES_FPGA_IRQ_MASK << 16); | 323 | SOCRATES_FPGA_IRQ_MASK << 16); |
| 324 | spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 324 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
| 325 | 325 | ||
| 326 | pr_info("FPGA PIC: Setting up Socrates FPGA PIC\n"); | 326 | pr_info("FPGA PIC: Setting up Socrates FPGA PIC\n"); |
| 327 | } | 327 | } |
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig index 2bbfd530d6d8..fbe9f3621424 100644 --- a/arch/powerpc/platforms/86xx/Kconfig +++ b/arch/powerpc/platforms/86xx/Kconfig | |||
| @@ -33,32 +33,32 @@ config MPC8610_HPCD | |||
| 33 | This option enables support for the MPC8610 HPCD board. | 33 | This option enables support for the MPC8610 HPCD board. |
| 34 | 34 | ||
| 35 | config GEF_PPC9A | 35 | config GEF_PPC9A |
| 36 | bool "GE Fanuc PPC9A" | 36 | bool "GE PPC9A" |
| 37 | select DEFAULT_UIMAGE | 37 | select DEFAULT_UIMAGE |
| 38 | select MMIO_NVRAM | 38 | select MMIO_NVRAM |
| 39 | select GENERIC_GPIO | 39 | select GENERIC_GPIO |
| 40 | select ARCH_REQUIRE_GPIOLIB | 40 | select ARCH_REQUIRE_GPIOLIB |
| 41 | help | 41 | help |
| 42 | This option enables support for GE Fanuc's PPC9A. | 42 | This option enables support for the GE PPC9A. |
| 43 | 43 | ||
| 44 | config GEF_SBC310 | 44 | config GEF_SBC310 |
| 45 | bool "GE Fanuc SBC310" | 45 | bool "GE SBC310" |
| 46 | select DEFAULT_UIMAGE | 46 | select DEFAULT_UIMAGE |
| 47 | select MMIO_NVRAM | 47 | select MMIO_NVRAM |
| 48 | select GENERIC_GPIO | 48 | select GENERIC_GPIO |
| 49 | select ARCH_REQUIRE_GPIOLIB | 49 | select ARCH_REQUIRE_GPIOLIB |
| 50 | help | 50 | help |
| 51 | This option enables support for GE Fanuc's SBC310. | 51 | This option enables support for the GE SBC310. |
| 52 | 52 | ||
| 53 | config GEF_SBC610 | 53 | config GEF_SBC610 |
| 54 | bool "GE Fanuc SBC610" | 54 | bool "GE SBC610" |
| 55 | select DEFAULT_UIMAGE | 55 | select DEFAULT_UIMAGE |
| 56 | select MMIO_NVRAM | 56 | select MMIO_NVRAM |
| 57 | select GENERIC_GPIO | 57 | select GENERIC_GPIO |
| 58 | select ARCH_REQUIRE_GPIOLIB | 58 | select ARCH_REQUIRE_GPIOLIB |
| 59 | select HAS_RAPIDIO | 59 | select HAS_RAPIDIO |
| 60 | help | 60 | help |
| 61 | This option enables support for GE Fanuc's SBC610. | 61 | This option enables support for the GE SBC610. |
| 62 | 62 | ||
| 63 | endif | 63 | endif |
| 64 | 64 | ||
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c index b2ea8875adba..11f7b2b6f49e 100644 --- a/arch/powerpc/platforms/86xx/gef_gpio.c +++ b/arch/powerpc/platforms/86xx/gef_gpio.c | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Driver for GE Fanuc's FPGA based GPIO pins | 2 | * Driver for GE FPGA based GPIO |
| 3 | * | 3 | * |
| 4 | * Author: Martyn Welch <martyn.welch@gefanuc.com> | 4 | * Author: Martyn Welch <martyn.welch@ge.com> |
| 5 | * | 5 | * |
| 6 | * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 6 | * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc. |
| 7 | * | 7 | * |
| 8 | * This file is licensed under the terms of the GNU General Public License | 8 | * This file is licensed under the terms of the GNU General Public License |
| 9 | * version 2. This program is licensed "as is" without any warranty of any | 9 | * version 2. This program is licensed "as is" without any warranty of any |
| @@ -164,6 +164,6 @@ static int __init gef_gpio_init(void) | |||
| 164 | }; | 164 | }; |
| 165 | arch_initcall(gef_gpio_init); | 165 | arch_initcall(gef_gpio_init); |
| 166 | 166 | ||
| 167 | MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver"); | 167 | MODULE_DESCRIPTION("GE I/O FPGA GPIO driver"); |
| 168 | MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com"); | 168 | MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>"); |
| 169 | MODULE_LICENSE("GPL"); | 169 | MODULE_LICENSE("GPL"); |
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c index 0110a8736d33..6df9e2561c06 100644 --- a/arch/powerpc/platforms/86xx/gef_pic.c +++ b/arch/powerpc/platforms/86xx/gef_pic.c | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Interrupt handling for GE Fanuc's FPGA based PIC | 2 | * Interrupt handling for GE FPGA based PIC |
| 3 | * | 3 | * |
| 4 | * Author: Martyn Welch <martyn.welch@gefanuc.com> | 4 | * Author: Martyn Welch <martyn.welch@ge.com> |
| 5 | * | 5 | * |
| 6 | * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 6 | * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc. |
| 7 | * | 7 | * |
| 8 | * This file is licensed under the terms of the GNU General Public License | 8 | * This file is licensed under the terms of the GNU General Public License |
| 9 | * version 2. This program is licensed "as is" without any warranty of any | 9 | * version 2. This program is licensed "as is" without any warranty of any |
| @@ -49,7 +49,7 @@ | |||
| 49 | #define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | 49 | #define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) |
| 50 | 50 | ||
| 51 | 51 | ||
| 52 | static DEFINE_SPINLOCK(gef_pic_lock); | 52 | static DEFINE_RAW_SPINLOCK(gef_pic_lock); |
| 53 | 53 | ||
| 54 | static void __iomem *gef_pic_irq_reg_base; | 54 | static void __iomem *gef_pic_irq_reg_base; |
| 55 | static struct irq_host *gef_pic_irq_host; | 55 | static struct irq_host *gef_pic_irq_host; |
| @@ -118,11 +118,11 @@ static void gef_pic_mask(unsigned int virq) | |||
| 118 | 118 | ||
| 119 | hwirq = gef_irq_to_hw(virq); | 119 | hwirq = gef_irq_to_hw(virq); |
| 120 | 120 | ||
| 121 | spin_lock_irqsave(&gef_pic_lock, flags); | 121 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
| 122 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); | 122 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); |
| 123 | mask &= ~(1 << hwirq); | 123 | mask &= ~(1 << hwirq); |
| 124 | out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); | 124 | out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); |
| 125 | spin_unlock_irqrestore(&gef_pic_lock, flags); | 125 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | static void gef_pic_mask_ack(unsigned int virq) | 128 | static void gef_pic_mask_ack(unsigned int virq) |
| @@ -141,11 +141,11 @@ static void gef_pic_unmask(unsigned int virq) | |||
| 141 | 141 | ||
| 142 | hwirq = gef_irq_to_hw(virq); | 142 | hwirq = gef_irq_to_hw(virq); |
| 143 | 143 | ||
| 144 | spin_lock_irqsave(&gef_pic_lock, flags); | 144 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
| 145 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); | 145 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); |
| 146 | mask |= (1 << hwirq); | 146 | mask |= (1 << hwirq); |
| 147 | out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); | 147 | out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); |
| 148 | spin_unlock_irqrestore(&gef_pic_lock, flags); | 148 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | static struct irq_chip gef_pic_chip = { | 151 | static struct irq_chip gef_pic_chip = { |
| @@ -199,7 +199,7 @@ void __init gef_pic_init(struct device_node *np) | |||
| 199 | /* Map the devices registers into memory */ | 199 | /* Map the devices registers into memory */ |
| 200 | gef_pic_irq_reg_base = of_iomap(np, 0); | 200 | gef_pic_irq_reg_base = of_iomap(np, 0); |
| 201 | 201 | ||
| 202 | spin_lock_irqsave(&gef_pic_lock, flags); | 202 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
| 203 | 203 | ||
| 204 | /* Initialise everything as masked. */ | 204 | /* Initialise everything as masked. */ |
| 205 | out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0); | 205 | out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0); |
| @@ -208,7 +208,7 @@ void __init gef_pic_init(struct device_node *np) | |||
| 208 | out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0); | 208 | out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0); |
| 209 | out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0); | 209 | out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0); |
| 210 | 210 | ||
| 211 | spin_unlock_irqrestore(&gef_pic_lock, flags); | 211 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); |
| 212 | 212 | ||
| 213 | /* Map controller */ | 213 | /* Map controller */ |
| 214 | gef_pic_cascade_irq = irq_of_parse_and_map(np, 0); | 214 | gef_pic_cascade_irq = irq_of_parse_and_map(np, 0); |
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c index a792e5d85813..60ce07e39100 100644 --- a/arch/powerpc/platforms/86xx/gef_ppc9a.c +++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * GE Fanuc PPC9A board support | 2 | * GE PPC9A board support |
| 3 | * | 3 | * |
| 4 | * Author: Martyn Welch <martyn.welch@gefanuc.com> | 4 | * Author: Martyn Welch <martyn.welch@ge.com> |
| 5 | * | 5 | * |
| 6 | * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 6 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
| @@ -82,7 +82,7 @@ static void __init gef_ppc9a_setup_arch(void) | |||
| 82 | } | 82 | } |
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | printk(KERN_INFO "GE Fanuc Intelligent Platforms PPC9A 6U VME SBC\n"); | 85 | printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n"); |
| 86 | 86 | ||
| 87 | #ifdef CONFIG_SMP | 87 | #ifdef CONFIG_SMP |
| 88 | mpc86xx_smp_init(); | 88 | mpc86xx_smp_init(); |
| @@ -151,7 +151,7 @@ static void gef_ppc9a_show_cpuinfo(struct seq_file *m) | |||
| 151 | { | 151 | { |
| 152 | uint svid = mfspr(SPRN_SVR); | 152 | uint svid = mfspr(SPRN_SVR); |
| 153 | 153 | ||
| 154 | seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); | 154 | seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); |
| 155 | 155 | ||
| 156 | seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(), | 156 | seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(), |
| 157 | ('A' + gef_ppc9a_get_board_rev())); | 157 | ('A' + gef_ppc9a_get_board_rev())); |
| @@ -235,7 +235,7 @@ static int __init declare_of_platform_devices(void) | |||
| 235 | machine_device_initcall(gef_ppc9a, declare_of_platform_devices); | 235 | machine_device_initcall(gef_ppc9a, declare_of_platform_devices); |
| 236 | 236 | ||
| 237 | define_machine(gef_ppc9a) { | 237 | define_machine(gef_ppc9a) { |
| 238 | .name = "GE Fanuc PPC9A", | 238 | .name = "GE PPC9A", |
| 239 | .probe = gef_ppc9a_probe, | 239 | .probe = gef_ppc9a_probe, |
| 240 | .setup_arch = gef_ppc9a_setup_arch, | 240 | .setup_arch = gef_ppc9a_setup_arch, |
| 241 | .init_IRQ = gef_ppc9a_init_irq, | 241 | .init_IRQ = gef_ppc9a_init_irq, |
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c index 6a1a613836c2..3ecee25bf3ed 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc310.c +++ b/arch/powerpc/platforms/86xx/gef_sbc310.c | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * GE Fanuc SBC310 board support | 2 | * GE SBC310 board support |
| 3 | * | 3 | * |
| 4 | * Author: Martyn Welch <martyn.welch@gefanuc.com> | 4 | * Author: Martyn Welch <martyn.welch@ge.com> |
| 5 | * | 5 | * |
| 6 | * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 6 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
| @@ -82,7 +82,7 @@ static void __init gef_sbc310_setup_arch(void) | |||
| 82 | } | 82 | } |
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | printk(KERN_INFO "GE Fanuc Intelligent Platforms SBC310 6U VPX SBC\n"); | 85 | printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n"); |
| 86 | 86 | ||
| 87 | #ifdef CONFIG_SMP | 87 | #ifdef CONFIG_SMP |
| 88 | mpc86xx_smp_init(); | 88 | mpc86xx_smp_init(); |
| @@ -142,7 +142,7 @@ static void gef_sbc310_show_cpuinfo(struct seq_file *m) | |||
| 142 | { | 142 | { |
| 143 | uint svid = mfspr(SPRN_SVR); | 143 | uint svid = mfspr(SPRN_SVR); |
| 144 | 144 | ||
| 145 | seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); | 145 | seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); |
| 146 | 146 | ||
| 147 | seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id()); | 147 | seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id()); |
| 148 | seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(), | 148 | seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(), |
| @@ -223,7 +223,7 @@ static int __init declare_of_platform_devices(void) | |||
| 223 | machine_device_initcall(gef_sbc310, declare_of_platform_devices); | 223 | machine_device_initcall(gef_sbc310, declare_of_platform_devices); |
| 224 | 224 | ||
| 225 | define_machine(gef_sbc310) { | 225 | define_machine(gef_sbc310) { |
| 226 | .name = "GE Fanuc SBC310", | 226 | .name = "GE SBC310", |
| 227 | .probe = gef_sbc310_probe, | 227 | .probe = gef_sbc310_probe, |
| 228 | .setup_arch = gef_sbc310_setup_arch, | 228 | .setup_arch = gef_sbc310_setup_arch, |
| 229 | .init_IRQ = gef_sbc310_init_irq, | 229 | .init_IRQ = gef_sbc310_init_irq, |
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index e10688a0fc4e..5090d608d9ee 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * GE Fanuc SBC610 board support | 2 | * GE SBC610 board support |
| 3 | * | 3 | * |
| 4 | * Author: Martyn Welch <martyn.welch@gefanuc.com> | 4 | * Author: Martyn Welch <martyn.welch@ge.com> |
| 5 | * | 5 | * |
| 6 | * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. | 6 | * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
| 9 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
| @@ -82,7 +82,7 @@ static void __init gef_sbc610_setup_arch(void) | |||
| 82 | } | 82 | } |
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | printk(KERN_INFO "GE Fanuc Intelligent Platforms SBC610 6U VPX SBC\n"); | 85 | printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n"); |
| 86 | 86 | ||
| 87 | #ifdef CONFIG_SMP | 87 | #ifdef CONFIG_SMP |
| 88 | mpc86xx_smp_init(); | 88 | mpc86xx_smp_init(); |
| @@ -133,7 +133,7 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m) | |||
| 133 | { | 133 | { |
| 134 | uint svid = mfspr(SPRN_SVR); | 134 | uint svid = mfspr(SPRN_SVR); |
| 135 | 135 | ||
| 136 | seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); | 136 | seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); |
| 137 | 137 | ||
| 138 | seq_printf(m, "Revision\t: %u%c\n", gef_sbc610_get_pcb_rev(), | 138 | seq_printf(m, "Revision\t: %u%c\n", gef_sbc610_get_pcb_rev(), |
| 139 | ('A' + gef_sbc610_get_board_rev() - 1)); | 139 | ('A' + gef_sbc610_get_board_rev() - 1)); |
| @@ -212,7 +212,7 @@ static int __init declare_of_platform_devices(void) | |||
| 212 | machine_device_initcall(gef_sbc610, declare_of_platform_devices); | 212 | machine_device_initcall(gef_sbc610, declare_of_platform_devices); |
| 213 | 213 | ||
| 214 | define_machine(gef_sbc610) { | 214 | define_machine(gef_sbc610) { |
| 215 | .name = "GE Fanuc SBC610", | 215 | .name = "GE SBC610", |
| 216 | .probe = gef_sbc610_probe, | 216 | .probe = gef_sbc610_probe, |
| 217 | .setup_arch = gef_sbc610_setup_arch, | 217 | .setup_arch = gef_sbc610_setup_arch, |
| 218 | .init_IRQ = gef_sbc610_init_irq, | 218 | .init_IRQ = gef_sbc610_init_irq, |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index fa0f690d3867..a8aae0b54579 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
| @@ -144,6 +144,16 @@ config FSL_EMB_PERFMON | |||
| 144 | and some e300 cores (c3 and c4). Select this only if your | 144 | and some e300 cores (c3 and c4). Select this only if your |
| 145 | core supports the Embedded Performance Monitor APU | 145 | core supports the Embedded Performance Monitor APU |
| 146 | 146 | ||
| 147 | config FSL_EMB_PERF_EVENT | ||
| 148 | bool | ||
| 149 | depends on FSL_EMB_PERFMON && PERF_EVENTS && !PPC_PERF_CTRS | ||
| 150 | default y | ||
| 151 | |||
| 152 | config FSL_EMB_PERF_EVENT_E500 | ||
| 153 | bool | ||
| 154 | depends on FSL_EMB_PERF_EVENT && E500 | ||
| 155 | default y | ||
| 156 | |||
| 147 | config 4xx | 157 | config 4xx |
| 148 | bool | 158 | bool |
| 149 | depends on 40x || 44x | 159 | depends on 40x || 44x |
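Aside (not part of the patch): FSL_EMB_PERF_EVENT and FSL_EMB_PERF_EVENT_E500 are hidden def_bool symbols, so nothing selects them by hand; downstream code is expected to key off the generated CONFIG_ macros, either in a Makefile (obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += ...) or with a preprocessor guard. A minimal sketch of such a guard, with a hypothetical function name:

    #include <linux/kernel.h>

    /* Hypothetical consumer of the new Kconfig symbol; not from this patch. */
    #ifdef CONFIG_FSL_EMB_PERF_EVENT_E500
    static inline void fsl_emb_e500_pmu_note(void)
    {
            pr_info("e500 embedded perf events built in\n");
    }
    #else
    static inline void fsl_emb_e500_pmu_note(void) { }
    #endif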
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S index 5369653dcf6a..fba5bf915073 100644 --- a/arch/powerpc/platforms/iseries/exception.S +++ b/arch/powerpc/platforms/iseries/exception.S | |||
| @@ -43,17 +43,14 @@ system_reset_iSeries: | |||
| 43 | LOAD_REG_ADDR(r23, alpaca) | 43 | LOAD_REG_ADDR(r23, alpaca) |
| 44 | li r0,ALPACA_SIZE | 44 | li r0,ALPACA_SIZE |
| 45 | sub r23,r13,r23 | 45 | sub r23,r13,r23 |
| 46 | divdu r23,r23,r0 /* r23 has cpu number */ | 46 | divdu r24,r23,r0 /* r24 has cpu number */ |
| 47 | LOAD_REG_ADDR(r13, paca) | ||
| 48 | mulli r0,r23,PACA_SIZE | ||
| 49 | add r13,r13,r0 | ||
| 50 | mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */ | ||
| 51 | mfmsr r24 | ||
| 52 | ori r24,r24,MSR_RI | ||
| 53 | mtmsrd r24 /* RI on */ | ||
| 54 | mr r24,r23 | ||
| 55 | cmpwi 0,r24,0 /* Are we processor 0? */ | 47 | cmpwi 0,r24,0 /* Are we processor 0? */ |
| 56 | bne 1f | 48 | bne 1f |
| 49 | LOAD_REG_ADDR(r13, boot_paca) | ||
| 50 | mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */ | ||
| 51 | mfmsr r23 | ||
| 52 | ori r23,r23,MSR_RI | ||
| 53 | mtmsrd r23 /* RI on */ | ||
| 57 | b .__start_initialization_iSeries /* Start up the first processor */ | 54 | b .__start_initialization_iSeries /* Start up the first processor */ |
| 58 | 1: mfspr r4,SPRN_CTRLF | 55 | 1: mfspr r4,SPRN_CTRLF |
| 59 | li r5,CTRL_RUNLATCH /* Turn off the run light */ | 56 | li r5,CTRL_RUNLATCH /* Turn off the run light */ |
| @@ -86,6 +83,16 @@ system_reset_iSeries: | |||
| 86 | #endif | 83 | #endif |
| 87 | 84 | ||
| 88 | 2: | 85 | 2: |
| 86 | /* Load our paca now that it's been allocated */ | ||
| 87 | LOAD_REG_ADDR(r13, paca) | ||
| 88 | ld r13,0(r13) | ||
| 89 | mulli r0,r24,PACA_SIZE | ||
| 90 | add r13,r13,r0 | ||
| 91 | mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */ | ||
| 92 | mfmsr r23 | ||
| 93 | ori r23,r23,MSR_RI | ||
| 94 | mtmsrd r23 /* RI on */ | ||
| 95 | |||
| 89 | HMT_LOW | 96 | HMT_LOW |
| 90 | #ifdef CONFIG_SMP | 97 | #ifdef CONFIG_SMP |
| 91 | lbz r23,PACAPROCSTART(r13) /* Test if this processor | 98 | lbz r23,PACAPROCSTART(r13) /* Test if this processor |
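The exception.S change tracks the move to a dynamically allocated paca array: the CPU number is still derived from the statically sized alpaca array, but a CPU's paca entry can only be looked up once the boot CPU has allocated the array, so secondary CPUs now do that at label 2:. A rough C model of the new lookup (stand-in types, for illustration only; the real logic is the assembly above):

    /* Stand-in layouts, only so the pointer arithmetic below is well defined. */
    struct alpaca { unsigned long pad[8]; };
    struct paca_struct { unsigned long pad[128]; };

    extern struct alpaca alpaca[];          /* static bootstrap array */
    extern struct paca_struct *paca;        /* now reached through a pointer */

    static struct paca_struct *iseries_my_paca(struct alpaca *my_alpaca)
    {
            int cpu = my_alpaca - alpaca;   /* divdu r24,r23,r0 */
            return &paca[cpu];              /* ld r13,0(r13); mulli; add */
    }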
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index d1b124e44d77..a8e1d5d17a28 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
| @@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void) | |||
| 122 | if (!get_lppaca()->shared_proc) | 122 | if (!get_lppaca()->shared_proc) |
| 123 | get_lppaca()->donate_dedicated_cpu = 1; | 123 | get_lppaca()->donate_dedicated_cpu = 1; |
| 124 | 124 | ||
| 125 | printk(KERN_INFO | ||
| 126 | "cpu %u (hwid %u) ceding for offline with hint %d\n", | ||
| 127 | cpu, hwcpu, cede_latency_hint); | ||
| 128 | while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { | 125 | while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { |
| 129 | extended_cede_processor(cede_latency_hint); | 126 | extended_cede_processor(cede_latency_hint); |
| 130 | printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n", | ||
| 131 | cpu, hwcpu); | ||
| 132 | printk(KERN_INFO | ||
| 133 | "Decrementer value = %x Timebase value = %llx\n", | ||
| 134 | get_dec(), get_tb()); | ||
| 135 | } | 127 | } |
| 136 | 128 | ||
| 137 | printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n", | ||
| 138 | cpu, hwcpu); | ||
| 139 | |||
| 140 | if (!get_lppaca()->shared_proc) | 129 | if (!get_lppaca()->shared_proc) |
| 141 | get_lppaca()->donate_dedicated_cpu = 0; | 130 | get_lppaca()->donate_dedicated_cpu = 0; |
| 142 | get_lppaca()->idle = 0; | 131 | get_lppaca()->idle = 0; |
| 143 | } | ||
| 144 | 132 | ||
| 145 | if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { | 133 | if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { |
| 146 | unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); | 134 | unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); |
| 147 | 135 | ||
| 148 | /* | 136 | /* |
| 149 | * NOTE: Calling start_secondary() here for now to | 137 | * Call to start_secondary_resume() will not return. |
| 150 | * start new context. | 138 | * Kernel stack will be reset and start_secondary() |
| 151 | * However, need to do it cleanly by resetting the | 139 | * will be called to continue the online operation. |
| 152 | * stack pointer. | 140 | */ |
| 153 | */ | 141 | start_secondary_resume(); |
| 154 | start_secondary(); | 142 | } |
| 143 | } | ||
| 155 | 144 | ||
| 156 | } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { | 145 | /* Requested state is CPU_STATE_OFFLINE at this point */ |
| 146 | WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); | ||
| 157 | 147 | ||
| 158 | set_cpu_current_state(cpu, CPU_STATE_OFFLINE); | 148 | set_cpu_current_state(cpu, CPU_STATE_OFFLINE); |
| 159 | unregister_slb_shadow(hard_smp_processor_id(), | 149 | unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); |
| 160 | __pa(get_slb_shadow())); | 150 | rtas_stop_self(); |
| 161 | rtas_stop_self(); | ||
| 162 | } | ||
| 163 | 151 | ||
| 164 | /* Should never get here... */ | 152 | /* Should never get here... */ |
| 165 | BUG(); | 153 | BUG(); |
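With the interleaved hunks above it is easy to lose the shape of the rewritten function; an outline of the resulting offline path (sketch only, lppaca handling, declarations and the enclosing conditionals elided):

    /* Outline of pseries_mach_cpu_die() after this patch (sketch). */
    while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE)
            extended_cede_processor(cede_latency_hint);

    if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
            unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
            start_secondary_resume();       /* resets the stack; never returns */
    }

    /* Only CPU_STATE_OFFLINE can remain at this point. */
    WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
    set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
    unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
    rtas_stop_self();
    BUG();                                  /* should never get here */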
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 22574e0d9d91..75a6f480d931 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h | |||
| @@ -9,10 +9,31 @@ enum cpu_state_vals { | |||
| 9 | CPU_MAX_OFFLINE_STATES | 9 | CPU_MAX_OFFLINE_STATES |
| 10 | }; | 10 | }; |
| 11 | 11 | ||
| 12 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 12 | extern enum cpu_state_vals get_cpu_current_state(int cpu); | 13 | extern enum cpu_state_vals get_cpu_current_state(int cpu); |
| 13 | extern void set_cpu_current_state(int cpu, enum cpu_state_vals state); | 14 | extern void set_cpu_current_state(int cpu, enum cpu_state_vals state); |
| 14 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); | ||
| 15 | extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state); | 15 | extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state); |
| 16 | extern void set_default_offline_state(int cpu); | 16 | extern void set_default_offline_state(int cpu); |
| 17 | #else | ||
| 18 | static inline enum cpu_state_vals get_cpu_current_state(int cpu) | ||
| 19 | { | ||
| 20 | return CPU_STATE_ONLINE; | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state) | ||
| 24 | { | ||
| 25 | } | ||
| 26 | |||
| 27 | static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state) | ||
| 28 | { | ||
| 29 | } | ||
| 30 | |||
| 31 | static inline void set_default_offline_state(int cpu) | ||
| 32 | { | ||
| 33 | } | ||
| 34 | #endif | ||
| 35 | |||
| 36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); | ||
| 17 | extern int start_secondary(void); | 37 | extern int start_secondary(void); |
| 38 | extern void start_secondary_resume(void); | ||
| 18 | #endif | 39 | #endif |
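The new inline stubs mean code that records CPU offline state no longer needs its own CONFIG_HOTPLUG_CPU conditionals; a hypothetical caller (not from this patch) compiles unchanged in both configurations:

    /* Hypothetical caller; the stubs above turn these calls into no-ops
     * when CONFIG_HOTPLUG_CPU is not set. */
    static void example_note_cpu_online(int cpu)
    {
            set_cpu_current_state(cpu, CPU_STATE_ONLINE);
            set_default_offline_state(cpu);
    }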
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 0603c91538ae..a05f8d427856 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h | |||
| @@ -259,12 +259,12 @@ static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) | |||
| 259 | return plpar_hcall_norets(H_IPI, servernum, mfrr); | 259 | return plpar_hcall_norets(H_IPI, servernum, mfrr); |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | static inline long plpar_xirr(unsigned long *xirr_ret) | 262 | static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr) |
| 263 | { | 263 | { |
| 264 | long rc; | 264 | long rc; |
| 265 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 265 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
| 266 | 266 | ||
| 267 | rc = plpar_hcall(H_XIRR, retbuf); | 267 | rc = plpar_hcall(H_XIRR, retbuf, cppr); |
| 268 | 268 | ||
| 269 | *xirr_ret = retbuf[0]; | 269 | *xirr_ret = retbuf[0]; |
| 270 | 270 | ||
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 4ca641042ec3..1bcedd8b4616 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
| @@ -120,12 +120,12 @@ static inline void direct_qirr_info(int n_cpu, u8 value) | |||
| 120 | 120 | ||
| 121 | /* LPAR low level accessors */ | 121 | /* LPAR low level accessors */ |
| 122 | 122 | ||
| 123 | static inline unsigned int lpar_xirr_info_get(void) | 123 | static inline unsigned int lpar_xirr_info_get(unsigned char cppr) |
| 124 | { | 124 | { |
| 125 | unsigned long lpar_rc; | 125 | unsigned long lpar_rc; |
| 126 | unsigned long return_value; | 126 | unsigned long return_value; |
| 127 | 127 | ||
| 128 | lpar_rc = plpar_xirr(&return_value); | 128 | lpar_rc = plpar_xirr(&return_value, cppr); |
| 129 | if (lpar_rc != H_SUCCESS) | 129 | if (lpar_rc != H_SUCCESS) |
| 130 | panic(" bad return code xirr - rc = %lx\n", lpar_rc); | 130 | panic(" bad return code xirr - rc = %lx\n", lpar_rc); |
| 131 | return (unsigned int)return_value; | 131 | return (unsigned int)return_value; |
| @@ -331,7 +331,8 @@ static unsigned int xics_get_irq_direct(void) | |||
| 331 | 331 | ||
| 332 | static unsigned int xics_get_irq_lpar(void) | 332 | static unsigned int xics_get_irq_lpar(void) |
| 333 | { | 333 | { |
| 334 | unsigned int xirr = lpar_xirr_info_get(); | 334 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); |
| 335 | unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); | ||
| 335 | unsigned int vec = xics_xirr_vector(xirr); | 336 | unsigned int vec = xics_xirr_vector(xirr); |
| 336 | unsigned int irq; | 337 | unsigned int irq; |
| 337 | 338 | ||
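Threading the CPPR into H_XIRR: the per-cpu xics_cppr stack already tracks the OS view of the current interrupt priority, and its top entry is now handed to the hypervisor along with the XIRR fetch (presumably so the priority can be updated in the same hcall rather than via a separate H_CPPR operation). A minimal sketch of the caller side, with a simplified stand-in for the per-cpu structure:

    #include <linux/percpu.h>

    /* Stand-in for the real per-cpu structure used in xics.c. */
    struct xics_cppr_example {
            unsigned char stack[3];
            int index;
    };

    static DEFINE_PER_CPU(struct xics_cppr_example, example_cppr);

    static unsigned int example_get_xirr(void)
    {
            struct xics_cppr_example *os_cppr = &__get_cpu_var(example_cppr);

            /* pass the current priority down with the H_XIRR hcall */
            return lpar_xirr_info_get(os_cppr->stack[os_cppr->index]);
    }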
diff --git a/arch/powerpc/sysdev/cpm2_pic.h b/arch/powerpc/sysdev/cpm2_pic.h index 30e5828a2781..2c5f70c24485 100644 --- a/arch/powerpc/sysdev/cpm2_pic.h +++ b/arch/powerpc/sysdev/cpm2_pic.h | |||
| @@ -3,6 +3,6 @@ | |||
| 3 | 3 | ||
| 4 | extern unsigned int cpm2_get_irq(void); | 4 | extern unsigned int cpm2_get_irq(void); |
| 5 | 5 | ||
| 6 | extern void cpm2_pic_init(struct device_node*); | 6 | extern void cpm2_pic_init(struct device_node *); |
| 7 | 7 | ||
| 8 | #endif /* _PPC_KERNEL_CPM2_H */ | 8 | #endif /* _PPC_KERNEL_CPM2_H */ |
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index d927da893ec4..541ba9863647 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | 33 | ||
| 34 | #include "qe_ic.h" | 34 | #include "qe_ic.h" |
| 35 | 35 | ||
| 36 | static DEFINE_SPINLOCK(qe_ic_lock); | 36 | static DEFINE_RAW_SPINLOCK(qe_ic_lock); |
| 37 | 37 | ||
| 38 | static struct qe_ic_info qe_ic_info[] = { | 38 | static struct qe_ic_info qe_ic_info[] = { |
| 39 | [1] = { | 39 | [1] = { |
| @@ -201,13 +201,13 @@ static void qe_ic_unmask_irq(unsigned int virq) | |||
| 201 | unsigned long flags; | 201 | unsigned long flags; |
| 202 | u32 temp; | 202 | u32 temp; |
| 203 | 203 | ||
| 204 | spin_lock_irqsave(&qe_ic_lock, flags); | 204 | raw_spin_lock_irqsave(&qe_ic_lock, flags); |
| 205 | 205 | ||
| 206 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); | 206 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); |
| 207 | qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, | 207 | qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, |
| 208 | temp | qe_ic_info[src].mask); | 208 | temp | qe_ic_info[src].mask); |
| 209 | 209 | ||
| 210 | spin_unlock_irqrestore(&qe_ic_lock, flags); | 210 | raw_spin_unlock_irqrestore(&qe_ic_lock, flags); |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | static void qe_ic_mask_irq(unsigned int virq) | 213 | static void qe_ic_mask_irq(unsigned int virq) |
| @@ -217,7 +217,7 @@ static void qe_ic_mask_irq(unsigned int virq) | |||
| 217 | unsigned long flags; | 217 | unsigned long flags; |
| 218 | u32 temp; | 218 | u32 temp; |
| 219 | 219 | ||
| 220 | spin_lock_irqsave(&qe_ic_lock, flags); | 220 | raw_spin_lock_irqsave(&qe_ic_lock, flags); |
| 221 | 221 | ||
| 222 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); | 222 | temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); |
| 223 | qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, | 223 | qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, |
| @@ -233,7 +233,7 @@ static void qe_ic_mask_irq(unsigned int virq) | |||
| 233 | */ | 233 | */ |
| 234 | mb(); | 234 | mb(); |
| 235 | 235 | ||
| 236 | spin_unlock_irqrestore(&qe_ic_lock, flags); | 236 | raw_spin_unlock_irqrestore(&qe_ic_lock, flags); |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | static struct irq_chip qe_ic_irq_chip = { | 239 | static struct irq_chip qe_ic_irq_chip = { |
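The qe_ic_lock conversion is the usual interrupt-controller pattern: raw_spinlock_t remains a real spinning lock even in configurations where plain spinlocks may sleep (as in the -rt tree), which is what the mask/unmask paths called from hard interrupt context require. The read-modify-write sequence itself is unchanged; a generic sketch with hypothetical lock and register names:

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_RAW_SPINLOCK(example_pic_lock);

    /* Set bits in a memory-mapped mask register under the raw lock. */
    static void example_pic_set_mask_bits(u32 __iomem *reg, u32 bits)
    {
            unsigned long flags;
            u32 temp;

            raw_spin_lock_irqsave(&example_pic_lock, flags);
            temp = ioread32be(reg);
            iowrite32be(temp | bits, reg);
            raw_spin_unlock_irqrestore(&example_pic_lock, flags);
    }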
