Diffstat (limited to 'include/asm-powerpc')
| -rw-r--r-- | include/asm-powerpc/atomic.h | 209 |
| -rw-r--r-- | include/asm-powerpc/auxvec.h | 2 |
| -rw-r--r-- | include/asm-powerpc/dma.h | 390 |
| -rw-r--r-- | include/asm-powerpc/hw_irq.h | 115 |
| -rw-r--r-- | include/asm-powerpc/kdebug.h | 42 |
| -rw-r--r-- | include/asm-powerpc/kprobes.h | 67 |
| -rw-r--r-- | include/asm-powerpc/mpic.h | 279 |
| -rw-r--r-- | include/asm-powerpc/ppc_asm.h | 3 |
| -rw-r--r-- | include/asm-powerpc/reg.h | 446 |
| -rw-r--r-- | include/asm-powerpc/rwsem.h | 163 |
| -rw-r--r-- | include/asm-powerpc/seccomp.h | 16 |
| -rw-r--r-- | include/asm-powerpc/semaphore.h | 98 |
| -rw-r--r-- | include/asm-powerpc/synch.h | 51 |
| -rw-r--r-- | include/asm-powerpc/system.h | 350 |
14 files changed, 2231 insertions, 0 deletions
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
new file mode 100644
index 000000000000..ed4b345ed75d
--- /dev/null
+++ b/include/asm-powerpc/atomic.h
@@ -0,0 +1,209 @@
| 1 | #ifndef _ASM_POWERPC_ATOMIC_H_ | ||
| 2 | #define _ASM_POWERPC_ATOMIC_H_ | ||
| 3 | |||
| 4 | /* | ||
| 5 | * PowerPC atomic operations | ||
| 6 | */ | ||
| 7 | |||
| 8 | typedef struct { volatile int counter; } atomic_t; | ||
| 9 | |||
| 10 | #ifdef __KERNEL__ | ||
| 11 | #include <asm/synch.h> | ||
| 12 | |||
| 13 | #define ATOMIC_INIT(i) { (i) } | ||
| 14 | |||
| 15 | #define atomic_read(v) ((v)->counter) | ||
| 16 | #define atomic_set(v,i) (((v)->counter) = (i)) | ||
| 17 | |||
| 18 | /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx. | ||
| 19 | * The old ATOMIC_SYNC_FIX covered some but not all of this. | ||
| 20 | */ | ||
| 21 | #ifdef CONFIG_IBM405_ERR77 | ||
| 22 | #define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";" | ||
| 23 | #else | ||
| 24 | #define PPC405_ERR77(ra,rb) | ||
| 25 | #endif | ||
| 26 | |||
| 27 | static __inline__ void atomic_add(int a, atomic_t *v) | ||
| 28 | { | ||
| 29 | int t; | ||
| 30 | |||
| 31 | __asm__ __volatile__( | ||
| 32 | "1: lwarx %0,0,%3 # atomic_add\n\ | ||
| 33 | add %0,%2,%0\n" | ||
| 34 | PPC405_ERR77(0,%3) | ||
| 35 | " stwcx. %0,0,%3 \n\ | ||
| 36 | bne- 1b" | ||
| 37 | : "=&r" (t), "=m" (v->counter) | ||
| 38 | : "r" (a), "r" (&v->counter), "m" (v->counter) | ||
| 39 | : "cc"); | ||
| 40 | } | ||
| 41 | |||
| 42 | static __inline__ int atomic_add_return(int a, atomic_t *v) | ||
| 43 | { | ||
| 44 | int t; | ||
| 45 | |||
| 46 | __asm__ __volatile__( | ||
| 47 | EIEIO_ON_SMP | ||
| 48 | "1: lwarx %0,0,%2 # atomic_add_return\n\ | ||
| 49 | add %0,%1,%0\n" | ||
| 50 | PPC405_ERR77(0,%2) | ||
| 51 | " stwcx. %0,0,%2 \n\ | ||
| 52 | bne- 1b" | ||
| 53 | ISYNC_ON_SMP | ||
| 54 | : "=&r" (t) | ||
| 55 | : "r" (a), "r" (&v->counter) | ||
| 56 | : "cc", "memory"); | ||
| 57 | |||
| 58 | return t; | ||
| 59 | } | ||
| 60 | |||
| 61 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
| 62 | |||
| 63 | static __inline__ void atomic_sub(int a, atomic_t *v) | ||
| 64 | { | ||
| 65 | int t; | ||
| 66 | |||
| 67 | __asm__ __volatile__( | ||
| 68 | "1: lwarx %0,0,%3 # atomic_sub\n\ | ||
| 69 | subf %0,%2,%0\n" | ||
| 70 | PPC405_ERR77(0,%3) | ||
| 71 | " stwcx. %0,0,%3 \n\ | ||
| 72 | bne- 1b" | ||
| 73 | : "=&r" (t), "=m" (v->counter) | ||
| 74 | : "r" (a), "r" (&v->counter), "m" (v->counter) | ||
| 75 | : "cc"); | ||
| 76 | } | ||
| 77 | |||
| 78 | static __inline__ int atomic_sub_return(int a, atomic_t *v) | ||
| 79 | { | ||
| 80 | int t; | ||
| 81 | |||
| 82 | __asm__ __volatile__( | ||
| 83 | EIEIO_ON_SMP | ||
| 84 | "1: lwarx %0,0,%2 # atomic_sub_return\n\ | ||
| 85 | subf %0,%1,%0\n" | ||
| 86 | PPC405_ERR77(0,%2) | ||
| 87 | " stwcx. %0,0,%2 \n\ | ||
| 88 | bne- 1b" | ||
| 89 | ISYNC_ON_SMP | ||
| 90 | : "=&r" (t) | ||
| 91 | : "r" (a), "r" (&v->counter) | ||
| 92 | : "cc", "memory"); | ||
| 93 | |||
| 94 | return t; | ||
| 95 | } | ||
| 96 | |||
| 97 | static __inline__ void atomic_inc(atomic_t *v) | ||
| 98 | { | ||
| 99 | int t; | ||
| 100 | |||
| 101 | __asm__ __volatile__( | ||
| 102 | "1: lwarx %0,0,%2 # atomic_inc\n\ | ||
| 103 | addic %0,%0,1\n" | ||
| 104 | PPC405_ERR77(0,%2) | ||
| 105 | " stwcx. %0,0,%2 \n\ | ||
| 106 | bne- 1b" | ||
| 107 | : "=&r" (t), "=m" (v->counter) | ||
| 108 | : "r" (&v->counter), "m" (v->counter) | ||
| 109 | : "cc"); | ||
| 110 | } | ||
| 111 | |||
| 112 | static __inline__ int atomic_inc_return(atomic_t *v) | ||
| 113 | { | ||
| 114 | int t; | ||
| 115 | |||
| 116 | __asm__ __volatile__( | ||
| 117 | EIEIO_ON_SMP | ||
| 118 | "1: lwarx %0,0,%1 # atomic_inc_return\n\ | ||
| 119 | addic %0,%0,1\n" | ||
| 120 | PPC405_ERR77(0,%1) | ||
| 121 | " stwcx. %0,0,%1 \n\ | ||
| 122 | bne- 1b" | ||
| 123 | ISYNC_ON_SMP | ||
| 124 | : "=&r" (t) | ||
| 125 | : "r" (&v->counter) | ||
| 126 | : "cc", "memory"); | ||
| 127 | |||
| 128 | return t; | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | ||
| 132 | * atomic_inc_and_test - increment and test | ||
| 133 | * @v: pointer of type atomic_t | ||
| 134 | * | ||
| 135 | * Atomically increments @v by 1 | ||
| 136 | * and returns true if the result is zero, or false for all | ||
| 137 | * other cases. | ||
| 138 | */ | ||
| 139 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
| 140 | |||
| 141 | static __inline__ void atomic_dec(atomic_t *v) | ||
| 142 | { | ||
| 143 | int t; | ||
| 144 | |||
| 145 | __asm__ __volatile__( | ||
| 146 | "1: lwarx %0,0,%2 # atomic_dec\n\ | ||
| 147 | addic %0,%0,-1\n" | ||
| 148 | PPC405_ERR77(0,%2)\ | ||
| 149 | " stwcx. %0,0,%2\n\ | ||
| 150 | bne- 1b" | ||
| 151 | : "=&r" (t), "=m" (v->counter) | ||
| 152 | : "r" (&v->counter), "m" (v->counter) | ||
| 153 | : "cc"); | ||
| 154 | } | ||
| 155 | |||
| 156 | static __inline__ int atomic_dec_return(atomic_t *v) | ||
| 157 | { | ||
| 158 | int t; | ||
| 159 | |||
| 160 | __asm__ __volatile__( | ||
| 161 | EIEIO_ON_SMP | ||
| 162 | "1: lwarx %0,0,%1 # atomic_dec_return\n\ | ||
| 163 | addic %0,%0,-1\n" | ||
| 164 | PPC405_ERR77(0,%1) | ||
| 165 | " stwcx. %0,0,%1\n\ | ||
| 166 | bne- 1b" | ||
| 167 | ISYNC_ON_SMP | ||
| 168 | : "=&r" (t) | ||
| 169 | : "r" (&v->counter) | ||
| 170 | : "cc", "memory"); | ||
| 171 | |||
| 172 | return t; | ||
| 173 | } | ||
| 174 | |||
| 175 | #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) | ||
| 176 | #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) | ||
| 177 | |||
| 178 | /* | ||
| 179 | * Atomically test *v and decrement if it is greater than 0. | ||
| 180 | * The function returns the old value of *v minus 1. | ||
| 181 | */ | ||
| 182 | static __inline__ int atomic_dec_if_positive(atomic_t *v) | ||
| 183 | { | ||
| 184 | int t; | ||
| 185 | |||
| 186 | __asm__ __volatile__( | ||
| 187 | EIEIO_ON_SMP | ||
| 188 | "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ | ||
| 189 | addic. %0,%0,-1\n\ | ||
| 190 | blt- 2f\n" | ||
| 191 | PPC405_ERR77(0,%1) | ||
| 192 | " stwcx. %0,0,%1\n\ | ||
| 193 | bne- 1b" | ||
| 194 | ISYNC_ON_SMP | ||
| 195 | "\n\ | ||
| 196 | 2:" : "=&r" (t) | ||
| 197 | : "r" (&v->counter) | ||
| 198 | : "cc", "memory"); | ||
| 199 | |||
| 200 | return t; | ||
| 201 | } | ||
| 202 | |||
| 203 | #define smp_mb__before_atomic_dec() smp_mb() | ||
| 204 | #define smp_mb__after_atomic_dec() smp_mb() | ||
| 205 | #define smp_mb__before_atomic_inc() smp_mb() | ||
| 206 | #define smp_mb__after_atomic_inc() smp_mb() | ||
| 207 | |||
| 208 | #endif /* __KERNEL__ */ | ||
| 209 | #endif /* _ASM_POWERPC_ATOMIC_H_ */ | ||
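The file above is the merged 32/64-bit atomic API: plain read/set macros plus lwarx/stwcx. retry loops, with EIEIO/ISYNC barriers only on the value-returning variants. A minimal usage sketch follows; the refcount name and the functions around it are hypothetical, not part of this patch.

	/* Hedged sketch -- assumes only the API defined in atomic.h above. */
	static atomic_t example_refcount = ATOMIC_INIT(1);	/* hypothetical counter */

	static void example_get(void)
	{
		atomic_inc(&example_refcount);		/* non-returning op: no barriers */
	}

	static int example_put(void)
	{
		/* returning variant: ordered by EIEIO_ON_SMP/ISYNC_ON_SMP */
		return atomic_dec_and_test(&example_refcount);	/* nonzero once it reaches 0 */
	}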
diff --git a/include/asm-powerpc/auxvec.h b/include/asm-powerpc/auxvec.h
index 19a099b62cd6..79d8c4732309 100644
--- a/include/asm-powerpc/auxvec.h
+++ b/include/asm-powerpc/auxvec.h
@@ -14,6 +14,8 @@
| 14 | /* The vDSO location. We have to use the same value as x86 for glibc's | 14 | /* The vDSO location. We have to use the same value as x86 for glibc's |
| 15 | * sake :-) | 15 | * sake :-) |
| 16 | */ | 16 | */ |
| 17 | #ifdef __powerpc64__ | ||
| 17 | #define AT_SYSINFO_EHDR 33 | 18 | #define AT_SYSINFO_EHDR 33 |
| 19 | #endif | ||
| 18 | 20 | ||
| 19 | #endif | 21 | #endif |
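AT_SYSINFO_EHDR is the auxiliary-vector entry through which the kernel tells userspace where the vDSO ELF image is mapped; the hunk above simply hides it from 32-bit builds that do not provide a vDSO. A hedged userspace illustration of what the entry carries (getauxval() is a much later glibc helper, used here only for brevity):

	#include <sys/auxv.h>
	#include <stdio.h>

	int main(void)
	{
		/* base address of the vDSO ELF header, 0 if the kernel passed none */
		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
		printf("vDSO ELF header at %#lx\n", vdso);
		return 0;
	}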
diff --git a/include/asm-powerpc/dma.h b/include/asm-powerpc/dma.h
new file mode 100644
index 000000000000..926378d2cd94
--- /dev/null
+++ b/include/asm-powerpc/dma.h
@@ -0,0 +1,390 @@
| 1 | #ifndef _ASM_POWERPC_DMA_H | ||
| 2 | #define _ASM_POWERPC_DMA_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Defines for using and allocating dma channels. | ||
| 6 | * Written by Hennus Bergman, 1992. | ||
| 7 | * High DMA channel support & info by Hannu Savolainen | ||
| 8 | * and John Boyd, Nov. 1992. | ||
| 9 | * Changes for ppc sound by Christoph Nadig | ||
| 10 | */ | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Note: Adapted for PowerPC by Gary Thomas | ||
| 14 | * Modified by Cort Dougan <cort@cs.nmt.edu> | ||
| 15 | * | ||
| 16 | * None of this really applies for Power Macintoshes. There is | ||
| 17 | * basically just enough here to get kernel/dma.c to compile. | ||
| 18 | * | ||
| 19 | * There may be some comments or restrictions made here which are | ||
| 20 | * not valid for the PReP platform. Take what you read | ||
| 21 | * with a grain of salt. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/config.h> | ||
| 25 | #include <asm/io.h> | ||
| 26 | #include <linux/spinlock.h> | ||
| 27 | #include <asm/system.h> | ||
| 28 | |||
| 29 | #ifndef MAX_DMA_CHANNELS | ||
| 30 | #define MAX_DMA_CHANNELS 8 | ||
| 31 | #endif | ||
| 32 | |||
| 33 | /* The maximum address that we can perform a DMA transfer to on this platform */ | ||
| 34 | /* Doesn't really apply... */ | ||
| 35 | #define MAX_DMA_ADDRESS (~0UL) | ||
| 36 | |||
| 37 | #if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) | ||
| 38 | |||
| 39 | #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER | ||
| 40 | #define dma_outb outb_p | ||
| 41 | #else | ||
| 42 | #define dma_outb outb | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #define dma_inb inb | ||
| 46 | |||
| 47 | /* | ||
| 48 | * NOTES about DMA transfers: | ||
| 49 | * | ||
| 50 | * controller 1: channels 0-3, byte operations, ports 00-1F | ||
| 51 | * controller 2: channels 4-7, word operations, ports C0-DF | ||
| 52 | * | ||
| 53 | * - ALL registers are 8 bits only, regardless of transfer size | ||
| 54 | * - channel 4 is not used - cascades 1 into 2. | ||
| 55 | * - channels 0-3 are byte - addresses/counts are for physical bytes | ||
| 56 | * - channels 5-7 are word - addresses/counts are for physical words | ||
| 57 | * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries | ||
| 58 | * - transfer count loaded to registers is 1 less than actual count | ||
| 59 | * - controller 2 offsets are all even (2x offsets for controller 1) | ||
| 60 | * - page registers for 5-7 don't use data bit 0, represent 128K pages | ||
| 61 | * - page registers for 0-3 use bit 0, represent 64K pages | ||
| 62 | * | ||
| 63 | * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory. | ||
| 64 | * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing. | ||
| 65 | * Note that addresses loaded into registers must be _physical_ addresses, | ||
| 66 | * not logical addresses (which may differ if paging is active). | ||
| 67 | * | ||
| 68 | * Address mapping for channels 0-3: | ||
| 69 | * | ||
| 70 | * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) | ||
| 71 | * | ... | | ... | | ... | | ||
| 72 | * | ... | | ... | | ... | | ||
| 73 | * | ... | | ... | | ... | | ||
| 74 | * P7 ... P0 A7 ... A0 A7 ... A0 | ||
| 75 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
| 76 | * | ||
| 77 | * Address mapping for channels 5-7: | ||
| 78 | * | ||
| 79 | * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) | ||
| 80 | * | ... | \ \ ... \ \ \ ... \ \ | ||
| 81 | * | ... | \ \ ... \ \ \ ... \ (not used) | ||
| 82 | * | ... | \ \ ... \ \ \ ... \ | ||
| 83 | * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 | ||
| 84 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
| 85 | * | ||
| 86 | * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses | ||
| 87 | * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at | ||
| 88 | * the hardware level, so odd-byte transfers aren't possible). | ||
| 89 | * | ||
| 90 | * Transfer count (_not # bytes_) is limited to 64K, represented as actual | ||
| 91 | * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, | ||
| 92 | * and up to 128K bytes may be transferred on channels 5-7 in one operation. | ||
| 93 | * | ||
| 94 | */ | ||
| 95 | |||
| 96 | /* see prep_setup_arch() for detailed information */ | ||
| 97 | #if defined(CONFIG_SOUND_CS4232) && defined(CONFIG_PPC_PREP) | ||
| 98 | extern long ppc_cs4232_dma, ppc_cs4232_dma2; | ||
| 99 | #define SND_DMA1 ppc_cs4232_dma | ||
| 100 | #define SND_DMA2 ppc_cs4232_dma2 | ||
| 101 | #else | ||
| 102 | #define SND_DMA1 -1 | ||
| 103 | #define SND_DMA2 -1 | ||
| 104 | #endif | ||
| 105 | |||
| 106 | /* 8237 DMA controllers */ | ||
| 107 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
| 108 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
| 109 | |||
| 110 | /* DMA controller registers */ | ||
| 111 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
| 112 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
| 113 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
| 114 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
| 115 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
| 116 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
| 117 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
| 118 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
| 119 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
| 120 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
| 121 | |||
| 122 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
| 123 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
| 124 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
| 125 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
| 126 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
| 127 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
| 128 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
| 129 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
| 130 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
| 131 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
| 132 | |||
| 133 | #define DMA_ADDR_0 0x00 /* DMA address registers */ | ||
| 134 | #define DMA_ADDR_1 0x02 | ||
| 135 | #define DMA_ADDR_2 0x04 | ||
| 136 | #define DMA_ADDR_3 0x06 | ||
| 137 | #define DMA_ADDR_4 0xC0 | ||
| 138 | #define DMA_ADDR_5 0xC4 | ||
| 139 | #define DMA_ADDR_6 0xC8 | ||
| 140 | #define DMA_ADDR_7 0xCC | ||
| 141 | |||
| 142 | #define DMA_CNT_0 0x01 /* DMA count registers */ | ||
| 143 | #define DMA_CNT_1 0x03 | ||
| 144 | #define DMA_CNT_2 0x05 | ||
| 145 | #define DMA_CNT_3 0x07 | ||
| 146 | #define DMA_CNT_4 0xC2 | ||
| 147 | #define DMA_CNT_5 0xC6 | ||
| 148 | #define DMA_CNT_6 0xCA | ||
| 149 | #define DMA_CNT_7 0xCE | ||
| 150 | |||
| 151 | #define DMA_LO_PAGE_0 0x87 /* DMA page registers */ | ||
| 152 | #define DMA_LO_PAGE_1 0x83 | ||
| 153 | #define DMA_LO_PAGE_2 0x81 | ||
| 154 | #define DMA_LO_PAGE_3 0x82 | ||
| 155 | #define DMA_LO_PAGE_5 0x8B | ||
| 156 | #define DMA_LO_PAGE_6 0x89 | ||
| 157 | #define DMA_LO_PAGE_7 0x8A | ||
| 158 | |||
| 159 | #define DMA_HI_PAGE_0 0x487 /* DMA page registers */ | ||
| 160 | #define DMA_HI_PAGE_1 0x483 | ||
| 161 | #define DMA_HI_PAGE_2 0x481 | ||
| 162 | #define DMA_HI_PAGE_3 0x482 | ||
| 163 | #define DMA_HI_PAGE_5 0x48B | ||
| 164 | #define DMA_HI_PAGE_6 0x489 | ||
| 165 | #define DMA_HI_PAGE_7 0x48A | ||
| 166 | |||
| 167 | #define DMA1_EXT_REG 0x40B | ||
| 168 | #define DMA2_EXT_REG 0x4D6 | ||
| 169 | |||
| 170 | #ifndef __powerpc64__ | ||
| 171 | /* in arch/ppc/kernel/setup.c -- Cort */ | ||
| 172 | extern unsigned int DMA_MODE_WRITE; | ||
| 173 | extern unsigned int DMA_MODE_READ; | ||
| 174 | extern unsigned long ISA_DMA_THRESHOLD; | ||
| 175 | #else | ||
| 176 | #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ | ||
| 177 | #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ | ||
| 178 | #endif | ||
| 179 | |||
| 180 | #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
| 181 | |||
| 182 | #define DMA_AUTOINIT 0x10 | ||
| 183 | |||
| 184 | extern spinlock_t dma_spin_lock; | ||
| 185 | |||
| 186 | static __inline__ unsigned long claim_dma_lock(void) | ||
| 187 | { | ||
| 188 | unsigned long flags; | ||
| 189 | spin_lock_irqsave(&dma_spin_lock, flags); | ||
| 190 | return flags; | ||
| 191 | } | ||
| 192 | |||
| 193 | static __inline__ void release_dma_lock(unsigned long flags) | ||
| 194 | { | ||
| 195 | spin_unlock_irqrestore(&dma_spin_lock, flags); | ||
| 196 | } | ||
| 197 | |||
| 198 | /* enable/disable a specific DMA channel */ | ||
| 199 | static __inline__ void enable_dma(unsigned int dmanr) | ||
| 200 | { | ||
| 201 | unsigned char ucDmaCmd = 0x00; | ||
| 202 | |||
| 203 | if (dmanr != 4) { | ||
| 204 | dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */ | ||
| 205 | dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */ | ||
| 206 | } | ||
| 207 | if (dmanr <= 3) { | ||
| 208 | dma_outb(dmanr, DMA1_MASK_REG); | ||
| 209 | dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */ | ||
| 210 | } else { | ||
| 211 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
| 212 | } | ||
| 213 | } | ||
| 214 | |||
| 215 | static __inline__ void disable_dma(unsigned int dmanr) | ||
| 216 | { | ||
| 217 | if (dmanr <= 3) | ||
| 218 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
| 219 | else | ||
| 220 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
| 221 | } | ||
| 222 | |||
| 223 | /* Clear the 'DMA Pointer Flip Flop'. | ||
| 224 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
| 225 | * Use this once to initialize the FF to a known state. | ||
| 226 | * After that, keep track of it. :-) | ||
| 227 | * --- In order to do that, the DMA routines below should --- | ||
| 228 | * --- only be used while interrupts are disabled! --- | ||
| 229 | */ | ||
| 230 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
| 231 | { | ||
| 232 | if (dmanr <= 3) | ||
| 233 | dma_outb(0, DMA1_CLEAR_FF_REG); | ||
| 234 | else | ||
| 235 | dma_outb(0, DMA2_CLEAR_FF_REG); | ||
| 236 | } | ||
| 237 | |||
| 238 | /* set mode (above) for a specific DMA channel */ | ||
| 239 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
| 240 | { | ||
| 241 | if (dmanr <= 3) | ||
| 242 | dma_outb(mode | dmanr, DMA1_MODE_REG); | ||
| 243 | else | ||
| 244 | dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); | ||
| 245 | } | ||
| 246 | |||
| 247 | /* Set only the page register bits of the transfer address. | ||
| 248 | * This is used for successive transfers when we know the contents of | ||
| 249 | * the lower 16 bits of the DMA current address register, but a 64k boundary | ||
| 250 | * may have been crossed. | ||
| 251 | */ | ||
| 252 | static __inline__ void set_dma_page(unsigned int dmanr, int pagenr) | ||
| 253 | { | ||
| 254 | switch (dmanr) { | ||
| 255 | case 0: | ||
| 256 | dma_outb(pagenr, DMA_LO_PAGE_0); | ||
| 257 | dma_outb(pagenr >> 8, DMA_HI_PAGE_0); | ||
| 258 | break; | ||
| 259 | case 1: | ||
| 260 | dma_outb(pagenr, DMA_LO_PAGE_1); | ||
| 261 | dma_outb(pagenr >> 8, DMA_HI_PAGE_1); | ||
| 262 | break; | ||
| 263 | case 2: | ||
| 264 | dma_outb(pagenr, DMA_LO_PAGE_2); | ||
| 265 | dma_outb(pagenr >> 8, DMA_HI_PAGE_2); | ||
| 266 | break; | ||
| 267 | case 3: | ||
| 268 | dma_outb(pagenr, DMA_LO_PAGE_3); | ||
| 269 | dma_outb(pagenr >> 8, DMA_HI_PAGE_3); | ||
| 270 | break; | ||
| 271 | case 5: | ||
| 272 | if (SND_DMA1 == 5 || SND_DMA2 == 5) | ||
| 273 | dma_outb(pagenr, DMA_LO_PAGE_5); | ||
| 274 | else | ||
| 275 | dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5); | ||
| 276 | dma_outb(pagenr >> 8, DMA_HI_PAGE_5); | ||
| 277 | break; | ||
| 278 | case 6: | ||
| 279 | if (SND_DMA1 == 6 || SND_DMA2 == 6) | ||
| 280 | dma_outb(pagenr, DMA_LO_PAGE_6); | ||
| 281 | else | ||
| 282 | dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6); | ||
| 283 | dma_outb(pagenr >> 8, DMA_HI_PAGE_6); | ||
| 284 | break; | ||
| 285 | case 7: | ||
| 286 | if (SND_DMA1 == 7 || SND_DMA2 == 7) | ||
| 287 | dma_outb(pagenr, DMA_LO_PAGE_7); | ||
| 288 | else | ||
| 289 | dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7); | ||
| 290 | dma_outb(pagenr >> 8, DMA_HI_PAGE_7); | ||
| 291 | break; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | /* Set transfer address & page bits for specific DMA channel. | ||
| 296 | * Assumes dma flipflop is clear. | ||
| 297 | */ | ||
| 298 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys) | ||
| 299 | { | ||
| 300 | if (dmanr <= 3) { | ||
| 301 | dma_outb(phys & 0xff, | ||
| 302 | ((dmanr & 3) << 1) + IO_DMA1_BASE); | ||
| 303 | dma_outb((phys >> 8) & 0xff, | ||
| 304 | ((dmanr & 3) << 1) + IO_DMA1_BASE); | ||
| 305 | } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) { | ||
| 306 | dma_outb(phys & 0xff, | ||
| 307 | ((dmanr & 3) << 2) + IO_DMA2_BASE); | ||
| 308 | dma_outb((phys >> 8) & 0xff, | ||
| 309 | ((dmanr & 3) << 2) + IO_DMA2_BASE); | ||
| 310 | dma_outb((dmanr & 3), DMA2_EXT_REG); | ||
| 311 | } else { | ||
| 312 | dma_outb((phys >> 1) & 0xff, | ||
| 313 | ((dmanr & 3) << 2) + IO_DMA2_BASE); | ||
| 314 | dma_outb((phys >> 9) & 0xff, | ||
| 315 | ((dmanr & 3) << 2) + IO_DMA2_BASE); | ||
| 316 | } | ||
| 317 | set_dma_page(dmanr, phys >> 16); | ||
| 318 | } | ||
| 319 | |||
| 320 | |||
| 321 | /* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for | ||
| 322 | * a specific DMA channel. | ||
| 323 | * You must ensure the parameters are valid. | ||
| 324 | * NOTE: from a manual: "the number of transfers is one more | ||
| 325 | * than the initial word count"! This is taken into account. | ||
| 326 | * Assumes dma flip-flop is clear. | ||
| 327 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
| 328 | */ | ||
| 329 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
| 330 | { | ||
| 331 | count--; | ||
| 332 | if (dmanr <= 3) { | ||
| 333 | dma_outb(count & 0xff, | ||
| 334 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | ||
| 335 | dma_outb((count >> 8) & 0xff, | ||
| 336 | ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); | ||
| 337 | } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) { | ||
| 338 | dma_outb(count & 0xff, | ||
| 339 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | ||
| 340 | dma_outb((count >> 8) & 0xff, | ||
| 341 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | ||
| 342 | } else { | ||
| 343 | dma_outb((count >> 1) & 0xff, | ||
| 344 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | ||
| 345 | dma_outb((count >> 9) & 0xff, | ||
| 346 | ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); | ||
| 347 | } | ||
| 348 | } | ||
| 349 | |||
| 350 | |||
| 351 | /* Get DMA residue count. After a DMA transfer, this | ||
| 352 | * should return zero. Reading this while a DMA transfer is | ||
| 353 | * still in progress will return unpredictable results. | ||
| 354 | * If called before the channel has been used, it may return 1. | ||
| 355 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
| 356 | * | ||
| 357 | * Assumes DMA flip-flop is clear. | ||
| 358 | */ | ||
| 359 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
| 360 | { | ||
| 361 | unsigned int io_port = (dmanr <= 3) | ||
| 362 | ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE | ||
| 363 | : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; | ||
| 364 | |||
| 365 | /* using short to get 16-bit wrap around */ | ||
| 366 | unsigned short count; | ||
| 367 | |||
| 368 | count = 1 + dma_inb(io_port); | ||
| 369 | count += dma_inb(io_port) << 8; | ||
| 370 | |||
| 371 | return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2) | ||
| 372 | ? count : (count << 1); | ||
| 373 | } | ||
| 374 | |||
| 375 | /* These are in kernel/dma.c: */ | ||
| 376 | |||
| 377 | /* reserve a DMA channel */ | ||
| 378 | extern int request_dma(unsigned int dmanr, const char *device_id); | ||
| 379 | /* release it again */ | ||
| 380 | extern void free_dma(unsigned int dmanr); | ||
| 381 | |||
| 382 | #ifdef CONFIG_PCI | ||
| 383 | extern int isa_dma_bridge_buggy; | ||
| 384 | #else | ||
| 385 | #define isa_dma_bridge_buggy (0) | ||
| 386 | #endif | ||
| 387 | |||
| 388 | #endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */ | ||
| 389 | |||
| 390 | #endif /* _ASM_POWERPC_DMA_H */ | ||
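The helpers above follow the classic 8237 programming order: reserve and mask the channel, clear the flip-flop, set mode, address and count, then unmask. A hedged sketch of a caller (channel number, buffer address and length are hypothetical; the error code assumes <linux/errno.h>):

	static int example_start_isa_dma(unsigned int chan,
					 unsigned int phys, unsigned int len)
	{
		unsigned long flags;

		if (request_dma(chan, "example") != 0)	/* reserve the channel */
			return -EBUSY;

		flags = claim_dma_lock();	/* DMA regs and flip-flop need the lock */
		disable_dma(chan);
		clear_dma_ff(chan);		/* known LSB/MSB flip-flop state */
		set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
		set_dma_addr(chan, phys);	/* physical address, not virtual */
		set_dma_count(chan, len);	/* bytes; must be even on channels 5-7 */
		enable_dma(chan);
		release_dma_lock(flags);
		return 0;
	}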
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
new file mode 100644
index 000000000000..605a65e42063
--- /dev/null
+++ b/include/asm-powerpc/hw_irq.h
@@ -0,0 +1,115 @@
| 1 | /* | ||
| 2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
| 3 | */ | ||
| 4 | #ifndef _ASM_POWERPC_HW_IRQ_H | ||
| 5 | #define _ASM_POWERPC_HW_IRQ_H | ||
| 6 | |||
| 7 | #ifdef __KERNEL__ | ||
| 8 | |||
| 9 | #include <linux/config.h> | ||
| 10 | #include <linux/errno.h> | ||
| 11 | #include <asm/ptrace.h> | ||
| 12 | #include <asm/processor.h> | ||
| 13 | #include <asm/irq.h> | ||
| 14 | |||
| 15 | extern void timer_interrupt(struct pt_regs *); | ||
| 16 | extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq); | ||
| 17 | |||
| 18 | #ifdef CONFIG_PPC_ISERIES | ||
| 19 | |||
| 20 | extern unsigned long local_get_flags(void); | ||
| 21 | extern unsigned long local_irq_disable(void); | ||
| 22 | extern void local_irq_restore(unsigned long); | ||
| 23 | |||
| 24 | #define local_irq_enable() local_irq_restore(1) | ||
| 25 | #define local_save_flags(flags) ((flags) = local_get_flags()) | ||
| 26 | #define local_irq_save(flags) ((flags) = local_irq_disable()) | ||
| 27 | |||
| 28 | #define irqs_disabled() (local_get_flags() == 0) | ||
| 29 | |||
| 30 | #else | ||
| 31 | |||
| 32 | #if defined(CONFIG_BOOKE) | ||
| 33 | #define SET_MSR_EE(x) mtmsr(x) | ||
| 34 | #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") | ||
| 35 | #elif defined(__powerpc64__) | ||
| 36 | #define SET_MSR_EE(x) __mtmsrd(x, 1) | ||
| 37 | #define local_irq_restore(flags) do { \ | ||
| 38 | __asm__ __volatile__("": : :"memory"); \ | ||
| 39 | __mtmsrd((flags), 1); \ | ||
| 40 | } while(0) | ||
| 41 | #else | ||
| 42 | #define SET_MSR_EE(x) mtmsr(x) | ||
| 43 | #define local_irq_restore(flags) mtmsr(flags) | ||
| 44 | #endif | ||
| 45 | |||
| 46 | static inline void local_irq_disable(void) | ||
| 47 | { | ||
| 48 | #ifdef CONFIG_BOOKE | ||
| 49 | __asm__ __volatile__("wrteei 0": : :"memory"); | ||
| 50 | #else | ||
| 51 | unsigned long msr; | ||
| 52 | __asm__ __volatile__("": : :"memory"); | ||
| 53 | msr = mfmsr(); | ||
| 54 | SET_MSR_EE(msr & ~MSR_EE); | ||
| 55 | #endif | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void local_irq_enable(void) | ||
| 59 | { | ||
| 60 | #ifdef CONFIG_BOOKE | ||
| 61 | __asm__ __volatile__("wrteei 1": : :"memory"); | ||
| 62 | #else | ||
| 63 | unsigned long msr; | ||
| 64 | __asm__ __volatile__("": : :"memory"); | ||
| 65 | msr = mfmsr(); | ||
| 66 | SET_MSR_EE(msr | MSR_EE); | ||
| 67 | #endif | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline void local_irq_save_ptr(unsigned long *flags) | ||
| 71 | { | ||
| 72 | unsigned long msr; | ||
| 73 | msr = mfmsr(); | ||
| 74 | *flags = msr; | ||
| 75 | #ifdef CONFIG_BOOKE | ||
| 76 | __asm__ __volatile__("wrteei 0": : :"memory"); | ||
| 77 | #else | ||
| 78 | SET_MSR_EE(msr & ~MSR_EE); | ||
| 79 | #endif | ||
| 80 | __asm__ __volatile__("": : :"memory"); | ||
| 81 | } | ||
| 82 | |||
| 83 | #define local_save_flags(flags) ((flags) = mfmsr()) | ||
| 84 | #define local_irq_save(flags) local_irq_save_ptr(&flags) | ||
| 85 | #define irqs_disabled() ((mfmsr() & MSR_EE) == 0) | ||
| 86 | |||
| 87 | #endif /* CONFIG_PPC_ISERIES */ | ||
| 88 | |||
| 89 | #define mask_irq(irq) \ | ||
| 90 | ({ \ | ||
| 91 | irq_desc_t *desc = get_irq_desc(irq); \ | ||
| 92 | if (desc->handler && desc->handler->disable) \ | ||
| 93 | desc->handler->disable(irq); \ | ||
| 94 | }) | ||
| 95 | #define unmask_irq(irq) \ | ||
| 96 | ({ \ | ||
| 97 | irq_desc_t *desc = get_irq_desc(irq); \ | ||
| 98 | if (desc->handler && desc->handler->enable) \ | ||
| 99 | desc->handler->enable(irq); \ | ||
| 100 | }) | ||
| 101 | #define ack_irq(irq) \ | ||
| 102 | ({ \ | ||
| 103 | irq_desc_t *desc = get_irq_desc(irq); \ | ||
| 104 | if (desc->handler && desc->handler->ack) \ | ||
| 105 | desc->handler->ack(irq); \ | ||
| 106 | }) | ||
| 107 | |||
| 108 | /* Should we handle this via lost interrupts and IPIs or should we not care like | ||
| 109 | * we do now? --BenH. | ||
| 110 | */ | ||
| 111 | struct hw_interrupt_type; | ||
| 112 | static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} | ||
| 113 | |||
| 114 | #endif /* __KERNEL__ */ | ||
| 115 | #endif /* _ASM_POWERPC_HW_IRQ_H */ | ||
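These macros implement the generic local_irq_*() contract by toggling MSR_EE (or, on iSeries, the soft-disable state). A minimal sketch of the save/restore pattern they are written for; the critical section itself is hypothetical:

	static void example_update_percpu_state(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* interrupts off, old state remembered */
		/* ... short interrupt-free critical section ... */
		local_irq_restore(flags);	/* previous MSR_EE state comes back */
	}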
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
new file mode 100644
index 000000000000..7c55abf597f6
--- /dev/null
+++ b/include/asm-powerpc/kdebug.h
@@ -0,0 +1,42 @@
| 1 | #ifndef _POWERPC_KDEBUG_H | ||
| 2 | #define _POWERPC_KDEBUG_H 1 | ||
| 3 | |||
| 4 | /* nearly identical to x86_64/i386 code */ | ||
| 5 | |||
| 6 | #include <linux/notifier.h> | ||
| 7 | |||
| 8 | struct pt_regs; | ||
| 9 | |||
| 10 | struct die_args { | ||
| 11 | struct pt_regs *regs; | ||
| 12 | const char *str; | ||
| 13 | long err; | ||
| 14 | int trapnr; | ||
| 15 | int signr; | ||
| 16 | }; | ||
| 17 | |||
| 18 | /* | ||
| 19 | Note - you should never unregister because that can race with NMIs. | ||
| 20 | If you really want to do it, first unregister, then synchronize_sched(), | ||
| 21 | then free. | ||
| 22 | */ | ||
| 23 | int register_die_notifier(struct notifier_block *nb); | ||
| 24 | extern struct notifier_block *powerpc_die_chain; | ||
| 25 | |||
| 26 | /* Grossly misnamed. */ | ||
| 27 | enum die_val { | ||
| 28 | DIE_OOPS = 1, | ||
| 29 | DIE_IABR_MATCH, | ||
| 30 | DIE_DABR_MATCH, | ||
| 31 | DIE_BPT, | ||
| 32 | DIE_SSTEP, | ||
| 33 | DIE_PAGE_FAULT, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, long err, int trap, int sig) | ||
| 37 | { | ||
| 38 | struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig }; | ||
| 39 | return notifier_call_chain(&powerpc_die_chain, val, &args); | ||
| 40 | } | ||
| 41 | |||
| 42 | #endif | ||
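notify_die() walks powerpc_die_chain, so debuggers and probe frameworks hook in through register_die_notifier(). A hedged sketch of such a consumer (the handler name and body are hypothetical; NOTIFY_DONE comes from <linux/notifier.h>):

	static int example_die_handler(struct notifier_block *self,
				       unsigned long val, void *data)
	{
		struct die_args *args = data;

		if (val == DIE_OOPS)
			printk("oops: %s at nip %lx\n", args->str, args->regs->nip);
		return NOTIFY_DONE;
	}

	static struct notifier_block example_die_nb = {
		.notifier_call = example_die_handler,
	};

	/* init code would call: register_die_notifier(&example_die_nb); */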
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
new file mode 100644
index 000000000000..d9129d2b038e
--- /dev/null
+++ b/include/asm-powerpc/kprobes.h
@@ -0,0 +1,67 @@
| 1 | #ifndef _ASM_KPROBES_H | ||
| 2 | #define _ASM_KPROBES_H | ||
| 3 | /* | ||
| 4 | * Kernel Probes (KProbes) | ||
| 5 | * include/asm-ppc64/kprobes.h | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program; if not, write to the Free Software | ||
| 19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 20 | * | ||
| 21 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
| 22 | * | ||
| 23 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel | ||
| 24 | * Probes initial implementation ( includes suggestions from | ||
| 25 | * Rusty Russell). | ||
| 26 | * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli | ||
| 27 | * <ananth@in.ibm.com> | ||
| 28 | */ | ||
| 29 | #include <linux/types.h> | ||
| 30 | #include <linux/ptrace.h> | ||
| 31 | |||
| 32 | struct pt_regs; | ||
| 33 | |||
| 34 | typedef unsigned int kprobe_opcode_t; | ||
| 35 | #define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ | ||
| 36 | #define MAX_INSN_SIZE 1 | ||
| 37 | |||
| 38 | #define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008) | ||
| 39 | #define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088) | ||
| 40 | #define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000) | ||
| 41 | #define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000) | ||
| 42 | |||
| 43 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry) | ||
| 44 | |||
| 45 | #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ | ||
| 46 | IS_TWI(instr) || IS_TDI(instr)) | ||
| 47 | |||
| 48 | #define ARCH_SUPPORTS_KRETPROBES | ||
| 49 | void kretprobe_trampoline(void); | ||
| 50 | |||
| 51 | /* Architecture specific copy of original instruction */ | ||
| 52 | struct arch_specific_insn { | ||
| 53 | /* copy of original instruction */ | ||
| 54 | kprobe_opcode_t *insn; | ||
| 55 | }; | ||
| 56 | |||
| 57 | #ifdef CONFIG_KPROBES | ||
| 58 | extern int kprobe_exceptions_notify(struct notifier_block *self, | ||
| 59 | unsigned long val, void *data); | ||
| 60 | #else /* !CONFIG_KPROBES */ | ||
| 61 | static inline int kprobe_exceptions_notify(struct notifier_block *self, | ||
| 62 | unsigned long val, void *data) | ||
| 63 | { | ||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | #endif | ||
| 67 | #endif /* _ASM_KPROBES_H */ | ||
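The definitions above (BREAKPOINT_INSTRUCTION, MAX_INSN_SIZE, struct arch_specific_insn) are what the generic kprobes core builds on; a client just fills in a struct kprobe and registers it. A hedged sketch, with the probed address resolution and handler left hypothetical:

	static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		printk("kprobe hit at %p, nip=%lx\n", p->addr, regs->nip);
		return 0;			/* let the probed instruction run */
	}

	static struct kprobe example_kp = {
		.pre_handler = example_pre_handler,
		/* .addr would be set to the probed kernel text address first */
	};

	/* register_kprobe(&example_kp) arms the trap; unregister_kprobe() removes it */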
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
new file mode 100644
index 000000000000..f1e24f4b2d1c
--- /dev/null
+++ b/include/asm-powerpc/mpic.h
@@ -0,0 +1,279 @@
| 1 | #include <linux/irq.h> | ||
| 2 | |||
| 3 | /* | ||
| 4 | * Global registers | ||
| 5 | */ | ||
| 6 | |||
| 7 | #define MPIC_GREG_BASE 0x01000 | ||
| 8 | |||
| 9 | #define MPIC_GREG_FEATURE_0 0x00000 | ||
| 10 | #define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000 | ||
| 11 | #define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16 | ||
| 12 | #define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00 | ||
| 13 | #define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8 | ||
| 14 | #define MPIC_GREG_FEATURE_VERSION_MASK 0xff | ||
| 15 | #define MPIC_GREG_FEATURE_1 0x00010 | ||
| 16 | #define MPIC_GREG_GLOBAL_CONF_0 0x00020 | ||
| 17 | #define MPIC_GREG_GCONF_RESET 0x80000000 | ||
| 18 | #define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000 | ||
| 19 | #define MPIC_GREG_GCONF_BASE_MASK 0x000fffff | ||
| 20 | #define MPIC_GREG_GLOBAL_CONF_1 0x00030 | ||
| 21 | #define MPIC_GREG_VENDOR_0 0x00040 | ||
| 22 | #define MPIC_GREG_VENDOR_1 0x00050 | ||
| 23 | #define MPIC_GREG_VENDOR_2 0x00060 | ||
| 24 | #define MPIC_GREG_VENDOR_3 0x00070 | ||
| 25 | #define MPIC_GREG_VENDOR_ID 0x00080 | ||
| 26 | #define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000 | ||
| 27 | #define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16 | ||
| 28 | #define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00 | ||
| 29 | #define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8 | ||
| 30 | #define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff | ||
| 31 | #define MPIC_GREG_PROCESSOR_INIT 0x00090 | ||
| 32 | #define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0 | ||
| 33 | #define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0 | ||
| 34 | #define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0 | ||
| 35 | #define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0 | ||
| 36 | #define MPIC_GREG_SPURIOUS 0x000e0 | ||
| 37 | #define MPIC_GREG_TIMER_FREQ 0x000f0 | ||
| 38 | |||
| 39 | /* | ||
| 40 | * | ||
| 41 | * Timer registers | ||
| 42 | */ | ||
| 43 | #define MPIC_TIMER_BASE 0x01100 | ||
| 44 | #define MPIC_TIMER_STRIDE 0x40 | ||
| 45 | |||
| 46 | #define MPIC_TIMER_CURRENT_CNT 0x00000 | ||
| 47 | #define MPIC_TIMER_BASE_CNT 0x00010 | ||
| 48 | #define MPIC_TIMER_VECTOR_PRI 0x00020 | ||
| 49 | #define MPIC_TIMER_DESTINATION 0x00030 | ||
| 50 | |||
| 51 | /* | ||
| 52 | * Per-Processor registers | ||
| 53 | */ | ||
| 54 | |||
| 55 | #define MPIC_CPU_THISBASE 0x00000 | ||
| 56 | #define MPIC_CPU_BASE 0x20000 | ||
| 57 | #define MPIC_CPU_STRIDE 0x01000 | ||
| 58 | |||
| 59 | #define MPIC_CPU_IPI_DISPATCH_0 0x00040 | ||
| 60 | #define MPIC_CPU_IPI_DISPATCH_1 0x00050 | ||
| 61 | #define MPIC_CPU_IPI_DISPATCH_2 0x00060 | ||
| 62 | #define MPIC_CPU_IPI_DISPATCH_3 0x00070 | ||
| 63 | #define MPIC_CPU_CURRENT_TASK_PRI 0x00080 | ||
| 64 | #define MPIC_CPU_TASKPRI_MASK 0x0000000f | ||
| 65 | #define MPIC_CPU_WHOAMI 0x00090 | ||
| 66 | #define MPIC_CPU_WHOAMI_MASK 0x0000001f | ||
| 67 | #define MPIC_CPU_INTACK 0x000a0 | ||
| 68 | #define MPIC_CPU_EOI 0x000b0 | ||
| 69 | |||
| 70 | /* | ||
| 71 | * Per-source registers | ||
| 72 | */ | ||
| 73 | |||
| 74 | #define MPIC_IRQ_BASE 0x10000 | ||
| 75 | #define MPIC_IRQ_STRIDE 0x00020 | ||
| 76 | #define MPIC_IRQ_VECTOR_PRI 0x00000 | ||
| 77 | #define MPIC_VECPRI_MASK 0x80000000 | ||
| 78 | #define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */ | ||
| 79 | #define MPIC_VECPRI_PRIORITY_MASK 0x000f0000 | ||
| 80 | #define MPIC_VECPRI_PRIORITY_SHIFT 16 | ||
| 81 | #define MPIC_VECPRI_VECTOR_MASK 0x000007ff | ||
| 82 | #define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000 | ||
| 83 | #define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000 | ||
| 84 | #define MPIC_VECPRI_POLARITY_MASK 0x00800000 | ||
| 85 | #define MPIC_VECPRI_SENSE_LEVEL 0x00400000 | ||
| 86 | #define MPIC_VECPRI_SENSE_EDGE 0x00000000 | ||
| 87 | #define MPIC_VECPRI_SENSE_MASK 0x00400000 | ||
| 88 | #define MPIC_IRQ_DESTINATION 0x00010 | ||
| 89 | |||
| 90 | #define MPIC_MAX_IRQ_SOURCES 2048 | ||
| 91 | #define MPIC_MAX_CPUS 32 | ||
| 92 | #define MPIC_MAX_ISU 32 | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Special vector numbers (internal use only) | ||
| 96 | */ | ||
| 97 | #define MPIC_VEC_SPURRIOUS 255 | ||
| 98 | #define MPIC_VEC_IPI_3 254 | ||
| 99 | #define MPIC_VEC_IPI_2 253 | ||
| 100 | #define MPIC_VEC_IPI_1 252 | ||
| 101 | #define MPIC_VEC_IPI_0 251 | ||
| 102 | |||
| 103 | /* unused */ | ||
| 104 | #define MPIC_VEC_TIMER_3 250 | ||
| 105 | #define MPIC_VEC_TIMER_2 249 | ||
| 106 | #define MPIC_VEC_TIMER_1 248 | ||
| 107 | #define MPIC_VEC_TIMER_0 247 | ||
| 108 | |||
| 109 | /* Type definition of the cascade handler */ | ||
| 110 | typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data); | ||
| 111 | |||
| 112 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
| 113 | /* Fixup table entry */ | ||
| 114 | struct mpic_irq_fixup | ||
| 115 | { | ||
| 116 | u8 __iomem *base; | ||
| 117 | unsigned int irq; | ||
| 118 | }; | ||
| 119 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
| 120 | |||
| 121 | |||
| 122 | /* The instance data of a given MPIC */ | ||
| 123 | struct mpic | ||
| 124 | { | ||
| 125 | /* The "linux" controller struct */ | ||
| 126 | hw_irq_controller hc_irq; | ||
| 127 | #ifdef CONFIG_SMP | ||
| 128 | hw_irq_controller hc_ipi; | ||
| 129 | #endif | ||
| 130 | const char *name; | ||
| 131 | /* Flags */ | ||
| 132 | unsigned int flags; | ||
| 133 | /* How many irq sources in a given ISU */ | ||
| 134 | unsigned int isu_size; | ||
| 135 | unsigned int isu_shift; | ||
| 136 | unsigned int isu_mask; | ||
| 137 | /* Offset of irq vector numbers */ | ||
| 138 | unsigned int irq_offset; | ||
| 139 | unsigned int irq_count; | ||
| 140 | /* Offset of ipi vector numbers */ | ||
| 141 | unsigned int ipi_offset; | ||
| 142 | /* Number of sources */ | ||
| 143 | unsigned int num_sources; | ||
| 144 | /* Number of CPUs */ | ||
| 145 | unsigned int num_cpus; | ||
| 146 | /* cascade handler */ | ||
| 147 | mpic_cascade_t cascade; | ||
| 148 | void *cascade_data; | ||
| 149 | unsigned int cascade_vec; | ||
| 150 | /* senses array */ | ||
| 151 | unsigned char *senses; | ||
| 152 | unsigned int senses_count; | ||
| 153 | |||
| 154 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
| 155 | /* The fixup table */ | ||
| 156 | struct mpic_irq_fixup *fixups; | ||
| 157 | spinlock_t fixup_lock; | ||
| 158 | #endif | ||
| 159 | |||
| 160 | /* The various ioremap'ed bases */ | ||
| 161 | volatile u32 __iomem *gregs; | ||
| 162 | volatile u32 __iomem *tmregs; | ||
| 163 | volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS]; | ||
| 164 | volatile u32 __iomem *isus[MPIC_MAX_ISU]; | ||
| 165 | |||
| 166 | /* link */ | ||
| 167 | struct mpic *next; | ||
| 168 | }; | ||
| 169 | |||
| 170 | /* This is the primary controller; only this one has IPIs and | ||
| 171 | * affinity control. A non-primary MPIC always uses CPU0 | ||
| 172 | * registers only | ||
| 173 | */ | ||
| 174 | #define MPIC_PRIMARY 0x00000001 | ||
| 175 | /* Set this for a big-endian MPIC */ | ||
| 176 | #define MPIC_BIG_ENDIAN 0x00000002 | ||
| 177 | /* Broken U3 MPIC */ | ||
| 178 | #define MPIC_BROKEN_U3 0x00000004 | ||
| 179 | /* Broken IPI registers (autodetected) */ | ||
| 180 | #define MPIC_BROKEN_IPI 0x00000008 | ||
| 181 | /* MPIC wants a reset */ | ||
| 182 | #define MPIC_WANTS_RESET 0x00000010 | ||
| 183 | |||
| 184 | /* Allocate the controller structure and setup the linux irq descs | ||
| 185 | * for the range of interrupts passed in. No HW initialization is | ||
| 186 | * actually performed. | ||
| 187 | * | ||
| 188 | * @phys_addr: physical base address of the MPIC | ||
| 189 | * @flags: flags, see constants above | ||
| 190 | * @isu_size: number of interrupts in an ISU. Use 0 to use a | ||
| 191 | * standard ISU-less setup (aka powermac) | ||
| 192 | * @irq_offset: first irq number to assign to this mpic | ||
| 193 | * @irq_count: number of irq sources to use with this mpic. Pass 0 | ||
| 194 | * to match the number of sources | ||
| 195 | * @ipi_offset: first irq number to assign to this mpic IPI sources, | ||
| 196 | * used only on primary mpic | ||
| 197 | * @senses: array of sense values | ||
| 198 | * @senses_num: number of entries in the array | ||
| 199 | * | ||
| 200 | * Note about the sense array. If none is passed, all interrupts are | ||
| 201 | * set up to be level negative unless MPIC_BROKEN_U3 is set, in which | ||
| 202 | * case they are edge positive (and the array is ignored anyway). | ||
| 203 | * The values in the array start at the first source of the MPIC, | ||
| 204 | * that is, senses[0] corresponds to linux irq "irq_offset". | ||
| 205 | */ | ||
| 206 | extern struct mpic *mpic_alloc(unsigned long phys_addr, | ||
| 207 | unsigned int flags, | ||
| 208 | unsigned int isu_size, | ||
| 209 | unsigned int irq_offset, | ||
| 210 | unsigned int irq_count, | ||
| 211 | unsigned int ipi_offset, | ||
| 212 | unsigned char *senses, | ||
| 213 | unsigned int senses_num, | ||
| 214 | const char *name); | ||
| 215 | |||
| 216 | /* Assign ISUs, to call before mpic_init() | ||
| 217 | * | ||
| 218 | * @mpic: controller structure as returned by mpic_alloc() | ||
| 219 | * @isu_num: ISU number | ||
| 220 | * @phys_addr: physical address of the ISU | ||
| 221 | */ | ||
| 222 | extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | ||
| 223 | unsigned long phys_addr); | ||
| 224 | |||
| 225 | /* Initialize the controller. After this has been called, none of the above | ||
| 226 | * should be called again for this mpic | ||
| 227 | */ | ||
| 228 | extern void mpic_init(struct mpic *mpic); | ||
| 229 | |||
| 230 | /* Setup a cascade. Currently, only one cascade is supported this | ||
| 231 | * way, though you can always do a normal request_irq() and add | ||
| 232 | * other cascades this way. You should call this _after_ having | ||
| 233 | * added all the ISUs | ||
| 234 | * | ||
| 235 | * @irq_no: "linux" irq number of the cascade (that is offset'ed vector) | ||
| 236 | * @handler: cascade handler function | ||
| 237 | */ | ||
| 238 | extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler, | ||
| 239 | void *data); | ||
| 240 | |||
| 241 | /* | ||
| 242 | * All of the following functions must only be used after the | ||
| 243 | * ISUs have been assigned and the controller fully initialized | ||
| 244 | * with mpic_init() | ||
| 245 | */ | ||
| 246 | |||
| 247 | |||
| 248 | /* Change/Read the priority of an interrupt. Default is 8 for irqs and | ||
| 249 | * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the | ||
| 250 | * IPI number is then the offset'ed (linux irq number mapped to the IPI) | ||
| 251 | */ | ||
| 252 | extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri); | ||
| 253 | extern unsigned int mpic_irq_get_priority(unsigned int irq); | ||
| 254 | |||
| 255 | /* Setup a non-boot CPU */ | ||
| 256 | extern void mpic_setup_this_cpu(void); | ||
| 257 | |||
| 258 | /* Clean up for kexec (or cpu offline or ...) */ | ||
| 259 | extern void mpic_teardown_this_cpu(int secondary); | ||
| 260 | |||
| 261 | /* Get the current cpu priority for this cpu (0..15) */ | ||
| 262 | extern int mpic_cpu_get_priority(void); | ||
| 263 | |||
| 264 | /* Set the current cpu priority for this cpu */ | ||
| 265 | extern void mpic_cpu_set_priority(int prio); | ||
| 266 | |||
| 267 | /* Request IPIs on primary mpic */ | ||
| 268 | extern void mpic_request_ipis(void); | ||
| 269 | |||
| 271 | /* Send an IPI (raw, non-offset number 0..3) */ | ||
| 271 | extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); | ||
| 272 | |||
| 273 | /* Fetch interrupt from a given mpic */ | ||
| 274 | extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); | ||
| 275 | /* This one gets to the primary mpic */ | ||
| 276 | extern int mpic_get_irq(struct pt_regs *regs); | ||
| 277 | |||
| 278 | /* global mpic for pSeries */ | ||
| 279 | extern struct mpic *pSeries_mpic; | ||
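Taken together, the API above implies a fixed bring-up order: mpic_alloc(), then mpic_assign_isu() for any distributed sources, then mpic_init(), and only afterwards the priority/IPI calls. A hedged platform-code sketch; the base address, flags and name are hypothetical board values, and an ISU-less, powermac-style setup is assumed:

	static void __init example_pic_init(void)
	{
		struct mpic *mpic;

		mpic = mpic_alloc(0xf8040000 /* hypothetical phys base */,
				  MPIC_PRIMARY | MPIC_WANTS_RESET,
				  0,	/* isu_size 0 => ISU-less setup */
				  0, 0,	/* irq_offset, irq_count (0 = all sources) */
				  128,	/* ipi_offset, primary mpic only */
				  NULL, 0, " EXAMPLE-MPIC ");
		mpic_init(mpic);
		/* a secondary CPU later calls mpic_setup_this_cpu() */
	}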
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 553035cda00e..4efa71878fa9 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -75,8 +75,11 @@
| 75 | #define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base) | 75 | #define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base) |
| 76 | 76 | ||
| 77 | /* Macros to adjust thread priority for Iseries hardware multithreading */ | 77 | /* Macros to adjust thread priority for Iseries hardware multithreading */ |
| 78 | #define HMT_VERY_LOW or 31,31,31 # very low priority | ||
| 78 | #define HMT_LOW or 1,1,1 | 79 | #define HMT_LOW or 1,1,1 |
| 80 | #define HMT_MEDIUM_LOW or 6,6,6 # medium low priority | ||
| 79 | #define HMT_MEDIUM or 2,2,2 | 81 | #define HMT_MEDIUM or 2,2,2 |
| 82 | #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority | ||
| 80 | #define HMT_HIGH or 3,3,3 | 83 | #define HMT_HIGH or 3,3,3 |
| 81 | 84 | ||
| 82 | /* handle instructions that older assemblers may not know */ | 85 | /* handle instructions that older assemblers may not know */ |
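The new HMT_* macros are meant for assembly spin loops on SMT hardware: drop the thread priority while polling, restore it before doing real work. A hedged .S-style sketch (the lock word in r3 is hypothetical):

	1:	HMT_LOW			/* give execution resources to the sibling thread */
		lwz	r0,0(r3)	/* poll the lock word */
		cmpwi	0,r0,0
		bne	1b
		HMT_MEDIUM		/* back to normal priority before proceeding */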
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
new file mode 100644
index 000000000000..f97a5f1761b4
--- /dev/null
+++ b/include/asm-powerpc/reg.h
@@ -0,0 +1,446 @@
| 1 | /* | ||
| 2 | * Contains the definition of registers common to all PowerPC variants. | ||
| 3 | * If a register definition has been changed in a different PowerPC | ||
| 4 | * variant, we will enclose it in #ifndef XXX ... #endif, and have the | ||
| 5 | * number used in the Programming Environments Manual For 32-Bit | ||
| 6 | * Implementations of the PowerPC Architecture (a.k.a. Green Book) here. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifdef __KERNEL__ | ||
| 10 | #ifndef __ASM_PPC_REGS_H__ | ||
| 11 | #define __ASM_PPC_REGS_H__ | ||
| 12 | |||
| 13 | #include <linux/stringify.h> | ||
| 14 | |||
| 15 | /* Pickup Book E specific registers. */ | ||
| 16 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | ||
| 17 | #include <asm/reg_booke.h> | ||
| 18 | #endif | ||
| 19 | |||
| 20 | /* Machine State Register (MSR) Fields */ | ||
| 21 | #define MSR_SF (1<<63) | ||
| 22 | #define MSR_ISF (1<<61) | ||
| 23 | #define MSR_VEC (1<<25) /* Enable AltiVec */ | ||
| 24 | #define MSR_POW (1<<18) /* Enable Power Management */ | ||
| 25 | #define MSR_WE (1<<18) /* Wait State Enable */ | ||
| 26 | #define MSR_TGPR (1<<17) /* TLB Update registers in use */ | ||
| 27 | #define MSR_CE (1<<17) /* Critical Interrupt Enable */ | ||
| 28 | #define MSR_ILE (1<<16) /* Interrupt Little Endian */ | ||
| 29 | #define MSR_EE (1<<15) /* External Interrupt Enable */ | ||
| 30 | #define MSR_PR (1<<14) /* Problem State / Privilege Level */ | ||
| 31 | #define MSR_FP (1<<13) /* Floating Point enable */ | ||
| 32 | #define MSR_ME (1<<12) /* Machine Check Enable */ | ||
| 33 | #define MSR_FE0 (1<<11) /* Floating Exception mode 0 */ | ||
| 34 | #define MSR_SE (1<<10) /* Single Step */ | ||
| 35 | #define MSR_BE (1<<9) /* Branch Trace */ | ||
| 36 | #define MSR_DE (1<<9) /* Debug Exception Enable */ | ||
| 37 | #define MSR_FE1 (1<<8) /* Floating Exception mode 1 */ | ||
| 38 | #define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */ | ||
| 39 | #define MSR_IR (1<<5) /* Instruction Relocate */ | ||
| 40 | #define MSR_DR (1<<4) /* Data Relocate */ | ||
| 41 | #define MSR_PE (1<<3) /* Protection Enable */ | ||
| 42 | #define MSR_PX (1<<2) /* Protection Exclusive Mode */ | ||
| 43 | #define MSR_RI (1<<1) /* Recoverable Exception */ | ||
| 44 | #define MSR_LE (1<<0) /* Little Endian */ | ||
| 45 | |||
| 46 | /* Default MSR for kernel mode. */ | ||
| 47 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
| 48 | #define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR) | ||
| 49 | #endif | ||
| 50 | |||
| 51 | #ifndef MSR_KERNEL | ||
| 52 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) | ||
| 53 | #endif | ||
| 54 | |||
| 55 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) | ||
| 56 | |||
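MSR_KERNEL and MSR_USER are just OR-combinations of the bit masks above; MSR_USER differs from MSR_KERNEL only in MSR_PR and MSR_EE. A one-line hedged illustration of how such bits are typically tested (the macro name is hypothetical; the usual kernel helper for this is user_mode()):

	#define example_from_user(regs)	(((regs)->msr & MSR_PR) != 0)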
| 57 | /* Floating Point Status and Control Register (FPSCR) Fields */ | ||
| 58 | #define FPSCR_FX 0x80000000 /* FPU exception summary */ | ||
| 59 | #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ | ||
| 60 | #define FPSCR_VX 0x20000000 /* Invalid operation summary */ | ||
| 61 | #define FPSCR_OX 0x10000000 /* Overflow exception summary */ | ||
| 62 | #define FPSCR_UX 0x08000000 /* Underflow exception summary */ | ||
| 63 | #define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */ | ||
| 64 | #define FPSCR_XX 0x02000000 /* Inexact exception summary */ | ||
| 65 | #define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */ | ||
| 66 | #define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */ | ||
| 67 | #define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */ | ||
| 68 | #define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */ | ||
| 69 | #define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */ | ||
| 70 | #define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */ | ||
| 71 | #define FPSCR_FR 0x00040000 /* Fraction rounded */ | ||
| 72 | #define FPSCR_FI 0x00020000 /* Fraction inexact */ | ||
| 73 | #define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */ | ||
| 74 | #define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */ | ||
| 75 | #define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */ | ||
| 76 | #define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */ | ||
| 77 | #define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */ | ||
| 78 | #define FPSCR_VE 0x00000080 /* Invalid op exception enable */ | ||
| 79 | #define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */ | ||
| 80 | #define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */ | ||
| 81 | #define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */ | ||
| 82 | #define FPSCR_XE 0x00000008 /* FP inexact exception enable */ | ||
| 83 | #define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */ | ||
| 84 | #define FPSCR_RN 0x00000003 /* FPU rounding control */ | ||
| 85 | |||
| 86 | /* Special Purpose Registers (SPRNs)*/ | ||
| 87 | #define SPRN_CTR 0x009 /* Count Register */ | ||
| 88 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ | ||
| 89 | #define DABR_TRANSLATION (1UL << 2) | ||
| 90 | #define SPRN_DAR 0x013 /* Data Address Register */ | ||
| 91 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ | ||
| 92 | #define DSISR_NOHPTE 0x40000000 /* no translation found */ | ||
| 93 | #define DSISR_PROTFAULT 0x08000000 /* protection fault */ | ||
| 94 | #define DSISR_ISSTORE 0x02000000 /* access was a store */ | ||
| 95 | #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ | ||
| 96 | #define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ | ||
| 97 | #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ | ||
| 98 | #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ | ||
| 99 | #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ | ||
| 100 | #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ | ||
| 101 | #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ | ||
| 102 | #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ | ||
| 103 | #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ | ||
| 104 | #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ | ||
| 105 | #define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */ | ||
| 106 | #define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */ | ||
| 107 | #define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */ | ||
| 108 | #define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */ | ||
| 109 | #define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */ | ||
| 110 | #define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */ | ||
| 111 | #define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */ | ||
| 112 | #define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */ | ||
| 113 | #define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */ | ||
| 114 | #define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */ | ||
| 115 | #define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */ | ||
| 116 | #define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */ | ||
| 117 | #define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */ | ||
| 118 | |||
| 119 | #define SPRN_DEC 0x016 /* Decrement Register */ | ||
| 120 | #define SPRN_DER 0x095 /* Debug Enable Register */ | ||
| 121 | #define DER_RSTE 0x40000000 /* Reset Interrupt */ | ||
| 122 | #define DER_CHSTPE 0x20000000 /* Check Stop */ | ||
| 123 | #define DER_MCIE 0x10000000 /* Machine Check Interrupt */ | ||
| 124 | #define DER_EXTIE 0x02000000 /* External Interrupt */ | ||
| 125 | #define DER_ALIE 0x01000000 /* Alignment Interrupt */ | ||
| 126 | #define DER_PRIE 0x00800000 /* Program Interrupt */ | ||
| 127 | #define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */ | ||
| 128 | #define DER_DECIE 0x00200000 /* Decrementer Interrupt */ | ||
| 129 | #define DER_SYSIE 0x00040000 /* System Call Interrupt */ | ||
| 130 | #define DER_TRE 0x00020000 /* Trace Interrupt */ | ||
| 131 | #define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */ | ||
| 132 | #define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */ | ||
| 133 | #define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */ | ||
| 134 | #define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */ | ||
| 135 | #define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */ | ||
| 136 | #define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */ | ||
| 137 | #define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */ | ||
| 138 | #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ | ||
| 139 | #define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ | ||
| 140 | #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ | ||
| 141 | #define SPRN_EAR 0x11A /* External Address Register */ | ||
| 142 | #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ | ||
| 143 | #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */ | ||
| 144 | #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ | ||
| 145 | #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ | ||
| 146 | #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ | ||
| 147 | #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ | ||
| 148 | #define HID0_SBCLK (1<<27) | ||
| 149 | #define HID0_EICE (1<<26) | ||
| 150 | #define HID0_TBEN (1<<26) /* Timebase enable - 745x */ | ||
| 151 | #define HID0_ECLK (1<<25) | ||
| 152 | #define HID0_PAR (1<<24) | ||
| 153 | #define HID0_STEN (1<<24) /* Software table search enable - 745x */ | ||
| 154 | #define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */ | ||
| 155 | #define HID0_DOZE (1<<23) | ||
| 156 | #define HID0_NAP (1<<22) | ||
| 157 | #define HID0_SLEEP (1<<21) | ||
| 158 | #define HID0_DPM (1<<20) | ||
| 159 | #define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */ | ||
| 160 | #define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */ | ||
| 161 | #define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/ | ||
| 162 | #define HID0_ICE (1<<15) /* Instruction Cache Enable */ | ||
| 163 | #define HID0_DCE (1<<14) /* Data Cache Enable */ | ||
| 164 | #define HID0_ILOCK (1<<13) /* Instruction Cache Lock */ | ||
| 165 | #define HID0_DLOCK (1<<12) /* Data Cache Lock */ | ||
| 166 | #define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ | ||
| 167 | #define HID0_DCI (1<<10) /* Data Cache Invalidate */ | ||
| 168 | #define HID0_SPD (1<<9) /* Speculative disable */ | ||
| 169 | #define HID0_DAPUEN (1<<8) /* Debug APU enable */ | ||
| 170 | #define HID0_SGE (1<<7) /* Store Gathering Enable */ | ||
| 171 | #define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */ | ||
| 172 | #define HID0_DFCA (1<<6) /* Data Cache Flush Assist */ | ||
| 173 | #define HID0_LRSTK (1<<4) /* Link register stack - 745x */ | ||
| 174 | #define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */ | ||
| 175 | #define HID0_ABE (1<<3) /* Address Broadcast Enable */ | ||
| 176 | #define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */ | ||
| 177 | #define HID0_BHTE (1<<2) /* Branch History Table Enable */ | ||
| 178 | #define HID0_BTCD (1<<1) /* Branch target cache disable */ | ||
| 179 | #define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */ | ||
| 180 | #define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */ | ||
| 181 | |||
| 182 | #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ | ||
| 183 | #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ | ||
| 184 | #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ | ||
| 185 | #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ | ||
| 186 | #define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */ | ||
| 187 | #define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */ | ||
| 188 | #define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */ | ||
| 189 | #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ | ||
| 190 | #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ | ||
| 191 | #define HID1_PS (1<<16) /* 750FX PLL selection */ | ||
| 192 | #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ | ||
| 193 | #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ | ||
| 194 | #define SPRN_HID4 0x3F4 /* 970 HID4 */ | ||
| 195 | #define SPRN_HID5 0x3F6 /* 970 HID5 */ | ||
| 196 | #if !defined(SPRN_IAC1) && !defined(SPRN_IAC2) | ||
| 197 | #define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ | ||
| 198 | #define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ | ||
| 199 | #endif | ||
| 200 | #define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */ | ||
| 201 | #define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */ | ||
| 202 | #define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */ | ||
| 203 | #define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */ | ||
| 204 | #define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */ | ||
| 205 | #define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */ | ||
| 206 | #define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */ | ||
| 207 | #define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */ | ||
| 208 | #define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */ | ||
| 209 | #define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */ | ||
| 210 | #define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */ | ||
| 211 | #define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */ | ||
| 212 | #define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */ | ||
| 213 | #define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */ | ||
| 214 | #define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */ | ||
| 215 | #define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */ | ||
| 216 | #define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ | ||
| 217 | #define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ | ||
| 218 | #define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */ | ||
| 219 | #define ICTRL_EICE 0x08000000 /* enable icache parity errs */ | ||
| 220 | #define ICTRL_EDC 0x04000000 /* enable dcache parity errs */ | ||
| 221 | #define ICTRL_EICP 0x00000100 /* enable icache par. check */ | ||
| 222 | #define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */ | ||
| 223 | #define SPRN_IMMR 0x27E /* Internal Memory Map Register */ | ||
| 224 | #define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */ | ||
| 225 | #define SPRN_L2CR2 0x3f8 | ||
| 226 | #define L2CR_L2E 0x80000000 /* L2 enable */ | ||
| 227 | #define L2CR_L2PE 0x40000000 /* L2 parity enable */ | ||
| 228 | #define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */ | ||
| 229 | #define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */ | ||
| 230 | #define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */ | ||
| 231 | #define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */ | ||
| 232 | #define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */ | ||
| 233 | #define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */ | ||
| 234 | #define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */ | ||
| 235 | #define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */ | ||
| 236 | #define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */ | ||
| 237 | #define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */ | ||
| 238 | #define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */ | ||
| 239 | #define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */ | ||
| 240 | #define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */ | ||
| 241 | #define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */ | ||
| 242 | #define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */ | ||
| 243 | #define L2CR_L2DO 0x00400000 /* L2 data only */ | ||
| 244 | #define L2CR_L2I 0x00200000 /* L2 global invalidate */ | ||
| 245 | #define L2CR_L2CTL 0x00100000 /* L2 RAM control */ | ||
| 246 | #define L2CR_L2WT 0x00080000 /* L2 write-through */ | ||
| 247 | #define L2CR_L2TS 0x00040000 /* L2 test support */ | ||
| 248 | #define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */ | ||
| 249 | #define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */ | ||
| 250 | #define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */ | ||
| 251 | #define L2CR_L2SL 0x00008000 /* L2 DLL slow */ | ||
| 252 | #define L2CR_L2DF 0x00004000 /* L2 differential clock */ | ||
| 253 | #define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */ | ||
| 254 | #define L2CR_L2IP 0x00000001 /* L2 GI in progress */ | ||
| 255 | #define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */ | ||
| 256 | #define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */ | ||
| 257 | #define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */ | ||
| 258 | #define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */ | ||
| 259 | #define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */ | ||
| 260 | #define L3CR_L3E 0x80000000 /* L3 enable */ | ||
| 261 | #define L3CR_L3PE 0x40000000 /* L3 data parity enable */ | ||
| 262 | #define L3CR_L3APE 0x20000000 /* L3 addr parity enable */ | ||
| 263 | #define L3CR_L3SIZ 0x10000000 /* L3 size */ | ||
| 264 | #define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */ | ||
| 265 | #define L3CR_L3RES 0x04000000 /* L3 special reserved bit */ | ||
| 266 | #define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */ | ||
| 267 | #define L3CR_L3IO 0x00400000 /* L3 instruction only */ | ||
| 268 | #define L3CR_L3SPO 0x00040000 /* L3 sample point override */ | ||
| 269 | #define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */ | ||
| 270 | #define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */ | ||
| 271 | #define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */ | ||
| 272 | #define L3CR_L3HWF 0x00000800 /* L3 hardware flush */ | ||
| 273 | #define L3CR_L3I 0x00000400 /* L3 global invalidate */ | ||
| 274 | #define L3CR_L3RT 0x00000300 /* L3 SRAM type */ | ||
| 275 | #define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */ | ||
| 276 | #define L3CR_L3DO 0x00000040 /* L3 data only mode */ | ||
| 277 | #define L3CR_PMEN 0x00000004 /* L3 private memory enable */ | ||
| 278 | #define L3CR_PMSIZ 0x00000001 /* L3 private memory size */ | ||
| 279 | #define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */ | ||
| 280 | #define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */ | ||
| 281 | #define SPRN_LDSTCR 0x3f8 /* Load/Store control register */ | ||
| 282 | #define SPRN_LDSTDB 0x3f4 /* */ | ||
| 283 | #define SPRN_LR 0x008 /* Link Register */ | ||
| 284 | #define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */ | ||
| 285 | #define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */ | ||
| 286 | #ifndef SPRN_PIR | ||
| 287 | #define SPRN_PIR 0x3FF /* Processor Identification Register */ | ||
| 288 | #endif | ||
| 289 | #define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */ | ||
| 290 | #define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */ | ||
| 291 | #define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */ | ||
| 292 | #define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */ | ||
| 293 | #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ | ||
| 294 | #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ | ||
| 295 | #define SPRN_PVR 0x11F /* Processor Version Register */ | ||
| 296 | #define SPRN_RPA 0x3D6 /* Required Physical Address Register */ | ||
| 297 | #define SPRN_SDA 0x3BF /* Sampled Data Address Register */ | ||
| 298 | #define SPRN_SDR1 0x019 /* MMU Hash Base Register */ | ||
| 299 | #define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */ | ||
| 300 | #define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */ | ||
| 301 | #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ | ||
| 302 | #define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */ | ||
| 303 | #define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */ | ||
| 304 | #define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */ | ||
| 305 | #define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */ | ||
| 306 | #define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */ | ||
| 307 | #define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */ | ||
| 308 | #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ | ||
| 309 | #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ | ||
| 310 | #ifndef SPRN_SVR | ||
| 311 | #define SPRN_SVR 0x11E /* System Version Register */ | ||
| 312 | #endif | ||
| 313 | #define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */ | ||
| 314 | /* these bits were originally defined in an inverted-endian sense, which is confusing */ | ||
| 315 | #define THRM1_TIN (1 << 31) | ||
| 316 | #define THRM1_TIV (1 << 30) | ||
| 317 | #define THRM1_THRES(x) ((x&0x7f)<<23) | ||
| 318 | #define THRM3_SITV(x) ((x&0x3fff)<<1) | ||
| 319 | #define THRM1_TID (1<<2) | ||
| 320 | #define THRM1_TIE (1<<1) | ||
| 321 | #define THRM1_V (1<<0) | ||
| 322 | #define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */ | ||
| 323 | #define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */ | ||
| 324 | #define THRM3_E (1<<0) | ||
| 325 | #define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */ | ||
| 326 | #define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */ | ||
| 327 | #define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */ | ||
| 328 | #define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */ | ||
| 329 | #define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */ | ||
| 330 | #define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */ | ||
| 331 | #define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */ | ||
| 332 | #define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */ | ||
| 333 | #define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ | ||
| 334 | #define SPRN_XER 0x001 /* Fixed Point Exception Register */ | ||
| 335 | |||
| 336 | /* Bit definitions for MMCR0 and PMC1 / PMC2. */ | ||
| 337 | #define MMCR0_PMC1_CYCLES (1 << 7) | ||
| 338 | #define MMCR0_PMC1_ICACHEMISS (5 << 7) | ||
| 339 | #define MMCR0_PMC1_DTLB (6 << 7) | ||
| 340 | #define MMCR0_PMC2_DCACHEMISS 0x6 | ||
| 341 | #define MMCR0_PMC2_CYCLES 0x1 | ||
| 342 | #define MMCR0_PMC2_ITLB 0x7 | ||
| 343 | #define MMCR0_PMC2_LOADMISSTIME 0x5 | ||
| 344 | #define MMCR0_PMXE (1 << 26) | ||
| 345 | |||
| 346 | /* Processor Version Register */ | ||
| 347 | |||
| 348 | /* Processor Version Register (PVR) field extraction */ | ||
| 349 | |||
| 350 | #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ | ||
| 351 | #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ | ||
| 352 | |||
| 353 | /* | ||
| 354 | * IBM has further subdivided the standard PowerPC 16-bit version and | ||
| 355 | * revision subfields of the PVR for the PowerPC 403s into the following: | ||
| 356 | */ | ||
| 357 | |||
| 358 | #define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */ | ||
| 359 | #define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */ | ||
| 360 | #define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */ | ||
| 361 | #define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */ | ||
| 362 | #define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */ | ||
| 363 | #define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */ | ||
| 364 | |||
| 365 | /* Processor Version Numbers */ | ||
| 366 | |||
| 367 | #define PVR_403GA 0x00200000 | ||
| 368 | #define PVR_403GB 0x00200100 | ||
| 369 | #define PVR_403GC 0x00200200 | ||
| 370 | #define PVR_403GCX 0x00201400 | ||
| 371 | #define PVR_405GP 0x40110000 | ||
| 372 | #define PVR_STB03XXX 0x40310000 | ||
| 373 | #define PVR_NP405H 0x41410000 | ||
| 374 | #define PVR_NP405L 0x41610000 | ||
| 375 | #define PVR_601 0x00010000 | ||
| 376 | #define PVR_602 0x00050000 | ||
| 377 | #define PVR_603 0x00030000 | ||
| 378 | #define PVR_603e 0x00060000 | ||
| 379 | #define PVR_603ev 0x00070000 | ||
| 380 | #define PVR_603r 0x00071000 | ||
| 381 | #define PVR_604 0x00040000 | ||
| 382 | #define PVR_604e 0x00090000 | ||
| 383 | #define PVR_604r 0x000A0000 | ||
| 384 | #define PVR_620 0x00140000 | ||
| 385 | #define PVR_740 0x00080000 | ||
| 386 | #define PVR_750 PVR_740 | ||
| 387 | #define PVR_740P 0x10080000 | ||
| 388 | #define PVR_750P PVR_740P | ||
| 389 | #define PVR_7400 0x000C0000 | ||
| 390 | #define PVR_7410 0x800C0000 | ||
| 391 | #define PVR_7450 0x80000000 | ||
| 392 | #define PVR_8540 0x80200000 | ||
| 393 | #define PVR_8560 0x80200000 | ||
| 394 | /* | ||
| 395 | * For the 8xx processors, all of them report the same PVR family for | ||
| 396 | * the PowerPC core. The various versions of these processors must be | ||
| 397 | * differentiated by the version number in the Communication Processor | ||
| 398 | * Module (CPM). | ||
| 399 | */ | ||
| 400 | #define PVR_821 0x00500000 | ||
| 401 | #define PVR_823 PVR_821 | ||
| 402 | #define PVR_850 PVR_821 | ||
| 403 | #define PVR_860 PVR_821 | ||
| 404 | #define PVR_8240 0x00810100 | ||
| 405 | #define PVR_8245 0x80811014 | ||
| 406 | #define PVR_8260 PVR_8240 | ||
| 407 | |||
| 408 | #if 0 | ||
| 409 | /* Segment Registers */ | ||
| 410 | #define SR0 0 | ||
| 411 | #define SR1 1 | ||
| 412 | #define SR2 2 | ||
| 413 | #define SR3 3 | ||
| 414 | #define SR4 4 | ||
| 415 | #define SR5 5 | ||
| 416 | #define SR6 6 | ||
| 417 | #define SR7 7 | ||
| 418 | #define SR8 8 | ||
| 419 | #define SR9 9 | ||
| 420 | #define SR10 10 | ||
| 421 | #define SR11 11 | ||
| 422 | #define SR12 12 | ||
| 423 | #define SR13 13 | ||
| 424 | #define SR14 14 | ||
| 425 | #define SR15 15 | ||
| 426 | #endif | ||
| 427 | |||
| 428 | /* Macros for setting and retrieving special purpose registers */ | ||
| 429 | #ifndef __ASSEMBLY__ | ||
| 430 | #define mfmsr() ({unsigned int rval; \ | ||
| 431 | asm volatile("mfmsr %0" : "=r" (rval)); rval;}) | ||
| 432 | #define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) | ||
| 433 | |||
| 434 | #define mfspr(rn) ({unsigned int rval; \ | ||
| 435 | asm volatile("mfspr %0," __stringify(rn) \ | ||
| 436 | : "=r" (rval)); rval;}) | ||
| 437 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) | ||
| 438 | |||
| 439 | #define mfsrin(v) ({unsigned int rval; \ | ||
| 440 | asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ | ||
| 441 | rval;}) | ||
| 442 | |||
| 443 | #define proc_trap() asm volatile("trap") | ||
| 444 | #endif /* __ASSEMBLY__ */ | ||
| 445 | #endif /* __ASM_PPC_REGS_H__ */ | ||
| 446 | #endif /* __KERNEL__ */ | ||
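For illustration, a minimal sketch of how the mfspr() macro and the PVR_VER()/PVR_REV() extraction macros above fit together. The function name and message format are hypothetical (not from this patch), and the sketch assumes the usual kernel includes (<linux/init.h>, <linux/kernel.h>, <asm/reg.h>) are already in place.

```c
/* Hypothetical example: read and decode the Processor Version Register
 * using mfspr() and the PVR_* field macros defined in reg.h above. */
static void __init report_cpu_pvr(void)
{
	unsigned int pvr = mfspr(SPRN_PVR);

	printk("CPU: PVR version 0x%04x, revision 0x%04x\n",
	       PVR_VER(pvr), PVR_REV(pvr));
}
```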
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h new file mode 100644 index 000000000000..0a5b83a3c949 --- /dev/null +++ b/include/asm-powerpc/rwsem.h | |||
| @@ -0,0 +1,163 @@ | |||
| 1 | #ifndef _ASM_POWERPC_RWSEM_H | ||
| 2 | #define _ASM_POWERPC_RWSEM_H | ||
| 3 | |||
| 4 | #ifdef __KERNEL__ | ||
| 5 | |||
| 6 | /* | ||
| 7 | * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff | ||
| 8 | * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h | ||
| 9 | * by Paul Mackerras <paulus@samba.org>. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/list.h> | ||
| 13 | #include <linux/spinlock.h> | ||
| 14 | #include <asm/atomic.h> | ||
| 15 | #include <asm/system.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * the semaphore definition | ||
| 19 | */ | ||
| 20 | struct rw_semaphore { | ||
| 21 | /* XXX this should be able to be an atomic_t -- paulus */ | ||
| 22 | signed int count; | ||
| 23 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | ||
| 24 | #define RWSEM_ACTIVE_BIAS 0x00000001 | ||
| 25 | #define RWSEM_ACTIVE_MASK 0x0000ffff | ||
| 26 | #define RWSEM_WAITING_BIAS (-0x00010000) | ||
| 27 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | ||
| 28 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | ||
| 29 | spinlock_t wait_lock; | ||
| 30 | struct list_head wait_list; | ||
| 31 | #if RWSEM_DEBUG | ||
| 32 | int debug; | ||
| 33 | #endif | ||
| 34 | }; | ||
| 35 | |||
| 36 | /* | ||
| 37 | * initialisation | ||
| 38 | */ | ||
| 39 | #if RWSEM_DEBUG | ||
| 40 | #define __RWSEM_DEBUG_INIT , 0 | ||
| 41 | #else | ||
| 42 | #define __RWSEM_DEBUG_INIT /* */ | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #define __RWSEM_INITIALIZER(name) \ | ||
| 46 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | ||
| 47 | LIST_HEAD_INIT((name).wait_list) \ | ||
| 48 | __RWSEM_DEBUG_INIT } | ||
| 49 | |||
| 50 | #define DECLARE_RWSEM(name) \ | ||
| 51 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
| 52 | |||
| 53 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
| 54 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
| 55 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
| 56 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
| 57 | |||
| 58 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
| 59 | { | ||
| 60 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
| 61 | spin_lock_init(&sem->wait_lock); | ||
| 62 | INIT_LIST_HEAD(&sem->wait_list); | ||
| 63 | #if RWSEM_DEBUG | ||
| 64 | sem->debug = 0; | ||
| 65 | #endif | ||
| 66 | } | ||
| 67 | |||
| 68 | /* | ||
| 69 | * lock for reading | ||
| 70 | */ | ||
| 71 | static inline void __down_read(struct rw_semaphore *sem) | ||
| 72 | { | ||
| 73 | if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) | ||
| 74 | rwsem_down_read_failed(sem); | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline int __down_read_trylock(struct rw_semaphore *sem) | ||
| 78 | { | ||
| 79 | int tmp; | ||
| 80 | |||
| 81 | while ((tmp = sem->count) >= 0) { | ||
| 82 | if (tmp == cmpxchg(&sem->count, tmp, | ||
| 83 | tmp + RWSEM_ACTIVE_READ_BIAS)) { | ||
| 84 | return 1; | ||
| 85 | } | ||
| 86 | } | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | /* | ||
| 91 | * lock for writing | ||
| 92 | */ | ||
| 93 | static inline void __down_write(struct rw_semaphore *sem) | ||
| 94 | { | ||
| 95 | int tmp; | ||
| 96 | |||
| 97 | tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, | ||
| 98 | (atomic_t *)(&sem->count)); | ||
| 99 | if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) | ||
| 100 | rwsem_down_write_failed(sem); | ||
| 101 | } | ||
| 102 | |||
| 103 | static inline int __down_write_trylock(struct rw_semaphore *sem) | ||
| 104 | { | ||
| 105 | int tmp; | ||
| 106 | |||
| 107 | tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, | ||
| 108 | RWSEM_ACTIVE_WRITE_BIAS); | ||
| 109 | return tmp == RWSEM_UNLOCKED_VALUE; | ||
| 110 | } | ||
| 111 | |||
| 112 | /* | ||
| 113 | * unlock after reading | ||
| 114 | */ | ||
| 115 | static inline void __up_read(struct rw_semaphore *sem) | ||
| 116 | { | ||
| 117 | int tmp; | ||
| 118 | |||
| 119 | tmp = atomic_dec_return((atomic_t *)(&sem->count)); | ||
| 120 | if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) | ||
| 121 | rwsem_wake(sem); | ||
| 122 | } | ||
| 123 | |||
| 124 | /* | ||
| 125 | * unlock after writing | ||
| 126 | */ | ||
| 127 | static inline void __up_write(struct rw_semaphore *sem) | ||
| 128 | { | ||
| 129 | if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, | ||
| 130 | (atomic_t *)(&sem->count)) < 0)) | ||
| 131 | rwsem_wake(sem); | ||
| 132 | } | ||
| 133 | |||
| 134 | /* | ||
| 135 | * implement atomic add functionality | ||
| 136 | */ | ||
| 137 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | ||
| 138 | { | ||
| 139 | atomic_add(delta, (atomic_t *)(&sem->count)); | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * downgrade write lock to read lock | ||
| 144 | */ | ||
| 145 | static inline void __downgrade_write(struct rw_semaphore *sem) | ||
| 146 | { | ||
| 147 | int tmp; | ||
| 148 | |||
| 149 | tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); | ||
| 150 | if (tmp < 0) | ||
| 151 | rwsem_downgrade_wake(sem); | ||
| 152 | } | ||
| 153 | |||
| 154 | /* | ||
| 155 | * implement exchange and add functionality | ||
| 156 | */ | ||
| 157 | static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | ||
| 158 | { | ||
| 159 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); | ||
| 160 | } | ||
| 161 | |||
| 162 | #endif /* __KERNEL__ */ | ||
| 163 | #endif /* _ASM_POWERPC_RWSEM_H */ | ||
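To make the bias arithmetic concrete: each reader adds RWSEM_ACTIVE_BIAS (1), so N uncontended readers leave count == N, while a single writer adds RWSEM_ACTIVE_WRITE_BIAS (-0xffff), keeping the active count in the low 16 bits and making the count negative to signal exclusivity. Below is a minimal caller-side sketch, assuming the generic <linux/rwsem.h> wrappers (down_read(), up_read(), down_write(), up_write()) that invoke the inline __down_read()/__down_write() above; the lock and data names are hypothetical.

```c
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);	/* hypothetical lock */
static int example_value;		/* data it protects */

int example_read(void)
{
	int v;

	down_read(&example_sem);	/* count += RWSEM_ACTIVE_READ_BIAS */
	v = example_value;
	up_read(&example_sem);
	return v;
}

void example_write(int v)
{
	down_write(&example_sem);	/* count += RWSEM_ACTIVE_WRITE_BIAS */
	example_value = v;
	up_write(&example_sem);
}
```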
diff --git a/include/asm-powerpc/seccomp.h b/include/asm-powerpc/seccomp.h new file mode 100644 index 000000000000..1e1cfe12882b --- /dev/null +++ b/include/asm-powerpc/seccomp.h | |||
| @@ -0,0 +1,16 @@ | |||
| 1 | #ifndef _ASM_POWERPC_SECCOMP_H | ||
| 2 | #define _ASM_POWERPC_SECCOMP_H | ||
| 3 | #include <linux/thread_info.h> | ||
| 4 | #include <linux/unistd.h> | ||
| 5 | |||
| 6 | #define __NR_seccomp_read __NR_read | ||
| 7 | #define __NR_seccomp_write __NR_write | ||
| 8 | #define __NR_seccomp_exit __NR_exit | ||
| 9 | #define __NR_seccomp_sigreturn __NR_rt_sigreturn | ||
| 10 | |||
| 11 | #define __NR_seccomp_read_32 __NR_read | ||
| 12 | #define __NR_seccomp_write_32 __NR_write | ||
| 13 | #define __NR_seccomp_exit_32 __NR_exit | ||
| 14 | #define __NR_seccomp_sigreturn_32 __NR_sigreturn | ||
| 15 | |||
| 16 | #endif /* _ASM_POWERPC_SECCOMP_H */ | ||
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h new file mode 100644 index 000000000000..fd42fe97158f --- /dev/null +++ b/include/asm-powerpc/semaphore.h | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | #ifndef _ASM_POWERPC_SEMAPHORE_H | ||
| 2 | #define _ASM_POWERPC_SEMAPHORE_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | ||
| 6 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | ||
| 7 | * Rework semaphores to use atomic_dec_if_positive. | ||
| 8 | * -- Paul Mackerras (paulus@samba.org) | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifdef __KERNEL__ | ||
| 12 | |||
| 13 | #include <asm/atomic.h> | ||
| 14 | #include <asm/system.h> | ||
| 15 | #include <linux/wait.h> | ||
| 16 | #include <linux/rwsem.h> | ||
| 17 | |||
| 18 | struct semaphore { | ||
| 19 | /* | ||
| 20 | * Note that any negative value of count is equivalent to 0, | ||
| 21 | * but additionally indicates that some process(es) might be | ||
| 22 | * sleeping on `wait'. | ||
| 23 | */ | ||
| 24 | atomic_t count; | ||
| 25 | wait_queue_head_t wait; | ||
| 26 | }; | ||
| 27 | |||
| 28 | #define __SEMAPHORE_INITIALIZER(name, n) \ | ||
| 29 | { \ | ||
| 30 | .count = ATOMIC_INIT(n), \ | ||
| 31 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ | ||
| 32 | } | ||
| 33 | |||
| 34 | #define __MUTEX_INITIALIZER(name) \ | ||
| 35 | __SEMAPHORE_INITIALIZER(name, 1) | ||
| 36 | |||
| 37 | #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ | ||
| 38 | struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) | ||
| 39 | |||
| 40 | #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) | ||
| 41 | #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) | ||
| 42 | |||
| 43 | static inline void sema_init (struct semaphore *sem, int val) | ||
| 44 | { | ||
| 45 | atomic_set(&sem->count, val); | ||
| 46 | init_waitqueue_head(&sem->wait); | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline void init_MUTEX (struct semaphore *sem) | ||
| 50 | { | ||
| 51 | sema_init(sem, 1); | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline void init_MUTEX_LOCKED (struct semaphore *sem) | ||
| 55 | { | ||
| 56 | sema_init(sem, 0); | ||
| 57 | } | ||
| 58 | |||
| 59 | extern void __down(struct semaphore * sem); | ||
| 60 | extern int __down_interruptible(struct semaphore * sem); | ||
| 61 | extern void __up(struct semaphore * sem); | ||
| 62 | |||
| 63 | static inline void down(struct semaphore * sem) | ||
| 64 | { | ||
| 65 | might_sleep(); | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Try to get the semaphore, take the slow path if we fail. | ||
| 69 | */ | ||
| 70 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
| 71 | __down(sem); | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline int down_interruptible(struct semaphore * sem) | ||
| 75 | { | ||
| 76 | int ret = 0; | ||
| 77 | |||
| 78 | might_sleep(); | ||
| 79 | |||
| 80 | if (unlikely(atomic_dec_return(&sem->count) < 0)) | ||
| 81 | ret = __down_interruptible(sem); | ||
| 82 | return ret; | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline int down_trylock(struct semaphore * sem) | ||
| 86 | { | ||
| 87 | return atomic_dec_if_positive(&sem->count) < 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline void up(struct semaphore * sem) | ||
| 91 | { | ||
| 92 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) | ||
| 93 | __up(sem); | ||
| 94 | } | ||
| 95 | |||
| 96 | #endif /* __KERNEL__ */ | ||
| 97 | |||
| 98 | #endif /* _ASM_POWERPC_SEMAPHORE_H */ | ||
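A brief usage sketch for the counting semaphore above, using the DECLARE_MUTEX()/down_interruptible()/up() entry points defined in this header; the mutex name and the critical section are hypothetical.

```c
#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(example_mutex);	/* hypothetical mutex, count starts at 1 */

int example_locked_op(void)
{
	if (down_interruptible(&example_mutex))
		return -EINTR;		/* a signal arrived before we got the lock */

	/* ... exclusive work here ... */

	up(&example_mutex);		/* wakes a sleeper via __up() if needed */
	return 0;
}
```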
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h new file mode 100644 index 000000000000..4660c0394a77 --- /dev/null +++ b/include/asm-powerpc/synch.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | #ifndef _ASM_POWERPC_SYNCH_H | ||
| 2 | #define _ASM_POWERPC_SYNCH_H | ||
| 3 | |||
| 4 | #include <linux/config.h> | ||
| 5 | |||
| 6 | #ifdef __powerpc64__ | ||
| 7 | #define __SUBARCH_HAS_LWSYNC | ||
| 8 | #endif | ||
| 9 | |||
| 10 | #ifdef __SUBARCH_HAS_LWSYNC | ||
| 11 | # define LWSYNC lwsync | ||
| 12 | #else | ||
| 13 | # define LWSYNC sync | ||
| 14 | #endif | ||
| 15 | |||
| 16 | |||
| 17 | /* | ||
| 18 | * Arguably the bitops and *xchg operations don't imply any memory barrier | ||
| 19 | * or SMP ordering, but in fact a lot of drivers expect them to imply | ||
| 20 | * both, since they do on x86 cpus. | ||
| 21 | */ | ||
| 22 | #ifdef CONFIG_SMP | ||
| 23 | #define EIEIO_ON_SMP "eieio\n" | ||
| 24 | #define ISYNC_ON_SMP "\n\tisync" | ||
| 25 | #define SYNC_ON_SMP __stringify(LWSYNC) "\n" | ||
| 26 | #else | ||
| 27 | #define EIEIO_ON_SMP | ||
| 28 | #define ISYNC_ON_SMP | ||
| 29 | #define SYNC_ON_SMP | ||
| 30 | #endif | ||
| 31 | |||
| 32 | static inline void eieio(void) | ||
| 33 | { | ||
| 34 | __asm__ __volatile__ ("eieio" : : : "memory"); | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline void isync(void) | ||
| 38 | { | ||
| 39 | __asm__ __volatile__ ("isync" : : : "memory"); | ||
| 40 | } | ||
| 41 | |||
| 42 | #ifdef CONFIG_SMP | ||
| 43 | #define eieio_on_smp() eieio() | ||
| 44 | #define isync_on_smp() isync() | ||
| 45 | #else | ||
| 46 | #define eieio_on_smp() __asm__ __volatile__("": : :"memory") | ||
| 47 | #define isync_on_smp() __asm__ __volatile__("": : :"memory") | ||
| 48 | #endif | ||
| 49 | |||
| 50 | #endif /* _ASM_POWERPC_SYNCH_H */ | ||
| 51 | |||
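For illustration only: eieio() is the helper a driver would use to order successive stores to non-cacheable (device) memory, while the EIEIO_ON_SMP/ISYNC_ON_SMP strings are meant to be pasted into inline assembly so the barrier is emitted only on SMP builds. The sketch below uses a made-up device with raw volatile stores; real code would normally go through the asm/io.h accessors, which carry ordering of their own.

```c
#include <linux/types.h>
#include <asm/synch.h>

/* Hypothetical device: write the payload register before the doorbell
 * register, with eieio() keeping the two non-cacheable stores in order. */
static void example_ring_doorbell(volatile u32 *payload_reg,
				  volatile u32 *doorbell_reg, u32 val)
{
	*payload_reg = val;	/* store the payload first */
	eieio();		/* order it before the doorbell store */
	*doorbell_reg = 1;	/* then notify the device */
}
```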
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h new file mode 100644 index 000000000000..be542efb32d3 --- /dev/null +++ b/include/asm-powerpc/system.h | |||
| @@ -0,0 +1,350 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
| 3 | */ | ||
| 4 | #ifndef __PPC_SYSTEM_H | ||
| 5 | #define __PPC_SYSTEM_H | ||
| 6 | |||
| 7 | #include <linux/config.h> | ||
| 8 | #include <linux/kernel.h> | ||
| 9 | |||
| 10 | #include <asm/hw_irq.h> | ||
| 11 | #include <asm/ppc_asm.h> | ||
| 12 | |||
| 13 | /* | ||
| 14 | * Memory barrier. | ||
| 15 | * The sync instruction guarantees that all memory accesses initiated | ||
| 16 | * by this processor have been performed (with respect to all other | ||
| 17 | * mechanisms that access memory). The eieio instruction is a barrier | ||
| 18 | * providing an ordering (separately) for (a) cacheable stores and (b) | ||
| 19 | * loads and stores to non-cacheable memory (e.g. I/O devices). | ||
| 20 | * | ||
| 21 | * mb() prevents loads and stores being reordered across this point. | ||
| 22 | * rmb() prevents loads being reordered across this point. | ||
| 23 | * wmb() prevents stores being reordered across this point. | ||
| 24 | * read_barrier_depends() prevents data-dependent loads being reordered | ||
| 25 | * across this point (nop on PPC). | ||
| 26 | * | ||
| 27 | * We have to use the sync instructions for mb(), since lwsync doesn't | ||
| 28 | * order loads with respect to previous stores. Lwsync is fine for | ||
| 29 | * rmb(), though. Note that lwsync is interpreted as sync by | ||
| 30 | * 32-bit and older 64-bit CPUs. | ||
| 31 | * | ||
| 32 | * For wmb(), we use sync since wmb is used in drivers to order | ||
| 33 | * stores to system memory with respect to writes to the device. | ||
| 34 | * However, smp_wmb() can be a lighter-weight eieio barrier on | ||
| 35 | * SMP since it is only used to order updates to system memory. | ||
| 36 | */ | ||
| 37 | #define mb() __asm__ __volatile__ ("sync" : : : "memory") | ||
| 38 | #define rmb() __asm__ __volatile__ ("lwsync" : : : "memory") | ||
| 39 | #define wmb() __asm__ __volatile__ ("sync" : : : "memory") | ||
| 40 | #define read_barrier_depends() do { } while(0) | ||
| 41 | |||
| 42 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
| 43 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | ||
| 44 | |||
| 45 | #ifdef CONFIG_SMP | ||
| 46 | #define smp_mb() mb() | ||
| 47 | #define smp_rmb() rmb() | ||
| 48 | #define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") | ||
| 49 | #define smp_read_barrier_depends() read_barrier_depends() | ||
| 50 | #else | ||
| 51 | #define smp_mb() barrier() | ||
| 52 | #define smp_rmb() barrier() | ||
| 53 | #define smp_wmb() barrier() | ||
| 54 | #define smp_read_barrier_depends() do { } while(0) | ||
| 55 | #endif /* CONFIG_SMP */ | ||
| 56 | |||
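A minimal sketch of the pairing these macros exist for (the flag and payload below are hypothetical): the producer orders its data store before the flag store with smp_wmb() (eieio on SMP), and the consumer orders the flag load before the data load with smp_rmb().

```c
/* Hypothetical producer/consumer pair illustrating smp_wmb()/smp_rmb(). */
static int example_data;
static int example_ready;

static void example_publish(int v)
{
	example_data = v;	/* make the payload visible ... */
	smp_wmb();		/* ... before the ready flag */
	example_ready = 1;
}

static int example_consume(int *out)
{
	if (!example_ready)
		return 0;	/* nothing published yet */
	smp_rmb();		/* read the flag before the payload */
	*out = example_data;
	return 1;
}
```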
| 57 | #ifdef __KERNEL__ | ||
| 58 | struct task_struct; | ||
| 59 | struct pt_regs; | ||
| 60 | |||
| 61 | #ifdef CONFIG_DEBUGGER | ||
| 62 | |||
| 63 | extern int (*__debugger)(struct pt_regs *regs); | ||
| 64 | extern int (*__debugger_ipi)(struct pt_regs *regs); | ||
| 65 | extern int (*__debugger_bpt)(struct pt_regs *regs); | ||
| 66 | extern int (*__debugger_sstep)(struct pt_regs *regs); | ||
| 67 | extern int (*__debugger_iabr_match)(struct pt_regs *regs); | ||
| 68 | extern int (*__debugger_dabr_match)(struct pt_regs *regs); | ||
| 69 | extern int (*__debugger_fault_handler)(struct pt_regs *regs); | ||
| 70 | |||
| 71 | #define DEBUGGER_BOILERPLATE(__NAME) \ | ||
| 72 | static inline int __NAME(struct pt_regs *regs) \ | ||
| 73 | { \ | ||
| 74 | if (unlikely(__ ## __NAME)) \ | ||
| 75 | return __ ## __NAME(regs); \ | ||
| 76 | return 0; \ | ||
| 77 | } | ||
| 78 | |||
| 79 | DEBUGGER_BOILERPLATE(debugger) | ||
| 80 | DEBUGGER_BOILERPLATE(debugger_ipi) | ||
| 81 | DEBUGGER_BOILERPLATE(debugger_bpt) | ||
| 82 | DEBUGGER_BOILERPLATE(debugger_sstep) | ||
| 83 | DEBUGGER_BOILERPLATE(debugger_iabr_match) | ||
| 84 | DEBUGGER_BOILERPLATE(debugger_dabr_match) | ||
| 85 | DEBUGGER_BOILERPLATE(debugger_fault_handler) | ||
| 86 | |||
| 87 | #ifdef CONFIG_XMON | ||
| 88 | extern void xmon_init(int enable); | ||
| 89 | #endif | ||
| 90 | |||
| 91 | #else | ||
| 92 | static inline int debugger(struct pt_regs *regs) { return 0; } | ||
| 93 | static inline int debugger_ipi(struct pt_regs *regs) { return 0; } | ||
| 94 | static inline int debugger_bpt(struct pt_regs *regs) { return 0; } | ||
| 95 | static inline int debugger_sstep(struct pt_regs *regs) { return 0; } | ||
| 96 | static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } | ||
| 97 | static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } | ||
| 98 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } | ||
| 99 | #endif | ||
| 100 | |||
| 101 | extern int set_dabr(unsigned long dabr); | ||
| 102 | extern void print_backtrace(unsigned long *); | ||
| 103 | extern void show_regs(struct pt_regs * regs); | ||
| 104 | extern void flush_instruction_cache(void); | ||
| 105 | extern void hard_reset_now(void); | ||
| 106 | extern void poweroff_now(void); | ||
| 107 | |||
| 108 | #ifdef CONFIG_6xx | ||
| 109 | extern long _get_L2CR(void); | ||
| 110 | extern long _get_L3CR(void); | ||
| 111 | extern void _set_L2CR(unsigned long); | ||
| 112 | extern void _set_L3CR(unsigned long); | ||
| 113 | #else | ||
| 114 | #define _get_L2CR() 0L | ||
| 115 | #define _get_L3CR() 0L | ||
| 116 | #define _set_L2CR(val) do { } while(0) | ||
| 117 | #define _set_L3CR(val) do { } while(0) | ||
| 118 | #endif | ||
| 119 | |||
| 120 | extern void via_cuda_init(void); | ||
| 121 | extern void pmac_nvram_init(void); | ||
| 122 | extern void read_rtc_time(void); | ||
| 123 | extern void pmac_find_display(void); | ||
| 124 | extern void giveup_fpu(struct task_struct *); | ||
| 125 | extern void enable_kernel_fp(void); | ||
| 126 | extern void flush_fp_to_thread(struct task_struct *); | ||
| 127 | extern void enable_kernel_altivec(void); | ||
| 128 | extern void giveup_altivec(struct task_struct *); | ||
| 129 | extern void load_up_altivec(struct task_struct *); | ||
| 130 | extern void giveup_spe(struct task_struct *); | ||
| 131 | extern void load_up_spe(struct task_struct *); | ||
| 132 | extern int fix_alignment(struct pt_regs *); | ||
| 133 | extern void cvt_fd(float *from, double *to, unsigned long *fpscr); | ||
| 134 | extern void cvt_df(double *from, float *to, unsigned long *fpscr); | ||
| 135 | |||
| 136 | #ifdef CONFIG_ALTIVEC | ||
| 137 | extern void flush_altivec_to_thread(struct task_struct *); | ||
| 138 | #else | ||
| 139 | static inline void flush_altivec_to_thread(struct task_struct *t) | ||
| 140 | { | ||
| 141 | } | ||
| 142 | #endif | ||
| 143 | |||
| 144 | #ifdef CONFIG_SPE | ||
| 145 | extern void flush_spe_to_thread(struct task_struct *); | ||
| 146 | #else | ||
| 147 | static inline void flush_spe_to_thread(struct task_struct *t) | ||
| 148 | { | ||
| 149 | } | ||
| 150 | #endif | ||
| 151 | |||
| 152 | extern int call_rtas(const char *, int, int, unsigned long *, ...); | ||
| 153 | extern void cacheable_memzero(void *p, unsigned int nb); | ||
| 154 | extern void *cacheable_memcpy(void *, const void *, unsigned int); | ||
| 155 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | ||
| 156 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | ||
| 157 | extern int die(const char *, struct pt_regs *, long); | ||
| 158 | extern void _exception(int, struct pt_regs *, int, unsigned long); | ||
| 159 | #ifdef CONFIG_BOOKE_WDT | ||
| 160 | extern u32 booke_wdt_enabled; | ||
| 161 | extern u32 booke_wdt_period; | ||
| 162 | #endif /* CONFIG_BOOKE_WDT */ | ||
| 163 | |||
| 164 | /* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ | ||
| 165 | extern unsigned char e2a(unsigned char); | ||
| 166 | |||
| 167 | struct device_node; | ||
| 168 | extern void note_scsi_host(struct device_node *, void *); | ||
| 169 | |||
| 170 | extern struct task_struct *__switch_to(struct task_struct *, | ||
| 171 | struct task_struct *); | ||
| 172 | #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) | ||
| 173 | |||
| 174 | struct thread_struct; | ||
| 175 | extern struct task_struct *_switch(struct thread_struct *prev, | ||
| 176 | struct thread_struct *next); | ||
| 177 | |||
| 178 | extern unsigned int rtas_data; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Atomic exchange | ||
| 182 | * | ||
| 183 | * Changes the memory location '*ptr' to be val and returns | ||
| 184 | * the previous value stored there. | ||
| 185 | */ | ||
| 186 | static __inline__ unsigned long | ||
| 187 | __xchg_u32(volatile void *p, unsigned long val) | ||
| 188 | { | ||
| 189 | unsigned long prev; | ||
| 190 | |||
| 191 | __asm__ __volatile__( | ||
| 192 | EIEIO_ON_SMP | ||
| 193 | "1: lwarx %0,0,%2 \n" | ||
| 194 | PPC405_ERR77(0,%2) | ||
| 195 | " stwcx. %3,0,%2 \n\ | ||
| 196 | bne- 1b" | ||
| 197 | ISYNC_ON_SMP | ||
| 198 | : "=&r" (prev), "=m" (*(volatile unsigned int *)p) | ||
| 199 | : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p) | ||
| 200 | : "cc", "memory"); | ||
| 201 | |||
| 202 | return prev; | ||
| 203 | } | ||
| 204 | |||
| 205 | #ifdef CONFIG_PPC64 | ||
| 206 | static __inline__ unsigned long | ||
| 207 | __xchg_u64(volatile void *p, unsigned long val) | ||
| 208 | { | ||
| 209 | unsigned long prev; | ||
| 210 | |||
| 211 | __asm__ __volatile__( | ||
| 212 | EIEIO_ON_SMP | ||
| 213 | "1: ldarx %0,0,%2 \n" | ||
| 214 | PPC405_ERR77(0,%2) | ||
| 215 | " stdcx. %3,0,%2 \n\ | ||
| 216 | bne- 1b" | ||
| 217 | ISYNC_ON_SMP | ||
| 218 | : "=&r" (prev), "=m" (*(volatile unsigned long *)p) | ||
| 219 | : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p) | ||
| 220 | : "cc", "memory"); | ||
| 221 | |||
| 222 | return prev; | ||
| 223 | } | ||
| 224 | #endif | ||
| 225 | |||
| 226 | /* | ||
| 227 | * This function doesn't exist, so you'll get a linker error | ||
| 228 | * if something tries to do an invalid xchg(). | ||
| 229 | */ | ||
| 230 | extern void __xchg_called_with_bad_pointer(void); | ||
| 231 | |||
| 232 | static __inline__ unsigned long | ||
| 233 | __xchg(volatile void *ptr, unsigned long x, unsigned int size) | ||
| 234 | { | ||
| 235 | switch (size) { | ||
| 236 | case 4: | ||
| 237 | return __xchg_u32(ptr, x); | ||
| 238 | #ifdef CONFIG_PPC64 | ||
| 239 | case 8: | ||
| 240 | return __xchg_u64(ptr, x); | ||
| 241 | #endif | ||
| 242 | } | ||
| 243 | __xchg_called_with_bad_pointer(); | ||
| 244 | return x; | ||
| 245 | } | ||
| 246 | |||
| 247 | #define xchg(ptr,x) \ | ||
| 248 | ({ \ | ||
| 249 | __typeof__(*(ptr)) _x_ = (x); \ | ||
| 250 | (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ | ||
| 251 | }) | ||
| 252 | |||
| 253 | #define tas(ptr) (xchg((ptr),1)) | ||
| 254 | |||
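For illustration, one common use of the xchg() defined above: atomically claiming a one-shot pending flag so that only a single CPU acts on it. The flag is hypothetical.

```c
/* Hypothetical: xchg() returns the previous value, so exactly one caller
 * observes a non-zero result even if several CPUs race here. */
static unsigned long example_pending;

static int example_take_pending(void)
{
	return xchg(&example_pending, 0UL) != 0;
}
```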
| 255 | /* | ||
| 256 | * Compare and exchange - if *p == old, set it to new, | ||
| 257 | * and return the old value of *p. | ||
| 258 | */ | ||
| 259 | #define __HAVE_ARCH_CMPXCHG 1 | ||
| 260 | |||
| 261 | static __inline__ unsigned long | ||
| 262 | __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) | ||
| 263 | { | ||
| 264 | unsigned int prev; | ||
| 265 | |||
| 266 | __asm__ __volatile__ ( | ||
| 267 | EIEIO_ON_SMP | ||
| 268 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ | ||
| 269 | cmpw 0,%0,%3\n\ | ||
| 270 | bne- 2f\n" | ||
| 271 | PPC405_ERR77(0,%2) | ||
| 272 | " stwcx. %4,0,%2\n\ | ||
| 273 | bne- 1b" | ||
| 274 | ISYNC_ON_SMP | ||
| 275 | "\n\ | ||
| 276 | 2:" | ||
| 277 | : "=&r" (prev), "=m" (*p) | ||
| 278 | : "r" (p), "r" (old), "r" (new), "m" (*p) | ||
| 279 | : "cc", "memory"); | ||
| 280 | |||
| 281 | return prev; | ||
| 282 | } | ||
| 283 | |||
| 284 | #ifdef CONFIG_PPC64 | ||
| 285 | static __inline__ unsigned long | ||
| 286 | __cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new) | ||
| 287 | { | ||
| 288 | unsigned long prev; | ||
| 289 | |||
| 290 | __asm__ __volatile__ ( | ||
| 291 | EIEIO_ON_SMP | ||
| 292 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ | ||
| 293 | cmpd 0,%0,%3\n\ | ||
| 294 | bne- 2f\n\ | ||
| 295 | stdcx. %4,0,%2\n\ | ||
| 296 | bne- 1b" | ||
| 297 | ISYNC_ON_SMP | ||
| 298 | "\n\ | ||
| 299 | 2:" | ||
| 300 | : "=&r" (prev), "=m" (*p) | ||
| 301 | : "r" (p), "r" (old), "r" (new), "m" (*p) | ||
| 302 | : "cc", "memory"); | ||
| 303 | |||
| 304 | return prev; | ||
| 305 | } | ||
| 306 | #endif | ||
| 307 | |||
| 308 | /* This function doesn't exist, so you'll get a linker error | ||
| 309 | if something tries to do an invalid cmpxchg(). */ | ||
| 310 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
| 311 | |||
| 312 | static __inline__ unsigned long | ||
| 313 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, | ||
| 314 | unsigned int size) | ||
| 315 | { | ||
| 316 | switch (size) { | ||
| 317 | case 4: | ||
| 318 | return __cmpxchg_u32(ptr, old, new); | ||
| 319 | #ifdef CONFIG_PPC64 | ||
| 320 | case 8: | ||
| 321 | return __cmpxchg_u64(ptr, old, new); | ||
| 322 | #endif | ||
| 323 | } | ||
| 324 | __cmpxchg_called_with_bad_pointer(); | ||
| 325 | return old; | ||
| 326 | } | ||
| 327 | |||
| 328 | #define cmpxchg(ptr,o,n) \ | ||
| 329 | ({ \ | ||
| 330 | __typeof__(*(ptr)) _o_ = (o); \ | ||
| 331 | __typeof__(*(ptr)) _n_ = (n); \ | ||
| 332 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
| 333 | (unsigned long)_n_, sizeof(*(ptr))); \ | ||
| 334 | }) | ||
| 335 | |||
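And the usual cmpxchg() retry loop, sketched with a hypothetical counter: re-read the current value and retry until the compare-and-swap succeeds or the update becomes unnecessary.

```c
/* Hypothetical: atomically track the largest value ever observed, using
 * the cmpxchg() defined above in a compare-and-swap retry loop. */
static unsigned long example_max;

static void example_record_max(unsigned long val)
{
	unsigned long old;

	do {
		old = example_max;
		if (old >= val)
			return;		/* current maximum already covers val */
	} while (cmpxchg(&example_max, old, val) != old);
}
```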
| 336 | #ifdef CONFIG_PPC64 | ||
| 337 | /* | ||
| 338 | * We handle most unaligned accesses in hardware. On the other hand | ||
| 339 | * unaligned DMA can be very expensive on some ppc64 IO chips (it does | ||
| 340 | * powers of 2 writes until it reaches sufficient alignment). | ||
| 341 | * | ||
| 342 | * Based on this we disable the IP header alignment in network drivers. | ||
| 343 | */ | ||
| 344 | #define NET_IP_ALIGN 0 | ||
| 345 | #endif | ||
| 346 | |||
| 347 | #define arch_align_stack(x) (x) | ||
| 348 | |||
| 349 | #endif /* __KERNEL__ */ | ||
| 350 | #endif /* __PPC_SYSTEM_H */ | ||
