Diffstat (limited to 'arch/powerpc/include')

 arch/powerpc/include/asm/cpm1.h            |  2
 arch/powerpc/include/asm/dma-mapping.h     | 24
 arch/powerpc/include/asm/highmem.h         | 57
 arch/powerpc/include/asm/hw_irq.h          | 20
 arch/powerpc/include/asm/perf_counter.h    |  2
 arch/powerpc/include/asm/pte-hash64-64k.h  |  3
 arch/powerpc/include/asm/rtas.h            |  5
 7 files changed, 40 insertions(+), 73 deletions(-)
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 2ff798744c1d..7685ffde8821 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
 #define CICR_IEN	((uint)0x00000080)	/* Int. enable */
 #define CICR_SPS	((uint)0x00000001)	/* SCC Spread */
 
-#define IMAP_ADDR	(get_immrbase())
-
 #define CPM_PIN_INPUT     0
 #define CPM_PIN_OUTPUT    1
 #define CPM_PIN_PRIMARY   0
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0c..b44aaabdd1a6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
 			   size, direction);
 }
 
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle,
 			0, size, direction);
 }
 
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
 			offset, size, direction);
 }
 
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
 			size, direction);
 }
 #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
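The pattern in all six helpers above is the same: the sync_* hooks in struct dma_mapping_ops become optional, so an implementation that leaves them NULL (for example one that needs no sync work at all — the exact motivation is not visible in this include-only diff) no longer jumps through a NULL pointer. A minimal, self-contained userspace sketch of that "optional hook" idea follows; the names are purely illustrative and are not kernel API:

#include <stdio.h>

struct backend_ops {
	void (*sync)(void *buf, unsigned long size);	/* hook may be NULL */
};

static void noncoherent_sync(void *buf, unsigned long size)
{
	printf("syncing %lu bytes at %p\n", size, buf);
}

static void do_sync(const struct backend_ops *ops, void *buf, unsigned long size)
{
	if (ops->sync)			/* only call the hook if the backend provides it */
		ops->sync(buf, size);
}

int main(void)
{
	char buf[64];
	struct backend_ops coherent    = { .sync = NULL };
	struct backend_ops noncoherent = { .sync = noncoherent_sync };

	do_sync(&coherent, buf, sizeof(buf));		/* silently a no-op */
	do_sync(&noncoherent, buf, sizeof(buf));	/* calls noncoherent_sync() */
	return 0;
}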
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f4324f..a74c4ee6c020 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
 
 #ifdef __KERNEL__
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/highmem.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+			      pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
 
 static inline void *kmap(struct page *page)
 {
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-	unsigned int idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
-	local_flush_tlb_page(NULL, vaddr);
-
-	return (void*) vaddr;
-}
-
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		return;
-	}
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
-#endif
-	pagefault_enable();
-}
-
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }
 
+
 #define flush_cache_kmaps()	flush_cache_all()
 
 #endif /* __KERNEL__ */
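After this change the header carries only declarations: kmap_atomic_prot() and kunmap_atomic() are extern rather than inline, with their bodies presumably moved to a C file elsewhere under arch/powerpc (the .c side is outside this include-only diffstat, so that is an assumption). Callers are unaffected; the usual usage of this generation of the API looks like the following kernel-context fragment, which is illustrative only and not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Map a highmem page for a short, non-sleeping access. */
static void copy_into_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);	/* disables pagefaults */

	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr, KM_USER0);			/* re-enables them */
}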
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 867ab8ed69b3..8b505eaaa38a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
 
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	mtmsr(flags)
+#define raw_local_irq_restore(flags)	mtmsr(flags)
 #endif
 
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
 #endif
 }
 
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
 #endif
 }
 
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #endif
 }
 
-#define local_save_flags(flags)	((flags) = mfmsr())
-#define local_irq_save(flags)	local_irq_save_ptr(&flags)
-#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags)	((flags) = mfmsr())
+#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)
 
-#define hard_irq_enable()	local_irq_enable()
-#define hard_irq_disable()	local_irq_disable()
+#define hard_irq_disable()	raw_local_irq_disable()
 
 static inline int irqs_disabled_flags(unsigned long flags)
 {
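The raw_ prefixes line these primitives up with the generic IRQ-flags layer: when CONFIG_TRACE_IRQFLAGS is enabled, include/linux/irqflags.h builds the un-prefixed local_irq_* helpers on top of the architecture's raw_* ones so lockdep can track hardirq state, and the new raw_irqs_disabled_flags() is presumably what that layer's local_irq_restore() wrapper consults to decide which trace hook to fire. Roughly, paraphrased from the generic header of this era rather than copied from this patch:

/* Paraphrased sketch of the generic wrappers (CONFIG_TRACE_IRQFLAGS=y);
 * the real macros in include/linux/irqflags.h also add type checking. */
#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
#define local_irq_save(flags) \
	do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)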
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e155768..0ea0639fcf75 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
+#define PERF_COUNTER_INDEX_OFFSET	1
+
 /*
  * Only override the default definitions in include/linux/perf_counter.h
  * if we have hardware PMU support.
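PERF_COUNTER_INDEX_OFFSET is consumed by the generic perf_counter code rather than by anything in this header; the value 1 presumably reflects that powerpc's hardware performance counters (PMC1..PMCn) are numbered from 1 when a counter's index is exported to userspace. The consumer of the define lives outside arch/powerpc/include and is not visible in this diff.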
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372f..82b72207c51c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p) 	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+	(e), ((e) & _PAGE_COMBO) ? \
+		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
 #define __rpte_to_pte(r)	((r).pte)
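With this change __real_pte() only reads the second-half entry at (p) + PTRS_PER_PTE when the PTE carries _PAGE_COMBO, i.e. when a 64K page is actually backed by individual 4K hash slots; otherwise hidx is simply 0. That mirrors __rpte_to_hidx(), which already consults .hidx only in the _PAGE_COMBO case, and presumably avoids dereferencing a second-half entry that need not exist or be meaningful for non-combo mappings.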
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c12339b304..168fce726201 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
 			(devfn << 8) | (reg & 0xff);
 }
 
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */
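Two separate things happen in rtas.h: the RTAS lock becomes a raw_spinlock_t (RTAS can be entered very early and from low-level contexts, which is presumably why a raw lock is wanted; the rationale itself is not in this header), and two __cpuinit helpers for handing the timebase between CPUs are exported. A platform's SMP ops could plug them in along these lines — an illustrative sketch assuming the struct smp_ops_t layout from asm/smp.h of this kernel generation, not code taken from the patch:

/* Illustrative only: other smp_ops_t callbacks (probe, kick_cpu, ...)
 * are omitted; field names are assumptions based on asm/smp.h. */
static struct smp_ops_t example_smp_ops = {
	.give_timebase	= rtas_give_timebase,	/* boot CPU freezes and hands off the timebase */
	.take_timebase	= rtas_take_timebase,	/* secondary CPU picks it up during bringup */
};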