Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--	arch/powerpc/include/asm/cpm1.h	2
-rw-r--r--	arch/powerpc/include/asm/delay.h	38
-rw-r--r--	arch/powerpc/include/asm/dma-mapping.h	24
-rw-r--r--	arch/powerpc/include/asm/fsl_lbc.h	4
-rw-r--r--	arch/powerpc/include/asm/highmem.h	57
-rw-r--r--	arch/powerpc/include/asm/hw_irq.h	26
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	2
-rw-r--r--	arch/powerpc/include/asm/mpc52xx.h	2
-rw-r--r--	arch/powerpc/include/asm/mpc5xxx.h (renamed from arch/powerpc/include/asm/mpc512x.h)	10
-rw-r--r--	arch/powerpc/include/asm/pci.h	13
-rw-r--r--	arch/powerpc/include/asm/perf_counter.h	54
-rw-r--r--	arch/powerpc/include/asm/pgalloc-32.h	2
-rw-r--r--	arch/powerpc/include/asm/pgalloc-64.h	4
-rw-r--r--	arch/powerpc/include/asm/pgalloc.h	6
-rw-r--r--	arch/powerpc/include/asm/pte-hash64-64k.h	3
-rw-r--r--	arch/powerpc/include/asm/reg.h	3
-rw-r--r--	arch/powerpc/include/asm/reg_booke.h	2
-rw-r--r--	arch/powerpc/include/asm/rtas.h	5
-rw-r--r--	arch/powerpc/include/asm/thread_info.h	4
19 files changed, 133 insertions, 128 deletions
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 2ff798744c1d..7685ffde8821 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
 #define CICR_IEN	((uint)0x00000080)	/* Int. enable */
 #define CICR_SPS	((uint)0x00000001)	/* SCC Spread */
 
-#define IMAP_ADDR	(get_immrbase())
-
 #define CPM_PIN_INPUT	0
 #define CPM_PIN_OUTPUT	1
 #define CPM_PIN_PRIMARY	0
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
index f9200a65c632..52e4d54da2a9 100644
--- a/arch/powerpc/include/asm/delay.h
+++ b/arch/powerpc/include/asm/delay.h
@@ -2,8 +2,11 @@
 #define _ASM_POWERPC_DELAY_H
 #ifdef __KERNEL__
 
+#include <asm/time.h>
+
 /*
  * Copyright 1996, Paul Mackerras.
+ * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -30,5 +33,40 @@ extern void udelay(unsigned long usecs);
 #define mdelay(n)	udelay((n) * 1000)
 #endif
 
+/**
+ * spin_event_timeout - spin until a condition becomes true or a timeout elapses
+ * @condition: a C expression to evaluate
+ * @timeout: timeout, in microseconds
+ * @delay: the number of microseconds to delay between each evaluation of
+ *         @condition
+ *
+ * The process spins until the condition evaluates to true (non-zero) or the
+ * timeout elapses.  The return value of this macro is the value of
+ * @condition when the loop terminates, which lets the caller tell why the
+ * loop terminated: if the return value is zero, a timeout has occurred.
+ *
+ * The primary purpose of this macro is to poll a hardware register until a
+ * status bit changes.  The timeout ensures that the loop still terminates
+ * even if the bit never changes.  The delay is for devices that need a delay
+ * between successive reads.
+ *
+ * gcc will optimize out the if-statement if @delay is a constant.
+ */
+#define spin_event_timeout(condition, timeout, delay)                          \
+({                                                                             \
+	typeof(condition) __ret;                                               \
+	unsigned long __loops = tb_ticks_per_usec * timeout;                   \
+	unsigned long __start = get_tbl();                                     \
+	while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \
+		if (delay)                                                     \
+			udelay(delay);                                         \
+		else                                                           \
+			cpu_relax();                                           \
+	if (!__ret)                                                            \
+		__ret = (condition);                                           \
+	__ret;                                                                 \
+})
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_DELAY_H */
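
As a usage sketch of the new spin_event_timeout() macro (not part of this patch; the register, bit mask, timeout values and function name are hypothetical), it is typically used to poll a device status register:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll a (hypothetical) memory-mapped status register for up to 1000 us,
 * re-reading every 10 us.  spin_event_timeout() returns the final value of
 * the condition, so a zero result means the wait timed out. */
static int wait_for_ready(void __iomem *status_reg)
{
	if (!spin_event_timeout(in_be32(status_reg) & 0x1, 1000, 10))
		return -ETIMEDOUT;
	return 0;
}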
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0c..b44aaabdd1a6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
 			size, direction);
 }
 
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle,
 			0, size, direction);
 }
 
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
 			offset, size, direction);
 }
 
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
 			size, direction);
 }
 #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
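
A caller-side sketch (not part of this patch; the device, handle and length names are hypothetical): with the NULL checks added above, a dma_sync_*() call on a platform whose dma_mapping_ops omit the sync hooks now simply does nothing instead of dereferencing a NULL pointer.

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: make a DMA'd receive buffer visible to the
 * CPU before reading it.  Safe even if the platform's dma_mapping_ops leave
 * sync_single_range_for_cpu NULL. */
static void example_rx_complete(struct device *dev, dma_addr_t rx_handle,
				size_t rx_len)
{
	dma_sync_single_for_cpu(dev, rx_handle, rx_len, DMA_FROM_DEVICE);
}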
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 63a4f779f531..1b5a21041f9b 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -95,8 +95,8 @@ struct fsl_lbc_bank {
 };
 
 struct fsl_lbc_regs {
-	struct fsl_lbc_bank bank[8];
-	u8 res0[0x28];
+	struct fsl_lbc_bank bank[12];
+	u8 res0[0x8];
 	__be32 mar;	/**< UPM Address Register */
 	u8 res1[0x4];
 	__be32 mamr;	/**< UPMA Mode Register */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f4324f..a74c4ee6c020 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
 
 #ifdef __KERNEL__
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/highmem.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+			      pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
 
 static inline void *kmap(struct page *page)
 {
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-	unsigned int idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
-	local_flush_tlb_page(NULL, vaddr);
-
-	return (void*) vaddr;
-}
-
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		return;
-	}
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
-#endif
-	pagefault_enable();
-}
-
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }
 
+
 #define flush_cache_kmaps()	flush_cache_all()
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b7f8f4a87cc0..8b505eaaa38a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
 
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	mtmsr(flags)
+#define raw_local_irq_restore(flags)	mtmsr(flags)
 #endif
 
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
 #endif
 }
 
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
 #endif
 }
 
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #endif
 }
 
-#define local_save_flags(flags)	((flags) = mfmsr())
-#define local_irq_save(flags)	local_irq_save_ptr(&flags)
-#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags)	((flags) = mfmsr())
+#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)
 
-#define hard_irq_enable()	local_irq_enable()
-#define hard_irq_disable()	local_irq_disable()
+#define hard_irq_disable()		raw_local_irq_disable()
 
 static inline int irqs_disabled_flags(unsigned long flags)
 {
@@ -131,6 +131,8 @@ static inline int irqs_disabled_flags(unsigned long flags)
 struct irq_chip;
 
 #ifdef CONFIG_PERF_COUNTERS
+
+#ifdef CONFIG_PPC64
 static inline unsigned long test_perf_counter_pending(void)
 {
 	unsigned long x;
@@ -154,15 +156,15 @@ static inline void clear_perf_counter_pending(void)
154 "r" (0), 156 "r" (0),
155 "i" (offsetof(struct paca_struct, perf_counter_pending))); 157 "i" (offsetof(struct paca_struct, perf_counter_pending)));
156} 158}
159#endif /* CONFIG_PPC64 */
157 160
158#else 161#else /* CONFIG_PERF_COUNTERS */
159 162
160static inline unsigned long test_perf_counter_pending(void) 163static inline unsigned long test_perf_counter_pending(void)
161{ 164{
162 return 0; 165 return 0;
163} 166}
164 167
165static inline void set_perf_counter_pending(void) {}
166static inline void clear_perf_counter_pending(void) {} 168static inline void clear_perf_counter_pending(void) {}
167#endif /* CONFIG_PERF_COUNTERS */ 169#endif /* CONFIG_PERF_COUNTERS */
168 170
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index dfdf13c9fefd..fddc3ed715fa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,7 +34,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
-#define KVM_PAGES_PER_HPAGE (1<<31)
+#define KVM_PAGES_PER_HPAGE (1UL << 31)
 
 struct kvm;
 struct kvm_run;
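
The 1UL cast above matters because 1 << 31 is evaluated in a signed 32-bit int; an illustrative stand-alone snippet (not from the kernel tree):

#include <stdio.h>

int main(void)
{
	/* 1 << 31 shifts into the sign bit of a 32-bit int; widening it
	 * sign-extends to a negative value on typical compilers (strictly,
	 * undefined behaviour). */
	long long from_int = 1 << 31;
	/* 1UL << 31 stays unsigned and yields the intended 2147483648. */
	unsigned long from_ulong = 1UL << 31;

	printf("%lld %lu\n", from_int, from_ulong);
	return 0;
}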
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 52e049cd9e68..1b4f697abbdd 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -16,6 +16,7 @@
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 #include <asm/prom.h>
+#include <asm/mpc5xxx.h>
 #endif /* __ASSEMBLY__ */
 
 #include <linux/suspend.h>
@@ -268,7 +269,6 @@ struct mpc52xx_intr {
 #ifndef __ASSEMBLY__
 
 /* mpc52xx_common.c */
-extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
 extern void mpc5200_setup_xlb_arbiter(void);
 extern void mpc52xx_declare_of_platform_devices(void);
 extern void mpc52xx_map_common_devices(void);
diff --git a/arch/powerpc/include/asm/mpc512x.h b/arch/powerpc/include/asm/mpc5xxx.h
index c48a1658eeac..5ce9c5fa434a 100644
--- a/arch/powerpc/include/asm/mpc512x.h
+++ b/arch/powerpc/include/asm/mpc5xxx.h
@@ -4,7 +4,7 @@
  * Author: John Rigby, <jrigby@freescale.com>, Friday Apr 13 2007
  *
  * Description:
- * MPC5121 Prototypes and definitions
+ * MPC5xxx Prototypes and definitions
  *
  * This is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by
@@ -13,10 +13,10 @@
  *
  */
 
-#ifndef __ASM_POWERPC_MPC512x_H__
-#define __ASM_POWERPC_MPC512x_H__
+#ifndef __ASM_POWERPC_MPC5xxx_H__
+#define __ASM_POWERPC_MPC5xxx_H__
 
-extern unsigned long mpc512x_find_ips_freq(struct device_node *node);
+extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node);
 
-#endif /* __ASM_POWERPC_MPC512x_H__ */
+#endif /* __ASM_POWERPC_MPC5xxx_H__ */
 
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index ba17d5d90a49..d9483c504d2d 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -195,19 +195,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev,
 				struct resource *res,
 				struct pci_bus_region *region);
 
-static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
-			struct resource *res)
-{
-	struct resource *root = NULL;
-
-	if (res->flags & IORESOURCE_IO)
-		root = &ioport_resource;
-	if (res->flags & IORESOURCE_MEM)
-		root = &iomem_resource;
-
-	return root;
-}
-
 extern void pcibios_claim_one_bus(struct pci_bus *b);
 
 extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index cc7c887705b8..0ea0639fcf75 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -10,6 +10,8 @@
  */
 #include <linux/types.h>
 
+#include <asm/hw_irq.h>
+
 #define MAX_HWCOUNTERS		8
 #define MAX_EVENT_ALTERNATIVES	8
 #define MAX_LIMITED_HWCOUNTERS	2
@@ -19,27 +21,27 @@
  * describe the PMU on a particular POWER-family CPU.
  */
 struct power_pmu {
-	int	n_counter;
-	int	max_alternatives;
-	u64	add_fields;
-	u64	test_adder;
-	int	(*compute_mmcr)(u64 events[], int n_ev,
-				unsigned int hwc[], u64 mmcr[]);
-	int	(*get_constraint)(u64 event, u64 *mskp, u64 *valp);
-	int	(*get_alternatives)(u64 event, unsigned int flags,
-				u64 alt[]);
-	void	(*disable_pmc)(unsigned int pmc, u64 mmcr[]);
-	int	(*limited_pmc_event)(u64 event);
-	u32	flags;
-	int	n_generic;
-	int	*generic_events;
-	int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+	const char	*name;
+	int		n_counter;
+	int		max_alternatives;
+	unsigned long	add_fields;
+	unsigned long	test_adder;
+	int		(*compute_mmcr)(u64 events[], int n_ev,
+				unsigned int hwc[], unsigned long mmcr[]);
+	int		(*get_constraint)(u64 event, unsigned long *mskp,
+				unsigned long *valp);
+	int		(*get_alternatives)(u64 event, unsigned int flags,
+				u64 alt[]);
+	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
+	int		(*limited_pmc_event)(u64 event);
+	u32		flags;
+	int		n_generic;
+	int		*generic_events;
+	int		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
 			       [PERF_COUNT_HW_CACHE_OP_MAX]
 			       [PERF_COUNT_HW_CACHE_RESULT_MAX];
 };
 
-extern struct power_pmu *ppmu;
-
 /*
  * Values for power_pmu.flags
  */
@@ -53,15 +55,25 @@ extern struct power_pmu *ppmu;
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
+extern int register_power_pmu(struct power_pmu *);
+
 struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs)	perf_misc_flags(regs)
-
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
+#define PERF_COUNTER_INDEX_OFFSET	1
+
+/*
+ * Only override the default definitions in include/linux/perf_counter.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+#endif
+
 /*
- * The power_pmu.get_constraint function returns a 64-bit value and
- * a 64-bit mask that express the constraints between this event and
+ * The power_pmu.get_constraint function returns a 32/64-bit value and
+ * a 32/64-bit mask that express the constraints between this event and
  * other events.
  *
  * The value and mask are divided up into (non-overlapping) bitfields
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 0815eb40acae..c9500d666a1d 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  */
 /* #define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); }) */
 #define pmd_free(mm, x)			do { } while (0)
-#define __pmd_free_tlb(tlb,x)		do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)		do { } while (0)
 /* #define pgd_populate(mm, pmd, pte)	BUG() */
 
 #ifndef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index afda2bdd860f..e6f069c4f713 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
 		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-#define __pmd_free_tlb(tlb, pmd)	\
+#define __pmd_free_tlb(tlb, pmd,addr)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
 #ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud)	\
+#define __pud_free_tlb(tlb, pud, addr)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
 		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 #endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 5d8480265a77..1730e5e298d6 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage)	\
+#define __pte_free_tlb(tlb,ptepage,address)	\
 do { \
 	pgtable_page_dtor(ptepage); \
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
 		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
 } while (0)
 #else
-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, (pte))
 #endif
 
 
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372f..82b72207c51c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+	(e), ((e) & _PAGE_COMBO) ? \
+	(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
 #define __rpte_to_pte(r)	((r).pte)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a3c28e46947c..1170267736d3 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -755,7 +755,8 @@
 #define mfspr(rn)	({unsigned long rval; \
 			asm volatile("mfspr %0," __stringify(rn) \
 				: "=r" (rval)); rval;})
-#define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
+#define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\
+				     : "memory")
 
 #ifdef __powerpc64__
 #ifdef CONFIG_PPC_CELL
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 601ddbc46002..6bcf364cbb2f 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -389,12 +389,14 @@
 #define ICCR_CACHE	1		/* Cacheable */
 
 /* Bit definitions for L1CSR0. */
+#define L1CSR0_CPE	0x00010000	/* Data Cache Parity Enable */
 #define L1CSR0_CLFC	0x00000100	/* Cache Lock Bits Flash Clear */
 #define L1CSR0_DCFI	0x00000002	/* Data Cache Flash Invalidate */
 #define L1CSR0_CFI	0x00000002	/* Cache Flash Invalidate */
 #define L1CSR0_DCE	0x00000001	/* Data Cache Enable */
 
 /* Bit definitions for L1CSR1. */
+#define L1CSR1_CPE	0x00010000	/* Instruction Cache Parity Enable */
 #define L1CSR1_ICLFR	0x00000100	/* Instr Cache Lock Bits Flash Reset */
 #define L1CSR1_ICFI	0x00000002	/* Instr Cache Flash Invalidate */
 #define L1CSR1_ICE	0x00000001	/* Instr Cache Enable */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c12339b304..168fce726201 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
 		(devfn << 8) | (reg & 0xff);
 }
 
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9aba5a38a7c4..c8b329255678 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -46,15 +46,13 @@ struct thread_info {
 
 /*
  * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #define INIT_THREAD_INFO(tsk) \
 { \
 	.task =		&tsk, \
 	.exec_domain =	&default_exec_domain, \
 	.cpu =		0, \
-	.preempt_count = 1, \
+	.preempt_count = INIT_PREEMPT_COUNT, \
 	.restart_block = { \
 		.fn = do_no_restart_syscall, \
 	}, \