Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |   1
-rw-r--r--  arch/powerpc/kernel/dma.c                  |  26
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S       | 103
-rw-r--r--  arch/powerpc/kernel/ibmebus.c              |   1
-rw-r--r--  arch/powerpc/kernel/paca.c                 |   1
-rw-r--r--  arch/powerpc/kernel/pci-common.c           | 276
-rw-r--r--  arch/powerpc/kernel/pci_32.c               | 101
-rw-r--r--  arch/powerpc/kernel/pci_64.c               | 134
-rw-r--r--  arch/powerpc/kernel/process.c              |   4
-rw-r--r--  arch/powerpc/kernel/prom_parse.c           |   5
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c             |  48
-rw-r--r--  arch/powerpc/kernel/setup_64.c             |   2
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c           |  12
-rw-r--r--  arch/powerpc/kernel/smp.c                  |  59
-rw-r--r--  arch/powerpc/kernel/time.c                 |  36
-rw-r--r--  arch/powerpc/kernel/traps.c                |  62
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S  | 208
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S  | 141
18 files changed, 728 insertions(+), 492 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 75c5dd0138fd..050abfd5c17c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -306,6 +306,7 @@ int main(void)
 	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
 	DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
 	DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+	DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
 	DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
 	DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
 	DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
 {
 }
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+static inline void dma_direct_sync_sg(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+}
+#endif
+
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent = dma_direct_alloc_coherent,
 	.free_coherent = dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
 	.dma_supported = dma_direct_dma_supported,
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	.sync_single_range_for_cpu = dma_direct_sync_single_range,
+	.sync_single_range_for_device = dma_direct_sync_single_range,
+	.sync_sg_for_cpu = dma_direct_sync_sg,
+	.sync_sg_for_device = dma_direct_sync_sg,
+#endif
 };
 EXPORT_SYMBOL(dma_direct_ops);
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 590304c24dad..9a4639c459e6 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -92,6 +92,7 @@ _ENTRY(_start);
  * if needed
  */
 
+_ENTRY(__early_start)
 /* 1. Find the index of the entry we're executing in */
 	bl	invstr			/* Find our address */
 invstr:	mflr	r6			/* Make it accessible */
@@ -235,36 +236,40 @@ skpinv: addi r6,r6,1 /* Increment */
 	tlbivax 0,r9
 	TLBSYNC
 
+/* The mapping only needs to be cache-coherent on SMP */
+#ifdef CONFIG_SMP
+#define M_IF_SMP	MAS2_M
+#else
+#define M_IF_SMP	0
+#endif
+
 /* 6. Setup KERNELBASE mapping in TLB1[0] */
 	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
 	mtspr	SPRN_MAS0,r6
 	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
 	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
 	mtspr	SPRN_MAS1,r6
-	li	r7,0
-	lis	r6,PAGE_OFFSET@h
-	ori	r6,r6,PAGE_OFFSET@l
-	rlwimi	r6,r7,0,20,31
+	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h
+	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l
 	mtspr	SPRN_MAS2,r6
 	mtspr	SPRN_MAS3,r8
 	tlbwe
 
 /* 7. Jump to KERNELBASE mapping */
-	lis	r6,KERNELBASE@h
-	ori	r6,r6,KERNELBASE@l
-	rlwimi	r6,r7,0,20,31
+	lis	r6,(KERNELBASE & ~0xfff)@h
+	ori	r6,r6,(KERNELBASE & ~0xfff)@l
 	lis	r7,MSR_KERNEL@h
 	ori	r7,r7,MSR_KERNEL@l
 	bl	1f			/* Find our address */
 1:	mflr	r9
 	rlwimi	r6,r9,0,20,31
-	addi	r6,r6,24
+	addi	r6,r6,(2f - 1b)
 	mtspr	SPRN_SRR0,r6
 	mtspr	SPRN_SRR1,r7
 	rfi				/* start execution out of TLB1[0] entry */
 
 /* 8. Clear out the temp mapping */
-	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
+2:	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
 	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
 	mtspr	SPRN_MAS0,r7
 	tlbre
@@ -344,6 +349,15 @@ skpinv: addi r6,r6,1 /* Increment */
 	mtspr	SPRN_DBSR,r2
 #endif
 
+#ifdef CONFIG_SMP
+	/* Check to see if we're the second processor, and jump
+	 * to the secondary_start code if so
+	 */
+	mfspr	r24,SPRN_PIR
+	cmpwi	r24,0
+	bne	__secondary_start
+#endif
+
 	/*
 	 * This is where the main kernel code starts.
 	 */
@@ -685,12 +699,13 @@ interrupt_base:
 	/* SPE Floating Point Data */
 #ifdef CONFIG_SPE
 	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
-#else
-	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
 
 	/* SPE Floating Point Round */
+	EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+#else
+	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
 	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
 
 	/* Performance Monitor */
 	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
@@ -735,6 +750,9 @@ finish_tlb_load:
 #else
 	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
 #endif
+#ifdef CONFIG_SMP
+	ori	r12, r12, MAS2_M
+#endif
 	mtspr	SPRN_MAS2, r12
 
 	li	r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
@@ -746,7 +764,7 @@ finish_tlb_load:
 	iseleq	r12, r12, r10
 
 #ifdef CONFIG_PTE_64BIT
-2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
+	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
 	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
 	mtspr	SPRN_MAS3, r12
 BEGIN_FTR_SECTION
@@ -754,7 +772,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_MAS7, r10
 END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
 #else
-2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
+	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
 	mtspr	SPRN_MAS3, r11
 #endif
 #ifdef CONFIG_E200
@@ -1037,6 +1055,63 @@ _GLOBAL(flush_dcache_L1)
 
 	blr
 
+#ifdef CONFIG_SMP
+/* When we get here, r24 needs to hold the CPU # */
+	.globl __secondary_start
+__secondary_start:
+	lis	r3,__secondary_hold_acknowledge@h
+	ori	r3,r3,__secondary_hold_acknowledge@l
+	stw	r24,0(r3)
+
+	li	r3,0
+	mr	r4,r24		/* Why? */
+	bl	call_setup_cpu
+
+	lis	r3,tlbcam_index@ha
+	lwz	r3,tlbcam_index@l(r3)
+	mtctr	r3
+	li	r26,0		/* r26 safe? */
+
+	/* Load each CAM entry */
+1:	mr	r3,r26
+	bl	loadcam_entry
+	addi	r26,r26,1
+	bdnz	1b
+
+	/* get current_thread_info and current */
+	lis	r1,secondary_ti@ha
+	lwz	r1,secondary_ti@l(r1)
+	lwz	r2,TI_TASK(r1)
+
+	/* stack */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+
+	/* ptr to current thread */
+	addi	r4,r2,THREAD	/* address of our thread_struct */
+	mtspr	SPRN_SPRG3,r4
+
+	/* Setup the defaults for TLB entries */
+	li	r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
+	mtspr	SPRN_MAS4,r4
+
+	/* Jump to start_secondary */
+	lis	r4,MSR_KERNEL@h
+	ori	r4,r4,MSR_KERNEL@l
+	lis	r3,start_secondary@h
+	ori	r3,r3,start_secondary@l
+	mtspr	SPRN_SRR0,r3
+	mtspr	SPRN_SRR1,r4
+	sync
+	rfi
+	sync
+
+	.globl __secondary_hold_acknowledge
+__secondary_hold_acknowledge:
+	.long	-1
+#endif
+
 /*
  * We put a few things here that have to be page-aligned. This stuff
  * goes at the beginning of the data segment, which is page-aligned.
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 64299d28f364..7c6537777b2c 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -231,6 +231,7 @@ void ibmebus_free_irq(u32 ist, void *dev_id)
 	unsigned int irq = irq_find_mapping(NULL, ist);
 
 	free_irq(irq, dev_id);
+	irq_dispose_mapping(irq);
 }
 EXPORT_SYMBOL(ibmebus_free_irq);
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 48a347133f41..c744b327bcab 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -37,6 +37,7 @@ struct lppaca lppaca[] = {
 		.end_of_quantum = 0xfffffffffffffffful,
 		.slb_count = 64,
 		.vmxregs_in_use = 0,
+		.page_ins = 0,
 	},
 };
 
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f36936d9fda3..91c3f52e33a8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -37,13 +37,7 @@
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
 #include <asm/firmware.h>
-
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
+#include <asm/eeh.h>
 
 static DEFINE_SPINLOCK(hose_spinlock);
 
@@ -53,8 +47,9 @@ static int global_phb_number; /* Global phb counter */
 /* ISA Memory physical address */
 resource_size_t isa_mem_base;
 
-/* Default PCI flags is 0 */
-unsigned int ppc_pci_flags;
+/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
+unsigned int ppc_pci_flags = 0;
+
 
 static struct dma_mapping_ops *pci_dma_ops;
 
@@ -208,26 +203,6 @@ char __devinit *pcibios_setup(char *str)
 	return str;
 }
 
-void __devinit pcibios_setup_new_device(struct pci_dev *dev)
-{
-	struct dev_archdata *sd = &dev->dev.archdata;
-
-	sd->of_node = pci_device_to_OF_node(dev);
-
-	DBG("PCI: device %s OF node: %s\n", pci_name(dev),
-	    sd->of_node ? sd->of_node->full_name : "<none>");
-
-	sd->dma_ops = pci_dma_ops;
-#ifdef CONFIG_PPC32
-	sd->dma_data = (void *)PCI_DRAM_OFFSET;
-#endif
-	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
-	if (ppc_md.pci_dma_dev_setup)
-		ppc_md.pci_dma_dev_setup(dev);
-}
-EXPORT_SYMBOL(pcibios_setup_new_device);
-
 /*
  * Reads the interrupt pin to determine if interrupt is use by card.
  * If the interrupt is used, then gets the interrupt line from the
@@ -252,7 +227,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 		return -1;
 #endif
 
-	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
 
 #ifdef DEBUG
 	memset(&oirq, 0xff, sizeof(oirq));
@@ -276,26 +251,26 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 		    line == 0xff || line == 0) {
 			return -1;
 		}
-		DBG(" -> no map ! Using line %d (pin %d) from PCI config\n",
+		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
 			 line, pin);
 
 		virq = irq_create_mapping(NULL, line);
 		if (virq != NO_IRQ)
 			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
-		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
 			 oirq.size, oirq.specifier[0], oirq.specifier[1],
 			 oirq.controller->full_name);
 
 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
 					     oirq.size);
 	}
 	if(virq == NO_IRQ) {
-		DBG(" -> failed to map !\n");
+		pr_debug(" Failed to map !\n");
 		return -1;
 	}
 
-	DBG(" -> mapped to linux irq %d\n", virq);
+	pr_debug(" Mapped to linux irq %d\n", virq);
 
 	pci_dev->irq = virq;
 
@@ -451,8 +426,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 		pci_dev_put(pdev);
 	}
 
-	DBG("non-PCI map for %llx, prot: %lx\n",
+	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
 		 (unsigned long long)offset, prot);
 
 	return __pgprot(prot);
 }
@@ -853,15 +828,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 int pci_proc_domain(struct pci_bus *bus)
 {
 	struct pci_controller *hose = pci_bus_to_host(bus);
-#ifdef CONFIG_PPC64
-	return hose->buid != 0;
-#else
+
 	if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
 		return 0;
 	if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
 		return hose->global_number != 0;
 	return 1;
-#endif
 }
 
 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
@@ -1083,27 +1055,50 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
 	}
 }
 
-static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
+void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
 {
-	struct pci_dev *dev = bus->self;
-
-	pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
-
-	/* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
-	 * now differently between 32 and 64 bits.
-	 */
-	if (dev != NULL)
+	/* Fix up the bus resources for P2P bridges */
+	if (bus->self != NULL)
 		pcibios_fixup_bridge(bus);
 
-	/* Additional setup that is different between 32 and 64 bits for now */
-	pcibios_do_bus_setup(bus);
-
-	/* Platform specific bus fixups */
+	/* Platform specific bus fixups. This is currently only used
+	 * by fsl_pci and I'm hoping to get rid of it at some point
+	 */
 	if (ppc_md.pcibios_fixup_bus)
 		ppc_md.pcibios_fixup_bus(bus);
 
-	/* Read default IRQs and fixup if necessary */
+	/* Setup bus DMA mappings */
+	if (ppc_md.pci_dma_bus_setup)
+		ppc_md.pci_dma_bus_setup(bus);
+}
+
+void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	pr_debug("PCI: Fixup bus devices %d (%s)\n",
+		 bus->number, bus->self ? pci_name(bus->self) : "PHB");
+
 	list_for_each_entry(dev, &bus->devices, bus_list) {
+		struct dev_archdata *sd = &dev->dev.archdata;
+
+		/* Setup OF node pointer in archdata */
+		sd->of_node = pci_device_to_OF_node(dev);
+
+		/* Fixup NUMA node as it may not be setup yet by the generic
+		 * code and is needed by the DMA init
+		 */
+		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+		/* Hook up default DMA ops */
+		sd->dma_ops = pci_dma_ops;
+		sd->dma_data = (void *)PCI_DRAM_OFFSET;
+
+		/* Additional platform DMA/iommu setup */
+		if (ppc_md.pci_dma_dev_setup)
+			ppc_md.pci_dma_dev_setup(dev);
+
+		/* Read default IRQs and fixup if necessary */
 		pci_read_irq_line(dev);
 		if (ppc_md.pci_irq_fixup)
 			ppc_md.pci_irq_fixup(dev);
@@ -1113,22 +1108,19 @@ static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
 void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 {
 	/* When called from the generic PCI probe, read PCI<->PCI bridge
-	 * bases before proceeding
+	 * bases. This is -not- called when generating the PCI tree from
+	 * the OF device-tree.
 	 */
 	if (bus->self != NULL)
 		pci_read_bridge_bases(bus);
-	__pcibios_fixup_bus(bus);
-}
-EXPORT_SYMBOL(pcibios_fixup_bus);
 
-/* When building a bus from the OF tree rather than probing, we need a
- * slightly different version of the fixup which doesn't read the
- * bridge bases using config space accesses
- */
-void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus)
-{
-	__pcibios_fixup_bus(bus);
+	/* Now fixup the bus bus */
+	pcibios_setup_bus_self(bus);
+
+	/* Now fixup devices on that bus */
+	pcibios_setup_bus_devices(bus);
 }
+EXPORT_SYMBOL(pcibios_fixup_bus);
 
 static int skip_isa_ioresource_align(struct pci_dev *dev)
 {
@@ -1198,10 +1190,10 @@ static int __init reparent_resources(struct resource *parent,
 	*pp = NULL;
 	for (p = res->child; p != NULL; p = p->sibling) {
 		p->parent = res;
-		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
+		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
 			 p->name,
 			 (unsigned long long)p->start,
 			 (unsigned long long)p->end, res->name);
 	}
 	return 0;
 }
@@ -1245,9 +1237,12 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
 	int i;
 	struct resource *res, *pr;
 
+	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
+		 pci_domain_nr(bus), bus->number);
+
 	for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
 		if ((res = bus->resource[i]) == NULL || !res->flags
-		    || res->start > res->end)
+		    || res->start > res->end || res->parent)
 			continue;
 		if (bus->parent == NULL)
 			pr = (res->flags & IORESOURCE_IO) ?
@@ -1271,14 +1266,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
 			}
 		}
 
-		DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
+		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
 			 "[0x%x], parent %p (%s)\n",
 			 bus->self ? pci_name(bus->self) : "PHB",
 			 bus->number, i,
 			 (unsigned long long)res->start,
 			 (unsigned long long)res->end,
 			 (unsigned int)res->flags,
 			 pr, (pr && pr->name) ? pr->name : "nil");
 
 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
 			if (request_resource(pr, res) == 0)
@@ -1305,11 +1300,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
 {
 	struct resource *pr, *r = &dev->resource[idx];
 
-	DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
+	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
 		 pci_name(dev), idx,
 		 (unsigned long long)r->start,
 		 (unsigned long long)r->end,
 		 (unsigned int)r->flags);
 
 	pr = pci_find_parent_resource(dev, r);
 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1317,10 +1312,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
 		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
 		       " of device %s, will remap\n", idx, pci_name(dev));
 		if (pr)
-			DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr,
-			    (unsigned long long)pr->start,
-			    (unsigned long long)pr->end,
-			    (unsigned int)pr->flags);
+			pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
+				 pr,
+				 (unsigned long long)pr->start,
+				 (unsigned long long)pr->end,
+				 (unsigned int)pr->flags);
 		/* We'll assign a new address later */
 		r->flags |= IORESOURCE_UNSET;
 		r->end -= r->start;
@@ -1358,7 +1354,8 @@ static void __init pcibios_allocate_resources(int pass)
 			 * but keep it unregistered.
 			 */
 			u32 reg;
-			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+			pr_debug("PCI: Switching off ROM of %s\n",
+				 pci_name(dev));
 			r->flags &= ~IORESOURCE_ROM_ENABLE;
 			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
 			pci_write_config_dword(dev, dev->rom_base_reg,
@@ -1383,7 +1380,7 @@ void __init pcibios_resource_survey(void)
 	}
 
 	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
-		DBG("PCI: Assigning unassigned resouces...\n");
+		pr_debug("PCI: Assigning unassigned resouces...\n");
 		pci_assign_unassigned_resources();
 	}
 
@@ -1393,9 +1390,11 @@ void __init pcibios_resource_survey(void)
 }
 
 #ifdef CONFIG_HOTPLUG
-/* This is used by the pSeries hotplug driver to allocate resource
+
+/* This is used by the PCI hotplug driver to allocate resource
  * of newly plugged busses. We can try to consolidate with the
- * rest of the code later, for now, keep it as-is
+ * rest of the code later, for now, keep it as-is as our main
+ * resource allocation function doesn't deal with sub-trees yet.
  */
 void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
 {
@@ -1410,6 +1409,14 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
 
 			if (r->parent || !r->start || !r->flags)
 				continue;
+
+			pr_debug("PCI: Claiming %s: "
+				 "Resource %d: %016llx..%016llx [%x]\n",
+				 pci_name(dev), i,
+				 (unsigned long long)r->start,
+				 (unsigned long long)r->end,
+				 (unsigned int)r->flags);
+
 			pci_claim_resource(dev, i);
 		}
 	}
@@ -1418,6 +1425,31 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
 		pcibios_claim_one_bus(child_bus);
 }
 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
+
+
+/* pcibios_finish_adding_to_bus
+ *
+ * This is to be called by the hotplug code after devices have been
+ * added to a bus, this include calling it for a PHB that is just
+ * being added
+ */
+void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+{
+	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
+		 pci_domain_nr(bus), bus->number);
+
+	/* Allocate bus and devices resources */
+	pcibios_allocate_bus_resources(bus);
+	pcibios_claim_one_bus(bus);
+
+	/* Add new devices to global lists. Register in proc, sysfs. */
+	pci_bus_add_devices(bus);
+
+	/* Fixup EEH */
+	eeh_add_device_tree_late(bus);
+}
+EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
 #endif /* CONFIG_HOTPLUG */
 
 int pcibios_enable_device(struct pci_dev *dev, int mask)
@@ -1428,3 +1460,61 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 
 	return pci_enable_resources(dev, mask);
 }
+
+void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
+{
+	struct pci_bus *bus = hose->bus;
+	struct resource *res;
+	int i;
+
+	/* Hookup PHB IO resource */
+	bus->resource[0] = res = &hose->io_resource;
+
+	if (!res->flags) {
+		printk(KERN_WARNING "PCI: I/O resource not set for host"
+		       " bridge %s (domain %d)\n",
+		       hose->dn->full_name, hose->global_number);
+#ifdef CONFIG_PPC32
+		/* Workaround for lack of IO resource only on 32-bit */
+		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
+		res->end = res->start + IO_SPACE_LIMIT;
+		res->flags = IORESOURCE_IO;
+#endif /* CONFIG_PPC32 */
+	}
+
+	pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
+		 (unsigned long long)res->start,
+		 (unsigned long long)res->end,
+		 (unsigned long)res->flags);
+
+	/* Hookup PHB Memory resources */
+	for (i = 0; i < 3; ++i) {
+		res = &hose->mem_resources[i];
+		if (!res->flags) {
+			if (i > 0)
+				continue;
+			printk(KERN_ERR "PCI: Memory resource 0 not set for "
+			       "host bridge %s (domain %d)\n",
+			       hose->dn->full_name, hose->global_number);
+#ifdef CONFIG_PPC32
+			/* Workaround for lack of MEM resource only on 32-bit */
+			res->start = hose->pci_mem_offset;
+			res->end = (resource_size_t)-1LL;
+			res->flags = IORESOURCE_MEM;
+#endif /* CONFIG_PPC32 */
+		}
+		bus->resource[i+1] = res;
+
+		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
+			 (unsigned long long)res->start,
+			 (unsigned long long)res->end,
+			 (unsigned long)res->flags);
+	}
+
+	pr_debug("PCI: PHB MEM offset = %016llx\n",
+		 (unsigned long long)hose->pci_mem_offset);
+	pr_debug("PCI: PHB IO offset = %08lx\n",
+		 (unsigned long)hose->io_base_virt - _IO_BASE);
+
+}
+
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 131b1dfa68c6..7ad11e592f2b 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -26,12 +26,6 @@
 
 #undef DEBUG
 
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
 unsigned long isa_io_base = 0;
 unsigned long pci_dram_offset = 0;
 int pcibios_assign_bus_offset = 1;
@@ -275,14 +269,14 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
 	if (!have_of)
 		return NULL;
 
-	DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
+	pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
 	parent = scan_OF_for_pci_bus(bus);
 	if (parent == NULL)
 		return NULL;
-	DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
+	pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
 	np = scan_OF_for_pci_dev(parent, devfn);
 	of_node_put(parent);
-	DBG(" result is %s\n", np ? np->full_name : "<NULL>");
+	pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
 
 	/* XXX most callers don't release the returned node
 	 * mostly because ppc64 doesn't increase the refcount,
@@ -379,10 +373,41 @@ void pcibios_make_OF_bus_map(void)
 }
 #endif /* CONFIG_PPC_OF */
 
+static void __devinit pcibios_scan_phb(struct pci_controller *hose)
+{
+	struct pci_bus *bus;
+	struct device_node *node = hose->dn;
+	unsigned long io_offset;
+	struct resource *res = &hose->io_resource;
+
+	pr_debug("PCI: Scanning PHB %s\n",
+		 node ? node->full_name : "<NO NAME>");
+
+	/* Create an empty bus for the toplevel */
+	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
+	if (bus == NULL) {
+		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+		       hose->global_number);
+		return;
+	}
+	bus->secondary = hose->first_busno;
+	hose->bus = bus;
+
+	/* Fixup IO space offset */
+	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
+	res->start = (res->start + io_offset) & 0xffffffffu;
+	res->end = (res->end + io_offset) & 0xffffffffu;
+
+	/* Wire up PHB bus resources */
+	pcibios_setup_phb_resources(hose);
+
+	/* Scan children */
+	hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+}
+
 static int __init pcibios_init(void)
 {
 	struct pci_controller *hose, *tmp;
-	struct pci_bus *bus;
 	int next_busno = 0;
 
 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
@@ -395,12 +420,8 @@ static int __init pcibios_init(void)
 		if (pci_assign_all_buses)
 			hose->first_busno = next_busno;
 		hose->last_busno = 0xff;
-		bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
-					    hose->ops, hose);
-		if (bus) {
-			pci_bus_add_devices(bus);
-			hose->last_busno = bus->subordinate;
-		}
+		pcibios_scan_phb(hose);
+		pci_bus_add_devices(hose->bus);
 		if (pci_assign_all_buses || next_busno <= hose->last_busno)
 			next_busno = hose->last_busno + pcibios_assign_bus_offset;
 	}
@@ -425,54 +446,6 @@ static int __init pcibios_init(void)
 
 subsys_initcall(pcibios_init);
 
-void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
-{
-	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
-	unsigned long io_offset;
-	struct resource *res;
-	int i;
-	struct pci_dev *dev;
-
-	/* Hookup PHB resources */
-	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
-	if (bus->parent == NULL) {
-		/* This is a host bridge - fill in its resources */
-		hose->bus = bus;
-
-		bus->resource[0] = res = &hose->io_resource;
-		if (!res->flags) {
-			if (io_offset)
-				printk(KERN_ERR "I/O resource not set for host"
-				       " bridge %d\n", hose->global_number);
-			res->start = 0;
-			res->end = IO_SPACE_LIMIT;
-			res->flags = IORESOURCE_IO;
-		}
-		res->start = (res->start + io_offset) & 0xffffffffu;
-		res->end = (res->end + io_offset) & 0xffffffffu;
-
-		for (i = 0; i < 3; ++i) {
-			res = &hose->mem_resources[i];
-			if (!res->flags) {
-				if (i > 0)
-					continue;
-				printk(KERN_ERR "Memory resource not set for "
-				       "host bridge %d\n", hose->global_number);
-				res->start = hose->pci_mem_offset;
-				res->end = ~0U;
-				res->flags = IORESOURCE_MEM;
-			}
-			bus->resource[i+1] = res;
-		}
-	}
-
-	if (ppc_md.pci_dma_bus_setup)
-		ppc_md.pci_dma_bus_setup(bus);
-
-	list_for_each_entry(dev, &bus->devices, bus_list)
-		pcibios_setup_new_device(dev);
-}
-
 /* the next one is stolen from the alpha port... */
 void __init
 pcibios_update_irq(struct pci_dev *dev, int irq)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3502b9101e6b..39fadc6e1492 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -32,13 +32,6 @@
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
 
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
 unsigned long pci_probe_only = 1;
 
 /* pci_io_base -- the base address from which io bars are offsets.
@@ -102,7 +95,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
 	addrs = of_get_property(node, "assigned-addresses", &proplen);
 	if (!addrs)
 		return;
-	DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
+	pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
 	for (; proplen >= 20; proplen -= 20, addrs += 5) {
 		flags = pci_parse_of_flags(addrs[0]);
 		if (!flags)
@@ -112,8 +105,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
 		if (!size)
 			continue;
 		i = addrs[0] & 0xff;
-		DBG(" base: %llx, size: %llx, i: %x\n",
-		    (unsigned long long)base, (unsigned long long)size, i);
+		pr_debug(" base: %llx, size: %llx, i: %x\n",
+			 (unsigned long long)base,
+			 (unsigned long long)size, i);
 
 		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -144,7 +138,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 	if (type == NULL)
 		type = "";
 
-	DBG(" create device, devfn: %x, type: %s\n", devfn, type);
+	pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
 
 	dev->bus = bus;
 	dev->sysdata = node;
@@ -165,8 +159,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 	dev->class = get_int_prop(node, "class-code", 0);
 	dev->revision = get_int_prop(node, "revision-id", 0);
 
-	DBG(" class: 0x%x\n", dev->class);
-	DBG(" revision: 0x%x\n", dev->revision);
+	pr_debug(" class: 0x%x\n", dev->class);
+	pr_debug(" revision: 0x%x\n", dev->revision);
 
 	dev->current_state = 4;		/* unknown power state */
 	dev->error_state = pci_channel_io_normal;
@@ -187,7 +181,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 
 	pci_parse_of_addrs(node, dev);
 
-	DBG(" adding to system ...\n");
+	pr_debug(" adding to system ...\n");
 
 	pci_device_add(dev, bus);
 
@@ -195,19 +189,20 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 }
 EXPORT_SYMBOL(of_create_pci_dev);
 
-void __devinit of_scan_bus(struct device_node *node,
-			   struct pci_bus *bus)
+static void __devinit __of_scan_bus(struct device_node *node,
+				    struct pci_bus *bus, int rescan_existing)
 {
 	struct device_node *child;
 	const u32 *reg;
 	int reglen, devfn;
 	struct pci_dev *dev;
 
-	DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
+	pr_debug("of_scan_bus(%s) bus no %d... \n",
+		 node->full_name, bus->number);
 
 	/* Scan direct children */
 	for_each_child_of_node(node, child) {
-		DBG(" * %s\n", child->full_name);
+		pr_debug(" * %s\n", child->full_name);
 		reg = of_get_property(child, "reg", &reglen);
 		if (reg == NULL || reglen < 20)
 			continue;
@@ -217,11 +212,15 @@ void __devinit of_scan_bus(struct device_node *node,
 		dev = of_create_pci_dev(child, bus, devfn);
 		if (!dev)
 			continue;
-		DBG(" dev header type: %x\n", dev->hdr_type);
+		pr_debug(" dev header type: %x\n", dev->hdr_type);
 	}
 
-	/* Ally all fixups */
-	pcibios_fixup_of_probed_bus(bus);
+	/* Apply all fixups necessary. We don't fixup the bus "self"
+	 * for an existing bridge that is being rescanned
+	 */
+	if (!rescan_existing)
+		pcibios_setup_bus_self(bus);
+	pcibios_setup_bus_devices(bus);
 
 	/* Now scan child busses */
 	list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -233,7 +232,20 @@ void __devinit of_scan_bus(struct device_node *node,
 		}
 	}
 }
-EXPORT_SYMBOL(of_scan_bus);
+
+void __devinit of_scan_bus(struct device_node *node,
+			   struct pci_bus *bus)
+{
+	__of_scan_bus(node, bus, 0);
+}
+EXPORT_SYMBOL_GPL(of_scan_bus);
+
+void __devinit of_rescan_bus(struct device_node *node,
+			     struct pci_bus *bus)
+{
+	__of_scan_bus(node, bus, 1);
+}
+EXPORT_SYMBOL_GPL(of_rescan_bus);
 
 void __devinit of_scan_pci_bridge(struct device_node *node,
 				  struct pci_dev *dev)
@@ -245,7 +257,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
 	unsigned int flags;
 	u64 size;
 
-	DBG("of_scan_pci_bridge(%s)\n", node->full_name);
+	pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);
 
 	/* parse bus-range property */
 	busrange = of_get_property(node, "bus-range", &len);
@@ -309,12 +321,12 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
 	}
 	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
 		bus->number);
-	DBG(" bus name: %s\n", bus->name);
+	pr_debug(" bus name: %s\n", bus->name);
 
 	mode = PCI_PROBE_NORMAL;
 	if (ppc_md.pci_probe_mode)
 		mode = ppc_md.pci_probe_mode(bus);
-	DBG(" probe mode: %d\n", mode);
+	pr_debug(" probe mode: %d\n", mode);
 
 	if (mode == PCI_PROBE_DEVTREE)
 		of_scan_bus(node, bus);
@@ -327,9 +339,10 @@ void __devinit scan_phb(struct pci_controller *hose)
 {
 	struct pci_bus *bus;
 	struct device_node *node = hose->dn;
-	int i, mode;
+	int mode;
 
-	DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+	pr_debug("PCI: Scanning PHB %s\n",
+		 node ? node->full_name : "<NO NAME>");
 
 	/* Create an empty bus for the toplevel */
 	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
@@ -345,26 +358,13 @@ void __devinit scan_phb(struct pci_controller *hose)
 	pcibios_map_io_space(bus);
 
 	/* Wire up PHB bus resources */
-	DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n",
-	    hose->io_resource.start, hose->io_resource.end,
-	    hose->io_resource.flags);
-	bus->resource[0] = &hose->io_resource;
-	for (i = 0; i < 3; ++i) {
-		DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
-		    hose->mem_resources[i].start,
-		    hose->mem_resources[i].end,
-		    hose->mem_resources[i].flags);
-		bus->resource[i+1] = &hose->mem_resources[i];
-	}
-	DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
-	DBG("PCI: PHB IO offset = %08lx\n",
-	    (unsigned long)hose->io_base_virt - _IO_BASE);
+	pcibios_setup_phb_resources(hose);
 
 	/* Get probe mode and perform scan */
 	mode = PCI_PROBE_NORMAL;
 	if (node && ppc_md.pci_probe_mode)
 		mode = ppc_md.pci_probe_mode(bus);
-	DBG(" probe mode: %d\n", mode);
+	pr_debug(" probe mode: %d\n", mode);
 	if (mode == PCI_PROBE_DEVTREE) {
 		bus->subordinate = hose->last_busno;
 		of_scan_bus(node, bus);
@@ -380,7 +380,7 @@ static int __init pcibios_init(void)
 
 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-	/* For now, override phys_mem_access_prot. If we need it,
+	/* For now, override phys_mem_access_prot. If we need it,g
 	 * later, we may move that initialization to each ppc_md
 	 */
 	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
@@ -388,6 +388,11 @@ static int __init pcibios_init(void)
 	if (pci_probe_only)
 		ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
 
+	/* On ppc64, we always enable PCI domains and we keep domain 0
+	 * backward compatible in /proc for video cards
+	 */
+	ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
+
 	/* Scan all of the recorded PCI controllers. */
 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
 		scan_phb(hose);
@@ -422,8 +427,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 	if (bus->self) {
 		struct resource *res = bus->resource[0];
 
-		DBG("IO unmapping for PCI-PCI bridge %s\n",
+		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
 			 pci_name(bus->self));
 
 		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
 					 res->end + _IO_BASE + 1);
@@ -437,8 +442,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 	if (hose->io_base_alloc == 0)
 		return 0;
 
-	DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
-	DBG(" alloc=0x%p\n", hose->io_base_alloc);
+	pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
+	pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
 
 	/* This is a PHB, we fully unmap the IO area */
 	vunmap(hose->io_base_alloc);
@@ -463,11 +468,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 	 * thus HPTEs will be faulted in when needed
 	 */
 	if (bus->self) {
-		DBG("IO mapping for PCI-PCI bridge %s\n",
+		pr_debug("IO mapping for PCI-PCI bridge %s\n",
 			 pci_name(bus->self));
-		DBG(" virt=0x%016lx...0x%016lx\n",
+		pr_debug(" virt=0x%016lx...0x%016lx\n",
 			 bus->resource[0]->start + _IO_BASE,
 			 bus->resource[0]->end + _IO_BASE);
 		return 0;
 	}
 
@@ -496,11 +501,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 	hose->io_base_virt = (void __iomem *)(area->addr +
 					      hose->io_base_phys - phys_page);
 
-	DBG("IO mapping for PHB %s\n", hose->dn->full_name);
-	DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+	pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
+	pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
 		 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
-	DBG(" size=0x%016lx (alloc=0x%016lx)\n",
+	pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
 		 hose->pci_io_size, size_page);
 
 	/* Establish the mapping */
 	if (__ioremap_at(phys_page, area->addr, size_page,
@@ -512,24 +517,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
 	hose->io_resource.start += io_virt_offset;
 	hose->io_resource.end += io_virt_offset;
 
-	DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
+	pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
 		 hose->io_resource.start, hose->io_resource.end);
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pcibios_map_io_space);
 
-void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
-{
-	struct pci_dev *dev;
-
-	if (ppc_md.pci_dma_bus_setup)
-		ppc_md.pci_dma_bus_setup(bus);
-
-	list_for_each_entry(dev, &bus->devices, bus_list)
-		pcibios_setup_new_device(dev);
-}
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
 	struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 957bded0020d..51b201ddf9a1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -467,6 +467,8 @@ static struct regbit {
 	{MSR_VEC,	"VEC"},
 	{MSR_VSX,	"VSX"},
 	{MSR_ME,	"ME"},
+	{MSR_CE,	"CE"},
+	{MSR_DE,	"DE"},
 	{MSR_IR,	"IR"},
 	{MSR_DR,	"DR"},
 	{0,		NULL}
@@ -998,7 +1000,7 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
-static int kstack_depth_to_print = 64;
+static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
 
 void show_stack(struct task_struct *tsk, unsigned long *stack)
 {
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index a11d68976dc8..8c1335566089 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -734,10 +734,7 @@ void of_irq_map_init(unsigned int flags)
 	if (flags & OF_IMAP_NO_PHANDLE) {
 		struct device_node *np;
 
-		for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) {
-			if (of_get_property(np, "interrupt-controller", NULL)
-			    == NULL)
-				continue;
+		for_each_node_with_property(np, "interrupt-controller") {
 			/* Skip /chosen/interrupt-controller */
 			if (strcmp(np->name, "chosen") == 0)
 				continue;
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 589a2797eac2..8869001ab5d7 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -301,51 +301,3 @@ void __init find_and_init_phbs(void)
 #endif /* CONFIG_PPC32 */
 	}
 }
-
-/* RPA-specific bits for removing PHBs */
-int pcibios_remove_root_bus(struct pci_controller *phb)
-{
-	struct pci_bus *b = phb->bus;
-	struct resource *res;
-	int rc, i;
-
-	res = b->resource[0];
-	if (!res->flags) {
-		printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__,
-		       b->name);
-		return 1;
-	}
-
-	rc = pcibios_unmap_io_space(b);
-	if (rc) {
-		printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
-		       __func__, b->name);
-		return 1;
-	}
-
-	if (release_resource(res)) {
-		printk(KERN_ERR "%s: failed to release IO on bus %s\n",
-		       __func__, b->name);
-		return 1;
-	}
-
-	for (i = 1; i < 3; ++i) {
-		res = b->resource[i];
-		if (!res->flags && i == 0) {
-			printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
-			       __func__, b->name);
-			return 1;
-		}
-		if (res->flags && release_resource(res)) {
-			printk(KERN_ERR
-			       "%s: failed to release IO %d on bus %s\n",
-			       __func__, i, b->name);
-			return 1;
-		}
-	}
-
-	pcibios_free_controller(phb);
-
-	return 0;
-}
-EXPORT_SYMBOL(pcibios_remove_root_bus);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 169d74cef157..93c875ae985a 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -606,8 +606,6 @@ void __init setup_per_cpu_areas(void)
 
 	for_each_possible_cpu(i) {
 		ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
 
 		paca[i].data_offset = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index bc892e69b4f7..a5e54526403d 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -113,7 +113,7 @@ void __devinit smp_generic_give_timebase(void)
 {
 	int i, score, score2, old, min=0, max=5000, offset=1000;
 
-	printk("Synchronizing timebase\n");
+	pr_debug("Software timebase sync\n");
 
 	/* if this fails then this kernel won't work anyway... */
 	tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
@@ -123,13 +123,13 @@ void __devinit smp_generic_give_timebase(void)
 	while (!tbsync->ack)
 		barrier();
 
-	printk("Got ack\n");
+	pr_debug("Got ack\n");
 
 	/* binary search */
 	for (old = -1; old != offset ; offset = (min+max) / 2) {
 		score = start_contest(kSetAndTest, offset, NUM_ITER);
 
-		printk("score %d, offset %d\n", score, offset );
+		pr_debug("score %d, offset %d\n", score, offset );
 
 		if( score > 0 )
 			max = offset;
@@ -140,8 +140,8 @@ void __devinit smp_generic_give_timebase(void)
 	score = start_contest(kSetAndTest, min, NUM_ITER);
 	score2 = start_contest(kSetAndTest, max, NUM_ITER);
 
-	printk("Min %d (score %d), Max %d (score %d)\n",
+	pr_debug("Min %d (score %d), Max %d (score %d)\n",
 		 min, score, max, score2);
 	score = abs(score);
 	score2 = abs(score2);
 	offset = (score < score2) ? min : max;
@@ -155,7 +155,7 @@ void __devinit smp_generic_give_timebase(void)
155 if (score2 <= score || score2 < 20) 155 if (score2 <= score || score2 < 20)
156 break; 156 break;
157 } 157 }
158 printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER ); 158 pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
159 159
160 /* exiting */ 160 /* exiting */
161 tbsync->cmd = kExit; 161 tbsync->cmd = kExit;
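
The printk()-to-pr_debug() conversions above make the timebase-sync chatter opt-in. A minimal sketch of how that output would be re-enabled for this file, assuming the usual kernel convention that pr_debug() compiles to nothing unless DEBUG is defined for the file (or dynamic debug enables it at run time):

	/* Sketch: defining DEBUG before the first include turns the pr_debug()
	 * calls above back into printk(KERN_DEBUG ...) output; without it they
	 * are silent by default, unlike the printk() calls they replace. */
	#define DEBUG
	#include <linux/kernel.h>
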
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ff9f7010097d..a59d8d72bb97 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -123,6 +123,65 @@ void smp_message_recv(int msg)
123 } 123 }
124} 124}
125 125
126static irqreturn_t call_function_action(int irq, void *data)
127{
128 generic_smp_call_function_interrupt();
129 return IRQ_HANDLED;
130}
131
132static irqreturn_t reschedule_action(int irq, void *data)
133{
134 /* we just need the return path side effect of checking need_resched */
135 return IRQ_HANDLED;
136}
137
138static irqreturn_t call_function_single_action(int irq, void *data)
139{
140 generic_smp_call_function_single_interrupt();
141 return IRQ_HANDLED;
142}
143
144static irqreturn_t debug_ipi_action(int irq, void *data)
145{
146 smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
147 return IRQ_HANDLED;
148}
149
150static irq_handler_t smp_ipi_action[] = {
151 [PPC_MSG_CALL_FUNCTION] = call_function_action,
152 [PPC_MSG_RESCHEDULE] = reschedule_action,
153 [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
154 [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
155};
156
157const char *smp_ipi_name[] = {
158 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
159 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
160 [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
161 [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
162};
163
164/* optional function to request ipi, for controllers with >= 4 ipis */
165int smp_request_message_ipi(int virq, int msg)
166{
167 int err;
168
169 if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
170 return -EINVAL;
171 }
172#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
173 if (msg == PPC_MSG_DEBUGGER_BREAK) {
174 return 1;
175 }
176#endif
177 err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
178 smp_ipi_name[msg], 0);
179 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
180 virq, smp_ipi_name[msg], err);
181
182 return err;
183}
184
126void smp_send_reschedule(int cpu) 185void smp_send_reschedule(int cpu)
127{ 186{
128 if (likely(smp_ops)) 187 if (likely(smp_ops))
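
A hedged usage sketch for the new smp_request_message_ipi() helper: a platform whose interrupt controller exposes four or more IPIs could wire one virq per message. example_map_ipi() and the function name are invented for illustration; the message constants, the helper and its return convention come from the hunk above:

	static void __init example_request_ipis(void)
	{
		static const int msgs[] = {
			PPC_MSG_CALL_FUNCTION,
			PPC_MSG_RESCHEDULE,
			PPC_MSG_CALL_FUNC_SINGLE,
			PPC_MSG_DEBUGGER_BREAK,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(msgs); i++) {
			int virq = example_map_ipi(msgs[i]);	/* hypothetical: map msg to a virq */

			/* smp_request_message_ipi() returns 0 on success, 1 when the
			 * debugger IPI is compiled out, or a negative errno from
			 * request_irq(). */
			if (smp_request_message_ipi(virq, msgs[i]) < 0)
				printk(KERN_WARNING "IPI for message %d not available\n",
				       msgs[i]);
		}
	}
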
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e2ee66b5831d..e1f3a5140429 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -164,8 +164,6 @@ static u64 tb_to_ns_scale __read_mostly;
164static unsigned tb_to_ns_shift __read_mostly; 164static unsigned tb_to_ns_shift __read_mostly;
165static unsigned long boot_tb __read_mostly; 165static unsigned long boot_tb __read_mostly;
166 166
167static struct gettimeofday_struct do_gtod;
168
169extern struct timezone sys_tz; 167extern struct timezone sys_tz;
170static long timezone_offset; 168static long timezone_offset;
171 169
@@ -415,31 +413,9 @@ void udelay(unsigned long usecs)
415} 413}
416EXPORT_SYMBOL(udelay); 414EXPORT_SYMBOL(udelay);
417 415
418
419/*
420 * There are two copies of tb_to_xs and stamp_xsec so that no
421 * lock is needed to access and use these values in
422 * do_gettimeofday. We alternate the copies and as long as a
423 * reasonable time elapses between changes, there will never
424 * be inconsistent values. ntpd has a minimum of one minute
425 * between updates.
426 */
427static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, 416static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
428 u64 new_tb_to_xs) 417 u64 new_tb_to_xs)
429{ 418{
430 unsigned temp_idx;
431 struct gettimeofday_vars *temp_varp;
432
433 temp_idx = (do_gtod.var_idx == 0);
434 temp_varp = &do_gtod.vars[temp_idx];
435
436 temp_varp->tb_to_xs = new_tb_to_xs;
437 temp_varp->tb_orig_stamp = new_tb_stamp;
438 temp_varp->stamp_xsec = new_stamp_xsec;
439 smp_mb();
440 do_gtod.varp = temp_varp;
441 do_gtod.var_idx = temp_idx;
442
443 /* 419 /*
444 * tb_update_count is used to allow the userspace gettimeofday code 420 * tb_update_count is used to allow the userspace gettimeofday code
445 * to assure itself that it sees a consistent view of the tb_to_xs and 421 * to assure itself that it sees a consistent view of the tb_to_xs and
@@ -456,6 +432,7 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
456 vdso_data->tb_to_xs = new_tb_to_xs; 432 vdso_data->tb_to_xs = new_tb_to_xs;
457 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 433 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
458 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 434 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
435 vdso_data->stamp_xtime = xtime;
459 smp_wmb(); 436 smp_wmb();
460 ++(vdso_data->tb_update_count); 437 ++(vdso_data->tb_update_count);
461} 438}
@@ -514,9 +491,7 @@ static int __init iSeries_tb_recal(void)
514 tb_ticks_per_sec = new_tb_ticks_per_sec; 491 tb_ticks_per_sec = new_tb_ticks_per_sec;
515 calc_cputime_factors(); 492 calc_cputime_factors();
516 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); 493 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
517 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
518 tb_to_xs = divres.result_low; 494 tb_to_xs = divres.result_low;
519 do_gtod.varp->tb_to_xs = tb_to_xs;
520 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 495 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
521 vdso_data->tb_to_xs = tb_to_xs; 496 vdso_data->tb_to_xs = tb_to_xs;
522 } 497 }
@@ -988,15 +963,6 @@ void __init time_init(void)
988 sys_tz.tz_dsttime = 0; 963 sys_tz.tz_dsttime = 0;
989 } 964 }
990 965
991 do_gtod.varp = &do_gtod.vars[0];
992 do_gtod.var_idx = 0;
993 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
994 __get_cpu_var(last_jiffy) = tb_last_jiffy;
995 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
996 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
997 do_gtod.varp->tb_to_xs = tb_to_xs;
998 do_gtod.tb_to_us = tb_to_us;
999
1000 vdso_data->tb_orig_stamp = tb_last_jiffy; 966 vdso_data->tb_orig_stamp = tb_last_jiffy;
1001 vdso_data->tb_update_count = 0; 967 vdso_data->tb_update_count = 0;
1002 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; 968 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
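
The time.c hunk above drops the kernel-internal do_gtod double-buffer and instead publishes stamp_xtime in vdso_data, relying only on tb_update_count for reader consistency. A C sketch of the reader-side protocol that the vDSO assembly below implements; field names follow struct vdso_data, field widths are approximate, and smp_rmb() stands in for the fake data dependency the assembly uses:

	/* Sketch of the lockless snapshot: spin while an update is in flight
	 * (odd count), read the fields, and retry if the count changed. */
	static void snapshot_gtod(struct vdso_data *vd, struct timespec *stamp,
				  u64 *tb_orig, u64 *scale)
	{
		u64 seq;

		do {
			do {
				seq = vd->tb_update_count;
			} while (seq & 1);	/* update_gtod() mid-update */
			smp_rmb();
			*stamp   = vd->stamp_xtime;
			*tb_orig = vd->tb_orig_stamp;
			*scale   = vd->tb_to_xs;
			smp_rmb();
		} while (seq != vd->tb_update_count);
	}
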
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f5def6cf5cd6..5457e9575685 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1160,37 +1160,85 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address,
1160#ifdef CONFIG_SPE 1160#ifdef CONFIG_SPE
1161void SPEFloatingPointException(struct pt_regs *regs) 1161void SPEFloatingPointException(struct pt_regs *regs)
1162{ 1162{
1163 extern int do_spe_mathemu(struct pt_regs *regs);
1163 unsigned long spefscr; 1164 unsigned long spefscr;
1164 int fpexc_mode; 1165 int fpexc_mode;
1165 int code = 0; 1166 int code = 0;
1167 int err;
1168
1169 preempt_disable();
1170 if (regs->msr & MSR_SPE)
1171 giveup_spe(current);
1172 preempt_enable();
1166 1173
1167 spefscr = current->thread.spefscr; 1174 spefscr = current->thread.spefscr;
1168 fpexc_mode = current->thread.fpexc_mode; 1175 fpexc_mode = current->thread.fpexc_mode;
1169 1176
1170 /* Hardware does not neccessarily set sticky
1171 * underflow/overflow/invalid flags */
1172 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) { 1177 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1173 code = FPE_FLTOVF; 1178 code = FPE_FLTOVF;
1174 spefscr |= SPEFSCR_FOVFS;
1175 } 1179 }
1176 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) { 1180 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1177 code = FPE_FLTUND; 1181 code = FPE_FLTUND;
1178 spefscr |= SPEFSCR_FUNFS;
1179 } 1182 }
1180 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) 1183 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1181 code = FPE_FLTDIV; 1184 code = FPE_FLTDIV;
1182 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) { 1185 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1183 code = FPE_FLTINV; 1186 code = FPE_FLTINV;
1184 spefscr |= SPEFSCR_FINVS;
1185 } 1187 }
1186 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES)) 1188 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1187 code = FPE_FLTRES; 1189 code = FPE_FLTRES;
1188 1190
1189 current->thread.spefscr = spefscr; 1191 err = do_spe_mathemu(regs);
1192 if (err == 0) {
1193 regs->nip += 4; /* skip emulated instruction */
1194 emulate_single_step(regs);
1195 return;
1196 }
1197
1198 if (err == -EFAULT) {
1199 /* got an error reading the instruction */
1200 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1201 } else if (err == -EINVAL) {
1202 /* didn't recognize the instruction */
1203 printk(KERN_ERR "unrecognized spe instruction "
1204 "in %s at %lx\n", current->comm, regs->nip);
1205 } else {
1206 _exception(SIGFPE, regs, code, regs->nip);
1207 }
1190 1208
1191 _exception(SIGFPE, regs, code, regs->nip);
1192 return; 1209 return;
1193} 1210}
1211
1212void SPEFloatingPointRoundException(struct pt_regs *regs)
1213{
1214 extern int speround_handler(struct pt_regs *regs);
1215 int err;
1216
1217 preempt_disable();
1218 if (regs->msr & MSR_SPE)
1219 giveup_spe(current);
1220 preempt_enable();
1221
1222 regs->nip -= 4;
1223 err = speround_handler(regs);
1224 if (err == 0) {
1225 regs->nip += 4; /* skip emulated instruction */
1226 emulate_single_step(regs);
1227 return;
1228 }
1229
1230 if (err == -EFAULT) {
1231 /* got an error reading the instruction */
1232 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1233 } else if (err == -EINVAL) {
1234 /* didn't recognize the instruction */
1235 printk(KERN_ERR "unrecognized spe instruction "
1236 "in %s at %lx\n", current->comm, regs->nip);
1237 } else {
1238 _exception(SIGFPE, regs, 0, regs->nip);
1239 return;
1240 }
1241}
1194#endif 1242#endif
1195 1243
1196/* 1244/*
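
The SIGFPE cases above are gated on the task's fpexc_mode. From userspace that mode is chosen with prctl(); a minimal sketch using the standard FP-exception prctl interface, which is not something introduced by this patch:

	#include <sys/prctl.h>

	/* Ask for software-assisted FP exception reporting so that, for example,
	 * SPE overflow and divide-by-zero are delivered as SIGFPE via the
	 * exception handlers above. */
	static int enable_spe_fp_traps(void)
	{
		return prctl(PR_SET_FPEXC,
			     PR_FP_EXC_SW_ENABLE | PR_FP_EXC_OVF | PR_FP_EXC_DIV);
	}
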
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 72ca26df457e..ee038d4bf252 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -16,6 +16,13 @@
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/unistd.h> 17#include <asm/unistd.h>
18 18
19/* Offset for the low 32-bit part of a field of long type */
20#ifdef CONFIG_PPC64
21#define LOPART 4
22#else
23#define LOPART 0
24#endif
25
19 .text 26 .text
20/* 27/*
21 * Exact prototype of gettimeofday 28 * Exact prototype of gettimeofday
@@ -90,101 +97,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
90 97
91 mflr r12 /* r12 saves lr */ 98 mflr r12 /* r12 saves lr */
92 .cfi_register lr,r12 99 .cfi_register lr,r12
93 mr r10,r3 /* r10 saves id */
94 mr r11,r4 /* r11 saves tp */ 100 mr r11,r4 /* r11 saves tp */
95 bl __get_datapage@local /* get data page */ 101 bl __get_datapage@local /* get data page */
96 mr r9,r3 /* datapage ptr in r9 */ 102 mr r9,r3 /* datapage ptr in r9 */
97 beq cr1,50f /* if monotonic -> jump there */
98
99 /*
100 * CLOCK_REALTIME
101 */
102
103 bl __do_get_xsec@local /* get xsec from tb & kernel */
104 bne- 98f /* out of line -> do syscall */
105
106 /* seconds are xsec >> 20 */
107 rlwinm r5,r4,12,20,31
108 rlwimi r5,r3,12,0,19
109 stw r5,TSPC32_TV_SEC(r11)
110 103
111 /* get remaining xsec and convert to nsec. we scale 10450: bl __do_get_tspec@local /* get sec/nsec from tb & kernel */
112 * up remaining xsec by 12 bits and get the top 32 bits 105 bne cr1,80f /* not monotonic -> all done */
113 * of the multiplication, then we multiply by 1000
114 */
115 rlwinm r5,r4,12,0,19
116 lis r6,1000000@h
117 ori r6,r6,1000000@l
118 mulhwu r5,r5,r6
119 mulli r5,r5,1000
120 stw r5,TSPC32_TV_NSEC(r11)
121 mtlr r12
122 crclr cr0*4+so
123 li r3,0
124 blr
125 106
126 /* 107 /*
127 * CLOCK_MONOTONIC 108 * CLOCK_MONOTONIC
128 */ 109 */
129 110
13050: bl __do_get_xsec@local /* get xsec from tb & kernel */
131 bne- 98f /* out of line -> do syscall */
132
133 /* seconds are xsec >> 20 */
134 rlwinm r6,r4,12,20,31
135 rlwimi r6,r3,12,0,19
136
137 /* get remaining xsec and convert to nsec. we scale
138 * up remaining xsec by 12 bits and get the top 32 bits
139 * of the multiplication, then we multiply by 1000
140 */
141 rlwinm r7,r4,12,0,19
142 lis r5,1000000@h
143 ori r5,r5,1000000@l
144 mulhwu r7,r7,r5
145 mulli r7,r7,1000
146
147 /* now we must fixup using wall to monotonic. We need to snapshot 111 /* now we must fixup using wall to monotonic. We need to snapshot
148 * that value and do the counter trick again. Fortunately, we still 112 * that value and do the counter trick again. Fortunately, we still
149 * have the counter value in r8 that was returned by __do_get_xsec. 113 * have the counter value in r8 that was returned by __do_get_xsec.
150 * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5 114 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
151 * can be used 115 * can be used, r7 contains NSEC_PER_SEC.
152 */ 116 */
153 117
154 lwz r3,WTOM_CLOCK_SEC(r9) 118 lwz r5,WTOM_CLOCK_SEC(r9)
155 lwz r4,WTOM_CLOCK_NSEC(r9) 119 lwz r6,WTOM_CLOCK_NSEC(r9)
156 120
157 /* We now have our result in r3,r4. We create a fake dependency 121 /* We now have our offset in r5,r6. We create a fake dependency
158 * on that result and re-check the counter 122 * on that value and re-check the counter
159 */ 123 */
160 or r5,r4,r3 124 or r0,r6,r5
161 xor r0,r5,r5 125 xor r0,r0,r0
162 add r9,r9,r0 126 add r9,r9,r0
163#ifdef CONFIG_PPC64 127 lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
164 lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
165#else
166 lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
167#endif
168 cmpl cr0,r8,r0 /* check if updated */ 128 cmpl cr0,r8,r0 /* check if updated */
169 bne- 50b 129 bne- 50b
170 130
171 /* Calculate and store result. Note that this mimmics the C code, 131 /* Calculate and store result. Note that this mimics the C code,
172 * which may cause funny results if nsec goes negative... is that 132 * which may cause funny results if nsec goes negative... is that
173 * possible at all ? 133 * possible at all ?
174 */ 134 */
175 add r3,r3,r6 135 add r3,r3,r5
176 add r4,r4,r7 136 add r4,r4,r6
177 lis r5,NSEC_PER_SEC@h 137 cmpw cr0,r4,r7
178 ori r5,r5,NSEC_PER_SEC@l 138 cmpwi cr1,r4,0
179 cmpl cr0,r4,r5
180 cmpli cr1,r4,0
181 blt 1f 139 blt 1f
182 subf r4,r5,r4 140 subf r4,r7,r4
183 addi r3,r3,1 141 addi r3,r3,1
1841: bge cr1,1f 1421: bge cr1,80f
185 addi r3,r3,-1 143 addi r3,r3,-1
186 add r4,r4,r5 144 add r4,r4,r7
1871: stw r3,TSPC32_TV_SEC(r11) 145
14680: stw r3,TSPC32_TV_SEC(r11)
188 stw r4,TSPC32_TV_NSEC(r11) 147 stw r4,TSPC32_TV_NSEC(r11)
189 148
190 mtlr r12 149 mtlr r12
@@ -195,10 +154,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
195 /* 154 /*
196 * syscall fallback 155 * syscall fallback
197 */ 156 */
19898:
199 mtlr r12
200 mr r3,r10
201 mr r4,r11
20299: 15799:
203 li r0,__NR_clock_gettime 158 li r0,__NR_clock_gettime
204 sc 159 sc
@@ -254,11 +209,7 @@ __do_get_xsec:
254 /* Check for update count & load values. We use the low 209 /* Check for update count & load values. We use the low
255 * order 32 bits of the update count 210 * order 32 bits of the update count
256 */ 211 */
257#ifdef CONFIG_PPC64 2121: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
2581: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9)
259#else
2601: lwz r8,(CFG_TB_UPDATE_COUNT)(r9)
261#endif
262 andi. r0,r8,1 /* pending update ? loop */ 213 andi. r0,r8,1 /* pending update ? loop */
263 bne- 1b 214 bne- 1b
264 xor r0,r8,r8 /* create dependency */ 215 xor r0,r8,r8 /* create dependency */
@@ -305,11 +256,7 @@ __do_get_xsec:
305 or r6,r4,r3 256 or r6,r4,r3
306 xor r0,r6,r6 257 xor r0,r6,r6
307 add r9,r9,r0 258 add r9,r9,r0
308#ifdef CONFIG_PPC64 259 lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
309 lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
310#else
311 lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
312#endif
313 cmpl cr0,r8,r0 /* check if updated */ 260 cmpl cr0,r8,r0 /* check if updated */
314 bne- 1b 261 bne- 1b
315 262
@@ -322,3 +269,98 @@ __do_get_xsec:
322 */ 269 */
3233: blr 2703: blr
324 .cfi_endproc 271 .cfi_endproc
272
273/*
274 * This is the core of clock_gettime(), it returns the current
275 * time in seconds and nanoseconds in r3 and r4.
276 * It expects the datapage ptr in r9 and doesn't clobber it.
277 * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
278 * On return, r8 contains the counter value that can be reused.
279 * This clobbers cr0 but not any other cr field.
280 */
281__do_get_tspec:
282 .cfi_startproc
283 /* Check for update count & load values. We use the low
284 * order 32 bits of the update count
285 */
2861: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
287 andi. r0,r8,1 /* pending update ? loop */
288 bne- 1b
289 xor r0,r8,r8 /* create dependency */
290 add r9,r9,r0
291
292 /* Load orig stamp (offset to TB) */
293 lwz r5,CFG_TB_ORIG_STAMP(r9)
294 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
295
296 /* Get a stable TB value */
2972: mftbu r3
298 mftbl r4
299 mftbu r0
300 cmpl cr0,r3,r0
301 bne- 2b
302
303 /* Subtract tb orig stamp and shift left 12 bits.
304 */
305 subfc r7,r6,r4
306 subfe r0,r5,r3
307 slwi r0,r0,12
308 rlwimi. r0,r7,12,20,31
309 slwi r7,r7,12
310
311 /* Load scale factor & do multiplication */
312 lwz r5,CFG_TB_TO_XS(r9) /* load values */
313 lwz r6,(CFG_TB_TO_XS+4)(r9)
314 mulhwu r3,r7,r6
315 mullw r10,r7,r5
316 mulhwu r4,r7,r5
317 addc r10,r3,r10
318 li r3,0
319
320 beq+ 4f /* skip high part computation if 0 */
321 mulhwu r3,r0,r5
322 mullw r7,r0,r5
323 mulhwu r5,r0,r6
324 mullw r6,r0,r6
325 adde r4,r4,r7
326 addze r3,r3
327 addc r4,r4,r5
328 addze r3,r3
329 addc r10,r10,r6
330
3314: addze r4,r4 /* add in carry */
332 lis r7,NSEC_PER_SEC@h
333 ori r7,r7,NSEC_PER_SEC@l
334 mulhwu r4,r4,r7 /* convert to nanoseconds */
335
336 /* At this point, we have seconds & nanoseconds since the xtime
337 * stamp in r3+CA and r4. Load & add the xtime stamp.
338 */
339#ifdef CONFIG_PPC64
340 lwz r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
341 lwz r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
342#else
343 lwz r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
344 lwz r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
345#endif
346 add r4,r4,r6
347 adde r3,r3,r5
348
349 /* We now have our result in r3,r4. We create a fake dependency
350 * on that result and re-check the counter
351 */
352 or r6,r4,r3
353 xor r0,r6,r6
354 add r9,r9,r0
355 lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
356 cmpl cr0,r8,r0 /* check if updated */
357 bne- 1b
358
359 /* check for nanosecond overflow and adjust if necessary */
360 cmpw r4,r7
361 bltlr /* all done if no overflow */
362 subf r4,r7,r4 /* adjust if overflow */
363 addi r3,r3,1
364
365 blr
366 .cfi_endproc
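
After __do_get_tspec returns the xtime-based seconds/nanoseconds (r3/r4) and NSEC_PER_SEC (r7), the CLOCK_MONOTONIC path above adds the wall-to-monotonic offset and renormalizes. The same arithmetic as a small C sketch; integer widths are chosen loosely, and the negative-nanosecond branch mirrors the cr1 check at label 1:

	#define NSEC_PER_SEC	1000000000L

	/* Sketch of the monotonic fixup: add the wall->monotonic offset, then
	 * fold nanoseconds back into the range [0, NSEC_PER_SEC). */
	static void monotonic_fixup(long long *sec, long *nsec,
				    long long wtom_sec, long wtom_nsec)
	{
		*sec  += wtom_sec;
		*nsec += wtom_nsec;
		if (*nsec >= NSEC_PER_SEC) {		/* carried into the next second */
			*nsec -= NSEC_PER_SEC;
			(*sec)++;
		} else if (*nsec < 0) {			/* went negative, borrow a second */
			*nsec += NSEC_PER_SEC;
			(*sec)--;
		}
	}
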
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index c6401f9e37f1..262cd5857a56 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -75,90 +75,49 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
75 75
76 mflr r12 /* r12 saves lr */ 76 mflr r12 /* r12 saves lr */
77 .cfi_register lr,r12 77 .cfi_register lr,r12
78 mr r10,r3 /* r10 saves id */
79 mr r11,r4 /* r11 saves tp */ 78 mr r11,r4 /* r11 saves tp */
80 bl V_LOCAL_FUNC(__get_datapage) /* get data page */ 79 bl V_LOCAL_FUNC(__get_datapage) /* get data page */
81 beq cr1,50f /* if monotonic -> jump there */ 8050: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */
82 81 bne cr1,80f /* if not monotonic, all done */
83 /*
84 * CLOCK_REALTIME
85 */
86
87 bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
88
89 lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
90 ori r7,r7,16960
91 rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
92 rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
93 std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
94 subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
95 mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
96 * XSEC_PER_SEC
97 */
98 rldicl r0,r0,44,20
99 mulli r0,r0,1000 /* nsec = usec * 1000 */
100 std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
101
102 mtlr r12
103 crclr cr0*4+so
104 li r3,0
105 blr
106 82
107 /* 83 /*
108 * CLOCK_MONOTONIC 84 * CLOCK_MONOTONIC
109 */ 85 */
110 86
11150: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
112
113 lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
114 ori r7,r7,16960
115 rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
116 rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
117 subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
118 mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
119 * XSEC_PER_SEC
120 */
121 rldicl r6,r0,44,20
122 mulli r6,r6,1000 /* nsec = usec * 1000 */
123
124 /* now we must fixup using wall to monotonic. We need to snapshot 87 /* now we must fixup using wall to monotonic. We need to snapshot
125 * that value and do the counter trick again. Fortunately, we still 88 * that value and do the counter trick again. Fortunately, we still
126 * have the counter value in r8 that was returned by __do_get_xsec. 89 * have the counter value in r8 that was returned by __do_get_tspec.
127 * At this point, r5,r6 contain our sec/nsec values. 90 * At this point, r4,r5 contain our sec/nsec values.
128 * can be used
129 */ 91 */
130 92
131 lwa r4,WTOM_CLOCK_SEC(r3) 93 lwa r6,WTOM_CLOCK_SEC(r3)
132 lwa r7,WTOM_CLOCK_NSEC(r3) 94 lwa r9,WTOM_CLOCK_NSEC(r3)
133 95
134 /* We now have our result in r4,r7. We create a fake dependency 96 /* We now have our result in r6,r9. We create a fake dependency
135 * on that result and re-check the counter 97 * on that result and re-check the counter
136 */ 98 */
137 or r9,r4,r7 99 or r0,r6,r9
138 xor r0,r9,r9 100 xor r0,r0,r0
139 add r3,r3,r0 101 add r3,r3,r0
140 ld r0,CFG_TB_UPDATE_COUNT(r3) 102 ld r0,CFG_TB_UPDATE_COUNT(r3)
141 cmpld cr0,r0,r8 /* check if updated */ 103 cmpld cr0,r0,r8 /* check if updated */
142 bne- 50b 104 bne- 50b
143 105
144 /* Calculate and store result. Note that this mimmics the C code, 106 /* Add wall->monotonic offset and check for overflow or underflow.
145 * which may cause funny results if nsec goes negative... is that
146 * possible at all ?
147 */ 107 */
148 add r4,r4,r5 108 add r4,r4,r6
149 add r7,r7,r6 109 add r5,r5,r9
150 lis r9,NSEC_PER_SEC@h 110 cmpd cr0,r5,r7
151 ori r9,r9,NSEC_PER_SEC@l 111 cmpdi cr1,r5,0
152 cmpl cr0,r7,r9
153 cmpli cr1,r7,0
154 blt 1f 112 blt 1f
155 subf r7,r9,r7 113 subf r5,r7,r5
156 addi r4,r4,1 114 addi r4,r4,1
1571: bge cr1,1f 1151: bge cr1,80f
158 addi r4,r4,-1 116 addi r4,r4,-1
159 add r7,r7,r9 117 add r5,r5,r7
1601: std r4,TSPC64_TV_SEC(r11) 118
161 std r7,TSPC64_TV_NSEC(r11) 11980: std r4,TSPC64_TV_SEC(r11)
120 std r5,TSPC64_TV_NSEC(r11)
162 121
163 mtlr r12 122 mtlr r12
164 crclr cr0*4+so 123 crclr cr0*4+so
@@ -168,10 +127,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
168 /* 127 /*
169 * syscall fallback 128 * syscall fallback
170 */ 129 */
17198:
172 mtlr r12
173 mr r3,r10
174 mr r4,r11
17599: 13099:
176 li r0,__NR_clock_gettime 131 li r0,__NR_clock_gettime
177 sc 132 sc
@@ -253,3 +208,59 @@ V_FUNCTION_BEGIN(__do_get_xsec)
253 blr 208 blr
254 .cfi_endproc 209 .cfi_endproc
255V_FUNCTION_END(__do_get_xsec) 210V_FUNCTION_END(__do_get_xsec)
211
212/*
213 * This is the core of clock_gettime(), it returns the current
214 * time in seconds and nanoseconds in r4 and r5.
215 * It expects the datapage ptr in r3 and doesn't clobber it.
216 * It clobbers r0 and r6 and returns NSEC_PER_SEC in r7.
217 * On return, r8 contains the counter value that can be reused.
218 * This clobbers cr0 but not any other cr field.
219 */
220V_FUNCTION_BEGIN(__do_get_tspec)
221 .cfi_startproc
222 /* check for update count & load values */
2231: ld r8,CFG_TB_UPDATE_COUNT(r3)
224 andi. r0,r8,1 /* pending update ? loop */
225 bne- 1b
226 xor r0,r8,r8 /* create dependency */
227 add r3,r3,r0
228
229 /* Get TB & offset it. We use the MFTB macro which will generate
230 * workaround code for Cell.
231 */
232 MFTB(r7)
233 ld r9,CFG_TB_ORIG_STAMP(r3)
234 subf r7,r9,r7
235
236 /* Scale result */
237 ld r5,CFG_TB_TO_XS(r3)
238 sldi r7,r7,12 /* compute time since stamp_xtime */
239 mulhdu r6,r7,r5 /* in units of 2^-32 seconds */
240
241 /* Add stamp since epoch */
242 ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
243 ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
244 or r0,r4,r5
245 or r0,r0,r6
246 xor r0,r0,r0
247 add r3,r3,r0
248 ld r0,CFG_TB_UPDATE_COUNT(r3)
249 cmpld r0,r8 /* check if updated */
250 bne- 1b /* reload if so */
251
252 /* convert to seconds & nanoseconds and add to stamp */
253 lis r7,NSEC_PER_SEC@h
254 ori r7,r7,NSEC_PER_SEC@l
255 mulhwu r0,r6,r7 /* compute nanoseconds and */
256 srdi r6,r6,32 /* seconds since stamp_xtime */
257 clrldi r0,r0,32
258 add r5,r5,r0 /* add nanoseconds together */
259 cmpd r5,r7 /* overflow? */
260 add r4,r4,r6
261 bltlr /* all done if no overflow */
262 subf r5,r7,r5 /* if overflow, adjust */
263 addi r4,r4,1
264 blr
265 .cfi_endproc
266V_FUNCTION_END(__do_get_tspec)
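
For reference, the fixed-point conversion performed by the 64-bit __do_get_tspec above, as a self-contained C sketch. The assumption, consistent with the comments in the hunk, is that tb_to_xs is scaled so that (tb_delta << 12) multiplied by it, keeping the high 64 bits, yields time in units of 2^-32 seconds; __int128 is a GCC extension standing in for mulhdu:

	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000ULL

	/* Mirror of: sldi r7,r7,12 ; mulhdu r6,r7,r5 ; followed by the split
	 * into whole seconds and a 2^-32 fraction converted to nanoseconds. */
	static void tb_delta_to_timespec(uint64_t tb_delta, uint64_t tb_to_xs,
					 uint64_t *sec, uint64_t *nsec)
	{
		unsigned __int128 prod =
			(unsigned __int128)(tb_delta << 12) * tb_to_xs;
		uint64_t t32 = (uint64_t)(prod >> 64);	/* 2^-32 second units */

		*sec  = t32 >> 32;					/* whole seconds */
		*nsec = ((t32 & 0xffffffffULL) * NSEC_PER_SEC) >> 32;	/* fraction */
	}

The result would then be added to stamp_xtime and any nanosecond overflow folded into the seconds, exactly as the tail of the routine above does.
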