author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-22 16:16:01 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-22 16:16:01 -0400
commit    06b8147c5dbd385b5b97ca74e19f6f3951ebc1cb
tree      6ed9de7ca0ab3a65af6a189a89deb0a36ab35f6b  /arch/powerpc/kernel
parent    53baaaa9682c230410a057263d1ce2922f43ddc4
parent    8725f25acc656c1522d48a6746055099efdaca4c
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (49 commits)
  powerpc: Fix build bug with binutils < 2.18 and GCC < 4.2
  powerpc/eeh: Don't panic when EEH_MAX_FAILS is exceeded
  fbdev: Teaches offb about palette on radeon r5xx/r6xx
  powerpc/cell/edac: Log a syndrome code in case of correctable error
  powerpc/cell: Add DMA_ATTR_WEAK_ORDERING dma attribute and use in Cell IOMMU code
  powerpc: Indicate which oprofile counters to use while in compat mode
  powerpc/boot: Change spaces to tabs
  powerpc: Remove duplicate 6xx option in Kconfig
  powerpc: Use PPC_LONG and PPC_LONG_ALIGN in lib/string.S
  powerpc: Use PPC_LONG_ALIGN in uaccess.h
  powerpc: Add a #define for aligning to a long-sized boundary
  powerpc: Fix OF parsing of 64 bits PCI addresses
  powerpc: Use WARN_ON(1) instead of __WARN()
  powerpc: Fix support for latencytop
  powerpc/ps3: Update ps3_defconfig
  powerpc/ps3: Add a sub-match id to ps3_system_bus
  powerpc: Add a 6xx defconfig
  powerpc/dma: Use the struct dma_attrs in iommu code
  powerpc/cell: Add support for power button of future IBM cell blades
  powerpc/cell: Cleanup sysreset_hack for IBM cell blades
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--   arch/powerpc/kernel/cputable.c        |  20
-rw-r--r--   arch/powerpc/kernel/head_fsl_booke.S  | 187
-rw-r--r--   arch/powerpc/kernel/iommu.c           |  13
-rw-r--r--   arch/powerpc/kernel/pci-common.c      |   1
-rw-r--r--   arch/powerpc/kernel/prom_parse.c      |  44
-rw-r--r--   arch/powerpc/kernel/stacktrace.c      |   2
-rw-r--r--   arch/powerpc/kernel/vmlinux.lds.S     |  31
7 files changed, 130 insertions, 168 deletions
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f7f3c215d06f..b936a1dd0a50 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -355,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
+		.oprofile_cpu_type	= "ppc64/compat-power5+",
 		.platform		= "power5+",
 	},
 	{	/* Power6 */
@@ -386,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
+		.oprofile_cpu_type	= "ppc64/compat-power6",
 		.platform		= "power6",
 	},
 	{	/* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -397,6 +399,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
 		.machine_check		= machine_check_generic,
+		.oprofile_cpu_type	= "ppc64/compat-power7",
 		.platform		= "power7",
 	},
 	{	/* Power7 */
@@ -1629,6 +1632,23 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 		t->cpu_setup = s->cpu_setup;
 		t->cpu_restore = s->cpu_restore;
 		t->platform = s->platform;
+		/*
+		 * If we have passed through this logic once
+		 * before and have pulled the default case
+		 * because the real PVR was not found inside
+		 * cpu_specs[], then we are possibly running in
+		 * compatibility mode. In that case, let the
+		 * oprofiler know which set of compatibility
+		 * counters to pull from by making sure the
+		 * oprofile_cpu_type string is set to that of
+		 * compatibility mode. If the oprofile_cpu_type
+		 * already has a value, then we are possibly
+		 * overriding a real PVR with a logical one, and,
+		 * in that case, keep the current value for
+		 * oprofile_cpu_type.
+		 */
+		if (t->oprofile_cpu_type == NULL)
+			t->oprofile_cpu_type = s->oprofile_cpu_type;
 	} else
 		*t = *s;
 	*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
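
The comment added to identify_cpu() above boils down to a simple fallback rule: keep an existing oprofile_cpu_type, otherwise inherit the compat one from the matched spec. As a standalone illustration only (the cpu_spec_demo struct, merge_spec() and the main() harness are invented for this sketch; just the NULL check mirrors the diff):

/* Standalone C sketch of the fallback rule described in the diff above. */
#include <stdio.h>

struct cpu_spec_demo {			/* hypothetical stand-in for struct cpu_spec */
	const char *oprofile_cpu_type;
	const char *platform;
};

static void merge_spec(struct cpu_spec_demo *t, const struct cpu_spec_demo *s)
{
	t->platform = s->platform;
	if (t->oprofile_cpu_type == NULL)	/* only fill in if still unset */
		t->oprofile_cpu_type = s->oprofile_cpu_type;
}

int main(void)
{
	struct cpu_spec_demo compat = { "ppc64/compat-power6", "power6" };
	struct cpu_spec_demo cur = { NULL, NULL };

	merge_spec(&cur, &compat);		/* compat mode: inherits the compat type */
	printf("%s on %s\n", cur.oprofile_cpu_type, cur.platform);
	return 0;
}
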
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index c4268500e856..3cb52fa0eda3 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -151,16 +151,11 @@ skpinv: addi r6,r6,1 /* Increment */
 	/* Invalidate TLB0 */
 	li	r6,0x04
 	tlbivax	0,r6
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
+	TLBSYNC
 	/* Invalidate TLB1 */
 	li	r6,0x0c
 	tlbivax	0,r6
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
-	msync
+	TLBSYNC

/* 3. Setup a temp mapping and jump to it */
 	andi.	r5, r3, 0x1	/* Find an entry not used and is non-zero */
@@ -238,10 +233,7 @@ skpinv: addi r6,r6,1 /* Increment */
 	/* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax	0,r9
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
-	msync
+	TLBSYNC

/* 6. Setup KERNELBASE mapping in TLB1[0] */
 	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
@@ -283,10 +275,7 @@ skpinv: addi r6,r6,1 /* Increment */
 	/* Invalidate TLB1 */
 	li	r9,0x0c
 	tlbivax	0,r9
-#ifdef CONFIG_SMP
-	tlbsync
-#endif
-	msync
+	TLBSYNC

 	/* Establish the interrupt vector offsets */
 	SET_IVOR(0, CriticalInput);
@@ -483,90 +472,16 @@ interrupt_base:

 	/* Data Storage Interrupt */
 	START_EXCEPTION(DataStorage)
-	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
-	mtspr	SPRN_SPRG1, r11
-	mtspr	SPRN_SPRG4W, r12
-	mtspr	SPRN_SPRG5W, r13
-	mfcr	r11
-	mtspr	SPRN_SPRG7W, r11
-
-	/*
-	 * Check if it was a store fault, if not then bail
-	 * because a user tried to access a kernel or
-	 * read-protected page.  Otherwise, get the
-	 * offending address and handle it.
-	 */
-	mfspr	r10, SPRN_ESR
-	andis.	r10, r10, ESR_ST@h
-	beq	2f
-
-	mfspr	r10, SPRN_DEAR		/* Get faulting address */
-
-	/* If we are faulting a kernel address, we have to use the
-	 * kernel page tables.
-	 */
-	lis	r11, PAGE_OFFSET@h
-	cmplw	0, r10, r11
-	bge	2f
-
-	/* Get the PGD for the current thread */
-3:
-	mfspr	r11,SPRN_SPRG3
-	lwz	r11,PGDIR(r11)
-4:
-	FIND_PTE
-
-	/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
-	andi.	r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
-	cmpwi	0, r13, _PAGE_RW|_PAGE_USER
-	bne	2f			/* Bail if not */
-
-	/* Update 'changed'. */
-	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
-	stw	r11, PTE_FLAGS_OFFSET(r12)	/* Update Linux page table */
-
-	/* MAS2 not updated as the entry does exist in the tlb, this
-	   fault taken to detect state transition (eg: COW -> DIRTY)
-	*/
-	andi.	r11, r11, _PAGE_HWEXEC
-	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
-	ori	r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l	/* set static perms */
-
-	/* update search PID in MAS6, AS = 0 */
-	mfspr	r12, SPRN_PID0
-	slwi	r12, r12, 16
-	mtspr	SPRN_MAS6, r12
-
-	/* find the TLB index that caused the fault.  It has to be here. */
-	tlbsx	0, r10
-
-	/* only update the perm bits, assume the RPN is fine */
-	mfspr	r12, SPRN_MAS3
-	rlwimi	r12, r11, 0, 20, 31
-	mtspr	SPRN_MAS3,r12
-	tlbwe
-
-	/* Done...restore registers and get out of here. */
-	mfspr	r11, SPRN_SPRG7R
-	mtcr	r11
-	mfspr	r13, SPRN_SPRG5R
-	mfspr	r12, SPRN_SPRG4R
-	mfspr	r11, SPRN_SPRG1
-	mfspr	r10, SPRN_SPRG0
-	rfi			/* Force context change */
-
-2:
-	/*
-	 * The bailout. Restore registers to pre-exception conditions
-	 * and call the heavyweights to help us out.
-	 */
-	mfspr	r11, SPRN_SPRG7R
-	mtcr	r11
-	mfspr	r13, SPRN_SPRG5R
-	mfspr	r12, SPRN_SPRG4R
-	mfspr	r11, SPRN_SPRG1
-	mfspr	r10, SPRN_SPRG0
-	b	data_access
+	NORMAL_EXCEPTION_PROLOG
+	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
+	stw	r5,_ESR(r11)
+	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
+	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
+	bne	1f
+	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+1:
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

 	/* Instruction Storage Interrupt */
 	INSTRUCTION_STORAGE_EXCEPTION
@@ -645,15 +560,30 @@ interrupt_base:
 	lwz	r11,PGDIR(r11)

4:
+	/* Mask of required permission bits. Note that while we
+	 * do copy ESR:ST to _PAGE_RW position as trying to write
+	 * to an RO page is pretty common, we don't do it with
+	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
+	 * event so I'd rather take the overhead when it happens
+	 * rather than adding an instruction here. We should measure
+	 * whether the whole thing is worth it in the first place
+	 * as we could avoid loading SPRN_ESR completely in the first
+	 * place...
+	 *
+	 * TODO: Is it worth doing that mfspr & rlwimi in the first
+	 * place or can we save a couple of instructions here ?
+	 */
+	mfspr	r12,SPRN_ESR
+	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+	rlwimi	r13,r12,11,29,29
+
 	FIND_PTE
-	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
-	beq	2f			/* Bail if not present */
+	andc.	r13,r13,r11		/* Check permission */
+	bne	2f			/* Bail if permission mismach */

#ifdef CONFIG_PTE_64BIT
 	lwz	r13, 0(r12)
#endif
-	ori	r11, r11, _PAGE_ACCESSED
-	stw	r11, PTE_FLAGS_OFFSET(r12)

 	/* Jump to common tlb load */
 	b	finish_tlb_load
@@ -667,7 +597,7 @@ interrupt_base:
 	mfspr	r12, SPRN_SPRG4R
 	mfspr	r11, SPRN_SPRG1
 	mfspr	r10, SPRN_SPRG0
-	b	data_access
+	b	DataStorage

 	/* Instruction TLB Error Interrupt */
 	/*
@@ -705,15 +635,16 @@ interrupt_base:
 	lwz	r11,PGDIR(r11)

4:
+	/* Make up the required permissions */
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
+
 	FIND_PTE
-	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
-	beq	2f			/* Bail if not present */
+	andc.	r13,r13,r11		/* Check permission */
+	bne	2f			/* Bail if permission mismach */

#ifdef CONFIG_PTE_64BIT
 	lwz	r13, 0(r12)
#endif
-	ori	r11, r11, _PAGE_ACCESSED
-	stw	r11, PTE_FLAGS_OFFSET(r12)

 	/* Jump to common TLB load point */
 	b	finish_tlb_load
@@ -768,29 +699,13 @@ interrupt_base:
  * Local functions
  */

-/*
- * Data TLB exceptions will bail out to this point
- * if they can't resolve the lightweight TLB fault.
- */
-data_access:
-	NORMAL_EXCEPTION_PROLOG
-	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
-	stw	r5,_ESR(r11)
-	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
-	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
-	bne	1f
-	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
-1:
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
-
/*
-
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 * r10 - EA of fault
 * r11 - TLB (info from Linux PTE)
- * r12, r13 - available to use
+ * r12 - available to use
+ * r13 - upper bits of PTE (if PTE_64BIT) or available to use
 * CR5 - results of addr >= PAGE_OFFSET
 * MAS0, MAS1 - loaded with proper value when we get here
 * MAS2, MAS3 - will need additional info from Linux PTE
@@ -812,20 +727,14 @@ finish_tlb_load:
#endif
 	mtspr	SPRN_MAS2, r12

-	bge	5, 1f
-
-	/* is user addr */
-	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+	li	r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
+	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
+	and	r12, r11, r10
 	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
-	srwi	r10, r12, 1
-	or	r12, r12, r10	/* Copy user perms into supervisor */
-	iseleq	r12, 0, r12
-	b	2f
-
-	/* is kernel addr */
-1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
-	ori	r12, r12, (MAS3_SX | MAS3_SR)
-
+	slwi	r10, r12, 1
+	or	r10, r10, r12
+	iseleq	r12, r12, r10
+
#ifdef CONFIG_PTE_64BIT
2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
 	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
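
The reworked TLB-miss handlers above replace the separate "is the page present?" test with a mask of required permission bits checked in one andc. A rough userspace C model of that check (the bit values are placeholders rather than the kernel's _PAGE_* definitions, and is_store stands in for the ESR:ST bit the assembly folds into the mask):

/* Sketch: "required & ~pte" is non-zero exactly when a required bit is missing. */
#include <stdio.h>

#define PAGE_PRESENT  0x001	/* hypothetical bit positions */
#define PAGE_ACCESSED 0x002
#define PAGE_RW       0x004

static int tlb_miss_ok(unsigned int pte, int is_store)
{
	unsigned int required = PAGE_PRESENT | PAGE_ACCESSED;

	if (is_store)			/* ESR:ST copied into the _PAGE_RW slot */
		required |= PAGE_RW;

	return (required & ~pte) == 0;	/* the andc./bne pair in the asm */
}

int main(void)
{
	printf("%d\n", tlb_miss_ok(PAGE_PRESENT | PAGE_ACCESSED, 0));		/* 1: read of a present page */
	printf("%d\n", tlb_miss_ok(PAGE_PRESENT | PAGE_ACCESSED, 1));		/* 0: store to an RO page */
	printf("%d\n", tlb_miss_ok(PAGE_PRESENT | PAGE_ACCESSED | PAGE_RW, 1));	/* 1: store to a writable page */
	return 0;
}
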
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 8c68ee9e5d1c..2385f68c1751 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
-			      unsigned long mask, unsigned int align_order)
+			      unsigned long mask, unsigned int align_order,
+			      struct dma_attrs *attrs)
{
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,

 	/* Put the TCEs in the HW table */
 	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction);
+			 direction, attrs);


 	/* Flush/invalidate TLB caches if necessary */
@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);

 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
+				 direction, attrs);

 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

 	dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-				 mask >> IOMMU_PAGE_SHIFT, align);
+				 mask >> IOMMU_PAGE_SHIFT, align,
+				 attrs);
 	if (dma_handle == DMA_ERROR_CODE) {
 		if (printk_ratelimit())  {
 			printk(KERN_INFO "iommu_alloc failed, "
@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order);
+			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 063cdd413049..224e9a11765c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -598,6 +598,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
 			res->start = pci_addr;
 			break;
 		case 2: /* PCI Memory space */
+		case 3: /* PCI 64 bits Memory space */
 			printk(KERN_INFO
 			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
 			       cpu_addr, cpu_addr + size - 1, pci_addr,
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 90eb3a3e383e..bc1fb27368af 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -128,12 +128,35 @@ static void of_bus_pci_count_cells(struct device_node *np,
 	*sizec = 2;
 }

+static unsigned int of_bus_pci_get_flags(const u32 *addr)
+{
+	unsigned int flags = 0;
+	u32 w = addr[0];
+
+	switch((w >> 24) & 0x03) {
+	case 0x01:
+		flags |= IORESOURCE_IO;
+		break;
+	case 0x02: /* 32 bits */
+	case 0x03: /* 64 bits */
+		flags |= IORESOURCE_MEM;
+		break;
+	}
+	if (w & 0x40000000)
+		flags |= IORESOURCE_PREFETCH;
+	return flags;
+}
+
 static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
 {
 	u64 cp, s, da;
+	unsigned int af, rf;
+
+	af = of_bus_pci_get_flags(addr);
+	rf = of_bus_pci_get_flags(range);

 	/* Check address type match */
-	if ((addr[0] ^ range[0]) & 0x03000000)
+	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
 		return OF_BAD_ADDR;

 	/* Read address values, skipping high cell */
@@ -153,25 +176,6 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
 	return of_bus_default_translate(addr + 1, offset, na - 1);
 }

-static unsigned int of_bus_pci_get_flags(const u32 *addr)
-{
-	unsigned int flags = 0;
-	u32 w = addr[0];
-
-	switch((w >> 24) & 0x03) {
-	case 0x01:
-		flags |= IORESOURCE_IO;
-		break;
-	case 0x02: /* 32 bits */
-	case 0x03: /* 64 bits */
-		flags |= IORESOURCE_MEM;
-		break;
-	}
-	if (w & 0x40000000)
-		flags |= IORESOURCE_PREFETCH;
-	return flags;
-}
-
 const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
 			unsigned int *flags)
 {
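
The prom_parse.c change above compares decoded resource flags rather than the raw space-code bits, so a 64-bit PCI memory address can match a 32-bit memory range. A small standalone C sketch of that idea (the IORESOURCE_* values and the sample phys.hi encodings are illustrative only, not the kernel's):

/* Decode the space code from phys.hi, then compare resource types. */
#include <stdio.h>
#include <stdint.h>

#define IORESOURCE_IO       0x100	/* placeholder flag values */
#define IORESOURCE_MEM      0x200
#define IORESOURCE_PREFETCH 0x400

static unsigned int pci_addr_flags(uint32_t phys_hi)
{
	unsigned int flags = 0;

	switch ((phys_hi >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32-bit memory */
	case 0x03: /* 64-bit memory */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (phys_hi & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

int main(void)
{
	uint32_t addr64 = 0x83000010;	/* example 64-bit memory BAR encoding */
	uint32_t range32 = 0x82000000;	/* example 32-bit memory range */

	/* A raw compare of the space bits rejects this pair;
	 * comparing the decoded flags accepts it. */
	printf("raw mismatch: %d\n", ((addr64 ^ range32) & 0x03000000) != 0);
	printf("flag match:   %d\n",
	       ((pci_addr_flags(addr64) ^ pci_addr_flags(range32)) &
		(IORESOURCE_MEM | IORESOURCE_IO)) == 0);
	return 0;
}
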
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 071bee3ec749..f2589645870a 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -59,6 +59,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
-	save_context_stack(trace, tsk->thread.regs->gpr[1], tsk, 0);
+	save_context_stack(trace, tsk->thread.ksp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 87a72c66ce27..a914411bced5 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,25 @@

ENTRY(_stext)

+PHDRS {
+	kernel PT_LOAD FLAGS(7); /* RWX */
+	notes PT_NOTE FLAGS(0);
+	dummy PT_NOTE FLAGS(0);
+
+	/* binutils < 2.18 has a bug that makes it misbehave when taking an
+	   ELF file with all segments at load address 0 as input.  This
+	   happens when running "strip" on vmlinux, because of the AT() magic
+	   in this linker script.  People using GCC >= 4.2 won't run into
+	   this problem, because the "build-id" support will put some data
+	   into the "notes" segment (at a non-zero load address).
+
+	   To work around this, we force some data into both the "dummy"
+	   segment and the kernel segment, so the dummy segment will get a
+	   non-zero load address.  It's not enough to always create the
+	   "notes" segment, since if nothing gets assigned to it, its load
+	   address will be zero. */
+}
+
#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
@@ -50,7 +69,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	_etext = .;
 	PROVIDE32 (etext = .);
-	}
+	} :kernel

 	/* Read-only data */
 	RODATA
@@ -62,7 +81,13 @@ SECTIONS
 		__stop___ex_table = .;
 	}

-	NOTES
+	NOTES :kernel :notes
+
+	/* The dummy segment contents for the bug workaround mentioned above
+	   near PHDRS.  */
+	.dummy : {
+		LONG(0xf177)
+	} :kernel :dummy

/*
 * Init sections discarded at runtime
@@ -74,7 +99,7 @@ SECTIONS
 		_sinittext = .;
 		INIT_TEXT
 		_einittext = .;
-	}
+	} :kernel

 	/* .exit.text is discarded at runtime, not link time,
 	 * to deal with references from __bug_table