Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Makefile                            |  3
-rw-r--r--  arch/powerpc/kernel/btext.c                      |  4
-rw-r--r--  arch/powerpc/kernel/entry_32.S                   |  6
-rw-r--r--  arch/powerpc/kernel/rtas.c                       | 99
-rw-r--r--  arch/powerpc/kernel/time.c                       |  2
-rw-r--r--  arch/powerpc/mm/init_64.c                        | 16
-rw-r--r--  arch/powerpc/mm/stab.c                           |  1
-rw-r--r--  arch/powerpc/mm/tlb_64.c                         |  2
-rw-r--r--  arch/powerpc/platforms/embedded6xx/prpmc2800.c   |  1
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig           |  2
-rw-r--r--  arch/powerpc/sysdev/cpm_common.c                 |  4
11 files changed, 79 insertions, 61 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 4e165342210a..bd87626c1f60 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,6 +107,9 @@ endif
 # No AltiVec instruction when building kernel
 KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
 
+# No SPE instruction when building kernel
+KBUILD_CFLAGS += $(call cc-option,-mno-spe)
+
 # Enable unit-at-a-time mode when possible. It shrinks the
 # kernel considerably.
 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 3ef51fb6f107..9c74fdf29eec 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -186,7 +186,9 @@ int btext_initialize(struct device_node *np)
         pitch = *prop;
         if (pitch == 1)
                 pitch = 0x1000;
-        prop = of_get_property(np, "address", NULL);
+        prop = of_get_property(np, "linux,bootx-addr", NULL);
+        if (prop == NULL)
+                prop = of_get_property(np, "address", NULL);
         if (prop)
                 address = *prop;
 
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index a7572cf464bd..69a91bd46115 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -251,6 +251,9 @@ syscall_exit_cont:
         bne-    2f
 1:
 #endif /* CONFIG_44x */
+BEGIN_FTR_SECTION
+        lwarx   r7,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
         stwcx.  r0,0,r1                 /* to clear the reservation */
         lwz     r4,_LINK(r1)
         lwz     r5,_CCR(r1)
@@ -717,6 +720,9 @@ restore:
         mtctr   r11
 
         PPC405_ERR77(0,r1)
+BEGIN_FTR_SECTION
+        lwarx   r11,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
         stwcx.  r0,0,r1                 /* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 214780798289..52e95c2158c0 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,9 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+#include <linux/cpumask.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -34,6 +37,8 @@
 #include <asm/lmb.h>
 #include <asm/udbg.h>
 #include <asm/syscalls.h>
+#include <asm/smp.h>
+#include <asm/atomic.h>
 
 struct rtas_t rtas = {
         .lock = SPIN_LOCK_UNLOCKED
@@ -41,8 +46,10 @@ struct rtas_t rtas = {
 EXPORT_SYMBOL(rtas);
 
 struct rtas_suspend_me_data {
-        long waiting;
-        struct rtas_args *args;
+        atomic_t working; /* number of cpus accessing this struct */
+        int token; /* ibm,suspend-me */
+        int error;
+        struct completion *complete; /* wait on this until working == 0 */
 };
 
 DEFINE_SPINLOCK(rtas_data_buf_lock);
@@ -657,50 +664,62 @@ static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
 #ifdef CONFIG_PPC_PSERIES
 static void rtas_percpu_suspend_me(void *info)
 {
-        int i;
         long rc;
-        long flags;
+        unsigned long msr_save;
+        int cpu;
         struct rtas_suspend_me_data *data =
                 (struct rtas_suspend_me_data *)info;
 
-        /*
-         * We use "waiting" to indicate our state. As long
-         * as it is >0, we are still trying to all join up.
-         * If it goes to 0, we have successfully joined up and
-         * one thread got H_CONTINUE. If any error happens,
-         * we set it to <0.
-         */
-        local_irq_save(flags);
-        do {
-                rc = plpar_hcall_norets(H_JOIN);
-                smp_rmb();
-        } while (rc == H_SUCCESS && data->waiting > 0);
-        if (rc == H_SUCCESS)
-                goto out;
+        atomic_inc(&data->working);
+
+        /* really need to ensure MSR.EE is off for H_JOIN */
+        msr_save = mfmsr();
+        mtmsr(msr_save & ~(MSR_EE));
+
+        rc = plpar_hcall_norets(H_JOIN);
+
+        mtmsr(msr_save);
 
-        if (rc == H_CONTINUE) {
-                data->waiting = 0;
-                data->args->args[data->args->nargs] =
-                        rtas_call(ibm_suspend_me_token, 0, 1, NULL);
-                for_each_possible_cpu(i)
-                        plpar_hcall_norets(H_PROD,i);
+        if (rc == H_SUCCESS) {
+                /* This cpu was prodded and the suspend is complete. */
+                goto out;
+        } else if (rc == H_CONTINUE) {
+                /* All other cpus are in H_JOIN, this cpu does
+                 * the suspend.
+                 */
+                printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
+                       smp_processor_id());
+                data->error = rtas_call(data->token, 0, 1, NULL);
+
+                if (data->error)
+                        printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
+                               data->error);
         } else {
-                data->waiting = -EBUSY;
-                printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
+                printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
+                       smp_processor_id(), rc);
+                data->error = rc;
         }
-
+        /* This cpu did the suspend or got an error; in either case,
+         * we need to prod all other other cpus out of join state.
+         * Extra prods are harmless.
+         */
+        for_each_online_cpu(cpu)
+                plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
 out:
-        local_irq_restore(flags);
-        return;
+        if (atomic_dec_return(&data->working) == 0)
+                complete(data->complete);
 }
 
 static int rtas_ibm_suspend_me(struct rtas_args *args)
 {
-        int i;
         long state;
         long rc;
         unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
         struct rtas_suspend_me_data data;
+        DECLARE_COMPLETION_ONSTACK(done);
+
+        if (!rtas_service_present("ibm,suspend-me"))
+                return -ENOSYS;
 
         /* Make sure the state is valid */
         rc = plpar_hcall(H_VASI_STATE, retbuf,
@@ -721,25 +740,23 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
                 return 0;
         }
 
-        data.waiting = 1;
-        data.args = args;
+        atomic_set(&data.working, 0);
+        data.token = rtas_token("ibm,suspend-me");
+        data.error = 0;
+        data.complete = &done;
 
         /* Call function on all CPUs.  One of us will make the
          * rtas call
          */
         if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
-                data.waiting = -EINVAL;
+                data.error = -EINVAL;
 
-        if (data.waiting != 0)
-                printk(KERN_ERR "Error doing global join\n");
+        wait_for_completion(&done);
 
-        /* Prod each CPU.  This won't hurt, and will wake
-         * anyone we successfully put to sleep with H_JOIN.
-         */
-        for_each_possible_cpu(i)
-                plpar_hcall_norets(H_PROD, i);
+        if (data.error != 0)
+                printk(KERN_ERR "Error doing global join\n");
 
-        return data.waiting;
+        return data.error;
 }
 #else /* CONFIG_PPC_PSERIES */
 static int rtas_ibm_suspend_me(struct rtas_args *args)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 4beb6329dfb7..c0d77723ba11 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -829,7 +829,7 @@ static void register_decrementer_clockevent(int cpu)
         *dec = decrementer_clockevent;
         dec->cpumask = cpumask_of_cpu(cpu);
 
-        printk(KERN_INFO "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
+        printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
                dec->name, dec->mult, dec->shift, cpu);
 
         clockevents_register_device(dec);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d9c82d3d6482..c0f5cff77035 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,8 +19,6 @@
  *
  */
 
-#undef DEBUG
-
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -66,12 +64,6 @@
 
 #include "mmu_decl.h"
 
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -175,8 +167,8 @@ void pgtable_cache_init(void)
                 int size = pgtable_cache_size[i];
                 const char *name = pgtable_cache_name[i];
 
-                DBG("Allocating page table cache %s (#%d) "
+                pr_debug("Allocating page table cache %s (#%d) "
                     "for size: %08x...\n", name, i, size);
                 pgtable_cache[i] = kmem_cache_create(name,
                                                      size, size,
                                                      SLAB_PANIC,
@@ -239,8 +231,8 @@ int __meminit vmemmap_populate(struct page *start_page,
                 if (!p)
                         return -ENOMEM;
 
-                printk(KERN_WARNING "vmemmap %08lx allocated at %p, "
-                       "physical %08lx.\n", start, p, __pa(p));
+                pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
+                         start, p, __pa(p));
 
                 mapped = htab_bolt_mapping(start, start + page_size,
                                            __pa(p), mode_rw, mmu_linear_psize,
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 9e85bda76216..50448d5de9d2 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,6 +20,7 @@
 #include <asm/lmb.h>
 #include <asm/abs_addr.h>
 #include <asm/firmware.h>
+#include <asm/iseries/hv_call.h>
 
 struct stab_entry {
         unsigned long esid_data;
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index eafbca52bff9..e2d867ce1c7e 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -54,12 +54,10 @@ unsigned long pte_freelist_forced_free;
         ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
                 / sizeof(pgtable_free_t))
 
-#ifdef CONFIG_SMP
 static void pte_free_smp_sync(void *arg)
 {
         /* Do nothing, just ensure we sync with all CPUs */
 }
-#endif
 
 /* This is only called when we are critically out of memory
  * (and fail to get a page in pte_free_tlb).
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index e484cac75095..653a5eb91c90 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -144,6 +144,7 @@ static int __init prpmc2800_probe(void)
         strncpy(prpmc2800_platform_name, m,
                 min((int)len, PLATFORM_NAME_MAX - 1));
 
+        _set_L2CR(_get_L2CR() | L2CR_L2E);
         return 1;
 }
 
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 16e4e401b820..306a9d07491d 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -21,7 +21,7 @@ config PPC_SPLPAR
 
 config EEH
         bool "PCI Extended Error Handling (EEH)" if EMBEDDED
-        depends on PPC_PSERIES
+        depends on PPC_PSERIES && PCI
         default y if !EMBEDDED
 
 config SCANLOG
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 66c8ad4cfce6..165981c87786 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -77,8 +77,6 @@ int __init cpm_muram_init(void)
         int i = 0;
         int ret = 0;
 
-        printk("cpm_muram_init\n");
-
         spin_lock_init(&cpm_muram_lock);
         /* initialize the info header */
         rh_init(&cpm_muram_info, 1,
@@ -193,7 +191,7 @@ void __iomem *cpm_muram_addr(unsigned long offset)
 EXPORT_SYMBOL(cpm_muram_addr);
 
 /**
- * cpm_muram_phys - turn a muram virtual address into a DMA address
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
  * @offset: virtual address from cpm_muram_addr() to convert
  */
 dma_addr_t cpm_muram_dma(void __iomem *addr)