author     Dave Airlie <airlied@redhat.com>   2011-03-15 21:34:41 -0400
committer  Dave Airlie <airlied@redhat.com>   2011-03-15 21:34:41 -0400
commit     38f1cff0863809587b5fd10ecd0c24c8b543a48c (patch)
tree       7cf6eb88cdc938c3683209d38311e711a1119400 /arch/powerpc
parent     4819d2e4310796c4e9eef674499af9b9caf36b5a (diff)
parent     5359533801e3dd3abca5b7d3d985b0b33fd9fe8b (diff)
Merge commit '5359533801e3dd3abca5b7d3d985b0b33fd9fe8b' into drm-core-next
The merged commit changed an internal radeon structure, which meant a new
driver in -next had to be fixed up; merge the commit in and fix up that driver.
Also fixes up a trivial nouveau merge conflict.
Conflicts:
drivers/gpu/drm/nouveau/nouveau_mem.c
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/lppaca.h       | 16
-rw-r--r--  arch/powerpc/include/asm/machdep.h      |  6
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c     |  5
-rw-r--r--  arch/powerpc/kernel/paca.c              | 14
-rw-r--r--  arch/powerpc/kernel/process.c           |  8
-rw-r--r--  arch/powerpc/mm/numa.c                  |  3
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c            |  6
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c     |  6
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c  |  1
9 files changed, 40 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 380d48bacd16..26b8c807f8f1 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -33,9 +33,25 @@
 //
 //----------------------------------------------------------------------------
 #include <linux/cache.h>
+#include <linux/threads.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS	NR_CPUS
+#else
+#define NR_LPPACAS	64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS	1
+#endif
+
+
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
 struct lppaca {
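Note: NR_LPPACAS exists so that only legacy iSeries has to carry a full
statically allocated set of lppaca structs (capped at 64), while every other
platform needs just one. The sketch below is illustrative only and uses a
hypothetical array and helper name; it is not copied from paca.c, it merely
shows the kind of 1k-aligned static allocation the macro is meant to bound.

/*
 * Illustrative sketch only: a hypothetical statically allocated pool
 * bounded by NR_LPPACAS. The 1k alignment mirrors the requirement noted
 * above that an lppaca must not cross a page boundary.
 */
#include <asm/lppaca.h>

static struct lppaca lppaca_pool[NR_LPPACAS] __attribute__((__aligned__(0x400)));

static struct lppaca *lppaca_pool_of(int cpu)
{
	/* Callers are expected to stay below NR_LPPACAS, as dt.c does. */
	return &lppaca_pool[cpu];
}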
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
 	 * claims to support kexec.
 	 */
 	int (*machine_kexec_prepare)(struct kimage *image);
+
+	/* Called to perform the _real_ kexec.
+	 * Do NOT allocate memory or fail here. We are past the point of
+	 * no return.
+	 */
+	void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
 	save_ftrace_enabled = __ftrace_enabled_save();
 
-	default_machine_kexec(image);
+	if (ppc_md.machine_kexec)
+		ppc_md.machine_kexec(image);
+	else
+		default_machine_kexec(image);
 
 	__ftrace_enabled_restore(save_ftrace_enabled);
 
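For context, the new ppc_md.machine_kexec hook is optional: a platform that
leaves it NULL still gets default_machine_kexec(), as the hunk above shows. A
platform wanting to own the final kexec step would wire the hook into its
machdep_calls definition roughly as sketched below; the platform name and
function are hypothetical, not taken from this series.

/* Hypothetical platform wiring for the optional hook (sketch only). */
#include <linux/kexec.h>
#include <asm/machdep.h>

#ifdef CONFIG_KEXEC
static void example_machine_kexec(struct kimage *image)
{
	/*
	 * Past the point of no return: no memory allocation and no
	 * failure paths are allowed in here.
	 */
	/* ... hand control to the new kernel ... */
}
#endif

define_machine(example) {
	.name		= "Example Platform",
#ifdef CONFIG_KEXEC
	.machine_kexec	= example_machine_kexec,
#endif
};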
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ebf9846f3c3b..f4adf89d7614 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,20 +27,6 @@ extern unsigned long __toc_start;
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS	NR_CPUS
-#else
-#define NR_LPPACAS	64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS	1
-#endif
-
-/*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
  * is now known to fail if the lppaca structure crosses a page
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
 	prime_debug_regs(new_thread);
 }
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
 	if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 		set_dabr(0);
 	}
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
 	discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
 	flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
 	set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index fd4812329570..0dc95c0aa3be 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
 {
 	int rc = 0;
 
-	if (firmware_has_feature(FW_FEATURE_VPHN) &&
+	/* Disabled until races with load balancing are fixed */
+	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
 	    get_lppaca()->shared_proc) {
 		vphn_enabled = 1;
 		setup_cpu_associativity_change_counters();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, unsigned long pte, int huge)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
 	int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!batch->active) {
 		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+		put_cpu_var(ppc64_tlb_batch);
 		return;
 	}
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
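The switch from __get_cpu_var() to a get_cpu_var()/put_cpu_var() pair is what
lets the "must be called from within some kind of spinlock/non-preempt region"
comment go away: get_cpu_var() disables preemption itself and put_cpu_var()
re-enables it, which is also why every exit path above now needs the matching
put. A generic sketch of that pattern, with a made-up per-CPU variable, follows.

/*
 * Generic sketch of the get_cpu_var()/put_cpu_var() pattern; the
 * per-CPU variable and the work done on it are made up for illustration.
 */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void touch_example_counter(void)
{
	/* get_cpu_var() disables preemption and returns this CPU's copy. */
	unsigned long *ctr = &get_cpu_var(example_counter);

	(*ctr)++;

	/* Every path out of the section must drop the reference. */
	put_cpu_var(example_counter);
}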
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index fdb7384c0c4f..f0491cc28900 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 	pft_size[0] = 0;	/* NUMA CEC cookie, 0 for non NUMA */
 	pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (lppaca_of(i).dyn_proc_status >= 2)
+	for (i = 0; i < NR_LPPACAS; i++) {
+		if (lppaca[i].dyn_proc_status >= 2)
 			continue;
 
 		snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 
 		dt_prop_str(dt, "device_type", device_type_cpu);
 
-		index = lppaca_of(i).dyn_hv_phys_proc_index;
+		index = lppaca[i].dyn_hv_phys_proc_index;
 		d = &xIoHriProcessorVpd[index];
 
 		dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index b0863410517f..2946ae10fbfd 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
 	 * on but calling this function multiple times is fine.
 	 */
 	identify_cpu(0, mfspr(SPRN_PVR));
+	initialise_paca(&boot_paca, 0);
 
 	powerpc_firmware_features |= FW_FEATURE_ISERIES;
 	powerpc_firmware_features |= FW_FEATURE_LPAR;