Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |   6
-rw-r--r--  arch/powerpc/include/asm/Kbuild               |   1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h         |  38
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h      |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h           |  14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h            |  25
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h  |   4
-rw-r--r--  arch/powerpc/include/asm/prom.h               |   3
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c             |   1
-rw-r--r--  arch/powerpc/kernel/iommu.c                   |   2
-rw-r--r--  arch/powerpc/kernel/pci-common.c              |   8
-rw-r--r--  arch/powerpc/kernel/prom.c                    |  43
-rw-r--r--  arch/powerpc/kernel/setup_64.c                |   4
-rw-r--r--  arch/powerpc/kernel/time.c                    |   2
-rw-r--r--  arch/powerpc/kvm/Kconfig                      |   1
-rw-r--r--  arch/powerpc/kvm/Makefile                     |   1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c              | 150
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c           |  42
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c              |   2
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c             |   2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                  |  36
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c          | 246
-rw-r--r--  arch/powerpc/kvm/book3s_hv_cma.c              | 240
-rw-r--r--  arch/powerpc/kvm/book3s_hv_cma.h              |  27
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c           | 139
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S       |   2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S          |  14
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c                  |  35
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c                |   1
-rw-r--r--  arch/powerpc/kvm/booke.c                      |   6
-rw-r--r--  arch/powerpc/kvm/powerpc.c                    |  26
-rw-r--r--  arch/powerpc/mm/init_32.c                     |   2
-rw-r--r--  arch/powerpc/oprofile/common.c                |  28
-rw-r--r--  arch/powerpc/perf/power7-events-list.h        | 548
-rw-r--r--  arch/powerpc/perf/power7-pmu.c                | 148
-rw-r--r--  arch/powerpc/platforms/44x/warp.c             |   1
-rw-r--r--  arch/powerpc/platforms/ps3/time.c             |   2
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c        | 112
-rw-r--r--  arch/powerpc/sysdev/rtc_cmos_setup.c          |   2
39 files changed, 1319 insertions(+), 649 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 04f1e94c3437..a4e3a93bf2d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -389,9 +389,9 @@ config KEXEC
 
	  It is an ongoing process to be certain the hardware in a machine
	  is properly shutdown, so do not be surprised if this code does not
-	  initially work for you.  It may help to enable device hotplugging
-	  support.  As of this writing the exact hardware interface is
-	  strongly in flux, so no good recommendation can be made.
+	  initially work for you.  As of this writing the exact hardware
+	  interface is strongly in flux, so no good recommendation can be
+	  made.
 
 config CRASH_DUMP
	bool "Build a kdump crash kernel"
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 650757c300db..704e6f10ae80 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,3 +2,4 @@
 generic-y += clkdev.h
 generic-y += rwsem.h
 generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 08891d07aeb6..fa19e2f1a874 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -334,6 +334,27 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
	return r;
 }
 
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu) - 4;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
+}
+
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -446,6 +467,23 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
	return vcpu->arch.last_inst;
 }
 
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu) - 4;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
	return vcpu->arch.fault_dar;
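
Both copies of kvmppc_get_last_sc() exist because PR KVM keeps last_inst in the shadow vcpu while the HV-capable configuration keeps it in vcpu->arch. The pc - 4 fetch matters to callers that must re-read the trapping "sc": a minimal sketch of such a caller, assuming the usual encodings 0x44000002 for "sc" and 0x44000022 for "sc 1" (LEV = 1, carried at bit 5 of the instruction word); is_hypercall() is a hypothetical helper, not part of this patch:

	/* Hedged sketch: decide whether the syscall interrupt came from a
	 * guest "sc 1" hypercall rather than a plain "sc".  SRR0 already
	 * points past the sc, so kvmppc_get_last_sc() reads pc - 4. */
	static bool is_hypercall(struct kvm_vcpu *vcpu)
	{
		u32 inst = kvmppc_get_last_sc(vcpu);

		if (inst == KVM_INST_FETCH_FAILED)
			return false;
		return inst == 0x44000022;	/* sc 1 */
	}
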
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a1ecb14e4442..86d638a3b359 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
-extern int kvm_hpt_order;		/* order of preallocated HPTs */
+extern unsigned long kvm_rma_pages;
 #endif
 
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
@@ -100,7 +100,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
+			rb |= ((va_low << 4) & 0xf0);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af326cde7cb6..33283532e9d8 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
	struct page *pages[0];
 };
 
-struct kvmppc_linear_info {
-	void		*base_virt;
+struct kvm_rma_info {
+	atomic_t	 use_count;
	unsigned long	 base_pfn;
-	unsigned long	 npages;
-	struct list_head list;
-	atomic_t	 use_count;
-	int		 type;
 };
 
 /* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
	int tlbie_lock;
	unsigned long lpcr;
	unsigned long rmor;
-	struct kvmppc_linear_info *rma;
+	struct kvm_rma_info *rma;
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
@@ -259,7 +255,7 @@ struct kvm_arch {
	spinlock_t slot_phys_lock;
	cpumask_t need_tlb_flush;
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
-	struct kvmppc_linear_info *hpt_li;
+	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 #ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a5287fe03d77..b15554a26c20 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -137,10 +137,10 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
				struct kvm_allocate_rma *rma);
-extern struct kvmppc_linear_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_linear_info *ri);
-extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
-extern void kvm_release_hpt(struct kvmppc_linear_info *li);
+extern struct kvm_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvm_rma_info *ri);
+extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
+extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
@@ -261,6 +261,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 struct openpic;
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
	paca[cpu].kvm_hstate.xics_phys = addr;
@@ -281,13 +282,12 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 }
 
 extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
-extern void kvm_linear_init(void);
 
 #else
-static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+static inline void __init kvm_cma_reserve(void)
 {}
 
-static inline void kvm_linear_init(void)
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {}
 
 static inline u32 kvmppc_get_xics_latch(void)
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
	}
 }
 
-/* Please call after prepare_to_enter. This function puts the lazy ee state
-   back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
 {
+	trace_hardirqs_on();
+
 #ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
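
The rename from kvmppc_lazy_ee_enable() to kvmppc_fix_ee_before_entry() describes what the helper does: it reconciles the lazy-EE bookkeeping, and now also the irq-disabled tracing state via trace_hardirqs_on(), with the fact that the guest will run hard-enabled. A rough sketch of the intended call order around guest entry; everything here except kvmppc_prepare_to_enter() and kvmppc_fix_ee_before_entry() is illustrative:

	/* Illustrative only, modeled on the booke/book3s entry loops. */
	local_irq_disable();
	r = kvmppc_prepare_to_enter(vcpu);
	if (r <= 0) {
		local_irq_enable();		/* bail back out to the host */
		return r;
	}
	kvmppc_fix_ee_before_entry();		/* fix lazy-EE + lockdep state */
	r = run_guest(vcpu);			/* hypothetical; hard-enables EE */
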
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8b2492644754..3fd2f1b6f906 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -138,11 +138,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev,
 #define	EVENT_PTR(_id, _suffix)		&EVENT_VAR(_id, _suffix).attr.attr
 
 #define	EVENT_ATTR(_name, _id, _suffix)					\
-	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id,	\
+	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id,	\
			power_events_sysfs_show)
 
 #define	GENERIC_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _g)
 #define	GENERIC_EVENT_PTR(_id)		EVENT_PTR(_id, _g)
 
-#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(PM_##_name, _id, _p)
+#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _p)
 #define	POWER_EVENT_PTR(_id)		EVENT_PTR(_id, _p)
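
Dropping PM_ from the token-pasting pairs with the new power7-events-list.h in this series: each event is declared once under its full PM_ name, and the sysfs macros now paste that name verbatim. Illustrative expansion, assuming the list provides PME_PM_CYC:

	GENERIC_EVENT_ATTR(cycles, PM_CYC);
	/* expands (roughly) to: */
	PMU_EVENT_ATTR(cycles, EVENT_VAR(PM_CYC, _g), PME_PM_CYC,
		       power_events_sysfs_show);
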
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index e6ec2cffba16..7d0c7f3a7171 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 /* cache lookup */
 struct device_node *of_find_next_cache_node(struct device_node *np);
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8207459efe56..d8958be5f31a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -454,6 +454,7 @@ int main(void)
	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
 #endif
+	DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
	DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
	DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
	DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index b20ff173a671..0adab06ce5c0 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -105,7 +105,7 @@ static int __init fail_iommu_debugfs(void)
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);
 
-	return PTR_RET(dir);
+	return PTR_ERR_OR_ZERO(dir);
 }
 late_initcall(fail_iommu_debugfs);
 
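
PTR_RET() is only being renamed here and in time.c below; PTR_ERR_OR_ZERO() says what it returns. For reference, its behaviour is equivalent to:

	static inline int ptr_err_or_zero(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}
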
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index eae0ee00ca25..905a24bb7acc 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1673,12 +1673,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
	/* Configure PCI Express settings */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
-		list_for_each_entry(child, &bus->children, node) {
-			struct pci_dev *self = child->self;
-			if (!self)
-				continue;
-			pcie_bus_configure_settings(child, self->pcie_mpss);
-		}
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
	}
 }
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d87e03fc8cfd..6bfcab97c981 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -893,49 +893,10 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	int hardid;
-	struct device_node *np;
-
-	hardid = get_hard_smp_processor_id(cpu);
-
-	for_each_node_by_type(np, "cpu") {
-		const __be32 *intserv;
-		unsigned int plen, t;
-
-		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
-		 * fallback to "reg" property and assume no threads
-		 */
-		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
-				&plen);
-		if (intserv == NULL) {
-			const __be32 *reg = of_get_property(np, "reg", NULL);
-			if (reg == NULL)
-				continue;
-			if (be32_to_cpup(reg) == hardid) {
-				if (thread)
-					*thread = 0;
-				return np;
-			}
-		} else {
-			plen /= sizeof(u32);
-			for (t = 0; t < plen; t++) {
-				if (hardid == be32_to_cpu(intserv[t])) {
-					if (thread)
-						*thread = t;
-					return np;
-				}
-			}
-		}
-	}
-	return NULL;
+	return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
-EXPORT_SYMBOL(of_get_cpu_node);
 
 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;
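
The forty-line walker deleted above moved into generic device-tree code, so powerpc now only supplies the physical-ID comparison. A simplified sketch of how the generic of_get_cpu_node() can use this hook (the real version also walks the ibm,ppc-interrupt-server#s thread lists):

	/* Hedged sketch of the generic matching loop this hook enables. */
	for_each_node_by_type(cpun, "cpu") {
		const __be32 *reg = of_get_property(cpun, "reg", NULL);

		if (reg && arch_match_cpu_phys_id(cpu, be32_to_cpup(reg)))
			return cpun;
	}
	return NULL;
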
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 45f2d1fac670..278ca93e1f28 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -232,6 +232,8 @@ void __init early_setup(unsigned long dt_ptr)
	/* Initialize the hash table or TLB handling */
	early_init_mmu();
 
+	kvm_cma_reserve();
+
	/*
	 * Reserve any gigantic pages requested on the command line.
	 * memblock needs to have been initialized by the time this is
@@ -624,8 +626,6 @@ void __init setup_arch(char **cmdline_p)
	/* Initialize the MMU context management stuff */
	mmu_context_init();
 
-	kvm_linear_init();
-
	/* Interrupt code needs to be 64K-aligned */
	if ((unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b2bcd34f72d2..192b051df97e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1049,7 +1049,7 @@ static int __init rtc_init(void)
 
	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
 
-	return PTR_RET(pdev);
+	return PTR_ERR_OR_ZERO(pdev);
 }
 
 module_init(rtc_init);
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index eb643f862579..ffaef2cb101a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -72,6 +72,7 @@ config KVM_BOOK3S_64_HV
	bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
	depends on KVM_BOOK3S_64
	select MMU_NOTIFIER
+	select CMA
	---help---
	  Support running unmodified book3s_64 guest kernels in
	  virtual machines on POWER7 and PPC970 processors that have
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 008cd856c5b5..6646c952c5e3 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -81,6 +81,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
	book3s_64_vio_hv.o \
	book3s_hv_ras.o \
	book3s_hv_builtin.o \
+	book3s_hv_cma.o \
	$(kvm-book3s_64-builtin-xics-objs-y)
 
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 739bfbadb85e..7e345e00661a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -182,10 +182,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
+	u64 v, r;
+	u64 v_val, v_mask;
+	u64 eaddr_mask;
	int i;
-	u8 key = 0;
+	u8 pp, key = 0;
	bool found = false;
-	int second = 0;
+	bool second = false;
	ulong mp_ea = vcpu->arch.magic_page_ea;
 
	/* Magic page override */
@@ -208,8 +211,16 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
		goto no_seg_found;
 
	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+	v_val = avpn & HPTE_V_AVPN;
+
	if (slbe->tb)
-		avpn |= SLB_VSID_B_1T;
+		v_val |= SLB_VSID_B_1T;
+	if (slbe->large)
+		v_val |= HPTE_V_LARGE;
+	v_val |= HPTE_V_VALID;
+
+	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
+		HPTE_V_SECONDARY;
 
 do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
@@ -227,91 +238,74 @@ do_second:
		key = 4;
 
	for (i=0; i<16; i+=2) {
-		u64 v = pteg[i];
-		u64 r = pteg[i+1];
-
-		/* Valid check */
-		if (!(v & HPTE_V_VALID))
-			continue;
-		/* Hash check */
-		if ((v & HPTE_V_SECONDARY) != second)
-			continue;
-
-		/* AVPN compare */
-		if (HPTE_V_COMPARE(avpn, v)) {
-			u8 pp = (r & HPTE_R_PP) | key;
-			int eaddr_mask = 0xFFF;
-
-			gpte->eaddr = eaddr;
-			gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
-								    eaddr,
-								    data);
-			if (slbe->large)
-				eaddr_mask = 0xFFFFFF;
-			gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
-			gpte->may_execute = ((r & HPTE_R_N) ? false : true);
-			gpte->may_read = false;
-			gpte->may_write = false;
-
-			switch (pp) {
-			case 0:
-			case 1:
-			case 2:
-			case 6:
-				gpte->may_write = true;
-				/* fall through */
-			case 3:
-			case 5:
-			case 7:
-				gpte->may_read = true;
-				break;
-			}
-
-			dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
-				"-> 0x%lx\n",
-				eaddr, avpn, gpte->vpage, gpte->raddr);
+		/* Check all relevant fields of 1st dword */
+		if ((pteg[i] & v_mask) == v_val) {
			found = true;
			break;
		}
	}
 
-	/* Update PTE R and C bits, so the guest's swapper knows we used the
-	 * page */
-	if (found) {
-		u32 oldr = pteg[i+1];
+	if (!found) {
+		if (second)
+			goto no_page_found;
+		v_val |= HPTE_V_SECONDARY;
+		second = true;
+		goto do_second;
+	}
 
-		if (gpte->may_read) {
-			/* Set the accessed flag */
-			pteg[i+1] |= HPTE_R_R;
-		}
-		if (gpte->may_write) {
-			/* Set the dirty flag */
-			pteg[i+1] |= HPTE_R_C;
-		} else {
-			dprintk("KVM: Mapping read-only page!\n");
-		}
+	v = pteg[i];
+	r = pteg[i+1];
+	pp = (r & HPTE_R_PP) | key;
+	eaddr_mask = 0xFFF;
+
+	gpte->eaddr = eaddr;
+	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+	if (slbe->large)
+		eaddr_mask = 0xFFFFFF;
+	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
+	gpte->may_read = false;
+	gpte->may_write = false;
+
+	switch (pp) {
+	case 0:
+	case 1:
+	case 2:
+	case 6:
+		gpte->may_write = true;
+		/* fall through */
+	case 3:
+	case 5:
+	case 7:
+		gpte->may_read = true;
+		break;
+	}
 
-		/* Write back into the PTEG */
-		if (pteg[i+1] != oldr)
-			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);
 
-		if (!gpte->may_read)
-			return -EPERM;
-		return 0;
-	} else {
-		dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
-			"ptegp=0x%lx)\n",
-			eaddr, to_book3s(vcpu)->sdr1, ptegp);
-		for (i = 0; i < 16; i += 2)
-			dprintk("   %02d: 0x%llx - 0x%llx (0x%llx)\n",
-				i, pteg[i], pteg[i+1], avpn);
+	/* Update PTE R and C bits, so the guest's swapper knows we used the
+	 * page */
+	if (gpte->may_read) {
+		/* Set the accessed flag */
+		r |= HPTE_R_R;
+	}
+	if (data && gpte->may_write) {
+		/* Set the dirty flag -- XXX even if not writing */
+		r |= HPTE_R_C;
+	}
 
-		if (!second) {
-			second = HPTE_V_SECONDARY;
-			goto do_second;
-		}
-	}
+	/* Write back into the PTEG */
+	if (pteg[i+1] != r) {
+		pteg[i+1] = r;
+		copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+	}
+
+	if (!gpte->may_read)
+		return -EPERM;
+	return 0;
 
 no_page_found:
	return -ENOENT;
 
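
The rework above replaces four per-slot tests (valid, primary/secondary hash, large page, AVPN) with one masked compare against a pattern built before the loop. Conceptually:

	/* Illustrative only: the single-compare form used by the loop. */
	static bool hpte_matches(u64 pte_v, u64 v_val, u64 v_mask)
	{
		/* v_mask selects the B, AVPN, LARGE, VALID and SECONDARY
		 * fields; v_val holds the values they must have. */
		return (pte_v & v_mask) == v_val;
	}
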
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 710d31317d81..043eec8461e7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,8 @@
 #include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 
+#include "book3s_hv_cma.h"
+
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970	63
 
@@ -52,8 +54,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
	unsigned long hpt;
	struct revmap_entry *rev;
-	struct kvmppc_linear_info *li;
-	long order = kvm_hpt_order;
+	struct page *page = NULL;
+	long order = KVM_DEFAULT_HPT_ORDER;
 
	if (htab_orderp) {
		order = *htab_orderp;
@@ -61,26 +63,23 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
			order = PPC_MIN_HPT_ORDER;
	}
 
+	kvm->arch.hpt_cma_alloc = 0;
	/*
-	 * If the user wants a different size from default,
	 * try first to allocate it from the kernel page allocator.
+	 * We keep the CMA reserved for failed allocation.
	 */
-	hpt = 0;
-	if (order != kvm_hpt_order) {
-		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
-				       __GFP_NOWARN, order - PAGE_SHIFT);
-		if (!hpt)
-			--order;
-	}
+	hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
			       __GFP_NOWARN, order - PAGE_SHIFT);
 
	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
-		li = kvm_alloc_hpt();
-		if (li) {
-			hpt = (ulong)li->base_virt;
-			kvm->arch.hpt_li = li;
-			order = kvm_hpt_order;
-		}
+		VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
+		page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+		if (page) {
+			hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+			kvm->arch.hpt_cma_alloc = 1;
+		} else
+			--order;
	}
 
	/* Lastly try successively smaller sizes from the page allocator */
@@ -118,8 +117,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
	return 0;
 
  out_freehpt:
-	if (kvm->arch.hpt_li)
-		kvm_release_hpt(kvm->arch.hpt_li);
+	if (kvm->arch.hpt_cma_alloc)
+		kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
@@ -165,8 +164,9 @@ void kvmppc_free_hpt(struct kvm *kvm)
 {
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
-	if (kvm->arch.hpt_li)
-		kvm_release_hpt(kvm->arch.hpt_li);
+	if (kvm->arch.hpt_cma_alloc)
+		kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
+				1 << (kvm->arch.hpt_order - PAGE_SHIFT));
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
@@ -1579,7 +1579,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
	ctx->first_pass = 1;
 
	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
-	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
+	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kvm_put_kvm(kvm);
		return ret;
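
Size arithmetic behind the new kvm_alloc_hpt() call, for reference (assuming 4K pages, PAGE_SHIFT == 12):

	/* order = 24 (KVM_DEFAULT_HPT_ORDER)  ->  HPT size = 1 << 24 = 16MB
	 * nr_pages = 1 << (24 - 12)           ->  4096 pages from CMA
	 * KVM_CMA_CHUNK_ORDER is 18, so the VM_BUG_ON() insists the request
	 * covers at least one 256KB CMA chunk; each retry halves the order. */
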
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index b2d3f3b2de72..54cf9bc94dad 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -136,7 +136,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
	mutex_unlock(&kvm->lock);
 
	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-				stt, O_RDWR);
+				stt, O_RDWR | O_CLOEXEC);
 
 fail:
	if (stt) {
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 1f6344c4408d..360ce68c9809 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -458,6 +458,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
+	case SPRN_DABR:
		break;
 unprivileged:
	default:
@@ -555,6 +556,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
+	case SPRN_DABR:
		*spr_val = 0;
		break;
	default:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e8d51cb76752..62a2b5ab08ed 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -680,13 +680,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
 {
	int i;
 
-	sregs->pvr = vcpu->arch.pvr;
-
	memset(sregs, 0, sizeof(struct kvm_sregs));
+	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
@@ -696,7 +695,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-                                  struct kvm_sregs *sregs)
+				  struct kvm_sregs *sregs)
 {
	int i, j;
 
@@ -1511,10 +1510,10 @@ static inline int lpcr_rmls(unsigned long rma_size)
 
 static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;
+	struct kvm_rma_info *ri = vma->vm_file->private_data;
 
-	if (vmf->pgoff >= ri->npages)
+	if (vmf->pgoff >= kvm_rma_pages)
		return VM_FAULT_SIGBUS;
 
	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
@@ -1536,7 +1535,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int kvm_rma_release(struct inode *inode, struct file *filp)
 {
-	struct kvmppc_linear_info *ri = filp->private_data;
+	struct kvm_rma_info *ri = filp->private_data;
 
	kvm_release_rma(ri);
	return 0;
@@ -1549,18 +1548,27 @@ static const struct file_operations kvm_rma_fops = {
 
 long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
 {
-	struct kvmppc_linear_info *ri;
	long fd;
+	struct kvm_rma_info *ri;
+	/*
+	 * Only do this on PPC970 in HV mode
+	 */
+	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+	    !cpu_has_feature(CPU_FTR_ARCH_201))
+		return -EINVAL;
+
+	if (!kvm_rma_pages)
+		return -EINVAL;
 
	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;
 
-	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
+	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kvm_release_rma(ri);
 
-	ret->rma_size = ri->npages << PAGE_SHIFT;
+	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
	return fd;
 }
 
@@ -1725,7 +1733,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
-	struct kvmppc_linear_info *ri = NULL;
+	struct kvm_rma_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
@@ -1803,7 +1811,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 
	} else {
		/* Set up to use an RMO region */
-		rma_size = ri->npages;
+		rma_size = kvm_rma_pages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
@@ -1831,14 +1839,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
		/* POWER7 */
		lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
		lpcr |= rmls << LPCR_RMLS_SH;
-		kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+		kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
	}
	kvm->arch.lpcr = lpcr;
	pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
		ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 
	/* Initialize phys addrs of pages in RMO */
-	npages = ri->npages;
+	npages = kvm_rma_pages;
	porder = __ilog2(npages);
	physp = memslot->arch.slot_phys;
	if (physp) {
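
With the PPC970 feature check, the RMA allocation path is now explicitly a legacy interface; POWER7 hosts get -EINVAL and use the VRMA instead. For reference, a hedged sketch of the documented userspace flow that consumes this ioctl:

	/* KVM_ALLOCATE_RMA returns an fd whose pages back the guest's real
	 * mode area once mmap()ed; error handling is illustrative only. */
	struct kvm_allocate_rma rma;
	int rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);

	if (rma_fd >= 0)
		mem = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, rma_fd, 0);
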
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index ec0a9e5de100..8cd0daebb82d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -13,33 +13,34 @@
 #include <linux/spinlock.h>
 #include <linux/bootmem.h>
 #include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
 
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
-#define KVM_LINEAR_RMA		0
-#define KVM_LINEAR_HPT		1
-
-static void __init kvm_linear_init_one(ulong size, int count, int type);
-static struct kvmppc_linear_info *kvm_alloc_linear(int type);
-static void kvm_release_linear(struct kvmppc_linear_info *ri);
-
-int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
-EXPORT_SYMBOL_GPL(kvm_hpt_order);
-
-/*************** RMA *************/
-
+#include "book3s_hv_cma.h"
+/*
+ * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
+ * should be power of 2.
+ */
+#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
+/*
+ * By default we reserve 5% of memory for hash pagetable allocation.
+ */
+static unsigned long kvm_cma_resv_ratio = 5;
 /*
- * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
  * Each RMA has to be physically contiguous and of a size that the
  * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
  * and other larger sizes.  Since we are unlikely to be allocate that
  * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot for KVM to use.
+ * we preallocate a set of RMAs in early boot using CMA.
+ * should be power of 2.
  */
-static unsigned long kvm_rma_size = 64 << 20;	/* 64MB */
-static unsigned long kvm_rma_count;
+unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
+EXPORT_SYMBOL_GPL(kvm_rma_pages);
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
@@ -69,165 +70,114 @@ static inline int lpcr_rmls(unsigned long rma_size)
 
 static int __init early_parse_rma_size(char *p)
 {
-	if (!p)
-		return 1;
+	unsigned long kvm_rma_size;
 
+	pr_debug("%s(%s)\n", __func__, p);
+	if (!p)
+		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
-
+	/*
+	 * Check that the requested size is one supported in hardware
+	 */
+	if (lpcr_rmls(kvm_rma_size) < 0) {
+		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+		return -EINVAL;
+	}
+	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
 }
 early_param("kvm_rma_size", early_parse_rma_size);
 
-static int __init early_parse_rma_count(char *p)
+struct kvm_rma_info *kvm_alloc_rma()
 {
-	if (!p)
-		return 1;
+	struct page *page;
+	struct kvm_rma_info *ri;
 
-	kvm_rma_count = simple_strtoul(p, NULL, 0);
-
-	return 0;
-}
-early_param("kvm_rma_count", early_parse_rma_count);
-
-struct kvmppc_linear_info *kvm_alloc_rma(void)
-{
-	return kvm_alloc_linear(KVM_LINEAR_RMA);
+	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
+	if (!ri)
+		return NULL;
+	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
+	if (!page)
+		goto err_out;
+	atomic_set(&ri->use_count, 1);
+	ri->base_pfn = page_to_pfn(page);
+	return ri;
+err_out:
+	kfree(ri);
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_rma);
 
-void kvm_release_rma(struct kvmppc_linear_info *ri)
+void kvm_release_rma(struct kvm_rma_info *ri)
 {
-	kvm_release_linear(ri);
+	if (atomic_dec_and_test(&ri->use_count)) {
+		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
+		kfree(ri);
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_release_rma);
 
-/*************** HPT *************/
-
-/*
- * This maintains a list of big linear HPT tables that contain the GVA->HPA
- * memory mappings. If we don't reserve those early on, we might not be able
- * to get a big (usually 16MB) linear memory region from the kernel anymore.
- */
-
-static unsigned long kvm_hpt_count;
-
-static int __init early_parse_hpt_count(char *p)
+static int __init early_parse_kvm_cma_resv(char *p)
 {
+	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
-		return 1;
-
-	kvm_hpt_count = simple_strtoul(p, NULL, 0);
-
-	return 0;
+		return -EINVAL;
+	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
 }
-early_param("kvm_hpt_count", early_parse_hpt_count);
+early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
 
-struct kvmppc_linear_info *kvm_alloc_hpt(void)
+struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
-	return kvm_alloc_linear(KVM_LINEAR_HPT);
+	unsigned long align_pages = HPT_ALIGN_PAGES;
+
+	/* Old CPUs require HPT aligned on a multiple of its size */
+	if (!cpu_has_feature(CPU_FTR_ARCH_206))
+		align_pages = nr_pages;
+	return kvm_alloc_cma(nr_pages, align_pages);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
-void kvm_release_hpt(struct kvmppc_linear_info *li)
+void kvm_release_hpt(struct page *page, unsigned long nr_pages)
 {
-	kvm_release_linear(li);
+	kvm_release_cma(page, nr_pages);
 }
 EXPORT_SYMBOL_GPL(kvm_release_hpt);
 
-/*************** generic *************/
-
-static LIST_HEAD(free_linears);
-static DEFINE_SPINLOCK(linear_lock);
-
-static void __init kvm_linear_init_one(ulong size, int count, int type)
-{
-	unsigned long i;
-	unsigned long j, npages;
-	void *linear;
-	struct page *pg;
-	const char *typestr;
-	struct kvmppc_linear_info *linear_info;
-
-	if (!count)
-		return;
-
-	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
-
-	npages = size >> PAGE_SHIFT;
-	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
-	for (i = 0; i < count; ++i) {
-		linear = alloc_bootmem_align(size, size);
-		pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
-			 size >> 20);
-		linear_info[i].base_virt = linear;
-		linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
-		linear_info[i].npages = npages;
-		linear_info[i].type = type;
-		list_add_tail(&linear_info[i].list, &free_linears);
-		atomic_set(&linear_info[i].use_count, 0);
-
-		pg = pfn_to_page(linear_info[i].base_pfn);
-		for (j = 0; j < npages; ++j) {
-			atomic_inc(&pg->_count);
-			++pg;
-		}
-	}
-}
-
-static struct kvmppc_linear_info *kvm_alloc_linear(int type)
-{
-	struct kvmppc_linear_info *ri, *ret;
-
-	ret = NULL;
-	spin_lock(&linear_lock);
-	list_for_each_entry(ri, &free_linears, list) {
-		if (ri->type != type)
-			continue;
-
-		list_del(&ri->list);
-		atomic_inc(&ri->use_count);
-		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-		ret = ri;
-		break;
-	}
-	spin_unlock(&linear_lock);
-	return ret;
-}
-
-static void kvm_release_linear(struct kvmppc_linear_info *ri)
-{
-	if (atomic_dec_and_test(&ri->use_count)) {
-		spin_lock(&linear_lock);
-		list_add_tail(&ri->list, &free_linears);
-		spin_unlock(&linear_lock);
-	}
-}
-
-/*
- * Called at boot time while the bootmem allocator is active,
- * to allocate contiguous physical memory for the hash page
- * tables for guests.
+/**
+ * kvm_cma_reserve() - reserve area for kvm hash pagetable
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
  */
-void __init kvm_linear_init(void)
+void __init kvm_cma_reserve(void)
 {
-	/* HPT */
-	kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
-
-	/* RMA */
-	/* Only do this on PPC970 in HV mode */
-	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-	    !cpu_has_feature(CPU_FTR_ARCH_201))
-		return;
-
-	if (!kvm_rma_size || !kvm_rma_count)
-		return;
-
-	/* Check that the requested size is one supported in hardware */
-	if (lpcr_rmls(kvm_rma_size) < 0) {
-		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
-		return;
+	unsigned long align_size;
+	struct memblock_region *reg;
+	phys_addr_t selected_size = 0;
+	/*
+	 * We cannot use memblock_phys_mem_size() here, because
+	 * memblock_analyze() has not been called yet.
+	 */
+	for_each_memblock(memory, reg)
+		selected_size += memblock_region_memory_end_pfn(reg) -
+				 memblock_region_memory_base_pfn(reg);
+
+	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
+	if (selected_size) {
+		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+			 (unsigned long)selected_size / SZ_1M);
+		/*
+		 * Old CPUs require HPT aligned on a multiple of its size. So for them
+		 * make the alignment as max size we could request.
+		 */
+		if (!cpu_has_feature(CPU_FTR_ARCH_206))
+			align_size = __rounddown_pow_of_two(selected_size);
+		else
+			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
+
+		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+		kvm_cma_declare_contiguous(selected_size, align_size);
	}
-
-	kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
 }
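
A worked example of the reservation policy above, assuming a 32GB POWER7 host with 4K pages:

	/* selected_size = 32GB * 5 / 100                   = ~1.6GB
	 * POWER7 (ARCH_206): align_size = 256KB, then
	 * align_size = max(128MB RMA span, 256KB)          = 128MB
	 * PPC970: align_size = rounddown_pow_of_two(1.6GB) = 1GB,
	 * because old CPUs need the HPT aligned to a multiple of its size. */
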
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
new file mode 100644
index 000000000000..d9d3d8553d51
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -0,0 +1,240 @@
+/*
+ * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
+ * for DMA mapping framework
+ *
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your optional) any later version of the license.
+ *
+ */
+#define pr_fmt(fmt) "kvm_cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <linux/memblock.h>
+#include <linux/mutex.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "book3s_hv_cma.h"
+
+struct kvm_cma {
+	unsigned long	base_pfn;
+	unsigned long	count;
+	unsigned long	*bitmap;
+};
+
+static DEFINE_MUTEX(kvm_cma_mutex);
+static struct kvm_cma kvm_cma_area;
+
+/**
+ * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
+ *                                for kvm hash pagetable
+ * @size:  Size of the reserved memory.
+ * @alignment:  Alignment for the contiguous memory area
+ *
+ * This function reserves memory for kvm cma area. It should be
+ * called by arch code when early allocator (memblock or bootmem)
+ * is still activate.
+ */
+long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
+{
+	long base_pfn;
+	phys_addr_t addr;
+	struct kvm_cma *cma = &kvm_cma_area;
+
+	pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);
+
+	if (!size)
+		return -EINVAL;
+	/*
+	 * Sanitise input arguments.
+	 * We should be pageblock aligned for CMA.
+	 */
+	alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
+	size = ALIGN(size, alignment);
+	/*
+	 * Reserve memory
+	 * Use __memblock_alloc_base() since
+	 * memblock_alloc_base() panic()s.
+	 */
+	addr = __memblock_alloc_base(size, alignment, 0);
+	if (!addr) {
+		base_pfn = -ENOMEM;
+		goto err;
+	} else
+		base_pfn = PFN_DOWN(addr);
+
+	/*
+	 * Each reserved area must be initialised later, when more kernel
+	 * subsystems (like slab allocator) are available.
+	 */
+	cma->base_pfn = base_pfn;
+	cma->count = size >> PAGE_SHIFT;
+	pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
+	return 0;
+err:
+	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+	return base_pfn;
+}
+
+/**
+ * kvm_alloc_cma() - allocate pages from contiguous area
+ * @nr_pages: Requested number of pages.
+ * @align_pages: Requested alignment in number of pages
+ *
+ * This function allocates memory buffer for hash pagetable.
+ */
+struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
+{
+	int ret;
+	struct page *page = NULL;
+	struct kvm_cma *cma = &kvm_cma_area;
+	unsigned long chunk_count, nr_chunk;
+	unsigned long mask, pfn, pageno, start = 0;
+
+
+	if (!cma || !cma->count)
+		return NULL;
+
+	pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
+		 (void *)cma, nr_pages, align_pages);
+
+	if (!nr_pages)
+		return NULL;
+	/*
+	 * align mask with chunk size. The bit tracks pages in chunk size
+	 */
+	VM_BUG_ON(!is_power_of_2(align_pages));
+	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
+
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
+	mutex_lock(&kvm_cma_mutex);
+	for (;;) {
+		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+						    start, nr_chunk, mask);
+		if (pageno >= chunk_count)
+			break;
+
+		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
+		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
+		if (ret == 0) {
+			bitmap_set(cma->bitmap, pageno, nr_chunk);
+			page = pfn_to_page(pfn);
+			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
+			break;
+		} else if (ret != -EBUSY) {
+			break;
+		}
+		pr_debug("%s(): memory range at %p is busy, retrying\n",
+			 __func__, pfn_to_page(pfn));
+		/* try again with a bit different memory target */
+		start = pageno + mask + 1;
+	}
+	mutex_unlock(&kvm_cma_mutex);
+	pr_debug("%s(): returned %p\n", __func__, page);
+	return page;
+}
+
+/**
+ * kvm_release_cma() - release allocated pages for hash pagetable
+ * @pages: Allocated pages.
+ * @nr_pages: Number of allocated pages.
+ *
+ * This function releases memory allocated by kvm_alloc_cma().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
+{
+	unsigned long pfn;
+	unsigned long nr_chunk;
+	struct kvm_cma *cma = &kvm_cma_area;
+
+	if (!cma || !pages)
+		return false;
+
+	pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);
+
+	pfn = page_to_pfn(pages);
+
+	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+		return false;
+
+	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
+	mutex_lock(&kvm_cma_mutex);
+	bitmap_clear(cma->bitmap,
+		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+		     nr_chunk);
+	free_contig_range(pfn, nr_pages);
+	mutex_unlock(&kvm_cma_mutex);
+
+	return true;
+}
+
+static int __init kvm_cma_activate_area(unsigned long base_pfn,
+					unsigned long count)
+{
+	unsigned long pfn = base_pfn;
+	unsigned i = count >> pageblock_order;
+	struct zone *zone;
+
+	WARN_ON_ONCE(!pfn_valid(pfn));
+	zone = page_zone(pfn_to_page(pfn));
+	do {
+		unsigned j;
+		base_pfn = pfn;
+		for (j = pageblock_nr_pages; j; --j, pfn++) {
+			WARN_ON_ONCE(!pfn_valid(pfn));
+			/*
+			 * alloc_contig_range requires the pfn range
+			 * specified to be in the same zone. Make this
+			 * simple by forcing the entire CMA resv range
+			 * to be in the same zone.
+			 */
+			if (page_zone(pfn_to_page(pfn)) != zone)
+				return -EINVAL;
+		}
+		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+	} while (--i);
+	return 0;
+}
+
+static int __init kvm_cma_init_reserved_areas(void)
+{
+	int bitmap_size, ret;
+	unsigned long chunk_count;
+	struct kvm_cma *cma = &kvm_cma_area;
+
+	pr_debug("%s()\n", __func__);
+	if (!cma->count)
+		return 0;
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
+	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!cma->bitmap)
+		return -ENOMEM;
+
+	ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
+	if (ret)
+		goto error;
+	return 0;
+
+error:
+	kfree(cma->bitmap);
+	return ret;
+}
+core_initcall(kvm_cma_init_reserved_areas);
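
The bitmap above tracks 256KB chunks rather than pages, so every conversion shifts by KVM_CMA_CHUNK_ORDER - PAGE_SHIFT. Reference arithmetic, assuming 4K pages:

	/* one bitmap bit = 1 << (18 - 12) = 64 pages = 256KB
	 * nr_chunk = nr_pages >> 6
	 * mask     = (align_pages >> 6) - 1    (alignment in chunks)
	 * e.g. a 16MB HPT: nr_pages = 4096 -> 64 bits set in the bitmap. */
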
diff --git a/arch/powerpc/kvm/book3s_hv_cma.h b/arch/powerpc/kvm/book3s_hv_cma.h
new file mode 100644
index 000000000000..655144f75fa5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.h
@@ -0,0 +1,27 @@
+/*
+ * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
+ * for DMA mapping framework
+ *
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your optional) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_CMA_ALLOC_H__
+#define __POWERPC_KVM_CMA_ALLOC_H__
+/*
+ * Both RMA and Hash page allocation will be multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER	18
+
+extern struct page *kvm_alloc_cma(unsigned long nr_pages,
+				  unsigned long align_pages);
+extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
+extern long kvm_cma_declare_contiguous(phys_addr_t size,
+				       phys_addr_t alignment) __init;
+#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index c3785d4aeed7..9c515440ad1a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -387,6 +387,80 @@ static inline int try_lock_tlbie(unsigned int *lock)
	return old == 0;
 }
 
+/*
+ * tlbie/tlbiel is a bit different on the PPC970 compared to later
+ * processors such as POWER7; the large page bit is in the instruction
+ * not RB, and the top 16 bits and the bottom 12 bits of the VA
+ * in RB must be 0.
+ */
+static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
+			  long npages, int global, bool need_sync)
+{
+	long i;
+
+	if (global) {
+		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+			cpu_relax();
+		if (need_sync)
+			asm volatile("ptesync" : : : "memory");
+		for (i = 0; i < npages; ++i) {
+			unsigned long rb = rbvalues[i];
+
+			if (rb & 1)		/* large page */
+				asm volatile("tlbie %0,1" : :
+					     "r" (rb & 0x0000fffffffff000ul));
+			else
+				asm volatile("tlbie %0,0" : :
+					     "r" (rb & 0x0000fffffffff000ul));
+		}
+		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+		kvm->arch.tlbie_lock = 0;
+	} else {
+		if (need_sync)
+			asm volatile("ptesync" : : : "memory");
+		for (i = 0; i < npages; ++i) {
+			unsigned long rb = rbvalues[i];
+
+			if (rb & 1)		/* large page */
+				asm volatile("tlbiel %0,1" : :
+					     "r" (rb & 0x0000fffffffff000ul));
+			else
+				asm volatile("tlbiel %0,0" : :
+					     "r" (rb & 0x0000fffffffff000ul));
+		}
+		asm volatile("ptesync" : : : "memory");
+	}
+}
+
+static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+		      long npages, int global, bool need_sync)
+{
+	long i;
+
+	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+		/* PPC970 tlbie instruction is a bit different */
+		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
+		return;
+	}
+	if (global) {
+		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+			cpu_relax();
+		if (need_sync)
+			asm volatile("ptesync" : : : "memory");
+		for (i = 0; i < npages; ++i)
+			asm volatile(PPC_TLBIE(%1,%0) : :
+				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
+		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+		kvm->arch.tlbie_lock = 0;
+	} else {
+		if (need_sync)
+			asm volatile("ptesync" : : : "memory");
+		for (i = 0; i < npages; ++i)
+			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+		asm volatile("ptesync" : : : "memory");
+	}
+}
+
 long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
@@ -412,19 +486,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
-		if (global_invalidates(kvm, flags)) {
-			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
-				cpu_relax();
-			asm volatile("ptesync" : : : "memory");
-			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
-				     : : "r" (rb), "r" (kvm->arch.lpid));
-			asm volatile("ptesync" : : : "memory");
-			kvm->arch.tlbie_lock = 0;
-		} else {
-			asm volatile("ptesync" : : : "memory");
-			asm volatile("tlbiel %0" : : "r" (rb));
-			asm volatile("ptesync" : : : "memory");
-		}
+		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
@@ -452,12 +514,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
-	long int local = 0;
+	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
 
-	if (atomic_read(&kvm->online_vcpus) == 1)
-		local = 1;
+	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
462 n = 0; 523 n = 0;
463 for (; i < 4; ++i) { 524 for (; i < 4; ++i) {
@@ -533,22 +594,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
533 break; 594 break;
534 595
535 /* Now that we've collected a batch, do the tlbies */ 596 /* Now that we've collected a batch, do the tlbies */
536 if (!local) { 597 do_tlbies(kvm, tlbrb, n, global, true);
537 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
538 cpu_relax();
539 asm volatile("ptesync" : : : "memory");
540 for (k = 0; k < n; ++k)
541 asm volatile(PPC_TLBIE(%1,%0) : :
542 "r" (tlbrb[k]),
543 "r" (kvm->arch.lpid));
544 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
545 kvm->arch.tlbie_lock = 0;
546 } else {
547 asm volatile("ptesync" : : : "memory");
548 for (k = 0; k < n; ++k)
549 asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
550 asm volatile("ptesync" : : : "memory");
551 }
552 598
553 /* Read PTE low words after tlbie to get final R/C values */ 599 /* Read PTE low words after tlbie to get final R/C values */
554 for (k = 0; k < n; ++k) { 600 for (k = 0; k < n; ++k) {
@@ -607,19 +653,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
607 if (v & HPTE_V_VALID) { 653 if (v & HPTE_V_VALID) {
608 rb = compute_tlbie_rb(v, r, pte_index); 654 rb = compute_tlbie_rb(v, r, pte_index);
609 hpte[0] = v & ~HPTE_V_VALID; 655 hpte[0] = v & ~HPTE_V_VALID;
610 if (global_invalidates(kvm, flags)) { 656 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
611 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
612 cpu_relax();
613 asm volatile("ptesync" : : : "memory");
614 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
615 : : "r" (rb), "r" (kvm->arch.lpid));
616 asm volatile("ptesync" : : : "memory");
617 kvm->arch.tlbie_lock = 0;
618 } else {
619 asm volatile("ptesync" : : : "memory");
620 asm volatile("tlbiel %0" : : "r" (rb));
621 asm volatile("ptesync" : : : "memory");
622 }
623 /* 657 /*
624 * If the host has this page as readonly but the guest 658 * If the host has this page as readonly but the guest
625 * wants to make it read/write, reduce the permissions. 659 * wants to make it read/write, reduce the permissions.
@@ -690,13 +724,7 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
690 724
691 hptep[0] &= ~HPTE_V_VALID; 725 hptep[0] &= ~HPTE_V_VALID;
692 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); 726 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
693 while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) 727 do_tlbies(kvm, &rb, 1, 1, true);
694 cpu_relax();
695 asm volatile("ptesync" : : : "memory");
696 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
697 : : "r" (rb), "r" (kvm->arch.lpid));
698 asm volatile("ptesync" : : : "memory");
699 kvm->arch.tlbie_lock = 0;
700} 728}
701EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); 729EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
702 730
@@ -710,12 +738,7 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
710 rbyte = (hptep[1] & ~HPTE_R_R) >> 8; 738 rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
711 /* modify only the second-last byte, which contains the ref bit */ 739 /* modify only the second-last byte, which contains the ref bit */
712 *((char *)hptep + 14) = rbyte; 740 *((char *)hptep + 14) = rbyte;
713 while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) 741 do_tlbies(kvm, &rb, 1, 1, false);
714 cpu_relax();
715 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
716 : : "r" (rb), "r" (kvm->arch.lpid));
717 asm volatile("ptesync" : : : "memory");
718 kvm->arch.tlbie_lock = 0;
719} 742}
720EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte); 743EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
721 744
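With the two helpers in place, every invalidation site in this file reduces to a single call; the call shapes, condensed from the hunks above, are:

	/* one HPTE; global vs. local decided per-VM */
	rb = compute_tlbie_rb(v, r, pte_index);
	do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);

	/* a batch of up to four, as in kvmppc_h_bulk_remove() */
	do_tlbies(kvm, tlbrb, n, global, true);

	/* reference-bit clearing passes need_sync = false, skipping the
	 * leading ptesync just as the old open-coded sequence did */
	do_tlbies(kvm, &rb, 1, 1, false);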
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b93e3cd8bf2b..294b7af28cdd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1393,7 +1393,7 @@ hcall_try_real_mode:
1393 cmpldi r3,hcall_real_table_end - hcall_real_table 1393 cmpldi r3,hcall_real_table_end - hcall_real_table
1394 bge guest_exit_cont 1394 bge guest_exit_cont
1395 LOAD_REG_ADDR(r4, hcall_real_table) 1395 LOAD_REG_ADDR(r4, hcall_real_table)
1396 lwzx r3,r3,r4 1396 lwax r3,r3,r4
1397 cmpwi r3,0 1397 cmpwi r3,0
1398 beq guest_exit_cont 1398 beq guest_exit_cont
1399 add r3,r3,r4 1399 add r3,r3,r4
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 48cbbf862958..17cfae5497a3 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -92,6 +92,11 @@ kvm_start_lightweight:
92 PPC_LL r3, VCPU_HFLAGS(r4) 92 PPC_LL r3, VCPU_HFLAGS(r4)
93 rldicl r3, r3, 0, 63 /* r3 &= 1 */ 93 rldicl r3, r3, 0, 63 /* r3 &= 1 */
94 stb r3, HSTATE_RESTORE_HID5(r13) 94 stb r3, HSTATE_RESTORE_HID5(r13)
95
96 /* Load up guest SPRG3 value, since it's user readable */
97 ld r3, VCPU_SHARED(r4)
98 ld r3, VCPU_SHARED_SPRG3(r3)
99 mtspr SPRN_SPRG3, r3
95#endif /* CONFIG_PPC_BOOK3S_64 */ 100#endif /* CONFIG_PPC_BOOK3S_64 */
96 101
97 PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */ 102 PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
@@ -123,6 +128,15 @@ kvmppc_handler_highmem:
123 /* R7 = vcpu */ 128 /* R7 = vcpu */
124 PPC_LL r7, GPR4(r1) 129 PPC_LL r7, GPR4(r1)
125 130
131#ifdef CONFIG_PPC_BOOK3S_64
132 /*
133 * Reload kernel SPRG3 value.
134 * No need to save guest value as usermode can't modify SPRG3.
135 */
136 ld r3, PACA_SPRG3(r13)
137 mtspr SPRN_SPRG3, r3
138#endif /* CONFIG_PPC_BOOK3S_64 */
139
126 PPC_STL r14, VCPU_GPR(R14)(r7) 140 PPC_STL r14, VCPU_GPR(R14)(r7)
127 PPC_STL r15, VCPU_GPR(R15)(r7) 141 PPC_STL r15, VCPU_GPR(R15)(r7)
128 PPC_STL r16, VCPU_GPR(R16)(r7) 142 PPC_STL r16, VCPU_GPR(R16)(r7)
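The guest SPRG3 value has to be installed on entry because SPRG3 is readable from problem state (the 64-bit VDSO fetches per-cpu data through it), so a guest's userspace would otherwise observe the host value. A hedged sketch of such a user-mode read, assuming the user-readable alias sits at SPR 259 (USPRG3):

static inline unsigned long read_sprg3(void)
{
	unsigned long val;

	/* assumption: mfspr 259 is the problem-state SPRG3 alias */
	asm volatile("mfspr %0, 259" : "=r" (val));
	return val;
}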
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c6e13d9a9e15..27db1e665959 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -468,7 +468,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
468 * both the traditional FP registers and the added VSX 468 * both the traditional FP registers and the added VSX
469 * registers into thread.fpr[]. 469 * registers into thread.fpr[].
470 */ 470 */
471 giveup_fpu(current); 471 if (current->thread.regs->msr & MSR_FP)
472 giveup_fpu(current);
472 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 473 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
473 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; 474 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
474 475
@@ -483,7 +484,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
483 484
484#ifdef CONFIG_ALTIVEC 485#ifdef CONFIG_ALTIVEC
485 if (msr & MSR_VEC) { 486 if (msr & MSR_VEC) {
486 giveup_altivec(current); 487 if (current->thread.regs->msr & MSR_VEC)
488 giveup_altivec(current);
487 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); 489 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
488 vcpu->arch.vscr = t->vscr; 490 vcpu->arch.vscr = t->vscr;
489 } 491 }
@@ -575,8 +577,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
575 printk(KERN_INFO "Loading up ext 0x%lx\n", msr); 577 printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
576#endif 578#endif
577 579
578 current->thread.regs->msr |= msr;
579
580 if (msr & MSR_FP) { 580 if (msr & MSR_FP) {
581 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 581 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
582 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; 582 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
@@ -598,12 +598,32 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
598#endif 598#endif
599 } 599 }
600 600
601 current->thread.regs->msr |= msr;
601 vcpu->arch.guest_owned_ext |= msr; 602 vcpu->arch.guest_owned_ext |= msr;
602 kvmppc_recalc_shadow_msr(vcpu); 603 kvmppc_recalc_shadow_msr(vcpu);
603 604
604 return RESUME_GUEST; 605 return RESUME_GUEST;
605} 606}
606 607
608/*
609 * Kernel code using FP or VMX could have flushed guest state to
610 * the thread_struct; if so, get it back now.
611 */
612static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
613{
614 unsigned long lost_ext;
615
616 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
617 if (!lost_ext)
618 return;
619
620 if (lost_ext & MSR_FP)
621 kvmppc_load_up_fpu();
622 if (lost_ext & MSR_VEC)
623 kvmppc_load_up_altivec();
624 current->thread.regs->msr |= lost_ext;
625}
626
607int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 627int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
608 unsigned int exit_nr) 628 unsigned int exit_nr)
609{ 629{
@@ -772,7 +792,7 @@ program_interrupt:
772 } 792 }
773 case BOOK3S_INTERRUPT_SYSCALL: 793 case BOOK3S_INTERRUPT_SYSCALL:
774 if (vcpu->arch.papr_enabled && 794 if (vcpu->arch.papr_enabled &&
775 (kvmppc_get_last_inst(vcpu) == 0x44000022) && 795 (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
776 !(vcpu->arch.shared->msr & MSR_PR)) { 796 !(vcpu->arch.shared->msr & MSR_PR)) {
777 /* SC 1 papr hypercalls */ 797 /* SC 1 papr hypercalls */
778 ulong cmd = kvmppc_get_gpr(vcpu, 3); 798 ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -890,8 +910,9 @@ program_interrupt:
890 local_irq_enable(); 910 local_irq_enable();
891 r = s; 911 r = s;
892 } else { 912 } else {
893 kvmppc_lazy_ee_enable(); 913 kvmppc_fix_ee_before_entry();
894 } 914 }
915 kvmppc_handle_lost_ext(vcpu);
895 } 916 }
896 917
897 trace_kvm_book3s_reenter(r, vcpu); 918 trace_kvm_book3s_reenter(r, vcpu);
@@ -1162,7 +1183,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1162 if (vcpu->arch.shared->msr & MSR_FP) 1183 if (vcpu->arch.shared->msr & MSR_FP)
1163 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 1184 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1164 1185
1165 kvmppc_lazy_ee_enable(); 1186 kvmppc_fix_ee_before_entry();
1166 1187
1167 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 1188 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
1168 1189
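kvmppc_handle_lost_ext() is plain MSR bookkeeping: any facility the guest owns that is no longer live in the host MSR must be reloaded before re-entry. An illustrative standalone sketch (bit positions follow the usual ppc64 MSR layout and are shown here only for the example):

#include <stdio.h>

#define MSR_FP	(1UL << 13)
#define MSR_VEC	(1UL << 25)

int main(void)
{
	unsigned long guest_owned_ext = MSR_FP | MSR_VEC;
	unsigned long host_msr = MSR_FP;	/* kernel code flushed VMX */
	unsigned long lost = guest_owned_ext & ~host_msr;

	if (lost & MSR_FP)
		printf("reload FPU\n");		/* kvmppc_load_up_fpu() */
	if (lost & MSR_VEC)
		printf("reload AltiVec\n");	/* kvmppc_load_up_altivec() */
	return 0;
}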
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd46b83d..a3a5cb8ee7ea 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
19#include <asm/hvcall.h> 19#include <asm/hvcall.h>
20#include <asm/xics.h> 20#include <asm/xics.h>
21#include <asm/debug.h> 21#include <asm/debug.h>
22#include <asm/time.h>
22 23
23#include <linux/debugfs.h> 24#include <linux/debugfs.h>
24#include <linux/seq_file.h> 25#include <linux/seq_file.h>
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index dcc94f016007..17722d82f1d1 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -674,8 +674,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
674 goto out; 674 goto out;
675 } 675 }
676 676
677 kvm_guest_enter();
678
679#ifdef CONFIG_PPC_FPU 677#ifdef CONFIG_PPC_FPU
680 /* Save userspace FPU state in stack */ 678 /* Save userspace FPU state in stack */
681 enable_kernel_fp(); 679 enable_kernel_fp();
@@ -698,7 +696,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
698 kvmppc_load_guest_fp(vcpu); 696 kvmppc_load_guest_fp(vcpu);
699#endif 697#endif
700 698
701 kvmppc_lazy_ee_enable(); 699 kvmppc_fix_ee_before_entry();
702 700
703 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 701 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
704 702
@@ -1168,7 +1166,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1168 local_irq_enable(); 1166 local_irq_enable();
1169 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 1167 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
1170 } else { 1168 } else {
1171 kvmppc_lazy_ee_enable(); 1169 kvmppc_fix_ee_before_entry();
1172 } 1170 }
1173 } 1171 }
1174 1172
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6316ee336e88..07c0106fab76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -117,8 +117,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
117 kvm_guest_exit(); 117 kvm_guest_exit();
118 continue; 118 continue;
119 } 119 }
120
121 trace_hardirqs_on();
122#endif 120#endif
123 121
124 kvm_guest_enter(); 122 kvm_guest_enter();
@@ -420,6 +418,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
420 return kvmppc_core_create_memslot(slot, npages); 418 return kvmppc_core_create_memslot(slot, npages);
421} 419}
422 420
421void kvm_arch_memslots_updated(struct kvm *kvm)
422{
423}
424
423int kvm_arch_prepare_memory_region(struct kvm *kvm, 425int kvm_arch_prepare_memory_region(struct kvm *kvm,
424 struct kvm_memory_slot *memslot, 426 struct kvm_memory_slot *memslot,
425 struct kvm_userspace_memory_region *mem, 427 struct kvm_userspace_memory_region *mem,
@@ -823,39 +825,39 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
823#endif 825#endif
824#ifdef CONFIG_KVM_MPIC 826#ifdef CONFIG_KVM_MPIC
825 case KVM_CAP_IRQ_MPIC: { 827 case KVM_CAP_IRQ_MPIC: {
826 struct file *filp; 828 struct fd f;
827 struct kvm_device *dev; 829 struct kvm_device *dev;
828 830
829 r = -EBADF; 831 r = -EBADF;
830 filp = fget(cap->args[0]); 832 f = fdget(cap->args[0]);
831 if (!filp) 833 if (!f.file)
832 break; 834 break;
833 835
834 r = -EPERM; 836 r = -EPERM;
835 dev = kvm_device_from_filp(filp); 837 dev = kvm_device_from_filp(f.file);
836 if (dev) 838 if (dev)
837 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); 839 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
838 840
839 fput(filp); 841 fdput(f);
840 break; 842 break;
841 } 843 }
842#endif 844#endif
843#ifdef CONFIG_KVM_XICS 845#ifdef CONFIG_KVM_XICS
844 case KVM_CAP_IRQ_XICS: { 846 case KVM_CAP_IRQ_XICS: {
845 struct file *filp; 847 struct fd f;
846 struct kvm_device *dev; 848 struct kvm_device *dev;
847 849
848 r = -EBADF; 850 r = -EBADF;
849 filp = fget(cap->args[0]); 851 f = fdget(cap->args[0]);
850 if (!filp) 852 if (!f.file)
851 break; 853 break;
852 854
853 r = -EPERM; 855 r = -EPERM;
854 dev = kvm_device_from_filp(filp); 856 dev = kvm_device_from_filp(f.file);
855 if (dev) 857 if (dev)
856 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); 858 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
857 859
858 fput(filp); 860 fdput(f);
859 break; 861 break;
860 } 862 }
861#endif /* CONFIG_KVM_XICS */ 863#endif /* CONFIG_KVM_XICS */
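The fget()/fput() to fdget()/fdput() conversion above is the lightweight file-lookup idiom: struct fd carries the file pointer plus a flag recording whether a reference was actually taken, so fdput() drops it only when necessary. The resulting pattern:

	struct fd f;

	f = fdget(cap->args[0]);
	if (!f.file)
		return -EBADF;

	/* ... use f.file, e.g. kvm_device_from_filp(f.file) ... */

	fdput(f);	/* releases the reference only if fdget() took one */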
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 01e2db97a210..d47d3dab4870 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -52,7 +52,7 @@
52#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) 52#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
53/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */ 53/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
54#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) 54#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
55#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" 55#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
56#endif 56#endif
57#endif 57#endif
58#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE 58#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 4f51025f5b00..c77348c5d463 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -119,7 +119,7 @@ static void op_powerpc_stop(void)
119 model->global_stop(); 119 model->global_stop();
120} 120}
121 121
122static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) 122static int op_powerpc_create_files(struct dentry *root)
123{ 123{
124 int i; 124 int i;
125 125
@@ -128,9 +128,9 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
128 * There is one mmcr0, mmcr1 and mmcra for setting the events for 128 * There is one mmcr0, mmcr1 and mmcra for setting the events for
129 * all of the counters. 129 * all of the counters.
130 */ 130 */
131 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); 131 oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
132 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); 132 oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
133 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); 133 oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
134#ifdef CONFIG_OPROFILE_CELL 134#ifdef CONFIG_OPROFILE_CELL
135 /* create a file the user tool can check to see what level of profiling 135 /* create a file the user tool can check to see what level of profiling
136 * support exists with this kernel. Initialize bit mask to indicate 136
@@ -142,7 +142,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
142 * If the file does not exist, then the kernel only supports SPU 142 * If the file does not exist, then the kernel only supports SPU
143 * cycle profiling, PPU event and cycle profiling. 143 * cycle profiling, PPU event and cycle profiling.
144 */ 144 */
145 oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support); 145 oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
146 sys.cell_support = 0x1; /* Note, the user OProfile tool must check 146 sys.cell_support = 0x1; /* Note, the user OProfile tool must check
147 * that this bit is set before attempting to 147 * that this bit is set before attempting to
148 * use SPU event profiling. Older kernels 148
@@ -160,11 +160,11 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
160 char buf[4]; 160 char buf[4];
161 161
162 snprintf(buf, sizeof buf, "%d", i); 162 snprintf(buf, sizeof buf, "%d", i);
163 dir = oprofilefs_mkdir(sb, root, buf); 163 dir = oprofilefs_mkdir(root, buf);
164 164
165 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 165 oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
166 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); 166 oprofilefs_create_ulong(dir, "event", &ctr[i].event);
167 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); 167 oprofilefs_create_ulong(dir, "count", &ctr[i].count);
168 168
169 /* 169 /*
170 * Classic PowerPC doesn't support per-counter 170 * Classic PowerPC doesn't support per-counter
@@ -173,14 +173,14 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
173 * Book-E style performance monitors, we do 173 * Book-E style performance monitors, we do
174 * support them. 174 * support them.
175 */ 175 */
176 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); 176 oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
177 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); 177 oprofilefs_create_ulong(dir, "user", &ctr[i].user);
178 178
179 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); 179 oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
180 } 180 }
181 181
182 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); 182 oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
183 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); 183 oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
184 184
185 /* Default to tracing both kernel and user */ 185 /* Default to tracing both kernel and user */
186 sys.enable_kernel = 1; 186 sys.enable_kernel = 1;
diff --git a/arch/powerpc/perf/power7-events-list.h b/arch/powerpc/perf/power7-events-list.h
new file mode 100644
index 000000000000..687790a2c0b8
--- /dev/null
+++ b/arch/powerpc/perf/power7-events-list.h
@@ -0,0 +1,548 @@
1/*
2 * Performance counter support for POWER7 processors.
3 *
4 * Copyright 2013 Runzhen Wang, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898)
13EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0)
14EVENT(PM_PMC2_SAVED, 0x10022)
15EVENT(PM_CMPLU_STALL_DFU, 0x2003c)
16EVENT(PM_VSU0_16FLOP, 0x0a0a4)
17EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a)
18EVENT(PM_MRK_ST_CMPL, 0x10034)
19EVENT(PM_NEST_PAIR3_ADD, 0x40881)
20EVENT(PM_L2_ST_DISP, 0x46180)
21EVENT(PM_L2_CASTOUT_MOD, 0x16180)
22EVENT(PM_ISEG, 0x020a4)
23EVENT(PM_MRK_INST_TIMEO, 0x40034)
24EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282)
25EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6)
26EVENT(PM_IERAT_WR_64K, 0x040be)
27EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e)
28EVENT(PM_IERAT_MISS, 0x100f6)
29EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052)
30EVENT(PM_FLOP, 0x100f4)
31EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4)
32EVENT(PM_BR_PRED_TA, 0x040aa)
33EVENT(PM_CMPLU_STALL_FXU, 0x20014)
34EVENT(PM_EXT_INT, 0x200f8)
35EVENT(PM_VSU_FSQRT_FDIV, 0x0a888)
36EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e)
37EVENT(PM_LSU1_LDF, 0x0c086)
38EVENT(PM_IC_WRITE_ALL, 0x0488c)
39EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0)
40EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052)
41EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e)
42EVENT(PM_DATA_FROM_L21_MOD, 0x3c046)
43EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a)
44EVENT(PM_VSU0_8FLOP, 0x0a0a0)
45EVENT(PM_POWER_EVENT1, 0x1006e)
46EVENT(PM_DISP_CLB_HELD_BAL, 0x02092)
47EVENT(PM_VSU1_2FLOP, 0x0a09a)
48EVENT(PM_LWSYNC_HELD, 0x0209a)
49EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054)
50EVENT(PM_INST_FROM_L21_MOD, 0x34046)
51EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc)
52EVENT(PM_IC_REQ_ALL, 0x04888)
53EVENT(PM_DSLB_MISS, 0x0d090)
54EVENT(PM_L3_MISS, 0x1f082)
55EVENT(PM_LSU0_L1_PREF, 0x0d0b8)
56EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884)
57EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be)
58EVENT(PM_L2_INST, 0x36080)
59EVENT(PM_VSU0_FRSP, 0x0a0b4)
60EVENT(PM_FLUSH_DISP, 0x02082)
61EVENT(PM_PTEG_FROM_L2MISS, 0x4c058)
62EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a)
63EVENT(PM_CMPLU_STALL_LSU, 0x20012)
64EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a)
65EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0)
66EVENT(PM_PTEG_FROM_LMEM, 0x4c052)
67EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c)
68EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c)
69EVENT(PM_MEM0_PREFETCH_DISP, 0x20083)
70EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f)
71EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c)
72EVENT(PM_VSU_FRSP, 0x0a8b4)
73EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046)
74EVENT(PM_PMC1_OVERFLOW, 0x20010)
75EVENT(PM_VSU0_SINGLE, 0x0a0a8)
76EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058)
77EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056)
78EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090)
79EVENT(PM_VSU1_FEST, 0x0a0ba)
80EVENT(PM_MRK_INST_DISP, 0x20030)
81EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096)
82EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6)
83EVENT(PM_INST_CMPL, 0x00002)
84EVENT(PM_FXU_IDLE, 0x1000e)
85EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0)
86EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c)
87EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c)
88EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6)
89EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056)
90EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042)
91EVENT(PM_SHL_CREATED, 0x05082)
92EVENT(PM_L2_ST_HIT, 0x46182)
93EVENT(PM_DATA_FROM_DMEM, 0x1c04a)
94EVENT(PM_L3_LD_MISS, 0x2f082)
95EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e)
96EVENT(PM_DISP_CLB_HELD_RES, 0x02094)
97EVENT(PM_L2_SN_SX_I_DONE, 0x36382)
98EVENT(PM_GRP_CMPL, 0x30004)
99EVENT(PM_STCX_CMPL, 0x0c098)
100EVENT(PM_VSU0_2FLOP, 0x0a098)
101EVENT(PM_L3_PREF_MISS, 0x3f082)
102EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096)
103EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064)
104EVENT(PM_L1_ICACHE_MISS, 0x200fc)
105EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be)
106EVENT(PM_LD_REF_L1_LSU0, 0x0c080)
107EVENT(PM_VSU0_FEST, 0x0a0b8)
108EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890)
109EVENT(PM_FREQ_UP, 0x4000c)
110EVENT(PM_DATA_FROM_LMEM, 0x3c04a)
111EVENT(PM_LSU1_LDX, 0x0c08a)
112EVENT(PM_PMC3_OVERFLOW, 0x40010)
113EVENT(PM_MRK_BR_MPRED, 0x30036)
114EVENT(PM_SHL_MATCH, 0x05086)
115EVENT(PM_MRK_BR_TAKEN, 0x10036)
116EVENT(PM_CMPLU_STALL_BRU, 0x4004e)
117EVENT(PM_ISLB_MISS, 0x0d092)
118EVENT(PM_CYC, 0x0001e)
119EVENT(PM_DISP_HELD_THERMAL, 0x30006)
120EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054)
121EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2)
122EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a)
123EVENT(PM_1PLUS_PPC_CMPL, 0x100f2)
124EVENT(PM_PTEG_FROM_DMEM, 0x2c052)
125EVENT(PM_VSU_2FLOP, 0x0a898)
126EVENT(PM_GCT_FULL_CYC, 0x04086)
127EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020)
128EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d)
129EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c)
130EVENT(PM_BR_MPRED_TA, 0x040ae)
131EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058)
132EVENT(PM_DPU_HELD_POWER, 0x20006)
133EVENT(PM_RUN_INST_CMPL, 0x400fa)
134EVENT(PM_MRK_VSU_FIN, 0x30032)
135EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c)
136EVENT(PM_GCT_EMPTY_CYC, 0x20008)
137EVENT(PM_IOPS_DISP, 0x30014)
138EVENT(PM_RUN_SPURR, 0x10008)
139EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056)
140EVENT(PM_VSU0_1FLOP, 0x0a080)
141EVENT(PM_SNOOP_TLBIE, 0x0d0b2)
142EVENT(PM_DATA_FROM_L3MISS, 0x2c048)
143EVENT(PM_VSU_SINGLE, 0x0a8a8)
144EVENT(PM_DTLB_MISS_16G, 0x1c05e)
145EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c)
146EVENT(PM_FLUSH, 0x400f8)
147EVENT(PM_L2_LD_HIT, 0x36182)
148EVENT(PM_NEST_PAIR2_AND, 0x30883)
149EVENT(PM_VSU1_1FLOP, 0x0a082)
150EVENT(PM_IC_PREF_REQ, 0x0408a)
151EVENT(PM_L3_LD_HIT, 0x2f080)
152EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a)
153EVENT(PM_DISP_HELD, 0x10006)
154EVENT(PM_L2_LD, 0x16080)
155EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc)
156EVENT(PM_BC_PLUS_8_CONV, 0x040b8)
157EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026)
158EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a)
159EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282)
160EVENT(PM_TB_BIT_TRANS, 0x300f8)
161EVENT(PM_THERMAL_MAX, 0x40006)
162EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2)
163EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae)
164EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f)
165EVENT(PM_L3_CO_L31, 0x4f080)
166EVENT(PM_POWER_EVENT4, 0x4006e)
167EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e)
168EVENT(PM_BR_UNCOND, 0x0409e)
169EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa)
170EVENT(PM_PMC4_REWIND, 0x10020)
171EVENT(PM_L2_RCLD_DISP, 0x16280)
172EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2)
173EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058)
174EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098)
175EVENT(PM_LSU_DERAT_MISS, 0x200f6)
176EVENT(PM_IC_PREF_CANCEL_L2, 0x04094)
177EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d)
178EVENT(PM_BR_PRED_CCACHE, 0x040a0)
179EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c)
180EVENT(PM_MRK_ST_CMPL_INT, 0x30034)
181EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6)
182EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048)
183EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
184EVENT(PM_LSU_SET_MPRED, 0x0c0a8)
185EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a)
186EVENT(PM_VSU1_FCONV, 0x0a0b2)
187EVENT(PM_DERAT_MISS_16G, 0x4c05c)
188EVENT(PM_INST_FROM_LMEM, 0x3404a)
189EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a)
190EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018)
191EVENT(PM_INST_PTEG_FROM_L2, 0x1e050)
192EVENT(PM_PTEG_FROM_L2, 0x1c050)
193EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024)
194EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a)
195EVENT(PM_VSU0_FPSCR, 0x0b09c)
196EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082)
197EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052)
198EVENT(PM_MEM0_RQ_DISP, 0x10083)
199EVENT(PM_L2_LD_MISS, 0x26080)
200EVENT(PM_VMX_RESULT_SAT_1, 0x0b0a0)
201EVENT(PM_L1_PREF, 0x0d8b8)
202EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c)
203EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c)
204EVENT(PM_PB_NODE_PUMP, 0x10081)
205EVENT(PM_SHL_MERGED, 0x05084)
206EVENT(PM_NEST_PAIR1_ADD, 0x20881)
207EVENT(PM_DATA_FROM_L3, 0x1c048)
208EVENT(PM_LSU_FLUSH, 0x0208e)
209EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097)
210EVENT(PM_PMC2_OVERFLOW, 0x30010)
211EVENT(PM_LSU_LDF, 0x0c884)
212EVENT(PM_POWER_EVENT3, 0x3006e)
213EVENT(PM_DISP_WT, 0x30008)
214EVENT(PM_CMPLU_STALL_REJECT, 0x40016)
215EVENT(PM_IC_BANK_CONFLICT, 0x04082)
216EVENT(PM_BR_MPRED_CR_TA, 0x048ae)
217EVENT(PM_L2_INST_MISS, 0x36082)
218EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018)
219EVENT(PM_NEST_PAIR2_ADD, 0x30881)
220EVENT(PM_MRK_LSU_FLUSH, 0x0d08c)
221EVENT(PM_L2_LDST, 0x16880)
222EVENT(PM_INST_FROM_L31_SHR, 0x1404e)
223EVENT(PM_VSU0_FIN, 0x0a0bc)
224EVENT(PM_LARX_LSU, 0x0c894)
225EVENT(PM_INST_FROM_RMEM, 0x34042)
226EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096)
227EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e)
228EVENT(PM_BR_PRED_CR, 0x040a8)
229EVENT(PM_LSU_REJECT, 0x10064)
230EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e)
231EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028)
232EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4)
233EVENT(PM_VSU_FEST, 0x0a8b8)
234EVENT(PM_NEST_PAIR0_AND, 0x10883)
235EVENT(PM_PTEG_FROM_L3, 0x2c050)
236EVENT(PM_POWER_EVENT2, 0x2006e)
237EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090)
238EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088)
239EVENT(PM_MRK_GRP_CMPL, 0x40030)
240EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088)
241EVENT(PM_GRP_DISP, 0x3000a)
242EVENT(PM_LSU0_LDX, 0x0c088)
243EVENT(PM_DATA_FROM_L2, 0x1c040)
244EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042)
245EVENT(PM_LD_REF_L1, 0x0c880)
246EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080)
247EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e)
248EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6)
249EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba)
250EVENT(PM_BR_MPRED_CR, 0x040ac)
251EVENT(PM_L3_CO_MEM, 0x4f082)
252EVENT(PM_LD_MISS_L1, 0x400f0)
253EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042)
254EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a)
255EVENT(PM_TABLEWALK_CYC, 0x10026)
256EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052)
257EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0)
258EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052)
259EVENT(PM_FXU0_FIN, 0x10004)
260EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e)
261EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054)
262EVENT(PM_PMC5_OVERFLOW, 0x10024)
263EVENT(PM_LD_REF_L1_LSU1, 0x0c082)
264EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056)
265EVENT(PM_CMPLU_STALL_THRD, 0x1001c)
266EVENT(PM_DATA_FROM_RMEM, 0x3c042)
267EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084)
268EVENT(PM_BR_MPRED_LSTACK, 0x040a6)
269EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028)
270EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4)
271EVENT(PM_LSU_NCST, 0x0c090)
272EVENT(PM_BR_TAKEN, 0x20004)
273EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052)
274EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c)
275EVENT(PM_DTLB_MISS_4K, 0x2c05a)
276EVENT(PM_PMC4_SAVED, 0x30022)
277EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092)
278EVENT(PM_SLB_MISS, 0x0d890)
279EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba)
280EVENT(PM_DTLB_MISS, 0x300fc)
281EVENT(PM_VSU1_FRSP, 0x0a0b6)
282EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880)
283EVENT(PM_L2_CASTOUT_SHR, 0x16182)
284EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044)
285EVENT(PM_VSU1_STF, 0x0b08e)
286EVENT(PM_ST_FIN, 0x200f0)
287EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056)
288EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480)
289EVENT(PM_MRK_STCX_FAIL, 0x0d08e)
290EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac)
291EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092)
292EVENT(PM_L3_PREF_BUSY, 0x4f080)
293EVENT(PM_MRK_BRU_FIN, 0x2003a)
294EVENT(PM_LSU1_NCLD, 0x0c08e)
295EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054)
296EVENT(PM_LSU_NCLD, 0x0c88c)
297EVENT(PM_LSU_LDX, 0x0c888)
298EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480)
299EVENT(PM_THRESH_TIMEO, 0x10038)
300EVENT(PM_L3_PREF_ST, 0x0d0ae)
301EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098)
302EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894)
303EVENT(PM_VSU1_SINGLE, 0x0a0aa)
304EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a)
305EVENT(PM_L2_RC_ST_DONE, 0x36380)
306EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056)
307EVENT(PM_LARX_LSU1, 0x0c096)
308EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042)
309EVENT(PM_DISP_CLB_HELD, 0x02090)
310EVENT(PM_DERAT_MISS_4K, 0x1c05c)
311EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282)
312EVENT(PM_SEG_EXCEPTION, 0x028a4)
313EVENT(PM_FLUSH_DISP_SB, 0x0208c)
314EVENT(PM_L2_DC_INV, 0x26182)
315EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054)
316EVENT(PM_DSEG, 0x020a6)
317EVENT(PM_BR_PRED_LSTACK, 0x040a2)
318EVENT(PM_VSU0_STF, 0x0b08c)
319EVENT(PM_LSU_FX_FIN, 0x10066)
320EVENT(PM_DERAT_MISS_16M, 0x3c05c)
321EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054)
322EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2)
323EVENT(PM_INST_FROM_L3, 0x14048)
324EVENT(PM_MRK_IFU_FIN, 0x3003a)
325EVENT(PM_ITLB_MISS, 0x400fc)
326EVENT(PM_VSU_STF, 0x0b88c)
327EVENT(PM_LSU_FLUSH_UST, 0x0c8b4)
328EVENT(PM_L2_LDST_MISS, 0x26880)
329EVENT(PM_FXU1_FIN, 0x40004)
330EVENT(PM_SHL_DEALLOCATED, 0x05080)
331EVENT(PM_L2_SN_M_WR_DONE, 0x46382)
332EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8)
333EVENT(PM_L3_PREF_LD, 0x0d0ac)
334EVENT(PM_L2_SN_M_RD_DONE, 0x46380)
335EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c)
336EVENT(PM_VSU_FCONV, 0x0a8b0)
337EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa)
338EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4)
339EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082)
340EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e)
341EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020)
342EVENT(PM_INST_IMC_MATCH_DISP, 0x30016)
343EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c)
344EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094)
345EVENT(PM_CMPLU_STALL_DIV, 0x40014)
346EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054)
347EVENT(PM_VSU_FMA_DOUBLE, 0x0a890)
348EVENT(PM_VSU_4FLOP, 0x0a89c)
349EVENT(PM_VSU1_FIN, 0x0a0be)
350EVENT(PM_NEST_PAIR1_AND, 0x20883)
351EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052)
352EVENT(PM_RUN_CYC, 0x200f4)
353EVENT(PM_PTEG_FROM_RMEM, 0x3c052)
354EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e)
355EVENT(PM_LSU0_LDF, 0x0c084)
356EVENT(PM_FLUSH_COMPLETION, 0x30012)
357EVENT(PM_ST_MISS_L1, 0x300f0)
358EVENT(PM_L2_NODE_PUMP, 0x36480)
359EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044)
360EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e)
361EVENT(PM_VSU1_DENORM, 0x0a0ae)
362EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026)
363EVENT(PM_NEST_PAIR0_ADD, 0x10881)
364EVENT(PM_INST_FROM_L3MISS, 0x24048)
365EVENT(PM_EE_OFF_EXT_INT, 0x02080)
366EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052)
367EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c)
368EVENT(PM_PMC6_OVERFLOW, 0x30024)
369EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c)
370EVENT(PM_TLB_MISS, 0x20066)
371EVENT(PM_FXU_BUSY, 0x2000e)
372EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280)
373EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4)
374EVENT(PM_IC_RELOAD_SHR, 0x04096)
375EVENT(PM_GRP_MRK, 0x10031)
376EVENT(PM_MRK_ST_NEST, 0x20034)
377EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a)
378EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8)
379EVENT(PM_LARX_LSU0, 0x0c094)
380EVENT(PM_IBUF_FULL_CYC, 0x04084)
381EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a)
382EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8)
383EVENT(PM_GRP_MRK_CYC, 0x10030)
384EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028)
385EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482)
386EVENT(PM_LSU_REJECT_LHS, 0x0c8ac)
387EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a)
388EVENT(PM_INST_PTEG_FROM_L3, 0x2e050)
389EVENT(PM_FREQ_DOWN, 0x3000c)
390EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081)
391EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c)
392EVENT(PM_MRK_INST_ISSUED, 0x10032)
393EVENT(PM_PTEG_FROM_L3MISS, 0x2c058)
394EVENT(PM_RUN_PURR, 0x400f4)
395EVENT(PM_MRK_GRP_IC_MISS, 0x40038)
396EVENT(PM_MRK_DATA_FROM_L3, 0x1d048)
397EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016)
398EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054)
399EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8)
400EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c)
401EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054)
402EVENT(PM_L2_ST_MISS, 0x26082)
403EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056)
404EVENT(PM_LWSYNC, 0x0d094)
405EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc)
406EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088)
407EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0)
408EVENT(PM_NEST_PAIR3_AND, 0x40883)
409EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081)
410EVENT(PM_MRK_INST_FIN, 0x30030)
411EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054)
412EVENT(PM_INST_FROM_L31_MOD, 0x14044)
413EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e)
414EVENT(PM_LSU_FIN, 0x30066)
415EVENT(PM_MRK_LSU_REJECT, 0x40064)
416EVENT(PM_L2_CO_FAIL_BUSY, 0x16382)
417EVENT(PM_MEM0_WQ_DISP, 0x40083)
418EVENT(PM_DATA_FROM_L31_MOD, 0x1c044)
419EVENT(PM_THERMAL_WARN, 0x10016)
420EVENT(PM_VSU0_4FLOP, 0x0a09c)
421EVENT(PM_BR_MPRED_CCACHE, 0x040a4)
422EVENT(PM_CMPLU_STALL_IFU, 0x4004c)
423EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
424EVENT(PM_FLUSH_BR_MPRED, 0x02084)
425EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e)
426EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052)
427EVENT(PM_L2_RCST_DISP, 0x36280)
428EVENT(PM_CMPLU_STALL, 0x4000a)
429EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa)
430EVENT(PM_DISP_CLB_HELD_SB, 0x020a8)
431EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090)
432EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e)
433EVENT(PM_IC_DEMAND_CYC, 0x10018)
434EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e)
435EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086)
436EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058)
437EVENT(PM_VSU_DENORM, 0x0a8ac)
438EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080)
439EVENT(PM_INST_FROM_L21_SHR, 0x3404e)
440EVENT(PM_IC_PREF_WRITE, 0x0408e)
441EVENT(PM_BR_PRED, 0x0409c)
442EVENT(PM_INST_FROM_DMEM, 0x1404a)
443EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890)
444EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4)
445EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a)
446EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c)
447EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280)
448EVENT(PM_VSU1_DD_ISSUED, 0x0b098)
449EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056)
450EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e)
451EVENT(PM_LSU0_NCLD, 0x0c08c)
452EVENT(PM_VSU1_4FLOP, 0x0a09e)
453EVENT(PM_VSU1_8FLOP, 0x0a0a2)
454EVENT(PM_VSU_8FLOP, 0x0a8a0)
455EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e)
456EVENT(PM_DTLB_MISS_64K, 0x3c05e)
457EVENT(PM_THRD_CONC_RUN_INST, 0x300f4)
458EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050)
459EVENT(PM_PB_SYS_PUMP, 0x20081)
460EVENT(PM_VSU_FIN, 0x0a8bc)
461EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044)
462EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0)
463EVENT(PM_DERAT_MISS_64K, 0x2c05c)
464EVENT(PM_PMC2_REWIND, 0x30020)
465EVENT(PM_INST_FROM_L2, 0x14040)
466EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a)
467EVENT(PM_INST_DISP, 0x200f2)
468EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083)
469EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4)
470EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6)
471EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888)
472EVENT(PM_L3_PREF_HIT, 0x3f080)
473EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054)
474EVENT(PM_CMPLU_STALL_STORE, 0x2004a)
475EVENT(PM_MRK_FXU_FIN, 0x20038)
476EVENT(PM_PMC4_OVERFLOW, 0x10010)
477EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050)
478EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098)
479EVENT(PM_BTAC_HIT, 0x0508a)
480EVENT(PM_L3_RD_BUSY, 0x4f082)
481EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c)
482EVENT(PM_INST_FROM_L2MISS, 0x44048)
483EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8)
484EVENT(PM_L2_ST, 0x16082)
485EVENT(PM_VSU0_DENORM, 0x0a0ac)
486EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044)
487EVENT(PM_BR_PRED_CR_TA, 0x048aa)
488EVENT(PM_VSU0_FCONV, 0x0a0b0)
489EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084)
490EVENT(PM_BTAC_MISS, 0x05088)
491EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f)
492EVENT(PM_MRK_DATA_FROM_L2, 0x1d040)
493EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2)
494EVENT(PM_VSU_FMA, 0x0a884)
495EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc)
496EVENT(PM_LSU1_L1_PREF, 0x0d0ba)
497EVENT(PM_IOPS_CMPL, 0x10014)
498EVENT(PM_L2_SYS_PUMP, 0x36482)
499EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282)
500EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1)
501EVENT(PM_FLUSH_DISP_SYNC, 0x02088)
502EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a)
503EVENT(PM_L2_IC_INV, 0x26180)
504EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024)
505EVENT(PM_L3_PREF_LDST, 0x0d8ac)
506EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008)
507EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0)
508EVENT(PM_FLUSH_PARTIAL, 0x02086)
509EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092)
510EVENT(PM_1PLUS_PPC_DISP, 0x400f2)
511EVENT(PM_DATA_FROM_L2MISS, 0x200fe)
512EVENT(PM_SUSPENDED, 0x00000)
513EVENT(PM_VSU0_FMA, 0x0a084)
514EVENT(PM_CMPLU_STALL_SCALAR, 0x40012)
515EVENT(PM_STCX_FAIL, 0x0c09a)
516EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094)
517EVENT(PM_DC_PREF_DST, 0x0d0b0)
518EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086)
519EVENT(PM_L3_HIT, 0x1f080)
520EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482)
521EVENT(PM_MRK_DFU_FIN, 0x20032)
522EVENT(PM_INST_FROM_L1, 0x04080)
523EVENT(PM_BRU_FIN, 0x10068)
524EVENT(PM_IC_DEMAND_REQ, 0x04088)
525EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096)
526EVENT(PM_VSU1_FMA, 0x0a086)
527EVENT(PM_MRK_LD_MISS_L1, 0x20036)
528EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c)
529EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc)
530EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056)
531EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064)
532EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048)
533EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c)
534EVENT(PM_INST_FROM_PREF, 0x14046)
535EVENT(PM_VSU1_SQ, 0x0b09e)
536EVENT(PM_L2_LD_DISP, 0x36180)
537EVENT(PM_L2_DISP_ALL, 0x46080)
538EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012)
539EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894)
540EVENT(PM_BR_MPRED, 0x400f6)
541EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054)
542EVENT(PM_VSU_1FLOP, 0x0a880)
543EVENT(PM_HV_CYC, 0x2000a)
544EVENT(PM_MRK_LSU_FIN, 0x40032)
545EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c)
546EVENT(PM_DTLB_MISS_16M, 0x4c05e)
547EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a)
548EVENT(PM_IFU_FIN, 0x40066)
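This list is meant to be included repeatedly with different definitions of EVENT(), the classic X-macro idiom, and power7-pmu.c below expands it three times (enum values, sysfs attributes, attribute pointers). The core of the technique, condensed from those hunks:

#define EVENT(_name, _code)	PME_##_name = _code,
enum {
#include "power7-events-list.h"
};
#undef EVENT

#define EVENT(_name, _code)	POWER_EVENT_ATTR(_name, _name);
#include "power7-events-list.h"
#undef EVENT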
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index d1821b8bbc4c..56c67bca2f75 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -53,37 +53,13 @@
53/* 53/*
54 * Power7 event codes. 54 * Power7 event codes.
55 */ 55 */
56#define PME_PM_CYC 0x1e 56#define EVENT(_name, _code) \
57#define PME_PM_GCT_NOSLOT_CYC 0x100f8 57 PME_##_name = _code,
58#define PME_PM_CMPLU_STALL 0x4000a 58
59#define PME_PM_INST_CMPL 0x2 59enum {
60#define PME_PM_LD_REF_L1 0xc880 60#include "power7-events-list.h"
61#define PME_PM_LD_MISS_L1 0x400f0 61};
62#define PME_PM_BRU_FIN 0x10068 62#undef EVENT
63#define PME_PM_BR_MPRED 0x400f6
64
65#define PME_PM_CMPLU_STALL_FXU 0x20014
66#define PME_PM_CMPLU_STALL_DIV 0x40014
67#define PME_PM_CMPLU_STALL_SCALAR 0x40012
68#define PME_PM_CMPLU_STALL_SCALAR_LONG 0x20018
69#define PME_PM_CMPLU_STALL_VECTOR 0x2001c
70#define PME_PM_CMPLU_STALL_VECTOR_LONG 0x4004a
71#define PME_PM_CMPLU_STALL_LSU 0x20012
72#define PME_PM_CMPLU_STALL_REJECT 0x40016
73#define PME_PM_CMPLU_STALL_ERAT_MISS 0x40018
74#define PME_PM_CMPLU_STALL_DCACHE_MISS 0x20016
75#define PME_PM_CMPLU_STALL_STORE 0x2004a
76#define PME_PM_CMPLU_STALL_THRD 0x1001c
77#define PME_PM_CMPLU_STALL_IFU 0x4004c
78#define PME_PM_CMPLU_STALL_BRU 0x4004e
79#define PME_PM_GCT_NOSLOT_IC_MISS 0x2001a
80#define PME_PM_GCT_NOSLOT_BR_MPRED 0x4001a
81#define PME_PM_GCT_NOSLOT_BR_MPRED_IC_MISS 0x4001c
82#define PME_PM_GRP_CMPL 0x30004
83#define PME_PM_1PLUS_PPC_CMPL 0x100f2
84#define PME_PM_CMPLU_STALL_DFU 0x2003c
85#define PME_PM_RUN_CYC 0x200f4
86#define PME_PM_RUN_INST_CMPL 0x400fa
87 63
88/* 64/*
89 * Layout of constraint bits: 65 * Layout of constraint bits:
@@ -398,96 +374,36 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
398}; 374};
399 375
400 376
401GENERIC_EVENT_ATTR(cpu-cycles, CYC); 377GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
402GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); 378GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
403GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); 379GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
404GENERIC_EVENT_ATTR(instructions, INST_CMPL); 380GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
405GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); 381GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
406GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); 382GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
407GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); 383GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
408GENERIC_EVENT_ATTR(branch-misses, BR_MPRED); 384GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED);
409 385
410POWER_EVENT_ATTR(CYC, CYC); 386#define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name);
411POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); 387#include "power7-events-list.h"
412POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); 388#undef EVENT
413POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); 389
414POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); 390#define EVENT(_name, _code) POWER_EVENT_PTR(_name),
415POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
416POWER_EVENT_ATTR(BRU_FIN, BRU_FIN)
417POWER_EVENT_ATTR(BR_MPRED, BR_MPRED);
418
419POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU);
420POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV);
421POWER_EVENT_ATTR(CMPLU_STALL_SCALAR, CMPLU_STALL_SCALAR);
422POWER_EVENT_ATTR(CMPLU_STALL_SCALAR_LONG, CMPLU_STALL_SCALAR_LONG);
423POWER_EVENT_ATTR(CMPLU_STALL_VECTOR, CMPLU_STALL_VECTOR);
424POWER_EVENT_ATTR(CMPLU_STALL_VECTOR_LONG, CMPLU_STALL_VECTOR_LONG);
425POWER_EVENT_ATTR(CMPLU_STALL_LSU, CMPLU_STALL_LSU);
426POWER_EVENT_ATTR(CMPLU_STALL_REJECT, CMPLU_STALL_REJECT);
427
428POWER_EVENT_ATTR(CMPLU_STALL_ERAT_MISS, CMPLU_STALL_ERAT_MISS);
429POWER_EVENT_ATTR(CMPLU_STALL_DCACHE_MISS, CMPLU_STALL_DCACHE_MISS);
430POWER_EVENT_ATTR(CMPLU_STALL_STORE, CMPLU_STALL_STORE);
431POWER_EVENT_ATTR(CMPLU_STALL_THRD, CMPLU_STALL_THRD);
432POWER_EVENT_ATTR(CMPLU_STALL_IFU, CMPLU_STALL_IFU);
433POWER_EVENT_ATTR(CMPLU_STALL_BRU, CMPLU_STALL_BRU);
434POWER_EVENT_ATTR(GCT_NOSLOT_IC_MISS, GCT_NOSLOT_IC_MISS);
435
436POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED, GCT_NOSLOT_BR_MPRED);
437POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED_IC_MISS, GCT_NOSLOT_BR_MPRED_IC_MISS);
438POWER_EVENT_ATTR(GRP_CMPL, GRP_CMPL);
439POWER_EVENT_ATTR(1PLUS_PPC_CMPL, 1PLUS_PPC_CMPL);
440POWER_EVENT_ATTR(CMPLU_STALL_DFU, CMPLU_STALL_DFU);
441POWER_EVENT_ATTR(RUN_CYC, RUN_CYC);
442POWER_EVENT_ATTR(RUN_INST_CMPL, RUN_INST_CMPL);
443 391
444static struct attribute *power7_events_attr[] = { 392static struct attribute *power7_events_attr[] = {
445 GENERIC_EVENT_PTR(CYC), 393 GENERIC_EVENT_PTR(PM_CYC),
446 GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), 394 GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
447 GENERIC_EVENT_PTR(CMPLU_STALL), 395 GENERIC_EVENT_PTR(PM_CMPLU_STALL),
448 GENERIC_EVENT_PTR(INST_CMPL), 396 GENERIC_EVENT_PTR(PM_INST_CMPL),
449 GENERIC_EVENT_PTR(LD_REF_L1), 397 GENERIC_EVENT_PTR(PM_LD_REF_L1),
450 GENERIC_EVENT_PTR(LD_MISS_L1), 398 GENERIC_EVENT_PTR(PM_LD_MISS_L1),
451 GENERIC_EVENT_PTR(BRU_FIN), 399 GENERIC_EVENT_PTR(PM_BRU_FIN),
452 GENERIC_EVENT_PTR(BR_MPRED), 400 GENERIC_EVENT_PTR(PM_BR_MPRED),
453 401
454 POWER_EVENT_PTR(CYC), 402 #include "power7-events-list.h"
455 POWER_EVENT_PTR(GCT_NOSLOT_CYC), 403 #undef EVENT
456 POWER_EVENT_PTR(CMPLU_STALL),
457 POWER_EVENT_PTR(INST_CMPL),
458 POWER_EVENT_PTR(LD_REF_L1),
459 POWER_EVENT_PTR(LD_MISS_L1),
460 POWER_EVENT_PTR(BRU_FIN),
461 POWER_EVENT_PTR(BR_MPRED),
462
463 POWER_EVENT_PTR(CMPLU_STALL_FXU),
464 POWER_EVENT_PTR(CMPLU_STALL_DIV),
465 POWER_EVENT_PTR(CMPLU_STALL_SCALAR),
466 POWER_EVENT_PTR(CMPLU_STALL_SCALAR_LONG),
467 POWER_EVENT_PTR(CMPLU_STALL_VECTOR),
468 POWER_EVENT_PTR(CMPLU_STALL_VECTOR_LONG),
469 POWER_EVENT_PTR(CMPLU_STALL_LSU),
470 POWER_EVENT_PTR(CMPLU_STALL_REJECT),
471
472 POWER_EVENT_PTR(CMPLU_STALL_ERAT_MISS),
473 POWER_EVENT_PTR(CMPLU_STALL_DCACHE_MISS),
474 POWER_EVENT_PTR(CMPLU_STALL_STORE),
475 POWER_EVENT_PTR(CMPLU_STALL_THRD),
476 POWER_EVENT_PTR(CMPLU_STALL_IFU),
477 POWER_EVENT_PTR(CMPLU_STALL_BRU),
478 POWER_EVENT_PTR(GCT_NOSLOT_IC_MISS),
479 POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED),
480
481 POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED_IC_MISS),
482 POWER_EVENT_PTR(GRP_CMPL),
483 POWER_EVENT_PTR(1PLUS_PPC_CMPL),
484 POWER_EVENT_PTR(CMPLU_STALL_DFU),
485 POWER_EVENT_PTR(RUN_CYC),
486 POWER_EVENT_PTR(RUN_INST_CMPL),
487 NULL 404 NULL
488}; 405};
489 406
490
491static struct attribute_group power7_pmu_events_group = { 407static struct attribute_group power7_pmu_events_group = {
492 .name = "events", 408 .name = "events",
493 .attrs = power7_events_attr, 409 .attrs = power7_events_attr,
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 4cfa49901c02..534574a97ec9 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,7 +16,6 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/of_gpio.h> 18#include <linux/of_gpio.h>
19#include <linux/of_i2c.h>
20#include <linux/slab.h> 19#include <linux/slab.h>
21#include <linux/export.h> 20#include <linux/export.h>
22 21
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index cba1e6be68e5..ce73ce865613 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -90,7 +90,7 @@ static int __init ps3_rtc_init(void)
90 90
91 pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0); 91 pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
92 92
93 return PTR_RET(pdev); 93 return PTR_ERR_OR_ZERO(pdev);
94} 94}
95 95
96module_init(ps3_rtc_init); 96module_init(ps3_rtc_init);
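PTR_RET() was renamed to the more descriptive PTR_ERR_OR_ZERO(); both collapse an ERR_PTR-style return into a plain int, equivalent to:

static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}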
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 6a5f2b1f32ca..d276cd3edd8f 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -539,36 +539,6 @@ static int zip_oops(size_t text_len)
539} 539}
540 540
541#ifdef CONFIG_PSTORE 541#ifdef CONFIG_PSTORE
542/* Derived from logfs_uncompress */
543int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
544{
545 int err, ret;
546
547 ret = -EIO;
548 err = zlib_inflateInit(&stream);
549 if (err != Z_OK)
550 goto error;
551
552 stream.next_in = in;
553 stream.avail_in = inlen;
554 stream.total_in = 0;
555 stream.next_out = out;
556 stream.avail_out = outlen;
557 stream.total_out = 0;
558
559 err = zlib_inflate(&stream, Z_FINISH);
560 if (err != Z_STREAM_END)
561 goto error;
562
563 err = zlib_inflateEnd(&stream);
564 if (err != Z_OK)
565 goto error;
566
567 ret = stream.total_out;
568error:
569 return ret;
570}
571
572static int nvram_pstore_open(struct pstore_info *psi) 542static int nvram_pstore_open(struct pstore_info *psi)
573{ 543{
574 /* Reset the iterator to start reading partitions again */ 544 /* Reset the iterator to start reading partitions again */
@@ -584,7 +554,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
584 * @part: pstore writes data to registered buffer in parts, 554 * @part: pstore writes data to registered buffer in parts,
585 * part number will indicate the same. 555 * part number will indicate the same.
586 * @count: Indicates oops count 556 * @count: Indicates oops count
587 * @hsize: Size of header added by pstore 557 * @compressed: Flag to indicate the log is compressed
588 * @size: number of bytes written to the registered buffer 558 * @size: number of bytes written to the registered buffer
589 * @psi: registered pstore_info structure 559 * @psi: registered pstore_info structure
590 * 560 *
@@ -595,7 +565,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
595static int nvram_pstore_write(enum pstore_type_id type, 565static int nvram_pstore_write(enum pstore_type_id type,
596 enum kmsg_dump_reason reason, 566 enum kmsg_dump_reason reason,
597 u64 *id, unsigned int part, int count, 567 u64 *id, unsigned int part, int count,
598 size_t hsize, size_t size, 568 bool compressed, size_t size,
599 struct pstore_info *psi) 569 struct pstore_info *psi)
600{ 570{
601 int rc; 571 int rc;
@@ -611,30 +581,11 @@ static int nvram_pstore_write(enum pstore_type_id type,
611 oops_hdr->report_length = (u16) size; 581 oops_hdr->report_length = (u16) size;
612 oops_hdr->timestamp = get_seconds(); 582 oops_hdr->timestamp = get_seconds();
613 583
614 if (big_oops_buf) { 584 if (compressed)
615 rc = zip_oops(size); 585 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
616 /*
617 * If compression fails copy recent log messages from
618 * big_oops_buf to oops_data.
619 */
620 if (rc != 0) {
621 size_t diff = size - oops_data_sz + hsize;
622
623 if (size > oops_data_sz) {
624 memcpy(oops_data, big_oops_buf, hsize);
625 memcpy(oops_data + hsize, big_oops_buf + diff,
626 oops_data_sz - hsize);
627
628 oops_hdr->report_length = (u16) oops_data_sz;
629 } else
630 memcpy(oops_data, big_oops_buf, size);
631 } else
632 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
633 }
634 586
635 rc = nvram_write_os_partition(&oops_log_partition, oops_buf, 587 rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
636 (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, 588 (int) (sizeof(*oops_hdr) + size), err_type, count);
637 count);
638 589
639 if (rc != 0) 590 if (rc != 0)
640 return rc; 591 return rc;
@@ -650,12 +601,12 @@ static int nvram_pstore_write(enum pstore_type_id type,
650 */ 601 */
651static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, 602static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
652 int *count, struct timespec *time, char **buf, 603 int *count, struct timespec *time, char **buf,
653 struct pstore_info *psi) 604 bool *compressed, struct pstore_info *psi)
654{ 605{
655 struct oops_log_info *oops_hdr; 606 struct oops_log_info *oops_hdr;
656 unsigned int err_type, id_no, size = 0; 607 unsigned int err_type, id_no, size = 0;
657 struct nvram_os_partition *part = NULL; 608 struct nvram_os_partition *part = NULL;
658 char *buff = NULL, *big_buff = NULL; 609 char *buff = NULL;
659 int sig = 0; 610 int sig = 0;
660 loff_t p; 611 loff_t p;
661 612
@@ -719,8 +670,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
719 *id = id_no; 670 *id = id_no;
720 671
721 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { 672 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
722 int length, unzipped_len; 673 size_t length, hdr_size;
723 size_t hdr_size;
724 674
725 oops_hdr = (struct oops_log_info *)buff; 675 oops_hdr = (struct oops_log_info *)buff;
726 if (oops_hdr->version < OOPS_HDR_VERSION) { 676 if (oops_hdr->version < OOPS_HDR_VERSION) {
@@ -741,23 +691,10 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
741 memcpy(*buf, buff + hdr_size, length); 691 memcpy(*buf, buff + hdr_size, length);
742 kfree(buff); 692 kfree(buff);
743 693
744 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { 694 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
745 big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); 695 *compressed = true;
746 if (!big_buff) 696 else
747 return -ENOMEM; 697 *compressed = false;
748
749 unzipped_len = nvram_decompress(*buf, big_buff,
750 length, big_oops_buf_sz);
751
752 if (unzipped_len < 0) {
753 pr_err("nvram: decompression failed, returned "
754 "rc %d\n", unzipped_len);
755 kfree(big_buff);
756 } else {
757 *buf = big_buff;
758 length = unzipped_len;
759 }
760 }
761 return length; 698 return length;
762 } 699 }
763 700
@@ -777,13 +714,8 @@ static int nvram_pstore_init(void)
777{ 714{
778 int rc = 0; 715 int rc = 0;
779 716
780 if (big_oops_buf) { 717 nvram_pstore_info.buf = oops_data;
781 nvram_pstore_info.buf = big_oops_buf; 718 nvram_pstore_info.bufsize = oops_data_sz;
782 nvram_pstore_info.bufsize = big_oops_buf_sz;
783 } else {
784 nvram_pstore_info.buf = oops_data;
785 nvram_pstore_info.bufsize = oops_data_sz;
786 }
787 719
788 rc = pstore_register(&nvram_pstore_info); 720 rc = pstore_register(&nvram_pstore_info);
789 if (rc != 0) 721 if (rc != 0)
@@ -802,7 +734,6 @@ static int nvram_pstore_init(void)
802static void __init nvram_init_oops_partition(int rtas_partition_exists) 734static void __init nvram_init_oops_partition(int rtas_partition_exists)
803{ 735{
804 int rc; 736 int rc;
805 size_t size;
806 737
807 rc = pseries_nvram_init_os_partition(&oops_log_partition); 738 rc = pseries_nvram_init_os_partition(&oops_log_partition);
808 if (rc != 0) { 739 if (rc != 0) {
@@ -823,6 +754,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
823 oops_data = oops_buf + sizeof(struct oops_log_info); 754 oops_data = oops_buf + sizeof(struct oops_log_info);
824 oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); 755 oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
825 756
757 rc = nvram_pstore_init();
758
759 if (!rc)
760 return;
761
826 /* 762 /*
827 * Figure compression (preceded by elimination of each line's <n> 763 * Figure compression (preceded by elimination of each line's <n>
828 * severity prefix) will reduce the oops/panic report to at most 764 * severity prefix) will reduce the oops/panic report to at most
@@ -831,9 +767,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
831 big_oops_buf_sz = (oops_data_sz * 100) / 45; 767 big_oops_buf_sz = (oops_data_sz * 100) / 45;
832 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 768 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
833 if (big_oops_buf) { 769 if (big_oops_buf) {
834 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL), 770 stream.workspace = kmalloc(zlib_deflate_workspacesize(
835 zlib_inflate_workspacesize()); 771 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
836 stream.workspace = kmalloc(size, GFP_KERNEL);
837 if (!stream.workspace) { 772 if (!stream.workspace) {
838 pr_err("nvram: No memory for compression workspace; " 773 pr_err("nvram: No memory for compression workspace; "
839 "skipping compression of %s partition data\n", 774 "skipping compression of %s partition data\n",
@@ -847,11 +782,6 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
847 stream.workspace = NULL; 782 stream.workspace = NULL;
848 } 783 }
849 784
850 rc = nvram_pstore_init();
851
852 if (!rc)
853 return;
854
855 rc = kmsg_dump_register(&nvram_kmsg_dumper); 785 rc = kmsg_dump_register(&nvram_kmsg_dumper);
856 if (rc != 0) { 786 if (rc != 0) {
857 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); 787 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
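The nvram.c hunks above track a pstore interface change: (de)compression of dmesg records now happens in the pstore core, so the backend's write callback takes a "bool compressed" flag in place of "size_t hsize", and the read callback reports the stored record's state back through "bool *compressed". Below is a minimal sketch of that contract, not the pseries driver itself: the demo_* helpers are hypothetical stand-ins for the driver's partition I/O, ERR_TYPE_KERNEL_PANIC_GZ appears in the hunks, and its plain ERR_TYPE_KERNEL_PANIC counterpart is assumed from the rest of this driver.

#include <linux/pstore.h>
#include <linux/time.h>

/* Hypothetical partition I/O, standing in for nvram_write_os_partition()
 * and the driver's read path. */
static int demo_write_partition(char *buf, size_t size, unsigned int err_type);
static ssize_t demo_read_partition(char **buf, unsigned int *err_type);

static int demo_pstore_write(enum pstore_type_id type,
			     enum kmsg_dump_reason reason,
			     u64 *id, unsigned int part, int count,
			     bool compressed, size_t size,
			     struct pstore_info *psi)
{
	/* pstore core has already deflated psi->buf when 'compressed' is
	 * set; the backend only tags the record so a later read can tell. */
	unsigned int err_type = compressed ? ERR_TYPE_KERNEL_PANIC_GZ
					   : ERR_TYPE_KERNEL_PANIC;

	return demo_write_partition(psi->buf, size, err_type);
}

static ssize_t demo_pstore_read(u64 *id, enum pstore_type_id *type,
				int *count, struct timespec *time,
				char **buf, bool *compressed,
				struct pstore_info *psi)
{
	unsigned int err_type;
	ssize_t length = demo_read_partition(buf, &err_type);

	/* The core, not the backend, inflates when this comes back true. */
	*compressed = (err_type == ERR_TYPE_KERNEL_PANIC_GZ);
	return length;
}

The reordering in nvram_init_oops_partition() follows from the same split: nvram_pstore_init() now runs before the big_oops_buf/deflate-workspace setup, which is only reached as a fallback for the legacy kmsg_dump path when pstore registration fails.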
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index af79e1ea74b6..af0f9beddca9 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,7 +62,7 @@ static int __init add_rtc(void)
62 pd = platform_device_register_simple("rtc_cmos", -1, 62 pd = platform_device_register_simple("rtc_cmos", -1,
63 &res[0], num_res); 63 &res[0], num_res);
64 64
65 return PTR_RET(pd); 65 return PTR_ERR_OR_ZERO(pd);
66} 66}
67fs_initcall(add_rtc); 67fs_initcall(add_rtc);
68 68
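The rtc_cmos_setup.c hunk is a pure rename: PTR_ERR_OR_ZERO() replaced PTR_RET() as the clearer name for the same helper in include/linux/err.h. An open-coded equivalent is sketched below; the demo_ name is illustrative only.

#include <linux/err.h>

/* What PTR_ERR_OR_ZERO(ptr) evaluates to: the encoded -errno if the
 * pointer is an ERR_PTR() value, otherwise 0. */
static inline int demo_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}

So add_rtc() still returns 0 when platform_device_register_simple() succeeds and the encoded -errno when it fails; only the spelling changed.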