author		Michael Ellerman <mpe@ellerman.id.au>	2014-12-01 22:19:20 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>	2014-12-01 22:19:20 -0500
commit		b5be75d00833a3f0ff76b1d7473119be33367faa (patch)
tree		88106f598970b086f06a0d7dd123b6d00adf9f4a /arch/powerpc
parent		e39f223fc93580c86ccf6b3422033e349f57f0dd (diff)
parent		d557b09800dab5dd6804e5b79324069abcf0be11 (diff)
Merge remote-tracking branch 'benh/next' into next
Merge updates collected & acked by Ben. A few EEH patches from Gavin, some mm updates from Aneesh and a few odds and ends.
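
Of note in the mm updates below: the hugepage hash-flush path now carries a "local" flag so the cheaper tlbiel form can be used when only the current CPU has the mm active. A minimal sketch of that pattern, mirroring the new code in hpte_do_hugepage_flush() in this diff (kernel context assumed; mm, vsid, addr, pmdp, psize and ssize come from the caller):

	/* Sketch only: decide local vs. global invalidation before flushing.
	 * tlbiel reaches only the current CPU, so "local" is only safe when
	 * no other CPU has this mm in its cpumask.
	 */
	int local = 0;
	const struct cpumask *tmp = cpumask_of(smp_processor_id());

	if (cpumask_equal(mm_cpumask(mm), tmp))
		local = 1;

	flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);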
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/eeh.h	2
-rw-r--r--	arch/powerpc/include/asm/machdep.h	2
-rw-r--r--	arch/powerpc/include/asm/opal.h	104
-rw-r--r--	arch/powerpc/include/asm/paca.h	7
-rw-r--r--	arch/powerpc/include/asm/tlbflush.h	4
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	7
-rw-r--r--	arch/powerpc/kernel/eeh.c	41
-rw-r--r--	arch/powerpc/kernel/eeh_driver.c	10
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	17
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	28
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	70
-rw-r--r--	arch/powerpc/mm/hugepage-hash64.c	54
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	7
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	69
-rw-r--r--	arch/powerpc/oprofile/backtrace.c	6
-rw-r--r--	arch/powerpc/platforms/powernv/eeh-ioda.c	15
-rw-r--r--	arch/powerpc/platforms/powernv/opal.c	1
-rw-r--r--	arch/powerpc/platforms/pseries/lpar.c	2
18 files changed, 162 insertions(+), 284 deletions(-)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index ca07f9c27335..0652ebe117af 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -39,6 +39,7 @@ struct device_node;
 #define EEH_PROBE_MODE_DEV	0x04	/* From PCI device	*/
 #define EEH_PROBE_MODE_DEVTREE	0x08	/* From device tree	*/
 #define EEH_ENABLE_IO_FOR_LOG	0x10	/* Enable IO for log	*/
+#define EEH_EARLY_DUMP_LOG	0x20	/* Dump log immediately	*/
 
 /*
  * Delay for PE reset, all in ms
@@ -72,6 +73,7 @@ struct device_node;
 #define EEH_PE_ISOLATED		(1 << 0)	/* Isolated PE		*/
 #define EEH_PE_RECOVERING	(1 << 1)	/* Recovering PE	*/
 #define EEH_PE_CFG_BLOCKED	(1 << 2)	/* Block config access	*/
+#define EEH_PE_RESET		(1 << 3)	/* PE reset in progress	*/
 
 #define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/
 #define EEH_PE_CFG_RESTRICTED	(1 << 9)	/* Block config on error */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 15c9150a58cc..e5c0919acca4 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -60,7 +60,7 @@ struct machdep_calls {
 	void		(*hugepage_invalidate)(unsigned long vsid,
 					       unsigned long addr,
 					       unsigned char *hpte_slot_array,
-					       int psize, int ssize);
+					       int psize, int ssize, int local);
 	/* special for kexec, to be called in real mode, linear mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 60250e2d1f0d..5cd8d2fddba9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -288,62 +288,6 @@ enum OpalMessageType {
 	OPAL_MSG_TYPE_MAX,
 };
 
-/* Machine check related definitions */
-enum OpalMCE_Version {
-	OpalMCE_V1 = 1,
-};
-
-enum OpalMCE_Severity {
-	OpalMCE_SEV_NO_ERROR = 0,
-	OpalMCE_SEV_WARNING = 1,
-	OpalMCE_SEV_ERROR_SYNC = 2,
-	OpalMCE_SEV_FATAL = 3,
-};
-
-enum OpalMCE_Disposition {
-	OpalMCE_DISPOSITION_RECOVERED = 0,
-	OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
-};
-
-enum OpalMCE_Initiator {
-	OpalMCE_INITIATOR_UNKNOWN = 0,
-	OpalMCE_INITIATOR_CPU = 1,
-};
-
-enum OpalMCE_ErrorType {
-	OpalMCE_ERROR_TYPE_UNKNOWN = 0,
-	OpalMCE_ERROR_TYPE_UE = 1,
-	OpalMCE_ERROR_TYPE_SLB = 2,
-	OpalMCE_ERROR_TYPE_ERAT = 3,
-	OpalMCE_ERROR_TYPE_TLB = 4,
-};
-
-enum OpalMCE_UeErrorType {
-	OpalMCE_UE_ERROR_INDETERMINATE = 0,
-	OpalMCE_UE_ERROR_IFETCH = 1,
-	OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
-	OpalMCE_UE_ERROR_LOAD_STORE = 3,
-	OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
-};
-
-enum OpalMCE_SlbErrorType {
-	OpalMCE_SLB_ERROR_INDETERMINATE = 0,
-	OpalMCE_SLB_ERROR_PARITY = 1,
-	OpalMCE_SLB_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_EratErrorType {
-	OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
-	OpalMCE_ERAT_ERROR_PARITY = 1,
-	OpalMCE_ERAT_ERROR_MULTIHIT = 2,
-};
-
-enum OpalMCE_TlbErrorType {
-	OpalMCE_TLB_ERROR_INDETERMINATE = 0,
-	OpalMCE_TLB_ERROR_PARITY = 1,
-	OpalMCE_TLB_ERROR_MULTIHIT = 2,
-};
-
 enum OpalThreadStatus {
 	OPAL_THREAD_INACTIVE = 0x0,
 	OPAL_THREAD_STARTED = 0x1,
@@ -467,54 +411,6 @@ struct opal_ipmi_msg {
 	uint8_t data[];
 };
 
-struct opal_machine_check_event {
-	enum OpalMCE_Version	version:8;	/* 0x00 */
-	uint8_t			in_use;		/* 0x01 */
-	enum OpalMCE_Severity	severity:8;	/* 0x02 */
-	enum OpalMCE_Initiator	initiator:8;	/* 0x03 */
-	enum OpalMCE_ErrorType	error_type:8;	/* 0x04 */
-	enum OpalMCE_Disposition disposition:8; /* 0x05 */
-	uint8_t			reserved_1[2];	/* 0x06 */
-	uint64_t		gpr3;		/* 0x08 */
-	uint64_t		srr0;		/* 0x10 */
-	uint64_t		srr1;		/* 0x18 */
-	union {					/* 0x20 */
-		struct {
-			enum OpalMCE_UeErrorType ue_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		physical_address_provided;
-			uint8_t		reserved_1[5];
-			uint64_t	effective_address;
-			uint64_t	physical_address;
-			uint8_t		reserved_2[8];
-		} ue_error;
-
-		struct {
-			enum OpalMCE_SlbErrorType slb_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		reserved_1[6];
-			uint64_t	effective_address;
-			uint8_t		reserved_2[16];
-		} slb_error;
-
-		struct {
-			enum OpalMCE_EratErrorType erat_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		reserved_1[6];
-			uint64_t	effective_address;
-			uint8_t		reserved_2[16];
-		} erat_error;
-
-		struct {
-			enum OpalMCE_TlbErrorType tlb_error_type:8;
-			uint8_t		effective_address_provided;
-			uint8_t		reserved_1[6];
-			uint64_t	effective_address;
-			uint8_t		reserved_2[16];
-		} tlb_error;
-	} u;
-};
-
 /* FSP memory errors handling */
 enum OpalMemErr_Version {
 	OpalMemErr_V1 = 1,
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5139ea6910b..24a386cbb928 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -42,7 +42,6 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
 #define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
 
 struct task_struct;
-struct opal_machine_check_event;
 
 /*
  * Defines the layout of the paca.
@@ -153,12 +152,6 @@ struct paca_struct {
 	u64 tm_scratch;                 /* TM scratch area for reclaim */
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-	/* Pointer to OPAL machine check event structure set by the
-	 * early exception handler for use by high level C handler
-	 */
-	struct opal_machine_check_event	*opal_mc_evt;
-#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* Exclusive emergency stack pointer for machine check exception. */
 	void *mc_emergency_sp;
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index cd7c2719d3ef..4d3ecd8d8929 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -127,7 +127,9 @@ static inline void arch_leave_lazy_mmu_mode(void)
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
 			    int ssize, int local);
 extern void flush_hash_range(unsigned long number, int local);
-
+extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+				pmd_t *pmdp, unsigned int psize, int ssize,
+				int local);
 
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9d7dede2847c..c161ef3f28a1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -726,12 +726,5 @@ int main(void)
 						arch.timing_last_enter.tv32.tbl));
 #endif
 
-#ifdef CONFIG_PPC_POWERNV
-	DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
-	DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
-	DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
-	DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
-#endif
-
 	return 0;
 }
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 2248a1999c64..e1b6d8e17289 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -143,6 +143,8 @@ static int __init eeh_setup(char *str)
 {
 	if (!strcmp(str, "off"))
 		eeh_add_flag(EEH_FORCE_DISABLED);
+	else if (!strcmp(str, "early_log"))
+		eeh_add_flag(EEH_EARLY_DUMP_LOG);
 
 	return 1;
 }
@@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
 int eeh_reset_pe(struct eeh_pe *pe)
 {
 	int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
-	int i, rc;
+	int i, state, ret;
+
+	/* Mark as reset and block config space */
+	eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
 
 	/* Take three shots at resetting the bus */
-	for (i=0; i<3; i++) {
+	for (i = 0; i < 3; i++) {
 		eeh_reset_pe_once(pe);
 
 		/*
 		 * EEH_PE_ISOLATED is expected to be removed after
 		 * BAR restore.
 		 */
-		rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
-		if ((rc & flags) == flags)
-			return 0;
+		state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+		if ((state & flags) == flags) {
+			ret = 0;
+			goto out;
+		}
 
-		if (rc < 0) {
-			pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+		if (state < 0) {
+			pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
 				__func__, pe->phb->global_number, pe->addr);
-			return -1;
+			ret = -ENOTRECOVERABLE;
+			goto out;
 		}
-		pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n",
-			i+1, pe->phb->global_number, pe->addr, rc);
+
+		/* We might run out of credits */
+		ret = -EIO;
+		pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n",
+			__func__, state, pe->phb->global_number, pe->addr, (i + 1));
 	}
 
-	return -1;
+out:
+	eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+	return ret;
 }
 
 /**
@@ -920,11 +933,8 @@ int eeh_init(void)
 		pr_warn("%s: Platform EEH operation not found\n",
 			__func__);
 		return -EEXIST;
-	} else if ((ret = eeh_ops->init())) {
-		pr_warn("%s: Failed to call platform init function (%d)\n",
-			__func__, ret);
+	} else if ((ret = eeh_ops->init()))
 		return ret;
-	}
 
 	/* Initialize EEH event */
 	ret = eeh_event_init();
@@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
 static struct pci_device_id eeh_reset_ids[] = {
 	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE     */
 	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
+	{ PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */
 	{ 0 }
 };
 
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6535936bdf27..b17e793ba67e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
 	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 
 	/* Issue reset */
-	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 	ret = eeh_reset_pe(pe);
 	if (ret) {
-		eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
+		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		return ret;
 	}
-	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
 	/* Unfreeze the PE */
 	ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 	 * config accesses. So we prefer to block them. However, controlled
 	 * PCI config accesses initiated from EEH itself are allowed.
 	 */
-	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 	rc = eeh_reset_pe(pe);
-	if (rc) {
-		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+	if (rc)
 		return rc;
-	}
 
 	pci_lock_rescan_remove();
 
 	/* Restore PE */
 	eeh_ops->configure_bridge(pe);
 	eeh_pe_restore_bars(pe);
-	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
 	/* Clear frozen state */
 	rc = eeh_clear_pe_frozen_state(pe, false);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a1d45c161e24..ad62f4d6ce31 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1312,23 +1312,6 @@ hmi_exception_after_realmode:
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	hmi_exception_hv
 
-#ifdef CONFIG_PPC_POWERNV
-_GLOBAL(opal_mc_secondary_handler)
-	HMT_MEDIUM_PPR_DISCARD
-	SET_SCRATCH0(r13)
-	GET_PACA(r13)
-	clrldi	r3,r3,2
-	tovirt(r3,r3)
-	std	r3,PACA_OPAL_MC_EVT(r13)
-	ld	r13,OPAL_MC_SRR0(r3)
-	mtspr	SPRN_SRR0,r13
-	ld	r13,OPAL_MC_SRR1(r3)
-	mtspr	SPRN_SRR1,r13
-	ld	r3,OPAL_MC_GPR3(r3)
-	GET_SCRATCH0(r13)
-	b	machine_check_pSeries
-#endif /* CONFIG_PPC_POWERNV */
-
 
 #define MACHINE_CHECK_HANDLER_WINDUP			\
 	/* Clear MSR_RI before setting SRR0 and SRR1. */\
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d53288a08c37..13700911b522 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -294,8 +294,6 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
-	native_lock_hpte(hptep);
-
 	hpte_v = be64_to_cpu(hptep->v);
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -308,16 +306,24 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		DBG_LOW(" -> miss\n");
 		ret = -1;
 	} else {
-		DBG_LOW(" -> hit\n");
-		/* Update the HPTE */
-		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
+		native_lock_hpte(hptep);
+		/* recheck with locks held */
+		hpte_v = be64_to_cpu(hptep->v);
+		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
+			     !(hpte_v & HPTE_V_VALID))) {
+			ret = -1;
+		} else {
+			DBG_LOW(" -> hit\n");
+			/* Update the HPTE */
+			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+						~(HPTE_R_PP | HPTE_R_N)) |
+					       (newpp & (HPTE_R_PP | HPTE_R_N |
+							 HPTE_R_C)));
+		}
+		native_unlock_hpte(hptep);
 	}
-	native_unlock_hpte(hptep);
-
 	/* Ensure it is out of the tlb too. */
 	tlbie(vpn, bpsize, apsize, ssize, local);
-
 	return ret;
 }
 
@@ -419,7 +425,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 static void native_hugepage_invalidate(unsigned long vsid,
 				       unsigned long addr,
 				       unsigned char *hpte_slot_array,
-				       int psize, int ssize)
+				       int psize, int ssize, int local)
 {
 	int i;
 	struct hash_pte *hptep;
@@ -465,7 +471,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
 		 * instruction compares entry_VA in tlb with the VA specified
 		 * here
 		 */
-		tlbie(vpn, psize, actual_psize, ssize, 0);
+		tlbie(vpn, psize, actual_psize, ssize, local);
 	}
 	local_irq_restore(flags);
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f01027731e23..68211d398fdb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1315,6 +1315,76 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
 #endif
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
+			 pmd_t *pmdp, unsigned int psize, int ssize, int local)
+{
+	int i, max_hpte_count, valid;
+	unsigned long s_addr;
+	unsigned char *hpte_slot_array;
+	unsigned long hidx, shift, vpn, hash, slot;
+
+	s_addr = addr & HPAGE_PMD_MASK;
+	hpte_slot_array = get_hpte_slot_array(pmdp);
+	/*
+	 * IF we try to do a HUGE PTE update after a withdraw is done.
+	 * we will find the below NULL. This happens when we do
+	 * split_huge_page_pmd
+	 */
+	if (!hpte_slot_array)
+		return;
+
+	if (ppc_md.hugepage_invalidate) {
+		ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+					   psize, ssize, local);
+		goto tm_abort;
+	}
+	/*
+	 * No bluk hpte removal support, invalidate each entry
+	 */
+	shift = mmu_psize_defs[psize].shift;
+	max_hpte_count = HPAGE_PMD_SIZE >> shift;
+	for (i = 0; i < max_hpte_count; i++) {
+		/*
+		 * 8 bits per each hpte entries
+		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
+		 */
+		valid = hpte_valid(hpte_slot_array, i);
+		if (!valid)
+			continue;
+		hidx =  hpte_hash_index(hpte_slot_array, i);
+
+		/* get the vpn */
+		addr = s_addr + (i * (1ul << shift));
+		vpn = hpt_vpn(addr, vsid, ssize);
+		hash = hpt_hash(vpn, shift, ssize);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+		ppc_md.hpte_invalidate(slot, vpn, psize,
+				       MMU_PAGE_16M, ssize, local);
+	}
+tm_abort:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/* Transactions are not aborted by tlbiel, only tlbie.
+	 * Without, syncing a page back to a block device w/ PIO could pick up
+	 * transactional data (bad!) so we force an abort here. Before the
+	 * sync the page will be made read-only, which will flush_hash_page.
+	 * BIG ISSUE here: if the kernel uses a page from userspace without
+	 * unmapping it first, it may see the speculated version.
+	 */
+	if (local && cpu_has_feature(CPU_FTR_TM) &&
+	    current->thread.regs &&
+	    MSR_TM_ACTIVE(current->thread.regs->msr)) {
+		tm_enable();
+		tm_abort(TM_CAUSE_TLBI);
+	}
+#endif
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range)
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 5f5e6328c21c..3a648cd363ae 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,57 +18,6 @@
 #include <linux/mm.h>
 #include <asm/machdep.h>
 
-static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
-				pmd_t *pmdp, unsigned int psize, int ssize)
-{
-	int i, max_hpte_count, valid;
-	unsigned long s_addr;
-	unsigned char *hpte_slot_array;
-	unsigned long hidx, shift, vpn, hash, slot;
-
-	s_addr = addr & HPAGE_PMD_MASK;
-	hpte_slot_array = get_hpte_slot_array(pmdp);
-	/*
-	 * IF we try to do a HUGE PTE update after a withdraw is done.
-	 * we will find the below NULL. This happens when we do
-	 * split_huge_page_pmd
-	 */
-	if (!hpte_slot_array)
-		return;
-
-	if (ppc_md.hugepage_invalidate)
-		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
-						  psize, ssize);
-	/*
-	 * No bluk hpte removal support, invalidate each entry
-	 */
-	shift = mmu_psize_defs[psize].shift;
-	max_hpte_count = HPAGE_PMD_SIZE >> shift;
-	for (i = 0; i < max_hpte_count; i++) {
-		/*
-		 * 8 bits per each hpte entries
-		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
-		 */
-		valid = hpte_valid(hpte_slot_array, i);
-		if (!valid)
-			continue;
-		hidx =  hpte_hash_index(hpte_slot_array, i);
-
-		/* get the vpn */
-		addr = s_addr + (i * (1ul << shift));
-		vpn = hpt_vpn(addr, vsid, ssize);
-		hash = hpt_hash(vpn, shift, ssize);
-		if (hidx & _PTEIDX_SECONDARY)
-			hash = ~hash;
-
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += hidx & _PTEIDX_GROUP_IX;
-		ppc_md.hpte_invalidate(slot, vpn, psize,
-				       MMU_PAGE_16M, ssize, 0);
-	}
-}
-
-
 int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
 		    unsigned int psize)
@@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * hash page table entries.
 		 */
 		if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
-			invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
+			flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
+					    ssize, local);
 	}
 
 	valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8c9b8115867c..868ab0fc5fbf 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -355,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
 	if (size != 0) {
 		if (sscanf(val, "%lu", &npages) <= 0)
 			npages = 0;
+		if (npages > MAX_NUMBER_GPAGES) {
+			pr_warn("MMU: %lu pages requested for page "
+				"size %llu KB, limiting to "
+				__stringify(MAX_NUMBER_GPAGES) "\n",
+				npages, size / 1024);
+			npages = MAX_NUMBER_GPAGES;
+		}
 		gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
 		size = 0;
 	}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 87ff0c1908a9..eea9fa1f8ae7 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -739,29 +739,14 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 			    pmd_t *pmdp, unsigned long old_pmd)
 {
-	int ssize, i;
-	unsigned long s_addr;
-	int max_hpte_count;
-	unsigned int psize, valid;
-	unsigned char *hpte_slot_array;
-	unsigned long hidx, vpn, vsid, hash, shift, slot;
-
-	/*
-	 * Flush all the hptes mapping this hugepage
-	 */
-	s_addr = addr & HPAGE_PMD_MASK;
-	hpte_slot_array = get_hpte_slot_array(pmdp);
-	/*
-	 * IF we try to do a HUGE PTE update after a withdraw is done.
-	 * we will find the below NULL. This happens when we do
-	 * split_huge_page_pmd
-	 */
-	if (!hpte_slot_array)
-		return;
+	int ssize, local = 0;
+	unsigned int psize;
+	unsigned long vsid;
+	const struct cpumask *tmp;
 
 	/* get the base page size,vsid and segment size */
 #ifdef CONFIG_DEBUG_VM
-	psize = get_slice_psize(mm, s_addr);
+	psize = get_slice_psize(mm, addr);
 	BUG_ON(psize == MMU_PAGE_16M);
 #endif
 	if (old_pmd & _PAGE_COMBO)
@@ -769,46 +754,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 	else
 		psize = MMU_PAGE_64K;
 
-	if (!is_kernel_addr(s_addr)) {
-		ssize = user_segment_size(s_addr);
-		vsid = get_vsid(mm->context.id, s_addr, ssize);
+	if (!is_kernel_addr(addr)) {
+		ssize = user_segment_size(addr);
+		vsid = get_vsid(mm->context.id, addr, ssize);
 		WARN_ON(vsid == 0);
 	} else {
-		vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
+		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
 
-	if (ppc_md.hugepage_invalidate)
-		return ppc_md.hugepage_invalidate(vsid, s_addr,
-						  hpte_slot_array,
-						  psize, ssize);
-	/*
-	 * No bluk hpte removal support, invalidate each entry
-	 */
-	shift = mmu_psize_defs[psize].shift;
-	max_hpte_count = HPAGE_PMD_SIZE >> shift;
-	for (i = 0; i < max_hpte_count; i++) {
-		/*
-		 * 8 bits per each hpte entries
-		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
-		 */
-		valid = hpte_valid(hpte_slot_array, i);
-		if (!valid)
-			continue;
-		hidx =  hpte_hash_index(hpte_slot_array, i);
-
-		/* get the vpn */
-		addr = s_addr + (i * (1ul << shift));
-		vpn = hpt_vpn(addr, vsid, ssize);
-		hash = hpt_hash(vpn, shift, ssize);
-		if (hidx & _PTEIDX_SECONDARY)
-			hash = ~hash;
-
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += hidx & _PTEIDX_GROUP_IX;
-		ppc_md.hpte_invalidate(slot, vpn, psize,
-				       MMU_PAGE_16M, ssize, 0);
-	}
+	tmp = cpumask_of(smp_processor_id());
+	if (cpumask_equal(mm_cpumask(mm), tmp))
+		local = 1;
+
+	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, local);
 }
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index 6adf55fa5d88..ecc66d5f02c9 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -10,7 +10,7 @@
 #include <linux/oprofile.h>
 #include <linux/sched.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/compat.h>
 #include <asm/oprofile_impl.h>
 
@@ -105,6 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 			first_frame = 0;
 		}
 	} else {
+		pagefault_disable();
 #ifdef CONFIG_PPC64
 		if (!is_32bit_task()) {
 			while (depth--) {
@@ -113,7 +114,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 					break;
 				first_frame = 0;
 			}
-
+			pagefault_enable();
 			return;
 		}
 #endif
@@ -124,5 +125,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
 				break;
 			first_frame = 0;
 		}
+		pagefault_enable();
 	}
 }
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index db3803e21483..2809c9895288 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -353,6 +353,9 @@ static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
 	} else if (!(pe->state & EEH_PE_ISOLATED)) {
 		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 	}
 
 	return result;
@@ -372,7 +375,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
 	 * moving forward, we have to return operational
 	 * state during PE reset.
 	 */
-	if (pe->state & EEH_PE_CFG_BLOCKED) {
+	if (pe->state & EEH_PE_RESET) {
 		result = (EEH_STATE_MMIO_ACTIVE |
 			  EEH_STATE_DMA_ACTIVE |
 			  EEH_STATE_MMIO_ENABLED |
@@ -451,6 +454,9 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
 
 		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 	}
 
 	return result;
@@ -730,7 +736,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
 static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
 			    char *drv_log, unsigned long len)
 {
-	pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 
 	return 0;
 }
@@ -1086,6 +1093,10 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 		    !((*pe)->state & EEH_PE_ISOLATED)) {
 			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
 			ioda_eeh_phb_diag(*pe);
+
+			if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+				pnv_pci_dump_phb_diag_data((*pe)->phb,
+							   (*pe)->data);
 		}
 
 		/*
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index a1c37f9d60d2..cb0b6de79cd4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -50,7 +50,6 @@ static int mc_recoverable_range_len;
 
 struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
-extern u64 opal_mc_secondary_handler[];
 static unsigned int *opal_irqs;
 static unsigned int opal_irq_count;
 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index d214a012b026..832f221840f2 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -442,7 +442,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
 static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
 					     unsigned long addr,
 					     unsigned char *hpte_slot_array,
-					     int psize, int ssize)
+					     int psize, int ssize, int local)
 {
 	int i, index = 0;
 	unsigned long s_addr = addr;