path: root/arch/powerpc/platforms
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_pm.c    | 14
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig          |  1
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c       | 44
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c      | 21
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c    | 19
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c  | 11
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c       |  4
7 files changed, 78 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index 41c7fd91e99..fe92e65103e 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -14,6 +14,7 @@ static struct mpc52xx_sdma __iomem *bes;
 static struct mpc52xx_xlb __iomem *xlb;
 static struct mpc52xx_gpio __iomem *gps;
 static struct mpc52xx_gpio_wkup __iomem *gpw;
+static void __iomem *pci;
 static void __iomem *sram;
 static const int sram_size = 0x4000;	/* 16 kBytes */
 static void __iomem *mbar;
@@ -50,6 +51,8 @@ static int lite5200_pm_prepare(void)
 		{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
 		{}
 	};
+	u64 regaddr64 = 0;
+	const u32 *regaddr_p;

 	/* deep sleep? let mpc52xx code handle that */
 	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
@@ -60,8 +63,12 @@ static int lite5200_pm_prepare(void)

 	/* map registers */
 	np = of_find_matching_node(NULL, immr_ids);
-	mbar = of_iomap(np, 0);
+	regaddr_p = of_get_address(np, 0, NULL, NULL);
+	if (regaddr_p)
+		regaddr64 = of_translate_address(np, regaddr_p);
 	of_node_put(np);
+
+	mbar = ioremap((u32) regaddr64, 0xC000);
 	if (!mbar) {
 		printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__);
 		return -ENOSYS;
@@ -71,6 +78,7 @@ static int lite5200_pm_prepare(void)
 	pic = mbar + 0x500;
 	gps = mbar + 0xb00;
 	gpw = mbar + 0xc00;
+	pci = mbar + 0xd00;
 	bes = mbar + 0x1200;
 	xlb = mbar + 0x1f00;
 	sram = mbar + 0x8000;
@@ -85,6 +93,7 @@ static struct mpc52xx_sdma sbes;
 static struct mpc52xx_xlb sxlb;
 static struct mpc52xx_gpio sgps;
 static struct mpc52xx_gpio_wkup sgpw;
+static char spci[0x200];

 static void lite5200_save_regs(void)
 {
@@ -94,6 +103,7 @@ static void lite5200_save_regs(void)
 	_memcpy_fromio(&sxlb, xlb, sizeof(*xlb));
 	_memcpy_fromio(&sgps, gps, sizeof(*gps));
 	_memcpy_fromio(&sgpw, gpw, sizeof(*gpw));
+	_memcpy_fromio(spci, pci, 0x200);

 	_memcpy_fromio(saved_sram, sram, sram_size);
 }
@@ -103,6 +113,8 @@ static void lite5200_restore_regs(void)
 	int i;
 	_memcpy_toio(sram, saved_sram, sram_size);

+	/* PCI Configuration */
+	_memcpy_toio(pci, spci, 0x200);

 	/*
	 * GPIOs. Interrupt Master Enable has higher address then other
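
The lite5200_pm.c hunks above drop of_iomap() in favour of fetching the IMMR node's raw "reg" address with of_get_address(), translating it to a CPU physical address, and ioremap()ing a fixed 0xC000 window, presumably so the mapping is guaranteed to reach the PCI configuration block at MBAR + 0xd00 that the new spci[] save/restore code uses. A minimal sketch of that lookup-and-map shape, with a hypothetical helper name and the same fixed window size:

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/*
 * Illustrative sketch only: translate a node's first "reg" entry by hand and
 * map a fixed-size window, instead of letting of_iomap() derive the length
 * from the "reg" property. Error handling is deliberately minimal.
 */
static void __iomem *map_immr_window(struct device_node *np)
{
	const u32 *regaddr_p;
	u64 regaddr64 = 0;

	regaddr_p = of_get_address(np, 0, NULL, NULL);	/* raw "reg" address */
	if (regaddr_p)
		regaddr64 = of_translate_address(np, regaddr_p); /* CPU physical */

	if (!regaddr64)
		return NULL;

	return ioremap((u32)regaddr64, 0xC000);	/* fixed window, as in the patch */
}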
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 7ff29d53dc2..ecbe580c3f3 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -34,6 +34,7 @@ config MPC85xx_MDS
 	bool "Freescale MPC85xx MDS"
 	select DEFAULT_UIMAGE
 	select QUICC_ENGINE
+	select PHYLIB
 	help
 	  This option enables support for the MPC85xx MDS board

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 70c660121ec..78f905bc6a4 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
+	int ret;
+
 	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

-	/* Handle kernel space hash faults immediately.
-	   User hash faults need to be deferred to process context. */
-	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-	    && REGION_ID(ea) != USER_REGION_ID
-	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-		spu_restart_dma(spu);
-		return 0;
+	/*
+	 * Handle kernel space hash faults immediately. User hash
+	 * faults need to be deferred to process context.
+	 */
+	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+	    (REGION_ID(ea) != USER_REGION_ID)) {
+
+		spin_unlock(&spu->register_lock);
+		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+		spin_lock(&spu->register_lock);
+
+		if (!ret) {
+			spu_restart_dma(spu);
+			return 0;
+		}
 	}

 	spu->class_1_dar = ea;
@@ -324,17 +334,13 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;

 	spu->class_0_pending |= stat;
-	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
 	spu->class_0_dar = spu_mfc_dar_get(spu);
-	spin_unlock(&spu->register_lock);
-
 	spu->stop_callback(spu, 0);
-
 	spu->class_0_pending = 0;
-	spu->class_0_dsisr = 0;
 	spu->class_0_dar = 0;

 	spu_int_stat_clear(spu, 0, stat);
+	spin_unlock(&spu->register_lock);

 	return IRQ_HANDLED;
 }
@@ -357,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);

-	if (stat & CLASS1_SEGMENT_FAULT_INTR)
-		__spu_trap_data_seg(spu, dar);
-
-	spin_unlock(&spu->register_lock);
 	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
 			dar, dsisr);

+	if (stat & CLASS1_SEGMENT_FAULT_INTR)
+		__spu_trap_data_seg(spu, dar);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);

@@ -376,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
 	spu->class_1_dsisr = 0;
 	spu->class_1_dar = 0;

+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }

@@ -394,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
 	mask = spu_int_mask_get(spu, 2);
 	/* ignore interrupts we're not waiting for */
 	stat &= mask;
-
 	/* mailbox interrupts are level triggered. mask them now before
 	 * acknowledging */
 	if (stat & mailbox_intrs)
 		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
 	/* acknowledge all interrupts before the callbacks */
 	spu_int_stat_clear(spu, 2, stat);
-	spin_unlock(&spu->register_lock);

 	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

@@ -421,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
 		spu->wbox_callback(spu);

 	spu->stats.class2_intr++;
+
+	spin_unlock(&spu->register_lock);
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }

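The spu_base.c hunks keep spu->register_lock held for the full length of each class 0/1/2 interrupt handler (the spin_unlock() moves down to just before the return) and, in __spu_trap_data_map(), drop the lock only around the hash_page() call before re-taking it and testing the result. A generic sketch of that unlock-around-a-slow-call shape, using made-up names rather than the spufs ones:

#include <linux/spinlock.h>

/*
 * Illustrative sketch only: call a slow helper from a region that otherwise
 * runs under a spinlock by releasing the lock around the call, as the patch
 * does for hash_page(). The caller must hold *lock on entry and must treat
 * any state it cached before the call as stale afterwards.
 */
static int resolve_fault_locked(spinlock_t *lock, unsigned long ea,
				int (*resolve)(unsigned long ea))
{
	int ret;

	spin_unlock(lock);
	ret = resolve(ea);	/* may take a long time */
	spin_lock(lock);

	return ret;
}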
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b7493b86581..f7edba6cb79 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -27,7 +27,6 @@ void spufs_stop_callback(struct spu *spu, int irq)
 	switch(irq) {
 	case 0 :
 		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.class_0_dsisr = spu->class_0_dsisr;
 		ctx->csa.class_0_dar = spu->class_0_dar;
 		break;
 	case 1 :
@@ -51,18 +50,22 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	u64 dsisr;
 	u32 stopped;

-	*stat = ctx->ops->status_read(ctx);
-
-	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
-		return 1;
-
 	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
 		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
-	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
+
+top:
+	*stat = ctx->ops->status_read(ctx);
+	if (*stat & stopped) {
+		/*
+		 * If the spu hasn't finished stopping, we need to
+		 * re-read the register to get the stopped value.
+		 */
+		if (*stat & SPU_STATUS_RUNNING)
+			goto top;
 		return 1;
+	}

-	dsisr = ctx->csa.class_0_dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
 		return 1;

 	dsisr = ctx->csa.class_1_dsisr;
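spu_stopped() now re-reads the status word in a loop: if one of the stop bits is set while SPU_STATUS_RUNNING is still set, the stop has not yet completed and the register is read again. A stripped-down sketch of that poll-until-settled check, with generic names in place of the spufs context ops:

#include <linux/types.h>

/*
 * Illustrative sketch only: treat the unit as stopped once a stop bit is set
 * and the running bit has cleared; re-read while both are set, mirroring the
 * goto-based loop added to spu_stopped().
 */
static int unit_stopped(u32 (*read_status)(void *cookie), void *cookie,
			u32 stop_mask, u32 running_bit)
{
	u32 stat;

	do {
		stat = read_status(cookie);
	} while ((stat & stop_mask) && (stat & running_bit));

	return (stat & stop_mask) != 0;
}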
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51ec37..e929e70a84e 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;

+	spu_associate_mm(spu, ctx->owner);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
 	spu->tgid = current->tgid;
-	spu_associate_mm(spu, ctx->owner);
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
 	spu->mfc_callback = spufs_mfc_callback;
-	mb();
+	spin_unlock_irq(&spu->register_lock);
+
 	spu_unmap_mappings(ctx);
+
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
@@ -403,6 +407,8 @@ static int has_affinity(struct spu_context *ctx)
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
+	u32 status;
+
 	spu_context_trace(spu_unbind_context__enter, ctx, spu);

 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -423,18 +429,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+	spin_lock_irq(&spu->register_lock);
 	spu->timestamp = jiffies;
 	ctx->state = SPU_STATE_SAVED;
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
 	spu->mfc_callback = NULL;
-	spu_associate_mm(spu, NULL);
 	spu->pid = 0;
 	spu->tgid = 0;
 	ctx->ops = &spu_backing_ops;
 	spu->flags = 0;
 	spu->ctx = NULL;
+	spin_unlock_irq(&spu->register_lock);
+
+	spu_associate_mm(spu, NULL);

 	ctx->stats.slb_flt +=
 		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
@@ -444,6 +454,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	/* This maps the underlying spu state to idle */
 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	ctx->spu = NULL;
+
+	if (spu_stopped(ctx, &status))
+		wake_up_all(&ctx->stop_wq);
 }

 /**
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 68ea5eee39a..8c1ca477c52 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -42,17 +42,20 @@ static inline const char * pcid_name (struct pci_dev *pdev)
 }

 #ifdef DEBUG
-static void print_device_node_tree (struct pci_dn *pdn, int dent)
+static void print_device_node_tree(struct pci_dn *pdn, int dent)
 {
 	int i;
-	if (!pdn) return;
-	for (i=0;i<dent; i++)
+	struct device_node *pc;
+
+	if (!pdn)
+		return;
+	for (i = 0; i < dent; i++)
 		printk(" ");
 	printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
 		pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
 		pdn->eeh_pe_config_addr, pdn->node->full_name);
 	dent += 3;
-	struct device_node *pc = pdn->node->child;
+	pc = pdn->node->child;
 	while (pc) {
 		print_device_node_tree(PCI_DN(pc), dent);
 		pc = pc->sibling;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index f68903e15bd..42f7e384e6c 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -131,8 +131,10 @@ int __init pSeries_nvram_init(void)
 		return -ENODEV;

 	nbytes_p = of_get_property(nvram, "#bytes", &proplen);
-	if (nbytes_p == NULL || proplen != sizeof(unsigned int))
+	if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
+		of_node_put(nvram);
 		return -EIO;
+	}

 	nvram_size = *nbytes_p;

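The nvram.c hunk adds the of_node_put() that was missing on the -EIO path, balancing the reference taken when the nvram node was looked up. A hedged sketch of the general rule, with a placeholder function name (the property check follows the diff; the lookup and surrounding code are illustrative):

#include <linux/errno.h>
#include <linux/of.h>

/*
 * Illustrative sketch only: a reference obtained from of_find_node_by_type()
 * must be dropped with of_node_put() on every exit path, error or success.
 */
static int read_nvram_bytes(unsigned int *out)
{
	struct device_node *nvram;
	const unsigned int *nbytes_p;
	int proplen;

	nvram = of_find_node_by_type(NULL, "nvram");	/* takes a reference */
	if (nvram == NULL)
		return -ENODEV;

	nbytes_p = of_get_property(nvram, "#bytes", &proplen);
	if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
		of_node_put(nvram);		/* the fix: drop it on error too */
		return -EIO;
	}

	*out = *nbytes_p;
	of_node_put(nvram);
	return 0;
}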