author     Linus Torvalds <torvalds@g5.osdl.org>   2006-06-29 14:32:34 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-29 14:32:34 -0400
commit     3aa590c6b7c89d844f81c2e96f295cf2c6967773 (patch)
tree       6f18b295b1ff4cd7fd1880db6f56721599d64439 /arch/powerpc/platforms/iseries
parent     4d3ce21fa9d2eaeda113aa2f9c2da80d972bef64 (diff)
parent     339d76c54336443f5050b00172beb675f35e3be0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (43 commits)
  [POWERPC] Use little-endian bit from firmware ibm,pa-features property
  [POWERPC] Make sure smp_processor_id works very early in boot
  [POWERPC] U4 DART improvements
  [POWERPC] todc: add support for Time-Of-Day-Clock
  [POWERPC] Make lparcfg.c work when both iseries and pseries are selected
  [POWERPC] Fix idr locking in init_new_context
  [POWERPC] mpc7448hpc2 (taiga) board config file
  [POWERPC] Add tsi108 pci and platform device data register function
  [POWERPC] Add general support for mpc7448hpc2 (Taiga) platform
  [POWERPC] Correct the MAX_CONTEXT definition
  powerpc: minor cleanups for mpc86xx
  [POWERPC] Make sure we select CONFIG_NEW_LEDS if ADB_PMU_LED is set
  [POWERPC] Simplify the code defining the 64-bit CPU features
  [POWERPC] powerpc: kconfig warning fix
  [POWERPC] Consolidate some of kernel/misc*.S
  [POWERPC] Remove unused function call_with_mmu_off
  [POWERPC] update asm-powerpc/time.h
  [POWERPC] Clean up it_lp_queue.h
  [POWERPC] Skip the "copy down" of the kernel if it is already at zero.
  [POWERPC] Add the use of the firmware soft-reset-nmi to kdump.
  ...
Diffstat (limited to 'arch/powerpc/platforms/iseries')
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c        2
-rw-r--r--  arch/powerpc/platforms/iseries/htab.c      4
-rw-r--r--  arch/powerpc/platforms/iseries/lpevents.c  55
-rw-r--r--  arch/powerpc/platforms/iseries/proc.c      1
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c     19
5 files changed, 33 insertions, 48 deletions
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index d3444aabe76e..d194140c1ebf 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -252,6 +252,7 @@ static void __init dt_model(struct iseries_flat_dt *dt)
 {
 	char buf[16] = "IBM,";
 
+	/* N.B. lparcfg.c knows about the "IBM," prefixes ... */
 	/* "IBM," + mfgId[2:3] + systemSerial[1:5] */
 	strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
 	strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
@@ -264,6 +265,7 @@ static void __init dt_model(struct iseries_flat_dt *dt)
 	dt_prop_str(dt, "model", buf);
 
 	dt_prop_str(dt, "compatible", "IBM,iSeries");
+	dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
 }
 
 static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index 30bdcf3925d9..ed44dfceaa45 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -242,13 +242,11 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 	local_irq_restore(flags);
 }
 
-void hpte_init_iSeries(void)
+void __init hpte_init_iSeries(void)
 {
 	ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
 	ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
 	ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
 	ppc_md.hpte_insert = iSeries_hpte_insert;
 	ppc_md.hpte_remove = iSeries_hpte_remove;
-
-	htab_finish_init();
 }
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index 8ca7b9396355..2a9f81ea27d6 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -51,20 +51,21 @@ static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
 static struct HvLpEvent * get_next_hvlpevent(void)
 {
 	struct HvLpEvent * event;
-	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
 
 	if (hvlpevent_is_valid(event)) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
-				LpEventAlign) / LpEventAlign) * LpEventAlign;
+		hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
+				IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
+				IT_LP_EVENT_ALIGN;
 
 		/* Wrap to beginning if no room at end */
-		if (hvlpevent_queue.xSlicCurEventPtr >
-				hvlpevent_queue.xSlicLastValidEventPtr) {
-			hvlpevent_queue.xSlicCurEventPtr =
-				hvlpevent_queue.xSlicEventStackPtr;
+		if (hvlpevent_queue.hq_current_event >
+				hvlpevent_queue.hq_last_event) {
+			hvlpevent_queue.hq_current_event =
+				hvlpevent_queue.hq_event_stack;
 		}
 	} else {
 		event = NULL;
@@ -82,10 +83,10 @@ int hvlpevent_is_pending(void)
 	if (smp_processor_id() >= spread_lpevents)
 		return 0;
 
-	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
 
 	return hvlpevent_is_valid(next_event) ||
-		hvlpevent_queue.xPlicOverflowIntPending;
+		hvlpevent_queue.hq_overflow_pending;
 }
 
 static void hvlpevent_clear_valid(struct HvLpEvent * event)
@@ -95,18 +96,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
 	 * ie. on 64-byte boundaries.
 	 */
 	struct HvLpEvent *tmp;
-	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
-			  LpEventAlign) - 1;
+	unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
+			  IT_LP_EVENT_ALIGN) - 1;
 
 	switch (extra) {
 	case 3:
-		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	case 2:
-		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	case 1:
-		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
 		hvlpevent_invalidate(tmp);
 	}
 
@@ -120,7 +121,7 @@ void process_hvlpevents(struct pt_regs *regs)
 	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
-	if (!spin_trylock(&hvlpevent_queue.lock))
+	if (!spin_trylock(&hvlpevent_queue.hq_lock))
 		return;
 
 	for (;;) {
@@ -148,17 +149,17 @@ void process_hvlpevents(struct pt_regs *regs)
 				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
 			hvlpevent_clear_valid(event);
-		} else if (hvlpevent_queue.xPlicOverflowIntPending)
+		} else if (hvlpevent_queue.hq_overflow_pending)
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
+			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
 		else
 			break;
 	}
 
-	spin_unlock(&hvlpevent_queue.lock);
+	spin_unlock(&hvlpevent_queue.hq_lock);
 }
 
 static int set_spread_lpevents(char *str)
@@ -184,20 +185,20 @@ void setup_hvlpevent_queue(void)
 {
 	void *eventStack;
 
-	spin_lock_init(&hvlpevent_queue.lock);
+	spin_lock_init(&hvlpevent_queue.hq_lock);
 
 	/* Allocate a page for the Event Stack. */
-	eventStack = alloc_bootmem_pages(LpEventStackSize);
-	memset(eventStack, 0, LpEventStackSize);
+	eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
+	memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
 
 	/* Invoke the hypervisor to initialize the event stack */
-	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
+	HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
 
-	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
-	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
-	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
-		(LpEventStackSize - LpEventMaxSize);
-	hvlpevent_queue.xIndex = 0;
+	hvlpevent_queue.hq_event_stack = eventStack;
+	hvlpevent_queue.hq_current_event = eventStack;
+	hvlpevent_queue.hq_last_event = (char *)eventStack +
+		(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
+	hvlpevent_queue.hq_index = 0;
 }
 
 /* Register a handler for an LpEvent type */
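
The lpevents.c changes above are a mechanical rename: the hvlpevent_queue fields gain an hq_ prefix, and the LpEventAlign / LpEventStackSize / LpEventMaxSize constants become IT_LP_EVENT_ALIGN / IT_LP_EVENT_STACK_SIZE / IT_LP_EVENT_MAX_SIZE (see the "[POWERPC] Clean up it_lp_queue.h" commit in the shortlog). As a reading aid, a minimal sketch of how the renamed structure presumably looks, reconstructed only from the fields touched in this diff; the field types and ordering are assumptions, since the real definition in asm/iseries/it_lp_queue.h is outside this diffstat.

    /* Sketch only: layout and types inferred from the hunks above,
     * not copied from it_lp_queue.h (which is not part of this diff). */
    #include <linux/spinlock.h>   /* spinlock_t */
    #include <linux/types.h>      /* u8 */

    struct hvlpevent_queue {
            char       *hq_current_event;     /* was xSlicCurEventPtr */
            char       *hq_last_event;        /* was xSlicLastValidEventPtr */
            char       *hq_event_stack;       /* was xSlicEventStackPtr */
            u8          hq_index;             /* was xIndex */
            u8          hq_overflow_pending;  /* was xPlicOverflowIntPending */
            spinlock_t  hq_lock;              /* was lock */
    };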
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
index e68b6b5fa89f..c241413629ac 100644
--- a/arch/powerpc/platforms/iseries/proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -24,7 +24,6 @@
 #include <asm/processor.h>
 #include <asm/time.h>
 #include <asm/lppaca.h>
-#include <asm/iseries/it_lp_queue.h>
 #include <asm/iseries/hv_call_xm.h>
 
 #include "processor_vpd.h"
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 617c724c4590..66c77e4f8ec2 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -81,8 +81,6 @@ static void iSeries_pci_final_fixup(void) { }
 #endif
 
 extern int rd_size;		/* Defined in drivers/block/rd.c */
-extern unsigned long embedded_sysmap_start;
-extern unsigned long embedded_sysmap_end;
 
 extern unsigned long iSeries_recal_tb;
 extern unsigned long iSeries_recal_titan;
@@ -321,11 +319,6 @@ static void __init iSeries_init_early(void)
 	iSeries_recal_titan = HvCallXm_loadTod();
 
 	/*
-	 * Initialize the hash table management pointers
-	 */
-	hpte_init_iSeries();
-
-	/*
 	 * Initialize the DMA/TCE management
 	 */
 	iommu_init_early_iSeries();
@@ -563,16 +556,6 @@ static void __init iSeries_fixup_klimit(void)
 	if (naca.xRamDisk)
 		klimit = KERNELBASE + (u64)naca.xRamDisk +
 			(naca.xRamDiskSize * HW_PAGE_SIZE);
-	else {
-		/*
-		 * No ram disk was included - check and see if there
-		 * was an embedded system map. Change klimit to take
-		 * into account any embedded system map
-		 */
-		if (embedded_sysmap_end)
-			klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
-					0xfffffffffffff000);
-	}
 }
 
 static int __init iSeries_src_init(void)
@@ -683,6 +666,8 @@ static int __init iseries_probe(void)
 	 */
 	virt_irq_max = 255;
 
+	hpte_init_iSeries();
+
 	return 1;
 }
 
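
To summarize the setup.c hunks, a short sketch of the end state, not code quoted verbatim from the tree: hpte_init_iSeries() now runs from iseries_probe() instead of iSeries_init_early(), and with the embedded_sysmap_start / embedded_sysmap_end externs removed, iSeries_fixup_klimit() keeps only the ram disk case. Any declarations or comments preceding the lines shown in the hunk are omitted here.

    /* Sketch reconstructed from the hunks above; only lines visible in
     * this diff are shown. */
    static void __init iSeries_fixup_klimit(void)
    {
            if (naca.xRamDisk)
                    klimit = KERNELBASE + (u64)naca.xRamDisk +
                            (naca.xRamDiskSize * HW_PAGE_SIZE);
    }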