Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--	arch/ppc64/kernel/ItLpQueue.c		294
-rw-r--r--	arch/ppc64/kernel/LparData.c		 11
-rw-r--r--	arch/ppc64/kernel/cputable.c		365
-rw-r--r--	arch/ppc64/kernel/head.S		 10
-rw-r--r--	arch/ppc64/kernel/hvconsole.c		 51
-rw-r--r--	arch/ppc64/kernel/iSeries_proc.c	 48
-rw-r--r--	arch/ppc64/kernel/iSeries_setup.c	119
-rw-r--r--	arch/ppc64/kernel/idle.c		283
-rw-r--r--	arch/ppc64/kernel/irq.c			  7
-rw-r--r--	arch/ppc64/kernel/kprobes.c		  2
-rw-r--r--	arch/ppc64/kernel/maple_setup.c		  3
-rw-r--r--	arch/ppc64/kernel/mf.c			  6
-rw-r--r--	arch/ppc64/kernel/misc.S		  6
-rw-r--r--	arch/ppc64/kernel/nvram.c		  8
-rw-r--r--	arch/ppc64/kernel/of_device.c		 15
-rw-r--r--	arch/ppc64/kernel/pSeries_setup.c	156
-rw-r--r--	arch/ppc64/kernel/pacaData.c		212
-rw-r--r--	arch/ppc64/kernel/pmac_setup.c		  5
-rw-r--r--	arch/ppc64/kernel/setup.c		  8
-rw-r--r--	arch/ppc64/kernel/sys_ppc32.c		 54
-rw-r--r--	arch/ppc64/kernel/sysfs.c		 21
-rw-r--r--	arch/ppc64/kernel/time.c		  8
-rw-r--r--	arch/ppc64/kernel/vdso32/vdso32.lds.S	  4
23 files changed, 803 insertions(+), 893 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index cdea00d7707f..4231861288a3 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -1,7 +1,7 @@
 /*
  * ItLpQueue.c
  * Copyright (C) 2001 Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -11,156 +11,252 @@
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 #include <asm/system.h>
 #include <asm/paca.h>
 #include <asm/iSeries/ItLpQueue.h>
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>
 
-static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
-{
-	int t;
-	u32 * inUseP = &(lpQueue->xInUseWord);
-
-	__asm__ __volatile__("\n\
-1:	lwarx	%0,0,%2		\n\
-	cmpwi	0,%0,0		\n\
-	li	%0,0		\n\
-	bne-	2f		\n\
-	addi	%0,%0,1		\n\
-	stwcx.	%0,0,%2		\n\
-	bne-	1b		\n\
-2:	eieio"
-	: "=&r" (t), "=m" (lpQueue->xInUseWord)
-	: "r" (inUseP), "m" (lpQueue->xInUseWord)
-	: "cc");
-
-	return t;
-}
+/*
+ * The LpQueue is used to pass event data from the hypervisor to
+ * the partition. This is where I/O interrupt events are communicated.
+ *
+ * It is written to by the hypervisor so cannot end up in the BSS.
+ */
+struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
-static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
-{
-	lpQueue->xInUseWord = 0;
-}
+DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
+
+static char *event_types[HvLpEvent_Type_NumTypes] = {
+	"Hypervisor",
+	"Machine Facilities",
+	"Session Manager",
+	"SPD I/O",
+	"Virtual Bus",
+	"PCI I/O",
+	"RIO I/O",
+	"Virtual Lan",
+	"Virtual I/O"
+};
 
 /* Array of LpEvent handler functions */
 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
-unsigned long ItLpQueueInProcess = 0;
 
-struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
+static struct HvLpEvent * get_next_hvlpevent(void)
 {
-	struct HvLpEvent * nextLpEvent = 
-		(struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-	if ( nextLpEvent->xFlags.xValid ) {
+	struct HvLpEvent * event;
+	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+
+	if (event->xFlags.xValid) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
-				      LpEventAlign ) /
-				      LpEventAlign ) *
-				      LpEventAlign;
+		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
+				LpEventAlign) / LpEventAlign) * LpEventAlign;
+
 		/* Wrap to beginning if no room at end */
-		if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
-			lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr >
+				hvlpevent_queue.xSlicLastValidEventPtr) {
+			hvlpevent_queue.xSlicCurEventPtr =
+				hvlpevent_queue.xSlicEventStackPtr;
+		}
+	} else {
+		event = NULL;
 	}
-	else
-		nextLpEvent = NULL;
 
-	return nextLpEvent;
+	return event;
 }
 
-int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
+static unsigned long spread_lpevents = NR_CPUS;
+
+int hvlpevent_is_pending(void)
 {
-	int retval = 0;
-	struct HvLpEvent * nextLpEvent;
-	if ( lpQueue ) {
-		nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-		retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
-	}
-	return retval;
+	struct HvLpEvent *next_event;
+
+	if (smp_processor_id() >= spread_lpevents)
+		return 0;
+
+	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+
+	return next_event->xFlags.xValid |
+		hvlpevent_queue.xPlicOverflowIntPending;
 }
 
-void ItLpQueue_clearValid( struct HvLpEvent * event )
+static void hvlpevent_clear_valid(struct HvLpEvent * event)
 {
-	/* Clear the valid bit of the event
-	 * Also clear bits within this event that might
-	 * look like valid bits (on 64-byte boundaries)
+	/* Tell the Hypervisor that we're done with this event.
+	 * Also clear bits within this event that might look like valid bits.
+	 * ie. on 64-byte boundaries.
 	 */
-	unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
-			  LpEventAlign ) - 1;
-	switch ( extra ) {
-	  case 3:
-	   ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
-	  case 2:
-	   ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
-	  case 1:
-	   ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
-	  case 0:
-	   ;
+	struct HvLpEvent *tmp;
+	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
+			  LpEventAlign) - 1;
+
+	switch (extra) {
+	case 3:
+		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
+	case 2:
+		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
+	case 1:
+		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	}
+
 	mb();
+
 	event->xFlags.xValid = 0;
 }
 
-unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
+void process_hvlpevents(struct pt_regs *regs)
 {
-	unsigned numIntsProcessed = 0;
-	struct HvLpEvent * nextLpEvent;
+	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
-	if ( !set_inUse( lpQueue ) )
-		return 0;
-
-	if (ItLpQueueInProcess == 0)
-		ItLpQueueInProcess = 1;
-	else
-		BUG();
+	if (!spin_trylock(&hvlpevent_queue.lock))
+		return;
 
 	for (;;) {
-		nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
-		if ( nextLpEvent ) {
-			/* Count events to return to caller
-			 * and count processed events in lpQueue
-			 */
-			++numIntsProcessed;
-			lpQueue->xLpIntCount++;
-			/* Call appropriate handler here, passing
+		event = get_next_hvlpevent();
+		if (event) {
+			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent. The handler
 			 * must make a copy of the LpEvent if it
 			 * needs it in a bottom half. (perhaps for
 			 * an ACK)
 			 *
 			 * Handlers are responsible for ACK processing
 			 *
 			 * The Hypervisor guarantees that LpEvents will
 			 * only be delivered with types that we have
 			 * registered for, so no type check is necessary
 			 * here!
 			 */
-			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-				lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
-			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
-			     lpEventHandler[nextLpEvent->xType] )
-				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
+			if (event->xType < HvLpEvent_Type_NumTypes)
+				__get_cpu_var(hvlpevent_counts)[event->xType]++;
+			if (event->xType < HvLpEvent_Type_NumTypes &&
+					lpEventHandler[event->xType])
+				lpEventHandler[event->xType](event, regs);
 			else
-				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
+				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
-			ItLpQueue_clearValid( nextLpEvent );
-		} else if ( lpQueue->xPlicOverflowIntPending )
+			hvlpevent_clear_valid(event);
+		} else if (hvlpevent_queue.xPlicOverflowIntPending)
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
+			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
 		else
 			break;
 	}
 
-	ItLpQueueInProcess = 0;
-	mb();
-	clear_inUse( lpQueue );
+	spin_unlock(&hvlpevent_queue.lock);
+}
+
+static int set_spread_lpevents(char *str)
+{
+	unsigned long val = simple_strtoul(str, NULL, 0);
+
+	/*
+	 * The parameter is the number of processors to share in processing
+	 * lp events.
+	 */
+	if (( val > 0) && (val <= NR_CPUS)) {
+		spread_lpevents = val;
+		printk("lpevent processing spread over %ld processors\n", val);
+	} else {
+		printk("invalid spread_lpevents %ld\n", val);
+	}
 
-	get_paca()->lpevent_count += numIntsProcessed;
+	return 1;
+}
+__setup("spread_lpevents=", set_spread_lpevents);
+
+void setup_hvlpevent_queue(void)
+{
+	void *eventStack;
+
+	/*
+	 * Allocate a page for the Event Stack. The Hypervisor needs the
+	 * absolute real address, so we subtract out the KERNELBASE and add
+	 * in the absolute real address of the kernel load area.
+	 */
+	eventStack = alloc_bootmem_pages(LpEventStackSize);
+	memset(eventStack, 0, LpEventStackSize);
+
+	/* Invoke the hypervisor to initialize the event stack */
+	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
+
+	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
+					(LpEventStackSize - LpEventMaxSize);
+	hvlpevent_queue.xIndex = 0;
+}
+
+static int proc_lpevents_show(struct seq_file *m, void *v)
+{
+	int cpu, i;
+	unsigned long sum;
+	static unsigned long cpu_totals[NR_CPUS];
+
+	/* FIXME: do we care that there's no locking here? */
+	sum = 0;
+	for_each_online_cpu(cpu) {
+		cpu_totals[cpu] = 0;
+		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
+			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
+		}
+		sum += cpu_totals[cpu];
+	}
+
+	seq_printf(m, "LpEventQueue 0\n");
+	seq_printf(m, "  events processed:\t%lu\n", sum);
+
+	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
+		sum = 0;
+		for_each_online_cpu(cpu) {
+			sum += per_cpu(hvlpevent_counts, cpu)[i];
+		}
+
+		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
+	}
+
+	seq_printf(m, "\n  events processed by processor:\n");
+
+	for_each_online_cpu(cpu) {
+		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
+	}
+
+	return 0;
+}
+
+static int proc_lpevents_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_lpevents_show, NULL);
+}
 
-	return numIntsProcessed;
+static struct file_operations proc_lpevents_operations = {
+	.open		= proc_lpevents_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init proc_lpevents_init(void)
+{
+	struct proc_dir_entry *e;
+
+	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
+	if (e)
+		e->proc_fops = &proc_lpevents_operations;
+
+	return 0;
 }
+__initcall(proc_lpevents_init);
+
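The locking change above is the heart of this file's rewrite: the hand-coded lwarx/stwcx. test-and-set (set_inUse) and the ItLpQueueInProcess flag collapse into a single spin_trylock() on the queue. A minimal user-space sketch of the same recursion-guard pattern, using C11 atomics instead of the kernel spinlock (all names here are illustrative stand-ins, not from the commit):

	#include <stdbool.h>
	#include <stdatomic.h>

	static atomic_flag queue_busy = ATOMIC_FLAG_INIT;

	static bool try_enter_queue(void)
	{
		/* Succeeds only for the first caller; nested callers back off. */
		return !atomic_flag_test_and_set(&queue_busy);
	}

	static void leave_queue(void)
	{
		atomic_flag_clear(&queue_busy);
	}

	void drain_events(void)
	{
		if (!try_enter_queue())
			return;		/* recursed from an event handler */
		/* ... process events ... */
		leave_queue();
	}
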
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
index badc5a443614..6ffcf67dd507 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/ppc64/kernel/LparData.c
@@ -28,13 +28,6 @@
 #include <asm/iSeries/IoHriProcessorVpd.h>
 #include <asm/iSeries/ItSpCommArea.h>
 
-/* The LpQueue is used to pass event data from the hypervisor to
- * the partition. This is where I/O interrupt events are communicated.
- */
-
-/* May be filled in by the hypervisor so cannot end up in the BSS */
-struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));
-
 
 /* The HvReleaseData is the root of the information shared between
  * the hypervisor and Linux.
@@ -200,7 +193,7 @@ struct ItVpdAreas itVpdAreas = {
 		0,0,0,			/* 13 - 15 */
 		sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
 		0,0,0,0,0,0,		/* 17 - 22  */
-		sizeof(struct ItLpQueue),/* 23 length of Lp Queue */
+		sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
 		0,0			/* 24 - 25 */
 		},
 	.xSlicVpdAdrs = {		/* VPD addresses */
@@ -218,7 +211,7 @@ struct ItVpdAreas itVpdAreas = {
 		0,0,0,			/* 13 - 15 */
 		&xIoHriProcessorVpd,	/* 16 Proc Vpd */
 		0,0,0,0,0,0,		/* 17 - 22 */
-		&xItLpQueue,		/* 23 Lp Queue */
+		&hvlpevent_queue,	/* 23 Lp Queue */
 		0,0
 	}
 };
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 1d162c7c59df..8d4c46f6f0b6 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -49,160 +49,219 @@ extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
 #endif
 
 struct cpu_spec	cpu_specs[] = {
 	{	/* Power3 */
-	    0xffff0000, 0x00400000, "POWER3 (630)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_IABR | CPU_FTR_PMC8,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power3,
-	    COMMON_PPC64_FW
-	},
-	{	/* Power3+ */
-	    0xffff0000, 0x00410000, "POWER3 (630+)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_IABR | CPU_FTR_PMC8,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power3,
-	    COMMON_PPC64_FW
-	},
-	{	/* Northstar */
-	    0xffff0000, 0x00330000, "RS64-II (northstar)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power3,
-	    COMMON_PPC64_FW
-	},
-	{	/* Pulsar */
-	    0xffff0000, 0x00340000, "RS64-III (pulsar)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power3,
-	    COMMON_PPC64_FW
-	},
-	{	/* I-star */
-	    0xffff0000, 0x00360000, "RS64-III (icestar)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power3,
-	    COMMON_PPC64_FW
-	},
-	{	/* S-star */
-	    0xffff0000, 0x00370000, "RS64-IV (sstar)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_IABR | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power3,
-	    COMMON_PPC64_FW
-	},
-	{	/* Power4 */
-	    0xffff0000, 0x00350000, "POWER4 (gp)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power4,
-	    COMMON_PPC64_FW
-	},
-	{	/* Power4+ */
-	    0xffff0000, 0x00380000, "POWER4+ (gq)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power4,
-	    COMMON_PPC64_FW
-	},
-	{	/* PPC970 */
-	    0xffff0000, 0x00390000, "PPC970",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-		CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
-	    128, 128,
-	    __setup_cpu_ppc970,
-	    COMMON_PPC64_FW
-	},
-	{	/* PPC970FX */
-	    0xffff0000, 0x003c0000, "PPC970FX",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-		CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
-	    COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
-	    128, 128,
-	    __setup_cpu_ppc970,
-	    COMMON_PPC64_FW
-	},
-	{	/* Power5 */
-	    0xffff0000, 0x003a0000, "POWER5 (gr)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
-		CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-		CPU_FTR_MMCRA_SIHV,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power4,
-	    COMMON_PPC64_FW
-	},
-	{	/* Power5 */
-	    0xffff0000, 0x003b0000, "POWER5 (gs)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
-		CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
-		CPU_FTR_MMCRA_SIHV,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power4,
-	    COMMON_PPC64_FW
-	},
-	{	/* BE DD1.x */
-	    0xffff0000, 0x00700000, "Broadband Engine",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-		CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
-		CPU_FTR_SMT,
-	    COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
-	    128, 128,
-	    __setup_cpu_be,
-	    COMMON_PPC64_FW
-	},
-	{	/* default match */
-	    0x00000000, 0x00000000, "POWER4 (compatible)",
-	    CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
-	    CPU_FTR_PPCAS_ARCH_V2,
-	    COMMON_USER_PPC64,
-	    128, 128,
-	    __setup_cpu_power4,
-	    COMMON_PPC64_FW
-	}
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00400000,
+		.cpu_name		= "POWER3 (630)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+			CPU_FTR_PMC8,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power3,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Power3+ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00410000,
+		.cpu_name		= "POWER3 (630+)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+			CPU_FTR_PMC8,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power3,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Northstar */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00330000,
+		.cpu_name		= "RS64-II (northstar)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+			CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power3,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Pulsar */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00340000,
+		.cpu_name		= "RS64-III (pulsar)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+			CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power3,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* I-star */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00360000,
+		.cpu_name		= "RS64-III (icestar)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+			CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power3,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* S-star */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00370000,
+		.cpu_name		= "RS64-IV (sstar)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+			CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power3,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Power4 */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00350000,
+		.cpu_name		= "POWER4 (gp)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power4,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Power4+ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00380000,
+		.cpu_name		= "POWER4+ (gq)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power4,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* PPC970 */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00390000,
+		.cpu_name		= "PPC970",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+			CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+		.cpu_user_features	= COMMON_USER_PPC64 |
+			PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_ppc970,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* PPC970FX */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x003c0000,
+		.cpu_name		= "PPC970FX",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+			CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA,
+		.cpu_user_features	= COMMON_USER_PPC64 |
+			PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_ppc970,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Power5 */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x003a0000,
+		.cpu_name		= "POWER5 (gr)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
+			CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+			CPU_FTR_MMCRA_SIHV,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power4,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* Power5 */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x003b0000,
+		.cpu_name		= "POWER5 (gs)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
+			CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+			CPU_FTR_MMCRA_SIHV,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power4,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* BE DD1.x */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00700000,
+		.cpu_name		= "Broadband Engine",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
+			CPU_FTR_SMT,
+		.cpu_user_features	= COMMON_USER_PPC64 |
+			PPC_FEATURE_HAS_ALTIVEC_COMP,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_be,
+		.firmware_features	= COMMON_PPC64_FW,
+	},
+	{	/* default match */
+		.pvr_mask		= 0x00000000,
+		.pvr_value		= 0x00000000,
+		.cpu_name		= "POWER4 (compatible)",
+		.cpu_features		= CPU_FTR_SPLIT_ID_CACHE |
+			CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
+			CPU_FTR_PPCAS_ARCH_V2,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.cpu_setup		= __setup_cpu_power4,
+		.firmware_features	= COMMON_PPC64_FW,
+	}
 };
 
 firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
 	{FW_FEATURE_PFT,	"hcall-pft"},
 	{FW_FEATURE_TCE,	"hcall-tce"},
 	{FW_FEATURE_SPRG0,	"hcall-sprg0"},
 	{FW_FEATURE_DABR,	"hcall-dabr"},
 	{FW_FEATURE_COPY,	"hcall-copy"},
 	{FW_FEATURE_ASR,	"hcall-asr"},
 	{FW_FEATURE_DEBUG,	"hcall-debug"},
 	{FW_FEATURE_PERF,	"hcall-perf"},
 	{FW_FEATURE_DUMP,	"hcall-dump"},
 	{FW_FEATURE_INTERRUPT,	"hcall-interrupt"},
 	{FW_FEATURE_MIGRATE,	"hcall-migrate"},
 	{FW_FEATURE_PERFMON,	"hcall-perfmon"},
 	{FW_FEATURE_CRQ,	"hcall-crq"},
 	{FW_FEATURE_VIO,	"hcall-vio"},
 	{FW_FEATURE_RDMA,	"hcall-rdma"},
 	{FW_FEATURE_LLAN,	"hcall-lLAN"},
 	{FW_FEATURE_BULK,	"hcall-bulk"},
 	{FW_FEATURE_XDABR,	"hcall-xdabr"},
 	{FW_FEATURE_MULTITCE,	"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,	"hcall-splpar"},
 };
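The cpu_specs[] conversion above swaps positional aggregate initializers for designated initializers. A small self-contained illustration of why this matters (the struct is a simplified stand-in for the kernel's struct cpu_spec):

	/* Positional vs designated initialization. */
	struct cpu_spec_like {
		unsigned int	pvr_mask;
		unsigned int	pvr_value;
		const char	*cpu_name;
	};

	/* Positional: meaning depends on the field order staying fixed. */
	static struct cpu_spec_like a = { 0xffff0000, 0x00400000, "POWER3 (630)" };

	/* Designated: each value is tied to a field name, so reordering or
	 * adding struct members cannot silently shift the values. */
	static struct cpu_spec_like b = {
		.pvr_mask	= 0xffff0000,
		.pvr_value	= 0x00400000,
		.cpu_name	= "POWER3 (630)",
	};
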
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 675c2708588f..93ebcac0d5a2 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -308,6 +308,7 @@ exception_marker:
 label##_pSeries:					\
 	HMT_MEDIUM;					\
 	mtspr	SPRG1,r13;	/* save r13 */		\
+	RUNLATCH_ON(r13);				\
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
 #define STD_EXCEPTION_ISERIES(n, label, area)		\
@@ -315,6 +316,7 @@ label##_pSeries: \
 label##_iSeries:					\
 	HMT_MEDIUM;					\
 	mtspr	SPRG1,r13;	/* save r13 */		\
+	RUNLATCH_ON(r13);				\
 	EXCEPTION_PROLOG_ISERIES_1(area);		\
 	EXCEPTION_PROLOG_ISERIES_2;			\
 	b	label##_common
@@ -324,6 +326,7 @@ label##_iSeries: \
 label##_iSeries:					\
 	HMT_MEDIUM;					\
 	mtspr	SPRG1,r13;	/* save r13 */		\
+	RUNLATCH_ON(r13);				\
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);		\
 	lbz	r10,PACAPROCENABLED(r13);		\
 	cmpwi	0,r10,0;				\
@@ -393,6 +396,7 @@ __start_interrupts:
 _machine_check_pSeries:
 	HMT_MEDIUM
 	mtspr	SPRG1,r13	/* save r13 */
+	RUNLATCH_ON(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
 	. = 0x300
@@ -419,6 +423,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 data_access_slb_pSeries:
 	HMT_MEDIUM
 	mtspr	SPRG1,r13
+	RUNLATCH_ON(r13)
 	mfspr	r13,SPRG3	/* get paca address into r13 */
 	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
 	std	r10,PACA_EXSLB+EX_R10(r13)
@@ -439,6 +444,7 @@ data_access_slb_pSeries:
 instruction_access_slb_pSeries:
 	HMT_MEDIUM
 	mtspr	SPRG1,r13
+	RUNLATCH_ON(r13)
 	mfspr	r13,SPRG3	/* get paca address into r13 */
 	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
 	std	r10,PACA_EXSLB+EX_R10(r13)
@@ -464,6 +470,7 @@ instruction_access_slb_pSeries:
 	.globl	system_call_pSeries
 system_call_pSeries:
 	HMT_MEDIUM
+	RUNLATCH_ON(r9)
 	mr	r9,r13
 	mfmsr	r10
 	mfspr	r13,SPRG3
@@ -707,11 +714,13 @@ fwnmi_data_area:
 system_reset_fwnmi:
 	HMT_MEDIUM
 	mtspr	SPRG1,r13	/* save r13 */
+	RUNLATCH_ON(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
 	.globl machine_check_fwnmi
machine_check_fwnmi:
 	HMT_MEDIUM
 	mtspr	SPRG1,r13	/* save r13 */
+	RUNLATCH_ON(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
 	/*
@@ -848,6 +857,7 @@ unrecov_fer:
 	.align	7
 	.globl data_access_common
 data_access_common:
+	RUNLATCH_ON(r10)	/* It wont fit in the 0x300 handler */
 	mfspr	r10,DAR
 	std	r10,PACA_EXGEN+EX_DAR(r13)
 	mfspr	r10,DSISR
diff --git a/arch/ppc64/kernel/hvconsole.c b/arch/ppc64/kernel/hvconsole.c
index c72fb8ffe974..138e128a3886 100644
--- a/arch/ppc64/kernel/hvconsole.c
+++ b/arch/ppc64/kernel/hvconsole.c
@@ -27,7 +27,6 @@
 #include <linux/module.h>
 #include <asm/hvcall.h>
 #include <asm/hvconsole.h>
-#include <asm/prom.h>
 
 /**
  * hvc_get_chars - retrieve characters from firmware for denoted vterm adatper
@@ -42,29 +41,14 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count)
 	unsigned long got;
 
 	if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got,
-		(unsigned long *)buf, (unsigned long *)buf+1) == H_Success) {
-		/*
-		 * Work around a HV bug where it gives us a null
-		 * after every \r.  -- paulus
-		 */
-		if (got > 0) {
-			int i;
-			for (i = 1; i < got; ++i) {
-				if (buf[i] == 0 && buf[i-1] == '\r') {
-					--got;
-					if (i < got)
-						memmove(&buf[i], &buf[i+1],
-							got - i);
-				}
-			}
-		}
+		(unsigned long *)buf, (unsigned long *)buf+1) == H_Success)
 		return got;
-	}
 	return 0;
 }
 
 EXPORT_SYMBOL(hvc_get_chars);
 
+
 /**
  * hvc_put_chars: send characters to firmware for denoted vterm adapter
  * @vtermno: The vtermno or unit_address of the adapter from which the data
@@ -88,34 +72,3 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
 }
 
 EXPORT_SYMBOL(hvc_put_chars);
-
-/*
- * We hope/assume that the first vty found corresponds to the first console
- * device.
- */
-int hvc_find_vtys(void)
-{
-	struct device_node *vty;
-	int num_found = 0;
-
-	for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
-			vty = of_find_node_by_name(vty, "vty")) {
-		uint32_t *vtermno;
-
-		/* We have statically defined space for only a certain number of
-		 * console adapters. */
-		if (num_found >= MAX_NR_HVC_CONSOLES)
-			break;
-
-		vtermno = (uint32_t *)get_property(vty, "reg", NULL);
-		if (!vtermno)
-			continue;
-
-		if (device_is_compatible(vty, "hvterm1")) {
-			hvc_instantiate(*vtermno, num_found);
-			++num_found;
-		}
-	}
-
-	return num_found;
-}
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/ppc64/kernel/iSeries_proc.c
index 356bd9931fcc..0fe3116eba29 100644
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ b/arch/ppc64/kernel/iSeries_proc.c
@@ -40,50 +40,6 @@ static int __init iseries_proc_create(void)
 }
 core_initcall(iseries_proc_create);
 
-static char *event_types[9] = {
-	"Hypervisor\t\t",
-	"Machine Facilities\t",
-	"Session Manager\t",
-	"SPD I/O\t\t",
-	"Virtual Bus\t\t",
-	"PCI I/O\t\t",
-	"RIO I/O\t\t",
-	"Virtual Lan\t\t",
-	"Virtual I/O\t\t"
-};
-
-static int proc_lpevents_show(struct seq_file *m, void *v)
-{
-	unsigned int i;
-
-	seq_printf(m, "LpEventQueue 0\n");
-	seq_printf(m, "  events processed:\t%lu\n",
-		   (unsigned long)xItLpQueue.xLpIntCount);
-
-	for (i = 0; i < 9; ++i)
-		seq_printf(m, "    %s %10lu\n", event_types[i],
-			   (unsigned long)xItLpQueue.xLpIntCountByType[i]);
-
-	seq_printf(m, "\n  events processed by processor:\n");
-
-	for_each_online_cpu(i)
-		seq_printf(m, "    CPU%02d  %10u\n", i, paca[i].lpevent_count);
-
-	return 0;
-}
-
-static int proc_lpevents_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, proc_lpevents_show, NULL);
-}
-
-static struct file_operations proc_lpevents_operations = {
-	.open		= proc_lpevents_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
 static unsigned long startTitan = 0;
 static unsigned long startTb = 0;
 
@@ -148,10 +104,6 @@ static int __init iseries_proc_init(void)
 {
 	struct proc_dir_entry *e;
 
-	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
-	if (e)
-		e->proc_fops = &proc_lpevents_operations;
-
 	e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
 	if (e)
 		e->proc_fops = &proc_titantod_operations;
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index 86966ce76b58..077c82fc9f3a 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -24,7 +24,6 @@
 #include <linux/smp.h>
 #include <linux/param.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
@@ -676,7 +675,6 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
  */
 static void __init iSeries_setup_arch(void)
 {
-	void *eventStack;
 	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
 
 	/* Add an eye catcher and the systemcfg layout version number */
@@ -685,24 +683,7 @@ static void __init iSeries_setup_arch(void)
 	systemcfg->version.minor = SYSTEMCFG_MINOR;
 
 	/* Setup the Lp Event Queue */
-
-	/* Allocate a page for the Event Stack
-	 * The hypervisor wants the absolute real address, so
-	 * we subtract out the KERNELBASE and add in the
-	 * absolute real address of the kernel load area
-	 */
-	eventStack = alloc_bootmem_pages(LpEventStackSize);
-	memset(eventStack, 0, LpEventStackSize);
-
-	/* Invoke the hypervisor to initialize the event stack */
-	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
-
-	/* Initialize fields in our Lp Event Queue */
-	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
-	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
-	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
-					(LpEventStackSize - LpEventMaxSize);
-	xItLpQueue.xIndex = 0;
+	setup_hvlpevent_queue();
 
 	/* Compute processor frequency */
 	procFreqHz = ((1UL << 34) * 1000000) /
@@ -853,27 +834,91 @@ static int __init iSeries_src_init(void)
 
 late_initcall(iSeries_src_init);
 
-static int set_spread_lpevents(char *str)
-{
-	unsigned long i;
-	unsigned long val = simple_strtoul(str, NULL, 0);
-
-	/*
-	 * The parameter is the number of processors to share in processing
-	 * lp events.
-	 */
-	if (( val > 0) && (val <= NR_CPUS)) {
-		for (i = 1; i < val; ++i)
-			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
-
-		printk("lpevent processing spread over %ld processors\n", val);
-	} else {
-		printk("invalid spread_lpevents %ld\n", val);
-	}
-
-	return 1;
-}
-__setup("spread_lpevents=", set_spread_lpevents);
+static inline void process_iSeries_events(void)
+{
+	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
+}
+
+static void yield_shared_processor(void)
+{
+	unsigned long tb;
+
+	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
+				    HvCall_MaskLpEvent |
+				    HvCall_MaskLpProd |
+				    HvCall_MaskTimeout);
+
+	tb = get_tb();
+	/* Compute future tb value when yield should expire */
+	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
+
+	/*
+	 * The decrementer stops during the yield. Force a fake decrementer
+	 * here and let the timer_interrupt code sort out the actual time.
+	 */
+	get_paca()->lppaca.int_dword.fields.decr_int = 1;
+	process_iSeries_events();
+}
+
+static int iseries_shared_idle(void)
+{
+	while (1) {
+		while (!need_resched() && !hvlpevent_is_pending()) {
+			local_irq_disable();
+			ppc64_runlatch_off();
+
+			/* Recheck with irqs off */
+			if (!need_resched() && !hvlpevent_is_pending())
+				yield_shared_processor();
+
+			HMT_medium();
+			local_irq_enable();
+		}
+
+		ppc64_runlatch_on();
+
+		if (hvlpevent_is_pending())
+			process_iSeries_events();
+
+		schedule();
+	}
+
+	return 0;
+}
+
+static int iseries_dedicated_idle(void)
+{
+	long oldval;
+
+	while (1) {
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+
+			while (!need_resched()) {
+				ppc64_runlatch_off();
+				HMT_low();
+
+				if (hvlpevent_is_pending()) {
+					HMT_medium();
+					ppc64_runlatch_on();
+					process_iSeries_events();
+				}
+			}
+
+			HMT_medium();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+
+		ppc64_runlatch_on();
+		schedule();
+	}
+
+	return 0;
+}
 
 #ifndef CONFIG_PCI
 void __init iSeries_init_IRQ(void) { }
@@ -900,5 +945,13 @@ void __init iSeries_early_setup(void)
 	ppc_md.get_rtc_time = iSeries_get_rtc_time;
 	ppc_md.calibrate_decr = iSeries_calibrate_decr;
 	ppc_md.progress = iSeries_progress;
+
+	if (get_paca()->lppaca.shared_proc) {
+		ppc_md.idle_loop = iseries_shared_idle;
+		printk(KERN_INFO "Using shared processor idle loop\n");
+	} else {
+		ppc_md.idle_loop = iseries_dedicated_idle;
+		printk(KERN_INFO "Using dedicated idle loop\n");
+	}
 }
 
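iseries_shared_idle() above rechecks need_resched() and hvlpevent_is_pending() after disabling interrupts, so a wakeup arriving between the first check and the yield cannot be lost. A compact sketch of that check/recheck idiom (every helper here is a hypothetical stand-in for the kernel primitive):

	/* Hypothetical primitives standing in for the kernel's. */
	extern int  work_pending(void);
	extern void irq_disable(void);
	extern void irq_enable(void);
	extern void yield_to_hypervisor(void);
	extern void run_pending_work(void);

	void idle_iteration(void)
	{
		while (!work_pending()) {		/* cheap check, irqs on */
			irq_disable();
			if (!work_pending())		/* recheck with irqs off */
				yield_to_hypervisor();	/* wakes on the next event */
			irq_enable();
		}
		run_pending_work();
	}
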
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index bdf13b4dc1c8..954395d42636 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -20,109 +20,18 @@
 #include <linux/kernel.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
-#include <linux/module.h>
 #include <linux/sysctl.h>
-#include <linux/smp.h>
 
 #include <asm/system.h>
 #include <asm/processor.h>
-#include <asm/mmu.h>
 #include <asm/cputable.h>
 #include <asm/time.h>
-#include <asm/iSeries/HvCall.h>
-#include <asm/iSeries/ItLpQueue.h>
-#include <asm/plpar_wrappers.h>
 #include <asm/systemcfg.h>
+#include <asm/machdep.h>
 
 extern void power4_idle(void);
 
-static int (*idle_loop)(void);
-
-#ifdef CONFIG_PPC_ISERIES
-static unsigned long maxYieldTime = 0;
-static unsigned long minYieldTime = 0xffffffffffffffffUL;
-
-static inline void process_iSeries_events(void)
-{
-	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
-}
-
-static void yield_shared_processor(void)
-{
-	unsigned long tb;
-	unsigned long yieldTime;
-
-	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
-				    HvCall_MaskLpEvent |
-				    HvCall_MaskLpProd |
-				    HvCall_MaskTimeout);
-
-	tb = get_tb();
-	/* Compute future tb value when yield should expire */
-	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
-
-	yieldTime = get_tb() - tb;
-	if (yieldTime > maxYieldTime)
-		maxYieldTime = yieldTime;
-
-	if (yieldTime < minYieldTime)
-		minYieldTime = yieldTime;
-
-	/*
-	 * The decrementer stops during the yield. Force a fake decrementer
-	 * here and let the timer_interrupt code sort out the actual time.
-	 */
-	get_paca()->lppaca.int_dword.fields.decr_int = 1;
-	process_iSeries_events();
-}
-
-static int iSeries_idle(void)
-{
-	struct paca_struct *lpaca;
-	long oldval;
-
-	/* ensure iSeries run light will be out when idle */
-	ppc64_runlatch_off();
-
-	lpaca = get_paca();
-
-	while (1) {
-		if (lpaca->lppaca.shared_proc) {
-			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
-				process_iSeries_events();
-			if (!need_resched())
-				yield_shared_processor();
-		} else {
-			oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
-			if (!oldval) {
-				set_thread_flag(TIF_POLLING_NRFLAG);
-
-				while (!need_resched()) {
-					HMT_medium();
-					if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
-						process_iSeries_events();
-					HMT_low();
-				}
-
-				HMT_medium();
-				clear_thread_flag(TIF_POLLING_NRFLAG);
-			} else {
-				set_need_resched();
-			}
-		}
-
-		ppc64_runlatch_on();
-		schedule();
-		ppc64_runlatch_off();
-	}
-
-	return 0;
-}
-
-#else
-
-static int default_idle(void)
+int default_idle(void)
 {
 	long oldval;
 	unsigned int cpu = smp_processor_id();
@@ -134,7 +43,8 @@ static int default_idle(void)
 		set_thread_flag(TIF_POLLING_NRFLAG);
 
 		while (!need_resched() && !cpu_is_offline(cpu)) {
-			barrier();
+			ppc64_runlatch_off();
+
 			/*
 			 * Go into low thread priority and possibly
 			 * low power mode.
@@ -149,6 +59,7 @@ static int default_idle(void)
 		set_need_resched();
 	}
 
+	ppc64_runlatch_on();
 	schedule();
 	if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
 		cpu_die();
@@ -157,127 +68,19 @@ static int default_idle(void)
 	return 0;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
-
-int dedicated_idle(void)
-{
-	long oldval;
-	struct paca_struct *lpaca = get_paca(), *ppaca;
-	unsigned long start_snooze;
-	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
-	unsigned int cpu = smp_processor_id();
-
-	ppaca = &paca[cpu ^ 1];
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		lpaca->lppaca.idle = 1;
-
-		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-		if (!oldval) {
-			set_thread_flag(TIF_POLLING_NRFLAG);
-			start_snooze = __get_tb() +
-				*smt_snooze_delay * tb_ticks_per_usec;
-			while (!need_resched() && !cpu_is_offline(cpu)) {
-				/*
-				 * Go into low thread priority and possibly
-				 * low power mode.
-				 */
-				HMT_low();
-				HMT_very_low();
-
-				if (*smt_snooze_delay == 0 ||
-				    __get_tb() < start_snooze)
-					continue;
-
-				HMT_medium();
-
-				if (!(ppaca->lppaca.idle)) {
-					local_irq_disable();
-
-					/*
-					 * We are about to sleep the thread
-					 * and so wont be polling any
-					 * more.
-					 */
-					clear_thread_flag(TIF_POLLING_NRFLAG);
-
-					/*
-					 * SMT dynamic mode. Cede will result
-					 * in this thread going dormant, if the
-					 * partner thread is still doing work.
-					 * Thread wakes up if partner goes idle,
-					 * an interrupt is presented, or a prod
-					 * occurs. Returning from the cede
-					 * enables external interrupts.
-					 */
-					if (!need_resched())
-						cede_processor();
-					else
-						local_irq_enable();
-				} else {
-					/*
-					 * Give the HV an opportunity at the
-					 * processor, since we are not doing
-					 * any work.
-					 */
-					poll_pending();
-				}
-			}
-
-			clear_thread_flag(TIF_POLLING_NRFLAG);
-		} else {
-			set_need_resched();
-		}
-
-		HMT_medium();
-		lpaca->lppaca.idle = 0;
-		schedule();
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-	return 0;
-}
-
-static int shared_idle(void)
-{
-	struct paca_struct *lpaca = get_paca();
-	unsigned int cpu = smp_processor_id();
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		lpaca->lppaca.idle = 1;
-
-		while (!need_resched() && !cpu_is_offline(cpu)) {
-			local_irq_disable();
-
-			/*
-			 * Yield the processor to the hypervisor. We return if
-			 * an external interrupt occurs (which are driven prior
-			 * to returning here) or if a prod occurs from another
-			 * processor. When returning here, external interrupts
-			 * are enabled.
-			 *
-			 * Check need_resched() again with interrupts disabled
-			 * to avoid a race.
-			 */
-			if (!need_resched())
-				cede_processor();
-			else
-				local_irq_enable();
-		}
-
-		HMT_medium();
-		lpaca->lppaca.idle = 0;
-		schedule();
+int native_idle(void)
+{
+	while (1) {
+		ppc64_runlatch_off();
+
+		if (!need_resched())
+			power4_idle();
+
+		if (need_resched()) {
+			ppc64_runlatch_on();
+			schedule();
+		}
+
 		if (cpu_is_offline(smp_processor_id()) &&
 				system_state == SYSTEM_RUNNING)
 			cpu_die();
@@ -286,29 +89,10 @@ static int shared_idle(void)
 	return 0;
 }
 
-#endif /* CONFIG_PPC_PSERIES */
-
-static int native_idle(void)
-{
-	while(1) {
-		/* check CPU type here */
-		if (!need_resched())
-			power4_idle();
-		if (need_resched())
-			schedule();
-
-		if (cpu_is_offline(raw_smp_processor_id()) &&
-		    system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-	return 0;
-}
-
-#endif /* CONFIG_PPC_ISERIES */
-
 void cpu_idle(void)
 {
-	idle_loop();
+	BUG_ON(NULL == ppc_md.idle_loop);
+	ppc_md.idle_loop();
 }
 
 int powersave_nap;
@@ -342,42 +126,3 @@ register_powersave_nap_sysctl(void)
 }
 __initcall(register_powersave_nap_sysctl);
 #endif
-
-int idle_setup(void)
-{
-	/*
-	 * Move that junk to each platform specific file, eventually define
-	 * a pSeries_idle for shared processor stuff
-	 */
-#ifdef CONFIG_PPC_ISERIES
-	idle_loop = iSeries_idle;
-	return 1;
-#else
-	idle_loop = default_idle;
-#endif
-#ifdef CONFIG_PPC_PSERIES
-	if (systemcfg->platform & PLATFORM_PSERIES) {
-		if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
-			if (get_paca()->lppaca.shared_proc) {
-				printk(KERN_INFO "Using shared processor idle loop\n");
-				idle_loop = shared_idle;
-			} else {
-				printk(KERN_INFO "Using dedicated idle loop\n");
-				idle_loop = dedicated_idle;
-			}
-		} else {
-			printk(KERN_INFO "Using default idle loop\n");
-			idle_loop = default_idle;
-		}
-	}
-#endif /* CONFIG_PPC_PSERIES */
-#ifndef CONFIG_PPC_ISERIES
-	if (systemcfg->platform == PLATFORM_POWERMAC ||
-	    systemcfg->platform == PLATFORM_MAPLE) {
-		printk(KERN_INFO "Using native/NAP idle loop\n");
-		idle_loop = native_idle;
-	}
-#endif /* CONFIG_PPC_ISERIES */
-
-	return 1;
-}
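With idle_setup() gone, idle-loop selection moves to a per-platform function pointer (ppc_md.idle_loop) that the generic cpu_idle() merely dereferences. A minimal sketch of this machdep-style dispatch, with simplified types and assert() standing in for BUG_ON():

	#include <assert.h>
	#include <stddef.h>

	struct machdep_calls_like {
		int (*idle_loop)(void);
	};

	static struct machdep_calls_like ppc_md_like;

	static int native_idle_like(void)
	{
		for (;;) { /* nap until woken */ }
		return 0;
	}

	void early_setup_like(void)
	{
		/* Each platform installs its own idle routine at boot. */
		ppc_md_like.idle_loop = native_idle_like;
	}

	void cpu_idle_like(void)
	{
		assert(ppc_md_like.idle_loop != NULL);	/* mirrors BUG_ON() */
		ppc_md_like.idle_loop();
	}
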
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index ffe300611f00..f41afe545045 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -66,7 +66,6 @@ EXPORT_SYMBOL(irq_desc);
 int distribute_irqs = 1;
 int __irq_offset_value;
 int ppc_spurious_interrupts;
-unsigned long lpevent_count;
 u64 ppc64_interrupt_controller;
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -269,7 +268,6 @@ out:
 void do_IRQ(struct pt_regs *regs)
 {
 	struct paca_struct *lpaca;
-	struct ItLpQueue *lpq;
 
 	irq_enter();
 
@@ -295,9 +293,8 @@ void do_IRQ(struct pt_regs *regs)
 		iSeries_smp_message_recv(regs);
 	}
 #endif /* CONFIG_SMP */
-	lpq = lpaca->lpqueue_ptr;
-	if (lpq && ItLpQueue_isLpIntPending(lpq))
-		lpevent_count += ItLpQueue_process(lpq, regs);
+	if (hvlpevent_is_pending())
+		process_hvlpevents(regs);
 
 	irq_exit();
 
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 1d2ff6d6b0b3..a3d519518fb8 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -444,7 +444,7 @@ static struct kprobe trampoline_p = {
 	.pre_handler = trampoline_probe_handler
 };
 
-int __init arch_init(void)
+int __init arch_init_kprobes(void)
 {
 	return register_kprobe(&trampoline_p);
 }
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index da8900b51f40..bb55b5a56910 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -177,6 +177,8 @@ void __init maple_setup_arch(void)
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
+
+	printk(KERN_INFO "Using native/NAP idle loop\n");
 }
 
 /*
@@ -297,4 +299,5 @@ struct machdep_calls __initdata maple_md = {
 	.get_rtc_time		= maple_get_rtc_time,
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= maple_progress,
+	.idle_loop		= native_idle,
 };
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c
index d98bebf7042f..ef4a338ebd01 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/ppc64/kernel/mf.c
@@ -801,10 +801,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
 		return rc;
 	/* We need to poll here as we are not yet taking interrupts */
 	while (rtc_data.busy) {
-		extern unsigned long lpevent_count;
-		struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
-		if (lpq && ItLpQueue_isLpIntPending(lpq))
-			lpevent_count += ItLpQueue_process(lpq, NULL);
+		if (hvlpevent_is_pending())
+			process_hvlpevents(NULL);
 	}
 	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
 }
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index f3dea0c5a88c..59f4f9973818 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -1124,9 +1124,11 @@ _GLOBAL(sys_call_table32)
 	.llong .compat_sys_mq_getsetattr
 	.llong .compat_sys_kexec_load
 	.llong .sys32_add_key
-	.llong .sys32_request_key
+	.llong .sys32_request_key	/* 270 */
 	.llong .compat_sys_keyctl
 	.llong .compat_sys_waitid
+	.llong .sys32_ioprio_set
+	.llong .sys32_ioprio_get
 
 	.balign 8
 _GLOBAL(sys_call_table)
@@ -1403,3 +1405,5 @@ _GLOBAL(sys_call_table)
 	.llong .sys_request_key		/* 270 */
 	.llong .sys_keyctl
 	.llong .sys_waitid
+	.llong .sys_ioprio_set
+	.llong .sys_ioprio_get
diff --git a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c
index 4e71781a4414..4fb1a9f5060d 100644
--- a/arch/ppc64/kernel/nvram.c
+++ b/arch/ppc64/kernel/nvram.c
@@ -338,9 +338,8 @@ static int nvram_remove_os_partition(void)
338 */ 338 */
339static int nvram_create_os_partition(void) 339static int nvram_create_os_partition(void)
340{ 340{
341 struct list_head * p; 341 struct nvram_partition *part;
342 struct nvram_partition *part = NULL; 342 struct nvram_partition *new_part;
343 struct nvram_partition *new_part = NULL;
344 struct nvram_partition *free_part = NULL; 343 struct nvram_partition *free_part = NULL;
345 int seq_init[2] = { 0, 0 }; 344 int seq_init[2] = { 0, 0 };
346 loff_t tmp_index; 345 loff_t tmp_index;
@@ -349,8 +348,7 @@ static int nvram_create_os_partition(void)
349 348
350 /* Find a free partition that will give us the maximum needed size 349 /* Find a free partition that will give us the maximum needed size
351 If can't find one that will give us the minimum size needed */ 350 If can't find one that will give us the minimum size needed */
352 list_for_each(p, &nvram_part->partition) { 351 list_for_each_entry(part, &nvram_part->partition, partition) {
353 part = list_entry(p, struct nvram_partition, partition);
354 if (part->header.signature != NVRAM_SIG_FREE) 352 if (part->header.signature != NVRAM_SIG_FREE)
355 continue; 353 continue;
356 354
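
The nvram change is the standard list_for_each() to list_for_each_entry() conversion: the newer macro folds the raw cursor and the manual list_entry() call into one construct, which is why both the struct list_head *p variable and the list_entry() line disappear. A self-contained sketch of the two styles over a minimal intrusive list (the real macros live in <linux/list.h>; the types here are invented):

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/* list_for_each_entry hides the cursor and the list_entry() call. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

struct partition {
	int signature;
	struct list_head list;
};

/* Old style: iterate raw nodes, convert each to its container by hand. */
static int count_free_old(struct list_head *head, int sig_free)
{
	struct list_head *p;
	struct partition *part;
	int n = 0;

	list_for_each(p, head) {
		part = list_entry(p, struct partition, list);
		if (part->signature == sig_free)
			n++;
	}
	return n;
}

/* New style: the macro walks typed entries directly. */
static int count_free_new(struct list_head *head, int sig_free)
{
	struct partition *part;
	int n = 0;

	list_for_each_entry(part, head, list)
		if (part->signature == sig_free)
			n++;
	return n;
}
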
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c
index 66bd5ab7c25a..b80e81984ba8 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/ppc64/kernel/of_device.c
@@ -3,6 +3,7 @@
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
6#include <asm/errno.h> 7#include <asm/errno.h>
7#include <asm/of_device.h> 8#include <asm/of_device.h>
8 9
@@ -15,20 +16,20 @@
15 * Used by a driver to check whether an of_device present in the 16 * Used by a driver to check whether an of_device present in the
16 * system is in its list of supported devices. 17 * system is in its list of supported devices.
17 */ 18 */
18const struct of_match * of_match_device(const struct of_match *matches, 19const struct of_device_id *of_match_device(const struct of_device_id *matches,
19 const struct of_device *dev) 20 const struct of_device *dev)
20{ 21{
21 if (!dev->node) 22 if (!dev->node)
22 return NULL; 23 return NULL;
23 while (matches->name || matches->type || matches->compatible) { 24 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
24 int match = 1; 25 int match = 1;
25 if (matches->name && matches->name != OF_ANY_MATCH) 26 if (matches->name[0])
26 match &= dev->node->name 27 match &= dev->node->name
27 && !strcmp(matches->name, dev->node->name); 28 && !strcmp(matches->name, dev->node->name);
28 if (matches->type && matches->type != OF_ANY_MATCH) 29 if (matches->type[0])
29 match &= dev->node->type 30 match &= dev->node->type
30 && !strcmp(matches->type, dev->node->type); 31 && !strcmp(matches->type, dev->node->type);
31 if (matches->compatible && matches->compatible != OF_ANY_MATCH) 32 if (matches->compatible[0])
32 match &= device_is_compatible(dev->node, 33 match &= device_is_compatible(dev->node,
33 matches->compatible); 34 matches->compatible);
34 if (match) 35 if (match)
@@ -42,7 +43,7 @@ static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
42{ 43{
43 struct of_device * of_dev = to_of_device(dev); 44 struct of_device * of_dev = to_of_device(dev);
44 struct of_platform_driver * of_drv = to_of_platform_driver(drv); 45 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
45 const struct of_match * matches = of_drv->match_table; 46 const struct of_device_id * matches = of_drv->match_table;
46 47
47 if (!matches) 48 if (!matches)
48 return 0; 49 return 0;
@@ -75,7 +76,7 @@ static int of_device_probe(struct device *dev)
75 int error = -ENODEV; 76 int error = -ENODEV;
76 struct of_platform_driver *drv; 77 struct of_platform_driver *drv;
77 struct of_device *of_dev; 78 struct of_device *of_dev;
78 const struct of_match *match; 79 const struct of_device_id *match;
79 80
80 drv = to_of_platform_driver(dev->driver); 81 drv = to_of_platform_driver(dev->driver);
81 of_dev = to_of_device(dev); 82 of_dev = to_of_device(dev);
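
The of_match to of_device_id conversion also changes the wildcard convention: of_device_id holds fixed-size char arrays, so "match anything" is the empty string (matches->name[0] == 0) instead of the old OF_ANY_MATCH pointer sentinel, and the table terminator is an entry whose three strings are all empty. A hedged sketch of that matching rule, with invented types and field sizes:

#include <string.h>

/* Simplified table entry: an empty string means "don't care". */
struct dev_id {
	char name[32];
	char type[32];
	char compatible[128];
};

/* Device-side properties; NULL means the property is absent. */
struct node {
	const char *name;
	const char *type;
};

extern int node_is_compatible(const struct node *n, const char *compat);

static const struct dev_id *match_device(const struct dev_id *matches,
					 const struct node *n)
{
	/* An all-empty entry terminates the table. */
	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
		int match = 1;

		if (matches->name[0])
			match &= n->name && !strcmp(matches->name, n->name);
		if (matches->type[0])
			match &= n->type && !strcmp(matches->type, n->type);
		if (matches->compatible[0])
			match &= node_is_compatible(n, matches->compatible);
		if (match)
			return matches;
		matches++;
	}
	return NULL;
}
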
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 44d9af72d225..5bec956e44a0 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -19,6 +19,7 @@
19#undef DEBUG 19#undef DEBUG
20 20
21#include <linux/config.h> 21#include <linux/config.h>
22#include <linux/cpu.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/sched.h> 24#include <linux/sched.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
@@ -82,6 +83,9 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
82extern void pSeries_system_reset_exception(struct pt_regs *regs); 83extern void pSeries_system_reset_exception(struct pt_regs *regs);
83extern int pSeries_machine_check_exception(struct pt_regs *regs); 84extern int pSeries_machine_check_exception(struct pt_regs *regs);
84 85
86static int pseries_shared_idle(void);
87static int pseries_dedicated_idle(void);
88
85static volatile void __iomem * chrp_int_ack_special; 89static volatile void __iomem * chrp_int_ack_special;
86struct mpic *pSeries_mpic; 90struct mpic *pSeries_mpic;
87 91
@@ -229,6 +233,20 @@ static void __init pSeries_setup_arch(void)
229 233
230 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 234 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
231 vpa_init(boot_cpuid); 235 vpa_init(boot_cpuid);
236
237 /* Choose an idle loop */
238 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
239 if (get_paca()->lppaca.shared_proc) {
240 printk(KERN_INFO "Using shared processor idle loop\n");
241 ppc_md.idle_loop = pseries_shared_idle;
242 } else {
243 printk(KERN_INFO "Using dedicated idle loop\n");
244 ppc_md.idle_loop = pseries_dedicated_idle;
245 }
246 } else {
247 printk(KERN_INFO "Using default idle loop\n");
248 ppc_md.idle_loop = default_idle;
249 }
232} 250}
233 251
234static int __init pSeries_init_panel(void) 252static int __init pSeries_init_panel(void)
@@ -418,6 +436,144 @@ static int __init pSeries_probe(int platform)
418 return 1; 436 return 1;
419} 437}
420 438
439DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
440
441static inline void dedicated_idle_sleep(unsigned int cpu)
442{
443 struct paca_struct *ppaca = &paca[cpu ^ 1];
444
445 /* Only sleep if the other thread is not idle */
446 if (!(ppaca->lppaca.idle)) {
447 local_irq_disable();
448
449 /*
 450 * We are about to put the thread to sleep and so won't be
 451 * polling any more.
452 */
453 clear_thread_flag(TIF_POLLING_NRFLAG);
454
455 /*
456 * SMT dynamic mode. Cede will result in this thread going
457 * dormant, if the partner thread is still doing work. Thread
458 * wakes up if partner goes idle, an interrupt is presented, or
459 * a prod occurs. Returning from the cede enables external
460 * interrupts.
461 */
462 if (!need_resched())
463 cede_processor();
464 else
465 local_irq_enable();
466 } else {
467 /*
468 * Give the HV an opportunity at the processor, since we are
469 * not doing any work.
470 */
471 poll_pending();
472 }
473}
474
475static int pseries_dedicated_idle(void)
476{
477 long oldval;
478 struct paca_struct *lpaca = get_paca();
479 unsigned int cpu = smp_processor_id();
480 unsigned long start_snooze;
481 unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
482
483 while (1) {
484 /*
485 * Indicate to the HV that we are idle. Now would be
486 * a good time to find other work to dispatch.
487 */
488 lpaca->lppaca.idle = 1;
489
490 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
491 if (!oldval) {
492 set_thread_flag(TIF_POLLING_NRFLAG);
493
494 start_snooze = __get_tb() +
495 *smt_snooze_delay * tb_ticks_per_usec;
496
497 while (!need_resched() && !cpu_is_offline(cpu)) {
498 ppc64_runlatch_off();
499
500 /*
501 * Go into low thread priority and possibly
502 * low power mode.
503 */
504 HMT_low();
505 HMT_very_low();
506
507 if (*smt_snooze_delay != 0 &&
508 __get_tb() > start_snooze) {
509 HMT_medium();
510 dedicated_idle_sleep(cpu);
511 }
512
513 }
514
515 HMT_medium();
516 clear_thread_flag(TIF_POLLING_NRFLAG);
517 } else {
518 set_need_resched();
519 }
520
521 lpaca->lppaca.idle = 0;
522 ppc64_runlatch_on();
523
524 schedule();
525
526 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
527 cpu_die();
528 }
529}
530
531static int pseries_shared_idle(void)
532{
533 struct paca_struct *lpaca = get_paca();
534 unsigned int cpu = smp_processor_id();
535
536 while (1) {
537 /*
538 * Indicate to the HV that we are idle. Now would be
539 * a good time to find other work to dispatch.
540 */
541 lpaca->lppaca.idle = 1;
542
543 while (!need_resched() && !cpu_is_offline(cpu)) {
544 local_irq_disable();
545 ppc64_runlatch_off();
546
547 /*
 548 * Yield the processor to the hypervisor. We return when
 549 * an external interrupt occurs (interrupts are driven prior
 550 * to returning here) or when a prod occurs from another
551 * processor. When returning here, external interrupts
552 * are enabled.
553 *
554 * Check need_resched() again with interrupts disabled
555 * to avoid a race.
556 */
557 if (!need_resched())
558 cede_processor();
559 else
560 local_irq_enable();
561
562 HMT_medium();
563 }
564
565 lpaca->lppaca.idle = 0;
566 ppc64_runlatch_on();
567
568 schedule();
569
570 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
571 cpu_die();
572 }
573
574 return 0;
575}
576
421struct machdep_calls __initdata pSeries_md = { 577struct machdep_calls __initdata pSeries_md = {
422 .probe = pSeries_probe, 578 .probe = pSeries_probe,
423 .setup_arch = pSeries_setup_arch, 579 .setup_arch = pSeries_setup_arch,
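
The two pSeries loops implement different policies around the same cede_processor() primitive. The dedicated loop polls at low SMT thread priority until smt_snooze_delay timebase ticks have elapsed, and only then considers ceding (and only when the sibling thread is busy); the shared loop cedes as soon as need_resched() is clear, since idle time in a shared partition belongs to the hypervisor anyway. A condensed sketch of the dedicated policy, where now(), snooze_ticks(), low_priority_poll(), cede() and work_pending() are hypothetical stand-ins for __get_tb(), the smt_snooze_delay computation, HMT_low()/HMT_very_low(), cede_processor() and need_resched():

extern unsigned long now(void);
extern unsigned long snooze_ticks(void);
extern void low_priority_poll(void);
extern void cede(void);
extern int work_pending(void);

static void dedicated_idle_once(void)
{
	unsigned long deadline = now() + snooze_ticks();

	/* Phase 1: spin at low thread priority so the sibling SMT
	 * thread gets most of the core while we wait for work. */
	while (!work_pending() && now() < deadline)
		low_priority_poll();

	/* Phase 2: past the snooze delay, hand the thread back to the
	 * hypervisor until an interrupt or a prod wakes it. */
	if (!work_pending())
		cede();
}
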
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index a3e0975c26c1..6316188737b6 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -42,21 +42,7 @@ extern unsigned long __toc_start;
42 * processors. The processor VPD array needs one entry per physical 42 * processors. The processor VPD array needs one entry per physical
43 * processor (not thread). 43 * processor (not thread).
44 */ 44 */
45#ifdef CONFIG_PPC_ISERIES 45#define PACA_INIT_COMMON(number, start, asrr, asrv) \
46#define EXTRA_INITS(number, lpq) \
47 .lppaca_ptr = &paca[number].lppaca, \
48 .lpqueue_ptr = (lpq), /* &xItLpQueue, */ \
49 .reg_save_ptr = &paca[number].reg_save, \
50 .reg_save = { \
51 .xDesc = 0xd397d9e2, /* "LpRS" */ \
52 .xSize = sizeof(struct ItLpRegSave) \
53 },
54#else
55#define EXTRA_INITS(number, lpq)
56#endif
57
58#define PACAINITDATA(number,start,lpq,asrr,asrv) \
59{ \
60 .lock_token = 0x8000, \ 46 .lock_token = 0x8000, \
61 .paca_index = (number), /* Paca Index */ \ 47 .paca_index = (number), /* Paca Index */ \
62 .default_decr = 0x00ff0000, /* Initial Decr */ \ 48 .default_decr = 0x00ff0000, /* Initial Decr */ \
@@ -74,147 +60,79 @@ extern unsigned long __toc_start;
74 .end_of_quantum = 0xfffffffffffffffful, \ 60 .end_of_quantum = 0xfffffffffffffffful, \
75 .slb_count = 64, \ 61 .slb_count = 64, \
76 }, \ 62 }, \
77 EXTRA_INITS((number), (lpq)) \
78}
79 63
80struct paca_struct paca[] = {
81#ifdef CONFIG_PPC_ISERIES 64#ifdef CONFIG_PPC_ISERIES
82 PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR), 65#define PACA_INIT_ISERIES(number) \
66 .lppaca_ptr = &paca[number].lppaca, \
67 .reg_save_ptr = &paca[number].reg_save, \
68 .reg_save = { \
69 .xDesc = 0xd397d9e2, /* "LpRS" */ \
70 .xSize = sizeof(struct ItLpRegSave) \
71 }
72
73#define PACA_INIT(number) \
74{ \
75 PACA_INIT_COMMON(number, 0, 0, 0) \
76 PACA_INIT_ISERIES(number) \
77}
78
79#define BOOTCPU_PACA_INIT(number) \
80{ \
81 PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \
82 PACA_INIT_ISERIES(number) \
83}
84
83#else 85#else
84 PACAINITDATA( 0, 1, NULL, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR), 86#define PACA_INIT(number) \
87{ \
88 PACA_INIT_COMMON(number, 0, 0, 0) \
89}
90
91#define BOOTCPU_PACA_INIT(number) \
92{ \
93 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \
94}
85#endif 95#endif
96
97struct paca_struct paca[] = {
98 BOOTCPU_PACA_INIT(0),
86#if NR_CPUS > 1 99#if NR_CPUS > 1
87 PACAINITDATA( 1, 0, NULL, 0, 0), 100 PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3),
88 PACAINITDATA( 2, 0, NULL, 0, 0),
89 PACAINITDATA( 3, 0, NULL, 0, 0),
90#if NR_CPUS > 4 101#if NR_CPUS > 4
91 PACAINITDATA( 4, 0, NULL, 0, 0), 102 PACA_INIT( 4), PACA_INIT( 5), PACA_INIT( 6), PACA_INIT( 7),
92 PACAINITDATA( 5, 0, NULL, 0, 0),
93 PACAINITDATA( 6, 0, NULL, 0, 0),
94 PACAINITDATA( 7, 0, NULL, 0, 0),
95#if NR_CPUS > 8 103#if NR_CPUS > 8
96 PACAINITDATA( 8, 0, NULL, 0, 0), 104 PACA_INIT( 8), PACA_INIT( 9), PACA_INIT( 10), PACA_INIT( 11),
97 PACAINITDATA( 9, 0, NULL, 0, 0), 105 PACA_INIT( 12), PACA_INIT( 13), PACA_INIT( 14), PACA_INIT( 15),
98 PACAINITDATA(10, 0, NULL, 0, 0), 106 PACA_INIT( 16), PACA_INIT( 17), PACA_INIT( 18), PACA_INIT( 19),
99 PACAINITDATA(11, 0, NULL, 0, 0), 107 PACA_INIT( 20), PACA_INIT( 21), PACA_INIT( 22), PACA_INIT( 23),
100 PACAINITDATA(12, 0, NULL, 0, 0), 108 PACA_INIT( 24), PACA_INIT( 25), PACA_INIT( 26), PACA_INIT( 27),
101 PACAINITDATA(13, 0, NULL, 0, 0), 109 PACA_INIT( 28), PACA_INIT( 29), PACA_INIT( 30), PACA_INIT( 31),
102 PACAINITDATA(14, 0, NULL, 0, 0),
103 PACAINITDATA(15, 0, NULL, 0, 0),
104 PACAINITDATA(16, 0, NULL, 0, 0),
105 PACAINITDATA(17, 0, NULL, 0, 0),
106 PACAINITDATA(18, 0, NULL, 0, 0),
107 PACAINITDATA(19, 0, NULL, 0, 0),
108 PACAINITDATA(20, 0, NULL, 0, 0),
109 PACAINITDATA(21, 0, NULL, 0, 0),
110 PACAINITDATA(22, 0, NULL, 0, 0),
111 PACAINITDATA(23, 0, NULL, 0, 0),
112 PACAINITDATA(24, 0, NULL, 0, 0),
113 PACAINITDATA(25, 0, NULL, 0, 0),
114 PACAINITDATA(26, 0, NULL, 0, 0),
115 PACAINITDATA(27, 0, NULL, 0, 0),
116 PACAINITDATA(28, 0, NULL, 0, 0),
117 PACAINITDATA(29, 0, NULL, 0, 0),
118 PACAINITDATA(30, 0, NULL, 0, 0),
119 PACAINITDATA(31, 0, NULL, 0, 0),
120#if NR_CPUS > 32 110#if NR_CPUS > 32
121 PACAINITDATA(32, 0, NULL, 0, 0), 111 PACA_INIT( 32), PACA_INIT( 33), PACA_INIT( 34), PACA_INIT( 35),
122 PACAINITDATA(33, 0, NULL, 0, 0), 112 PACA_INIT( 36), PACA_INIT( 37), PACA_INIT( 38), PACA_INIT( 39),
123 PACAINITDATA(34, 0, NULL, 0, 0), 113 PACA_INIT( 40), PACA_INIT( 41), PACA_INIT( 42), PACA_INIT( 43),
124 PACAINITDATA(35, 0, NULL, 0, 0), 114 PACA_INIT( 44), PACA_INIT( 45), PACA_INIT( 46), PACA_INIT( 47),
125 PACAINITDATA(36, 0, NULL, 0, 0), 115 PACA_INIT( 48), PACA_INIT( 49), PACA_INIT( 50), PACA_INIT( 51),
126 PACAINITDATA(37, 0, NULL, 0, 0), 116 PACA_INIT( 52), PACA_INIT( 53), PACA_INIT( 54), PACA_INIT( 55),
127 PACAINITDATA(38, 0, NULL, 0, 0), 117 PACA_INIT( 56), PACA_INIT( 57), PACA_INIT( 58), PACA_INIT( 59),
128 PACAINITDATA(39, 0, NULL, 0, 0), 118 PACA_INIT( 60), PACA_INIT( 61), PACA_INIT( 62), PACA_INIT( 63),
129 PACAINITDATA(40, 0, NULL, 0, 0),
130 PACAINITDATA(41, 0, NULL, 0, 0),
131 PACAINITDATA(42, 0, NULL, 0, 0),
132 PACAINITDATA(43, 0, NULL, 0, 0),
133 PACAINITDATA(44, 0, NULL, 0, 0),
134 PACAINITDATA(45, 0, NULL, 0, 0),
135 PACAINITDATA(46, 0, NULL, 0, 0),
136 PACAINITDATA(47, 0, NULL, 0, 0),
137 PACAINITDATA(48, 0, NULL, 0, 0),
138 PACAINITDATA(49, 0, NULL, 0, 0),
139 PACAINITDATA(50, 0, NULL, 0, 0),
140 PACAINITDATA(51, 0, NULL, 0, 0),
141 PACAINITDATA(52, 0, NULL, 0, 0),
142 PACAINITDATA(53, 0, NULL, 0, 0),
143 PACAINITDATA(54, 0, NULL, 0, 0),
144 PACAINITDATA(55, 0, NULL, 0, 0),
145 PACAINITDATA(56, 0, NULL, 0, 0),
146 PACAINITDATA(57, 0, NULL, 0, 0),
147 PACAINITDATA(58, 0, NULL, 0, 0),
148 PACAINITDATA(59, 0, NULL, 0, 0),
149 PACAINITDATA(60, 0, NULL, 0, 0),
150 PACAINITDATA(61, 0, NULL, 0, 0),
151 PACAINITDATA(62, 0, NULL, 0, 0),
152 PACAINITDATA(63, 0, NULL, 0, 0),
153#if NR_CPUS > 64 119#if NR_CPUS > 64
154 PACAINITDATA(64, 0, NULL, 0, 0), 120 PACA_INIT( 64), PACA_INIT( 65), PACA_INIT( 66), PACA_INIT( 67),
155 PACAINITDATA(65, 0, NULL, 0, 0), 121 PACA_INIT( 68), PACA_INIT( 69), PACA_INIT( 70), PACA_INIT( 71),
156 PACAINITDATA(66, 0, NULL, 0, 0), 122 PACA_INIT( 72), PACA_INIT( 73), PACA_INIT( 74), PACA_INIT( 75),
157 PACAINITDATA(67, 0, NULL, 0, 0), 123 PACA_INIT( 76), PACA_INIT( 77), PACA_INIT( 78), PACA_INIT( 79),
158 PACAINITDATA(68, 0, NULL, 0, 0), 124 PACA_INIT( 80), PACA_INIT( 81), PACA_INIT( 82), PACA_INIT( 83),
159 PACAINITDATA(69, 0, NULL, 0, 0), 125 PACA_INIT( 84), PACA_INIT( 85), PACA_INIT( 86), PACA_INIT( 87),
160 PACAINITDATA(70, 0, NULL, 0, 0), 126 PACA_INIT( 88), PACA_INIT( 89), PACA_INIT( 90), PACA_INIT( 91),
161 PACAINITDATA(71, 0, NULL, 0, 0), 127 PACA_INIT( 92), PACA_INIT( 93), PACA_INIT( 94), PACA_INIT( 95),
162 PACAINITDATA(72, 0, NULL, 0, 0), 128 PACA_INIT( 96), PACA_INIT( 97), PACA_INIT( 98), PACA_INIT( 99),
163 PACAINITDATA(73, 0, NULL, 0, 0), 129 PACA_INIT(100), PACA_INIT(101), PACA_INIT(102), PACA_INIT(103),
164 PACAINITDATA(74, 0, NULL, 0, 0), 130 PACA_INIT(104), PACA_INIT(105), PACA_INIT(106), PACA_INIT(107),
165 PACAINITDATA(75, 0, NULL, 0, 0), 131 PACA_INIT(108), PACA_INIT(109), PACA_INIT(110), PACA_INIT(111),
166 PACAINITDATA(76, 0, NULL, 0, 0), 132 PACA_INIT(112), PACA_INIT(113), PACA_INIT(114), PACA_INIT(115),
167 PACAINITDATA(77, 0, NULL, 0, 0), 133 PACA_INIT(116), PACA_INIT(117), PACA_INIT(118), PACA_INIT(119),
168 PACAINITDATA(78, 0, NULL, 0, 0), 134 PACA_INIT(120), PACA_INIT(121), PACA_INIT(122), PACA_INIT(123),
169 PACAINITDATA(79, 0, NULL, 0, 0), 135 PACA_INIT(124), PACA_INIT(125), PACA_INIT(126), PACA_INIT(127),
170 PACAINITDATA(80, 0, NULL, 0, 0),
171 PACAINITDATA(81, 0, NULL, 0, 0),
172 PACAINITDATA(82, 0, NULL, 0, 0),
173 PACAINITDATA(83, 0, NULL, 0, 0),
174 PACAINITDATA(84, 0, NULL, 0, 0),
175 PACAINITDATA(85, 0, NULL, 0, 0),
176 PACAINITDATA(86, 0, NULL, 0, 0),
177 PACAINITDATA(87, 0, NULL, 0, 0),
178 PACAINITDATA(88, 0, NULL, 0, 0),
179 PACAINITDATA(89, 0, NULL, 0, 0),
180 PACAINITDATA(90, 0, NULL, 0, 0),
181 PACAINITDATA(91, 0, NULL, 0, 0),
182 PACAINITDATA(92, 0, NULL, 0, 0),
183 PACAINITDATA(93, 0, NULL, 0, 0),
184 PACAINITDATA(94, 0, NULL, 0, 0),
185 PACAINITDATA(95, 0, NULL, 0, 0),
186 PACAINITDATA(96, 0, NULL, 0, 0),
187 PACAINITDATA(97, 0, NULL, 0, 0),
188 PACAINITDATA(98, 0, NULL, 0, 0),
189 PACAINITDATA(99, 0, NULL, 0, 0),
190 PACAINITDATA(100, 0, NULL, 0, 0),
191 PACAINITDATA(101, 0, NULL, 0, 0),
192 PACAINITDATA(102, 0, NULL, 0, 0),
193 PACAINITDATA(103, 0, NULL, 0, 0),
194 PACAINITDATA(104, 0, NULL, 0, 0),
195 PACAINITDATA(105, 0, NULL, 0, 0),
196 PACAINITDATA(106, 0, NULL, 0, 0),
197 PACAINITDATA(107, 0, NULL, 0, 0),
198 PACAINITDATA(108, 0, NULL, 0, 0),
199 PACAINITDATA(109, 0, NULL, 0, 0),
200 PACAINITDATA(110, 0, NULL, 0, 0),
201 PACAINITDATA(111, 0, NULL, 0, 0),
202 PACAINITDATA(112, 0, NULL, 0, 0),
203 PACAINITDATA(113, 0, NULL, 0, 0),
204 PACAINITDATA(114, 0, NULL, 0, 0),
205 PACAINITDATA(115, 0, NULL, 0, 0),
206 PACAINITDATA(116, 0, NULL, 0, 0),
207 PACAINITDATA(117, 0, NULL, 0, 0),
208 PACAINITDATA(118, 0, NULL, 0, 0),
209 PACAINITDATA(119, 0, NULL, 0, 0),
210 PACAINITDATA(120, 0, NULL, 0, 0),
211 PACAINITDATA(121, 0, NULL, 0, 0),
212 PACAINITDATA(122, 0, NULL, 0, 0),
213 PACAINITDATA(123, 0, NULL, 0, 0),
214 PACAINITDATA(124, 0, NULL, 0, 0),
215 PACAINITDATA(125, 0, NULL, 0, 0),
216 PACAINITDATA(126, 0, NULL, 0, 0),
217 PACAINITDATA(127, 0, NULL, 0, 0),
218#endif 136#endif
219#endif 137#endif
220#endif 138#endif
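
The pacaData rewrite is pure consolidation: every non-boot entry took identical arguments, so factoring the common designated initializers into PACA_INIT_COMMON and wrapping the platform-specific extras lets the 128-entry array shrink to four entries per line. The same pattern in miniature, with invented field names:

struct cpu_data {
	int index;
	int started;
	unsigned long stab;
};

/* Common designated initializers, shared by every entry. */
#define CPU_COMMON(n, up, virt)	\
	.index = (n),		\
	.started = (up),	\
	.stab = (virt),

#define CPU_INIT(n)	 { CPU_COMMON(n, 0, 0) }
#define BOOT_CPU_INIT(n) { CPU_COMMON(n, 1, 0xc000000000000000UL) }

/* Only the boot entry differs; the rest collapse onto one line. */
static struct cpu_data cpus[] = {
	BOOT_CPU_INIT(0),
	CPU_INIT(1), CPU_INIT(2), CPU_INIT(3),
};
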
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index 6cf03d387b91..3013cdb5f933 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -186,6 +186,8 @@ void __init pmac_setup_arch(void)
186#ifdef CONFIG_DUMMY_CONSOLE 186#ifdef CONFIG_DUMMY_CONSOLE
187 conswitchp = &dummy_con; 187 conswitchp = &dummy_con;
188#endif 188#endif
189
190 printk(KERN_INFO "Using native/NAP idle loop\n");
189} 191}
190 192
191#ifdef CONFIG_SCSI 193#ifdef CONFIG_SCSI
@@ -507,5 +509,6 @@ struct machdep_calls __initdata pmac_md = {
507 .calibrate_decr = pmac_calibrate_decr, 509 .calibrate_decr = pmac_calibrate_decr,
508 .feature_call = pmac_do_feature_call, 510 .feature_call = pmac_do_feature_call,
509 .progress = pmac_progress, 511 .progress = pmac_progress,
510 .check_legacy_ioport = pmac_check_legacy_ioport 512 .check_legacy_ioport = pmac_check_legacy_ioport,
513 .idle_loop = native_idle,
511}; 514};
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index d5e4866e9ac2..d1b33f0b26cb 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -96,7 +96,6 @@ extern void udbg_init_maple_realmode(void);
96extern unsigned long klimit; 96extern unsigned long klimit;
97 97
98extern void mm_init_ppc64(void); 98extern void mm_init_ppc64(void);
99extern int idle_setup(void);
100extern void stab_initialize(unsigned long stab); 99extern void stab_initialize(unsigned long stab);
101extern void htab_initialize(void); 100extern void htab_initialize(void);
102extern void early_init_devtree(void *flat_dt); 101extern void early_init_devtree(void *flat_dt);
@@ -1081,8 +1080,11 @@ void __init setup_arch(char **cmdline_p)
1081 1080
1082 ppc_md.setup_arch(); 1081 ppc_md.setup_arch();
1083 1082
1084 /* Select the correct idle loop for the platform. */ 1083 /* Use the default idle loop if the platform hasn't provided one. */
1085 idle_setup(); 1084 if (ppc_md.idle_loop == NULL) {
1085 ppc_md.idle_loop = default_idle;
1086 printk(KERN_INFO "Using default idle loop\n");
1087 }
1086 1088
1087 paging_init(); 1089 paging_init();
1088 ppc64_boot_msg(0x15, "Setup Done"); 1090 ppc64_boot_msg(0x15, "Setup Done");
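
With idle_setup() gone, idle-loop selection becomes an ordinary hook-with-fallback: each platform publishes its loop through machdep_calls, and generic setup installs default_idle only when the field is still NULL. A minimal sketch of the pattern, with illustrative names:

#include <stdio.h>

struct machine_ops {
	int (*idle_loop)(void);
};

static int default_idle_loop(void)
{
	/* spin or nap until work arrives */
	return 0;
}

/* Platform code may have filled in idle_loop already; patch in the
 * default otherwise, exactly once, at the end of setup. */
static void finish_setup(struct machine_ops *ops)
{
	if (!ops->idle_loop) {
		ops->idle_loop = default_idle_loop;
		printf("Using default idle loop\n");
	}
}
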
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index 118436e8085a..206619080e66 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -30,47 +30,26 @@
30#include <linux/sem.h> 30#include <linux/sem.h>
31#include <linux/msg.h> 31#include <linux/msg.h>
32#include <linux/shm.h> 32#include <linux/shm.h>
33#include <linux/slab.h>
34#include <linux/uio.h>
35#include <linux/aio.h>
36#include <linux/nfs_fs.h>
37#include <linux/module.h>
38#include <linux/sunrpc/svc.h>
39#include <linux/nfsd/nfsd.h>
40#include <linux/nfsd/cache.h>
41#include <linux/nfsd/xdr.h>
42#include <linux/nfsd/syscall.h>
43#include <linux/poll.h> 33#include <linux/poll.h>
44#include <linux/personality.h> 34#include <linux/personality.h>
45#include <linux/stat.h> 35#include <linux/stat.h>
46#include <linux/filter.h>
47#include <linux/highmem.h>
48#include <linux/highuid.h>
49#include <linux/mman.h> 36#include <linux/mman.h>
50#include <linux/ipv6.h>
51#include <linux/in.h> 37#include <linux/in.h>
52#include <linux/icmpv6.h>
53#include <linux/syscalls.h> 38#include <linux/syscalls.h>
54#include <linux/unistd.h> 39#include <linux/unistd.h>
55#include <linux/sysctl.h> 40#include <linux/sysctl.h>
56#include <linux/binfmts.h> 41#include <linux/binfmts.h>
57#include <linux/dnotify.h>
58#include <linux/security.h> 42#include <linux/security.h>
59#include <linux/compat.h> 43#include <linux/compat.h>
60#include <linux/ptrace.h> 44#include <linux/ptrace.h>
61#include <linux/aio_abi.h>
62#include <linux/elf.h> 45#include <linux/elf.h>
63 46
64#include <net/scm.h>
65#include <net/sock.h>
66
67#include <asm/ptrace.h> 47#include <asm/ptrace.h>
68#include <asm/types.h> 48#include <asm/types.h>
69#include <asm/ipc.h> 49#include <asm/ipc.h>
70#include <asm/uaccess.h> 50#include <asm/uaccess.h>
71#include <asm/unistd.h> 51#include <asm/unistd.h>
72#include <asm/semaphore.h> 52#include <asm/semaphore.h>
73#include <asm/ppcdebug.h>
74#include <asm/time.h> 53#include <asm/time.h>
75#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
76#include <asm/systemcfg.h> 55#include <asm/systemcfg.h>
@@ -350,8 +329,6 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
350 return ret; 329 return ret;
351} 330}
352 331
353
354/* These are here just in case some old sparc32 binary calls it. */
355asmlinkage long sys32_pause(void) 332asmlinkage long sys32_pause(void)
356{ 333{
357 current->state = TASK_INTERRUPTIBLE; 334 current->state = TASK_INTERRUPTIBLE;
@@ -360,8 +337,6 @@ asmlinkage long sys32_pause(void)
360 return -ERESTARTNOHAND; 337 return -ERESTARTNOHAND;
361} 338}
362 339
363
364
365static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i) 340static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
366{ 341{
367 long usec; 342 long usec;
@@ -847,16 +822,6 @@ asmlinkage long sys32_getpgid(u32 pid)
847} 822}
848 823
849 824
850/* Note: it is necessary to treat which and who as unsigned ints,
851 * with the corresponding cast to a signed int to insure that the
852 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
853 * and the register representation of a signed int (msr in 64-bit mode) is performed.
854 */
855asmlinkage long sys32_getpriority(u32 which, u32 who)
856{
857 return sys_getpriority((int)which, (int)who);
858}
859
860 825
861/* Note: it is necessary to treat pid as an unsigned int, 826/* Note: it is necessary to treat pid as an unsigned int,
862 * with the corresponding cast to a signed int to insure that the 827 * with the corresponding cast to a signed int to insure that the
@@ -1048,6 +1013,11 @@ asmlinkage long sys32_setpgid(u32 pid, u32 pgid)
1048 return sys_setpgid((int)pid, (int)pgid); 1013 return sys_setpgid((int)pid, (int)pgid);
1049} 1014}
1050 1015
1016long sys32_getpriority(u32 which, u32 who)
1017{
1018 /* sign extend which and who */
1019 return sys_getpriority((int)which, (int)who);
1020}
1051 1021
1052long sys32_setpriority(u32 which, u32 who, u32 niceval) 1022long sys32_setpriority(u32 which, u32 who, u32 niceval)
1053{ 1023{
@@ -1055,6 +1025,18 @@ long sys32_setpriority(u32 which, u32 who, u32 niceval)
1055 return sys_setpriority((int)which, (int)who, (int)niceval); 1025 return sys_setpriority((int)which, (int)who, (int)niceval);
1056} 1026}
1057 1027
1028long sys32_ioprio_get(u32 which, u32 who)
1029{
1030 /* sign extend which and who */
1031 return sys_ioprio_get((int)which, (int)who);
1032}
1033
1034long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
1035{
1036 /* sign extend which, who and ioprio */
1037 return sys_ioprio_set((int)which, (int)who, (int)ioprio);
1038}
1039
1058/* Note: it is necessary to treat newmask as an unsigned int, 1040/* Note: it is necessary to treat newmask as an unsigned int,
1059 * with the corresponding cast to a signed int to insure that the 1041 * with the corresponding cast to a signed int to insure that the
1060 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 1042 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
@@ -1273,8 +1255,6 @@ long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
1273 (u64)len_high << 32 | len_low, advice); 1255 (u64)len_high << 32 | len_low, advice);
1274} 1256}
1275 1257
1276extern asmlinkage long sys_timer_create(clockid_t, sigevent_t __user *, timer_t __user *);
1277
1278long ppc32_timer_create(clockid_t clock, 1258long ppc32_timer_create(clockid_t clock,
1279 struct compat_sigevent __user *ev32, 1259 struct compat_sigevent __user *ev32,
1280 timer_t __user *timer_id) 1260 timer_t __user *timer_id)
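
The new sys32_ioprio_* wrappers repeat the sign-extension idiom used throughout this file: a 32-bit task leaves only the low 32 bits of a register meaningful, so the wrapper takes the argument as u32 and casts through int, which sign-extends on promotion to the native 64-bit type. A runnable userspace demonstration of why the cast matters:

#include <stdio.h>
#include <stdint.h>

/* The native call expects a long that already carries the sign. */
static long native_getpriority(long who)
{
	return who;
}

int main(void)
{
	/* A 32-bit caller passing -1 hands over register bits
	 * 0xffffffff; the upper half must be reconstructed. */
	uint32_t who = (uint32_t)-1;

	/* Without the cast, the value zero-extends to 4294967295. */
	printf("no cast: %ld\n", native_getpriority((long)who));

	/* (int32_t) reinterprets the low 32 bits, and the promotion
	 * back to long sign-extends, recovering -1. */
	printf("cast:    %ld\n", native_getpriority((int32_t)who));
	return 0;
}
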
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index c8fa6569b2fd..02b8ac4e0168 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -112,7 +112,6 @@ void ppc64_enable_pmcs(void)
112 unsigned long hid0; 112 unsigned long hid0;
113#ifdef CONFIG_PPC_PSERIES 113#ifdef CONFIG_PPC_PSERIES
114 unsigned long set, reset; 114 unsigned long set, reset;
115 int ret;
116#endif /* CONFIG_PPC_PSERIES */ 115#endif /* CONFIG_PPC_PSERIES */
117 116
118 /* Only need to enable them once */ 117 /* Only need to enable them once */
@@ -145,11 +144,7 @@ void ppc64_enable_pmcs(void)
145 case PLATFORM_PSERIES_LPAR: 144 case PLATFORM_PSERIES_LPAR:
146 set = 1UL << 63; 145 set = 1UL << 63;
147 reset = 0; 146 reset = 0;
148 ret = plpar_hcall_norets(H_PERFMON, set, reset); 147 plpar_hcall_norets(H_PERFMON, set, reset);
149 if (ret)
150 printk(KERN_ERR "H_PERFMON call on cpu %u "
151 "returned %d\n",
152 smp_processor_id(), ret);
153 break; 148 break;
154#endif /* CONFIG_PPC_PSERIES */ 149#endif /* CONFIG_PPC_PSERIES */
155 150
@@ -161,13 +156,6 @@ void ppc64_enable_pmcs(void)
161 /* instruct hypervisor to maintain PMCs */ 156 /* instruct hypervisor to maintain PMCs */
162 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 157 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
163 get_paca()->lppaca.pmcregs_in_use = 1; 158 get_paca()->lppaca.pmcregs_in_use = 1;
164
165 /*
166 * On SMT machines we have to set the run latch in the ctrl register
167 * in order to make PMC6 spin.
168 */
169 if (cpu_has_feature(CPU_FTR_SMT))
170 ppc64_runlatch_on();
171#endif /* CONFIG_PPC_PSERIES */ 159#endif /* CONFIG_PPC_PSERIES */
172} 160}
173 161
@@ -400,7 +388,12 @@ static int __init topology_init(void)
400 struct cpu *c = &per_cpu(cpu_devices, cpu); 388 struct cpu *c = &per_cpu(cpu_devices, cpu);
401 389
402#ifdef CONFIG_NUMA 390#ifdef CONFIG_NUMA
403 parent = &node_devices[cpu_to_node(cpu)]; 391 /* The node to which a cpu belongs can't be known
392 * until the cpu is made present.
393 */
394 parent = NULL;
395 if (cpu_present(cpu))
396 parent = &node_devices[cpu_to_node(cpu)];
404#endif 397#endif
405 /* 398 /*
406 * For now, we just see if the system supports making 399 * For now, we just see if the system supports making
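
The topology_init change applies a simple guard: cpu_to_node() is only meaningful once a CPU is present, so the sysfs parent stays NULL until then. The shape of that guard, with hypothetical stand-ins for cpu_present(), cpu_to_node() and node_devices[]:

#include <stddef.h>

struct device;
extern struct device *node_device(int node);	/* ~ &node_devices[n] */
extern int cpu_present_now(int cpu);		/* ~ cpu_present() */
extern int cpu_node(int cpu);			/* ~ cpu_to_node() */

static struct device *cpu_parent(int cpu)
{
	/* Node assignment is unknown until the cpu is made present. */
	if (!cpu_present_now(cpu))
		return NULL;
	return node_device(cpu_node(cpu));
}
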
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 2a532db9138a..909462e1adea 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -99,7 +99,6 @@ unsigned long tb_to_ns_shift;
99struct gettimeofday_struct do_gtod; 99struct gettimeofday_struct do_gtod;
100 100
101extern unsigned long wall_jiffies; 101extern unsigned long wall_jiffies;
102extern unsigned long lpevent_count;
103extern int smp_tb_synchronized; 102extern int smp_tb_synchronized;
104 103
105extern struct timezone sys_tz; 104extern struct timezone sys_tz;
@@ -367,11 +366,8 @@ int timer_interrupt(struct pt_regs * regs)
367 set_dec(next_dec); 366 set_dec(next_dec);
368 367
369#ifdef CONFIG_PPC_ISERIES 368#ifdef CONFIG_PPC_ISERIES
370 { 369 if (hvlpevent_is_pending())
371 struct ItLpQueue *lpq = lpaca->lpqueue_ptr; 370 process_hvlpevents(regs);
372 if (lpq && ItLpQueue_isLpIntPending(lpq))
373 lpevent_count += ItLpQueue_process(lpq, regs);
374 }
375#endif 371#endif
376 372
377/* collect purr register values often, for accurate calculations */ 373/* collect purr register values often, for accurate calculations */
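
timer_interrupt() is a hot path, so the event work is split into a cheap predicate, hvlpevent_is_pending(), and a heavier process_hvlpevents() that runs only when something is actually queued; the per-CPU lpqueue_ptr indirection goes away with it. A sketch of that fast-path/slow-path split, with illustrative types:

struct event_queue {
	volatile unsigned long head, tail;
};

extern struct event_queue event_queue;
extern void dispatch_events(struct event_queue *q, void *regs);

/* Cheap, inline-able hint for hot paths; no locking needed just to
 * decide whether the expensive call is worth making. */
static inline int events_pending(struct event_queue *q)
{
	return q->head != q->tail;
}

void on_timer_tick(void *regs)
{
	/* The fast path pays one comparison; dispatch runs only when
	 * the hypervisor actually queued an event. */
	if (events_pending(&event_queue))
		dispatch_events(&event_queue, regs);
}
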
diff --git a/arch/ppc64/kernel/vdso32/vdso32.lds.S b/arch/ppc64/kernel/vdso32/vdso32.lds.S
index 11290c902ba3..6f87a916a394 100644
--- a/arch/ppc64/kernel/vdso32/vdso32.lds.S
+++ b/arch/ppc64/kernel/vdso32/vdso32.lds.S
@@ -40,9 +40,9 @@ SECTIONS
40 .gcc_except_table : { *(.gcc_except_table) } 40 .gcc_except_table : { *(.gcc_except_table) }
41 .fixup : { *(.fixup) } 41 .fixup : { *(.fixup) }
42 42
43 .got ALIGN(4) : { *(.got.plt) *(.got) }
44
45 .dynamic : { *(.dynamic) } :text :dynamic 43 .dynamic : { *(.dynamic) } :text :dynamic
44 .got : { *(.got) }
45 .plt : { *(.plt) }
46 46
47 _end = .; 47 _end = .;
48 __end = .; 48 __end = .;