Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile   |  13
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c    |   7
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c      | 224
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c     |  25
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 362
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h  |   9
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c    |  52
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c     |   2
8 files changed, 623 insertions(+), 71 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 046ace9c438..59eb8bdaa79 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,14 +1,9 @@
-ifeq ($(CONFIG_PPC64),y)
-EXTRA_CFLAGS += -mno-minimal-toc
-endif
-
-ifeq ($(CONFIG_PPC_PSERIES_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
+ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG
 
 obj-y := lpar.o hvCall.o nvram.o reconfig.o \
            setup.o iommu.o event_sources.o ras.o \
-           firmware.o power.o dlpar.o
+           firmware.o power.o dlpar.o mobility.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_XICS) += xics.o
 obj-$(CONFIG_SCANLOG) += scanlog.o
@@ -23,7 +18,7 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
 obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
 obj-$(CONFIG_HVCS) += hvcserver.o
 obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
 obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_DTL) += dtl.o
 
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 72d8054fa73..b74a9230edc 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -33,7 +33,7 @@ struct cc_workarea {
         u32 prop_offset;
 };
 
-static void dlpar_free_cc_property(struct property *prop)
+void dlpar_free_cc_property(struct property *prop)
 {
         kfree(prop->name);
         kfree(prop->value);
@@ -55,13 +55,12 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
 
         prop->length = ccwa->prop_length;
         value = (char *)ccwa + ccwa->prop_offset;
-        prop->value = kzalloc(prop->length, GFP_KERNEL);
+        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
         if (!prop->value) {
                 dlpar_free_cc_property(prop);
                 return NULL;
         }
 
-        memcpy(prop->value, value, prop->length);
         return prop;
 }
 
67 66
@@ -102,7 +101,7 @@ static void dlpar_free_one_cc_node(struct device_node *dn)
         kfree(dn);
 }
 
-static void dlpar_free_cc_nodes(struct device_node *dn)
+void dlpar_free_cc_nodes(struct device_node *dn)
 {
         if (dn->child)
                 dlpar_free_cc_nodes(dn->child);
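
The dlpar.c hunks above export the free helpers for use by mobility.c and collapse a kzalloc()-plus-memcpy() pair into kmemdup(). A minimal sketch of that simplification against the standard slab API (the wrapper names below are illustrative, not from the patch):

#include <linux/slab.h>         /* kzalloc, kfree */
#include <linux/string.h>       /* kmemdup, memcpy */

/* Before: allocate zeroed memory, then immediately overwrite all of it. */
static void *dup_value_open_coded(const void *src, size_t len)
{
        void *p = kzalloc(len, GFP_KERNEL);     /* zeroing is wasted work here */

        if (!p)
                return NULL;
        memcpy(p, src, len);
        return p;
}

/* After: kmemdup() allocates and copies in one call. */
static void *dup_value(const void *src, size_t len)
{
        return kmemdup(src, len, GFP_KERNEL);
}
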
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a00addb5594..c371bc06434 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -23,37 +23,22 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
+#include <linux/spinlock.h>
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/firmware.h>
+#include <asm/lppaca.h>
 
 #include "plpar_wrappers.h"
 
-/*
- * Layout of entries in the hypervisor's DTL buffer. Although we don't
- * actually access the internals of an entry (we only need to know the size),
- * we might as well define it here for reference.
- */
-struct dtl_entry {
-        u8      dispatch_reason;
-        u8      preempt_reason;
-        u16     processor_id;
-        u32     enqueue_to_dispatch_time;
-        u32     ready_to_enqueue_time;
-        u32     waiting_to_ready_time;
-        u64     timebase;
-        u64     fault_addr;
-        u64     srr0;
-        u64     srr1;
-};
-
 struct dtl {
         struct dtl_entry *buf;
         struct dentry *file;
         int cpu;
         int buf_entries;
         u64 last_idx;
+        spinlock_t lock;
 };
 static DEFINE_PER_CPU(struct dtl, cpu_dtl);
 
@@ -72,25 +57,97 @@ static u8 dtl_event_mask = 0x7;
 static int dtl_buf_entries = (16 * 85);
 
 
-static int dtl_enable(struct dtl *dtl)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+struct dtl_ring {
+        u64     write_index;
+        struct dtl_entry *write_ptr;
+        struct dtl_entry *buf;
+        struct dtl_entry *buf_end;
+        u8      saved_dtl_mask;
+};
+
+static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);
+
+static atomic_t dtl_count;
+
+/*
+ * The cpu accounting code controls the DTL ring buffer, and we get
+ * given entries as they are processed.
+ */
+static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-        unsigned long addr;
-        int ret, hwcpu;
+        struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+        struct dtl_entry *wp = dtlr->write_ptr;
+        struct lppaca *vpa = local_paca->lppaca_ptr;
 
-        /* only allow one reader */
-        if (dtl->buf)
-                return -EBUSY;
+        if (!wp)
+                return;
 
-        /* we need to store the original allocation size for use during read */
-        dtl->buf_entries = dtl_buf_entries;
+        *wp = *dtle;
+        barrier();
 
-        dtl->buf = kmalloc_node(dtl->buf_entries * sizeof(struct dtl_entry),
-                        GFP_KERNEL, cpu_to_node(dtl->cpu));
-        if (!dtl->buf) {
-                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
-                                __func__, dtl->cpu);
-                return -ENOMEM;
-        }
+        /* check for hypervisor ring buffer overflow, ignore this entry if so */
+        if (index + N_DISPATCH_LOG < vpa->dtl_idx)
+                return;
+
+        ++wp;
+        if (wp == dtlr->buf_end)
+                wp = dtlr->buf;
+        dtlr->write_ptr = wp;
+
+        /* incrementing write_index makes the new entry visible */
+        smp_wmb();
+        ++dtlr->write_index;
+}
+
+static int dtl_start(struct dtl *dtl)
+{
+        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
+
+        dtlr->buf = dtl->buf;
+        dtlr->buf_end = dtl->buf + dtl->buf_entries;
+        dtlr->write_index = 0;
+
+        /* setting write_ptr enables logging into our buffer */
+        smp_wmb();
+        dtlr->write_ptr = dtl->buf;
+
+        /* enable event logging */
+        dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
+        lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
+
+        dtl_consumer = consume_dtle;
+        atomic_inc(&dtl_count);
+        return 0;
+}
+
+static void dtl_stop(struct dtl *dtl)
+{
+        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
+
+        dtlr->write_ptr = NULL;
+        smp_wmb();
+
+        dtlr->buf = NULL;
+
+        /* restore dtl_enable_mask */
+        lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;
+
+        if (atomic_dec_and_test(&dtl_count))
+                dtl_consumer = NULL;
+}
+
+static u64 dtl_current_index(struct dtl *dtl)
+{
+        return per_cpu(dtl_rings, dtl->cpu).write_index;
+}
+
+#else /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+static int dtl_start(struct dtl *dtl)
+{
+        unsigned long addr;
+        int ret, hwcpu;
 
         /* Register our dtl buffer with the hypervisor. The HV expects the
          * buffer size to be passed in the second word of the buffer */
@@ -102,34 +159,82 @@ static int dtl_enable(struct dtl *dtl)
         if (ret) {
                 printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
                        "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
-                kfree(dtl->buf);
                 return -EIO;
         }
 
         /* set our initial buffer indices */
-        dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0;
+        lppaca_of(dtl->cpu).dtl_idx = 0;
 
         /* ensure that our updates to the lppaca fields have occurred before
          * we actually enable the logging */
         smp_wmb();
 
         /* enable event logging */
-        lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask;
+        lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
 
         return 0;
 }
 
-static void dtl_disable(struct dtl *dtl)
+static void dtl_stop(struct dtl *dtl)
 {
         int hwcpu = get_hard_smp_processor_id(dtl->cpu);
 
-        lppaca[dtl->cpu].dtl_enable_mask = 0x0;
+        lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
 
         unregister_dtl(hwcpu, __pa(dtl->buf));
+}
+
+static u64 dtl_current_index(struct dtl *dtl)
+{
+        return lppaca_of(dtl->cpu).dtl_idx;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+static int dtl_enable(struct dtl *dtl)
+{
+        long int n_entries;
+        long int rc;
+        struct dtl_entry *buf = NULL;
 
+        /* only allow one reader */
+        if (dtl->buf)
+                return -EBUSY;
+
+        n_entries = dtl_buf_entries;
+        buf = kmalloc_node(n_entries * sizeof(struct dtl_entry),
+                        GFP_KERNEL, cpu_to_node(dtl->cpu));
+        if (!buf) {
+                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
+                                __func__, dtl->cpu);
+                return -ENOMEM;
+        }
+
+        spin_lock(&dtl->lock);
+        rc = -EBUSY;
+        if (!dtl->buf) {
+                /* store the original allocation size for use during read */
+                dtl->buf_entries = n_entries;
+                dtl->buf = buf;
+                dtl->last_idx = 0;
+                rc = dtl_start(dtl);
+                if (rc)
+                        dtl->buf = NULL;
+        }
+        spin_unlock(&dtl->lock);
+
+        if (rc)
+                kfree(buf);
+        return rc;
+}
+
+static void dtl_disable(struct dtl *dtl)
+{
+        spin_lock(&dtl->lock);
+        dtl_stop(dtl);
         kfree(dtl->buf);
         dtl->buf = NULL;
         dtl->buf_entries = 0;
+        spin_unlock(&dtl->lock);
 }
 
 /* file interface */
@@ -157,8 +262,9 @@ static int dtl_file_release(struct inode *inode, struct file *filp)
 static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
                 loff_t *pos)
 {
-        int rc, cur_idx, last_idx, n_read, n_req, read_size;
+        long int rc, n_read, n_req, read_size;
         struct dtl *dtl;
+        u64 cur_idx, last_idx, i;
 
         if ((len % sizeof(struct dtl_entry)) != 0)
                 return -EINVAL;
@@ -171,41 +277,48 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
         /* actual number of entries read */
         n_read = 0;
 
-        cur_idx = lppaca[dtl->cpu].dtl_idx;
+        spin_lock(&dtl->lock);
+
+        cur_idx = dtl_current_index(dtl);
         last_idx = dtl->last_idx;
 
-        if (cur_idx - last_idx > dtl->buf_entries) {
-                pr_debug("%s: hv buffer overflow for cpu %d, samples lost\n",
-                        __func__, dtl->cpu);
-        }
+        if (last_idx + dtl->buf_entries <= cur_idx)
+                last_idx = cur_idx - dtl->buf_entries + 1;
+
+        if (last_idx + n_req > cur_idx)
+                n_req = cur_idx - last_idx;
+
+        if (n_req > 0)
+                dtl->last_idx = last_idx + n_req;
+
+        spin_unlock(&dtl->lock);
+
+        if (n_req <= 0)
+                return 0;
 
-        cur_idx %= dtl->buf_entries;
-        last_idx %= dtl->buf_entries;
+        i = last_idx % dtl->buf_entries;
 
         /* read the tail of the buffer if we've wrapped */
-        if (last_idx > cur_idx) {
-                read_size = min(n_req, dtl->buf_entries - last_idx);
+        if (i + n_req > dtl->buf_entries) {
+                read_size = dtl->buf_entries - i;
 
-                rc = copy_to_user(buf, &dtl->buf[last_idx],
+                rc = copy_to_user(buf, &dtl->buf[i],
                                 read_size * sizeof(struct dtl_entry));
                 if (rc)
                         return -EFAULT;
 
-                last_idx = 0;
+                i = 0;
                 n_req -= read_size;
                 n_read += read_size;
                 buf += read_size * sizeof(struct dtl_entry);
         }
 
         /* .. and now the head */
-        read_size = min(n_req, cur_idx - last_idx);
-        rc = copy_to_user(buf, &dtl->buf[last_idx],
-                        read_size * sizeof(struct dtl_entry));
+        rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
         if (rc)
                 return -EFAULT;
 
-        n_read += read_size;
-        dtl->last_idx += n_read;
+        n_read += n_req;
 
         return n_read * sizeof(struct dtl_entry);
 }
@@ -263,6 +376,7 @@ static int dtl_init(void)
         /* set up the per-cpu log structures */
         for_each_possible_cpu(i) {
                 struct dtl *dtl = &per_cpu(cpu_dtl, i);
+                spin_lock_init(&dtl->lock);
                 dtl->cpu = i;
 
                 rc = dtl_setup_file(dtl);
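
The rewritten read path above treats last_idx and cur_idx as monotonically increasing event counters; only the low (index % buf_entries) slots are resident, so a read may split into a tail copy and a head copy. A hypothetical stand-alone model of that index arithmetic (userspace C, plain memcpy() in place of copy_to_user(), locking omitted):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Model of dtl_file_read()'s ring math: r->buf holds the most recent
 * r->n entries; entry k (a global, ever-growing index) lives in slot
 * k % r->n.  Names here are illustrative. */
struct ring {
        uint64_t *buf;
        size_t n;
};

static size_t ring_read(const struct ring *r, uint64_t last_idx,
                        uint64_t cur_idx, uint64_t *out, size_t n_req)
{
        size_t i, read_size, n_read = 0;

        /* writer lapped the reader: skip to the oldest surviving entry */
        if (last_idx + r->n <= cur_idx)
                last_idx = cur_idx - r->n + 1;

        /* never hand out entries that have not been written yet */
        if (last_idx + n_req > cur_idx)
                n_req = cur_idx - last_idx;
        if (n_req == 0)
                return 0;

        i = last_idx % r->n;

        /* tail of the buffer first if the span wraps ... */
        if (i + n_req > r->n) {
                read_size = r->n - i;
                memcpy(out, &r->buf[i], read_size * sizeof(*out));
                i = 0;
                n_req -= read_size;
                n_read += read_size;
                out += read_size;
        }

        /* ... and now the head */
        memcpy(out, &r->buf[i], n_req * sizeof(*out));
        return n_read + n_req;
}
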
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index cf79b46d8f8..f129040d974 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -248,11 +248,13 @@ void vpa_init(int cpu)
         int hwcpu = get_hard_smp_processor_id(cpu);
         unsigned long addr;
         long ret;
+        struct paca_struct *pp;
+        struct dtl_entry *dtl;
 
         if (cpu_has_feature(CPU_FTR_ALTIVEC))
-                lppaca[cpu].vmxregs_in_use = 1;
+                lppaca_of(cpu).vmxregs_in_use = 1;
 
-        addr = __pa(&lppaca[cpu]);
+        addr = __pa(&lppaca_of(cpu));
         ret = register_vpa(hwcpu, addr);
 
         if (ret) {
@@ -274,6 +276,25 @@ void vpa_init(int cpu)
274 "registration for cpu %d (hw %d) of area %lx " 276 "registration for cpu %d (hw %d) of area %lx "
275 "returns %ld\n", cpu, hwcpu, addr, ret); 277 "returns %ld\n", cpu, hwcpu, addr, ret);
276 } 278 }
279
280 /*
281 * Register dispatch trace log, if one has been allocated.
282 */
283 pp = &paca[cpu];
284 dtl = pp->dispatch_log;
285 if (dtl) {
286 pp->dtl_ridx = 0;
287 pp->dtl_curr = dtl;
288 lppaca_of(cpu).dtl_idx = 0;
289
290 /* hypervisor reads buffer length from this field */
291 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
292 ret = register_dtl(hwcpu, __pa(dtl));
293 if (ret)
294 pr_warn("DTL registration failed for cpu %d (%ld)\n",
295 cpu, ret);
296 lppaca_of(cpu).dtl_enable_mask = 2;
297 }
277} 298}
278 299
279static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 300static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
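
register_dtl() above relies on the hypervisor reading the buffer length from the second 32-bit word of the area being registered. That lines up with the dispatch trace log entry layout, which this series moves out of dtl.c (reproduced below from the lines deleted there; apparently relocated to <asm/lppaca.h>, which dtl.c now includes). The u8 + u8 + u16 header fills the first word, so enqueue_to_dispatch_time is exactly the second word of the first entry:

/* struct dtl_entry, as removed from dtl.c in this series.  Bytes 0-3 hold
 * dispatch_reason, preempt_reason and processor_id; bytes 4-7 -- the second
 * 32-bit word -- hold enqueue_to_dispatch_time, the field register_dtl()
 * expects the buffer length in at registration time. */
struct dtl_entry {
        u8      dispatch_reason;
        u8      preempt_reason;
        u16     processor_id;
        u32     enqueue_to_dispatch_time;       /* buffer length on registration */
        u32     ready_to_enqueue_time;
        u32     waiting_to_ready_time;
        u64     timebase;
        u64     fault_addr;
        u64     srr0;
        u64     srr1;
};
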
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
new file mode 100644
index 00000000000..3e7f651e50a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -0,0 +1,362 @@
+/*
+ * Support for Partition Mobility/Migration
+ *
+ * Copyright (C) 2010 Nathan Fontenot
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <asm/rtas.h>
+#include "pseries.h"
+
+static struct kobject *mobility_kobj;
+
+struct update_props_workarea {
+        u32 phandle;
+        u32 state;
+        u64 reserved;
+        u32 nprops;
+};
+
+#define NODE_ACTION_MASK        0xff000000
+#define NODE_COUNT_MASK         0x00ffffff
+
+#define DELETE_DT_NODE  0x01000000
+#define UPDATE_DT_NODE  0x02000000
+#define ADD_DT_NODE     0x03000000
+
+static int mobility_rtas_call(int token, char *buf)
+{
+        int rc;
+
+        spin_lock(&rtas_data_buf_lock);
+
+        memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
+        rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1);
+        memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
+
+        spin_unlock(&rtas_data_buf_lock);
+        return rc;
+}
+
+static int delete_dt_node(u32 phandle)
+{
+        struct device_node *dn;
+
+        dn = of_find_node_by_phandle(phandle);
+        if (!dn)
+                return -ENOENT;
+
+        dlpar_detach_node(dn);
+        return 0;
+}
+
+static int update_dt_property(struct device_node *dn, struct property **prop,
+                              const char *name, u32 vd, char *value)
+{
+        struct property *new_prop = *prop;
+        struct property *old_prop;
+        int more = 0;
+
+        /* A negative 'vd' value indicates that only part of the new property
+         * value is contained in the buffer and we need to call
+         * ibm,update-properties again to get the rest of the value.
+         *
+         * A negative value is also the two's complement of the actual value.
+         */
+        if (vd & 0x80000000) {
+                vd = ~vd + 1;
+                more = 1;
+        }
+
+        if (new_prop) {
+                /* partial property fixup */
+                char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
+                if (!new_data)
+                        return -ENOMEM;
+
+                memcpy(new_data, new_prop->value, new_prop->length);
+                memcpy(new_data + new_prop->length, value, vd);
+
+                kfree(new_prop->value);
+                new_prop->value = new_data;
+                new_prop->length += vd;
+        } else {
+                new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+                if (!new_prop)
+                        return -ENOMEM;
+
+                new_prop->name = kstrdup(name, GFP_KERNEL);
+                if (!new_prop->name) {
+                        kfree(new_prop);
+                        return -ENOMEM;
+                }
+
+                new_prop->length = vd;
+                new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
+                if (!new_prop->value) {
+                        kfree(new_prop->name);
+                        kfree(new_prop);
+                        return -ENOMEM;
+                }
+
+                memcpy(new_prop->value, value, vd);
+                *prop = new_prop;
+        }
+
+        if (!more) {
+                old_prop = of_find_property(dn, new_prop->name, NULL);
+                if (old_prop)
+                        prom_update_property(dn, new_prop, old_prop);
+                else
+                        prom_add_property(dn, new_prop);
+
+                new_prop = NULL;
+        }
+
+        return 0;
+}
+
+static int update_dt_node(u32 phandle)
+{
+        struct update_props_workarea *upwa;
+        struct device_node *dn;
+        struct property *prop = NULL;
+        int i, rc;
+        char *prop_data;
+        char *rtas_buf;
+        int update_properties_token;
+
+        update_properties_token = rtas_token("ibm,update-properties");
+        if (update_properties_token == RTAS_UNKNOWN_SERVICE)
+                return -EINVAL;
+
+        rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+        if (!rtas_buf)
+                return -ENOMEM;
+
+        dn = of_find_node_by_phandle(phandle);
+        if (!dn) {
+                kfree(rtas_buf);
+                return -ENOENT;
+        }
+
+        upwa = (struct update_props_workarea *)&rtas_buf[0];
+        upwa->phandle = phandle;
+
+        do {
+                rc = mobility_rtas_call(update_properties_token, rtas_buf);
+                if (rc < 0)
+                        break;
+
+                prop_data = rtas_buf + sizeof(*upwa);
+
+                for (i = 0; i < upwa->nprops; i++) {
+                        char *prop_name;
+                        u32 vd;
+
+                        prop_name = prop_data + 1;
+                        prop_data += strlen(prop_name) + 1;
+                        vd = *prop_data++;
+
+                        switch (vd) {
+                        case 0x00000000:
+                                /* name only property, nothing to do */
+                                break;
+
+                        case 0x80000000:
+                                prop = of_find_property(dn, prop_name, NULL);
+                                prom_remove_property(dn, prop);
+                                prop = NULL;
+                                break;
+
+                        default:
+                                rc = update_dt_property(dn, &prop, prop_name,
+                                                        vd, prop_data);
+                                if (rc) {
+                                        printk(KERN_ERR "Could not update %s"
+                                               " property\n", prop_name);
+                                }
+
+                                prop_data += vd;
+                        }
+                }
+        } while (rc == 1);
+
+        of_node_put(dn);
+        kfree(rtas_buf);
+        return 0;
+}
+
+static int add_dt_node(u32 parent_phandle, u32 drc_index)
+{
+        struct device_node *dn;
+        struct device_node *parent_dn;
+        int rc;
+
+        dn = dlpar_configure_connector(drc_index);
+        if (!dn)
+                return -ENOENT;
+
+        parent_dn = of_find_node_by_phandle(parent_phandle);
+        if (!parent_dn) {
+                dlpar_free_cc_nodes(dn);
+                return -ENOENT;
+        }
+
+        dn->parent = parent_dn;
+        rc = dlpar_attach_node(dn);
+        if (rc)
+                dlpar_free_cc_nodes(dn);
+
+        of_node_put(parent_dn);
+        return rc;
+}
+
+static int pseries_devicetree_update(void)
+{
+        char *rtas_buf;
+        u32 *data;
+        int update_nodes_token;
+        int rc;
+
+        update_nodes_token = rtas_token("ibm,update-nodes");
+        if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
+                return -EINVAL;
+
+        rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+        if (!rtas_buf)
+                return -ENOMEM;
+
+        do {
+                rc = mobility_rtas_call(update_nodes_token, rtas_buf);
+                if (rc && rc != 1)
+                        break;
+
+                data = (u32 *)rtas_buf + 4;
+                while (*data & NODE_ACTION_MASK) {
+                        int i;
+                        u32 action = *data & NODE_ACTION_MASK;
+                        int node_count = *data & NODE_COUNT_MASK;
+
+                        data++;
+
+                        for (i = 0; i < node_count; i++) {
+                                u32 phandle = *data++;
+                                u32 drc_index;
+
+                                switch (action) {
+                                case DELETE_DT_NODE:
+                                        delete_dt_node(phandle);
+                                        break;
+                                case UPDATE_DT_NODE:
+                                        update_dt_node(phandle);
+                                        break;
+                                case ADD_DT_NODE:
+                                        drc_index = *data++;
+                                        add_dt_node(phandle, drc_index);
+                                        break;
+                                }
+                        }
+                }
+        } while (rc == 1);
+
+        kfree(rtas_buf);
+        return rc;
+}
+
+void post_mobility_fixup(void)
+{
+        int rc;
+        int activate_fw_token;
+
+        rc = pseries_devicetree_update();
+        if (rc) {
+                printk(KERN_ERR "Initial post-mobility device tree update "
+                       "failed: %d\n", rc);
+                return;
+        }
+
+        activate_fw_token = rtas_token("ibm,activate-firmware");
+        if (activate_fw_token == RTAS_UNKNOWN_SERVICE) {
+                printk(KERN_ERR "Could not make post-mobility "
+                       "activate-fw call.\n");
+                return;
+        }
+
+        rc = rtas_call(activate_fw_token, 0, 1, NULL);
+        if (!rc) {
+                rc = pseries_devicetree_update();
+                if (rc)
+                        printk(KERN_ERR "Secondary post-mobility device tree "
+                               "update failed: %d\n", rc);
+        } else {
+                printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+                return;
+        }
+
+        return;
+}
+
+static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
+                             const char *buf, size_t count)
+{
+        struct rtas_args args;
+        u64 streamid;
+        int rc;
+
+        rc = strict_strtoull(buf, 0, &streamid);
+        if (rc)
+                return rc;
+
+        memset(&args, 0, sizeof(args));
+        args.token = rtas_token("ibm,suspend-me");
+        args.nargs = 2;
+        args.nret = 1;
+
+        args.args[0] = streamid >> 32;
+        args.args[1] = streamid & 0xffffffff;
+        args.rets = &args.args[args.nargs];
+
+        do {
+                args.rets[0] = 0;
+                rc = rtas_ibm_suspend_me(&args);
+                if (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE)
+                        ssleep(1);
+        } while (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE);
+
+        if (rc)
+                return rc;
+        else if (args.rets[0])
+                return args.rets[0];
+
+        post_mobility_fixup();
+        return count;
+}
+
+static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store);
+
+static int __init mobility_sysfs_init(void)
+{
+        int rc;
+
+        mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
+        if (!mobility_kobj)
+                return -ENOMEM;
+
+        rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
+
+        return rc;
+}
+device_initcall(mobility_sysfs_init);
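
The new file above exposes a single write-only attribute, /sys/kernel/mobility/migration: management tooling writes the 64-bit migration stream id, migrate_store() retries ibm,suspend-me while the partition reports RTAS_NOT_SUSPENDABLE, and post_mobility_fixup() reconciles the device tree after resume on the destination. A hypothetical userspace trigger for the interface (the stream id value itself comes from the platform's management tooling, not from this sketch):

#include <stdio.h>

int main(int argc, char **argv)
{
        FILE *f;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <streamid>\n", argv[0]);
                return 1;
        }

        f = fopen("/sys/kernel/mobility/migration", "w");
        if (!f) {
                perror("/sys/kernel/mobility/migration");
                return 1;
        }

        /* migrate_store() parses the value with strict_strtoull(buf, 0, ...),
         * so decimal or 0x-prefixed hex both work */
        fprintf(f, "%s\n", argv[1]);

        /* the write completes only after the suspend/resume cycle finishes */
        return fclose(f) ? 1 : 0;
}
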
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 40c93cad91d..e9f6d2859c3 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -17,6 +17,8 @@ struct device_node;
 extern void request_event_sources_irqs(struct device_node *np,
                         irq_handler_t handler, const char *name);
 
+#include <linux/of.h>
+
 extern void __init fw_feature_init(const char *hypertas, unsigned long len);
 
 struct pt_regs;
@@ -47,4 +49,11 @@ extern unsigned long rtas_poweron_auto;
 
 extern void find_udbg_vterm(void);
 
+/* Dynamic logical Partitioning/Mobility */
+extern void dlpar_free_cc_nodes(struct device_node *);
+extern void dlpar_free_cc_property(struct property *);
+extern struct device_node *dlpar_configure_connector(u32);
+extern int dlpar_attach_node(struct device_node *);
+extern int dlpar_detach_node(struct device_node *);
+
 #endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a6d19e3a505..d345bfd56bb 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -273,6 +273,58 @@ static struct notifier_block pci_dn_reconfig_nb = {
         .notifier_call = pci_dn_reconfig_notifier,
 };
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Allocate space for the dispatch trace log for all possible cpus
+ * and register the buffers with the hypervisor.  This is used for
+ * computing time stolen by the hypervisor.
+ */
+static int alloc_dispatch_logs(void)
+{
+        int cpu, ret;
+        struct paca_struct *pp;
+        struct dtl_entry *dtl;
+
+        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+                return 0;
+
+        for_each_possible_cpu(cpu) {
+                pp = &paca[cpu];
+                dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
+                                   cpu_to_node(cpu));
+                if (!dtl) {
+                        pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
+                                cpu);
+                        pr_warn("Stolen time statistics will be unreliable\n");
+                        break;
+                }
+
+                pp->dtl_ridx = 0;
+                pp->dispatch_log = dtl;
+                pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
+                pp->dtl_curr = dtl;
+        }
+
+        /* Register the DTL for the current (boot) cpu */
+        dtl = get_paca()->dispatch_log;
+        get_paca()->dtl_ridx = 0;
+        get_paca()->dtl_curr = dtl;
+        get_paca()->lppaca_ptr->dtl_idx = 0;
+
+        /* hypervisor reads buffer length from this field */
+        dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+        ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
+        if (ret)
+                pr_warn("DTL registration failed for boot cpu %d (%d)\n",
+                        smp_processor_id(), ret);
+        get_paca()->lppaca_ptr->dtl_enable_mask = 2;
+
+        return 0;
+}
+
+early_initcall(alloc_dispatch_logs);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 static void __init pSeries_setup_arch(void)
 {
         /* Discover PIC type and setup ppc_md accordingly */
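
alloc_dispatch_logs() sizes each per-cpu buffer in bytes (DISPATCH_LOG_BYTES) but bounds it in entries (N_DISPATCH_LOG); both constants live in <asm/lppaca.h> after this series. Assuming the one-page definition used there (an assumption, shown only for orientation):

/* Assumed definitions from <asm/lppaca.h>: one 4 KB page per cpu. */
#define DISPATCH_LOG_BYTES      4096
#define N_DISPATCH_LOG          (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))

/* With the 48-byte struct dtl_entry shown earlier, N_DISPATCH_LOG works
 * out to 85 entries per page -- which would also explain dtl.c's default
 * dtl_buf_entries of (16 * 85): sixteen hypervisor buffers' worth. */
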
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 67e2c4bdac8..7b96e5a270c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -178,7 +178,7 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
         if (!distribute_irqs)
                 return default_server;
 
-        if (!cpumask_equal(cpumask, cpu_all_mask)) {
+        if (!cpumask_subset(cpu_possible_mask, cpumask)) {
                 int server = cpumask_first_and(cpu_online_mask, cpumask);
 
                 if (server < nr_cpu_ids)
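
The xics.c change swaps an exact-equality test for a superset test: any affinity mask that covers every possible CPU should fall through to the default distribution server, even when it is not bitwise-equal to cpu_all_mask (which has all NR_CPUS bits set, not just the possible ones). An illustrative restatement of the two predicates (helper names are hypothetical):

#include <linux/cpumask.h>

/* Old test: only the literal all-CPUs mask routed to the default server. */
static bool routes_to_default_old(const struct cpumask *affinity)
{
        return cpumask_equal(affinity, cpu_all_mask);
}

/* New test: any mask containing every possible CPU counts as "all",
 * so the interrupt stays with the global distribution server. */
static bool routes_to_default_new(const struct cpumask *affinity)
{
        return cpumask_subset(cpu_possible_mask, affinity);
}
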