aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm
diff options
context:
space:
mode:
authorMichael Holzheu <holzheu@linux.vnet.ibm.com>2012-05-09 10:27:36 -0400
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2012-05-16 08:42:38 -0400
commitb2a68c235663c65365c4b4424c6e6c5ee90ae3a1 (patch)
tree2602a18a860e0dadd9a1332e6b9dd81f26ffb301 /arch/s390/mm
parent6022afc060425864c33f4ab62bbe41d20ac85362 (diff)
s390: allow absolute memory access for /dev/mem
Currently dev/mem for s390 provides only real memory access. This means that the CPU prefix pages are swapped. The prefix swap for real memory works as follows: Each CPU owns a prefix register that points to a page aligned memory location "P". If this CPU accesses the address range [0,0x1fff], it is translated by the hardware to [P,P+0x1fff]. Accordingly if this CPU accesses the address range [P,P+0x1fff], it is translated by the hardware to [0,0x1fff]. Therefore, if [P,P+0x1fff] or [0,0x1fff] is read from the current /dev/mem device, the incorrectly swapped memory content is returned. With this patch the /dev/mem architecture code is modified to provide absolute memory access. This is done via the arch specific functions xlate_dev_mem_ptr() and unxlate_dev_mem_ptr(). For swapped pages on s390 the function xlate_dev_mem_ptr() now returns a new buffer with a copy of the requested absolute memory. In case the buffer was allocated, the unxlate_dev_mem_ptr() function frees it after /dev/mem code has called copy_to_user(). Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--arch/s390/mm/maccess.c67
1 file changed, 67 insertions, 0 deletions
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index e1335dc2b1b..795a0a9bb2e 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -12,6 +12,7 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/gfp.h> 14#include <linux/gfp.h>
15#include <linux/cpu.h>
15#include <asm/ctl_reg.h> 16#include <asm/ctl_reg.h>
16 17
17/* 18/*
@@ -166,3 +167,69 @@ out:
166 free_page((unsigned long) buf); 167 free_page((unsigned long) buf);
167 return rc; 168 return rc;
168} 169}
170
171/*
172 * Check if physical address is within prefix or zero page
173 */
174static int is_swapped(unsigned long addr)
175{
176 unsigned long lc;
177 int cpu;
178
179 if (addr < sizeof(struct _lowcore))
180 return 1;
181 for_each_online_cpu(cpu) {
182 lc = (unsigned long) lowcore_ptr[cpu];
183 if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
184 continue;
185 return 1;
186 }
187 return 0;
188}
189
190/*
191 * Return swapped prefix or zero page address
192 */
193static unsigned long get_swapped(unsigned long addr)
194{
195 unsigned long prefix = store_prefix();
196
197 if (addr < sizeof(struct _lowcore))
198 return addr + prefix;
199 if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
200 return addr - prefix;
201 return addr;
202}
203
204/*
205 * Convert a physical pointer for /dev/mem access
206 *
207 * For swapped prefix pages a new buffer is returned that contains a copy of
208 * the absolute memory. The buffer size is maximum one page large.
209 */
210void *xlate_dev_mem_ptr(unsigned long addr)
211{
212 void *bounce = (void *) addr;
213 unsigned long size;
214
215 get_online_cpus();
216 preempt_disable();
217 if (is_swapped(addr)) {
218 size = PAGE_SIZE - (addr & ~PAGE_MASK);
219 bounce = (void *) __get_free_page(GFP_ATOMIC);
220 if (bounce)
221 memcpy_real(bounce, (void *) get_swapped(addr), size);
222 }
223 preempt_enable();
224 put_online_cpus();
225 return bounce;
226}
227
/*
 * Free a buffer returned by xlate_dev_mem_ptr(), if one was allocated.
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
	/* A bounce page was only allocated when buf differs from addr. */
	if (buf != (void *) addr)
		free_page((unsigned long) buf);
}