author		Heiko Carstens <heiko.carstens@de.ibm.com>	2006-12-08 09:56:07 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-12-08 09:56:07 -0500
commit		f4eb07c17df2e6cf9bd58bfcd9cc9e05e9489d07 (patch)
tree		c1b4b422d3b8183edf452cc745dadd0fe129018b /include/asm-s390/page.h
parent		7f090145a14afc35844dce80174c9c24f9e66ec5 (diff)
[S390] Virtual memmap for s390.
Virtual memmap support for s390. Inspired by the ia64 implementation.
Unlike ia64, we need a mechanism that allows us to dynamically attach
shared memory regions.
These memory regions are accessed via the dcss device driver. dcss
implements the 'direct_access' operation, which requires a struct page
for every single shared page.
Therefore this implementation provides an interface to attach/detach
shared memory:
int add_shared_memory(unsigned long start, unsigned long size);
int remove_shared_memory(unsigned long start, unsigned long size);
The purpose of the add_shared_memory function is to add the given
memory range to the 1:1 mapping and to make sure that the
corresponding range in the vmemmap is backed with physical pages.
It also initialises the new struct pages.
remove_shared_memory in turn only invalidates the page table
entries in the 1:1 mapping. The page tables and the memory used for
struct pages in the vmemmap are currently not freed. They will be
reused when the next segment is attached.
Given that the maximum size of a shared memory region is 2GB, and that
in addition all regions must reside below 2GB, this is not too much of
a restriction, but there is room for improvement.
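
As an illustration, here is a minimal sketch of how a dcss-style caller
might use this interface. The dcss_example_* names, the error handling
and the segment parameters are hypothetical; only add_shared_memory()
and remove_shared_memory() come from this patch.

/* Hypothetical caller: attach a shared segment. */
static int dcss_example_attach(unsigned long start, unsigned long size)
{
	int rc;

	/* Add the range to the 1:1 mapping and back the corresponding
	 * vmemmap range with physical pages; this also initialises the
	 * new struct pages. */
	rc = add_shared_memory(start, size);
	if (rc)
		return rc;

	/* ... register the block device, set up direct_access, etc. ... */
	return 0;
}

/* Hypothetical caller: detach the segment again. */
static void dcss_example_detach(unsigned long start, unsigned long size)
{
	/* Only invalidates the 1:1 mapping; page tables and vmemmap
	 * struct pages stay allocated for reuse by the next segment. */
	remove_shared_memory(start, size);
}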
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'include/asm-s390/page.h')
 -rw-r--r--  include/asm-s390/page.h  |  22
 1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 363ea761d5e..05ea6f17278 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -127,6 +127,26 @@ page_get_storage_key(unsigned long addr)
 	return skey;
 }
 
+extern unsigned long max_pfn;
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	unsigned long dummy;
+	int ccode;
+
+	if (pfn >= max_pfn)
+		return 0;
+
+	asm volatile(
+		"	lra	%0,0(%2)\n"
+		"	ipm	%1\n"
+		"	srl	%1,28\n"
+		: "=d" (dummy), "=d" (ccode)
+		: "a" (pfn << PAGE_SHIFT)
+		: "cc");
+	return !ccode;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 /* to align the pointer to the (next) page boundary */
@@ -138,8 +158,6 @@ page_get_storage_key(unsigned long addr)
 #define __va(x)			(void *)(unsigned long)(x)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
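
For readers not familiar with the s390 instructions used in the new
pfn_valid(), the sketch below is an annotated restatement of the same
code, not additional code from the patch: LRA asks the hardware to
translate the address and reports the result in the condition code,
which IPM and SRL then extract into a general register.

/* Annotated restatement of the pfn_valid() added above. */
static inline int pfn_valid(unsigned long pfn)
{
	unsigned long dummy;
	int ccode;

	/* Frames at or beyond max_pfn are never valid. */
	if (pfn >= max_pfn)
		return 0;

	asm volatile(
		/* lra: load the real address for pfn << PAGE_SHIFT; the
		 * condition code is 0 only if a translation exists. */
		"	lra	%0,0(%2)\n"
		/* ipm: copy the condition code (and program mask) into
		 * the high byte of the 32-bit word in %1. */
		"	ipm	%1\n"
		/* srl: shift right by 28 so %1 holds just the condition
		 * code (0..3). */
		"	srl	%1,28\n"
		: "=d" (dummy), "=d" (ccode)
		: "a" (pfn << PAGE_SHIFT)
		: "cc");

	/* The pfn is valid iff lra succeeded (condition code 0). */
	return !ccode;
}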