author		Chris Metcalf <cmetcalf@tilera.com>	2012-03-29 13:58:43 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2012-05-25 12:48:24 -0400
commit		d5d14ed6f2db7287a5088e1350cf422bf72140b3 (patch)
tree		19f0bc20bb6f1995a1e4f75dc58e388c047f7d23 /arch/tile/kernel
parent		47d632f9f8f3ed62b21f725e98b726d65769b6d7 (diff)
arch/tile: Allow tilegx to build with either 16K or 64K page size
This change introduces new flags for the hv_install_context() API that passes a page table pointer to the hypervisor. Clients can explicitly request 4K, 16K, or 64K small pages when they install a new context. In practice, the page size is fixed at kernel compile time and the same size is always requested every time a new page table is installed.

The <hv/hypervisor.h> header changes so that it provides more abstract macros for managing "page" things like PFNs and page tables. For example there is now a HV_DEFAULT_PAGE_SIZE_SMALL instead of the old HV_PAGE_SIZE_SMALL. The various PFN routines have been eliminated and only PA- or PTFN-based ones remain (since PTFNs are always expressed in fixed 2KB "page" size). The page-table management macros are renamed with a leading underscore and take page-size arguments with the presumption that clients will use those macros in some single place to provide the "real" macros they will use themselves.

I happened to notice the old hv_set_caching() API was totally broken (it assumed 4KB pages) so I changed it so it would nominally work correctly with other page sizes.

Tag modules with the page size so you can't load a module built with a conflicting page size. (And add a test for SMP while we're at it.)

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
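As an illustration of the arithmetic described in the message, here is a minimal user-space C sketch. It assumes only what the message states: PTFNs are expressed in a fixed 2KB granule regardless of the small page size chosen at build time. The helper names and constants (PTFN_SHIFT, pa_to_ptfn, pfn_to_ptfn) are invented for this example and are not the actual <hv/hypervisor.h> macros.

/*
 * Sketch only: PTE frame numbers (PTFNs) use a fixed 2KB granule, so the
 * value encoded for a physical address is the same whether the kernel is
 * built with 4K, 16K, or 64K small pages; only the PFN<->PTFN conversion
 * shift depends on the compile-time page size.
 */
#include <stdint.h>
#include <stdio.h>

#define PTFN_SHIFT 11	/* 2KB = 2^11 bytes: fixed PTFN granule (per the message) */

static uint64_t pa_to_ptfn(uint64_t pa)   { return pa >> PTFN_SHIFT; }
static uint64_t ptfn_to_pa(uint64_t ptfn) { return ptfn << PTFN_SHIFT; }

/* The PFN<->PTFN shift is the only piece that depends on the kernel page size. */
static uint64_t pfn_to_ptfn(uint64_t pfn, int page_shift)
{
	return pfn << (page_shift - PTFN_SHIFT);
}

int main(void)
{
	uint64_t pa = 0x40000000ULL;	/* arbitrary 64K-aligned physical address */
	int shifts[] = { 12, 14, 16 };	/* 4K, 16K, 64K small pages */
	int i;

	printf("pa 0x%llx -> ptfn 0x%llx (independent of page size)\n",
	       (unsigned long long)pa, (unsigned long long)pa_to_ptfn(pa));

	for (i = 0; i < 3; i++) {
		uint64_t pfn = pa >> shifts[i];
		uint64_t ptfn = pfn_to_ptfn(pfn, shifts[i]);
		printf("page_shift %2d: pfn 0x%llx -> ptfn 0x%llx -> pa 0x%llx\n",
		       shifts[i], (unsigned long long)pfn,
		       (unsigned long long)ptfn,
		       (unsigned long long)ptfn_to_pa(ptfn));
	}
	return 0;
}

For a page-aligned physical address, every page-size build computes the same PTFN, which is consistent with the message's point that only PA- and PTFN-based conversion routines need to remain.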
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/head_32.S        8
-rw-r--r--  arch/tile/kernel/head_64.S       22
-rw-r--r--  arch/tile/kernel/machine_kexec.c  7
-rw-r--r--  arch/tile/kernel/setup.c          8
-rw-r--r--  arch/tile/kernel/smp.c            2
5 files changed, 25 insertions, 22 deletions
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c1c87e..f71bfeeaf1a9 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
 	}
 	{
 	  moveli lr, lo16(1f)
-	  move r5, zero
+	  moveli r5, CTX_PAGE_FLAG
 	}
 	{
 	  auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
 
 	.macro PTE va, cpa, bits1, no_org=0
 	.ifeq \no_org
-	.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
 	.endif
 	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
 	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+	.word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
 	.endm
 
 __PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
 	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
 	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
 			      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
-	.org swapper_pg_dir + HV_L1_SIZE
+	.org swapper_pg_dir + PGDIR_SIZE
 	END(swapper_pg_dir)
 
 	/*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a932fe45..f9a2734f7b82 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
 	  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
 	}
 	{
-	  move r3, zero
+	  moveli r3, CTX_PAGE_FLAG
 	  j hv_install_context
 	}
 1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
 	.macro PTE cpa, bits1
 	.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
 	      HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
-	      (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+	      (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
 	.endm
 
 __PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
-	.org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
 .Lsv_data_pmd:
 	.quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+	.org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
 .Lsv_code_pmd:
 	.quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
-	.org swapper_pg_dir + HV_L0_SIZE
+	.org swapper_pg_dir + SIZEOF_PGD
 	END(swapper_pg_dir)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_data_pmd + HV_L1_SIZE
+	.org temp_data_pmd + SIZEOF_PMD
 	END(temp_data_pmd)
 
 	.align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
 	 * permissions later.
 	 */
 	.set addr, 0
-	.rept HV_L1_ENTRIES
+	.rept PTRS_PER_PMD
 	PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
-	.set addr, addr + HV_PAGE_SIZE_LARGE
+	.set addr, addr + HPAGE_SIZE
 	.endr
-	.org temp_code_pmd + HV_L1_SIZE
+	.org temp_code_pmd + SIZEOF_PMD
 	END(temp_code_pmd)
 
 	/*
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2eab112..b0fa37c1a521 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -251,6 +251,7 @@ static void setup_quasi_va_is_pa(void)
 void machine_kexec(struct kimage *image)
 {
 	void *reboot_code_buffer;
+	pte_t *ptep;
 	void (*rnk)(unsigned long, void *, unsigned long)
 		__noreturn;
 
@@ -266,8 +267,10 @@ void machine_kexec(struct kimage *image)
 	 */
 	homecache_change_page_home(image->control_code_page, 0,
 				   smp_processor_id());
-	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
-				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+	reboot_code_buffer = page_address(image->control_code_page);
+	BUG_ON(reboot_code_buffer == NULL);
+	ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+	__set_pte(ptep, pte_mkexec(*ptep));
 	memcpy(reboot_code_buffer, relocate_new_kernel,
 	       relocate_new_kernel_size);
 	__flush_icache_range(
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index bff23f476110..32948e21113a 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1396,13 +1396,13 @@ void __init setup_per_cpu_areas(void)
 	for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
 
 		/* Update the vmalloc mapping and page home. */
-		pte_t *ptep =
-			virt_to_pte(NULL, (unsigned long)ptr + i);
+		unsigned long addr = (unsigned long)ptr + i;
+		pte_t *ptep = virt_to_pte(NULL, addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
 		pte = set_remote_cache_cpu(pte, cpu);
-		set_pte(ptep, pte);
+		set_pte_at(&init_mm, addr, ptep, pte);
 
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1415,7 +1415,7 @@ void __init setup_per_cpu_areas(void)
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
-		set_pte(ptep, pte);
+		set_pte_at(&init_mm, lowmem_va, ptep, pte);
 	}
 }
 
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f721958..cbc73a8b8fe1 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
 		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
 			panic("Failed to initialize IPI for cpu %d\n", cpu);
 
-		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		offset = PFN_PHYS(pte_pfn(pte));
 		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
 	}
 #endif