path: root/include/asm-s390/pgtable.h
author		Gerald Schaefer <geraldsc@de.ibm.com>	2007-02-05 15:18:17 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-02-05 15:18:17 -0500
commit		c1821c2e9711adc3cd298a16b7237c92a2cee78d (patch)
tree		9155b089db35a37d95863125ea4c5f918bd7801b /include/asm-s390/pgtable.h
parent		86aa9fc2456d8a662f299a70bdb70987209170f0 (diff)
[S390] noexec protection
This provides a noexec protection on s390 hardware. Our hardware does not have any bits left in the pte for a hw noexec bit, so this is a different approach using shadow page tables and a special addressing mode that allows separate address spaces for code and data.

As a special feature of our "secondary-space" addressing mode, separate page tables can be specified for the translation of data addresses (storage operands) and instruction addresses. The shadow page table is used for the instruction addresses and the standard page table for the data addresses.

The shadow page table is linked to the standard page table by a pointer in page->lru.next of the struct page corresponding to the page that contains the standard page table (page->private is not really private with the pte_lock, and the page table pages are not in the LRU list).

Depending on the software bits of a pte, it is either inserted into both page tables or just into the standard (data) page table. Pages of a vma that does not have the VM_EXEC bit set get mapped only into the data address space. Any attempt to execute code on such a page causes a page translation exception.

The standard reaction to this is a SIGSEGV, with two exceptions: the two system call opcodes 0x0a77 (sys_sigreturn) and 0x0aad (sys_rt_sigreturn) are allowed. They are stored by the kernel to the signal stack frame. Unfortunately, the signal return mechanism cannot be modified to use an SA_RESTORER because the exception unwinding code depends on the system call opcode stored behind the signal stack frame.

This feature requires that user space is executed in secondary-space mode and the kernel in home-space mode, which means that the addressing modes need to be switched and that the noexec protection only works for user space. After switching the addressing modes, we can no longer use the mvcp/mvcs instructions to copy between kernel and user space. A new mvcos instruction has been added to the z9 EC/BC hardware which allows copying between arbitrary address spaces, but on older hardware the page tables need to be walked manually.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
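To make the chain from vma flags to the shadow page table explicit, here is a small illustrative sketch. It is not part of this patch and the helper name is made up for illustration; it only restates how the generic protection_map mechanism and the set_pte() logic in the diff below fit together: a vma with VM_EXEC gets one of the new PAGE_EX_* protections from the __P/__S table, those protections carry the software execute bit (_PAGE_SWX), and set_pte() only mirrors ptes with that bit into the instruction (shadow) page table.

/*
 * Illustration only -- not part of the patch.  The generic mm code picks
 * vma->vm_page_prot from protection_map[], which is built from the
 * __P000..__S111 entries changed below.  With this patch the VM_EXEC
 * combinations map to PAGE_EX_RO/PAGE_EX_RW, i.e. the pte carries the
 * software execute bit, and set_pte() (see the diff) then also enters it
 * read-only into the shadow page table; all other ptes get an empty
 * shadow entry, so instruction fetches from them fault.
 */
static int pte_mirrored_into_shadow_table(unsigned long vm_flags)
{
	pgprot_t prot = protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];

	/* PAGE_EX_RO/PAGE_EX_RW carry _PAGE_SWX, PAGE_RO/PAGE_RW do not */
	return (pgprot_val(prot) & _PAGE_SWX) != 0;
}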
Diffstat (limited to 'include/asm-s390/pgtable.h')
-rw-r--r--	include/asm-s390/pgtable.h	146
1 file changed, 130 insertions(+), 16 deletions(-)
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 304ee7736413..13c16546eff5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -224,6 +224,8 @@ extern unsigned long vmalloc_end;
 #define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
 #define _PAGE_TYPE_RO		0x200
 #define _PAGE_TYPE_RW		0x000
+#define _PAGE_TYPE_EX_RO	0x202
+#define _PAGE_TYPE_EX_RW	0x002
 
 /*
  * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
@@ -244,11 +246,13 @@ extern unsigned long vmalloc_end;
  * _PAGE_TYPE_FILE	11?1 -> 11?1
  * _PAGE_TYPE_RO	0100 -> 1100
  * _PAGE_TYPE_RW	0000 -> 1000
+ * _PAGE_TYPE_EX_RO	0110 -> 1110
+ * _PAGE_TYPE_EX_RW	0010 -> 1010
  *
- * pte_none is true for bits combinations 1000, 1100
+ * pte_none is true for bits combinations 1000, 1010, 1100, 1110
  * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
  * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid.
+ * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
 #ifndef __s390x__
@@ -313,33 +317,100 @@ extern unsigned long vmalloc_end;
 #define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
 #define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
 #define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
+#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
+#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)
 
 #define PAGE_KERNEL	PAGE_RW
 #define PAGE_COPY	PAGE_RO
 
 /*
- * The S390 can't do page protection for execute, and considers that the
- * same are read. Also, write permissions imply read permissions. This is
- * the closest we can get..
+ * Dependent on the EXEC_PROTECT option s390 can do execute protection.
+ * Write permission always implies read permission. In theory with a
+ * primary/secondary page table execute only can be implemented but
+ * it would cost an additional bit in the pte to distinguish all the
+ * different pte types. To avoid that execute permission currently
+ * implies read permission as well.
  */
          /*xwr*/
 #define __P000	PAGE_NONE
 #define __P001	PAGE_RO
 #define __P010	PAGE_RO
 #define __P011	PAGE_RO
-#define __P100	PAGE_RO
-#define __P101	PAGE_RO
-#define __P110	PAGE_RO
-#define __P111	PAGE_RO
+#define __P100	PAGE_EX_RO
+#define __P101	PAGE_EX_RO
+#define __P110	PAGE_EX_RO
+#define __P111	PAGE_EX_RO
 
 #define __S000	PAGE_NONE
 #define __S001	PAGE_RO
 #define __S010	PAGE_RW
 #define __S011	PAGE_RW
-#define __S100	PAGE_RO
-#define __S101	PAGE_RO
-#define __S110	PAGE_RW
-#define __S111	PAGE_RW
+#define __S100	PAGE_EX_RO
+#define __S101	PAGE_EX_RO
+#define __S110	PAGE_EX_RW
+#define __S111	PAGE_EX_RW
+
+#ifndef __s390x__
+# define PMD_SHADOW_SHIFT	1
+# define PGD_SHADOW_SHIFT	1
+#else /* __s390x__ */
+# define PMD_SHADOW_SHIFT	2
+# define PGD_SHADOW_SHIFT	2
+#endif /* __s390x__ */
+
+static inline struct page *get_shadow_page(struct page *page)
+{
+	if (s390_noexec && !list_empty(&page->lru))
+		return virt_to_page(page->lru.next);
+	return NULL;
+}
+
+static inline pte_t *get_shadow_pte(pte_t *ptep)
+{
+	unsigned long pteptr = (unsigned long) (ptep);
+
+	if (s390_noexec) {
+		unsigned long offset = pteptr & (PAGE_SIZE - 1);
+		void *addr = (void *) (pteptr ^ offset);
+		struct page *page = virt_to_page(addr);
+		if (!list_empty(&page->lru))
+			return (pte_t *) ((unsigned long) page->lru.next |
+								offset);
+	}
+	return NULL;
+}
+
+static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
+{
+	unsigned long pmdptr = (unsigned long) (pmdp);
+
+	if (s390_noexec) {
+		unsigned long offset = pmdptr &
+				((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
+		void *addr = (void *) (pmdptr ^ offset);
+		struct page *page = virt_to_page(addr);
+		if (!list_empty(&page->lru))
+			return (pmd_t *) ((unsigned long) page->lru.next |
+								offset);
+	}
+	return NULL;
+}
+
+static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
+{
+	unsigned long pgdptr = (unsigned long) (pgdp);
+
+	if (s390_noexec) {
+		unsigned long offset = pgdptr &
+				((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
+		void *addr = (void *) (pgdptr ^ offset);
+		struct page *page = virt_to_page(addr);
+		if (!list_empty(&page->lru))
+			return (pgd_t *) ((unsigned long) page->lru.next |
+								offset);
+	}
+	return NULL;
+}
 
 /*
  * Certain architectures need to do special things when PTEs
@@ -348,7 +419,16 @@ extern unsigned long vmalloc_end;
  */
 static inline void set_pte(pte_t *pteptr, pte_t pteval)
 {
+	pte_t *shadow_pte = get_shadow_pte(pteptr);
+
 	*pteptr = pteval;
+	if (shadow_pte) {
+		if (!(pte_val(pteval) & _PAGE_INVALID) &&
+		    (pte_val(pteval) & _PAGE_SWX))
+			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
+		else
+			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+	}
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
@@ -466,7 +546,7 @@ static inline int pte_read(pte_t pte)
 
 static inline void pgd_clear(pgd_t * pgdp) { }
 
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
 	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
@@ -474,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp)
 	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
 }
 
+static inline void pmd_clear(pmd_t * pmdp)
+{
+	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+
+	pmd_clear_kernel(pmdp);
+	if (shadow_pmd)
+		pmd_clear_kernel(shadow_pmd);
+}
+
 #else /* __s390x__ */
 
-static inline void pgd_clear(pgd_t * pgdp)
+static inline void pgd_clear_kernel(pgd_t * pgdp)
 {
 	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
 }
 
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pgd_clear(pgd_t * pgdp)
+{
+	pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
+
+	pgd_clear_kernel(pgdp);
+	if (shadow_pgd)
+		pgd_clear_kernel(shadow_pgd);
+}
+
+static inline void pmd_clear_kernel(pmd_t * pmdp)
 {
 	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
 	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
 }
 
+static inline void pmd_clear(pmd_t * pmdp)
+{
+	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
+
+	pmd_clear_kernel(pmdp);
+	if (shadow_pmd)
+		pmd_clear_kernel(shadow_pmd);
+}
+
 #endif /* __s390x__ */
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	pte_t *shadow_pte = get_shadow_pte(ptep);
+
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	if (shadow_pte)
+		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
 }
 
 /*
@@ -609,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
+	pte_t *shadow_pte = get_shadow_pte(ptep);
 
 	__ptep_ipte(address, ptep);
+	if (shadow_pte)
+		__ptep_ipte(address, shadow_pte);
 	return pte;
 }
 