author		Ingo Molnar <mingo@kernel.org>		2013-04-21 04:57:33 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-04-21 04:57:33 -0400
commit		73e21ce28d8d2b75140b742b01373c3a085ecc52
tree		add380154271f9a68be4099590f912b4ab4bed9f /arch/s390
parent		b5210b2a34bae35fc00675462333af45676d727c
parent		f1923820c447e986a9da0fc6bf60c1dccdf0408e
Merge branch 'perf/urgent' into perf/core
Conflicts:
arch/x86/kernel/cpu/perf_event_intel.c
Merge in the latest fixes before applying new patches; resolve the conflict.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/pgtable.h	 4
-rw-r--r--	arch/s390/lib/uaccess_pt.c	83
2 files changed, 58 insertions, 29 deletions
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a2930844d43..4a5443118cfb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -344,6 +344,7 @@ extern unsigned long MODULES_END;
 #define _REGION3_ENTRY_CO	0x100	/* change-recording override */
 
 /* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
 #define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
 #define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
@@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void);
 /*
  * No page table caches to initialise
  */
-#define pgtable_cache_init()	do { } while (0)
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
 
 #include <asm-generic/pgtable.h>
 
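For illustration only: a minimal user-space sketch (not kernel code) of the mask arithmetic the new _SEGMENT_ENTRY_ORIGIN_LARGE define enables. The mask keeps the 1 MB-aligned large-page origin stored in a segment table entry, and its complement selects the byte offset within that large page, which is how follow_table() in the uaccess_pt.c hunk below combines the two. All names and values in the sketch are made up.

/*
 * Sketch only: split an address into a 1 MB large-page origin and the
 * byte offset within it, using a mask shaped like _SEGMENT_ENTRY_ORIGIN_LARGE.
 */
#include <stdio.h>

#define SEGMENT_ORIGIN_LARGE_MASK (~0xfffffUL)	/* clears the low 20 bits (1 MB) */

int main(void)
{
	unsigned long entry   = 0x12345678UL;	/* made-up segment table entry */
	unsigned long address = 0xdeadbeefUL;	/* made-up virtual address */

	unsigned long origin = entry & SEGMENT_ORIGIN_LARGE_MASK;	/* 1 MB-aligned origin */
	unsigned long offset = address & ~SEGMENT_ORIGIN_LARGE_MASK;	/* low 20 bits */

	printf("large page origin 0x%lx + offset 0x%lx = 0x%lx\n",
	       origin, offset, origin + offset);
	return 0;
}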
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dff631d34b45..466fb3383960 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
  * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
  * contains the (negative) exception code.
  */
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
-						  unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+static unsigned long follow_table(struct mm_struct *mm,
+				  unsigned long address, int write)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
+	unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+	case _ASCE_TYPE_REGION1:
+		table = table + ((address >> 53) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV))
+			return -0x39UL;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	case _ASCE_TYPE_REGION2:
+		table = table + ((address >> 42) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV))
+			return -0x3aUL;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	case _ASCE_TYPE_REGION3:
+		table = table + ((address >> 31) & 0x7ff);
+		if (unlikely(*table & _REGION_ENTRY_INV))
+			return -0x3bUL;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+	case _ASCE_TYPE_SEGMENT:
+		table = table + ((address >> 20) & 0x7ff);
+		if (unlikely(*table & _SEGMENT_ENTRY_INV))
+			return -0x10UL;
+		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+			if (write && (*table & _SEGMENT_ENTRY_RO))
+				return -0x04UL;
+			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+		}
+		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+	}
+	table = table + ((address >> 12) & 0xff);
+	if (unlikely(*table & _PAGE_INVALID))
+		return -0x11UL;
+	if (write && (*table & _PAGE_RO))
+		return -0x04UL;
+	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
 
-	pgd = pgd_offset(mm, addr);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		return -0x3aUL;
+#else /* CONFIG_64BIT */
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		return -0x3bUL;
+static unsigned long follow_table(struct mm_struct *mm,
+				  unsigned long address, int write)
+{
+	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	table = table + ((address >> 20) & 0x7ff);
+	if (unlikely(*table & _SEGMENT_ENTRY_INV))
 		return -0x10UL;
-	if (pmd_large(*pmd)) {
-		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
-			return -0x04UL;
-		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
-	}
-	if (unlikely(pmd_bad(*pmd)))
-		return -0x10UL;
-
-	ptep = pte_offset_map(pmd, addr);
-	if (!pte_present(*ptep))
+	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+	table = table + ((address >> 12) & 0xff);
+	if (unlikely(*table & _PAGE_INVALID))
 		return -0x11UL;
-	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+	if (write && (*table & _PAGE_RO))
 		return -0x04UL;
-
-	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
 
+#endif /* CONFIG_64BIT */
+
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 					     size_t n, int write_user)
 {
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 
 static size_t clear_user_pt(size_t n, void __user *to)
 {
-	void *zpage = &empty_zero_page;
+	void *zpage = (void *) empty_zero_page;
 	long done, size, ret;
 
 	done = 0;
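For illustration only: the comment at the top of the follow_table() hunk above states the return convention — either a user page address, or, for values >= -4095, a negative exception code that IS_ERR_VALUE() recognizes. The stand-alone sketch below shows how a caller can tell the two cases apart; follow_table_stub(), the local IS_ERR_VALUE copy, and all values are made up for the example and are not the kernel implementation. Callers in uaccess_pt.c consume follow_table()'s result in essentially this way.

/*
 * Sketch only: distinguish a resolved address from a negative exception
 * code packed into an unsigned long, as described in the comment above.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

/* made-up stand-in for the real page table walk */
static unsigned long follow_table_stub(unsigned long address, int write)
{
	if (write)
		return -0x04UL;				/* e.g. protection exception */
	return 0x7f000UL | (address & 0xfffUL);		/* fake page address + offset */
}

int main(void)
{
	unsigned long pa = follow_table_stub(0x1234UL, 0);

	if (IS_ERR_VALUE(pa))
		printf("fault, exception code %ld\n", (long)pa);
	else
		printf("resolved to address 0x%lx\n", pa);
	return 0;
}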