commit 9a94c5793a7b44720f19ebb71b636bc9c31b44d8
tree   df25ab16bb1e586d671160dd26d3117aa43d77cf
parent 26925c5910ab77fc95b4d8bb6d98780b50ab1e5a
author    David Gibson <david@gibson.dropbear.id.au>  2005-11-23 21:34:56 -0500
committer Paul Mackerras <paulus@samba.org>           2005-11-25 06:12:45 -0500
[PATCH] powerpc: More hugepage boundary case fixes
Blah. The patch [0] I recently sent fixing errors with
in_hugepage_area() and prepare_hugepage_range() for powerpc itself has
an off-by-one bug. Furthermore, the related functions
touches_hugepage_*_range() and within_hugepage_*_range() are also
buggy. Some of the bugs, like those addressed in [0], originated with
commit 7d24f0b8a53261709938ffabe3e00f88f6498df9 where we tweaked the
semantics of where hugepages are allowed. Other bugs have been there
essentially forever, and are due to the undefined behaviour of '<<'
with shift counts at or beyond the type width (LOW_ESID_MASK could
return non-zero for high ranges with the right congruences).
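
For illustration only, here is a minimal user-space sketch of that
shift-count hazard (not kernel code): SID_SHIFT and GET_ESID mirror the
powerpc definitions, while the sample address and the "count mod 32"
modelling are assumptions about what hardware does with an out-of-range
count.

	#include <stdio.h>

	#define SID_SHIFT	28			/* 256MB segments */
	#define GET_ESID(addr)	((addr) >> SID_SHIFT)

	int main(void)
	{
		unsigned long addr = 0x850000000UL;	/* well above 4GB */
		unsigned long esid = GET_ESID(addr);	/* 133, >= width of unsigned int */

		/*
		 * The old LOW_ESID_MASK effectively computed 1U << esid here,
		 * which is undefined once esid >= 32.  Hardware that reduces
		 * the shift count modulo the register width behaves like:
		 */
		unsigned int modelled = (1U << (esid % 32)) & 0xffff;

		printf("esid = %lu, modelled low-area bit = 0x%x\n", esid, modelled);
		return 0;
	}

With esid = 133 the modelled count is 5, so the "low" mask comes out as
0x20 even though the address is nowhere near the low 4GB region.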
The good news is that I now have a testsuite which should pick up
things like this if they creep in again.
[0] "powerpc-fix-for-hugepage-areas-straddling-4gb-boundary"
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
 arch/powerpc/mm/hugetlbpage.c |  2 +-
 include/asm-powerpc/page_64.h | 17 +++++++++++------
 2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f867bba893ca..6bc9dbad7dea 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -295,7 +295,7 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					   LOW_ESID_MASK(addr, len));
-	if ((addr + len) >= 0x100000000UL)
+	if ((addr + len) > 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					    HTLB_AREA_MASK(addr, len));
 	if (err) {
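
As a quick worked example of the boundary case this one-line change
fixes (the addr/len values here are illustrative, not from the patch): a
range that ends exactly at the 4GB mark contains no byte at or above
4GB, so it should not require a high hugepage area.

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr = 0xf0000000UL;
		unsigned long len  = 0x10000000UL;	/* last byte at 0xffffffff */

		/* Old test: fires even though the range lies entirely below 4GB. */
		printf("old (>=): %d\n", (addr + len) >= 0x100000000UL);

		/* New test: only fires when the range actually extends past 4GB. */
		printf("new (>):  %d\n", (addr + len) >  0x100000000UL);
		return 0;
	}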
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 58a3dd9a79ec..6642c0125001 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -103,8 +103,9 @@ extern unsigned int HPAGE_SHIFT;
 #define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
 #define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)
 
-#define LOW_ESID_MASK(addr, len)	(((1U << (GET_ESID(addr+len-1)+1)) \
-					- (1U << GET_ESID(addr))) & 0xffff)
+#define LOW_ESID_MASK(addr, len)	\
+	(((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
+	  - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
 #define HTLB_AREA_MASK(addr, len)	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
 					- (1U << GET_HTLB_AREA(addr))) & 0xffff)
 
@@ -113,17 +114,21 @@ extern unsigned int HPAGE_SHIFT;
 #define ARCH_HAS_SETCLEAR_HUGE_PTE
 
 #define touches_hugepage_low_range(mm, addr, len) \
-	(LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+	(((addr) < 0x100000000UL) \
+	 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
 #define touches_hugepage_high_range(mm, addr, len) \
-	(HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
+	((((addr) + (len)) > 0x100000000UL) \
+	 && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))
 
 #define __within_hugepage_low_range(addr, len, segmask) \
-	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
+	( (((addr)+(len)) <= 0x100000000UL) \
+	  && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
 #define within_hugepage_low_range(addr, len) \
 	__within_hugepage_low_range((addr), (len), \
 				    current->mm->context.low_htlb_areas)
 #define __within_hugepage_high_range(addr, len, zonemask) \
-	((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+	( ((addr) >= 0x100000000UL) \
+	  && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
 #define within_hugepage_high_range(addr, len) \
 	__within_hugepage_high_range((addr), (len), \
 				     current->mm->context.high_htlb_areas)