author		David S. Miller <davem@davemloft.net>	2009-08-25 19:47:46 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-25 19:47:46 -0400
commit		d8ed1d43e17898761c7221014a15a4c7501d2ff3 (patch)
tree		f9dbbbf6bc7ac306d003797d778e315aa34c902a /arch/sparc
parent		1ca3976d8ca8b0b44145994b1433f759a642615b (diff)
sparc64: Validate linear D-TLB misses.
When page alloc debugging is not enabled, we essentially accept any
virtual address for linear kernel TLB misses.  But with kgdb, kernel
address probing, and other facilities we can try to access arbitrary
crap.

So, make sure the address we miss on will translate to physical memory
that actually exists.

In order to make this work we have to embed the valid address bitmap
into the kernel image.  And in order to make that less expensive we
make an adjustment, in that the max physical memory address is
decreased to "1 << 41", even on the chips that support a 42-bit
physical address space.  We can do this because bit 41 indicates
"I/O space" and thus covers non-memory ranges.

The result of this is that:

1) kpte_linear_bitmap shrinks from 2K to 1K in size

2) we need 64K more for the valid address bitmap

We can't let the valid address bitmap be dynamically allocated once we
start using it to validate TLB misses, otherwise we have crazy issues
to deal with wrt. recursive TLB misses and such.

If we're in a TLB miss it could be the deepest trap level that's legal
inside of the cpu.  So if we TLB miss referencing the bitmap, the cpu
will be out of trap levels and enter RED state.

To guard against out-of-range accesses to the bitmap, we have to check
to make sure no bits in the physical address above bit 40 are set.  We
could export and use last_valid_pfn for this check, but that's just an
unnecessary extra memory reference.

On the plus side of all this, since we load all of these translations
into the special 4MB mapping TSB, and we check the TSB first for TLB
misses, there should be absolutely no real cost for these new checks
in the TLB miss path.

Reported-by: heyongli@gmail.com
Signed-off-by: David S. Miller <davem@davemloft.net>
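To make the two size claims in 1) and 2) concrete, here is a minimal
standalone sketch (not part of the patch; plain C, assuming nothing but
the chunk sizes named above):

#include <stdio.h>

int main(void)
{
        const unsigned long long max_phys_old = 1ULL << 42;  /* before this patch */
        const unsigned long long max_phys_new = 1ULL << 41;  /* after this patch  */

        /* kpte_linear_bitmap: one bit per 256MB (1 << 28) chunk, 8 bits/byte. */
        printf("kpte_linear_bitmap: %llu -> %llu bytes\n",
               (max_phys_old >> 28) / 8,    /* 2048: the old 2K */
               (max_phys_new >> 28) / 8);   /* 1024: the new 1K */

        /* valid address bitmap: one bit per 4MB (1 << 22) chunk. */
        printf("sparc64_valid_addr_bitmap: %llu bytes\n",
               (max_phys_new >> 22) / 8);   /* 65536: the extra 64K */
        return 0;
}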
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/include/asm/pgtable_64.h	12
-rw-r--r--	arch/sparc/kernel/ktlb.S	42
-rw-r--r--	arch/sparc/mm/init_64.c	43
-rw-r--r--	arch/sparc/mm/init_64.h	7
4 files changed, 76 insertions(+), 28 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b049abf9902..0ff92fa2206 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -726,11 +726,17 @@ extern unsigned long pte_file(pte_t);
 extern pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
 
-extern unsigned long *sparc64_valid_addr_bitmap;
+extern unsigned long sparc64_valid_addr_bitmap[];
 
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr)	\
-	(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
+static inline bool kern_addr_valid(unsigned long addr)
+{
+	unsigned long paddr = __pa(addr);
+
+	if ((paddr >> 41UL) != 0UL)
+		return false;
+	return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
+}
 
 extern int page_in_phys_avail(unsigned long paddr);
 
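The rewritten kern_addr_valid() keeps the old macro's contract for
callers; what changes is the early rejection of any physical address
with bits above bit 40 set, before the bitmap is ever dereferenced.
A usage sketch (the example_probe_byte caller is hypothetical,
kernel-context only, not from this patch):

#include <linux/errno.h>

/* Hypothetical caller: guard a byte read from an arbitrary kernel
 * virtual address, as a kgdb-style prober might.  kern_addr_valid()
 * returns false both for out-of-range physical addresses and for
 * 4MB chunks with no backing RAM.
 */
static int example_probe_byte(unsigned long vaddr, unsigned char *out)
{
	if (!kern_addr_valid(vaddr))
		return -EFAULT;	/* no physical memory behind this address */

	*out = *(unsigned char *)vaddr;
	return 0;
}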
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index cef8defcd7a..3ea6e8cde8c 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -151,12 +151,46 @@ kvmap_dtlb_4v:
 	 * Must preserve %g1 and %g6 (TAG).
 	 */
 kvmap_dtlb_tsb4m_miss:
-	sethi		%hi(kpte_linear_bitmap), %g2
-	or		%g2, %lo(kpte_linear_bitmap), %g2
+	/* Clear the PAGE_OFFSET top virtual bits, shift
+	 * down to get PFN, and make sure PFN is in range.
+	 */
+	sllx		%g4, 21, %g5
 
-	/* Clear the PAGE_OFFSET top virtual bits, then shift
-	 * down to get a 256MB physical address index.
+	/* Check to see if we know about valid memory at the 4MB
+	 * chunk this physical address will reside within.
 	 */
+	srlx		%g5, 21 + 41, %g2
+	brnz,pn		%g2, kvmap_dtlb_longpath
+	 nop
+
+	/* This unconditional branch and delay-slot nop gets patched
+	 * by the sethi sequence once the bitmap is properly setup.
+	 */
+	.globl		valid_addr_bitmap_insn
+valid_addr_bitmap_insn:
+	ba,pt		%xcc, 2f
+	 nop
+	.subsection	2
+	.globl		valid_addr_bitmap_patch
+valid_addr_bitmap_patch:
+	sethi		%hi(sparc64_valid_addr_bitmap), %g7
+	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
+	.previous
+
+	srlx		%g5, 21 + 22, %g2
+	srlx		%g2, 6, %g5
+	and		%g2, 63, %g2
+	sllx		%g5, 3, %g5
+	ldx		[%g7 + %g5], %g5
+	mov		1, %g7
+	sllx		%g7, %g2, %g7
+	andcc		%g5, %g7, %g0
+	be,pn		%xcc, kvmap_dtlb_longpath
+
+2:	 sethi		%hi(kpte_linear_bitmap), %g2
+	or		%g2, %lo(kpte_linear_bitmap), %g2
+
+	/* Get the 256MB physical address index. */
 	sllx		%g4, 21, %g5
 	mov		1, %g7
 	srlx		%g5, 21 + 28, %g5
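For readers who do not speak sparc64 assembler, the new fast-path test
above is roughly the following C.  This is a sketch of the logic only
(valid_addr_check is an illustrative name); the real code runs inside
the TLB-miss trap handler with only global registers to work with:

extern unsigned long sparc64_valid_addr_bitmap[];

/* %g4 holds the missing virtual address.  The sllx/srlx by 21 strips
 * the PAGE_OFFSET top bits, leaving the physical address (paddr here).
 */
static int valid_addr_check(unsigned long vaddr)
{
	unsigned long paddr = (vaddr << 21) >> 21;
	unsigned long bit, word;

	/* srlx %g5, 21 + 41: any physical bit at or above 41 -> longpath */
	if (paddr >> 41)
		return 0;

	bit  = paddr >> 22;                         /* srlx %g5, 21 + 22 */
	word = sparc64_valid_addr_bitmap[bit >> 6]; /* srlx/sllx/ldx     */

	return (word >> (bit & 63)) & 1;            /* mov/sllx/andcc    */
}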
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ed6be6ba2f4..a70a5e1904d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -145,7 +145,8 @@ static void __init read_obp_memory(const char *property,
 	     cmp_p64, NULL);
 }
 
-unsigned long *sparc64_valid_addr_bitmap __read_mostly;
+unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
+					sizeof(unsigned long)];
 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
 
 /* Kernel physical address base and size in bytes. */
@@ -1874,7 +1875,7 @@ static int pavail_rescan_ents __initdata;
  * memory list again, and make sure it provides at least as much
  * memory as 'pavail' does.
  */
-static void __init setup_valid_addr_bitmap_from_pavail(void)
+static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
 {
 	int i;
 
@@ -1897,8 +1898,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(void)
 
 			if (new_start <= old_start &&
 			    new_end >= (old_start + PAGE_SIZE)) {
-				set_bit(old_start >> 22,
-					sparc64_valid_addr_bitmap);
+				set_bit(old_start >> 22, bitmap);
 				goto do_next_page;
 			}
 		}
@@ -1919,20 +1919,21 @@ static void __init setup_valid_addr_bitmap_from_pavail(void)
 	}
 }
 
+static void __init patch_tlb_miss_handler_bitmap(void)
+{
+	extern unsigned int valid_addr_bitmap_insn[];
+	extern unsigned int valid_addr_bitmap_patch[];
+
+	valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
+	mb();
+	valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
+	flushi(&valid_addr_bitmap_insn[0]);
+}
+
 void __init mem_init(void)
 {
 	unsigned long codepages, datapages, initpages;
 	unsigned long addr, last;
-	int i;
-
-	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
-	i += 1;
-	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
-	if (sparc64_valid_addr_bitmap == NULL) {
-		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
-		prom_halt();
-	}
-	memset(sparc64_valid_addr_bitmap, 0, i << 3);
 
 	addr = PAGE_OFFSET + kern_base;
 	last = PAGE_ALIGN(kern_size) + addr;
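The patch function above writes the delay-slot word first, issues
mb(), then overwrites the branch word, and finally flushes the I-cache
line.  A CPU taking a TLB miss mid-patch therefore executes either the
old ba,pt (whose delay slot may clobber %g7, which the branch target
rewrites via mov 1, %g7 before use) or the finished sethi/or pair,
never a torn mix.  The same ordering idea in portable C11 (a generic
sketch with illustrative names, not kernel code):

#include <stdatomic.h>

/* Write word 1, fence, then word 0: a concurrent reader can observe
 * old[0]+old[1], old[0]+new[1] (harmless by construction), or
 * new[0]+new[1] -- but never new[0]+old[1].
 */
static void patch_insn_pair(_Atomic unsigned int *insn,
			    const unsigned int *repl)
{
	atomic_store_explicit(&insn[1], repl[1], memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* the mb() step */
	atomic_store_explicit(&insn[0], repl[0], memory_order_relaxed);
	/* the kernel additionally runs flushi() on the patched line */
}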
@@ -1941,15 +1942,19 @@ void __init mem_init(void)
 		addr += PAGE_SIZE;
 	}
 
-	setup_valid_addr_bitmap_from_pavail();
+	setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
+	patch_tlb_miss_handler_bitmap();
 
 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	for_each_online_node(i) {
-		if (NODE_DATA(i)->node_spanned_pages != 0) {
-			totalram_pages +=
-				free_all_bootmem_node(NODE_DATA(i));
+	{
+		int i;
+		for_each_online_node(i) {
+			if (NODE_DATA(i)->node_spanned_pages != 0) {
+				totalram_pages +=
+					free_all_bootmem_node(NODE_DATA(i));
+			}
 		}
 	}
 #else
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 16063870a48..c2f772dbd55 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -5,10 +5,13 @@
  * marked non-static so that assembler code can get at them.
  */
 
-#define MAX_PHYS_ADDRESS	(1UL << 42UL)
-#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
+#define MAX_PHYS_ADDRESS	(1UL << 41UL)
+#define KPTE_BITMAP_CHUNK_SZ		(256UL * 1024UL * 1024UL)
 #define KPTE_BITMAP_BYTES	\
 	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+#define VALID_ADDR_BITMAP_CHUNK_SZ	(4UL * 1024UL * 1024UL)
+#define VALID_ADDR_BITMAP_BYTES	\
+	((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
 
 extern unsigned long kern_linear_pte_xor[2];
 extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
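Given these macros, the arrays in init_64.c have sizes fixed at compile
time; a quick sketch of assertions (not in the tree; assumes the
definitions above and sparc64's 8-byte unsigned long) pinning down the
numbers:

#include <assert.h>

/* One bit per 256MB chunk of a 2^41-byte space -> 1K of bitmap;
 * one bit per 4MB chunk -> 64K, i.e. 8192 64-bit words.
 */
static_assert(KPTE_BITMAP_BYTES == 1024, "kpte_linear_bitmap is 1K");
static_assert(VALID_ADDR_BITMAP_BYTES == 64 * 1024, "valid bitmap is 64K");
static_assert(VALID_ADDR_BITMAP_BYTES / 8 == 8192, "8192 words of 64 bits");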