author     Martin Schwidefsky <schwidefsky@de.ibm.com>              2010-10-25 10:10:07 -0400
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-10-25 10:10:14 -0400
commit     238ec4efeee4461d5cff2ed3e5a15a3ab850959b (patch)
tree       d9efb4be22cc550c7559c29627a48d66413c645a /arch/s390/mm
parent     229aebb873e29726b91e076161649cf45154b0bf (diff)
[S390] zero page cache synonyms
If the zero page is mapped to virtual user space addresses that differ
only in bit 2^12 or 2^13 we get L1 cache synonyms which can affect
performance. Follow the mips model and use multiple zero pages to avoid
the synonyms.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
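The diffstat below is limited to arch/s390/mm, so the consumer of the new zero_page_mask does not appear in this diff. As a minimal sketch of how a ZERO_PAGE(vaddr) helper could use it (the corresponding header change is not shown here, so the exact definition below is an assumption, not part of this patch):

```c
/*
 * Sketch only, assuming the usual ZERO_PAGE() interface; the real header
 * change lies outside the arch/s390/mm diffstat shown below.  The faulting
 * virtual address selects one of the zero pages via zero_page_mask, so user
 * addresses that differ only in bits 12/13 map to different physical pages
 * and therefore to different L1 cache colors.
 */
#define ZERO_PAGE(vaddr)						\
	(virt_to_page((void *)(empty_zero_page +			\
			((unsigned long)(vaddr) & zero_page_mask))))
```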
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/init.c  49
1 file changed, 45 insertions(+), 4 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 94b8ba2ec857..0744fb3536b1 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,9 +42,52 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 
-char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
+unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 
+static unsigned long setup_zero_pages(void)
+{
+	struct cpuid cpu_id;
+	unsigned int order;
+	unsigned long size;
+	struct page *page;
+	int i;
+
+	get_cpu_id(&cpu_id);
+	switch (cpu_id.machine) {
+	case 0x9672:	/* g5 */
+	case 0x2064:	/* z900 */
+	case 0x2066:	/* z900 */
+	case 0x2084:	/* z990 */
+	case 0x2086:	/* z990 */
+	case 0x2094:	/* z9-109 */
+	case 0x2096:	/* z9-109 */
+		order = 0;
+		break;
+	case 0x2097:	/* z10 */
+	case 0x2098:	/* z10 */
+	default:
+		order = 2;
+		break;
+	}
+
+	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!empty_zero_page)
+		panic("Out of memory in setup_zero_pages");
+
+	page = virt_to_page((void *) empty_zero_page);
+	split_page(page, order);
+	for (i = 1 << order; i > 0; i--) {
+		SetPageReserved(page);
+		page++;
+	}
+
+	size = PAGE_SIZE << order;
+	zero_page_mask = (size - 1) & PAGE_MASK;
+
+	return 1UL << order;
+}
+
 /*
  * paging_init() sets up the page tables
  */
@@ -92,14 +135,12 @@ void __init mem_init(void)
 	max_mapnr = num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
-	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
-
 	/* Setup guest page hinting */
 	cmma_init();
 
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
+	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
 
 	reservedpages = 0;
 
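For the order-2 case chosen in setup_zero_pages() above, the arithmetic works out to a 16 KiB allocation with zero_page_mask == 0x3000, so user addresses that differ only in bits 12 and 13 resolve to four distinct physical zero pages. The standalone C sketch below only illustrates that arithmetic; it is not kernel code, and the concrete values are assumed (PAGE_SIZE = 4096, order = 2, an arbitrary pretend base address):

```c
/* Standalone illustration of the zero_page_mask arithmetic above;
 * assumes PAGE_SIZE = 4096 and order = 2 (the z10-and-later case).
 * Not kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int order = 2;					/* four zero pages */
	unsigned long size = PAGE_SIZE << order;		/* 16 KiB */
	unsigned long zero_page_mask = (size - 1) & PAGE_MASK;	/* 0x3000 */
	unsigned long empty_zero_page = 0x100000UL;		/* pretend allocation */
	unsigned long vaddr;

	/* Addresses differing only in bits 12/13 hit different zero pages,
	 * and hence different L1 cache colors. */
	for (vaddr = 0x20000000UL; vaddr < 0x20004000UL; vaddr += PAGE_SIZE)
		printf("vaddr %#lx -> zero page %#lx\n",
		       vaddr, empty_zero_page + (vaddr & zero_page_mask));
	return 0;
}
```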