about summary refs log tree commit diff stats
path: root/arch/sparc
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2012-05-12 15:26:47 -0400
committerDavid S. Miller <davem@davemloft.net>2012-05-12 15:26:47 -0400
commita46d6056f6585b1afefde190ae78ea894d720693 (patch)
tree592accd762468a11a675ea16cce719fafe9bbf2f /arch/sparc
parent3d386c0ef60cf4810f6d5c62b637a8fb55ec9b2e (diff)
sparc32: Un-btfixup {pte,pmd,pgd}_clear().
Also we can remove BTFIXUPCALL_SWAPO0G0 as that is no longer used. This was rather amusing, we were setting the btfixup vectors based upon cpu type but all to the same exact generic srmmu routines. Furthermore, we were inconsistently marking the fixup as either BTFIXUPCALL_SWAPO0G0 or BTFIXUPCALL_NORM. What a mess, glad we could untangle this stuff. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--arch/sparc/include/asm/btfixup.h1
-rw-r--r--arch/sparc/include/asm/pgtable_32.h44
-rw-r--r--arch/sparc/mm/srmmu.c43
3 files changed, 39 insertions, 49 deletions
diff --git a/arch/sparc/include/asm/btfixup.h b/arch/sparc/include/asm/btfixup.h
index 797722cf69f2..48b01414b19e 100644
--- a/arch/sparc/include/asm/btfixup.h
+++ b/arch/sparc/include/asm/btfixup.h
@@ -120,7 +120,6 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
120#define BTFIXUPCALL_RETO0 0x01000000 /* Return first parameter, actually a nop */ 120#define BTFIXUPCALL_RETO0 0x01000000 /* Return first parameter, actually a nop */
121#define BTFIXUPCALL_ANDNINT(i) (0x902a2000|((i) & 0x1fff)) /* Possibly optimize to andn %o0, i, %o0 */ 121#define BTFIXUPCALL_ANDNINT(i) (0x902a2000|((i) & 0x1fff)) /* Possibly optimize to andn %o0, i, %o0 */
122#define BTFIXUPCALL_SWAPO0O1 0xd27a0000 /* Possibly optimize to swap [%o0],%o1 */ 122#define BTFIXUPCALL_SWAPO0O1 0xd27a0000 /* Possibly optimize to swap [%o0],%o1 */
123#define BTFIXUPCALL_SWAPO0G0 0xc07a0000 /* Possibly optimize to swap [%o0],%g0 */
124#define BTFIXUPCALL_SWAPG1G2 0xc4784000 /* Possibly optimize to swap [%g1],%g2 */ 123#define BTFIXUPCALL_SWAPG1G2 0xc4784000 /* Possibly optimize to swap [%g1],%g2 */
125#define BTFIXUPCALL_STG0O0 0xc0220000 /* Possibly optimize to st %g0,[%o0] */ 124#define BTFIXUPCALL_STG0O0 0xc0220000 /* Possibly optimize to st %g0,[%o0] */
126#define BTFIXUPCALL_STO1O0 0xd2220000 /* Possibly optimize to st %o1,[%o0] */ 125#define BTFIXUPCALL_STO1O0 0xd2220000 /* Possibly optimize to st %o1,[%o0] */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 227118577a94..144ab311c52a 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -119,6 +119,22 @@ extern unsigned long empty_zero_page;
119#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page)) 119#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
120 120
121/* 121/*
122 * In general all page table modifications should use the V8 atomic
123 * swap instruction. This insures the mmu and the cpu are in sync
124 * with respect to ref/mod bits in the page tables.
125 */
126static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
127{
128 __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
129 return value;
130}
131
132static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
133{
134 srmmu_swap((unsigned long *)ptep, pte_val(pteval));
135}
136
137/*
122 */ 138 */
123BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t) 139BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
124BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t) 140BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
@@ -127,7 +143,6 @@ BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
127#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd) 143#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
128 144
129BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t) 145BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
130BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
131 146
132static inline int pte_none(pte_t pte) 147static inline int pte_none(pte_t pte)
133{ 148{
@@ -135,11 +150,19 @@ static inline int pte_none(pte_t pte)
135} 150}
136 151
137#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte) 152#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
138#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte) 153
154static inline void __pte_clear(pte_t *ptep)
155{
156 srmmu_set_pte(ptep, __pte(0));
157}
158
159static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
160{
161 __pte_clear(ptep);
162}
139 163
140BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t) 164BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
141BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) 165BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
142BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
143 166
144static inline int pmd_none(pmd_t pmd) 167static inline int pmd_none(pmd_t pmd)
145{ 168{
@@ -148,17 +171,26 @@ static inline int pmd_none(pmd_t pmd)
148 171
149#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd) 172#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
150#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd) 173#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
151#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd) 174
175static inline void pmd_clear(pmd_t *pmdp)
176{
177 int i;
178 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
179 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
180}
152 181
153BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t) 182BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
154BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t) 183BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
155BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t) 184BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
156BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
157 185
158#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd) 186#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
159#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd) 187#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
160#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd) 188#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
161#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd) 189
190static inline void pgd_clear(pgd_t *pgdp)
191{
192 srmmu_set_pte((pte_t *)pgdp, __pte(0));
193}
162 194
163/* 195/*
164 * The following only work if pte_present() is true. 196 * The following only work if pte_present() is true.
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 38fcb83aaae9..29dfabffcc5e 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -91,22 +91,6 @@ static DEFINE_SPINLOCK(srmmu_context_spinlock);
91 91
92static int is_hypersparc; 92static int is_hypersparc;
93 93
94/*
95 * In general all page table modifications should use the V8 atomic
96 * swap instruction. This insures the mmu and the cpu are in sync
97 * with respect to ref/mod bits in the page tables.
98 */
99static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
100{
101 __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
102 return value;
103}
104
105static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
106{
107 srmmu_swap((unsigned long *)ptep, pte_val(pteval));
108}
109
110/* The very generic SRMMU page table operations. */ 94/* The very generic SRMMU page table operations. */
111static inline int srmmu_device_memory(unsigned long x) 95static inline int srmmu_device_memory(unsigned long x)
112{ 96{
@@ -160,9 +144,6 @@ static inline int srmmu_pte_none(pte_t pte)
160static inline int srmmu_pte_present(pte_t pte) 144static inline int srmmu_pte_present(pte_t pte)
161{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); } 145{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
162 146
163static inline void srmmu_pte_clear(pte_t *ptep)
164{ srmmu_set_pte(ptep, __pte(0)); }
165
166static inline int srmmu_pmd_none(pmd_t pmd) 147static inline int srmmu_pmd_none(pmd_t pmd)
167{ return !(pmd_val(pmd) & 0xFFFFFFF); } 148{ return !(pmd_val(pmd) & 0xFFFFFFF); }
168 149
@@ -172,12 +153,6 @@ static inline int srmmu_pmd_bad(pmd_t pmd)
172static inline int srmmu_pmd_present(pmd_t pmd) 153static inline int srmmu_pmd_present(pmd_t pmd)
173{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } 154{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
174 155
175static inline void srmmu_pmd_clear(pmd_t *pmdp) {
176 int i;
177 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
178 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
179}
180
181static inline int srmmu_pgd_none(pgd_t pgd) 156static inline int srmmu_pgd_none(pgd_t pgd)
182{ return !(pgd_val(pgd) & 0xFFFFFFF); } 157{ return !(pgd_val(pgd) & 0xFFFFFFF); }
183 158
@@ -187,9 +162,6 @@ static inline int srmmu_pgd_bad(pgd_t pgd)
187static inline int srmmu_pgd_present(pgd_t pgd) 162static inline int srmmu_pgd_present(pgd_t pgd)
188{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); } 163{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
189 164
190static inline void srmmu_pgd_clear(pgd_t * pgdp)
191{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
192
193static inline pte_t srmmu_pte_wrprotect(pte_t pte) 165static inline pte_t srmmu_pte_wrprotect(pte_t pte)
194{ return __pte(pte_val(pte) & ~SRMMU_WRITE);} 166{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
195 167
@@ -628,7 +600,7 @@ static inline void srmmu_unmapioaddr(unsigned long virt_addr)
628 ptep = srmmu_pte_offset(pmdp, virt_addr); 600 ptep = srmmu_pte_offset(pmdp, virt_addr);
629 601
630 /* No need to flush uncacheable page. */ 602 /* No need to flush uncacheable page. */
631 srmmu_pte_clear(ptep); 603 __pte_clear(ptep);
632} 604}
633 605
634static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) 606static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
@@ -1480,9 +1452,6 @@ static void __init init_hypersparc(void)
1480 1452
1481 is_hypersparc = 1; 1453 is_hypersparc = 1;
1482 1454
1483 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
1484 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
1485 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
1486 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM); 1455 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
1487 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM); 1456 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
1488 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM); 1457 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
@@ -1546,9 +1515,6 @@ static void __init init_cypress_common(void)
1546{ 1515{
1547 init_vac_layout(); 1516 init_vac_layout();
1548 1517
1549 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
1550 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
1551 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
1552 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM); 1518 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
1553 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM); 1519 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
1554 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM); 1520 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
@@ -1930,10 +1896,6 @@ static void __init init_viking(void)
1930 viking_mxcc_present = 0; 1896 viking_mxcc_present = 0;
1931 msi_set_sync(); 1897 msi_set_sync();
1932 1898
1933 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
1934 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
1935 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
1936
1937 /* 1899 /*
1938 * We need this to make sure old viking takes no hits 1900 * We need this to make sure old viking takes no hits
1939 * on it's cache for dma snoops to workaround the 1901 * on it's cache for dma snoops to workaround the
@@ -2162,16 +2124,13 @@ void __init ld_mmu_srmmu(void)
2162 BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); 2124 BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
2163 2125
2164 BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); 2126 BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
2165 BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
2166 2127
2167 BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); 2128 BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
2168 BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); 2129 BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
2169 BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
2170 2130
2171 BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); 2131 BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
2172 BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); 2132 BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
2173 BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); 2133 BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
2174 BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
2175 2134
2176 BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); 2135 BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
2177 BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); 2136 BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);