author     Paul Gortmaker <paul.gortmaker@windriver.com>   2013-06-18 09:38:59 -0400
committer  Paul Gortmaker <paul.gortmaker@windriver.com>   2013-07-14 19:36:51 -0400
commit     078a55fc824c1633b3a507e4ad48b4637c1dfc18 (patch)
tree       b7abb8d50bf6e322baaea322e9224d7715b52f5b /arch/mips/mm/page.c
parent     60ffef065dd40b91f6f76af6c7510ddf23102f54 (diff)
MIPS: Delete __cpuinit/__CPUINIT usage from MIPS code
commit 3747069b25e419f6b51395f48127e9812abc3596 upstream.

The __cpuinit type of throwaway sections might have made sense some time ago
when RAM was more constrained, but now the savings do not offset the cost and
complications.  For example, the fix in commit 5e427ec2d0 ("x86: Fix bit
corruption at CPU resume time") is a good example of the nasty type of bugs
that can be created with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go the way of
devinit and be phased out.  Once all the users are gone, we can then finally
remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c) and are
flagged as __cpuinit -- so if we remove the __cpuinit from the arch specific
callers, we will also get section mismatch warnings.  As an intermediate step,
we intend to turn the linux/init.h cpuinit related content into no-ops as
early as possible, since that will get rid of these warnings.  In any case,
they are temporary and harmless.

Here, we remove all the MIPS __cpuinit from C code and __CPUINIT from asm
files.  MIPS is interesting in this respect, because there are also uasm users
hiding behind their own renamed versions of the __cpuinit macros.

[1] https://lkml.org/lkml/2013/5/20/589

[ralf@linux-mips.org: Folded in Paul's followup fix.]

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5494/
Patchwork: https://patchwork.linux-mips.org/patch/5495/
Patchwork: https://patchwork.linux-mips.org/patch/5509/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
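For context, a minimal sketch of the annotation pattern being deleted below.
The macro bodies and the CONFIG_HOTPLUG_CPU condition are spelled out here
purely for illustration and only approximate the historical <linux/init.h>
arrangement; they are not the literal kernel definitions.

    /* Illustration only: approximate shape of the pre-removal markers.
     * Without CPU hotplug, __cpuinit code/data could be placed in
     * discardable .cpuinit.* sections and freed after boot; turning the
     * markers into no-ops keeps the symbols in regular .text/.data.
     */
    #ifdef CONFIG_HOTPLUG_CPU
    # define __cpuinit                /* CPUs may come up later: keep it resident */
    # define __cpuinitdata
    #else
    # define __cpuinit     __attribute__((__section__(".cpuinit.text")))
    # define __cpuinitdata __attribute__((__section__(".cpuinit.data")))
    #endif

    /* Before this patch: the symbol sits in a section that may be discarded... */
    static int cache_line_size __cpuinitdata;

    /* ...after: the marker is simply dropped, as throughout the diff below. */
    static int copy_word_size;

Once the cpuinit content in linux/init.h is turned into no-ops, annotations
like the above have no effect either way, which is why removing them from this
file is an essentially mechanical change.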
Diffstat (limited to 'arch/mips/mm/page.c')
-rw-r--r--  arch/mips/mm/page.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 2c0bd580b9da..218c2109a55d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
 UASM_L_LA(_copy_pref_store)
 
 /* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __cpuinitdata labels[5];
-static struct uasm_reloc __cpuinitdata relocs[5];
+static struct uasm_label labels[5];
+static struct uasm_reloc relocs[5];
 
 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
-static int pref_bias_clear_store __cpuinitdata;
-static int pref_bias_copy_load __cpuinitdata;
-static int pref_bias_copy_store __cpuinitdata;
+static int pref_bias_clear_store;
+static int pref_bias_copy_load;
+static int pref_bias_copy_store;
 
-static u32 pref_src_mode __cpuinitdata;
-static u32 pref_dst_mode __cpuinitdata;
+static u32 pref_src_mode;
+static u32 pref_dst_mode;
 
-static int clear_word_size __cpuinitdata;
-static int copy_word_size __cpuinitdata;
+static int clear_word_size;
+static int copy_word_size;
 
-static int half_clear_loop_size __cpuinitdata;
-static int half_copy_loop_size __cpuinitdata;
+static int half_clear_loop_size;
+static int half_copy_loop_size;
 
-static int cache_line_size __cpuinitdata;
+static int cache_line_size;
 #define cache_line_mask() (cache_line_size - 1)
 
-static inline void __cpuinit
+static inline void
 pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
 {
         if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
         }
 }
 
-static void __cpuinit set_prefetch_parameters(void)
+static void set_prefetch_parameters(void)
 {
         if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
                 clear_word_size = 8;
@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
                                       4 * copy_word_size));
 }
 
-static void __cpuinit build_clear_store(u32 **buf, int off)
+static void build_clear_store(u32 **buf, int off)
 {
         if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
                 uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
         }
 }
 
-static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+static inline void build_clear_pref(u32 **buf, int off)
 {
         if (off & cache_line_mask())
                 return;
@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
 extern u32 __copy_page_start;
 extern u32 __copy_page_end;
 
-void __cpuinit build_clear_page(void)
+void build_clear_page(void)
 {
         int off;
         u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
         pr_debug("\t.set pop\n");
 }
 
-static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+static void build_copy_load(u32 **buf, int reg, int off)
 {
         if (cpu_has_64bit_gp_regs) {
                 uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
         }
 }
 
-static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+static void build_copy_store(u32 **buf, int reg, int off)
 {
         if (cpu_has_64bit_gp_regs) {
                 uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
         }
 }
 
-void __cpuinit build_copy_page(void)
+void build_copy_page(void)
 {
         int off;
         u32 *buf = &__copy_page_start;