aboutsummaryrefslogtreecommitdiffstats
path: root/arch/blackfin/kernel/vmlinux.lds.S
diff options
context:
space:
mode:
authorMike Frysinger <vapier.adi@gmail.com>2008-02-02 02:53:17 -0500
committerBryan Wu <bryan.wu@analog.com>2008-02-02 02:53:17 -0500
commitb7627acc432a36072253bb1288f56e78c7d9423e (patch)
tree1d79f1c3a68c21ec1b51428a88d8f5149dbbbbf4 /arch/blackfin/kernel/vmlinux.lds.S
parent80f31c8a03d2f0644d0ceaf14e7e0108a007c962 (diff)
[Blackfin] arch: move the init sections to the end of memory
Move the init sections to the end of memory so that after they are freed, run time memory is all contiguous - this should help decrease memory fragmentation. When doing this, we also pack some of the other sections a little closer together, to make sure we don't waste memory. To make this happen, we need to rename the .data.init_task section to .init_task.data, so it doesn't get picked up by the linker script glob. Signed-off-by: Mike Frysinger <vapier.adi@gmail.com> Signed-off-by: Bryan Wu <bryan.wu@analog.com>
Diffstat (limited to 'arch/blackfin/kernel/vmlinux.lds.S')
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S47
1 files changed, 28 insertions, 19 deletions
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 858722421b40..aed832540b3b 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -41,6 +41,9 @@ _jiffies = _jiffies_64;
41SECTIONS 41SECTIONS
42{ 42{
43 . = CONFIG_BOOT_LOAD; 43 . = CONFIG_BOOT_LOAD;
44 /* Neither the text, ro_data or bss section need to be aligned
45 * So pack them back to back
46 */
44 .text : 47 .text :
45 { 48 {
46 __text = .; 49 __text = .;
@@ -58,22 +61,25 @@ SECTIONS
58 *(__ex_table) 61 *(__ex_table)
59 ___stop___ex_table = .; 62 ___stop___ex_table = .;
60 63
61 . = ALIGN(4);
62 __etext = .; 64 __etext = .;
63 } 65 }
64 66
65 RO_DATA(PAGE_SIZE) 67 /* Just in case the first read only is a 32-bit access */
68 RO_DATA(4)
69
70 .bss :
71 {
72 . = ALIGN(4);
73 ___bss_start = .;
74 *(.bss .bss.*)
75 *(COMMON)
76 ___bss_stop = .;
77 }
66 78
67 .data : 79 .data :
68 { 80 {
69 /* make sure the init_task is aligned to the
70 * kernel thread size so we can locate the kernel
71 * stack properly and quickly.
72 */
73 __sdata = .; 81 __sdata = .;
74 . = ALIGN(THREAD_SIZE); 82 /* This gets done first, so the glob doesn't suck it in */
75 *(.data.init_task)
76
77 . = ALIGN(32); 83 . = ALIGN(32);
78 *(.data.cacheline_aligned) 84 *(.data.cacheline_aligned)
79 85
@@ -81,10 +87,22 @@ SECTIONS
81 *(.data.*) 87 *(.data.*)
82 CONSTRUCTORS 88 CONSTRUCTORS
83 89
90 /* make sure the init_task is aligned to the
91 * kernel thread size so we can locate the kernel
92 * stack properly and quickly.
93 */
84 . = ALIGN(THREAD_SIZE); 94 . = ALIGN(THREAD_SIZE);
95 *(.init_task.data)
96
85 __edata = .; 97 __edata = .;
86 } 98 }
87 99
100 /* The init section should be last, so when we free it, it goes into
101 * the general memory pool, and (hopefully) will decrease fragmentation
102 * a tiny bit. The init section has a _requirement_ that it be
103 * PAGE_SIZE aligned
104 */
105 . = ALIGN(PAGE_SIZE);
88 ___init_begin = .; 106 ___init_begin = .;
89 107
90 .init.text : 108 .init.text :
@@ -179,16 +197,7 @@ SECTIONS
179 . = ALIGN(PAGE_SIZE); 197 . = ALIGN(PAGE_SIZE);
180 ___init_end = .; 198 ___init_end = .;
181 199
182 .bss : 200 __end =.;
183 {
184 . = ALIGN(4);
185 ___bss_start = .;
186 *(.bss .bss.*)
187 *(COMMON)
188 . = ALIGN(4);
189 ___bss_stop = .;
190 __end = .;
191 }
192 201
193 STABS_DEBUG 202 STABS_DEBUG
194 203