author     Michael Ellerman <michael@ellerman.id.au>  2005-08-03 06:21:24 -0400
committer  Paul Mackerras <paulus@samba.org>  2005-08-28 20:53:36 -0400
commit     a4a0f97020444f83bf22bb9c8c20d8af2b4e6317 (patch)
tree       e57ee269fa71c46a43d8a4ffe4df82ec6780c744 /arch
parent     aefd16b0c5a594b5feaba23954ad74061f45c8a5 (diff)
[PATCH] ppc64: Remove redundant use of pointers in lmb code
The lmb code is all written to use a pointer to an lmb struct. But it's always the same lmb struct, called "lmb". So we take the address of lmb, call it _lmb and then start using _lmb->foo everywhere, which is silly.

This patch removes the _lmb pointers and replaces them with direct references to the one "lmb" struct. We do the same for some _mem and _rsv pointers which point to lmb.memory and lmb.reserved respectively.

This patch looks quite busy, but it's basically just:

 s/_lmb->/lmb./g
 s/_mem->/lmb.memory./g
 s/_rsv->/lmb.reserved./g
 s/_rsv/&lmb.reserved/g
 s/mem->/lmb.memory./g

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
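As a quick illustration of the pattern those substitutions produce, here is a condensed before/after sketch based on the lmb_phys_mem_size() hunk further down (i and total are assumed to be declared as in that function); it is only an illustration, not an extra hunk of the patch:

/* Before: local pointers that always alias the one global "lmb" */
struct lmb *_lmb = &lmb;
struct lmb_region *_mem = &(_lmb->memory);

for (i = 0; i < _mem->cnt; i++)
	total += _mem->region[i].size;

/* After: refer to the global struct directly */
for (i = 0; i < lmb.memory.cnt; i++)
	total += lmb.memory.region[i].size;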
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc64/kernel/lmb.c  100
1 file changed, 43 insertions, 57 deletions
diff --git a/arch/ppc64/kernel/lmb.c b/arch/ppc64/kernel/lmb.c
index d6c6bd03d2a4..6cb275615fc4 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/ppc64/kernel/lmb.c
@@ -28,33 +28,32 @@ void lmb_dump_all(void)
 {
 #ifdef DEBUG
 	unsigned long i;
-	struct lmb *_lmb = &lmb;
 
 	udbg_printf("lmb_dump_all:\n");
 	udbg_printf("    memory.cnt = 0x%lx\n",
-		    _lmb->memory.cnt);
+		    lmb.memory.cnt);
 	udbg_printf("    memory.size = 0x%lx\n",
-		    _lmb->memory.size);
-	for (i=0; i < _lmb->memory.cnt ;i++) {
+		    lmb.memory.size);
+	for (i=0; i < lmb.memory.cnt ;i++) {
 		udbg_printf("    memory.region[0x%x].base = 0x%lx\n",
-			    i, _lmb->memory.region[i].base);
+			    i, lmb.memory.region[i].base);
 		udbg_printf("            .physbase = 0x%lx\n",
-			    _lmb->memory.region[i].physbase);
+			    lmb.memory.region[i].physbase);
 		udbg_printf("            .size     = 0x%lx\n",
-			    _lmb->memory.region[i].size);
+			    lmb.memory.region[i].size);
 	}
 
 	udbg_printf("\n    reserved.cnt = 0x%lx\n",
-		    _lmb->reserved.cnt);
+		    lmb.reserved.cnt);
 	udbg_printf("    reserved.size = 0x%lx\n",
-		    _lmb->reserved.size);
-	for (i=0; i < _lmb->reserved.cnt ;i++) {
+		    lmb.reserved.size);
+	for (i=0; i < lmb.reserved.cnt ;i++) {
 		udbg_printf("    reserved.region[0x%x].base = 0x%lx\n",
-			    i, _lmb->reserved.region[i].base);
+			    i, lmb.reserved.region[i].base);
 		udbg_printf("            .physbase = 0x%lx\n",
-			    _lmb->reserved.region[i].physbase);
+			    lmb.reserved.region[i].physbase);
 		udbg_printf("            .size     = 0x%lx\n",
-			    _lmb->reserved.region[i].size);
+			    lmb.reserved.region[i].size);
 	}
 #endif /* DEBUG */
 }
@@ -108,19 +107,17 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
 void __init
 lmb_init(void)
 {
-	struct lmb *_lmb = &lmb;
-
 	/* Create a dummy zero size LMB which will get coalesced away later.
 	 * This simplifies the lmb_add() code below...
 	 */
-	_lmb->memory.region[0].base = 0;
-	_lmb->memory.region[0].size = 0;
-	_lmb->memory.cnt = 1;
+	lmb.memory.region[0].base = 0;
+	lmb.memory.region[0].size = 0;
+	lmb.memory.cnt = 1;
 
 	/* Ditto. */
-	_lmb->reserved.region[0].base = 0;
-	_lmb->reserved.region[0].size = 0;
-	_lmb->reserved.cnt = 1;
+	lmb.reserved.region[0].base = 0;
+	lmb.reserved.region[0].size = 0;
+	lmb.reserved.cnt = 1;
 }
 
 /* This routine called with relocation disabled. */
@@ -130,27 +127,26 @@ lmb_analyze(void)
 	unsigned long i;
 	unsigned long mem_size = 0;
 	unsigned long size_mask = 0;
-	struct lmb *_lmb = &lmb;
 #ifdef CONFIG_MSCHUNKS
 	unsigned long physbase = 0;
 #endif
 
-	for (i=0; i < _lmb->memory.cnt; i++) {
+	for (i=0; i < lmb.memory.cnt; i++) {
 		unsigned long lmb_size;
 
-		lmb_size = _lmb->memory.region[i].size;
+		lmb_size = lmb.memory.region[i].size;
 
 #ifdef CONFIG_MSCHUNKS
-		_lmb->memory.region[i].physbase = physbase;
+		lmb.memory.region[i].physbase = physbase;
 		physbase += lmb_size;
 #else
-		_lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
+		lmb.memory.region[i].physbase = lmb.memory.region[i].base;
 #endif
 		mem_size += lmb_size;
 		size_mask |= lmb_size;
 	}
 
-	_lmb->memory.size = mem_size;
+	lmb.memory.size = mem_size;
 }
 
 /* This routine called with relocation disabled. */
@@ -213,12 +209,11 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
 long __init
 lmb_add(unsigned long base, unsigned long size)
 {
-	struct lmb *_lmb = &lmb;
-	struct lmb_region *_rgn = &(_lmb->memory);
+	struct lmb_region *_rgn = &(lmb.memory);
 
 	/* On pSeries LPAR systems, the first LMB is our RMO region. */
 	if ( base == 0 )
-		_lmb->rmo_size = size;
+		lmb.rmo_size = size;
 
 	return lmb_add_region(_rgn, base, size);
 
@@ -227,8 +222,7 @@ lmb_add(unsigned long base, unsigned long size)
 long __init
 lmb_reserve(unsigned long base, unsigned long size)
 {
-	struct lmb *_lmb = &lmb;
-	struct lmb_region *_rgn = &(_lmb->reserved);
+	struct lmb_region *_rgn = &(lmb.reserved);
 
 	return lmb_add_region(_rgn, base, size);
 }
@@ -260,13 +254,10 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
 {
 	long i, j;
 	unsigned long base = 0;
-	struct lmb *_lmb = &lmb;
-	struct lmb_region *_mem = &(_lmb->memory);
-	struct lmb_region *_rsv = &(_lmb->reserved);
 
-	for (i=_mem->cnt-1; i >= 0; i--) {
-		unsigned long lmbbase = _mem->region[i].base;
-		unsigned long lmbsize = _mem->region[i].size;
+	for (i=lmb.memory.cnt-1; i >= 0; i--) {
+		unsigned long lmbbase = lmb.memory.region[i].base;
+		unsigned long lmbsize = lmb.memory.region[i].size;
 
 		if ( max_addr == LMB_ALLOC_ANYWHERE )
 			base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
@@ -276,8 +267,8 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
 			continue;
 
 		while ( (lmbbase <= base) &&
-			((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
-			base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
+			((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
+			base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
 		}
 
 		if ( (base != 0) && (lmbbase <= base) )
@@ -287,7 +278,7 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
 	if ( i < 0 )
 		return 0;
 
-	lmb_add_region(_rsv, base, size);
+	lmb_add_region(&lmb.reserved, base, size);
 
 	return base;
 }
@@ -295,17 +286,15 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
 unsigned long __init
 lmb_phys_mem_size(void)
 {
-	struct lmb *_lmb = &lmb;
 #ifdef CONFIG_MSCHUNKS
-	return _lmb->memory.size;
+	return lmb.memory.size;
 #else
-	struct lmb_region *_mem = &(_lmb->memory);
 	unsigned long total = 0;
 	int i;
 
 	/* add all physical memory to the bootmem map */
-	for (i=0; i < _mem->cnt; i++)
-		total += _mem->region[i].size;
+	for (i=0; i < lmb.memory.cnt; i++)
+		total += lmb.memory.region[i].size;
 	return total;
 #endif /* CONFIG_MSCHUNKS */
 }
@@ -313,14 +302,12 @@ lmb_phys_mem_size(void)
 unsigned long __init
 lmb_end_of_DRAM(void)
 {
-	struct lmb *_lmb = &lmb;
-	struct lmb_region *_mem = &(_lmb->memory);
-	int idx = _mem->cnt - 1;
+	int idx = lmb.memory.cnt - 1;
 
 #ifdef CONFIG_MSCHUNKS
-	return (_mem->region[idx].physbase + _mem->region[idx].size);
+	return (lmb.memory.region[idx].physbase + lmb.memory.region[idx].size);
 #else
-	return (_mem->region[idx].base + _mem->region[idx].size);
+	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
 #endif /* CONFIG_MSCHUNKS */
 
 	return 0;
@@ -353,20 +340,19 @@ void __init lmb_enforce_memory_limit(void)
 {
 	extern unsigned long memory_limit;
 	unsigned long i, limit;
-	struct lmb_region *mem = &(lmb.memory);
 
 	if (! memory_limit)
 		return;
 
 	limit = memory_limit;
-	for (i = 0; i < mem->cnt; i++) {
-		if (limit > mem->region[i].size) {
-			limit -= mem->region[i].size;
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		if (limit > lmb.memory.region[i].size) {
+			limit -= lmb.memory.region[i].size;
 			continue;
 		}
 
-		mem->region[i].size = limit;
-		mem->cnt = i + 1;
+		lmb.memory.region[i].size = limit;
+		lmb.memory.cnt = i + 1;
 		break;
 	}
 }