author		Aron Griffis <aron@hp.com>	2008-02-04 18:31:49 -0500
committer	Tony Luck <tony.luck@intel.com>	2008-02-04 18:31:49 -0500
commit		7d9aed26ed11d7a472104b7078b0c5e4fd416059 (patch)
tree		83cc3611b958488ae076c544b21d9f1884149a35 /arch/ia64
parent		cdef24c9cd38ae236065409c4a6289f165639e55 (diff)
[IA64] Make efi.c mostly fit in 80 columns
This patch is purely whitespace changes to make the code fit in 80
columns, plus fix some inconsistent indentation. The efi_guidcmp()
tests remain wider than 80 columns since that seems to be the most
clear.

Signed-off-by: Aron Griffis <aron@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/efi.c	489
1 files changed, 266 insertions, 223 deletions
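
Most of the churn below is in the STUB_* macro family, which stamps out one
wrapper per EFI runtime service for each calling convention, bracketing every
firmware call with ia64_save_scratch_fpregs()/ia64_load_scratch_fpregs(). As a
rough sketch of what the preprocessor generates -- assuming an identity
adjuster id() for the virtual-mode stubs, with phys_ptr() (visible at the end
of the first hunk) as the physical-mode counterpart -- STUB_GET_TIME(virt, id)
would expand to something like:

/* Rough expansion sketch; "id" is an assumed identity adjuster. */
static efi_status_t
virt_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
	struct ia64_fpreg fr[6];
	efi_time_cap_t *atc = NULL;
	efi_status_t ret;

	if (tc)
		atc = id(tc);		/* assumed: id(x) is just x */
	ia64_save_scratch_fpregs(fr);	/* EFI may clobber scratch FP regs */
	ret = efi_call_virt((efi_get_time_t *) __va(runtime->get_time),
			    id(tm), atc);
	ia64_load_scratch_fpregs(fr);
	return ret;
}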
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 242d79341120..9e5910920da6 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1,7 +1,8 @@
 /*
  * Extensible Firmware Interface
  *
- * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
+ * Based on Extensible Firmware Interface Specification version 0.9
+ * April 30, 1999
  *
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
@@ -48,145 +49,157 @@ static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
 #define efi_call_virt(f, args...)	(*(f))(args)
 
 #define STUB_GET_TIME(prefix, adjust_arg)				\
 static efi_status_t							\
 prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc)			\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_time_cap_t *atc = NULL;					\
 	efi_status_t ret;						\
 									\
 	if (tc)								\
 		atc = adjust_arg(tc);					\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
+	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
+				adjust_arg(tm), atc);			\
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_SET_TIME(prefix, adjust_arg)				\
 static efi_status_t							\
 prefix##_set_time (efi_time_t *tm)					\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_status_t ret;						\
 									\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
+	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
+				adjust_arg(tm));			\
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg)			\
 static efi_status_t							\
-prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
+prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending,	\
+			  efi_time_t *tm)				\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_status_t ret;						\
 									\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
-		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
+	ret = efi_call_##prefix(					\
+		(efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
+		adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg)			\
 static efi_status_t							\
 prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)		\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_time_t *atm = NULL;						\
 	efi_status_t ret;						\
 									\
 	if (tm)								\
 		atm = adjust_arg(tm);					\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
-		enabled, atm); \
+	ret = efi_call_##prefix(					\
+		(efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
+		enabled, atm);						\
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_GET_VARIABLE(prefix, adjust_arg)				\
 static efi_status_t							\
 prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
 		       unsigned long *data_size, void *data)		\
 {									\
 	struct ia64_fpreg fr[6];					\
 	u32 *aattr = NULL;						\
 	efi_status_t ret;						\
 									\
 	if (attr)							\
 		aattr = adjust_arg(attr);				\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
-		adjust_arg(name), adjust_arg(vendor), aattr, \
-		adjust_arg(data_size), adjust_arg(data)); \
+	ret = efi_call_##prefix(					\
+		(efi_get_variable_t *) __va(runtime->get_variable),	\
+		adjust_arg(name), adjust_arg(vendor), aattr,		\
+		adjust_arg(data_size), adjust_arg(data));		\
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg)			\
 static efi_status_t							\
-prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
+prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
+			    efi_guid_t *vendor)				\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_status_t ret;						\
 									\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
-		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
+	ret = efi_call_##prefix(					\
+		(efi_get_next_variable_t *) __va(runtime->get_next_variable), \
+		adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_SET_VARIABLE(prefix, adjust_arg)				\
 static efi_status_t							\
-prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
-		       unsigned long data_size, void *data) \
+prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor,		\
+		       unsigned long attr, unsigned long data_size,	\
+		       void *data)					\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_status_t ret;						\
 									\
 	ia64_save_scratch_fpregs(fr);					\
-	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
-		adjust_arg(name), adjust_arg(vendor), attr, data_size, \
-		adjust_arg(data)); \
+	ret = efi_call_##prefix(					\
+		(efi_set_variable_t *) __va(runtime->set_variable),	\
+		adjust_arg(name), adjust_arg(vendor), attr, data_size,	\
+		adjust_arg(data));					\
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg)		\
 static efi_status_t							\
 prefix##_get_next_high_mono_count (u32 *count)				\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_status_t ret;						\
 									\
 	ia64_save_scratch_fpregs(fr);					\
 	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *)	\
-			__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
+				__va(runtime->get_next_high_mono_count), \
+				adjust_arg(count));			\
 	ia64_load_scratch_fpregs(fr);					\
 	return ret;							\
 }
 
 #define STUB_RESET_SYSTEM(prefix, adjust_arg)				\
 static void								\
 prefix##_reset_system (int reset_type, efi_status_t status,		\
 		       unsigned long data_size, efi_char16_t *data)	\
 {									\
 	struct ia64_fpreg fr[6];					\
 	efi_char16_t *adata = NULL;					\
 									\
 	if (data)							\
 		adata = adjust_arg(data);				\
 									\
 	ia64_save_scratch_fpregs(fr);					\
-	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
-		reset_type, status, data_size, adata); \
+	efi_call_##prefix(						\
+		(efi_reset_system_t *) __va(runtime->reset_system),	\
+		reset_type, status, data_size, adata);			\
 	/* should not return, but just in case... */			\
 	ia64_load_scratch_fpregs(fr);					\
 }
 
 #define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))
@@ -223,7 +236,8 @@ efi_gettimeofday (struct timespec *ts)
 		return;
 	}
 
-	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+	ts->tv_sec = mktime(tm.year, tm.month, tm.day,
+			    tm.hour, tm.minute, tm.second);
 	ts->tv_nsec = tm.nanosecond;
 }
 
@@ -297,8 +311,8 @@ walk (efi_freemem_callback_t callback, void *arg, u64 attr)
 }
 
 /*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for OS use.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory
+ * descriptor that has memory that is available for OS use.
  */
 void
 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
@@ -307,8 +321,8 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 }
 
 /*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for uncached allocator.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory
+ * descriptor that has memory that is available for uncached allocator.
  */
 void
 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
@@ -321,7 +335,6 @@ efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
  * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
  * Abstraction Layer chapter 11 in ADAG
  */
-
 void *
 efi_get_pal_addr (void)
 {
@@ -341,32 +354,33 @@ efi_get_pal_addr (void)
 			continue;
 
 		if (++pal_code_count > 1) {
-			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
-			       md->phys_addr);
+			printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
+			       "dropped @ %lx\n", md->phys_addr);
 			continue;
 		}
 		/*
-		 * The only ITLB entry in region 7 that is used is the one installed by
-		 * __start(). That entry covers a 64MB range.
+		 * The only ITLB entry in region 7 that is used is the one
+		 * installed by __start(). That entry covers a 64MB range.
 		 */
 		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
 		vaddr = PAGE_OFFSET + md->phys_addr;
 
 		/*
-		 * We must check that the PAL mapping won't overlap with the kernel
-		 * mapping.
+		 * We must check that the PAL mapping won't overlap with the
+		 * kernel mapping.
 		 *
-		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
-		 * 256KB and that only one ITR is needed to map it. This implies that the
-		 * PAL code is always aligned on its size, i.e., the closest matching page
-		 * size supported by the TLB. Therefore PAL code is guaranteed never to
-		 * cross a 64MB unless it is bigger than 64MB (very unlikely!). So for
-		 * now the following test is enough to determine whether or not we need a
-		 * dedicated ITR for the PAL code.
+		 * PAL code is guaranteed to be aligned on a power of 2 between
+		 * 4k and 256KB and that only one ITR is needed to map it. This
+		 * implies that the PAL code is always aligned on its size,
+		 * i.e., the closest matching page size supported by the TLB.
+		 * Therefore PAL code is guaranteed never to cross a 64MB unless
+		 * it is bigger than 64MB (very unlikely!). So for now the
+		 * following test is enough to determine whether or not we need
+		 * a dedicated ITR for the PAL code.
 		 */
 		if ((vaddr & mask) == (KERNEL_START & mask)) {
-			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
-			       __FUNCTION__);
+			printk(KERN_INFO "%s: no need to install ITR for "
+			       "PAL code\n", __FUNCTION__);
 			continue;
 		}
 
@@ -376,10 +390,11 @@ efi_get_pal_addr (void)
 #if EFI_DEBUG
 		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 
-		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+		printk(KERN_INFO "CPU %d: mapping PAL code "
+		       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
 		       smp_processor_id(), md->phys_addr,
 		       md->phys_addr + efi_md_size(md),
 		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
 		return __va(md->phys_addr);
 	}
@@ -401,7 +416,8 @@ efi_map_pal_code (void)
 	 * Cannot write to CRx with PSR.ic=1
 	 */
 	psr = ia64_clear_ic();
-	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
+	ia64_itr(0x1, IA64_TR_PALCODE,
+		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
 		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 		 IA64_GRANULE_SHIFT);
 	ia64_set_psr(psr);		/* restore psr */
@@ -418,7 +434,10 @@ efi_init (void)
 	char *cp, vendor[100] = "unknown";
 	int i;
 
-	/* it's too early to be able to use the standard kernel command line support... */
+	/*
+	 * it's too early to be able to use the standard kernel command line
+	 * support...
+	 */
 	for (cp = boot_command_line; *cp; ) {
 		if (memcmp(cp, "mem=", 4) == 0) {
 			mem_limit = memparse(cp + 4, &cp);
@@ -434,9 +453,11 @@ efi_init (void)
 		}
 	}
 	if (min_addr != 0UL)
-		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
+		printk(KERN_INFO "Ignoring memory below %luMB\n",
+		       min_addr >> 20);
 	if (max_addr != ~0UL)
-		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
+		printk(KERN_INFO "Ignoring memory above %luMB\n",
+		       max_addr >> 20);
 
 	efi.systab = __va(ia64_boot_param->efi_systab);
 
@@ -464,7 +485,8 @@ efi_init (void)
 	}
 
 	printk(KERN_INFO "EFI v%u.%.02u by %s:",
-	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
+	       efi.systab->hdr.revision >> 16,
+	       efi.systab->hdr.revision & 0xffff, vendor);
 
 	efi.mps = EFI_INVALID_TABLE_ADDR;
 	efi.acpi = EFI_INVALID_TABLE_ADDR;
@@ -519,9 +541,12 @@ efi_init (void)
 		efi_memory_desc_t *md;
 		void *p;
 
-		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
+		for (i = 0, p = efi_map_start; p < efi_map_end;
+		     ++i, p += efi_desc_size)
+		{
 			md = p;
-			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
+			printk("mem%02u: type=%u, attr=0x%lx, "
+			       "range=[0x%016lx-0x%016lx) (%luMB)\n",
 			       i, md->type, md->attribute, md->phys_addr,
 			       md->phys_addr + efi_md_size(md),
 			       md->num_pages >> (20 - EFI_PAGE_SHIFT));
@@ -549,8 +574,8 @@ efi_enter_virtual_mode (void)
 		md = p;
 		if (md->attribute & EFI_MEMORY_RUNTIME) {
 			/*
-			 * Some descriptors have multiple bits set, so the order of
-			 * the tests is relevant.
+			 * Some descriptors have multiple bits set, so the
+			 * order of the tests is relevant.
 			 */
 			if (md->attribute & EFI_MEMORY_WB) {
 				md->virt_addr = (u64) __va(md->phys_addr);
@@ -558,21 +583,26 @@ efi_enter_virtual_mode (void)
 				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
 			} else if (md->attribute & EFI_MEMORY_WC) {
 #if 0
-				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
-							   | _PAGE_D
-							   | _PAGE_MA_WC
-							   | _PAGE_PL_0
-							   | _PAGE_AR_RW));
+				md->virt_addr = ia64_remap(md->phys_addr,
+							   (_PAGE_A |
+							    _PAGE_P |
+							    _PAGE_D |
+							    _PAGE_MA_WC |
+							    _PAGE_PL_0 |
+							    _PAGE_AR_RW));
 #else
 				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
 				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
 #endif
 			} else if (md->attribute & EFI_MEMORY_WT) {
 #if 0
-				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
-							   | _PAGE_D | _PAGE_MA_WT
-							   | _PAGE_PL_0
-							   | _PAGE_AR_RW));
+				md->virt_addr = ia64_remap(md->phys_addr,
+							   (_PAGE_A |
+							    _PAGE_P |
+							    _PAGE_D |
+							    _PAGE_MA_WT |
+							    _PAGE_PL_0 |
+							    _PAGE_AR_RW));
 #else
 				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
 				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
@@ -583,16 +613,18 @@ efi_enter_virtual_mode (void)
 
 	status = efi_call_phys(__va(runtime->set_virtual_address_map),
 			       ia64_boot_param->efi_memmap_size,
-			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
+			       efi_desc_size,
+			       ia64_boot_param->efi_memdesc_version,
 			       ia64_boot_param->efi_memmap);
 	if (status != EFI_SUCCESS) {
-		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
-		       "(status=%lu)\n", status);
+		printk(KERN_WARNING "warning: unable to switch EFI into "
+		       "virtual mode (status=%lu)\n", status);
 		return;
 	}
 
 	/*
-	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
+	 * Now that EFI is in virtual mode, we call the EFI functions more
+	 * efficiently:
 	 */
 	efi.get_time = virt_get_time;
 	efi.set_time = virt_set_time;
@@ -606,8 +638,8 @@ efi_enter_virtual_mode (void)
 }
 
 /*
- * Walk the EFI memory map looking for the I/O port range. There can only be one entry of
- * this type, other I/O port ranges should be described via ACPI.
+ * Walk the EFI memory map looking for the I/O port range. There can only be
+ * one entry of this type, other I/O port ranges should be described via ACPI.
  */
 u64
 efi_get_iobase (void)
@@ -678,7 +710,6 @@ efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
 
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
-
 		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
 			return 1;
 	}
@@ -883,7 +914,7 @@ efi_uart_console_only(void)
 				return 1;
 			uart = 0;
 		}
-		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
+		hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
 	}
 	printk(KERN_ERR "Malformed %s value\n", name);
 	return 0;
@@ -921,10 +952,12 @@ find_memmap_space (void)
 		if (!efi_wb(md)) {
 			continue;
 		}
-		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+		if (pmd == NULL || !efi_wb(pmd) ||
+		    efi_md_end(pmd) != md->phys_addr) {
 			contig_low = GRANULEROUNDUP(md->phys_addr);
 			contig_high = efi_md_end(md);
-			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+			for (q = p + efi_desc_size; q < efi_map_end;
+			     q += efi_desc_size) {
 				check_md = q;
 				if (!efi_wb(check_md))
 					break;
@@ -988,8 +1021,9 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
 		md = p;
 		if (!efi_wb(md)) {
-			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
-				    md->type == EFI_BOOT_SERVICES_DATA)) {
+			if (efi_uc(md) &&
+			    (md->type == EFI_CONVENTIONAL_MEMORY ||
+			     md->type == EFI_BOOT_SERVICES_DATA)) {
 				k->attribute = EFI_MEMORY_UC;
 				k->start = md->phys_addr;
 				k->num_pages = md->num_pages;
@@ -997,10 +1031,12 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 			}
 			continue;
 		}
-		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+		if (pmd == NULL || !efi_wb(pmd) ||
+		    efi_md_end(pmd) != md->phys_addr) {
 			contig_low = GRANULEROUNDUP(md->phys_addr);
 			contig_high = efi_md_end(md);
-			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+			for (q = p + efi_desc_size; q < efi_map_end;
+			     q += efi_desc_size) {
 				check_md = q;
 				if (!efi_wb(check_md))
 					break;
@@ -1025,13 +1061,17 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 		if (md->phys_addr < contig_low) {
 			lim = min(efi_md_end(md), contig_low);
 			if (efi_uc(md)) {
-				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
+				if (k > kern_memmap &&
+				    (k-1)->attribute == EFI_MEMORY_UC &&
 				    kmd_end(k-1) == md->phys_addr) {
-					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+					(k-1)->num_pages +=
+						(lim - md->phys_addr)
+						>> EFI_PAGE_SHIFT;
 				} else {
 					k->attribute = EFI_MEMORY_UC;
 					k->start = md->phys_addr;
-					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+					k->num_pages = (lim - md->phys_addr)
+						       >> EFI_PAGE_SHIFT;
 					k++;
 				}
 			}
@@ -1049,7 +1089,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 			} else {
 				k->attribute = EFI_MEMORY_UC;
 				k->start = lim;
-				k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
+				k->num_pages = (efi_md_end(md) - lim)
+					       >> EFI_PAGE_SHIFT;
 				k++;
 			}
 		}
@@ -1151,8 +1192,10 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 			break;
 		}
 
-		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
-			printk(KERN_ERR "failed to alocate resource for iomem\n");
+		if ((res = kzalloc(sizeof(struct resource),
+				   GFP_KERNEL)) == NULL) {
+			printk(KERN_ERR
+			       "failed to alocate resource for iomem\n");
 			return;
 		}
 
@@ -1187,44 +1230,44 @@ efi_initialize_iomem_resources(struct resource *code_resource,
    rsvd_regions are sorted
  */
 unsigned long __init
-kdump_find_rsvd_region (unsigned long size,
-		struct rsvd_region *r, int n)
+kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
 {
 	int i;
 	u64 start, end;
 	u64 alignment = 1UL << _PAGE_SIZE_64M;
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
 	u64 efi_desc_size;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
 	efi_desc_size = ia64_boot_param->efi_memdesc_size;
 
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
 		if (!efi_wb(md))
 			continue;
 		start = ALIGN(md->phys_addr, alignment);
 		end = efi_md_end(md);
 		for (i = 0; i < n; i++) {
 			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
 				if (__pa(r[i].start) > start + size)
 					return start;
 				start = ALIGN(__pa(r[i].end), alignment);
-				if (i < n-1 && __pa(r[i+1].start) < start + size)
-					continue;
-				else
-					break;
+				if (i < n-1 &&
+				    __pa(r[i+1].start) < start + size)
+					continue;
+				else
+					break;
 			}
 		}
 		if (end > start + size)
 			return start;
 	}
 
-	printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
-			size);
+	printk(KERN_WARNING
+	       "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
 	return ~0UL;
 }
 #endif
 
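One idiom recurs throughout the re-wrapped hunks above (walk(),
efi_memmap_intersects(), find_memmap_space(), kdump_find_rsvd_region()): the
EFI memory map is traversed with a void * cursor advanced by efi_desc_size
bytes, because the firmware-reported descriptor stride (efi_memdesc_size) need
not equal sizeof(efi_memory_desc_t). A minimal sketch of that loop, using only
names visible in the diff, with a hypothetical visit() callback standing in
for the per-descriptor work:

/* Sketch of the recurring memory-map walk; visit() is hypothetical. */
static void
example_memmap_walk (void (*visit)(efi_memory_desc_t *md))
{
	void *efi_map_start, *efi_map_end, *p;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/* step by the firmware-reported stride, not sizeof(*md) */
	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size)
		visit(p);
}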