author     Tony Luck <tony.luck@intel.com>    2005-10-31 13:51:57 -0500
committer  Tony Luck <tony.luck@intel.com>    2005-10-31 13:51:57 -0500
commit     c7fb577e2a6cb04732541f2dc402bd46747f7558 (patch)
tree       df3b1a1922ed13bfbcc45d08650c38beeb1a7bd1 /arch/ia64/kernel
parent     9cec58dc138d6fcad9f447a19c8ff69f6540e667 (diff)
parent     581c1b14394aee60aff46ea67d05483261ed6527 (diff)
manual update from upstream:
Applied Al's change 06a544971fad0992fe8b92c5647538d573089dd4
to new location of swiotlb.c
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c       13
-rw-r--r--  arch/ia64/kernel/cyclone.c     1
-rw-r--r--  arch/ia64/kernel/efi.c       510
-rw-r--r--  arch/ia64/kernel/irq.c        12
-rw-r--r--  arch/ia64/kernel/mca.c         4
-rw-r--r--  arch/ia64/kernel/module.c      6
-rw-r--r--  arch/ia64/kernel/patch.c      16
-rw-r--r--  arch/ia64/kernel/perfmon.c     3
-rw-r--r--  arch/ia64/kernel/ptrace.c     28
-rw-r--r--  arch/ia64/kernel/setup.c      63
-rw-r--r--  arch/ia64/kernel/smp.c        10
-rw-r--r--  arch/ia64/kernel/smpboot.c     6
-rw-r--r--  arch/ia64/kernel/time.c        4
-rw-r--r--  arch/ia64/kernel/uncached.c   17
14 files changed, 439 insertions, 254 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 7e926471e4ec..9ad94ddf6687 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
-acpi_status __devinit
+static acpi_status __devinit
 acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -890,7 +890,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
 	map_iosapic_to_node(gsi_base, node);
 	return AE_OK;
 }
-#endif /* CONFIG_NUMA */
+
+static int __init
+acpi_map_iosapics (void)
+{
+	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
+	return 0;
+}
+
+fs_initcall(acpi_map_iosapics);
+#endif /* CONFIG_ACPI_NUMA */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
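
The interesting part of this hunk is the registration mechanism: rather than being called from some other init path, the IOSAPIC scan is now scheduled with fs_initcall(), one of the kernel's ordered boot-time initcall levels. As a rough userspace analogue of that pattern (a sketch only — the real kernel collects the function pointers in dedicated linker sections and do_initcalls() runs them level by level; the names below are invented):

    #include <stdio.h>

    /* Toy stand-in for an initcall: in the kernel this would call
     * acpi_get_devices() to scan for IOSAPICs. */
    static int map_iosapics(void)
    {
        printf("scanning for IOSAPICs...\n");
        return 0;
    }

    /* GCC/Clang constructor priorities play the role of initcall
     * levels here (fs_initcall corresponds to level 5 in the kernel);
     * the runtime invokes this before main(), much as do_initcalls()
     * runs registered functions at a fixed point during boot. */
    __attribute__((constructor(105)))
    static void register_map_iosapics(void)
    {
        map_iosapics();
    }

    int main(void)
    {
        puts("main: all registered init functions already ran");
        return 0;
    }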
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 768c7e46957c..6ade3790ce07 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -2,6 +2,7 @@
 #include <linux/smp.h>
 #include <linux/time.h>
 #include <linux/errno.h>
+#include <linux/timex.h>
 #include <asm/io.h>
 
 /* IBM Summit (EXA) Cyclone counter code*/
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 179f230816ed..f72ea6aebcb1 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -239,57 +239,30 @@ is_available_memory (efi_memory_desc_t *md)
 	return 0;
 }
 
-/*
- * Trim descriptor MD so its starts at address START_ADDR. If the descriptor covers
- * memory that is normally available to the kernel, issue a warning that some memory
- * is being ignored.
- */
-static void
-trim_bottom (efi_memory_desc_t *md, u64 start_addr)
-{
-	u64 num_skipped_pages;
-
-	if (md->phys_addr >= start_addr || !md->num_pages)
-		return;
-
-	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
-	if (num_skipped_pages > md->num_pages)
-		num_skipped_pages = md->num_pages;
-
-	if (is_available_memory(md))
-		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
-		       "at 0x%lx\n", __FUNCTION__,
-		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
-		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
-	/*
-	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
-	 * descriptor list to become unsorted. In such a case, md->num_pages will be
-	 * zero, so the Right Thing will happen.
-	 */
-	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
-	md->num_pages -= num_skipped_pages;
-}
-
-static void
-trim_top (efi_memory_desc_t *md, u64 end_addr)
-{
-	u64 num_dropped_pages, md_end_addr;
-
-	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
-	if (md_end_addr <= end_addr || !md->num_pages)
-		return;
-
-	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
-	if (num_dropped_pages > md->num_pages)
-		num_dropped_pages = md->num_pages;
-
-	if (is_available_memory(md))
-		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
-		       "at 0x%lx\n", __FUNCTION__,
-		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
-		       md->phys_addr, end_addr);
-	md->num_pages -= num_dropped_pages;
-}
+typedef struct kern_memdesc {
+	u64 attribute;
+	u64 start;
+	u64 num_pages;
+} kern_memdesc_t;
+
+static kern_memdesc_t *kern_memmap;
+
+static void
+walk (efi_freemem_callback_t callback, void *arg, u64 attr)
+{
+	kern_memdesc_t *k;
+	u64 start, end, voff;
+
+	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
+	for (k = kern_memmap; k->start != ~0UL; k++) {
+		if (k->attribute != attr)
+			continue;
+		start = PAGE_ALIGN(k->start);
+		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
+		if (start < end)
+			if ((*callback)(start + voff, end + voff, arg) < 0)
+				return;
+	}
+}
 
 /*
@@ -299,148 +272,19 @@ trim_top (efi_memory_desc_t *md, u64 end_addr)
 void
 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 {
-	int prev_valid = 0;
-	struct range {
-		u64 start;
-		u64 end;
-	} prev, curr;
-	void *efi_map_start, *efi_map_end, *p, *q;
-	efi_memory_desc_t *md, *check_md;
-	u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
-	unsigned long total_mem = 0;
-
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-
-		/* skip over non-WB memory descriptors; that's all we're interested in... */
-		if (!(md->attribute & EFI_MEMORY_WB))
-			continue;
-
-		/*
-		 * granule_addr is the base of md's first granule.
-		 * [granule_addr - first_non_wb_addr) is guaranteed to
-		 * be contiguous WB memory.
-		 */
-		granule_addr = GRANULEROUNDDOWN(md->phys_addr);
-		first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-
-		if (first_non_wb_addr < md->phys_addr) {
-			trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
-			granule_addr = GRANULEROUNDDOWN(md->phys_addr);
-			first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-		}
-
-		for (q = p; q < efi_map_end; q += efi_desc_size) {
-			check_md = q;
-
-			if ((check_md->attribute & EFI_MEMORY_WB) &&
-			    (check_md->phys_addr == first_non_wb_addr))
-				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
-			else
-				break;		/* non-WB or hole */
-		}
-
-		last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
-		if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
-			trim_top(md, last_granule_addr);
-
-		if (is_available_memory(md)) {
-			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
-				if (md->phys_addr >= max_addr)
-					continue;
-				md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
-				first_non_wb_addr = max_addr;
-			}
-
-			if (total_mem >= mem_limit)
-				continue;
-
-			if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
-				unsigned long limit_addr = md->phys_addr;
-
-				limit_addr += mem_limit - total_mem;
-				limit_addr = GRANULEROUNDDOWN(limit_addr);
-
-				if (md->phys_addr > limit_addr)
-					continue;
-
-				md->num_pages = (limit_addr - md->phys_addr) >>
-						EFI_PAGE_SHIFT;
-				first_non_wb_addr = max_addr = md->phys_addr +
-					(md->num_pages << EFI_PAGE_SHIFT);
-			}
-			total_mem += (md->num_pages << EFI_PAGE_SHIFT);
-
-			if (md->num_pages == 0)
-				continue;
-
-			curr.start = PAGE_OFFSET + md->phys_addr;
-			curr.end = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
-
-			if (!prev_valid) {
-				prev = curr;
-				prev_valid = 1;
-			} else {
-				if (curr.start < prev.start)
-					printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
-
-				if (prev.end == curr.start) {
-					/* merge two consecutive memory ranges */
-					prev.end = curr.end;
-				} else {
-					start = PAGE_ALIGN(prev.start);
-					end = prev.end & PAGE_MASK;
-					if ((end > start) && (*callback)(start, end, arg) < 0)
-						return;
-					prev = curr;
-				}
-			}
-		}
-	}
-	if (prev_valid) {
-		start = PAGE_ALIGN(prev.start);
-		end = prev.end & PAGE_MASK;
-		if (end > start)
-			(*callback)(start, end, arg);
-	}
+	walk(callback, arg, EFI_MEMORY_WB);
 }
 
 /*
- * Walk the EFI memory map to pull out leftover pages in the lower
- * memory regions which do not end up in the regular memory map and
- * stick them into the uncached allocator
- *
- * The regular walk function is significantly more complex than the
- * uncached walk which means it really doesn't make sense to try and
- * marge the two.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for uncached allocator.
  */
-void __init
-efi_memmap_walk_uc (efi_freemem_callback_t callback)
+void
+efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size, start, end;
-
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-		if (md->attribute == EFI_MEMORY_UC) {
-			start = PAGE_ALIGN(md->phys_addr);
-			end = PAGE_ALIGN((md->phys_addr+(md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
-			if ((*callback)(start, end, NULL) < 0)
-				return;
-		}
-	}
+	walk(callback, arg, EFI_MEMORY_UC);
 }
 
-
 /*
  * Look for the PAL_CODE region reported by EFI and maps it using an
  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
@@ -862,3 +706,307 @@ efi_uart_console_only(void)
 	printk(KERN_ERR "Malformed %s value\n", name);
 	return 0;
 }
+
+#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
+
+static inline u64
+kmd_end(kern_memdesc_t *kmd)
+{
+	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
+}
+
+static inline u64
+efi_md_end(efi_memory_desc_t *md)
+{
+	return (md->phys_addr + efi_md_size(md));
+}
+
+static inline int
+efi_wb(efi_memory_desc_t *md)
+{
+	return (md->attribute & EFI_MEMORY_WB);
+}
+
+static inline int
+efi_uc(efi_memory_desc_t *md)
+{
+	return (md->attribute & EFI_MEMORY_UC);
+}
+
+/*
+ * Look for the first granule aligned memory descriptor memory
+ * that is big enough to hold EFI memory map. Make sure this
+ * descriptor is atleast granule sized so it does not get trimmed
+ */
+struct kern_memdesc *
+find_memmap_space (void)
+{
+	u64 contig_low=0, contig_high=0;
+	u64 as = 0, ae;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *pmd = NULL, *check_md;
+	u64 space_needed, efi_desc_size;
+	unsigned long total_mem = 0;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	/*
+	 * Worst case: we need 3 kernel descriptors for each efi descriptor
+	 * (if every entry has a WB part in the middle, and UC head and tail),
+	 * plus one for the end marker.
+	 */
+	space_needed = sizeof(kern_memdesc_t) *
+		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
+
+	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
+		md = p;
+		if (!efi_wb(md)) {
+			continue;
+		}
+		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+			contig_low = GRANULEROUNDUP(md->phys_addr);
+			contig_high = efi_md_end(md);
+			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+				if (!efi_wb(check_md))
+					break;
+				if (contig_high != check_md->phys_addr)
+					break;
+				contig_high = efi_md_end(check_md);
+			}
+			contig_high = GRANULEROUNDDOWN(contig_high);
+		}
+		if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
+			continue;
+
+		/* Round ends inward to granule boundaries */
+		as = max(contig_low, md->phys_addr);
+		ae = min(contig_high, efi_md_end(md));
+
+		/* keep within max_addr= command line arg */
+		ae = min(ae, max_addr);
+		if (ae <= as)
+			continue;
+
+		/* avoid going over mem= command line arg */
+		if (total_mem + (ae - as) > mem_limit)
+			ae -= total_mem + (ae - as) - mem_limit;
+
+		if (ae <= as)
+			continue;
+
+		if (ae - as > space_needed)
+			break;
+	}
+	if (p >= efi_map_end)
+		panic("Can't allocate space for kernel memory descriptors");
+
+	return __va(as);
+}
+
+/*
+ * Walk the EFI memory map and gather all memory available for kernel
+ * to use.  We can allocate partial granules only if the unavailable
+ * parts exist, and are WB.
+ */
+void
+efi_memmap_init(unsigned long *s, unsigned long *e)
+{
+	struct kern_memdesc *k, *prev = 0;
+	u64 contig_low=0, contig_high=0;
+	u64 as, ae, lim;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *pmd = NULL, *check_md;
+	u64 efi_desc_size;
+	unsigned long total_mem = 0;
+
+	k = kern_memmap = find_memmap_space();
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
+		md = p;
+		if (!efi_wb(md)) {
+			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
+				    md->type == EFI_BOOT_SERVICES_DATA)) {
+				k->attribute = EFI_MEMORY_UC;
+				k->start = md->phys_addr;
+				k->num_pages = md->num_pages;
+				k++;
+			}
+			continue;
+		}
+		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+			contig_low = GRANULEROUNDUP(md->phys_addr);
+			contig_high = efi_md_end(md);
+			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+				if (!efi_wb(check_md))
+					break;
+				if (contig_high != check_md->phys_addr)
+					break;
+				contig_high = efi_md_end(check_md);
+			}
+			contig_high = GRANULEROUNDDOWN(contig_high);
+		}
+		if (!is_available_memory(md))
+			continue;
+
+		/*
+		 * Round ends inward to granule boundaries
+		 * Give trimmings to uncached allocator
+		 */
+		if (md->phys_addr < contig_low) {
+			lim = min(efi_md_end(md), contig_low);
+			if (efi_uc(md)) {
+				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
+				    kmd_end(k-1) == md->phys_addr) {
+					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+				} else {
+					k->attribute = EFI_MEMORY_UC;
+					k->start = md->phys_addr;
+					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+					k++;
+				}
+			}
+			as = contig_low;
+		} else
+			as = md->phys_addr;
+
+		if (efi_md_end(md) > contig_high) {
+			lim = max(md->phys_addr, contig_high);
+			if (efi_uc(md)) {
+				if (lim == md->phys_addr && k > kern_memmap &&
+				    (k-1)->attribute == EFI_MEMORY_UC &&
+				    kmd_end(k-1) == md->phys_addr) {
+					(k-1)->num_pages += md->num_pages;
+				} else {
+					k->attribute = EFI_MEMORY_UC;
+					k->start = lim;
+					k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
+					k++;
+				}
+			}
+			ae = contig_high;
+		} else
+			ae = efi_md_end(md);
+
+		/* keep within max_addr= command line arg */
+		ae = min(ae, max_addr);
+		if (ae <= as)
+			continue;
+
+		/* avoid going over mem= command line arg */
+		if (total_mem + (ae - as) > mem_limit)
+			ae -= total_mem + (ae - as) - mem_limit;
+
+		if (ae <= as)
+			continue;
+		if (prev && kmd_end(prev) == md->phys_addr) {
+			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
+			total_mem += ae - as;
+			continue;
+		}
+		k->attribute = EFI_MEMORY_WB;
+		k->start = as;
+		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
+		total_mem += ae - as;
+		prev = k++;
+	}
+	k->start = ~0L; /* end-marker */
+
+	/* reserve the memory we are using for kern_memmap */
+	*s = (u64)kern_memmap;
+	*e = (u64)++k;
+}
+
+void
+efi_initialize_iomem_resources(struct resource *code_resource,
+			       struct resource *data_resource)
+{
+	struct resource *res;
+	void *efi_map_start, *efi_map_end, *p;
+	efi_memory_desc_t *md;
+	u64 efi_desc_size;
+	char *name;
+	unsigned long flags;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	res = NULL;
+
+	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+		md = p;
+
+		if (md->num_pages == 0) /* should not happen */
+			continue;
+
+		flags = IORESOURCE_MEM;
+		switch (md->type) {
+
+			case EFI_MEMORY_MAPPED_IO:
+			case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+				continue;
+
+			case EFI_LOADER_CODE:
+			case EFI_LOADER_DATA:
+			case EFI_BOOT_SERVICES_DATA:
+			case EFI_BOOT_SERVICES_CODE:
+			case EFI_CONVENTIONAL_MEMORY:
+				if (md->attribute & EFI_MEMORY_WP) {
+					name = "System ROM";
+					flags |= IORESOURCE_READONLY;
+				} else {
+					name = "System RAM";
+				}
+				break;
+
+			case EFI_ACPI_MEMORY_NVS:
+				name = "ACPI Non-volatile Storage";
+				flags |= IORESOURCE_BUSY;
+				break;
+
+			case EFI_UNUSABLE_MEMORY:
+				name = "reserved";
+				flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
+				break;
+
+			case EFI_RESERVED_TYPE:
+			case EFI_RUNTIME_SERVICES_CODE:
+			case EFI_RUNTIME_SERVICES_DATA:
+			case EFI_ACPI_RECLAIM_MEMORY:
+			default:
+				name = "reserved";
+				flags |= IORESOURCE_BUSY;
+				break;
+		}
+
+		if ((res = kcalloc(1, sizeof(struct resource), GFP_KERNEL)) == NULL) {
+			printk(KERN_ERR "failed to alocate resource for iomem\n");
+			return;
+		}
+
+		res->name = name;
+		res->start = md->phys_addr;
+		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+		res->flags = flags;
+
+		if (insert_resource(&iomem_resource, res) < 0)
+			kfree(res);
+		else {
+			/*
+			 * We don't know which region contains
+			 * kernel data so we try it repeatedly and
+			 * let the resource manager test it.
+			 */
+			insert_resource(res, code_resource);
+			insert_resource(res, data_resource);
+		}
+	}
+}
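
The shape of this rework is easy to lose in a diff this long: instead of trimming EFI descriptors in place on every walk, the new code builds a kern_memmap array once at boot (efi_memmap_init() above), terminates it with a start == ~0UL marker, and lets both efi_memmap_walk() and efi_memmap_walk_uc() scan it through the shared walk() helper. Below is a self-contained userspace sketch of that consumer side, with simplified types and made-up sample ranges; the kernel version additionally adds a per-attribute virtual offset (PAGE_OFFSET or __IA64_UNCACHED_OFFSET) before invoking the callback, which the sketch drops:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT    14                  /* 16KB pages, as on ia64 */
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

    enum { MEM_WB = 1, MEM_UC = 2 };  /* stand-ins for EFI_MEMORY_WB/UC */

    typedef struct kern_memdesc {
        uint64_t attribute;
        uint64_t start;
        uint64_t num_pages;
    } kern_memdesc_t;

    /* End marker uses start == ~0UL, exactly like the kernel version. */
    static kern_memdesc_t kern_memmap[] = {
        { MEM_WB, 0x0000000, 256 },
        { MEM_UC, 0x4000000,  16 },
        { MEM_WB, 0x8000000, 512 },
        { 0, ~0UL, 0 },
    };

    typedef int (*freemem_callback_t)(uint64_t start, uint64_t end, void *arg);

    /* One loop serves both walkers; only the attribute filter differs. */
    static void walk(freemem_callback_t cb, void *arg, uint64_t attr)
    {
        kern_memdesc_t *k;
        uint64_t start, end;

        for (k = kern_memmap; k->start != ~0UL; k++) {
            if (k->attribute != attr)
                continue;
            start = PAGE_ALIGN(k->start);
            end = (k->start + (k->num_pages << PAGE_SHIFT)) & PAGE_MASK;
            if (start < end && (*cb)(start, end, arg) < 0)
                return;  /* callback asked us to stop */
        }
    }

    static int show(uint64_t s, uint64_t e, void *arg)
    {
        printf("%s range: [0x%llx-0x%llx)\n", (char *)arg,
               (unsigned long long)s, (unsigned long long)e);
        return 0;
    }

    int main(void)
    {
        walk(show, "WB", MEM_WB);  /* efi_memmap_walk() equivalent */
        walk(show, "UC", MEM_UC);  /* efi_memmap_walk_uc() equivalent */
        return 0;
    }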
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 205d98028261..d33244c32759 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j) {
+			seq_printf(p, "CPU%d ",j);
+		}
 		seq_putc(p, '\n');
 	}
 
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j) {
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		}
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, " %s", action->name);
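
This is the first of several hunks in this commit (mca.c, module.c, smp.c, and smpboot.c below get the same treatment) replacing open-coded `for (i = 0; i < NR_CPUS; i++) ... if (cpu_online(i))` loops with the for_each_online_cpu() iterator, which hides the bound and the online test in one place. A toy illustration of the equivalence, using a hypothetical bitmap rather than the kernel's real cpumask machinery:

    #include <stdio.h>

    #define NR_CPUS 8

    /* Toy online mask: CPUs 0, 1 and 5 online. */
    static unsigned long cpu_online_map = 0x23;

    static int cpu_online(int cpu)
    {
        return (cpu_online_map >> cpu) & 1;
    }

    /* The iterator folds the NR_CPUS bound and the online test into one
     * macro, mirroring (but simplifying) the kernel's for_each_online_cpu(). */
    #define for_each_online_cpu(cpu) \
        for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
            if (cpu_online(cpu))

    int main(void)
    {
        int i;

        /* Old style, as removed above: */
        for (i = 0; i < NR_CPUS; i++)
            if (cpu_online(i))
                printf("old: CPU%d\n", i);

        /* New style, as added above: */
        for_each_online_cpu(i)
            printf("new: CPU%d\n", i);
        return 0;
    }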
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d0a5106fba24..52c47da17246 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -508,9 +508,7 @@ ia64_mca_wakeup_all(void)
 	int cpu;
 
 	/* Clear the Rendez checkin flag for all cpus */
-	for(cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
+	for_each_online_cpu(cpu) {
 		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
 			ia64_mca_wakeup(cpu);
 	}
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index f1aca7cffd12..7a2f0a798d12 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,8 +947,8 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
 	unsigned int i;
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			memcpy(pcpudst + __per_cpu_offset[i], src, size);
+	for_each_cpu(i) {
+		memcpy(pcpudst + __per_cpu_offset[i], src, size);
+	}
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 367804a605fa..6a4ac7d70b35 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -64,22 +64,30 @@ ia64_patch (u64 insn_addr, u64 mask, u64 val)
 void
 ia64_patch_imm64 (u64 insn_addr, u64 val)
 {
-	ia64_patch(insn_addr,
+	/* The assembler may generate offset pointing to either slot 1
+	   or slot 2 for a long (2-slot) instruction, occupying slots 1
+	   and 2.  */
+	insn_addr &= -16UL;
+	ia64_patch(insn_addr + 2,
 		   0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
 				     | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
 				     | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
 				     | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
 				     | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
-	ia64_patch(insn_addr - 1, 0x1ffffffffffUL, val >> 22);
+	ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
 }
 
 void
 ia64_patch_imm60 (u64 insn_addr, u64 val)
 {
-	ia64_patch(insn_addr,
+	/* The assembler may generate offset pointing to either slot 1
+	   or slot 2 for a long (2-slot) instruction, occupying slots 1
+	   and 2.  */
+	insn_addr &= -16UL;
+	ia64_patch(insn_addr + 2,
 		   0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
 				     | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
-	ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
+	ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
 }
 
 /*
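
The patch.c change hinges on a detail of the ia64 instruction format: code is packed into 16-byte bundles of three slots, and a long (two-slot) instruction occupies slots 1 and 2, so the assembler may emit a relocation address tagged with either slot number in its low bits. Masking with `insn_addr &= -16UL` recovers the bundle base, after which the fixed offsets `+ 1` and `+ 2` address the two slots deterministically. A small demonstration of just that address arithmetic (hypothetical addresses, no actual bundle encoding):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* A hypothetical patch target: bundle at 0xa000000000001230,
         * with the assembler pointing at slot 2. */
        uint64_t insn_addr = 0xa000000000001230UL + 2;

        uint64_t slot   = insn_addr & 0xfUL;   /* slot tag in low bits */
        uint64_t bundle = insn_addr & -16UL;   /* 16-byte bundle base  */

        printf("slot %llu, bundle base 0x%llx\n",
               (unsigned long long)slot, (unsigned long long)bundle);

        /* After masking, the code can safely address fixed slots,
         * bundle + 1 and bundle + 2, as ia64_patch_imm64() now does. */
        printf("patch at 0x%llx and 0x%llx\n",
               (unsigned long long)(bundle + 1),
               (unsigned long long)(bundle + 2));
        return 0;
    }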
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d71731ee5b61..f7dfc107cb7b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2352,7 +2352,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	insert_vm_struct(mm, vma);
 
 	mm->total_vm  += size >> PAGE_SHIFT;
-	vm_stat_account(vma);
+	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
+							vma_pages(vma));
 	up_write(&task->mm->mmap_sem);
 
 	/*
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index bbb8bc7c0552..4b19d0410632 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -587,8 +587,9 @@ thread_matches (struct task_struct *thread, unsigned long addr)
 static struct task_struct *
 find_thread_for_addr (struct task_struct *child, unsigned long addr)
 {
-	struct task_struct *g, *p;
+	struct task_struct *p;
 	struct mm_struct *mm;
+	struct list_head *this, *next;
 	int mm_users;
 
 	if (!(mm = get_task_mm(child)))
@@ -600,28 +601,21 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 		goto out;		/* not multi-threaded */
 
 	/*
-	 * First, traverse the child's thread-list.  Good for scalability with
-	 * NPTL-threads.
+	 * Traverse the current process' children list.  Every task that
+	 * one attaches to becomes a child.  And it is only attached children
+	 * of the debugger that are of interest (ptrace_check_attach checks
+	 * for this).
 	 */
-	p = child;
-	do {
-		if (thread_matches(p, addr)) {
-			child = p;
-			goto out;
-		}
-		if (mm_users-- <= 1)
-			goto out;
-	} while ((p = next_thread(p)) != child);
-
-	do_each_thread(g, p) {
-		if (child->mm != mm)
+	list_for_each_safe(this, next, &current->children) {
+		p = list_entry(this, struct task_struct, sibling);
+		if (p->mm != mm)
 			continue;
-
 		if (thread_matches(p, addr)) {
 			child = p;
 			goto out;
 		}
-	} while_each_thread(g, p);
+	}
+
 out:
 	mmput(mm);
 	return child;
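
The replacement loop uses the kernel's intrusive-list idiom: list_for_each_safe() walks a list while tolerating removal of the current node, and list_entry() recovers the enclosing structure from its embedded list_head. A compact, self-contained userspace rendering of the same idiom, with a toy task structure and hand-rolled macros standing in for <linux/list.h>:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal intrusive doubly-linked list, in the style of <linux/list.h>. */
    struct list_head { struct list_head *next, *prev; };

    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* "safe" form: caches the next pointer so the current node may be
     * unlinked inside the loop body. */
    #define list_for_each_safe(pos, n, head) \
        for ((pos) = (head)->next, (n) = (pos)->next; \
             (pos) != (head); (pos) = (n), (n) = (pos)->next)

    struct task { int pid; struct list_head sibling; };

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev; new->next = head;
        head->prev->next = new; head->prev = new;
    }

    int main(void)
    {
        struct list_head children = { &children, &children };
        struct task a = { 100 }, b = { 101 }, c = { 102 };
        struct list_head *this, *next;

        list_add_tail(&a.sibling, &children);
        list_add_tail(&b.sibling, &children);
        list_add_tail(&c.sibling, &children);

        /* Same shape as the ptrace.c loop above. */
        list_for_each_safe(this, next, &children) {
            struct task *p = list_entry(this, struct task, sibling);
            printf("child pid %d\n", p->pid);
        }
        return 0;
    }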
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1f5c26dbe705..fc56ca2da358 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -78,6 +78,19 @@ struct screen_info screen_info;
 unsigned long vga_console_iobase;
 unsigned long vga_console_membase;
 
+static struct resource data_resource = {
+	.name	= "Kernel data",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+	.name	= "Kernel code",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
+};
+extern void efi_initialize_iomem_resources(struct resource *,
+		struct resource *);
+extern char _text[], _end[], _etext[];
+
 unsigned long ia64_max_cacheline_size;
 unsigned long ia64_iobase;	/* virtual address for I/O accesses */
 EXPORT_SYMBOL(ia64_iobase);
@@ -171,6 +184,22 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
 	}
 }
 
+/*
+ * Request address space for all standard resources
+ */
+static int __init register_memory(void)
+{
+	code_resource.start = ia64_tpa(_text);
+	code_resource.end   = ia64_tpa(_etext) - 1;
+	data_resource.start = ia64_tpa(_etext);
+	data_resource.end   = ia64_tpa(_end) - 1;
+	efi_initialize_iomem_resources(&code_resource, &data_resource);
+
+	return 0;
+}
+
+__initcall(register_memory);
+
 /**
  * reserve_memory - setup reserved memory areas
  *
@@ -211,6 +240,9 @@ reserve_memory (void)
 	}
 #endif
 
+	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+	n++;
+
 	/* end of memory marker */
 	rsvd_region[n].start = ~0UL;
 	rsvd_region[n].end   = ~0UL;
@@ -244,28 +276,31 @@ find_initrd (void)
 static void __init
 io_port_init (void)
 {
-	extern unsigned long ia64_iobase;
 	unsigned long phys_iobase;
 
 	/*
-	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
+	 * Set `iobase' based on the EFI memory map or, failing that, the
+	 * value firmware left in ar.k0.
 	 *
-	 * The EFI memory map is the "preferred" location to get the I/O port space base,
-	 * rather the relying on AR.KR0. This should become more clear in future SAL
-	 * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
-	 * found in the memory map.
+	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
+	 * the port's virtual address, so ia32_load_state() loads it with a
+	 * user virtual address.  But in ia64 mode, glibc uses the
+	 * *physical* address in ar.k0 to mmap the appropriate area from
+	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
+	 * cases, user-mode can only use the legacy 0-64K I/O port space.
+	 *
+	 * ar.k0 is not involved in kernel I/O port accesses, which can use
+	 * any of the I/O port spaces and are done via MMIO using the
+	 * virtual mmio_base from the appropriate io_space[].
 	 */
 	phys_iobase = efi_get_iobase();
-	if (phys_iobase)
-		/* set AR.KR0 since this is all we use it for anyway */
-		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
-	else {
+	if (!phys_iobase) {
 		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
-		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
-		       "to AR.KR0\n");
-		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+		printk(KERN_INFO "No I/O port range found in EFI memory map, "
+			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
 	}
 	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
 
 	/* setup legacy IO port space */
 	io_space[0].mmio_base = ia64_iobase;
@@ -526,7 +561,7 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
 #ifdef CONFIG_SMP
-	seq_printf(m, "siblings : %u\n", c->num_log);
+	seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
 	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
 		seq_printf(m,
 			   "physical id: %u\n"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0166a9847095..657ac99a451c 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op)
 {
 	unsigned int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -199,9 +199,9 @@ send_IPI_all (int op)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			send_IPI_single(i, op);
+	for_each_online_cpu(i) {
+		send_IPI_single(i, op);
+	}
 }
 
 /*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 7d72c0d872b3..400a48987124 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -694,9 +694,9 @@ smp_cpus_done (unsigned int dummy)
 	 * Allow the user to impress friends.
 	 */
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (cpu_online(cpu))
-			bogosum += cpu_data(cpu)->loops_per_jiffy;
+	for_each_online_cpu(cpu) {
+		bogosum += cpu_data(cpu)->loops_per_jiffy;
+	}
 
 	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 8b8a5a45b621..5b7e736f3b49 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -32,10 +32,6 @@
 
 extern unsigned long wall_jiffies;
 
-u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
 #define TIME_KEEPER_ID	0	/* smp_processor_id() of time-keeper */
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 4e9d06c48a8b..c6d40446c2c4 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -205,23 +205,18 @@ EXPORT_SYMBOL(uncached_free_page);
 static int __init
 uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
 {
-	long length;
-	unsigned long vstart, vend;
+	long length = end - start;
 	int node;
 
-	length = end - start;
-	vstart = start + __IA64_UNCACHED_OFFSET;
-	vend = end + __IA64_UNCACHED_OFFSET;
-
 	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
 
-	memset((char *)vstart, 0, length);
+	memset((char *)start, 0, length);
 
-	node = paddr_to_nid(start);
+	node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
 
-	for (; vstart < vend ; vstart += PAGE_SIZE) {
-		dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
-		gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE);
+	for (; start < end ; start += PAGE_SIZE) {
+		dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
+		gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
 	}
 
 	return 0;