author		Sage Weil <sage@inktank.com>	2013-08-15 14:11:45 -0400
committer	Sage Weil <sage@inktank.com>	2013-08-15 14:11:45 -0400
commit		ee3e542fec6e69bc9fb668698889a37d93950ddf (patch)
tree		e74ee766a4764769ef1d3d45d266b4dea64101d3 /arch/ia64
parent		fe2a801b50c0bb8039d627e5ae1fec249d10ff39 (diff)
parent		f1d6e17f540af37bb1891480143669ba7636c4cf (diff)
Merge remote-tracking branch 'linus/master' into testing
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      |   1
-rw-r--r--  arch/ia64/configs/generic_defconfig    |   2
-rw-r--r--  arch/ia64/configs/gensparse_defconfig  |   2
-rw-r--r--  arch/ia64/configs/tiger_defconfig      |   2
-rw-r--r--  arch/ia64/configs/xen_domu_defconfig   |   2
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c        |  24
-rw-r--r--  arch/ia64/hp/sim/boot/fw-emu.c         |  20
-rw-r--r--  arch/ia64/hp/sim/simeth.c              |   2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c             |   4
-rw-r--r--  arch/ia64/include/asm/mutex.h          |  10
-rw-r--r--  arch/ia64/include/asm/pci.h            |  10
-rw-r--r--  arch/ia64/include/asm/pgtable.h        |   3
-rw-r--r--  arch/ia64/include/uapi/asm/socket.h    |   2
-rw-r--r--  arch/ia64/kernel/acpi.c                |   4
-rw-r--r--  arch/ia64/kernel/efi.c                 |   5
-rw-r--r--  arch/ia64/kernel/err_inject.c          |   8
-rw-r--r--  arch/ia64/kernel/head.S                |   2
-rw-r--r--  arch/ia64/kernel/mca.c                 |  12
-rw-r--r--  arch/ia64/kernel/numa.c                |   4
-rw-r--r--  arch/ia64/kernel/palinfo.c             |   4
-rw-r--r--  arch/ia64/kernel/pci-dma.c             |   9
-rw-r--r--  arch/ia64/kernel/perfmon.c             |  20
-rw-r--r--  arch/ia64/kernel/salinfo.c             |   4
-rw-r--r--  arch/ia64/kernel/setup.c               |  10
-rw-r--r--  arch/ia64/kernel/smpboot.c             |   8
-rw-r--r--  arch/ia64/kernel/topology.c            |  18
-rw-r--r--  arch/ia64/kernel/traps.c               |   2
-rw-r--r--  arch/ia64/kvm/Makefile                 |   7
-rw-r--r--  arch/ia64/mm/contig.c                  |  14
-rw-r--r--  arch/ia64/mm/discontig.c               |   5
-rw-r--r--  arch/ia64/mm/init.c                    |  41
-rw-r--r--  arch/ia64/mm/numa.c                    |   2
-rw-r--r--  arch/ia64/pci/pci.c                    | 239
-rw-r--r--  arch/ia64/sn/kernel/io_init.c          | 122
-rw-r--r--  arch/ia64/sn/kernel/setup.c            |   8
-rw-r--r--  arch/ia64/xen/hypervisor.c             |   2
36 files changed, 294 insertions, 340 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1a2b7749b047..5a768ad8e893 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -376,7 +376,6 @@ config NR_CPUS
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
 	depends on SMP
-	select HOTPLUG
 	default n
 	---help---
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 7913695b2fcb..efbd2929aeb7 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index f8e913365423..f64980dd20c3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a5a9e02e60a0..0f4e9e41f130 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 37b9b422caad..b025acfde5c1 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=m
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bcda5b2d121a..d43daf192b21 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2042,7 +2042,8 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 #endif
 
 static int __init
-acpi_sba_ioc_add(struct acpi_device *device)
+acpi_sba_ioc_add(struct acpi_device *device,
+		 const struct acpi_device_id *not_used)
 {
 	struct ioc *ioc;
 	acpi_status status;
@@ -2090,14 +2091,18 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2090 {"HWP0004", 0}, 2091 {"HWP0004", 0},
2091 {"", 0}, 2092 {"", 0},
2092}; 2093};
2093static struct acpi_driver acpi_sba_ioc_driver = { 2094static struct acpi_scan_handler acpi_sba_ioc_handler = {
2094 .name = "IOC IOMMU Driver", 2095 .ids = hp_ioc_iommu_device_ids,
2095 .ids = hp_ioc_iommu_device_ids, 2096 .attach = acpi_sba_ioc_add,
2096 .ops = {
2097 .add = acpi_sba_ioc_add,
2098 },
2099}; 2097};
2100 2098
2099static int __init acpi_sba_ioc_init_acpi(void)
2100{
2101 return acpi_scan_add_handler(&acpi_sba_ioc_handler);
2102}
2103/* This has to run before acpi_scan_init(). */
2104arch_initcall(acpi_sba_ioc_init_acpi);
2105
2101extern struct dma_map_ops swiotlb_dma_ops; 2106extern struct dma_map_ops swiotlb_dma_ops;
2102 2107
2103static int __init 2108static int __init
@@ -2122,7 +2127,10 @@ sba_init(void)
 	}
 #endif
 
-	acpi_bus_register_driver(&acpi_sba_ioc_driver);
+	/*
+	 * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
+	 * routine, but that only happens if acpi_scan_init() has already run.
+	 */
 	if (!ioc_list) {
 #ifdef CONFIG_IA64_GENERIC
 		/*
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 271f412bda1a..87bf9ad8cf0f 100644
--- a/arch/ia64/hp/sim/boot/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -290,16 +290,16 @@ sys_fw_init (const char *args, int arglen)
 	efi_runtime->hdr.signature = EFI_RUNTIME_SERVICES_SIGNATURE;
 	efi_runtime->hdr.revision = EFI_RUNTIME_SERVICES_REVISION;
 	efi_runtime->hdr.headersize = sizeof(efi_runtime->hdr);
-	efi_runtime->get_time = __pa(&fw_efi_get_time);
-	efi_runtime->set_time = __pa(&efi_unimplemented);
-	efi_runtime->get_wakeup_time = __pa(&efi_unimplemented);
-	efi_runtime->set_wakeup_time = __pa(&efi_unimplemented);
-	efi_runtime->set_virtual_address_map = __pa(&efi_unimplemented);
-	efi_runtime->get_variable = __pa(&efi_unimplemented);
-	efi_runtime->get_next_variable = __pa(&efi_unimplemented);
-	efi_runtime->set_variable = __pa(&efi_unimplemented);
-	efi_runtime->get_next_high_mono_count = __pa(&efi_unimplemented);
-	efi_runtime->reset_system = __pa(&efi_reset_system);
+	efi_runtime->get_time = (void *)__pa(&fw_efi_get_time);
+	efi_runtime->set_time = (void *)__pa(&efi_unimplemented);
+	efi_runtime->get_wakeup_time = (void *)__pa(&efi_unimplemented);
+	efi_runtime->set_wakeup_time = (void *)__pa(&efi_unimplemented);
+	efi_runtime->set_virtual_address_map = (void *)__pa(&efi_unimplemented);
+	efi_runtime->get_variable = (void *)__pa(&efi_unimplemented);
+	efi_runtime->get_next_variable = (void *)__pa(&efi_unimplemented);
+	efi_runtime->set_variable = (void *)__pa(&efi_unimplemented);
+	efi_runtime->get_next_high_mono_count = (void *)__pa(&efi_unimplemented);
+	efi_runtime->reset_system = (void *)__pa(&efi_reset_system);
 
 	efi_tables->guid = SAL_SYSTEM_TABLE_GUID;
 	efi_tables->table = __pa(sal_systab);
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index c13064e422df..d1b04c4c95e3 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -268,7 +268,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev)
 static int
 simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
 {
-	struct net_device *dev = ptr;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct simeth_local *local;
 	struct in_device *in_dev;
 	struct in_ifaddr **ifap = NULL;
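
For context on the helper this hunk switches to: netdevice notifiers are now handed a struct netdev_notifier_info pointer rather than the net_device itself, and callbacks unwrap it with netdev_notifier_info_to_dev(). A minimal illustrative sketch of the new convention (not part of this commit):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	/* ptr is a struct netdev_notifier_info *; unwrap it first. */
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}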
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 331de723c676..3a428f19a001 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -88,8 +88,8 @@ simscsi_setup (char *s)
 	if (strlen(s) > MAX_ROOT_LEN) {
 		printk(KERN_ERR "simscsi_setup: prefix too long---using default %s\n",
 		       simscsi_root);
-	}
-	simscsi_root = s;
+	} else
+		simscsi_root = s;
 	return 1;
 }
 
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index bed73a643a56..f41e66d65e31 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
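
The new signature drops the per-arch fail_fn: the fastpath simply reports -1 and the generic mutex code selects the slowpath itself. Roughly how the generic caller consumes this, as a simplified sketch following kernel/mutex.c (not taken from this diff):

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	}
	/* fastpath returned -1: fall back to the slowpath */
	return __mutex_lock_interruptible_slowpath(lock);
}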
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 5e04b591e423..80775f55f03f 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -89,9 +89,9 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
 #define pci_legacy_read platform_pci_legacy_read
 #define pci_legacy_write platform_pci_legacy_write
 
-struct pci_window {
-	struct resource resource;
-	u64 offset;
+struct iospace_resource {
+	struct list_head list;
+	struct resource res;
 };
 
 struct pci_controller {
@@ -100,12 +100,10 @@ struct pci_controller {
 	int segment;
 	int node;	/* nearest node with memory or -1 for global allocation */
 
-	unsigned int windows;
-	struct pci_window *window;
-
 	void *platform_data;
 };
 
+
 #define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
 #define pci_domain_nr(busdev)     (PCI_CONTROLLER(busdev)->segment)
 
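
The fixed pci_window array hung off pci_controller is gone; I/O port spaces now live as list-linked struct iospace_resource entries holding a plain struct resource. A hypothetical walker over such a list, just to illustrate the new layout (io_resources is the list head added to the per-root info structure later in this series):

/* Illustration only: iterate the per-root I/O space resources. */
static void dump_io_spaces(struct list_head *io_resources)
{
	struct iospace_resource *iospace;

	list_for_each_entry(iospace, io_resources, list)
		pr_info("io space %pR\n", &iospace->res);
}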
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 815810cbbedc..7935115398a6 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -493,9 +493,6 @@ extern void paging_init (void);
 #define pte_to_pgoff(pte)	((pte_val(pte) << 1) >> 3)
 #define pgoff_to_pte(off)	((pte_t) { ((off) << 2) | _PAGE_FILE })
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-		remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 6b4329f18b29..556d0701a155 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@
 
 #define SO_SELECT_ERR_QUEUE	45
 
+#define SO_BUSY_POLL		46
+
 #endif /* _ASM_IA64_SOCKET_H */
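
SO_BUSY_POLL is the per-socket busy-polling knob added in this merge window; its value is a poll budget in microseconds. A hedged userspace sketch (the budget value is an example, not taken from this commit):

#include <sys/socket.h>

/* Ask the kernel to busy-poll this socket for up to 50us per receive. */
static int enable_busy_poll(int fd)
{
	int usecs = 50;

	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
}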
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 335eb07480fe..5eb71d22c3d5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -807,7 +807,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  * ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static __cpuinit
+static
 int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
@@ -882,7 +882,7 @@ __init void prefill_possible_map(void)
 		set_cpu_possible(i, true);
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f034563aeae5..51bce594eb83 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1116,11 +1116,6 @@ efi_memmap_init(u64 *s, u64 *e)
 		if (!is_memory_available(md))
 			continue;
 
-#ifdef CONFIG_CRASH_DUMP
-		/* saved_max_pfn should ignore max_addr= command line arg */
-		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
-			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
-#endif
 		/*
 		 * Round ends inward to granule boundaries
 		 * Give trimmings to uncached allocator
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 2d67317a1ec2..f59c0b844e88 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -225,17 +225,17 @@ static struct attribute_group err_inject_attr_group = {
 	.name = "err_inject"
 };
 /* Add/Remove err_inject interface for CPU device */
-static int __cpuinit err_inject_add_dev(struct device * sys_dev)
+static int err_inject_add_dev(struct device *sys_dev)
 {
 	return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
 }
 
-static int __cpuinit err_inject_remove_dev(struct device * sys_dev)
+static int err_inject_remove_dev(struct device *sys_dev)
 {
 	sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
 	return 0;
 }
-static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
+static int err_inject_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+static struct notifier_block err_inject_cpu_notifier =
 {
 	.notifier_call = err_inject_cpu_callback,
 };
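
This file, like most of the ia64 changes below, only loses its __cpuinit/__cpuinitdata annotations, which are being retired tree-wide; the CPU-hotplug notifier itself is unchanged. For reference, a rough sketch of how such a notifier is typically wired up (illustrative only, not part of the diff):

static int __init example_hotcpu_init(void)
{
	/* future CPU online/offline events now reach the callback above */
	register_hotcpu_notifier(&err_inject_cpu_notifier);
	return 0;
}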
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d3..991ca336b8a2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1035,7 +1035,7 @@ END(ia64_delay_loop)
  * Return a CPU-local timestamp in nano-seconds.  This timestamp is
  * NOT synchronized across CPUs its return value must never be
  * compared against the values returned on another CPU.  The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
  *
  * The return-value of sched_clock() is NOT supposed to wrap-around.
  * If it did, it would cause some scheduling hiccups (at the worst).
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d7396dbb07bb..b8edfa75a83f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -631,7 +631,7 @@ ia64_mca_register_cpev (int cpev)
  * Outputs
  *	None
  */
-void __cpuinit
+void
 ia64_mca_cmc_vector_setup (void)
 {
 	cmcv_reg_t	cmcv;
@@ -1814,7 +1814,7 @@ static struct irqaction mca_cpep_irqaction = {
  * format most of the fields.
  */
 
-static void __cpuinit
+static void
 format_mca_init_stack(void *mca_data, unsigned long offset,
 		const char *type, int cpu)
 {
@@ -1844,7 +1844,7 @@ static void * __init_refok mca_bootmem(void)
 }
 
 /* Do per-CPU MCA-related initialization. */
-void __cpuinit
+void
 ia64_mca_cpu_init(void *cpu_data)
 {
 	void *pal_vaddr;
@@ -1896,7 +1896,7 @@ ia64_mca_cpu_init(void *cpu_data)
 						      PAGE_KERNEL));
 }
 
-static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
+static void ia64_mca_cmc_vector_adjust(void *dummy)
 {
 	unsigned long flags;
 
@@ -1906,7 +1906,7 @@ static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
 	local_irq_restore(flags);
 }
 
-static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
+static int mca_cpu_callback(struct notifier_block *nfb,
 				      unsigned long action,
 				      void *hcpu)
 {
@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
+static struct notifier_block mca_cpu_notifier = {
 	.notifier_call = mca_cpu_callback
 };
 
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index c93420c97409..d288cde93606 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(cpu_to_node_map);
 cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 EXPORT_SYMBOL(node_to_cpu_mask);
 
-void __cpuinit map_cpu_to_node(int cpu, int nid)
+void map_cpu_to_node(int cpu, int nid)
 {
 	int oldnid;
 	if (nid < 0) { /* just initialize by zero */
@@ -51,7 +51,7 @@ void __cpuinit map_cpu_to_node(int cpu, int nid)
 	return;
 }
 
-void __cpuinit unmap_cpu_from_node(int cpu, int nid)
+void unmap_cpu_from_node(int cpu, int nid)
 {
 	WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
 	WARN_ON(cpu_to_node_map[cpu] != nid);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 2b3c2d79256f..ab333284f4b2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -932,7 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
 	.release	= single_release,
 };
 
-static void __cpuinit
+static void
 create_palinfo_proc_entries(unsigned int cpu)
 {
 	pal_func_cpu_u_t f;
@@ -962,7 +962,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	remove_proc_subtree(cpustr, palinfo_dir);
 }
 
-static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
+static int palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int hotcpu = (unsigned long)hcpu;
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1ddcfe5ef353..992c1098c522 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -33,15 +33,6 @@ int force_iommu __read_mostly;
 
 int iommu_pass_through;
 
-/* Dummy device used for NULL arguments (normally ISA). Better would
-   be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to i386. */
-struct device fallback_dev = {
-	.init_name = "fallback device",
-	.coherent_dma_mask = DMA_BIT_MASK(32),
-	.dma_mask = &fallback_dev.coherent_dma_mask,
-};
-
 extern struct dma_map_ops intel_dma_ops;
 
 static int __init pci_iommu_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9ea25fce06d5..5a9ff1c3c3e9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5647,24 +5647,8 @@ pfm_proc_show_header(struct seq_file *m)
 
 	list_for_each(pos, &pfm_buffer_fmt_list) {
 		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
-		seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
-			entry->fmt_uuid[0],
-			entry->fmt_uuid[1],
-			entry->fmt_uuid[2],
-			entry->fmt_uuid[3],
-			entry->fmt_uuid[4],
-			entry->fmt_uuid[5],
-			entry->fmt_uuid[6],
-			entry->fmt_uuid[7],
-			entry->fmt_uuid[8],
-			entry->fmt_uuid[9],
-			entry->fmt_uuid[10],
-			entry->fmt_uuid[11],
-			entry->fmt_uuid[12],
-			entry->fmt_uuid[13],
-			entry->fmt_uuid[14],
-			entry->fmt_uuid[15],
-			entry->fmt_name);
+		seq_printf(m, "format : %16phD %s\n",
+			   entry->fmt_uuid, entry->fmt_name);
 	}
 	spin_unlock(&pfm_buffer_fmt_lock);
 
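
The %phD specifier is the printk hex-dump extension (see Documentation/printk-formats.txt): the field width gives the byte count and the 'D' suffix selects dashes as the separator, which lets the sixteen-argument UUID print collapse into one call. A small hedged example of the same specifier (buffer contents made up):

static void print_uuid_example(void)
{
	static const u8 uuid[4] = { 0x00, 0x01, 0x02, 0x03 };

	/* prints "format : 00-01-02-03" */
	pr_info("format : %4phD\n", uuid);
}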
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 4bc580af67b3..960a396f5929 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -568,7 +568,7 @@ static const struct file_operations salinfo_data_fops = {
 	.llseek  = default_llseek,
 };
 
-static int __cpuinit
+static int
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
 	unsigned int i, cpu = (unsigned long)hcpu;
@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 	return NOTIFY_OK;
 }
 
-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
+static struct notifier_block salinfo_cpu_notifier =
 {
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 13bfdd22afc8..4fc2e9569bb2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -748,7 +748,7 @@ const struct seq_operations cpuinfo_op = {
 #define MAX_BRANDS	8
 static char brandname[MAX_BRANDS][128];
 
-static char * __cpuinit
+static char *
 get_model_name(__u8 family, __u8 model)
 {
 	static int overflow;
@@ -778,7 +778,7 @@ get_model_name(__u8 family, __u8 model)
778 return "Unknown"; 778 return "Unknown";
779} 779}
780 780
781static void __cpuinit 781static void
782identify_cpu (struct cpuinfo_ia64 *c) 782identify_cpu (struct cpuinfo_ia64 *c)
783{ 783{
784 union { 784 union {
@@ -850,7 +850,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
  * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
  * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
  */
-static void __cpuinit
+static void
 get_cache_info(void)
 {
 	unsigned long line_size, max = 1;
@@ -915,10 +915,10 @@ get_cache_info(void)
  * cpu_init() initializes state that is per-CPU.  This function acts
  * as a 'CPU state barrier', nothing should get across.
  */
-void __cpuinit
+void
 cpu_init (void)
 {
-	extern void __cpuinit ia64_mmu_init (void *);
+	extern void ia64_mmu_init(void *);
 	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
 	unsigned long num_phys_stacked;
 	pal_vm_info_2_u_t vmi;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8d87168d218d..547a48d78bd7 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -351,7 +351,7 @@ static inline void smp_setup_percpu_timer(void)
 {
 }
 
-static void __cpuinit
+static void
 smp_callin (void)
 {
 	int cpuid, phys_id, itc_master;
@@ -442,7 +442,7 @@ smp_callin (void)
 /*
  * Activate a secondary processor.  head.S calls this.
  */
-int __cpuinit
+int
 start_secondary (void *unused)
 {
 	/* Early console may use I/O ports */
@@ -459,7 +459,7 @@ start_secondary (void *unused)
 	return 0;
 }
 
-static int __cpuinit
+static int
 do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 {
 	int timeout;
@@ -728,7 +728,7 @@ static inline void set_cpu_sibling_map(int cpu)
 	}
 }
 
-int __cpuinit
+int
 __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int ret;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index dc00b2c1b42a..ca69a5a96dcc 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -135,11 +135,11 @@ struct cpu_cache_info {
 	struct kobject kobj;
 };
 
-static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS] __cpuinitdata;
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
 
 #ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
+static void cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	pal_cache_shared_info_t	csi;
@@ -174,7 +174,7 @@ static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
174 &csi) == PAL_STATUS_SUCCESS); 174 &csi) == PAL_STATUS_SUCCESS);
175} 175}
176#else 176#else
177static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, 177static void cache_shared_cpu_map_setup(unsigned int cpu,
178 struct cache_info * this_leaf) 178 struct cache_info * this_leaf)
179{ 179{
180 cpu_set(cpu, this_leaf->shared_cpu_map); 180 cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -298,7 +298,7 @@ static struct kobj_type cache_ktype_percpu_entry = {
 	.sysfs_ops	= &cache_sysfs_ops,
 };
 
-static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
+static void cpu_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(all_cpu_cache_info[cpu].cache_leaves);
 	all_cpu_cache_info[cpu].cache_leaves = NULL;
@@ -307,7 +307,7 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
 	return;
 }
 
-static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
+static int cpu_cache_sysfs_init(unsigned int cpu)
 {
 	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
@@ -351,7 +351,7 @@ static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
 }
 
 /* Add cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device * sys_dev)
+static int cache_add_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -401,7 +401,7 @@ static int __cpuinit cache_add_dev(struct device * sys_dev)
 }
 
 /* Remove cache interface for CPU device */
-static int __cpuinit cache_remove_dev(struct device * sys_dev)
+static int cache_remove_dev(struct device *sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -425,7 +425,7 @@ static int __cpuinit cache_remove_dev(struct device * sys_dev)
  * When a cpu is hot-plugged, do a check and initiate
  * cache kobject if necessary
  */
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cache_cpu_notifier =
+static struct notifier_block cache_cpu_notifier =
 {
 	.notifier_call = cache_cpu_callback
 };
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f7f9f9c6caf0..d3636e67a98e 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -630,7 +630,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
630 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", 630 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
631 iip, ifa, isr); 631 iip, ifa, isr);
632 force_sig(SIGSEGV, current); 632 force_sig(SIGSEGV, current);
633 break; 633 return;
634 634
635 case 46: 635 case 46:
636 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); 636 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 1a4053789d01..18e45ec49bbf 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -47,12 +47,13 @@ FORCE : $(obj)/$(offsets-file)
 
 ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
 asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
+KVM := ../../../virt/kvm
 
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o irq_comm.o)
+common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
+		$(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
 
 ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
-common-objs += $(addprefix ../../../virt/kvm/, assigned-dev.o iommu.o)
+common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
 endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 67c59ebec899..da5237d636d6 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -156,8 +156,7 @@ static void *cpu_data;
  *
  * Allocate and setup per-cpu data areas.
  */
-void * __cpuinit
-per_cpu_init (void)
+void *per_cpu_init(void)
 {
 	static bool first_time = true;
 	void *cpu0_data = __cpu0_per_cpu;
@@ -295,14 +294,6 @@ find_memory (void)
 	alloc_per_cpu_data();
 }
 
-static int count_pages(u64 start, u64 end, void *arg)
-{
-	unsigned long *count = arg;
-
-	*count += (end - start) >> PAGE_SHIFT;
-	return 0;
-}
-
 /*
  * Set up the page tables.
  */
@@ -313,9 +304,6 @@ paging_init (void)
 	unsigned long max_dma;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	num_physpages = 0;
-	efi_memmap_walk(count_pages, &num_physpages);
-
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ae4db4bd6d97..2de08f4d9930 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -37,7 +37,6 @@ struct early_node_data {
 	struct ia64_node_data *node_data;
 	unsigned long pernode_addr;
 	unsigned long pernode_size;
-	unsigned long num_physpages;
 #ifdef CONFIG_ZONE_DMA
 	unsigned long num_dma_physpages;
 #endif
@@ -593,7 +592,7 @@ void __init find_memory(void)
  * find_pernode_space() does most of this already, we just need to set
  * local_per_cpu_offset
  */
-void __cpuinit *per_cpu_init(void)
+void *per_cpu_init(void)
 {
 	int cpu;
 	static int first_time = 1;
@@ -732,7 +731,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
-	mem_data[node].num_physpages += len >> PAGE_SHIFT;
 #ifdef CONFIG_ZONE_DMA
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
@@ -778,7 +776,6 @@ void __init paging_init(void)
 #endif
 
 	for_each_online_node(node) {
-		num_physpages += mem_data[node].num_physpages;
 		pfn_offset = mem_data[node].min_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d1fe4b402601..b6f7f43424ec 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -154,9 +154,8 @@ ia64_init_addr_space (void)
 void
 free_initmem (void)
 {
-	free_reserved_area((unsigned long)ia64_imva(__init_begin),
-			   (unsigned long)ia64_imva(__init_end),
-			   0, "unused kernel");
+	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
+			   -1, "unused kernel");
 }
 
 void __init
@@ -546,19 +545,6 @@ int __init register_active_ranges(u64 start, u64 len, int nid)
 	return 0;
 }
 
-static int __init
-count_reserved_pages(u64 start, u64 end, void *arg)
-{
-	unsigned long num_reserved = 0;
-	unsigned long *count = arg;
-
-	for (; start < end; start += PAGE_SIZE)
-		if (PageReserved(virt_to_page(start)))
-			++num_reserved;
-	*count += num_reserved;
-	return 0;
-}
-
 int
 find_max_min_low_pfn (u64 start, u64 end, void *arg)
 {
@@ -597,8 +583,6 @@ __setup("nolwsys", nolwsys_setup);
 void __init
 mem_init (void)
 {
-	long reserved_pages, codesize, datasize, initsize;
-	pg_data_t *pgdat;
 	int i;
 
 	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
@@ -616,27 +600,12 @@ mem_init (void)
 
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
-	max_mapnr = max_low_pfn;
 #endif
 
+	set_max_mapnr(max_low_pfn);
 	high_memory = __va(max_low_pfn * PAGE_SIZE);
-
-	for_each_online_pgdat(pgdat)
-		if (pgdat->bdata->node_bootmem_map)
-			totalram_pages += free_all_bootmem_node(pgdat);
-
-	reserved_pages = 0;
-	efi_memmap_walk(count_reserved_pages, &reserved_pages);
-
-	codesize = (unsigned long) _etext - (unsigned long) _stext;
-	datasize = (unsigned long) _edata - (unsigned long) _etext;
-	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
-	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
-	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
-	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
-	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
-
+	free_all_bootmem();
+	mem_init_print_info(NULL);
 
 	/*
 	 * For fsyscall entrpoints with no light-weight handler, use the ordinary
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 4248492b9321..ea21d4cad540 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -86,7 +86,7 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
 	return -1;
 }
 
-void __cpuinit numa_clear_node(int cpu)
+void numa_clear_node(int cpu)
 {
 	unmap_cpu_from_node(cpu, NUMA_NO_NODE);
 }
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index de1474ff0bc5..2326790b7d8b 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -134,6 +134,10 @@ struct pci_root_info {
 	struct acpi_device *bridge;
 	struct pci_controller *controller;
 	struct list_head resources;
+	struct resource *res;
+	resource_size_t *res_offset;
+	unsigned int res_num;
+	struct list_head io_resources;
 	char *name;
 };
 
@@ -153,7 +157,7 @@ new_space (u64 phys_base, int sparse)
 		return i;
 
 	if (num_io_spaces == MAX_IO_SPACES) {
-		printk(KERN_ERR "PCI: Too many IO port spaces "
+		pr_err("PCI: Too many IO port spaces "
 			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
 		return ~0;
 	}
@@ -168,25 +172,22 @@ new_space (u64 phys_base, int sparse)
 static u64 add_io_space(struct pci_root_info *info,
 			struct acpi_resource_address64 *addr)
 {
+	struct iospace_resource *iospace;
 	struct resource *resource;
 	char *name;
 	unsigned long base, min, max, base_port;
 	unsigned int sparse = 0, space_nr, len;
 
-	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
-	if (!resource) {
-		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
-			info->name);
+	len = strlen(info->name) + 32;
+	iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
+	if (!iospace) {
+		dev_err(&info->bridge->dev,
+			"PCI: No memory for %s I/O port space\n",
+			info->name);
 		goto out;
 	}
 
-	len = strlen(info->name) + 32;
-	name = kzalloc(len, GFP_KERNEL);
-	if (!name) {
-		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
-			info->name);
-		goto free_resource;
-	}
+	name = (char *)(iospace + 1);
 
 	min = addr->minimum;
 	max = min + addr->address_length - 1;
@@ -195,7 +196,7 @@ static u64 add_io_space(struct pci_root_info *info,
 
 	space_nr = new_space(addr->translation_offset, sparse);
 	if (space_nr == ~0)
-		goto free_name;
+		goto free_resource;
 
 	base = __pa(io_space[space_nr].mmio_base);
 	base_port = IO_SPACE_BASE(space_nr);
@@ -210,18 +211,23 @@ static u64 add_io_space(struct pci_root_info *info,
 	if (space_nr == 0)
 		sparse = 1;
 
+	resource = &iospace->res;
 	resource->name  = name;
 	resource->flags = IORESOURCE_MEM;
 	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
 	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
-	insert_resource(&iomem_resource, resource);
+	if (insert_resource(&iomem_resource, resource)) {
+		dev_err(&info->bridge->dev,
+			"can't allocate host bridge io space resource %pR\n",
+			resource);
+		goto free_resource;
+	}
 
+	list_add_tail(&iospace->list, &info->io_resources);
 	return base_port;
 
-free_name:
-	kfree(name);
 free_resource:
-	kfree(resource);
+	kfree(iospace);
 out:
 	return ~0;
 }
@@ -265,7 +271,7 @@ static acpi_status count_window(struct acpi_resource *resource, void *data)
 static acpi_status add_window(struct acpi_resource *res, void *data)
 {
 	struct pci_root_info *info = data;
-	struct pci_window *window;
+	struct resource *resource;
 	struct acpi_resource_address64 addr;
 	acpi_status status;
 	unsigned long flags, offset = 0;
@@ -289,55 +295,146 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
 	} else
 		return AE_OK;
 
-	window = &info->controller->window[info->controller->windows++];
-	window->resource.name = info->name;
-	window->resource.flags = flags;
-	window->resource.start = addr.minimum + offset;
-	window->resource.end = window->resource.start + addr.address_length - 1;
-	window->offset = offset;
+	resource = &info->res[info->res_num];
+	resource->name = info->name;
+	resource->flags = flags;
+	resource->start = addr.minimum + offset;
+	resource->end = resource->start + addr.address_length - 1;
+	info->res_offset[info->res_num] = offset;
 
-	if (insert_resource(root, &window->resource)) {
+	if (insert_resource(root, resource)) {
 		dev_err(&info->bridge->dev,
 			"can't allocate host bridge window %pR\n",
-			&window->resource);
+			resource);
 	} else {
 		if (offset)
 			dev_info(&info->bridge->dev, "host bridge window %pR "
 				 "(PCI address [%#llx-%#llx])\n",
-				 &window->resource,
-				 window->resource.start - offset,
-				 window->resource.end - offset);
+				 resource,
+				 resource->start - offset,
+				 resource->end - offset);
 		else
 			dev_info(&info->bridge->dev,
-				 "host bridge window %pR\n",
-				 &window->resource);
+				 "host bridge window %pR\n", resource);
 	}
-
 	/* HP's firmware has a hack to work around a Windows bug.
 	 * Ignore these tiny memory ranges */
-	if (!((window->resource.flags & IORESOURCE_MEM) &&
-	      (window->resource.end - window->resource.start < 16)))
-		pci_add_resource_offset(&info->resources, &window->resource,
-					window->offset);
+	if (!((resource->flags & IORESOURCE_MEM) &&
+	      (resource->end - resource->start < 16)))
+		pci_add_resource_offset(&info->resources, resource,
+					info->res_offset[info->res_num]);
 
+	info->res_num++;
 	return AE_OK;
 }
 
+static void free_pci_root_info_res(struct pci_root_info *info)
+{
+	struct iospace_resource *iospace, *tmp;
+
+	list_for_each_entry_safe(iospace, tmp, &info->io_resources, list)
+		kfree(iospace);
+
+	kfree(info->name);
+	kfree(info->res);
+	info->res = NULL;
+	kfree(info->res_offset);
+	info->res_offset = NULL;
+	info->res_num = 0;
+	kfree(info->controller);
+	info->controller = NULL;
+}
+
+static void __release_pci_root_info(struct pci_root_info *info)
+{
+	int i;
+	struct resource *res;
+	struct iospace_resource *iospace;
+
+	list_for_each_entry(iospace, &info->io_resources, list)
+		release_resource(&iospace->res);
+
+	for (i = 0; i < info->res_num; i++) {
+		res = &info->res[i];
+
+		if (!res->parent)
+			continue;
+
+		if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+			continue;
+
+		release_resource(res);
+	}
+
+	free_pci_root_info_res(info);
+	kfree(info);
+}
+
+static void release_pci_root_info(struct pci_host_bridge *bridge)
+{
+	struct pci_root_info *info = bridge->release_data;
+
+	__release_pci_root_info(info);
+}
+
+static int
+probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
+		    int busnum, int domain)
+{
+	char *name;
+
+	name = kmalloc(16, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	sprintf(name, "PCI Bus %04x:%02x", domain, busnum);
+	info->bridge = device;
+	info->name = name;
+
+	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
+			    &info->res_num);
+	if (info->res_num) {
+		info->res =
+			kzalloc_node(sizeof(*info->res) * info->res_num,
+				     GFP_KERNEL, info->controller->node);
+		if (!info->res) {
+			kfree(name);
+			return -ENOMEM;
+		}
+
+		info->res_offset =
+			kzalloc_node(sizeof(*info->res_offset) * info->res_num,
+				     GFP_KERNEL, info->controller->node);
+		if (!info->res_offset) {
+			kfree(name);
+			kfree(info->res);
+			info->res = NULL;
+			return -ENOMEM;
+		}
+
+		info->res_num = 0;
+		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+				    add_window, info);
+	} else
+		kfree(name);
+
+	return 0;
+}
+
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 {
 	struct acpi_device *device = root->device;
 	int domain = root->segment;
 	int bus = root->secondary.start;
 	struct pci_controller *controller;
-	unsigned int windows = 0;
-	struct pci_root_info info;
+	struct pci_root_info *info = NULL;
+	int busnum = root->secondary.start;
 	struct pci_bus *pbus;
-	char *name;
-	int pxm;
+	int pxm, ret;
 
 	controller = alloc_pci_controller(domain);
 	if (!controller)
-		goto out1;
+		return NULL;
 
 	controller->acpi_handle = device->handle;
 
@@ -347,29 +444,27 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 	controller->node = pxm_to_node(pxm);
 #endif
 
-	INIT_LIST_HEAD(&info.resources);
-	/* insert busn resource at first */
-	pci_add_resource(&info.resources, &root->secondary);
-	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
-			    &windows);
-	if (windows) {
-		controller->window =
-			kzalloc_node(sizeof(*controller->window) * windows,
-				     GFP_KERNEL, controller->node);
-		if (!controller->window)
-			goto out2;
-
-		name = kmalloc(16, GFP_KERNEL);
-		if (!name)
-			goto out3;
-
-		sprintf(name, "PCI Bus %04x:%02x", domain, bus);
-		info.bridge = device;
-		info.controller = controller;
-		info.name = name;
-		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
-				    add_window, &info);
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		dev_err(&device->dev,
+			"pci_bus %04x:%02x: ignored (out of memory)\n",
+			domain, busnum);
+		kfree(controller);
+		return NULL;
 	}
+
+	info->controller = controller;
+	INIT_LIST_HEAD(&info->io_resources);
+	INIT_LIST_HEAD(&info->resources);
+
+	ret = probe_pci_root_info(info, device, busnum, domain);
+	if (ret) {
+		kfree(info->controller);
+		kfree(info);
+		return NULL;
+	}
+	/* insert busn resource at first */
+	pci_add_resource(&info->resources, &root->secondary);
 	/*
 	 * See arch/x86/pci/acpi.c.
 	 * The desired pci bus might already be scanned in a quirk. We
@@ -377,21 +472,17 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 	 * such quirk. So we just ignore the case now.
 	 */
 	pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
-				   &info.resources);
+				   &info->resources);
 	if (!pbus) {
-		pci_free_resource_list(&info.resources);
+		pci_free_resource_list(&info->resources);
+		__release_pci_root_info(info);
 		return NULL;
 	}
 
+	pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
+				    release_pci_root_info, info);
 	pci_scan_child_bus(pbus);
 	return pbus;
-
-out3:
-	kfree(controller->window);
-out2:
-	kfree(controller);
-out1:
-	return NULL;
 }
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
@@ -691,7 +782,7 @@ static void __init set_pci_dfl_cacheline_size(void)
 
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
 	if (status != 0) {
-		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
+		pr_err("%s: ia64_pal_cache_summary() failed "
 			"(status=%ld)\n", __func__, status);
 		return;
 	}
@@ -699,7 +790,7 @@ static void __init set_pci_dfl_cacheline_size(void)
 	status = ia64_pal_cache_config_info(levels - 1,
 				/* cache_type (data_or_unified)= */ 2, &cci);
 	if (status != 0) {
-		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
+		pr_err("%s: ia64_pal_cache_config_info() failed "
 			"(status=%ld)\n", __func__, status);
 		return;
 	}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 238e2c511d94..0b5ce82d203d 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -118,76 +118,26 @@ static void __init sn_fixup_ionodes(void)
118} 118}
119 119
120/* 120/*
121 * sn_pci_legacy_window_fixup - Create PCI controller windows for 121 * sn_pci_legacy_window_fixup - Setup PCI resources for
122 * legacy IO and MEM space. This needs to 122 * legacy IO and MEM space. This needs to
123 * be done here, as the PROM does not have 123 * be done here, as the PROM does not have
124 * ACPI support defining the root buses 124 * ACPI support defining the root buses
125 * and their resources (_CRS), 125 * and their resources (_CRS),
126 */ 126 */
127static void 127static void
128sn_legacy_pci_window_fixup(struct pci_controller *controller, 128sn_legacy_pci_window_fixup(struct resource *res,
129 u64 legacy_io, u64 legacy_mem) 129 u64 legacy_io, u64 legacy_mem)
130{ 130{
131 controller->window = kcalloc(2, sizeof(struct pci_window), 131 res[0].name = "legacy_io";
132 GFP_KERNEL); 132 res[0].flags = IORESOURCE_IO;
133 BUG_ON(controller->window == NULL); 133 res[0].start = legacy_io;
134 controller->window[0].offset = legacy_io; 134 res[0].end = res[0].start + 0xffff;
135 controller->window[0].resource.name = "legacy_io"; 135 res[0].parent = &ioport_resource;
136 controller->window[0].resource.flags = IORESOURCE_IO; 136 res[1].name = "legacy_mem";
137 controller->window[0].resource.start = legacy_io; 137 res[1].flags = IORESOURCE_MEM;
138 controller->window[0].resource.end = 138 res[1].start = legacy_mem;
139 controller->window[0].resource.start + 0xffff; 139 res[1].end = res[1].start + (1024 * 1024) - 1;
140 controller->window[0].resource.parent = &ioport_resource; 140 res[1].parent = &iomem_resource;
141 controller->window[1].offset = legacy_mem;
142 controller->window[1].resource.name = "legacy_mem";
143 controller->window[1].resource.flags = IORESOURCE_MEM;
144 controller->window[1].resource.start = legacy_mem;
145 controller->window[1].resource.end =
146 controller->window[1].resource.start + (1024 * 1024) - 1;
147 controller->window[1].resource.parent = &iomem_resource;
148 controller->windows = 2;
149}
150
151/*
152 * sn_pci_window_fixup() - Create a pci_window for each device resource.
153 * It will setup pci_windows for use by
154 * pcibios_bus_to_resource(), pcibios_resource_to_bus(),
155 * etc.
156 */
157static void
158sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
159 s64 * pci_addrs)
160{
161 struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
162 unsigned int i;
163 unsigned int idx;
164 unsigned int new_count;
165 struct pci_window *new_window;
166
167 if (count == 0)
168 return;
169 idx = controller->windows;
170 new_count = controller->windows + count;
171 new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
172 BUG_ON(new_window == NULL);
173 if (controller->window) {
174 memcpy(new_window, controller->window,
175 sizeof(struct pci_window) * controller->windows);
176 kfree(controller->window);
177 }
178
179 /* Setup a pci_window for each device resource. */
180 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
181 if (pci_addrs[i] == -1)
182 continue;
183
184 new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
185 new_window[idx].resource = dev->resource[i];
186 idx++;
187 }
188
189 controller->windows = new_count;
190 controller->window = new_window;
191} 141}
192 142
193/* 143/*
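Pieced together from the right-hand column, the replacement helper no longer allocates pci_window structures; it simply fills in a caller-supplied two-element struct resource array:

	static void
	sn_legacy_pci_window_fixup(struct resource *res,
				   u64 legacy_io, u64 legacy_mem)
	{
		res[0].name = "legacy_io";
		res[0].flags = IORESOURCE_IO;
		res[0].start = legacy_io;
		res[0].end = res[0].start + 0xffff;
		res[0].parent = &ioport_resource;

		res[1].name = "legacy_mem";
		res[1].flags = IORESOURCE_MEM;
		res[1].start = legacy_mem;
		res[1].end = res[1].start + (1024 * 1024) - 1;
		res[1].parent = &iomem_resource;
	}

The matching pci_window bookkeeping (sn_pci_window_fixup() and the pci_addrs[] tracking inside sn_io_slot_fixup()) is deleted in the hunks that follow, because the generic resource code now gets the same information from the resources handed to the root bus.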
@@ -199,9 +149,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
199void 149void
200sn_io_slot_fixup(struct pci_dev *dev) 150sn_io_slot_fixup(struct pci_dev *dev)
201{ 151{
202 unsigned int count = 0;
203 int idx; 152 int idx;
204 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
205 unsigned long addr, end, size, start; 153 unsigned long addr, end, size, start;
206 struct pcidev_info *pcidev_info; 154 struct pcidev_info *pcidev_info;
207 struct sn_irq_info *sn_irq_info; 155 struct sn_irq_info *sn_irq_info;
@@ -229,7 +177,6 @@ sn_io_slot_fixup(struct pci_dev *dev)
229 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 177 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
230 178
231 if (!pcidev_info->pdi_pio_mapped_addr[idx]) { 179 if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
232 pci_addrs[idx] = -1;
233 continue; 180 continue;
234 } 181 }
235 182
@@ -237,11 +184,8 @@ sn_io_slot_fixup(struct pci_dev *dev)
237 end = dev->resource[idx].end; 184 end = dev->resource[idx].end;
238 size = end - start; 185 size = end - start;
239 if (size == 0) { 186 if (size == 0) {
240 pci_addrs[idx] = -1;
241 continue; 187 continue;
242 } 188 }
243 pci_addrs[idx] = start;
244 count++;
245 addr = pcidev_info->pdi_pio_mapped_addr[idx]; 189 addr = pcidev_info->pdi_pio_mapped_addr[idx];
246 addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET; 190 addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
247 dev->resource[idx].start = addr; 191 dev->resource[idx].start = addr;
@@ -276,11 +220,6 @@ sn_io_slot_fixup(struct pci_dev *dev)
276 IORESOURCE_ROM_BIOS_COPY; 220 IORESOURCE_ROM_BIOS_COPY;
277 } 221 }
278 } 222 }
279 /* Create a pci_window in the pci_controller struct for
280 * each device resource.
281 */
282 if (count > 0)
283 sn_pci_window_fixup(dev, count, pci_addrs);
284 223
285 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info); 224 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
286} 225}
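With the per-device pci_window list gone, address translation for these BARs relies on the offsets recorded when the root bus resources are registered (see sn_pci_controller_fixup() further down). For a window added with pci_add_resource_offset(&resources, res, offset), the generic PCI code converts between CPU (resource) and bus addresses roughly like this simplified model (not the actual pcibios helpers):

	/* offset is defined as: CPU/resource address minus PCI bus address */
	static u64 window_to_bus(u64 cpu_addr, u64 offset) { return cpu_addr - offset; }
	static u64 window_to_cpu(u64 bus_addr, u64 offset) { return bus_addr + offset; }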
@@ -297,8 +236,8 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
297 s64 status = 0; 236 s64 status = 0;
298 struct pci_controller *controller; 237 struct pci_controller *controller;
299 struct pcibus_bussoft *prom_bussoft_ptr; 238 struct pcibus_bussoft *prom_bussoft_ptr;
239 struct resource *res;
300 LIST_HEAD(resources); 240 LIST_HEAD(resources);
301 int i;
302 241
303 status = sal_get_pcibus_info((u64) segment, (u64) busnum, 242 status = sal_get_pcibus_info((u64) segment, (u64) busnum,
304 (u64) ia64_tpa(&prom_bussoft_ptr)); 243 (u64) ia64_tpa(&prom_bussoft_ptr));
@@ -310,32 +249,29 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
310 BUG_ON(!controller); 249 BUG_ON(!controller);
311 controller->segment = segment; 250 controller->segment = segment;
312 251
252 res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
253 BUG_ON(!res);
254
313 /* 255 /*
314 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup(). 256 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
315 * (platform_data will be overwritten later in sn_common_bus_fixup()) 257 * (platform_data will be overwritten later in sn_common_bus_fixup())
316 */ 258 */
317 controller->platform_data = prom_bussoft_ptr; 259 controller->platform_data = prom_bussoft_ptr;
318 260
319 sn_legacy_pci_window_fixup(controller, 261 sn_legacy_pci_window_fixup(res,
320 prom_bussoft_ptr->bs_legacy_io, 262 prom_bussoft_ptr->bs_legacy_io,
321 prom_bussoft_ptr->bs_legacy_mem); 263 prom_bussoft_ptr->bs_legacy_mem);
322 for (i = 0; i < controller->windows; i++) 264 pci_add_resource_offset(&resources, &res[0],
323 pci_add_resource_offset(&resources, 265 prom_bussoft_ptr->bs_legacy_io);
324 &controller->window[i].resource, 266 pci_add_resource_offset(&resources, &res[1],
325 controller->window[i].offset); 267 prom_bussoft_ptr->bs_legacy_mem);
268
326 bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller, 269 bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
327 &resources); 270 &resources);
328 if (bus == NULL) 271 if (bus == NULL) {
329 goto error_return; /* error, or bus already scanned */ 272 kfree(res);
330 273 kfree(controller);
331 bus->sysdata = controller; 274 }
332
333 return;
334
335error_return:
336
337 kfree(controller);
338 return;
339} 275}
340 276
341/* 277/*
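Reading only the right-hand column, the tail of sn_pci_controller_fixup() now builds the root bus resources up front and lets pci_scan_root_bus() do the rest:

	res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	/* platform_data is overwritten later in sn_common_bus_fixup() */
	controller->platform_data = prom_bussoft_ptr;

	sn_legacy_pci_window_fixup(res, prom_bussoft_ptr->bs_legacy_io,
				   prom_bussoft_ptr->bs_legacy_mem);
	pci_add_resource_offset(&resources, &res[0],
				prom_bussoft_ptr->bs_legacy_io);
	pci_add_resource_offset(&resources, &res[1],
				prom_bussoft_ptr->bs_legacy_mem);

	bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
				&resources);
	if (bus == NULL) {	/* error, or bus already scanned */
		kfree(res);
		kfree(controller);
	}

The old explicit bus->sysdata = controller assignment disappears because pci_scan_root_bus() already stores the sysdata pointer it is passed.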
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f82e7b462b7b..53b01b8e2f19 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -192,7 +192,7 @@ void __init early_sn_setup(void)
192} 192}
193 193
194extern int platform_intr_list[]; 194extern int platform_intr_list[];
195static int __cpuinitdata shub_1_1_found; 195static int shub_1_1_found;
196 196
197/* 197/*
198 * sn_check_for_wars 198 * sn_check_for_wars
@@ -200,7 +200,7 @@ static int __cpuinitdata shub_1_1_found;
200 * Set flag for enabling shub specific wars 200 * Set flag for enabling shub specific wars
201 */ 201 */
202 202
203static inline int __cpuinit is_shub_1_1(int nasid) 203static inline int is_shub_1_1(int nasid)
204{ 204{
205 unsigned long id; 205 unsigned long id;
206 int rev; 206 int rev;
@@ -212,7 +212,7 @@ static inline int __cpuinit is_shub_1_1(int nasid)
212 return rev <= 2; 212 return rev <= 2;
213} 213}
214 214
215static void __cpuinit sn_check_for_wars(void) 215static void sn_check_for_wars(void)
216{ 216{
217 int cnode; 217 int cnode;
218 218
@@ -558,7 +558,7 @@ static void __init sn_init_pdas(char **cmdline_p)
558 * Also sets up a few fields in the nodepda. Also known as 558 * Also sets up a few fields in the nodepda. Also known as
559 * platform_cpu_init() by the ia64 machvec code. 559 * platform_cpu_init() by the ia64 machvec code.
560 */ 560 */
561void __cpuinit sn_cpu_init(void) 561void sn_cpu_init(void)
562{ 562{
563 int cpuid; 563 int cpuid;
564 int cpuphyid; 564 int cpuphyid;
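The setup.c hunks above, like the hypervisor.c hunk below, are part of the tree-wide __cpuinit removal that landed in this merge window: the annotation had effectively become a no-op, so dropping it only affects section placement, not behaviour. Schematically (declarations shown for illustration, not copied from the file):

	/* before: tagged for the (now defunct) .cpuinit sections */
	static int __cpuinitdata shub_1_1_found;
	static void __cpuinit sn_check_for_wars(void);

	/* after: ordinary data and text, behaviour unchanged */
	static int shub_1_1_found;
	static void sn_check_for_wars(void);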
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
index 52172eee8591..fab62528a80b 100644
--- a/arch/ia64/xen/hypervisor.c
+++ b/arch/ia64/xen/hypervisor.c
@@ -74,7 +74,7 @@ void __init xen_setup_vcpu_info_placement(void)
74 xen_vcpu_setup(cpu); 74 xen_vcpu_setup(cpu);
75} 75}
76 76
77void __cpuinit 77void
78xen_cpu_init(void) 78xen_cpu_init(void)
79{ 79{
80 xen_smp_intr_init(); 80 xen_smp_intr_init();