Diffstat (limited to 'arch/ia64')
36 files changed, 443 insertions, 267 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942..2d7f56a98e0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
87 | bool | 87 | bool |
88 | default y | 88 | default y |
89 | 89 | ||
90 | config HAVE_LEGACY_PER_CPU_AREA | ||
91 | def_bool y | ||
92 | |||
93 | config HAVE_SETUP_PER_CPU_AREA | 90 | config HAVE_SETUP_PER_CPU_AREA |
94 | def_bool y | 91 | def_bool y |
95 | 92 | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f332e3fe423..e14c492a8a9 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
677 | spin_unlock_irqrestore(&ioc->saved_lock, flags); | 677 | spin_unlock_irqrestore(&ioc->saved_lock, flags); |
678 | 678 | ||
679 | pide = sba_search_bitmap(ioc, dev, pages_needed, 0); | 679 | pide = sba_search_bitmap(ioc, dev, pages_needed, 0); |
680 | if (unlikely(pide >= (ioc->res_size << 3))) | 680 | if (unlikely(pide >= (ioc->res_size << 3))) { |
681 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | 681 | printk(KERN_WARNING "%s: I/O MMU @ %p is" |
682 | ioc->ioc_hpa); | 682 | "out of mapping resources, %u %u %lx\n", |
683 | __func__, ioc->ioc_hpa, ioc->res_size, | ||
684 | pages_needed, dma_get_seg_boundary(dev)); | ||
685 | return -1; | ||
686 | } | ||
683 | #else | 687 | #else |
684 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | 688 | printk(KERN_WARNING "%s: I/O MMU @ %p is" |
685 | ioc->ioc_hpa); | 689 | "out of mapping resources, %u %u %lx\n", |
690 | __func__, ioc->ioc_hpa, ioc->res_size, | ||
691 | pages_needed, dma_get_seg_boundary(dev)); | ||
692 | return -1; | ||
686 | #endif | 693 | #endif |
687 | } | 694 | } |
688 | } | 695 | } |
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
965 | #endif | 972 | #endif |
966 | 973 | ||
967 | pide = sba_alloc_range(ioc, dev, size); | 974 | pide = sba_alloc_range(ioc, dev, size); |
975 | if (pide < 0) | ||
976 | return 0; | ||
968 | 977 | ||
969 | iovp = (dma_addr_t) pide << iovp_shift; | 978 | iovp = (dma_addr_t) pide << iovp_shift; |
970 | 979 | ||
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1320 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 1329 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
1321 | int n_mappings = 0; | 1330 | int n_mappings = 0; |
1322 | unsigned int max_seg_size = dma_get_max_seg_size(dev); | 1331 | unsigned int max_seg_size = dma_get_max_seg_size(dev); |
1332 | int idx; | ||
1323 | 1333 | ||
1324 | while (nents > 0) { | 1334 | while (nents > 0) { |
1325 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | 1335 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); |
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1418 | vcontig_sg->dma_length = vcontig_len; | 1428 | vcontig_sg->dma_length = vcontig_len; |
1419 | dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; | 1429 | dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; |
1420 | ASSERT(dma_len <= DMA_CHUNK_SIZE); | 1430 | ASSERT(dma_len <= DMA_CHUNK_SIZE); |
1421 | dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG | 1431 | idx = sba_alloc_range(ioc, dev, dma_len); |
1422 | | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift) | 1432 | if (idx < 0) { |
1423 | | dma_offset); | 1433 | dma_sg->dma_length = 0; |
1434 | return -1; | ||
1435 | } | ||
1436 | dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift) | ||
1437 | | dma_offset); | ||
1424 | n_mappings++; | 1438 | n_mappings++; |
1425 | } | 1439 | } |
1426 | 1440 | ||
1427 | return n_mappings; | 1441 | return n_mappings; |
1428 | } | 1442 | } |
1429 | 1443 | ||
1430 | 1444 | static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, | |
1445 | int nents, enum dma_data_direction dir, | ||
1446 | struct dma_attrs *attrs); | ||
1431 | /** | 1447 | /** |
1432 | * sba_map_sg - map Scatter/Gather list | 1448 | * sba_map_sg - map Scatter/Gather list |
1433 | * @dev: instance of PCI owned by the driver that's asking. | 1449 | * @dev: instance of PCI owned by the driver that's asking. |
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1493 | ** Access to the virtual address is what forces a two pass algorithm. | 1509 | ** Access to the virtual address is what forces a two pass algorithm. |
1494 | */ | 1510 | */ |
1495 | coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); | 1511 | coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); |
1512 | if (coalesced < 0) { | ||
1513 | sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs); | ||
1514 | return 0; | ||
1515 | } | ||
1496 | 1516 | ||
1497 | /* | 1517 | /* |
1498 | ** Program the I/O Pdir | 1518 | ** Program the I/O Pdir |
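Taken together, the sba_iommu.c hunks above turn IOMMU map-entry exhaustion from a panic() into a warning plus an error return that propagates through sba_alloc_range(), sba_map_page() and sba_map_sg_attrs(). A minimal caller-side sketch of what this enables follows; it is not part of the patch, and the device and scatterlist names are illustrative. Per the DMA API, dma_map_sg() returning 0 signals mapping failure, which is exactly what sba_map_sg_attrs() now produces when the bitmap search fails.

    /* Hedged sketch (not from this patch): back off instead of crashing. */
    int mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
    if (mapped == 0)
            return -ENOMEM;    /* IOMMU out of mapping resources */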
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea..65772574261 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
11 | #include <asm/intrinsics.h> | 11 | #include <asm/intrinsics.h> |
12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
13 | 13 | ||
14 | #define USE_ELF_CORE_DUMP 1 | ||
15 | |||
16 | /* Override elfcore.h */ | 14 | /* Override elfcore.h */ |
17 | #define _LINUX_ELFCORE_H 1 | 15 | #define _LINUX_ELFCORE_H 1 |
18 | typedef unsigned int elf_greg_t; | 16 | typedef unsigned int elf_greg_t; |
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9f..6ebc229a1c5 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
127 | * @addr: Address to start counting from | 127 | * @addr: Address to start counting from |
128 | * | 128 | * |
129 | * Similarly to clear_bit_unlock, the implementation uses a store | 129 | * Similarly to clear_bit_unlock, the implementation uses a store |
130 | * with release semantics. See also __raw_spin_unlock(). | 130 | * with release semantics. See also arch_spin_unlock(). |
131 | */ | 131 | */ |
132 | static __inline__ void | 132 | static __inline__ void |
133 | __clear_bit_unlock(int nr, void *addr) | 133 | __clear_bit_unlock(int nr, void *addr) |
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e..7d09a09cdaa 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
73 | if (!dev->dma_mask) | 73 | if (!dev->dma_mask) |
74 | return 0; | 74 | return 0; |
75 | 75 | ||
76 | return addr + size <= *dev->dma_mask; | 76 | return addr + size - 1 <= *dev->dma_mask; |
77 | } | 77 | } |
78 | 78 | ||
79 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | 79 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
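The dma_capable() hunk is an off-by-one fix: the old test rejected a buffer whose last byte sits exactly at the top of the device's DMA mask. A worked example of the boundary case, with illustrative values not taken from the patch:

    u64 mask = 0xffffffffULL;                 /* 32-bit DMA mask          */
    u64 addr = 0xfffff000ULL, size = 0x1000;  /* last byte is 0xffffffff  */
    /* old: addr + size     <= mask -> 0x100000000 <= 0xffffffff -> false */
    /* new: addr + size - 1 <= mask ->  0xffffffff <= 0xffffffff -> true  */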
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029c..e14108b19c0 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
25 | #define ELF_DATA ELFDATA2LSB | 25 | #define ELF_DATA ELFDATA2LSB |
26 | #define ELF_ARCH EM_IA_64 | 26 | #define ELF_ARCH EM_IA_64 |
27 | 27 | ||
28 | #define USE_ELF_CORE_DUMP | ||
29 | #define CORE_DUMP_USE_REGSET | 28 | #define CORE_DUMP_USE_REGSET |
30 | 29 | ||
31 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are | 30 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are |
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf..bf2e37493e0 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
59 | extern int ia64_first_device_vector; | 59 | extern int ia64_first_device_vector; |
60 | extern int ia64_last_device_vector; | 60 | extern int ia64_last_device_vector; |
61 | 61 | ||
62 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG)) | ||
63 | /* Reserve the lower priority vector than device vectors for "move IRQ" IPI */ | ||
64 | #define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */ | ||
65 | #define IA64_DEF_FIRST_DEVICE_VECTOR 0x31 | ||
66 | #else | ||
62 | #define IA64_DEF_FIRST_DEVICE_VECTOR 0x30 | 67 | #define IA64_DEF_FIRST_DEVICE_VECTOR 0x30 |
68 | #endif | ||
63 | #define IA64_DEF_LAST_DEVICE_VECTOR 0xe7 | 69 | #define IA64_DEF_LAST_DEVICE_VECTOR 0xe7 |
64 | #define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector | 70 | #define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector |
65 | #define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector | 71 | #define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector |
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d94..cc8335eb311 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
424 | extern void __iomem * ioremap(unsigned long offset, unsigned long size); | 424 | extern void __iomem * ioremap(unsigned long offset, unsigned long size); |
425 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); | 425 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); |
426 | extern void iounmap (volatile void __iomem *addr); | 426 | extern void iounmap (volatile void __iomem *addr); |
427 | extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); | ||
428 | extern void early_iounmap (volatile void __iomem *addr, unsigned long size); | ||
427 | 429 | ||
428 | /* | 430 | /* |
429 | * String version of IO memory access ops: | 431 | * String version of IO memory access ops: |
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a78..43f96ab18fa 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
106 | unsigned long os_status; /* OS status to SAL, enum below */ | 106 | unsigned long os_status; /* OS status to SAL, enum below */ |
107 | unsigned long context; /* 0 if return to same context | 107 | unsigned long context; /* 0 if return to same context |
108 | 1 if return to new context */ | 108 | 1 if return to new context */ |
109 | |||
110 | /* I-resources */ | ||
111 | unsigned long iip; | ||
112 | unsigned long ipsr; | ||
113 | unsigned long ifs; | ||
109 | }; | 114 | }; |
110 | 115 | ||
111 | enum { | 116 | enum { |
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017..61c7b1750b1 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
61 | 61 | ||
62 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 62 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
63 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ | 63 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ |
64 | extern unsigned long vmalloc_end; | 64 | extern unsigned long VMALLOC_END; |
65 | extern struct page *vmem_map; | 65 | extern struct page *vmem_map; |
66 | extern int find_largest_hole(u64 start, u64 end, void *arg); | 66 | extern int find_largest_hole(u64 start, u64 end, void *arg); |
67 | extern int create_mem_map_page_table(u64 start, u64 end, void *arg); | 67 | extern int create_mem_map_page_table(u64 start, u64 end, void *arg); |
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf4..6a8a27cfae3 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
22 | 22 | ||
23 | #include <asm/mmzone.h> | 23 | #include <asm/mmzone.h> |
24 | 24 | ||
25 | #define NUMA_NO_NODE -1 | ||
26 | |||
27 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; | 25 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; |
28 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; | 26 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; |
29 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; | 27 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; |
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e..69bf13857a9 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
228 | #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) | 228 | #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) |
229 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 229 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
230 | # define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) | 230 | # define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) |
231 | # define VMALLOC_END vmalloc_end | 231 | extern unsigned long VMALLOC_END; |
232 | extern unsigned long vmalloc_end; | ||
233 | #else | 232 | #else |
234 | #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) | 233 | #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) |
235 | /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ | 234 | /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ |
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef..7fa90f73f6b 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
229 | #endif | 229 | #endif |
230 | }; | 230 | }; |
231 | 231 | ||
232 | DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 232 | DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * The "local" data variable. It refers to the per-CPU data of the currently executing | 235 | * The "local" data variable. It refers to the per-CPU data of the currently executing |
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
237 | * Do not use the address of local_cpu_data, since it will be different from | 237 | * Do not use the address of local_cpu_data, since it will be different from |
238 | * cpu_data(smp_processor_id())! | 238 | * cpu_data(smp_processor_id())! |
239 | */ | 239 | */ |
240 | #define local_cpu_data (&__ia64_per_cpu_var(cpu_info)) | 240 | #define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info)) |
241 | #define cpu_data(cpu) (&per_cpu(cpu_info, cpu)) | 241 | #define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu)) |
242 | 242 | ||
243 | extern void print_cpu_info (struct cpuinfo_ia64 *); | 243 | extern void print_cpu_info (struct cpuinfo_ia64 *); |
244 | 244 | ||
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b1578..e8762688e8e 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
47 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 47 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
48 | 48 | ||
49 | #define __RWSEM_INITIALIZER(name) \ | 49 | #define __RWSEM_INITIALIZER(name) \ |
50 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | 50 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ |
51 | LIST_HEAD_INIT((name).wait_list) } | 51 | LIST_HEAD_INIT((name).wait_list) } |
52 | 52 | ||
53 | #define DECLARE_RWSEM(name) \ | 53 | #define DECLARE_RWSEM(name) \ |
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516..1a91c9121d1 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | 19 | ||
20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) | 20 | #define arch_spin_lock_init(x) ((x)->lock = 0) |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Ticket locks are conceptually two parts, one indicating the current head of | 23 | * Ticket locks are conceptually two parts, one indicating the current head of |
@@ -38,7 +38,7 @@
38 | #define TICKET_BITS 15 | 38 | #define TICKET_BITS 15 |
39 | #define TICKET_MASK ((1 << TICKET_BITS) - 1) | 39 | #define TICKET_MASK ((1 << TICKET_BITS) - 1) |
40 | 40 | ||
41 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 41 | static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) |
42 | { | 42 | { |
43 | int *p = (int *)&lock->lock, ticket, serve; | 43 | int *p = (int *)&lock->lock, ticket, serve; |
44 | 44 | ||
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
58 | } | 58 | } |
59 | } | 59 | } |
60 | 60 | ||
61 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 61 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
62 | { | 62 | { |
63 | int tmp = ACCESS_ONCE(lock->lock); | 63 | int tmp = ACCESS_ONCE(lock->lock); |
64 | 64 | ||
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 70 | static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) |
71 | { | 71 | { |
72 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; | 72 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; |
73 | 73 | ||
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
75 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; | 75 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; |
76 | } | 76 | } |
77 | 77 | ||
78 | static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) | 78 | static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) |
79 | { | 79 | { |
80 | int *p = (int *)&lock->lock, ticket; | 80 | int *p = (int *)&lock->lock, ticket; |
81 | 81 | ||
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
89 | } | 89 | } |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) | 92 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) |
93 | { | 93 | { |
94 | long tmp = ACCESS_ONCE(lock->lock); | 94 | long tmp = ACCESS_ONCE(lock->lock); |
95 | 95 | ||
96 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); | 96 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | 99 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) |
100 | { | 100 | { |
101 | long tmp = ACCESS_ONCE(lock->lock); | 101 | long tmp = ACCESS_ONCE(lock->lock); |
102 | 102 | ||
103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; | 103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 106 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
107 | { | 107 | { |
108 | return __ticket_spin_is_locked(lock); | 108 | return __ticket_spin_is_locked(lock); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | 111 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
112 | { | 112 | { |
113 | return __ticket_spin_is_contended(lock); | 113 | return __ticket_spin_is_contended(lock); |
114 | } | 114 | } |
115 | #define __raw_spin_is_contended __raw_spin_is_contended | 115 | #define arch_spin_is_contended arch_spin_is_contended |
116 | 116 | ||
117 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) | 117 | static __always_inline void arch_spin_lock(arch_spinlock_t *lock) |
118 | { | 118 | { |
119 | __ticket_spin_lock(lock); | 119 | __ticket_spin_lock(lock); |
120 | } | 120 | } |
121 | 121 | ||
122 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) | 122 | static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) |
123 | { | 123 | { |
124 | return __ticket_spin_trylock(lock); | 124 | return __ticket_spin_trylock(lock); |
125 | } | 125 | } |
126 | 126 | ||
127 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) | 127 | static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) |
128 | { | 128 | { |
129 | __ticket_spin_unlock(lock); | 129 | __ticket_spin_unlock(lock); |
130 | } | 130 | } |
131 | 131 | ||
132 | static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, | 132 | static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, |
133 | unsigned long flags) | 133 | unsigned long flags) |
134 | { | 134 | { |
135 | __raw_spin_lock(lock); | 135 | arch_spin_lock(lock); |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 138 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
139 | { | 139 | { |
140 | __ticket_spin_unlock_wait(lock); | 140 | __ticket_spin_unlock_wait(lock); |
141 | } | 141 | } |
142 | 142 | ||
143 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 143 | #define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
144 | #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) | 144 | #define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) |
145 | 145 | ||
146 | #ifdef ASM_SUPPORTED | 146 | #ifdef ASM_SUPPORTED |
147 | 147 | ||
148 | static __always_inline void | 148 | static __always_inline void |
149 | __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) | 149 | arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) |
150 | { | 150 | { |
151 | __asm__ __volatile__ ( | 151 | __asm__ __volatile__ ( |
152 | "tbit.nz p6, p0 = %1,%2\n" | 152 | "tbit.nz p6, p0 = %1,%2\n" |
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
169 | : "p6", "p7", "r2", "memory"); | 169 | : "p6", "p7", "r2", "memory"); |
170 | } | 170 | } |
171 | 171 | ||
172 | #define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) | 172 | #define arch_read_lock(lock) arch_read_lock_flags(lock, 0) |
173 | 173 | ||
174 | #else /* !ASM_SUPPORTED */ | 174 | #else /* !ASM_SUPPORTED */ |
175 | 175 | ||
176 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) | 176 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) |
177 | 177 | ||
178 | #define __raw_read_lock(rw) \ | 178 | #define arch_read_lock(rw) \ |
179 | do { \ | 179 | do { \ |
180 | raw_rwlock_t *__read_lock_ptr = (rw); \ | 180 | arch_rwlock_t *__read_lock_ptr = (rw); \ |
181 | \ | 181 | \ |
182 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ | 182 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ |
183 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 183 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
@@ -188,16 +188,16 @@ do { \
188 | 188 | ||
189 | #endif /* !ASM_SUPPORTED */ | 189 | #endif /* !ASM_SUPPORTED */ |
190 | 190 | ||
191 | #define __raw_read_unlock(rw) \ | 191 | #define arch_read_unlock(rw) \ |
192 | do { \ | 192 | do { \ |
193 | raw_rwlock_t *__read_lock_ptr = (rw); \ | 193 | arch_rwlock_t *__read_lock_ptr = (rw); \ |
194 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 194 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
195 | } while (0) | 195 | } while (0) |
196 | 196 | ||
197 | #ifdef ASM_SUPPORTED | 197 | #ifdef ASM_SUPPORTED |
198 | 198 | ||
199 | static __always_inline void | 199 | static __always_inline void |
200 | __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) | 200 | arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) |
201 | { | 201 | { |
202 | __asm__ __volatile__ ( | 202 | __asm__ __volatile__ ( |
203 | "tbit.nz p6, p0 = %1, %2\n" | 203 | "tbit.nz p6, p0 = %1, %2\n" |
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
221 | : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); | 221 | : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); |
222 | } | 222 | } |
223 | 223 | ||
224 | #define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) | 224 | #define arch_write_lock(rw) arch_write_lock_flags(rw, 0) |
225 | 225 | ||
226 | #define __raw_write_trylock(rw) \ | 226 | #define arch_write_trylock(rw) \ |
227 | ({ \ | 227 | ({ \ |
228 | register long result; \ | 228 | register long result; \ |
229 | \ | 229 | \ |
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
235 | (result == 0); \ | 235 | (result == 0); \ |
236 | }) | 236 | }) |
237 | 237 | ||
238 | static inline void __raw_write_unlock(raw_rwlock_t *x) | 238 | static inline void arch_write_unlock(arch_rwlock_t *x) |
239 | { | 239 | { |
240 | u8 *y = (u8 *)x; | 240 | u8 *y = (u8 *)x; |
241 | barrier(); | 241 | barrier(); |
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
244 | 244 | ||
245 | #else /* !ASM_SUPPORTED */ | 245 | #else /* !ASM_SUPPORTED */ |
246 | 246 | ||
247 | #define __raw_write_lock_flags(l, flags) __raw_write_lock(l) | 247 | #define arch_write_lock_flags(l, flags) arch_write_lock(l) |
248 | 248 | ||
249 | #define __raw_write_lock(l) \ | 249 | #define arch_write_lock(l) \ |
250 | ({ \ | 250 | ({ \ |
251 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 251 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
252 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ | 252 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ |
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
257 | } while (ia64_val); \ | 257 | } while (ia64_val); \ |
258 | }) | 258 | }) |
259 | 259 | ||
260 | #define __raw_write_trylock(rw) \ | 260 | #define arch_write_trylock(rw) \ |
261 | ({ \ | 261 | ({ \ |
262 | __u64 ia64_val; \ | 262 | __u64 ia64_val; \ |
263 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 263 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
265 | (ia64_val == 0); \ | 265 | (ia64_val == 0); \ |
266 | }) | 266 | }) |
267 | 267 | ||
268 | static inline void __raw_write_unlock(raw_rwlock_t *x) | 268 | static inline void arch_write_unlock(arch_rwlock_t *x) |
269 | { | 269 | { |
270 | barrier(); | 270 | barrier(); |
271 | x->write_lock = 0; | 271 | x->write_lock = 0; |
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
273 | 273 | ||
274 | #endif /* !ASM_SUPPORTED */ | 274 | #endif /* !ASM_SUPPORTED */ |
275 | 275 | ||
276 | static inline int __raw_read_trylock(raw_rwlock_t *x) | 276 | static inline int arch_read_trylock(arch_rwlock_t *x) |
277 | { | 277 | { |
278 | union { | 278 | union { |
279 | raw_rwlock_t lock; | 279 | arch_rwlock_t lock; |
280 | __u32 word; | 280 | __u32 word; |
281 | } old, new; | 281 | } old, new; |
282 | old.lock = new.lock = *x; | 282 | old.lock = new.lock = *x; |
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; | 285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; |
286 | } | 286 | } |
287 | 287 | ||
288 | #define _raw_spin_relax(lock) cpu_relax() | 288 | #define arch_spin_relax(lock) cpu_relax() |
289 | #define _raw_read_relax(lock) cpu_relax() | 289 | #define arch_read_relax(lock) cpu_relax() |
290 | #define _raw_write_relax(lock) cpu_relax() | 290 | #define arch_write_relax(lock) cpu_relax() |
291 | 291 | ||
292 | #endif /* _ASM_IA64_SPINLOCK_H */ | 292 | #endif /* _ASM_IA64_SPINLOCK_H */ |
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4..e2b42a52a6d 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int lock; | 9 | volatile unsigned int lock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int read_counter : 31; | 15 | volatile unsigned int read_counter : 31; |
16 | volatile unsigned int write_lock : 1; | 16 | volatile unsigned int write_lock : 1; |
17 | } raw_rwlock_t; | 17 | } arch_rwlock_t; |
18 | 18 | ||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | 19 | #define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } |
20 | 20 | ||
21 | #endif | 21 | #endif |
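The spinlock.h and spinlock_types.h hunks are a mechanical rename of the ia64 back end from the __raw_*/raw_*_t names to arch_*/arch_*_t, matching the tree-wide split between the generic raw_spinlock_t wrapper and the architecture-level lock. A rough sketch of how generic code reaches the renamed primitives (core-kernel code, not part of this diff; shown only to illustrate the layering):

    static inline void do_raw_spin_lock(raw_spinlock_t *lock)
    {
            arch_spin_lock(&lock->raw_lock);   /* raw_lock is an arch_spinlock_t */
    }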
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f..40574ae1140 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
702 | printk(KERN_ERR PREFIX | 702 | printk(KERN_ERR PREFIX |
703 | "Error parsing MADT - no LAPIC entries\n"); | 703 | "Error parsing MADT - no LAPIC entries\n"); |
704 | 704 | ||
705 | #ifdef CONFIG_SMP | ||
706 | if (available_cpus == 0) { | ||
707 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | ||
708 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | ||
709 | smp_boot_data.cpu_phys_id[available_cpus] = | ||
710 | hard_smp_processor_id(); | ||
711 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
712 | } | ||
713 | smp_boot_data.cpu_count = available_cpus; | ||
714 | #endif | ||
715 | /* Make boot-up look pretty */ | ||
716 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, | ||
717 | total_cpus); | ||
718 | |||
705 | return 0; | 719 | return 0; |
706 | } | 720 | } |
707 | 721 | ||
708 | |||
709 | |||
710 | int __init acpi_boot_init(void) | 722 | int __init acpi_boot_init(void) |
711 | { | 723 | { |
712 | 724 | ||
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
769 | if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) | 781 | if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) |
770 | printk(KERN_ERR PREFIX "Can't find FADT\n"); | 782 | printk(KERN_ERR PREFIX "Can't find FADT\n"); |
771 | 783 | ||
784 | #ifdef CONFIG_ACPI_NUMA | ||
772 | #ifdef CONFIG_SMP | 785 | #ifdef CONFIG_SMP |
773 | if (available_cpus == 0) { | ||
774 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | ||
775 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | ||
776 | smp_boot_data.cpu_phys_id[available_cpus] = | ||
777 | hard_smp_processor_id(); | ||
778 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
779 | } | ||
780 | smp_boot_data.cpu_count = available_cpus; | ||
781 | |||
782 | smp_build_cpu_map(); | ||
783 | # ifdef CONFIG_ACPI_NUMA | ||
784 | if (srat_num_cpus == 0) { | 786 | if (srat_num_cpus == 0) { |
785 | int cpu, i = 1; | 787 | int cpu, i = 1; |
786 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) | 788 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) |
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
789 | node_cpuid[i++].phys_id = | 791 | node_cpuid[i++].phys_id = |
790 | smp_boot_data.cpu_phys_id[cpu]; | 792 | smp_boot_data.cpu_phys_id[cpu]; |
791 | } | 793 | } |
792 | # endif | ||
793 | #endif | 794 | #endif |
794 | #ifdef CONFIG_ACPI_NUMA | ||
795 | build_cpu_to_node_map(); | 795 | build_cpu_to_node_map(); |
796 | #endif | 796 | #endif |
797 | /* Make boot-up look pretty */ | ||
798 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, | ||
799 | total_cpus); | ||
800 | return 0; | 797 | return 0; |
801 | } | 798 | } |
802 | 799 | ||
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c..17a9fba3893 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
1051 | * intermediate precision so that we can produce a full 64-bit result. | 1051 | * intermediate precision so that we can produce a full 64-bit result. |
1052 | */ | 1052 | */ |
1053 | GLOBAL_ENTRY(ia64_native_sched_clock) | 1053 | GLOBAL_ENTRY(ia64_native_sched_clock) |
1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1054 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) | 1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) |
1056 | ;; | 1056 | ;; |
1057 | ldf8 f8=[r8] | 1057 | ldf8 f8=[r8] |
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1078 | GLOBAL_ENTRY(cycle_to_cputime) | 1078 | GLOBAL_ENTRY(cycle_to_cputime) |
1079 | alloc r16=ar.pfs,1,0,0,0 | 1079 | alloc r16=ar.pfs,1,0,0,0 |
1080 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1080 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1081 | ;; | 1081 | ;; |
1082 | ldf8 f8=[r8] | 1082 | ldf8 f8=[r8] |
1083 | ;; | 1083 | ;; |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e30062..461b99902bf 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
33 | EXPORT_SYMBOL(per_cpu__cpu_info); | 33 | EXPORT_SYMBOL(per_cpu__ia64_cpu_info); |
34 | #ifdef CONFIG_SMP | 34 | #ifdef CONFIG_SMP |
35 | EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); | 35 | EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); |
36 | #endif | 36 | #endif |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908..95ac77aeae9 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
793 | goto unlock_iosapic_lock; | 793 | goto unlock_iosapic_lock; |
794 | } | 794 | } |
795 | 795 | ||
796 | spin_lock(&irq_desc[irq].lock); | 796 | raw_spin_lock(&irq_desc[irq].lock); |
797 | dest = get_target_cpu(gsi, irq); | 797 | dest = get_target_cpu(gsi, irq); |
798 | dmode = choose_dmode(); | 798 | dmode = choose_dmode(); |
799 | err = register_intr(gsi, irq, dmode, polarity, trigger); | 799 | err = register_intr(gsi, irq, dmode, polarity, trigger); |
800 | if (err < 0) { | 800 | if (err < 0) { |
801 | spin_unlock(&irq_desc[irq].lock); | 801 | raw_spin_unlock(&irq_desc[irq].lock); |
802 | irq = err; | 802 | irq = err; |
803 | goto unlock_iosapic_lock; | 803 | goto unlock_iosapic_lock; |
804 | } | 804 | } |
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | 817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), |
818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); | 818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); |
819 | 819 | ||
820 | spin_unlock(&irq_desc[irq].lock); | 820 | raw_spin_unlock(&irq_desc[irq].lock); |
821 | unlock_iosapic_lock: | 821 | unlock_iosapic_lock: |
822 | spin_unlock_irqrestore(&iosapic_lock, flags); | 822 | spin_unlock_irqrestore(&iosapic_lock, flags); |
823 | return irq; | 823 | return irq; |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7..94ee9d067cb 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
71 | } | 71 | } |
72 | 72 | ||
73 | if (i < NR_IRQS) { | 73 | if (i < NR_IRQS) { |
74 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 74 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
75 | action = irq_desc[i].action; | 75 | action = irq_desc[i].action; |
76 | if (!action) | 76 | if (!action) |
77 | goto skip; | 77 | goto skip; |
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
91 | 91 | ||
92 | seq_putc(p, '\n'); | 92 | seq_putc(p, '\n'); |
93 | skip: | 93 | skip: |
94 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 94 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
95 | } else if (i == NR_IRQS) | 95 | } else if (i == NR_IRQS) |
96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
97 | return 0; | 97 | return 0; |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a..d4093a173a3 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
260 | } | 260 | } |
261 | 261 | ||
262 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) | 262 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) |
263 | #define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR | ||
264 | 263 | ||
265 | static enum vector_domain_type { | 264 | static enum vector_domain_type { |
266 | VECTOR_DOMAIN_NONE, | 265 | VECTOR_DOMAIN_NONE, |
@@ -345,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
345 | 344 | ||
346 | desc = irq_desc + irq; | 345 | desc = irq_desc + irq; |
347 | cfg = irq_cfg + irq; | 346 | cfg = irq_cfg + irq; |
348 | spin_lock(&desc->lock); | 347 | raw_spin_lock(&desc->lock); |
349 | if (!cfg->move_cleanup_count) | 348 | if (!cfg->move_cleanup_count) |
350 | goto unlock; | 349 | goto unlock; |
351 | 350 | ||
@@ -358,7 +357,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
358 | spin_unlock_irqrestore(&vector_lock, flags); | 357 | spin_unlock_irqrestore(&vector_lock, flags); |
359 | cfg->move_cleanup_count--; | 358 | cfg->move_cleanup_count--; |
360 | unlock: | 359 | unlock: |
361 | spin_unlock(&desc->lock); | 360 | raw_spin_unlock(&desc->lock); |
362 | } | 361 | } |
363 | return IRQ_HANDLED; | 362 | return IRQ_HANDLED; |
364 | } | 363 | } |
@@ -659,11 +658,8 @@ init_IRQ (void)
659 | register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); | 658 | register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); |
660 | #ifdef CONFIG_SMP | 659 | #ifdef CONFIG_SMP |
661 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) | 660 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) |
662 | if (vector_domain_type != VECTOR_DOMAIN_NONE) { | 661 | if (vector_domain_type != VECTOR_DOMAIN_NONE) |
663 | BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR); | ||
664 | IA64_FIRST_DEVICE_VECTOR++; | ||
665 | register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); | 662 | register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); |
666 | } | ||
667 | #endif | 663 | #endif |
668 | #endif | 664 | #endif |
669 | #ifdef CONFIG_PERFMON | 665 | #ifdef CONFIG_PERFMON |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a9948..32f2639e9b0 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
888 | } | 888 | } |
889 | 889 | ||
890 | static void | 890 | static void |
891 | finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, | 891 | finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos, |
892 | unsigned long *nat) | 892 | unsigned long *nat) |
893 | { | 893 | { |
894 | const pal_min_state_area_t *ms = sos->pal_min_state; | ||
894 | const u64 *bank; | 895 | const u64 *bank; |
895 | 896 | ||
896 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | 897 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use |
@@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
904 | regs->cr_iip = ms->pmsa_xip; | 905 | regs->cr_iip = ms->pmsa_xip; |
905 | regs->cr_ipsr = ms->pmsa_xpsr; | 906 | regs->cr_ipsr = ms->pmsa_xpsr; |
906 | regs->cr_ifs = ms->pmsa_xfs; | 907 | regs->cr_ifs = ms->pmsa_xfs; |
908 | |||
909 | sos->iip = ms->pmsa_iip; | ||
910 | sos->ipsr = ms->pmsa_ipsr; | ||
911 | sos->ifs = ms->pmsa_ifs; | ||
907 | } | 912 | } |
908 | regs->pr = ms->pmsa_pr; | 913 | regs->pr = ms->pmsa_pr; |
909 | regs->b0 = ms->pmsa_br0; | 914 | regs->b0 = ms->pmsa_br0; |
@@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
1079 | memcpy(old_regs, regs, sizeof(*regs)); | 1084 | memcpy(old_regs, regs, sizeof(*regs)); |
1080 | old_regs->loadrs = loadrs; | 1085 | old_regs->loadrs = loadrs; |
1081 | old_unat = old_regs->ar_unat; | 1086 | old_unat = old_regs->ar_unat; |
1082 | finish_pt_regs(old_regs, ms, &old_unat); | 1087 | finish_pt_regs(old_regs, sos, &old_unat); |
1083 | 1088 | ||
1084 | /* Next stack a struct switch_stack. mca_asm.S built a partial | 1089 | /* Next stack a struct switch_stack. mca_asm.S built a partial |
1085 | * switch_stack, copy it and fill in the blanks using pt_regs and | 1090 | * switch_stack, copy it and fill in the blanks using pt_regs and |
@@ -1150,7 +1155,7 @@ no_mod:
1150 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | 1155 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", |
1151 | smp_processor_id(), type, msg); | 1156 | smp_processor_id(), type, msg); |
1152 | old_unat = regs->ar_unat; | 1157 | old_unat = regs->ar_unat; |
1153 | finish_pt_regs(regs, ms, &old_unat); | 1158 | finish_pt_regs(regs, sos, &old_unat); |
1154 | return previous_current; | 1159 | return previous_current; |
1155 | } | 1160 | } |
1156 | 1161 | ||
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d4..d5bdf9de36b 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
59 | ia64_do_tlb_purge: | 59 | ia64_do_tlb_purge: |
60 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 60 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
61 | 61 | ||
62 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 62 | GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 |
63 | ;; | 63 | ;; |
64 | addl r17=O(PTCE_STRIDE),r2 | 64 | addl r17=O(PTCE_STRIDE),r2 |
65 | addl r2=O(PTCE_BASE),r2 | 65 | addl r2=O(PTCE_BASE),r2 |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 599b233bef7..5246285a95f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2200,7 +2200,7 @@ pfm_alloc_file(pfm_context_t *ctx)
2200 | { | 2200 | { |
2201 | struct file *file; | 2201 | struct file *file; |
2202 | struct inode *inode; | 2202 | struct inode *inode; |
2203 | struct dentry *dentry; | 2203 | struct path path; |
2204 | char name[32]; | 2204 | char name[32]; |
2205 | struct qstr this; | 2205 | struct qstr this; |
2206 | 2206 | ||
@@ -2225,18 +2225,19 @@ pfm_alloc_file(pfm_context_t *ctx)
2225 | /* | 2225 | /* |
2226 | * allocate a new dcache entry | 2226 | * allocate a new dcache entry |
2227 | */ | 2227 | */ |
2228 | dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); | 2228 | path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); |
2229 | if (!dentry) { | 2229 | if (!path.dentry) { |
2230 | iput(inode); | 2230 | iput(inode); |
2231 | return ERR_PTR(-ENOMEM); | 2231 | return ERR_PTR(-ENOMEM); |
2232 | } | 2232 | } |
2233 | path.mnt = mntget(pfmfs_mnt); | ||
2233 | 2234 | ||
2234 | dentry->d_op = &pfmfs_dentry_operations; | 2235 | path.dentry->d_op = &pfmfs_dentry_operations; |
2235 | d_add(dentry, inode); | 2236 | d_add(path.dentry, inode); |
2236 | 2237 | ||
2237 | file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops); | 2238 | file = alloc_file(&path, FMODE_READ, &pfm_file_ops); |
2238 | if (!file) { | 2239 | if (!file) { |
2239 | dput(dentry); | 2240 | path_put(&path); |
2240 | return ERR_PTR(-ENFILE); | 2241 | return ERR_PTR(-ENFILE); |
2241 | } | 2242 | } |
2242 | 2243 | ||
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fb..c370e02f006 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
61 | 61 | ||
62 | // purge all TC entries | 62 | // purge all TC entries |
63 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 63 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
64 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 64 | GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 |
65 | ;; | 65 | ;; |
66 | addl r17=O(PTCE_STRIDE),r2 | 66 | addl r17=O(PTCE_STRIDE),r2 |
67 | addl r2=O(PTCE_BASE),r2 | 67 | addl r2=O(PTCE_BASE),r2 |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801..a1ea8791977 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
74 | EXPORT_SYMBOL(__per_cpu_offset); | 74 | EXPORT_SYMBOL(__per_cpu_offset); |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 77 | DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); |
78 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); | 78 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); |
79 | unsigned long ia64_cycles_per_usec; | 79 | unsigned long ia64_cycles_per_usec; |
80 | struct ia64_boot_param *ia64_boot_param; | 80 | struct ia64_boot_param *ia64_boot_param; |
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
566 | early_acpi_boot_init(); | 566 | early_acpi_boot_init(); |
567 | # ifdef CONFIG_ACPI_NUMA | 567 | # ifdef CONFIG_ACPI_NUMA |
568 | acpi_numa_init(); | 568 | acpi_numa_init(); |
569 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 569 | # ifdef CONFIG_ACPI_HOTPLUG_CPU |
570 | prefill_possible_map(); | 570 | prefill_possible_map(); |
571 | #endif | 571 | # endif |
572 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? | 572 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? |
573 | 32 : cpus_weight(early_cpu_possible_map)), | 573 | 32 : cpus_weight(early_cpu_possible_map)), |
574 | additional_cpus > 0 ? additional_cpus : 0); | 574 | additional_cpus > 0 ? additional_cpus : 0); |
575 | # endif | 575 | # endif |
576 | #else | ||
577 | # ifdef CONFIG_SMP | ||
578 | smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */ | ||
579 | # endif | ||
580 | #endif /* CONFIG_APCI_BOOT */ | 576 | #endif /* CONFIG_APCI_BOOT */ |
581 | 577 | ||
578 | #ifdef CONFIG_SMP | ||
579 | smp_build_cpu_map(); | ||
580 | #endif | ||
582 | find_memory(); | 581 | find_memory(); |
583 | 582 | ||
584 | /* process SAL system table: */ | 583 | /* process SAL system table: */ |
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
856 | } | 855 | } |
857 | 856 | ||
858 | /* | 857 | /* |
859 | * In UP configuration, setup_per_cpu_areas() is defined in | ||
860 | * include/linux/percpu.h | ||
861 | */ | ||
862 | #ifdef CONFIG_SMP | ||
863 | void __init | ||
864 | setup_per_cpu_areas (void) | ||
865 | { | ||
866 | /* start_kernel() requires this... */ | ||
867 | } | ||
868 | #endif | ||
869 | |||
870 | /* | ||
871 | * Do the following calculations: | 858 | * Do the following calculations: |
872 | * | 859 | * |
873 | * 1. the max. cache line size. | 860 | * 1. the max. cache line size. |
@@ -980,7 +967,7 @@ cpu_init (void)
980 | * depends on the data returned by identify_cpu(). We break the dependency by | 967 | * depends on the data returned by identify_cpu(). We break the dependency by |
981 | * accessing cpu_data() through the canonical per-CPU address. | 968 | * accessing cpu_data() through the canonical per-CPU address. |
982 | */ | 969 | */ |
983 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start); | 970 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); |
984 | identify_cpu(cpu_info); | 971 | identify_cpu(cpu_info); |
985 | 972 | ||
986 | #ifdef CONFIG_MCKINLEY | 973 | #ifdef CONFIG_MCKINLEY |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c98..1295ba327f6 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
166 | } | 166 | } |
167 | #endif | 167 | #endif |
168 | 168 | ||
169 | #ifdef CONFIG_SMP | ||
170 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
171 | __cpu0_per_cpu = .; | ||
172 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
173 | #endif | ||
174 | |||
169 | . = ALIGN(PAGE_SIZE); | 175 | . = ALIGN(PAGE_SIZE); |
170 | __init_end = .; | 176 | __init_end = .; |
171 | 177 | ||
@@ -198,11 +204,6 @@ SECTIONS
198 | data : { } :data | 204 | data : { } :data |
199 | .data : AT(ADDR(.data) - LOAD_OFFSET) | 205 | .data : AT(ADDR(.data) - LOAD_OFFSET) |
200 | { | 206 | { |
201 | #ifdef CONFIG_SMP | ||
202 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
203 | __cpu0_per_cpu = .; | ||
204 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
205 | #endif | ||
206 | INIT_TASK_DATA(PAGE_SIZE) | 207 | INIT_TASK_DATA(PAGE_SIZE) |
207 | CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) | 208 | CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) |
208 | READ_MOSTLY_DATA(SMP_CACHE_BYTES) | 209 | READ_MOSTLY_DATA(SMP_CACHE_BYTES) |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf29..54bf5405981 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
154 | void * __cpuinit | 154 | void * __cpuinit |
155 | per_cpu_init (void) | 155 | per_cpu_init (void) |
156 | { | 156 | { |
157 | int cpu; | 157 | static bool first_time = true; |
158 | static int first_time=1; | 158 | void *cpu0_data = __cpu0_per_cpu; |
159 | unsigned int cpu; | ||
160 | |||
161 | if (!first_time) | ||
162 | goto skip; | ||
163 | first_time = false; | ||
159 | 164 | ||
160 | /* | 165 | /* |
161 | * get_free_pages() cannot be used before cpu_init() done. BSP | 166 | * get_free_pages() cannot be used before cpu_init() done. |
162 | * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls | 167 | * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs |
163 | * get_zeroed_page(). | 168 | * to avoid that AP calls get_zeroed_page(). |
164 | */ | 169 | */ |
165 | if (first_time) { | 170 | for_each_possible_cpu(cpu) { |
166 | void *cpu0_data = __cpu0_per_cpu; | 171 | void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start; |
167 | 172 | ||
168 | first_time=0; | 173 | memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start); |
174 | __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start; | ||
175 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | ||
169 | 176 | ||
170 | __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start; | 177 | /* |
171 | per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0]; | 178 | * percpu area for cpu0 is moved from the __init area |
179 | * which is setup by head.S and used till this point. | ||
180 | * Update ar.k3. This move ensures that percpu | ||
181 | * area for cpu0 is on the correct node and its | ||
182 | * virtual address isn't insanely far from other | ||
183 | * percpu areas which is important for congruent | ||
184 | * percpu allocator. | ||
185 | */ | ||
186 | if (cpu == 0) | ||
187 | ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) - | ||
188 | (unsigned long)__per_cpu_start); | ||
172 | 189 | ||
173 | for (cpu = 1; cpu < NR_CPUS; cpu++) { | 190 | cpu_data += PERCPU_PAGE_SIZE; |
174 | memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
175 | __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start; | ||
176 | cpu_data += PERCPU_PAGE_SIZE; | ||
177 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | ||
178 | } | ||
179 | } | 191 | } |
192 | skip: | ||
180 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; | 193 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; |
181 | } | 194 | } |
182 | 195 | ||
183 | static inline void | 196 | static inline void |
184 | alloc_per_cpu_data(void) | 197 | alloc_per_cpu_data(void) |
185 | { | 198 | { |
186 | cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1, | 199 | cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(), |
187 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | 200 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); |
188 | } | 201 | } |
202 | |||
203 | /** | ||
204 | * setup_per_cpu_areas - setup percpu areas | ||
205 | * | ||
206 | * Arch code has already allocated and initialized percpu areas. All | ||
207 | * this function has to do is to teach the determined layout to the | ||
208 | * dynamic percpu allocator, which happens to be more complex than | ||
209 | * creating whole new ones using helpers. | ||
210 | */ | ||
211 | void __init | ||
212 | setup_per_cpu_areas(void) | ||
213 | { | ||
214 | struct pcpu_alloc_info *ai; | ||
215 | struct pcpu_group_info *gi; | ||
216 | unsigned int cpu; | ||
217 | ssize_t static_size, reserved_size, dyn_size; | ||
218 | int rc; | ||
219 | |||
220 | ai = pcpu_alloc_alloc_info(1, num_possible_cpus()); | ||
221 | if (!ai) | ||
222 | panic("failed to allocate pcpu_alloc_info"); | ||
223 | gi = &ai->groups[0]; | ||
224 | |||
225 | /* units are assigned consecutively to possible cpus */ | ||
226 | for_each_possible_cpu(cpu) | ||
227 | gi->cpu_map[gi->nr_units++] = cpu; | ||
228 | |||
229 | /* set parameters */ | ||
230 | static_size = __per_cpu_end - __per_cpu_start; | ||
231 | reserved_size = PERCPU_MODULE_RESERVE; | ||
232 | dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; | ||
233 | if (dyn_size < 0) | ||
234 | panic("percpu area overflow static=%zd reserved=%zd\n", | ||
235 | static_size, reserved_size); | ||
236 | |||
237 | ai->static_size = static_size; | ||
238 | ai->reserved_size = reserved_size; | ||
239 | ai->dyn_size = dyn_size; | ||
240 | ai->unit_size = PERCPU_PAGE_SIZE; | ||
241 | ai->atom_size = PAGE_SIZE; | ||
242 | ai->alloc_size = PERCPU_PAGE_SIZE; | ||
243 | |||
244 | rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]); | ||
245 | if (rc) | ||
246 | panic("failed to setup percpu area (err=%d)", rc); | ||
247 | |||
248 | pcpu_free_alloc_info(ai); | ||
249 | } | ||
189 | #else | 250 | #else |
190 | #define alloc_per_cpu_data() do { } while (0) | 251 | #define alloc_per_cpu_data() do { } while (0) |
191 | #endif /* CONFIG_SMP */ | 252 | #endif /* CONFIG_SMP */ |
@@ -270,8 +331,8 @@ paging_init (void)
270 | 331 | ||
271 | map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * | 332 | map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * |
272 | sizeof(struct page)); | 333 | sizeof(struct page)); |
273 | vmalloc_end -= map_size; | 334 | VMALLOC_END -= map_size; |
274 | vmem_map = (struct page *) vmalloc_end; | 335 | vmem_map = (struct page *) VMALLOC_END; |
275 | efi_memmap_walk(create_mem_map_page_table, NULL); | 336 | efi_memmap_walk(create_mem_map_page_table, NULL); |
276 | 337 | ||
277 | /* | 338 | /* |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d900..19c4b2195dc 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
143 | int cpu; | 143 | int cpu; |
144 | 144 | ||
145 | for_each_possible_early_cpu(cpu) { | 145 | for_each_possible_early_cpu(cpu) { |
146 | if (cpu == 0) { | 146 | void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start; |
147 | void *cpu0_data = __cpu0_per_cpu; | 147 | |
148 | __per_cpu_offset[cpu] = (char*)cpu0_data - | 148 | if (node != node_cpuid[cpu].nid) |
149 | __per_cpu_start; | 149 | continue; |
150 | } else if (node == node_cpuid[cpu].nid) { | 150 | |
151 | memcpy(__va(cpu_data), __phys_per_cpu_start, | 151 | memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start); |
152 | __per_cpu_end - __per_cpu_start); | 152 | __per_cpu_offset[cpu] = (char *)__va(cpu_data) - |
153 | __per_cpu_offset[cpu] = (char*)__va(cpu_data) - | 153 | __per_cpu_start; |
154 | __per_cpu_start; | 154 | |
155 | cpu_data += PERCPU_PAGE_SIZE; | 155 | /* |
156 | } | 156 | * percpu area for cpu0 is moved from the __init area |
157 | * which is setup by head.S and used till this point. | ||
158 | * Update ar.k3. This move is ensures that percpu | ||
159 | * area for cpu0 is on the correct node and its | ||
160 | * virtual address isn't insanely far from other | ||
161 | * percpu areas which is important for congruent | ||
162 | * percpu allocator. | ||
163 | */ | ||
164 | if (cpu == 0) | ||
165 | ia64_set_kr(IA64_KR_PER_CPU_DATA, | ||
166 | (unsigned long)cpu_data - | ||
167 | (unsigned long)__per_cpu_start); | ||
168 | |||
169 | cpu_data += PERCPU_PAGE_SIZE; | ||
157 | } | 170 | } |
158 | #endif | 171 | #endif |
159 | return cpu_data; | 172 | return cpu_data; |
160 | } | 173 | } |
161 | 174 | ||
175 | #ifdef CONFIG_SMP | ||
176 | /** | ||
177 | * setup_per_cpu_areas - setup percpu areas | ||
178 | * | ||
179 | * Arch code has already allocated and initialized percpu areas. All | ||
180 | * this function has to do is to teach the determined layout to the | ||
181 | * dynamic percpu allocator, which happens to be more complex than | ||
182 | * creating whole new ones using helpers. | ||
183 | */ | ||
184 | void __init setup_per_cpu_areas(void) | ||
185 | { | ||
186 | struct pcpu_alloc_info *ai; | ||
187 | struct pcpu_group_info *uninitialized_var(gi); | ||
188 | unsigned int *cpu_map; | ||
189 | void *base; | ||
190 | unsigned long base_offset; | ||
191 | unsigned int cpu; | ||
192 | ssize_t static_size, reserved_size, dyn_size; | ||
193 | int node, prev_node, unit, nr_units, rc; | ||
194 | |||
195 | ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); | ||
196 | if (!ai) | ||
197 | panic("failed to allocate pcpu_alloc_info"); | ||
198 | cpu_map = ai->groups[0].cpu_map; | ||
199 | |||
200 | /* determine base */ | ||
201 | base = (void *)ULONG_MAX; | ||
202 | for_each_possible_cpu(cpu) | ||
203 | base = min(base, | ||
204 | (void *)(__per_cpu_offset[cpu] + __per_cpu_start)); | ||
205 | base_offset = (void *)__per_cpu_start - base; | ||
206 | |||
207 | /* build cpu_map, units are grouped by node */ | ||
208 | unit = 0; | ||
209 | for_each_node(node) | ||
210 | for_each_possible_cpu(cpu) | ||
211 | if (node == node_cpuid[cpu].nid) | ||
212 | cpu_map[unit++] = cpu; | ||
213 | nr_units = unit; | ||
214 | |||
215 | /* set basic parameters */ | ||
216 | static_size = __per_cpu_end - __per_cpu_start; | ||
217 | reserved_size = PERCPU_MODULE_RESERVE; | ||
218 | dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; | ||
219 | if (dyn_size < 0) | ||
220 | panic("percpu area overflow static=%zd reserved=%zd\n", | ||
221 | static_size, reserved_size); | ||
222 | |||
223 | ai->static_size = static_size; | ||
224 | ai->reserved_size = reserved_size; | ||
225 | ai->dyn_size = dyn_size; | ||
226 | ai->unit_size = PERCPU_PAGE_SIZE; | ||
227 | ai->atom_size = PAGE_SIZE; | ||
228 | ai->alloc_size = PERCPU_PAGE_SIZE; | ||
229 | |||
230 | /* | ||
231 | * CPUs are put into groups according to node. Walk cpu_map | ||
232 | * and create new groups at node boundaries. | ||
233 | */ | ||
234 | prev_node = -1; | ||
235 | ai->nr_groups = 0; | ||
236 | for (unit = 0; unit < nr_units; unit++) { | ||
237 | cpu = cpu_map[unit]; | ||
238 | node = node_cpuid[cpu].nid; | ||
239 | |||
240 | if (node == prev_node) { | ||
241 | gi->nr_units++; | ||
242 | continue; | ||
243 | } | ||
244 | prev_node = node; | ||
245 | |||
246 | gi = &ai->groups[ai->nr_groups++]; | ||
247 | gi->nr_units = 1; | ||
248 | gi->base_offset = __per_cpu_offset[cpu] + base_offset; | ||
249 | gi->cpu_map = &cpu_map[unit]; | ||
250 | } | ||
251 | |||
252 | rc = pcpu_setup_first_chunk(ai, base); | ||
253 | if (rc) | ||
254 | panic("failed to setup percpu area (err=%d)", rc); | ||
255 | |||
256 | pcpu_free_alloc_info(ai); | ||
257 | } | ||
258 | #endif | ||
259 | |||
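The NUMA variant above does its grouping in two passes: it first packs cpu_map so that units belonging to the same node sit next to each other, then walks that map and opens a new pcpu group whenever the node changes. A small standalone sketch of that grouping, with a made-up cpu-to-node table standing in for node_cpuid[].nid:

#include <stdio.h>

#define NR_CPUS   8
#define NR_NODES  3

/* Hypothetical cpu->node assignment standing in for node_cpuid[].nid. */
static const int cpu_node[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 0, 1 };

int main(void)
{
	int cpu_map[NR_CPUS];
	int unit = 0, node, cpu, prev_node = -1, nr_groups = 0;

	/* units are grouped by node, exactly like the first loop in the patch */
	for (node = 0; node < NR_NODES; node++)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_node[cpu] == node)
				cpu_map[unit++] = cpu;

	/* open a new group whenever the node changes while walking cpu_map */
	for (unit = 0; unit < NR_CPUS; unit++) {
		node = cpu_node[cpu_map[unit]];
		if (node != prev_node) {
			prev_node = node;
			printf("group %d (node %d):", nr_groups++, node);
		}
		printf(" cpu%d", cpu_map[unit]);
		if (unit + 1 == NR_CPUS || cpu_node[cpu_map[unit + 1]] != node)
			printf("\n");
	}
	return 0;
}

With the table above this prints three groups, one per node, which is the shape pcpu_setup_first_chunk() is then taught via ai->groups[].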
162 | /** | 260 | /** |
163 | * fill_pernode - initialize pernode data. | 261 | * fill_pernode - initialize pernode data. |
164 | * @node: the node id. | 262 | * @node: the node id. |
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void) | |||
352 | /* Set the node_data pointer for each per-cpu struct */ | 450 | /* Set the node_data pointer for each per-cpu struct */ |
353 | for_each_possible_early_cpu(cpu) { | 451 | for_each_possible_early_cpu(cpu) { |
354 | node = node_cpuid[cpu].nid; | 452 | node = node_cpuid[cpu].nid; |
355 | per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; | 453 | per_cpu(ia64_cpu_info, cpu).node_data = |
454 | mem_data[node].node_data; | ||
356 | } | 455 | } |
357 | #else | 456 | #else |
358 | { | 457 | { |
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void) | |||
360 | cpu = 0; | 459 | cpu = 0; |
361 | node = node_cpuid[cpu].nid; | 460 | node = node_cpuid[cpu].nid; |
362 | cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + | 461 | cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + |
363 | ((char *)&per_cpu__cpu_info - __per_cpu_start)); | 462 | ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start)); |
364 | cpu0_cpu_info->node_data = mem_data[node].node_data; | 463 | cpu0_cpu_info->node_data = mem_data[node].node_data; |
365 | } | 464 | } |
366 | #endif /* CONFIG_SMP */ | 465 | #endif /* CONFIG_SMP */ |
@@ -666,9 +765,9 @@ void __init paging_init(void) | |||
666 | sparse_init(); | 765 | sparse_init(); |
667 | 766 | ||
668 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 767 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
669 | vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * | 768 | VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * |
670 | sizeof(struct page)); | 769 | sizeof(struct page)); |
671 | vmem_map = (struct page *) vmalloc_end; | 770 | vmem_map = (struct page *) VMALLOC_END; |
672 | efi_memmap_walk(create_mem_map_page_table, NULL); | 771 | efi_memmap_walk(create_mem_map_page_table, NULL); |
673 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); | 772 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); |
674 | #endif | 773 | #endif |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1857766a63c..b9609c69343 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void); | |||
44 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; | 44 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; |
45 | 45 | ||
46 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 46 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
47 | unsigned long vmalloc_end = VMALLOC_END_INIT; | 47 | unsigned long VMALLOC_END = VMALLOC_END_INIT; |
48 | EXPORT_SYMBOL(vmalloc_end); | 48 | EXPORT_SYMBOL(VMALLOC_END); |
49 | struct page *vmem_map; | 49 | struct page *vmem_map; |
50 | EXPORT_SYMBOL(vmem_map); | 50 | EXPORT_SYMBOL(vmem_map); |
51 | #endif | 51 | #endif |
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 2a140627dfd..3dccdd8eb27 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c | |||
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr) | |||
22 | } | 22 | } |
23 | 23 | ||
24 | void __iomem * | 24 | void __iomem * |
25 | early_ioremap (unsigned long phys_addr, unsigned long size) | ||
26 | { | ||
27 | return __ioremap(phys_addr); | ||
28 | } | ||
29 | |||
30 | void __iomem * | ||
25 | ioremap (unsigned long phys_addr, unsigned long size) | 31 | ioremap (unsigned long phys_addr, unsigned long size) |
26 | { | 32 | { |
27 | void __iomem *addr; | 33 | void __iomem *addr; |
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) | |||
102 | EXPORT_SYMBOL(ioremap_nocache); | 108 | EXPORT_SYMBOL(ioremap_nocache); |
103 | 109 | ||
104 | void | 110 | void |
111 | early_iounmap (volatile void __iomem *addr, unsigned long size) | ||
112 | { | ||
113 | } | ||
114 | |||
115 | void | ||
105 | iounmap (volatile void __iomem *addr) | 116 | iounmap (volatile void __iomem *addr) |
106 | { | 117 | { |
107 | if (REGION_NUMBER(addr) == RGN_GATE) | 118 | if (REGION_NUMBER(addr) == RGN_GATE) |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index 1176506b2ba..e884ba4e031 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) | |||
496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | 496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, |
497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | 497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, |
498 | stat->deadlocks, | 498 | stat->deadlocks, |
499 | 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | 499 | 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, |
500 | 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | 500 | 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, |
501 | 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec, | 501 | 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, |
502 | stat->shub_ptc_flushes_not_my_mm, | 502 | stat->shub_ptc_flushes_not_my_mm, |
503 | stat->deadlocks2, | 503 | stat->deadlocks2, |
504 | stat->shub_ipi_flushes, | 504 | stat->shub_ipi_flushes, |
505 | 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec); | 505 | 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); |
506 | } | 506 | } |
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 35b2a27d2e7..efb454534e5 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/bitmap.h> | ||
12 | #include <asm/sn/sn_sal.h> | 13 | #include <asm/sn/sn_sal.h> |
13 | #include <asm/sn/addrs.h> | 14 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/io.h> | 15 | #include <asm/sn/io.h> |
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) | |||
369 | static dma_addr_t | 370 | static dma_addr_t |
370 | tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) | 371 | tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) |
371 | { | 372 | { |
372 | int i, ps, ps_shift, entry, entries, mapsize, last_entry; | 373 | int ps, ps_shift, entry, entries, mapsize; |
373 | u64 xio_addr, end_xio_addr; | 374 | u64 xio_addr, end_xio_addr; |
374 | struct tioca_common *tioca_common; | 375 | struct tioca_common *tioca_common; |
375 | struct tioca_kernel *tioca_kern; | 376 | struct tioca_kernel *tioca_kern; |
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) | |||
410 | map = tioca_kern->ca_pcigart_pagemap; | 411 | map = tioca_kern->ca_pcigart_pagemap; |
411 | mapsize = tioca_kern->ca_pcigart_entries; | 412 | mapsize = tioca_kern->ca_pcigart_entries; |
412 | 413 | ||
413 | entry = find_first_zero_bit(map, mapsize); | 414 | entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0); |
414 | while (entry < mapsize) { | 415 | if (entry >= mapsize) { |
415 | last_entry = find_next_bit(map, mapsize, entry); | ||
416 | |||
417 | if (last_entry - entry >= entries) | ||
418 | break; | ||
419 | |||
420 | entry = find_next_zero_bit(map, mapsize, last_entry); | ||
421 | } | ||
422 | |||
423 | if (entry > mapsize) { | ||
424 | kfree(ca_dmamap); | 416 | kfree(ca_dmamap); |
425 | goto map_return; | 417 | goto map_return; |
426 | } | 418 | } |
427 | 419 | ||
428 | for (i = 0; i < entries; i++) | 420 | bitmap_set(map, entry, entries); |
429 | set_bit(entry + i, map); | ||
430 | 421 | ||
431 | bus_addr = tioca_kern->ca_pciap_base + (entry * ps); | 422 | bus_addr = tioca_kern->ca_pciap_base + (entry * ps); |
432 | 423 | ||
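The tioca change above replaces the hand-rolled scan of the GART pagemap with bitmap_find_next_zero_area() plus bitmap_set(): find a run of free entries long enough for the mapping, then mark the whole run busy in one call. A toy userspace version of the same pattern, using a simplified one-byte-per-bit map rather than the kernel helpers (no alignment mask, no word-sized bit operations):

#include <stdio.h>
#include <string.h>

#define MAPSIZE 32  /* number of GART entries in this toy example */

/* Simplified stand-ins for bitmap_find_next_zero_area()/bitmap_set(). */
static int find_zero_area(const char *map, int size, int start, int nr)
{
	int i, run = 0;

	for (i = start; i < size; i++) {
		run = map[i] ? 0 : run + 1;
		if (run == nr)
			return i - nr + 1;
	}
	return size;            /* >= size means "no room", as in the patch */
}

static void set_area(char *map, int start, int nr)
{
	memset(map + start, 1, nr);
}

int main(void)
{
	char map[MAPSIZE] = { 0 };
	int entries = 4;        /* pages needed for one mapping */
	int entry;

	map[2] = map[3] = 1;    /* pretend two entries are already in use */

	entry = find_zero_area(map, MAPSIZE, 0, entries);
	if (entry >= MAPSIZE) {
		fprintf(stderr, "no free GART range\n");
		return 1;
	}
	set_area(map, entry, entries);
	printf("allocated entries %d..%d\n", entry, entry + entries - 1);
	return 0;
}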
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c index f042e192d2f..a3fb7cf9ae1 100644 --- a/arch/ia64/xen/irq_xen.c +++ b/arch/ia64/xen/irq_xen.c | |||
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | 65 | ||
66 | static DEFINE_PER_CPU(int, timer_irq) = -1; | 66 | static DEFINE_PER_CPU(int, xen_timer_irq) = -1; |
67 | static DEFINE_PER_CPU(int, ipi_irq) = -1; | 67 | static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; |
68 | static DEFINE_PER_CPU(int, resched_irq) = -1; | 68 | static DEFINE_PER_CPU(int, xen_resched_irq) = -1; |
69 | static DEFINE_PER_CPU(int, cmc_irq) = -1; | 69 | static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; |
70 | static DEFINE_PER_CPU(int, cmcp_irq) = -1; | 70 | static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; |
71 | static DEFINE_PER_CPU(int, cpep_irq) = -1; | 71 | static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; |
72 | #define NAME_SIZE 15 | 72 | #define NAME_SIZE 15 |
73 | static DEFINE_PER_CPU(char[NAME_SIZE], timer_name); | 73 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); |
74 | static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name); | 74 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); |
75 | static DEFINE_PER_CPU(char[NAME_SIZE], resched_name); | 75 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); |
76 | static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name); | 76 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); |
77 | static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name); | 77 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); |
78 | static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name); | 78 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); |
79 | #undef NAME_SIZE | 79 | #undef NAME_SIZE |
80 | 80 | ||
81 | struct saved_irq { | 81 | struct saved_irq { |
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, | |||
144 | if (xen_slab_ready) { | 144 | if (xen_slab_ready) { |
145 | switch (vec) { | 145 | switch (vec) { |
146 | case IA64_TIMER_VECTOR: | 146 | case IA64_TIMER_VECTOR: |
147 | snprintf(per_cpu(timer_name, cpu), | 147 | snprintf(per_cpu(xen_timer_name, cpu), |
148 | sizeof(per_cpu(timer_name, cpu)), | 148 | sizeof(per_cpu(xen_timer_name, cpu)), |
149 | "%s%d", action->name, cpu); | 149 | "%s%d", action->name, cpu); |
150 | irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, | 150 | irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, |
151 | action->handler, action->flags, | 151 | action->handler, action->flags, |
152 | per_cpu(timer_name, cpu), action->dev_id); | 152 | per_cpu(xen_timer_name, cpu), action->dev_id); |
153 | per_cpu(timer_irq, cpu) = irq; | 153 | per_cpu(xen_timer_irq, cpu) = irq; |
154 | break; | 154 | break; |
155 | case IA64_IPI_RESCHEDULE: | 155 | case IA64_IPI_RESCHEDULE: |
156 | snprintf(per_cpu(resched_name, cpu), | 156 | snprintf(per_cpu(xen_resched_name, cpu), |
157 | sizeof(per_cpu(resched_name, cpu)), | 157 | sizeof(per_cpu(xen_resched_name, cpu)), |
158 | "%s%d", action->name, cpu); | 158 | "%s%d", action->name, cpu); |
159 | irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, | 159 | irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, |
160 | action->handler, action->flags, | 160 | action->handler, action->flags, |
161 | per_cpu(resched_name, cpu), action->dev_id); | 161 | per_cpu(xen_resched_name, cpu), action->dev_id); |
162 | per_cpu(resched_irq, cpu) = irq; | 162 | per_cpu(xen_resched_irq, cpu) = irq; |
163 | break; | 163 | break; |
164 | case IA64_IPI_VECTOR: | 164 | case IA64_IPI_VECTOR: |
165 | snprintf(per_cpu(ipi_name, cpu), | 165 | snprintf(per_cpu(xen_ipi_name, cpu), |
166 | sizeof(per_cpu(ipi_name, cpu)), | 166 | sizeof(per_cpu(xen_ipi_name, cpu)), |
167 | "%s%d", action->name, cpu); | 167 | "%s%d", action->name, cpu); |
168 | irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, | 168 | irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, |
169 | action->handler, action->flags, | 169 | action->handler, action->flags, |
170 | per_cpu(ipi_name, cpu), action->dev_id); | 170 | per_cpu(xen_ipi_name, cpu), action->dev_id); |
171 | per_cpu(ipi_irq, cpu) = irq; | 171 | per_cpu(xen_ipi_irq, cpu) = irq; |
172 | break; | 172 | break; |
173 | case IA64_CMC_VECTOR: | 173 | case IA64_CMC_VECTOR: |
174 | snprintf(per_cpu(cmc_name, cpu), | 174 | snprintf(per_cpu(xen_cmc_name, cpu), |
175 | sizeof(per_cpu(cmc_name, cpu)), | 175 | sizeof(per_cpu(xen_cmc_name, cpu)), |
176 | "%s%d", action->name, cpu); | 176 | "%s%d", action->name, cpu); |
177 | irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, | 177 | irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, |
178 | action->handler, | 178 | action->handler, |
179 | action->flags, | 179 | action->flags, |
180 | per_cpu(cmc_name, cpu), | 180 | per_cpu(xen_cmc_name, cpu), |
181 | action->dev_id); | 181 | action->dev_id); |
182 | per_cpu(cmc_irq, cpu) = irq; | 182 | per_cpu(xen_cmc_irq, cpu) = irq; |
183 | break; | 183 | break; |
184 | case IA64_CMCP_VECTOR: | 184 | case IA64_CMCP_VECTOR: |
185 | snprintf(per_cpu(cmcp_name, cpu), | 185 | snprintf(per_cpu(xen_cmcp_name, cpu), |
186 | sizeof(per_cpu(cmcp_name, cpu)), | 186 | sizeof(per_cpu(xen_cmcp_name, cpu)), |
187 | "%s%d", action->name, cpu); | 187 | "%s%d", action->name, cpu); |
188 | irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, | 188 | irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, |
189 | action->handler, | 189 | action->handler, |
190 | action->flags, | 190 | action->flags, |
191 | per_cpu(cmcp_name, cpu), | 191 | per_cpu(xen_cmcp_name, cpu), |
192 | action->dev_id); | 192 | action->dev_id); |
193 | per_cpu(cmcp_irq, cpu) = irq; | 193 | per_cpu(xen_cmcp_irq, cpu) = irq; |
194 | break; | 194 | break; |
195 | case IA64_CPEP_VECTOR: | 195 | case IA64_CPEP_VECTOR: |
196 | snprintf(per_cpu(cpep_name, cpu), | 196 | snprintf(per_cpu(xen_cpep_name, cpu), |
197 | sizeof(per_cpu(cpep_name, cpu)), | 197 | sizeof(per_cpu(xen_cpep_name, cpu)), |
198 | "%s%d", action->name, cpu); | 198 | "%s%d", action->name, cpu); |
199 | irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, | 199 | irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, |
200 | action->handler, | 200 | action->handler, |
201 | action->flags, | 201 | action->flags, |
202 | per_cpu(cpep_name, cpu), | 202 | per_cpu(xen_cpep_name, cpu), |
203 | action->dev_id); | 203 | action->dev_id); |
204 | per_cpu(cpep_irq, cpu) = irq; | 204 | per_cpu(xen_cpep_irq, cpu) = irq; |
205 | break; | 205 | break; |
206 | case IA64_CPE_VECTOR: | 206 | case IA64_CPE_VECTOR: |
207 | case IA64_MCA_RENDEZ_VECTOR: | 207 | case IA64_MCA_RENDEZ_VECTOR: |
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb, | |||
275 | 275 | ||
276 | if (action == CPU_DEAD) { | 276 | if (action == CPU_DEAD) { |
277 | /* Unregister evtchn. */ | 277 | /* Unregister evtchn. */ |
278 | if (per_cpu(cpep_irq, cpu) >= 0) { | 278 | if (per_cpu(xen_cpep_irq, cpu) >= 0) { |
279 | unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL); | 279 | unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), |
280 | per_cpu(cpep_irq, cpu) = -1; | 280 | NULL); |
281 | per_cpu(xen_cpep_irq, cpu) = -1; | ||
281 | } | 282 | } |
282 | if (per_cpu(cmcp_irq, cpu) >= 0) { | 283 | if (per_cpu(xen_cmcp_irq, cpu) >= 0) { |
283 | unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL); | 284 | unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), |
284 | per_cpu(cmcp_irq, cpu) = -1; | 285 | NULL); |
286 | per_cpu(xen_cmcp_irq, cpu) = -1; | ||
285 | } | 287 | } |
286 | if (per_cpu(cmc_irq, cpu) >= 0) { | 288 | if (per_cpu(xen_cmc_irq, cpu) >= 0) { |
287 | unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL); | 289 | unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); |
288 | per_cpu(cmc_irq, cpu) = -1; | 290 | per_cpu(xen_cmc_irq, cpu) = -1; |
289 | } | 291 | } |
290 | if (per_cpu(ipi_irq, cpu) >= 0) { | 292 | if (per_cpu(xen_ipi_irq, cpu) >= 0) { |
291 | unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL); | 293 | unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); |
292 | per_cpu(ipi_irq, cpu) = -1; | 294 | per_cpu(xen_ipi_irq, cpu) = -1; |
293 | } | 295 | } |
294 | if (per_cpu(resched_irq, cpu) >= 0) { | 296 | if (per_cpu(xen_resched_irq, cpu) >= 0) { |
295 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), | 297 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), |
296 | NULL); | 298 | NULL); |
297 | per_cpu(resched_irq, cpu) = -1; | 299 | per_cpu(xen_resched_irq, cpu) = -1; |
298 | } | 300 | } |
299 | if (per_cpu(timer_irq, cpu) >= 0) { | 301 | if (per_cpu(xen_timer_irq, cpu) >= 0) { |
300 | unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); | 302 | unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), |
301 | per_cpu(timer_irq, cpu) = -1; | 303 | NULL); |
304 | per_cpu(xen_timer_irq, cpu) = -1; | ||
302 | } | 305 | } |
303 | } | 306 | } |
304 | return NOTIFY_OK; | 307 | return NOTIFY_OK; |
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index dbeadb9c8e2..c1c544513e8 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
@@ -34,15 +34,15 @@ | |||
34 | 34 | ||
35 | #include "../kernel/fsyscall_gtod_data.h" | 35 | #include "../kernel/fsyscall_gtod_data.h" |
36 | 36 | ||
37 | DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); | 37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); |
38 | DEFINE_PER_CPU(unsigned long, processed_stolen_time); | 38 | static DEFINE_PER_CPU(unsigned long, xen_stolen_time); |
39 | DEFINE_PER_CPU(unsigned long, processed_blocked_time); | 39 | static DEFINE_PER_CPU(unsigned long, xen_blocked_time); |
40 | 40 | ||
41 | /* taken from i386/kernel/time-xen.c */ | 41 | /* taken from i386/kernel/time-xen.c */ |
42 | static void xen_init_missing_ticks_accounting(int cpu) | 42 | static void xen_init_missing_ticks_accounting(int cpu) |
43 | { | 43 | { |
44 | struct vcpu_register_runstate_memory_area area; | 44 | struct vcpu_register_runstate_memory_area area; |
45 | struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu); | 45 | struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); |
46 | int rc; | 46 | int rc; |
47 | 47 | ||
48 | memset(runstate, 0, sizeof(*runstate)); | 48 | memset(runstate, 0, sizeof(*runstate)); |
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu) | |||
52 | &area); | 52 | &area); |
53 | WARN_ON(rc && rc != -ENOSYS); | 53 | WARN_ON(rc && rc != -ENOSYS); |
54 | 54 | ||
55 | per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; | 55 | per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; |
56 | per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] | 56 | per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] |
57 | + runstate->time[RUNSTATE_offline]; | 57 | + runstate->time[RUNSTATE_offline]; |
58 | } | 58 | } |
59 | 59 | ||
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
68 | 68 | ||
69 | BUG_ON(preemptible()); | 69 | BUG_ON(preemptible()); |
70 | 70 | ||
71 | state = &__get_cpu_var(runstate); | 71 | state = &__get_cpu_var(xen_runstate); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * The runstate info is always updated by the hypervisor on | 74 | * The runstate info is always updated by the hypervisor on |
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm) | |||
103 | * This function just checks and reject this effect. | 103 | * This function just checks and reject this effect. |
104 | */ | 104 | */ |
105 | if (!time_after_eq(runstate.time[RUNSTATE_blocked], | 105 | if (!time_after_eq(runstate.time[RUNSTATE_blocked], |
106 | per_cpu(processed_blocked_time, cpu))) | 106 | per_cpu(xen_blocked_time, cpu))) |
107 | blocked = 0; | 107 | blocked = 0; |
108 | 108 | ||
109 | if (!time_after_eq(runstate.time[RUNSTATE_runnable] + | 109 | if (!time_after_eq(runstate.time[RUNSTATE_runnable] + |
110 | runstate.time[RUNSTATE_offline], | 110 | runstate.time[RUNSTATE_offline], |
111 | per_cpu(processed_stolen_time, cpu))) | 111 | per_cpu(xen_stolen_time, cpu))) |
112 | stolen = 0; | 112 | stolen = 0; |
113 | 113 | ||
114 | if (!time_after(delta_itm + new_itm, ia64_get_itc())) | 114 | if (!time_after(delta_itm + new_itm, ia64_get_itc())) |
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm) | |||
147 | } else { | 147 | } else { |
148 | local_cpu_data->itm_next = delta_itm + new_itm; | 148 | local_cpu_data->itm_next = delta_itm + new_itm; |
149 | } | 149 | } |
150 | per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen; | 150 | per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; |
151 | per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked; | 151 | per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; |
152 | } | 152 | } |
153 | return delta_itm; | 153 | return delta_itm; |
154 | } | 154 | } |
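The xen/time.c code whose per-cpu variables are renamed above keeps a "processed" counter per cpu so that only the not-yet-accounted part of the hypervisor's runnable+offline runstate time is turned into stolen ticks, and only in whole-tick steps. A toy sketch of that bookkeeping; the 10 ms NS_PER_TICK and the sample runstate values are assumptions for illustration:

#include <stdio.h>

#define NS_PER_TICK 10000000ULL   /* assumed 10 ms tick for the sketch */

/* "processed" remembers how much of the hypervisor's runnable+offline
 * time has already been converted into stolen ticks. */
static unsigned long long processed_stolen_ns;

static unsigned int account_stolen(unsigned long long runnable_ns,
				   unsigned long long offline_ns)
{
	unsigned long long total = runnable_ns + offline_ns;
	unsigned long long delta = total - processed_stolen_ns;
	unsigned int ticks = delta / NS_PER_TICK;

	/* only whole ticks are consumed; the remainder stays pending */
	processed_stolen_ns += (unsigned long long)ticks * NS_PER_TICK;
	return ticks;
}

int main(void)
{
	printf("stolen ticks: %u\n", account_stolen(25000000ULL, 0));  /* 2 */
	printf("stolen ticks: %u\n", account_stolen(31000000ULL, 0));  /* 1 */
	return 0;
}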