Diffstat (limited to 'arch/ia64')
49 files changed, 435 insertions, 456 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..2d7f56a98e0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
87 | bool | 87 | bool |
88 | default y | 88 | default y |
89 | 89 | ||
90 | config HAVE_LEGACY_PER_CPU_AREA | ||
91 | def_bool y | ||
92 | |||
93 | config HAVE_SETUP_PER_CPU_AREA | 90 | config HAVE_SETUP_PER_CPU_AREA |
94 | def_bool y | 91 | def_bool y |
95 | 92 | ||
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 674a8374c6d9..f332e3fe4237 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1381,7 +1381,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1381 | #endif | 1381 | #endif |
1382 | 1382 | ||
1383 | /* | 1383 | /* |
1384 | ** Not virtually contigous. | 1384 | ** Not virtually contiguous. |
1385 | ** Terminate prev chunk. | 1385 | ** Terminate prev chunk. |
1386 | ** Start a new chunk. | 1386 | ** Start a new chunk. |
1387 | ** | 1387 | ** |
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea3..657725742617 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
11 | #include <asm/intrinsics.h> | 11 | #include <asm/intrinsics.h> |
12 | #include <asm/uaccess.h> | 12 | #include <asm/uaccess.h> |
13 | 13 | ||
14 | #define USE_ELF_CORE_DUMP 1 | ||
15 | |||
16 | /* Override elfcore.h */ | 14 | /* Override elfcore.h */ |
17 | #define _LINUX_ELFCORE_H 1 | 15 | #define _LINUX_ELFCORE_H 1 |
18 | typedef unsigned int elf_greg_t; | 16 | typedef unsigned int elf_greg_t; |
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index af9405cd70e5..2fd7479aa216 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -79,7 +79,7 @@ GLOBAL_ENTRY(ia32_ret_from_clone)
79 | (p6) br.cond.spnt .ia32_strace_check_retval | 79 | (p6) br.cond.spnt .ia32_strace_check_retval |
80 | ;; // prevent RAW on r8 | 80 | ;; // prevent RAW on r8 |
81 | END(ia32_ret_from_clone) | 81 | END(ia32_ret_from_clone) |
82 | // fall thrugh | 82 | // fall through |
83 | GLOBAL_ENTRY(ia32_ret_from_syscall) | 83 | GLOBAL_ENTRY(ia32_ret_from_syscall) |
84 | PT_REGS_UNWIND_INFO(0) | 84 | PT_REGS_UNWIND_INFO(0) |
85 | 85 | ||
@@ -327,7 +327,7 @@ ia32_syscall_table:
327 | data8 compat_sys_writev | 327 | data8 compat_sys_writev |
328 | data8 sys_getsid | 328 | data8 sys_getsid |
329 | data8 sys_fdatasync | 329 | data8 sys_fdatasync |
330 | data8 sys32_sysctl | 330 | data8 compat_sys_sysctl |
331 | data8 sys_mlock /* 150 */ | 331 | data8 sys_mlock /* 150 */ |
332 | data8 sys_munlock | 332 | data8 sys_munlock |
333 | data8 sys_mlockall | 333 | data8 sys_mlockall |
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 625ed8f76fce..045b746b9808 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
858 | 858 | ||
859 | prot = get_prot32(prot); | 859 | prot = get_prot32(prot); |
860 | 860 | ||
861 | if (flags & MAP_HUGETLB) | ||
862 | return -ENOMEM; | ||
863 | |||
861 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 864 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
862 | mutex_lock(&ia32_mmap_mutex); | 865 | mutex_lock(&ia32_mmap_mutex); |
863 | { | 866 | { |
@@ -1628,61 +1631,6 @@ sys32_msync (unsigned int start, unsigned int len, int flags)
1628 | return sys_msync(addr, len + (start - addr), flags); | 1631 | return sys_msync(addr, len + (start - addr), flags); |
1629 | } | 1632 | } |
1630 | 1633 | ||
1631 | struct sysctl32 { | ||
1632 | unsigned int name; | ||
1633 | int nlen; | ||
1634 | unsigned int oldval; | ||
1635 | unsigned int oldlenp; | ||
1636 | unsigned int newval; | ||
1637 | unsigned int newlen; | ||
1638 | unsigned int __unused[4]; | ||
1639 | }; | ||
1640 | |||
1641 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
1642 | asmlinkage long | ||
1643 | sys32_sysctl (struct sysctl32 __user *args) | ||
1644 | { | ||
1645 | struct sysctl32 a32; | ||
1646 | mm_segment_t old_fs = get_fs (); | ||
1647 | void __user *oldvalp, *newvalp; | ||
1648 | size_t oldlen; | ||
1649 | int __user *namep; | ||
1650 | long ret; | ||
1651 | |||
1652 | if (copy_from_user(&a32, args, sizeof(a32))) | ||
1653 | return -EFAULT; | ||
1654 | |||
1655 | /* | ||
1656 | * We need to pre-validate these because we have to disable address checking | ||
1657 | * before calling do_sysctl() because of OLDLEN but we can't run the risk of the | ||
1658 | * user specifying bad addresses here. Well, since we're dealing with 32 bit | ||
1659 | * addresses, we KNOW that access_ok() will always succeed, so this is an | ||
1660 | * expensive NOP, but so what... | ||
1661 | */ | ||
1662 | namep = (int __user *) compat_ptr(a32.name); | ||
1663 | oldvalp = compat_ptr(a32.oldval); | ||
1664 | newvalp = compat_ptr(a32.newval); | ||
1665 | |||
1666 | if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp))) | ||
1667 | || !access_ok(VERIFY_WRITE, namep, 0) | ||
1668 | || !access_ok(VERIFY_WRITE, oldvalp, 0) | ||
1669 | || !access_ok(VERIFY_WRITE, newvalp, 0)) | ||
1670 | return -EFAULT; | ||
1671 | |||
1672 | set_fs(KERNEL_DS); | ||
1673 | lock_kernel(); | ||
1674 | ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen, | ||
1675 | newvalp, (size_t) a32.newlen); | ||
1676 | unlock_kernel(); | ||
1677 | set_fs(old_fs); | ||
1678 | |||
1679 | if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp))) | ||
1680 | return -EFAULT; | ||
1681 | |||
1682 | return ret; | ||
1683 | } | ||
1684 | #endif | ||
1685 | |||
1686 | asmlinkage long | 1634 | asmlinkage long |
1687 | sys32_newuname (struct new_utsname __user *name) | 1635 | sys32_newuname (struct new_utsname __user *name) |
1688 | { | 1636 | { |
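The removed sys32_sysctl duplicated what the generic compat layer provides; the syscall-table switch to compat_sys_sysctl in ia32_entry.S above relies on the same pointer-widening idiom. A minimal sketch of that pattern, assuming illustrative names (struct pair32 and fetch_pair are hypothetical, not kernel API):

    /* 32-bit user pointers arrive as 32-bit integers and are widened
     * with compat_ptr() into real __user pointers. */
    struct pair32 {
            compat_uptr_t ptr;      /* 32-bit user pointer */
            compat_size_t len;
    };

    static long fetch_pair(struct pair32 __user *uarg, void *buf)
    {
            struct pair32 a;

            if (copy_from_user(&a, uarg, sizeof(a)))
                    return -EFAULT;
            /* compat_ptr() turns the u32 into a usable __user pointer */
            return copy_from_user(buf, compat_ptr(a.ptr), a.len) ? -EFAULT : 0;
    }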
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
127 | * @addr: Address to start counting from | 127 | * @addr: Address to start counting from |
128 | * | 128 | * |
129 | * Similarly to clear_bit_unlock, the implementation uses a store | 129 | * Similarly to clear_bit_unlock, the implementation uses a store |
130 | * with release semantics. See also __raw_spin_unlock(). | 130 | * with release semantics. See also arch_spin_unlock(). |
131 | */ | 131 | */ |
132 | static __inline__ void | 132 | static __inline__ void |
133 | __clear_bit_unlock(int nr, void *addr) | 133 | __clear_bit_unlock(int nr, void *addr) |
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index c8ce2719fee8..429eefc93ee7 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -25,6 +25,7 @@
25 | #define flush_cache_vmap(start, end) do { } while (0) | 25 | #define flush_cache_vmap(start, end) do { } while (0) |
26 | #define flush_cache_vunmap(start, end) do { } while (0) | 26 | #define flush_cache_vunmap(start, end) do { } while (0) |
27 | 27 | ||
28 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | ||
28 | #define flush_dcache_page(page) \ | 29 | #define flush_dcache_page(page) \ |
29 | do { \ | 30 | do { \ |
30 | clear_bit(PG_arch_1, &(page)->flags); \ | 31 | clear_bit(PG_arch_1, &(page)->flags); \ |
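ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE advertises that flush_dcache_page() above does real work, so generic code can avoid redundant flushes on architectures with coherent D-caches. A hedged sketch of the consumer side (the in-tree user at this point appears to be the block layer's bio flush helpers):

    #ifdef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
            flush_dcache_page(page);   /* arch tracks D-cache state per page */
    #else
            /* no-op: D-cache is coherent with user mappings */
    #endif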
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e7..7d09a09cdaad 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
73 | if (!dev->dma_mask) | 73 | if (!dev->dma_mask) |
74 | return 0; | 74 | return 0; |
75 | 75 | ||
76 | return addr + size <= *dev->dma_mask; | 76 | return addr + size - 1 <= *dev->dma_mask; |
77 | } | 77 | } |
78 | 78 | ||
79 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | 79 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) |
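The dma_capable() change is an off-by-one fix: the last byte of the buffer is addr + size - 1, so a buffer ending exactly at the mask boundary is addressable, and the old form could even wrap to 0 at the top of the address space. A worked example with a 32-bit-capable device:

    /* addr = 0xfffff000, size = 0x1000, mask = 0xffffffff:
     *   old: addr + size     = 0x100000000 > mask  -> wrongly rejected
     *   new: addr + size - 1 = 0x0ffffffff <= mask -> accepted       */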
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029cb..e14108b19c09 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
25 | #define ELF_DATA ELFDATA2LSB | 25 | #define ELF_DATA ELFDATA2LSB |
26 | #define ELF_ARCH EM_IA_64 | 26 | #define ELF_ARCH EM_IA_64 |
27 | 27 | ||
28 | #define USE_ELF_CORE_DUMP | ||
29 | #define CORE_DUMP_USE_REGSET | 28 | #define CORE_DUMP_USE_REGSET |
30 | 29 | ||
31 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are | 30 | /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are |
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 18a7e49abbc5..bc90c75adf67 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
60 | #define KVM_IRQCHIP_PIC_MASTER 0 | 60 | #define KVM_IRQCHIP_PIC_MASTER 0 |
61 | #define KVM_IRQCHIP_PIC_SLAVE 1 | 61 | #define KVM_IRQCHIP_PIC_SLAVE 1 |
62 | #define KVM_IRQCHIP_IOAPIC 2 | 62 | #define KVM_IRQCHIP_IOAPIC 2 |
63 | #define KVM_NR_IRQCHIPS 3 | ||
63 | 64 | ||
64 | #define KVM_CONTEXT_SIZE 8*1024 | 65 | #define KVM_CONTEXT_SIZE 8*1024 |
65 | 66 | ||
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index d9b6325a9328..a362e67e0ca6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -475,7 +475,6 @@ struct kvm_arch {
475 | struct list_head assigned_dev_head; | 475 | struct list_head assigned_dev_head; |
476 | struct iommu_domain *iommu_domain; | 476 | struct iommu_domain *iommu_domain; |
477 | int iommu_flags; | 477 | int iommu_flags; |
478 | struct hlist_head irq_ack_notifier_list; | ||
479 | 478 | ||
480 | unsigned long irq_sources_bitmap; | 479 | unsigned long irq_sources_bitmap; |
481 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | 480 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; |
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
61 | 61 | ||
62 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 62 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
63 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ | 63 | # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ |
64 | extern unsigned long vmalloc_end; | 64 | extern unsigned long VMALLOC_END; |
65 | extern struct page *vmem_map; | 65 | extern struct page *vmem_map; |
66 | extern int find_largest_hole(u64 start, u64 end, void *arg); | 66 | extern int find_largest_hole(u64 start, u64 end, void *arg); |
67 | extern int create_mem_map_page_table(u64 start, u64 end, void *arg); | 67 | extern int create_mem_map_page_table(u64 start, u64 end, void *arg); |
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
22 | 22 | ||
23 | #include <asm/mmzone.h> | 23 | #include <asm/mmzone.h> |
24 | 24 | ||
25 | #define NUMA_NO_NODE -1 | ||
26 | |||
27 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; | 25 | extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; |
28 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; | 26 | extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; |
29 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; | 27 | extern pg_data_t *pgdat_list[MAX_NUMNODES]; |
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 48822c0811d8..74724b24c2b7 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,7 +67,7 @@ typedef struct {
67 | unsigned long ip; /* where did the overflow interrupt happened */ | 67 | unsigned long ip; /* where did the overflow interrupt happened */ |
68 | unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */ | 68 | unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */ |
69 | 69 | ||
70 | unsigned short cpu; /* cpu on which the overfow occured */ | 70 | unsigned short cpu; /* cpu on which the overflow occured */ |
71 | unsigned short set; /* event set active when overflow ocurred */ | 71 | unsigned short set; /* event set active when overflow ocurred */ |
72 | int tgid; /* thread group id (for NPTL, this is getpid()) */ | 72 | int tgid; /* thread group id (for NPTL, this is getpid()) */ |
73 | } pfm_default_smpl_entry_t; | 73 | } pfm_default_smpl_entry_t; |
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..69bf13857a9f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
228 | #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) | 228 | #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) |
229 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 229 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
230 | # define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) | 230 | # define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) |
231 | # define VMALLOC_END vmalloc_end | 231 | extern unsigned long VMALLOC_END; |
232 | extern unsigned long vmalloc_end; | ||
233 | #else | 232 | #else |
234 | #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) | 233 | #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) |
235 | /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ | 234 | /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ |
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..7fa90f73f6be 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
229 | #endif | 229 | #endif |
230 | }; | 230 | }; |
231 | 231 | ||
232 | DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 232 | DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * The "local" data variable. It refers to the per-CPU data of the currently executing | 235 | * The "local" data variable. It refers to the per-CPU data of the currently executing |
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
237 | * Do not use the address of local_cpu_data, since it will be different from | 237 | * Do not use the address of local_cpu_data, since it will be different from |
238 | * cpu_data(smp_processor_id())! | 238 | * cpu_data(smp_processor_id())! |
239 | */ | 239 | */ |
240 | #define local_cpu_data (&__ia64_per_cpu_var(cpu_info)) | 240 | #define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info)) |
241 | #define cpu_data(cpu) (&per_cpu(cpu_info, cpu)) | 241 | #define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu)) |
242 | 242 | ||
243 | extern void print_cpu_info (struct cpuinfo_ia64 *); | 243 | extern void print_cpu_info (struct cpuinfo_ia64 *); |
244 | 244 | ||
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 22a6f18a5313..6052422a22b3 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t;
3289 | #define IIO_IIDSR_LVL_SHIFT 0 | 3289 | #define IIO_IIDSR_LVL_SHIFT 0 |
3290 | #define IIO_IIDSR_LVL_MASK 0x000000ff | 3290 | #define IIO_IIDSR_LVL_MASK 0x000000ff |
3291 | 3291 | ||
3292 | /* Xtalk timeout threshhold register (IIO_IXTT) */ | 3292 | /* Xtalk timeout threshold register (IIO_IXTT) */ |
3293 | #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ | 3293 | #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ |
3294 | #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) | 3294 | #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) |
3295 | #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ | 3295 | #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ |
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 0b0d5ff062e5..51427eaa51ba 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -69,4 +69,6 @@
69 | #define SO_PROTOCOL 38 | 69 | #define SO_PROTOCOL 38 |
70 | #define SO_DOMAIN 39 | 70 | #define SO_DOMAIN 39 |
71 | 71 | ||
72 | #define SO_RXQ_OVFL 40 | ||
73 | |||
72 | #endif /* _ASM_IA64_SOCKET_H */ | 74 | #endif /* _ASM_IA64_SOCKET_H */ |
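SO_RXQ_OVFL is the new socket option for reporting receive-queue overflow. A hedged userspace sketch (assumes kernel and libc headers that define it; error handling omitted):

    int on = 1;
    setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &on, sizeof(on));
    /* after recvmsg(), a SOL_SOCKET/SO_RXQ_OVFL cmsg carries a __u32
     * count of datagrams dropped since the socket was last read */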
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | 19 | ||
20 | #define __raw_spin_lock_init(x) ((x)->lock = 0) | 20 | #define arch_spin_lock_init(x) ((x)->lock = 0) |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Ticket locks are conceptually two parts, one indicating the current head of | 23 | * Ticket locks are conceptually two parts, one indicating the current head of |
@@ -38,7 +38,7 @@
38 | #define TICKET_BITS 15 | 38 | #define TICKET_BITS 15 |
39 | #define TICKET_MASK ((1 << TICKET_BITS) - 1) | 39 | #define TICKET_MASK ((1 << TICKET_BITS) - 1) |
40 | 40 | ||
41 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 41 | static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) |
42 | { | 42 | { |
43 | int *p = (int *)&lock->lock, ticket, serve; | 43 | int *p = (int *)&lock->lock, ticket, serve; |
44 | 44 | ||
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
58 | } | 58 | } |
59 | } | 59 | } |
60 | 60 | ||
61 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 61 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
62 | { | 62 | { |
63 | int tmp = ACCESS_ONCE(lock->lock); | 63 | int tmp = ACCESS_ONCE(lock->lock); |
64 | 64 | ||
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 70 | static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) |
71 | { | 71 | { |
72 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; | 72 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; |
73 | 73 | ||
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
75 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; | 75 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; |
76 | } | 76 | } |
77 | 77 | ||
78 | static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) | 78 | static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) |
79 | { | 79 | { |
80 | int *p = (int *)&lock->lock, ticket; | 80 | int *p = (int *)&lock->lock, ticket; |
81 | 81 | ||
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
89 | } | 89 | } |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) | 92 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) |
93 | { | 93 | { |
94 | long tmp = ACCESS_ONCE(lock->lock); | 94 | long tmp = ACCESS_ONCE(lock->lock); |
95 | 95 | ||
96 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); | 96 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); |
97 | } | 97 | } |
98 | 98 | ||
99 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | 99 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) |
100 | { | 100 | { |
101 | long tmp = ACCESS_ONCE(lock->lock); | 101 | long tmp = ACCESS_ONCE(lock->lock); |
102 | 102 | ||
103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; | 103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 106 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
107 | { | 107 | { |
108 | return __ticket_spin_is_locked(lock); | 108 | return __ticket_spin_is_locked(lock); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | 111 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
112 | { | 112 | { |
113 | return __ticket_spin_is_contended(lock); | 113 | return __ticket_spin_is_contended(lock); |
114 | } | 114 | } |
115 | #define __raw_spin_is_contended __raw_spin_is_contended | 115 | #define arch_spin_is_contended arch_spin_is_contended |
116 | 116 | ||
117 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) | 117 | static __always_inline void arch_spin_lock(arch_spinlock_t *lock) |
118 | { | 118 | { |
119 | __ticket_spin_lock(lock); | 119 | __ticket_spin_lock(lock); |
120 | } | 120 | } |
121 | 121 | ||
122 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) | 122 | static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) |
123 | { | 123 | { |
124 | return __ticket_spin_trylock(lock); | 124 | return __ticket_spin_trylock(lock); |
125 | } | 125 | } |
126 | 126 | ||
127 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) | 127 | static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) |
128 | { | 128 | { |
129 | __ticket_spin_unlock(lock); | 129 | __ticket_spin_unlock(lock); |
130 | } | 130 | } |
131 | 131 | ||
132 | static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, | 132 | static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, |
133 | unsigned long flags) | 133 | unsigned long flags) |
134 | { | 134 | { |
135 | __raw_spin_lock(lock); | 135 | arch_spin_lock(lock); |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 138 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
139 | { | 139 | { |
140 | __ticket_spin_unlock_wait(lock); | 140 | __ticket_spin_unlock_wait(lock); |
141 | } | 141 | } |
142 | 142 | ||
143 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 143 | #define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
144 | #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) | 144 | #define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) |
145 | 145 | ||
146 | #ifdef ASM_SUPPORTED | 146 | #ifdef ASM_SUPPORTED |
147 | 147 | ||
148 | static __always_inline void | 148 | static __always_inline void |
149 | __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) | 149 | arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) |
150 | { | 150 | { |
151 | __asm__ __volatile__ ( | 151 | __asm__ __volatile__ ( |
152 | "tbit.nz p6, p0 = %1,%2\n" | 152 | "tbit.nz p6, p0 = %1,%2\n" |
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
169 | : "p6", "p7", "r2", "memory"); | 169 | : "p6", "p7", "r2", "memory"); |
170 | } | 170 | } |
171 | 171 | ||
172 | #define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) | 172 | #define arch_read_lock(lock) arch_read_lock_flags(lock, 0) |
173 | 173 | ||
174 | #else /* !ASM_SUPPORTED */ | 174 | #else /* !ASM_SUPPORTED */ |
175 | 175 | ||
176 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) | 176 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) |
177 | 177 | ||
178 | #define __raw_read_lock(rw) \ | 178 | #define arch_read_lock(rw) \ |
179 | do { \ | 179 | do { \ |
180 | raw_rwlock_t *__read_lock_ptr = (rw); \ | 180 | arch_rwlock_t *__read_lock_ptr = (rw); \ |
181 | \ | 181 | \ |
182 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ | 182 | while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ |
183 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 183 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
@@ -188,16 +188,16 @@ do { \
188 | 188 | ||
189 | #endif /* !ASM_SUPPORTED */ | 189 | #endif /* !ASM_SUPPORTED */ |
190 | 190 | ||
191 | #define __raw_read_unlock(rw) \ | 191 | #define arch_read_unlock(rw) \ |
192 | do { \ | 192 | do { \ |
193 | raw_rwlock_t *__read_lock_ptr = (rw); \ | 193 | arch_rwlock_t *__read_lock_ptr = (rw); \ |
194 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ | 194 | ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ |
195 | } while (0) | 195 | } while (0) |
196 | 196 | ||
197 | #ifdef ASM_SUPPORTED | 197 | #ifdef ASM_SUPPORTED |
198 | 198 | ||
199 | static __always_inline void | 199 | static __always_inline void |
200 | __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) | 200 | arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) |
201 | { | 201 | { |
202 | __asm__ __volatile__ ( | 202 | __asm__ __volatile__ ( |
203 | "tbit.nz p6, p0 = %1, %2\n" | 203 | "tbit.nz p6, p0 = %1, %2\n" |
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
221 | : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); | 221 | : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); |
222 | } | 222 | } |
223 | 223 | ||
224 | #define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) | 224 | #define arch_write_lock(rw) arch_write_lock_flags(rw, 0) |
225 | 225 | ||
226 | #define __raw_write_trylock(rw) \ | 226 | #define arch_write_trylock(rw) \ |
227 | ({ \ | 227 | ({ \ |
228 | register long result; \ | 228 | register long result; \ |
229 | \ | 229 | \ |
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
235 | (result == 0); \ | 235 | (result == 0); \ |
236 | }) | 236 | }) |
237 | 237 | ||
238 | static inline void __raw_write_unlock(raw_rwlock_t *x) | 238 | static inline void arch_write_unlock(arch_rwlock_t *x) |
239 | { | 239 | { |
240 | u8 *y = (u8 *)x; | 240 | u8 *y = (u8 *)x; |
241 | barrier(); | 241 | barrier(); |
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
244 | 244 | ||
245 | #else /* !ASM_SUPPORTED */ | 245 | #else /* !ASM_SUPPORTED */ |
246 | 246 | ||
247 | #define __raw_write_lock_flags(l, flags) __raw_write_lock(l) | 247 | #define arch_write_lock_flags(l, flags) arch_write_lock(l) |
248 | 248 | ||
249 | #define __raw_write_lock(l) \ | 249 | #define arch_write_lock(l) \ |
250 | ({ \ | 250 | ({ \ |
251 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 251 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
252 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ | 252 | __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ |
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
257 | } while (ia64_val); \ | 257 | } while (ia64_val); \ |
258 | }) | 258 | }) |
259 | 259 | ||
260 | #define __raw_write_trylock(rw) \ | 260 | #define arch_write_trylock(rw) \ |
261 | ({ \ | 261 | ({ \ |
262 | __u64 ia64_val; \ | 262 | __u64 ia64_val; \ |
263 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 263 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
265 | (ia64_val == 0); \ | 265 | (ia64_val == 0); \ |
266 | }) | 266 | }) |
267 | 267 | ||
268 | static inline void __raw_write_unlock(raw_rwlock_t *x) | 268 | static inline void arch_write_unlock(arch_rwlock_t *x) |
269 | { | 269 | { |
270 | barrier(); | 270 | barrier(); |
271 | x->write_lock = 0; | 271 | x->write_lock = 0; |
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
273 | 273 | ||
274 | #endif /* !ASM_SUPPORTED */ | 274 | #endif /* !ASM_SUPPORTED */ |
275 | 275 | ||
276 | static inline int __raw_read_trylock(raw_rwlock_t *x) | 276 | static inline int arch_read_trylock(arch_rwlock_t *x) |
277 | { | 277 | { |
278 | union { | 278 | union { |
279 | raw_rwlock_t lock; | 279 | arch_rwlock_t lock; |
280 | __u32 word; | 280 | __u32 word; |
281 | } old, new; | 281 | } old, new; |
282 | old.lock = new.lock = *x; | 282 | old.lock = new.lock = *x; |
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; | 285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; |
286 | } | 286 | } |
287 | 287 | ||
288 | #define _raw_spin_relax(lock) cpu_relax() | 288 | #define arch_spin_relax(lock) cpu_relax() |
289 | #define _raw_read_relax(lock) cpu_relax() | 289 | #define arch_read_relax(lock) cpu_relax() |
290 | #define _raw_write_relax(lock) cpu_relax() | 290 | #define arch_write_relax(lock) cpu_relax() |
291 | 291 | ||
292 | #endif /* _ASM_IA64_SPINLOCK_H */ | 292 | #endif /* _ASM_IA64_SPINLOCK_H */ |
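The functions renamed above implement a ticket lock: a fetchadd takes a ticket from one half of lock->lock, and the release store in __ticket_spin_unlock advances the "now serving" half. A simplified C model of the scheme (illustrative only: it uses two separate fields and GCC builtins, whereas the real ia64 code packs both counters into one word and uses fetchadd4.acq and a st2.rel):

    typedef struct { unsigned int next, serving; } ticket_lock_t;

    static void t_lock(ticket_lock_t *l)
    {
            unsigned int me = __sync_fetch_and_add(&l->next, 1);  /* take a ticket */
            while (__atomic_load_n(&l->serving, __ATOMIC_ACQUIRE) != me)
                    ;                                             /* wait our turn */
    }

    static void t_unlock(ticket_lock_t *l)
    {
            /* only the holder writes serving, so a plain increment is safe */
            __atomic_store_n(&l->serving, l->serving + 1, __ATOMIC_RELEASE);
    }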
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int lock; | 9 | volatile unsigned int lock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int read_counter : 31; | 15 | volatile unsigned int read_counter : 31; |
16 | volatile unsigned int write_lock : 1; | 16 | volatile unsigned int write_lock : 1; |
17 | } raw_rwlock_t; | 17 | } arch_rwlock_t; |
18 | 18 | ||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | 19 | #define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } |
20 | 20 | ||
21 | #endif | 21 | #endif |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 5a5347f5c4e4..10a8f21ca9e3 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -311,11 +311,12 @@
311 | #define __NR_preadv 1319 | 311 | #define __NR_preadv 1319 |
312 | #define __NR_pwritev 1320 | 312 | #define __NR_pwritev 1320 |
313 | #define __NR_rt_tgsigqueueinfo 1321 | 313 | #define __NR_rt_tgsigqueueinfo 1321 |
314 | #define __NR_recvmmsg 1322 | ||
314 | 315 | ||
315 | #ifdef __KERNEL__ | 316 | #ifdef __KERNEL__ |
316 | 317 | ||
317 | 318 | ||
318 | #define NR_syscalls 298 /* length of syscall table */ | 319 | #define NR_syscalls 299 /* length of syscall table */ |
319 | 320 | ||
320 | /* | 321 | /* |
321 | * The following defines stop scripts/checksyscalls.sh from complaining about | 322 | * The following defines stop scripts/checksyscalls.sh from complaining about |
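__NR_recvmmsg (1322) pairs with the sys_call_table entry added in entry.S below; NR_syscalls grows with it so the .org guard at the end of the table stays consistent. A hedged userspace sketch of the new call (assuming a libc that wraps the raw syscall):

    struct mmsghdr msgs[8];
    /* ... point each msgs[i].msg_hdr at an iovec ... */
    int n = recvmmsg(fd, msgs, 8, 0, NULL);   /* up to 8 datagrams per call */
    /* on success, msgs[i].msg_len holds each received datagram's size */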
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e4..67455c2ed2b1 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
37 | #include <xen/interface/xen.h> | 37 | #include <xen/interface/xen.h> |
38 | #include <xen/interface/version.h> /* to compile feature.c */ | 38 | #include <xen/interface/version.h> /* to compile feature.c */ |
39 | #include <xen/features.h> /* to comiple xen-netfront.c */ | 39 | #include <xen/features.h> /* to comiple xen-netfront.c */ |
40 | #include <xen/xen.h> | ||
40 | #include <asm/xen/hypercall.h> | 41 | #include <asm/xen/hypercall.h> |
41 | 42 | ||
42 | /* xen_domain_type is set before executing any C code by early_xen_setup */ | ||
43 | enum xen_domain_type { | ||
44 | XEN_NATIVE, /* running on bare hardware */ | ||
45 | XEN_PV_DOMAIN, /* running in a PV domain */ | ||
46 | XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/ | ||
47 | }; | ||
48 | |||
49 | #ifdef CONFIG_XEN | ||
50 | extern enum xen_domain_type xen_domain_type; | ||
51 | #else | ||
52 | #define xen_domain_type XEN_NATIVE | ||
53 | #endif | ||
54 | |||
55 | #define xen_domain() (xen_domain_type != XEN_NATIVE) | ||
56 | #define xen_pv_domain() (xen_domain() && \ | ||
57 | xen_domain_type == XEN_PV_DOMAIN) | ||
58 | #define xen_hvm_domain() (xen_domain() && \ | ||
59 | xen_domain_type == XEN_HVM_DOMAIN) | ||
60 | |||
61 | #ifdef CONFIG_XEN_DOM0 | ||
62 | #define xen_initial_domain() (xen_pv_domain() && \ | ||
63 | (xen_start_info->flags & SIF_INITDOMAIN)) | ||
64 | #else | ||
65 | #define xen_initial_domain() (0) | ||
66 | #endif | ||
67 | |||
68 | |||
69 | #ifdef CONFIG_XEN | 43 | #ifdef CONFIG_XEN |
70 | extern struct shared_info *HYPERVISOR_shared_info; | 44 | extern struct shared_info *HYPERVISOR_shared_info; |
71 | extern struct start_info *xen_start_info; | 45 | extern struct start_info *xen_start_info; |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
702 | printk(KERN_ERR PREFIX | 702 | printk(KERN_ERR PREFIX |
703 | "Error parsing MADT - no LAPIC entries\n"); | 703 | "Error parsing MADT - no LAPIC entries\n"); |
704 | 704 | ||
705 | #ifdef CONFIG_SMP | ||
706 | if (available_cpus == 0) { | ||
707 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | ||
708 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | ||
709 | smp_boot_data.cpu_phys_id[available_cpus] = | ||
710 | hard_smp_processor_id(); | ||
711 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
712 | } | ||
713 | smp_boot_data.cpu_count = available_cpus; | ||
714 | #endif | ||
715 | /* Make boot-up look pretty */ | ||
716 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, | ||
717 | total_cpus); | ||
718 | |||
705 | return 0; | 719 | return 0; |
706 | } | 720 | } |
707 | 721 | ||
708 | |||
709 | |||
710 | int __init acpi_boot_init(void) | 722 | int __init acpi_boot_init(void) |
711 | { | 723 | { |
712 | 724 | ||
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
769 | if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) | 781 | if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) |
770 | printk(KERN_ERR PREFIX "Can't find FADT\n"); | 782 | printk(KERN_ERR PREFIX "Can't find FADT\n"); |
771 | 783 | ||
784 | #ifdef CONFIG_ACPI_NUMA | ||
772 | #ifdef CONFIG_SMP | 785 | #ifdef CONFIG_SMP |
773 | if (available_cpus == 0) { | ||
774 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | ||
775 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | ||
776 | smp_boot_data.cpu_phys_id[available_cpus] = | ||
777 | hard_smp_processor_id(); | ||
778 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
779 | } | ||
780 | smp_boot_data.cpu_count = available_cpus; | ||
781 | |||
782 | smp_build_cpu_map(); | ||
783 | # ifdef CONFIG_ACPI_NUMA | ||
784 | if (srat_num_cpus == 0) { | 786 | if (srat_num_cpus == 0) { |
785 | int cpu, i = 1; | 787 | int cpu, i = 1; |
786 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) | 788 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) |
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
789 | node_cpuid[i++].phys_id = | 791 | node_cpuid[i++].phys_id = |
790 | smp_boot_data.cpu_phys_id[cpu]; | 792 | smp_boot_data.cpu_phys_id[cpu]; |
791 | } | 793 | } |
792 | # endif | ||
793 | #endif | 794 | #endif |
794 | #ifdef CONFIG_ACPI_NUMA | ||
795 | build_cpu_to_node_map(); | 795 | build_cpu_to_node_map(); |
796 | #endif | 796 | #endif |
797 | /* Make boot-up look pretty */ | ||
798 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, | ||
799 | total_cpus); | ||
800 | return 0; | 797 | return 0; |
801 | } | 798 | } |
802 | 799 | ||
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 6631a9dfafdc..b942f4032d7a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
239 | #ifdef CONFIG_SYSCTL | 239 | #ifdef CONFIG_SYSCTL |
240 | static ctl_table kdump_ctl_table[] = { | 240 | static ctl_table kdump_ctl_table[] = { |
241 | { | 241 | { |
242 | .ctl_name = CTL_UNNUMBERED, | ||
243 | .procname = "kdump_on_init", | 242 | .procname = "kdump_on_init", |
244 | .data = &kdump_on_init, | 243 | .data = &kdump_on_init, |
245 | .maxlen = sizeof(int), | 244 | .maxlen = sizeof(int), |
246 | .mode = 0644, | 245 | .mode = 0644, |
247 | .proc_handler = &proc_dointvec, | 246 | .proc_handler = proc_dointvec, |
248 | }, | 247 | }, |
249 | { | 248 | { |
250 | .ctl_name = CTL_UNNUMBERED, | ||
251 | .procname = "kdump_on_fatal_mca", | 249 | .procname = "kdump_on_fatal_mca", |
252 | .data = &kdump_on_fatal_mca, | 250 | .data = &kdump_on_fatal_mca, |
253 | .maxlen = sizeof(int), | 251 | .maxlen = sizeof(int), |
254 | .mode = 0644, | 252 | .mode = 0644, |
255 | .proc_handler = &proc_dointvec, | 253 | .proc_handler = proc_dointvec, |
256 | }, | 254 | }, |
257 | { .ctl_name = 0 } | 255 | { } |
258 | }; | 256 | }; |
259 | 257 | ||
260 | static ctl_table sys_table[] = { | 258 | static ctl_table sys_table[] = { |
261 | { | 259 | { |
262 | .ctl_name = CTL_KERN, | ||
263 | .procname = "kernel", | 260 | .procname = "kernel", |
264 | .mode = 0555, | 261 | .mode = 0555, |
265 | .child = kdump_ctl_table, | 262 | .child = kdump_ctl_table, |
266 | }, | 263 | }, |
267 | { .ctl_name = 0 } | 264 | { } |
268 | }; | 265 | }; |
269 | #endif | 266 | #endif |
270 | 267 | ||
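Dropping .ctl_name and CTL_UNNUMBERED is part of retiring the binary sysctl interface: entries are now matched by procname alone, and a bare { } still terminates the table. Registration is unchanged; a minimal sketch of hooking the table above into /proc/sys:

    static struct ctl_table_header *hdr;

    hdr = register_sysctl_table(sys_table);   /* /proc/sys/kernel/kdump_* */
    if (!hdr)
            return -ENOMEM;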
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d0e7d37017b4..d75b872ca4dc 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1806,6 +1806,7 @@ sys_call_table:
1806 | data8 sys_preadv | 1806 | data8 sys_preadv |
1807 | data8 sys_pwritev // 1320 | 1807 | data8 sys_pwritev // 1320 |
1808 | data8 sys_rt_tgsigqueueinfo | 1808 | data8 sys_rt_tgsigqueueinfo |
1809 | data8 sys_recvmmsg | ||
1809 | 1810 | ||
1810 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1811 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
1811 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | 1812 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ |
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index d5764a3d74af..b091111270cb 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -84,7 +84,7 @@ static int __init esi_init (void)
84 | case ESI_DESC_ENTRY_POINT: | 84 | case ESI_DESC_ENTRY_POINT: |
85 | break; | 85 | break; |
86 | default: | 86 | default: |
87 | printk(KERN_WARNING "Unkown table type %d found in " | 87 | printk(KERN_WARNING "Unknown table type %d found in " |
88 | "ESI table, ignoring rest of table\n", *p); | 88 | "ESI table, ignoring rest of table\n", *p); |
89 | return -ENODEV; | 89 | return -ENODEV; |
90 | } | 90 | } |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
1051 | * intermediate precision so that we can produce a full 64-bit result. | 1051 | * intermediate precision so that we can produce a full 64-bit result. |
1052 | */ | 1052 | */ |
1053 | GLOBAL_ENTRY(ia64_native_sched_clock) | 1053 | GLOBAL_ENTRY(ia64_native_sched_clock) |
1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1054 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) | 1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) |
1056 | ;; | 1056 | ;; |
1057 | ldf8 f8=[r8] | 1057 | ldf8 f8=[r8] |
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1078 | GLOBAL_ENTRY(cycle_to_cputime) | 1078 | GLOBAL_ENTRY(cycle_to_cputime) |
1079 | alloc r16=ar.pfs,1,0,0,0 | 1079 | alloc r16=ar.pfs,1,0,0,0 |
1080 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1080 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1081 | ;; | 1081 | ;; |
1082 | ldf8 f8=[r8] | 1082 | ldf8 f8=[r8] |
1083 | ;; | 1083 | ;; |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
33 | EXPORT_SYMBOL(per_cpu__cpu_info); | 33 | EXPORT_SYMBOL(per_cpu__ia64_cpu_info); |
34 | #ifdef CONFIG_SMP | 34 | #ifdef CONFIG_SMP |
35 | EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); | 35 | EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); |
36 | #endif | 36 | #endif |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
793 | goto unlock_iosapic_lock; | 793 | goto unlock_iosapic_lock; |
794 | } | 794 | } |
795 | 795 | ||
796 | spin_lock(&irq_desc[irq].lock); | 796 | raw_spin_lock(&irq_desc[irq].lock); |
797 | dest = get_target_cpu(gsi, irq); | 797 | dest = get_target_cpu(gsi, irq); |
798 | dmode = choose_dmode(); | 798 | dmode = choose_dmode(); |
799 | err = register_intr(gsi, irq, dmode, polarity, trigger); | 799 | err = register_intr(gsi, irq, dmode, polarity, trigger); |
800 | if (err < 0) { | 800 | if (err < 0) { |
801 | spin_unlock(&irq_desc[irq].lock); | 801 | raw_spin_unlock(&irq_desc[irq].lock); |
802 | irq = err; | 802 | irq = err; |
803 | goto unlock_iosapic_lock; | 803 | goto unlock_iosapic_lock; |
804 | } | 804 | } |
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | 817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), |
818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); | 818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); |
819 | 819 | ||
820 | spin_unlock(&irq_desc[irq].lock); | 820 | raw_spin_unlock(&irq_desc[irq].lock); |
821 | unlock_iosapic_lock: | 821 | unlock_iosapic_lock: |
822 | spin_unlock_irqrestore(&iosapic_lock, flags); | 822 | spin_unlock_irqrestore(&iosapic_lock, flags); |
823 | return irq; | 823 | return irq; |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
71 | } | 71 | } |
72 | 72 | ||
73 | if (i < NR_IRQS) { | 73 | if (i < NR_IRQS) { |
74 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 74 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
75 | action = irq_desc[i].action; | 75 | action = irq_desc[i].action; |
76 | if (!action) | 76 | if (!action) |
77 | goto skip; | 77 | goto skip; |
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
91 | 91 | ||
92 | seq_putc(p, '\n'); | 92 | seq_putc(p, '\n'); |
93 | skip: | 93 | skip: |
94 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 94 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
95 | } else if (i == NR_IRQS) | 95 | } else if (i == NR_IRQS) |
96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
97 | return 0; | 97 | return 0; |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..70e4bad23432 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
345 | 345 | ||
346 | desc = irq_desc + irq; | 346 | desc = irq_desc + irq; |
347 | cfg = irq_cfg + irq; | 347 | cfg = irq_cfg + irq; |
348 | spin_lock(&desc->lock); | 348 | raw_spin_lock(&desc->lock); |
349 | if (!cfg->move_cleanup_count) | 349 | if (!cfg->move_cleanup_count) |
350 | goto unlock; | 350 | goto unlock; |
351 | 351 | ||
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
358 | spin_unlock_irqrestore(&vector_lock, flags); | 358 | spin_unlock_irqrestore(&vector_lock, flags); |
359 | cfg->move_cleanup_count--; | 359 | cfg->move_cleanup_count--; |
360 | unlock: | 360 | unlock: |
361 | spin_unlock(&desc->lock); | 361 | raw_spin_unlock(&desc->lock); |
362 | } | 362 | } |
363 | return IRQ_HANDLED; | 363 | return IRQ_HANDLED; |
364 | } | 364 | } |
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
59 | ia64_do_tlb_purge: | 59 | ia64_do_tlb_purge: |
60 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 60 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
61 | 61 | ||
62 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 62 | GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 |
63 | ;; | 63 | ;; |
64 | addl r17=O(PTCE_STRIDE),r2 | 64 | addl r17=O(PTCE_STRIDE),r2 |
65 | addl r2=O(PTCE_BASE),r2 | 65 | addl r2=O(PTCE_BASE),r2 |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f1782705b1f7..599b233bef75 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -522,42 +522,37 @@ EXPORT_SYMBOL(pfm_sysctl);
522 | 522 | ||
523 | static ctl_table pfm_ctl_table[]={ | 523 | static ctl_table pfm_ctl_table[]={ |
524 | { | 524 | { |
525 | .ctl_name = CTL_UNNUMBERED, | ||
526 | .procname = "debug", | 525 | .procname = "debug", |
527 | .data = &pfm_sysctl.debug, | 526 | .data = &pfm_sysctl.debug, |
528 | .maxlen = sizeof(int), | 527 | .maxlen = sizeof(int), |
529 | .mode = 0666, | 528 | .mode = 0666, |
530 | .proc_handler = &proc_dointvec, | 529 | .proc_handler = proc_dointvec, |
531 | }, | 530 | }, |
532 | { | 531 | { |
533 | .ctl_name = CTL_UNNUMBERED, | ||
534 | .procname = "debug_ovfl", | 532 | .procname = "debug_ovfl", |
535 | .data = &pfm_sysctl.debug_ovfl, | 533 | .data = &pfm_sysctl.debug_ovfl, |
536 | .maxlen = sizeof(int), | 534 | .maxlen = sizeof(int), |
537 | .mode = 0666, | 535 | .mode = 0666, |
538 | .proc_handler = &proc_dointvec, | 536 | .proc_handler = proc_dointvec, |
539 | }, | 537 | }, |
540 | { | 538 | { |
541 | .ctl_name = CTL_UNNUMBERED, | ||
542 | .procname = "fastctxsw", | 539 | .procname = "fastctxsw", |
543 | .data = &pfm_sysctl.fastctxsw, | 540 | .data = &pfm_sysctl.fastctxsw, |
544 | .maxlen = sizeof(int), | 541 | .maxlen = sizeof(int), |
545 | .mode = 0600, | 542 | .mode = 0600, |
546 | .proc_handler = &proc_dointvec, | 543 | .proc_handler = proc_dointvec, |
547 | }, | 544 | }, |
548 | { | 545 | { |
549 | .ctl_name = CTL_UNNUMBERED, | ||
550 | .procname = "expert_mode", | 546 | .procname = "expert_mode", |
551 | .data = &pfm_sysctl.expert_mode, | 547 | .data = &pfm_sysctl.expert_mode, |
552 | .maxlen = sizeof(int), | 548 | .maxlen = sizeof(int), |
553 | .mode = 0600, | 549 | .mode = 0600, |
554 | .proc_handler = &proc_dointvec, | 550 | .proc_handler = proc_dointvec, |
555 | }, | 551 | }, |
556 | {} | 552 | {} |
557 | }; | 553 | }; |
558 | static ctl_table pfm_sysctl_dir[] = { | 554 | static ctl_table pfm_sysctl_dir[] = { |
559 | { | 555 | { |
560 | .ctl_name = CTL_UNNUMBERED, | ||
561 | .procname = "perfmon", | 556 | .procname = "perfmon", |
562 | .mode = 0555, | 557 | .mode = 0555, |
563 | .child = pfm_ctl_table, | 558 | .child = pfm_ctl_table, |
@@ -566,7 +561,6 @@ static ctl_table pfm_sysctl_dir[] = {
566 | }; | 561 | }; |
567 | static ctl_table pfm_sysctl_root[] = { | 562 | static ctl_table pfm_sysctl_root[] = { |
568 | { | 563 | { |
569 | .ctl_name = CTL_KERN, | ||
570 | .procname = "kernel", | 564 | .procname = "kernel", |
571 | .mode = 0555, | 565 | .mode = 0555, |
572 | .child = pfm_sysctl_dir, | 566 | .child = pfm_sysctl_dir, |
@@ -3523,7 +3517,7 @@ pfm_use_debug_registers(struct task_struct *task)
3523 | * IA64_THREAD_DBG_VALID set. This indicates a task which was | 3517 | * IA64_THREAD_DBG_VALID set. This indicates a task which was |
3524 | * able to use the debug registers for debugging purposes via | 3518 | * able to use the debug registers for debugging purposes via |
3525 | * ptrace(). Therefore we know it was not using them for | 3519 | * ptrace(). Therefore we know it was not using them for |
3526 | * perfmormance monitoring, so we only decrement the number | 3520 | * performance monitoring, so we only decrement the number |
3527 | * of "ptraced" debug register users to keep the count up to date | 3521 | * of "ptraced" debug register users to keep the count up to date |
3528 | */ | 3522 | */ |
3529 | int | 3523 | int |
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
61 | 61 | ||
62 | // purge all TC entries | 62 | // purge all TC entries |
63 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 63 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
64 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 64 | GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 |
65 | ;; | 65 | ;; |
66 | addl r17=O(PTCE_STRIDE),r2 | 66 | addl r17=O(PTCE_STRIDE),r2 |
67 | addl r2=O(PTCE_BASE),r2 | 67 | addl r2=O(PTCE_BASE),r2 |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
74 | EXPORT_SYMBOL(__per_cpu_offset); | 74 | EXPORT_SYMBOL(__per_cpu_offset); |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 77 | DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); |
78 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); | 78 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); |
79 | unsigned long ia64_cycles_per_usec; | 79 | unsigned long ia64_cycles_per_usec; |
80 | struct ia64_boot_param *ia64_boot_param; | 80 | struct ia64_boot_param *ia64_boot_param; |
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
566 | early_acpi_boot_init(); | 566 | early_acpi_boot_init(); |
567 | # ifdef CONFIG_ACPI_NUMA | 567 | # ifdef CONFIG_ACPI_NUMA |
568 | acpi_numa_init(); | 568 | acpi_numa_init(); |
569 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 569 | # ifdef CONFIG_ACPI_HOTPLUG_CPU |
570 | prefill_possible_map(); | 570 | prefill_possible_map(); |
571 | #endif | 571 | # endif |
572 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? | 572 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? |
573 | 32 : cpus_weight(early_cpu_possible_map)), | 573 | 32 : cpus_weight(early_cpu_possible_map)), |
574 | additional_cpus > 0 ? additional_cpus : 0); | 574 | additional_cpus > 0 ? additional_cpus : 0); |
575 | # endif | 575 | # endif |
576 | #else | ||
577 | # ifdef CONFIG_SMP | ||
578 | smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */ | ||
579 | # endif | ||
580 | #endif /* CONFIG_APCI_BOOT */ | 576 | #endif /* CONFIG_APCI_BOOT */ |
581 | 577 | ||
578 | #ifdef CONFIG_SMP | ||
579 | smp_build_cpu_map(); | ||
580 | #endif | ||
582 | find_memory(); | 581 | find_memory(); |
583 | 582 | ||
584 | /* process SAL system table: */ | 583 | /* process SAL system table: */ |
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
856 | } | 855 | } |
857 | 856 | ||
858 | /* | 857 | /* |
859 | * In UP configuration, setup_per_cpu_areas() is defined in | ||
860 | * include/linux/percpu.h | ||
861 | */ | ||
862 | #ifdef CONFIG_SMP | ||
863 | void __init | ||
864 | setup_per_cpu_areas (void) | ||
865 | { | ||
866 | /* start_kernel() requires this... */ | ||
867 | } | ||
868 | #endif | ||
869 | |||
870 | /* | ||
871 | * Do the following calculations: | 858 | * Do the following calculations: |
872 | * | 859 | * |
873 | * 1. the max. cache line size. | 860 | * 1. the max. cache line size. |
@@ -980,7 +967,7 @@ cpu_init (void)
980 | * depends on the data returned by identify_cpu(). We break the dependency by | 967 | * depends on the data returned by identify_cpu(). We break the dependency by |
981 | * accessing cpu_data() through the canonical per-CPU address. | 968 | * accessing cpu_data() through the canonical per-CPU address. |
982 | */ | 969 | */ |
983 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start); | 970 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); |
984 | identify_cpu(cpu_info); | 971 | identify_cpu(cpu_info); |
985 | 972 | ||
986 | #ifdef CONFIG_MCKINLEY | 973 | #ifdef CONFIG_MCKINLEY |
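With per-cpu variables becoming ordinary global symbols, a generic name like cpu_info would collide across the tree, hence the ia64_cpu_info rename threaded through processor.h, head.S, mca_asm.S, relocate_kernel.S and ia64_ksyms.c above; the dummy setup_per_cpu_areas() can go because the generic percpu allocator now supplies one (cf. the HAVE_LEGACY_PER_CPU_AREA removal in Kconfig). The access pattern after the rename, using only names from the hunks above:

    DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

    struct cpuinfo_ia64 *remote = &per_cpu(ia64_cpu_info, cpu);       /* any CPU */
    struct cpuinfo_ia64 *local  = &__ia64_per_cpu_var(ia64_cpu_info); /* this CPU */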
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f34036..609d50056a6c 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@ sys_getpagesize (void)
100 | asmlinkage unsigned long | 100 | asmlinkage unsigned long |
101 | ia64_brk (unsigned long brk) | 101 | ia64_brk (unsigned long brk) |
102 | { | 102 | { |
103 | unsigned long rlim, retval, newbrk, oldbrk; | 103 | unsigned long retval = sys_brk(brk); |
104 | struct mm_struct *mm = current->mm; | ||
105 | |||
106 | /* | ||
107 | * Most of this replicates the code in sys_brk() except for an additional safety | ||
108 | * check and the clearing of r8. However, we can't call sys_brk() because we need | ||
109 | * to acquire the mmap_sem before we can do the test... | ||
110 | */ | ||
111 | down_write(&mm->mmap_sem); | ||
112 | |||
113 | if (brk < mm->end_code) | ||
114 | goto out; | ||
115 | newbrk = PAGE_ALIGN(brk); | ||
116 | oldbrk = PAGE_ALIGN(mm->brk); | ||
117 | if (oldbrk == newbrk) | ||
118 | goto set_brk; | ||
119 | |||
120 | /* Always allow shrinking brk. */ | ||
121 | if (brk <= mm->brk) { | ||
122 | if (!do_munmap(mm, newbrk, oldbrk-newbrk)) | ||
123 | goto set_brk; | ||
124 | goto out; | ||
125 | } | ||
126 | |||
127 | /* Check against unimplemented/unmapped addresses: */ | ||
128 | if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT) | ||
129 | goto out; | ||
130 | |||
131 | /* Check against rlimit.. */ | ||
132 | rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; | ||
133 | if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) | ||
134 | goto out; | ||
135 | |||
136 | /* Check against existing mmap mappings. */ | ||
137 | if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) | ||
138 | goto out; | ||
139 | |||
140 | /* Ok, looks good - let it rip. */ | ||
141 | if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) | ||
142 | goto out; | ||
143 | set_brk: | ||
144 | mm->brk = brk; | ||
145 | out: | ||
146 | retval = mm->brk; | ||
147 | up_write(&mm->mmap_sem); | ||
148 | force_successful_syscall_return(); | 104 | force_successful_syscall_return(); |
149 | return retval; | 105 | return retval; |
150 | } | 106 | } |
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
185 | return 0; | 141 | return 0; |
186 | } | 142 | } |
187 | 143 | ||
188 | static inline unsigned long | ||
189 | do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) | ||
190 | { | ||
191 | struct file *file = NULL; | ||
192 | |||
193 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
194 | if (!(flags & MAP_ANONYMOUS)) { | ||
195 | file = fget(fd); | ||
196 | if (!file) | ||
197 | return -EBADF; | ||
198 | |||
199 | if (!file->f_op || !file->f_op->mmap) { | ||
200 | addr = -ENODEV; | ||
201 | goto out; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | /* Careful about overflows.. */ | ||
206 | len = PAGE_ALIGN(len); | ||
207 | if (!len || len > TASK_SIZE) { | ||
208 | addr = -EINVAL; | ||
209 | goto out; | ||
210 | } | ||
211 | |||
212 | down_write(¤t->mm->mmap_sem); | ||
213 | addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
214 | up_write(¤t->mm->mmap_sem); | ||
215 | |||
216 | out: if (file) | ||
217 | fput(file); | ||
218 | return addr; | ||
219 | } | ||
220 | |||
221 | /* | 144 | /* |
222 | * mmap2() is like mmap() except that the offset is expressed in units | 145 | * mmap2() is like mmap() except that the offset is expressed in units |
223 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces | 146 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces |
@@ -226,7 +149,7 @@ out: if (file) | |||
226 | asmlinkage unsigned long | 149 | asmlinkage unsigned long |
227 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) | 150 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) |
228 | { | 151 | { |
229 | addr = do_mmap2(addr, len, prot, flags, fd, pgoff); | 152 | addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); |
230 | if (!IS_ERR((void *) addr)) | 153 | if (!IS_ERR((void *) addr)) |
231 | force_successful_syscall_return(); | 154 | force_successful_syscall_return(); |
232 | return addr; | 155 | return addr; |
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo | |||
238 | if (offset_in_page(off) != 0) | 161 | if (offset_in_page(off) != 0) |
239 | return -EINVAL; | 162 | return -EINVAL; |
240 | 163 | ||
241 | addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | 164 | addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); |
242 | if (!IS_ERR((void *) addr)) | 165 | if (!IS_ERR((void *) addr)) |
243 | force_successful_syscall_return(); | 166 | force_successful_syscall_return(); |
244 | return addr; | 167 | return addr; |
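Both entry points now funnel into the generic sys_mmap_pgoff(), which takes the offset in pages; sys_mmap() keeps its byte interface by rejecting sub-page offsets and shifting. A small sketch of that conversion, assuming PAGE_SHIFT of 14 (16KB pages, one common ia64 configuration):

/* Sketch of the byte-offset -> page-offset step in sys_mmap() above.
 * PAGE_SHIFT of 14 is an assumption; ia64 configurations vary. */
#include <stdio.h>
#include <errno.h>

#define PAGE_SHIFT	14
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static long byte_off_to_pgoff(unsigned long off, unsigned long *pgoff)
{
	if (off & (PAGE_SIZE - 1))		/* offset_in_page(off) != 0 */
		return -EINVAL;
	*pgoff = off >> PAGE_SHIFT;
	return 0;
}

int main(void)
{
	unsigned long pgoff;

	if (byte_off_to_pgoff(3 * PAGE_SIZE, &pgoff) == 0)
		printf("byte offset %#lx -> pgoff %lu\n", 3 * PAGE_SIZE, pgoff);
	if (byte_off_to_pgoff(123, &pgoff) == -EINVAL)
		printf("sub-page offset rejected with -EINVAL\n");
	return 0;
}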
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 4990495d7531..a35c661e5e89 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void) | |||
473 | { | 473 | { |
474 | } | 474 | } |
475 | 475 | ||
476 | void update_vsyscall(struct timespec *wall, struct clocksource *c) | 476 | void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult) |
477 | { | 477 | { |
478 | unsigned long flags; | 478 | unsigned long flags; |
479 | 479 | ||
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c) | |||
481 | 481 | ||
482 | /* copy fsyscall clock data */ | 482 | /* copy fsyscall clock data */ |
483 | fsyscall_gtod_data.clk_mask = c->mask; | 483 | fsyscall_gtod_data.clk_mask = c->mask; |
484 | fsyscall_gtod_data.clk_mult = c->mult; | 484 | fsyscall_gtod_data.clk_mult = mult; |
485 | fsyscall_gtod_data.clk_shift = c->shift; | 485 | fsyscall_gtod_data.clk_shift = c->shift; |
486 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; | 486 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; |
487 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; | 487 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; |
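Passing mult explicitly lets the timekeeping core hand the fsyscall path an NTP-adjusted multiplier instead of the raw c->mult. Consumers turn a cycle delta into nanoseconds with the usual fixed-point step; a sketch with invented mult/shift values:

/* Sketch of the fixed-point cycles->ns step behind clk_mult/clk_shift.
 * The mult/shift pair below is invented (roughly a 1.5 GHz clock). */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t mult = 2796203, shift = 22;	/* ~0.667 ns per cycle */
	uint64_t delta = 1000000;		/* cycles since clk_cycle_last */

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)delta,
	       (unsigned long long)cyc2ns(delta, mult, shift));
	return 0;
}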
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0a0c77b2c988..1295ba327f6f 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -166,6 +166,12 @@ SECTIONS | |||
166 | } | 166 | } |
167 | #endif | 167 | #endif |
168 | 168 | ||
169 | #ifdef CONFIG_SMP | ||
170 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
171 | __cpu0_per_cpu = .; | ||
172 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
173 | #endif | ||
174 | |||
169 | . = ALIGN(PAGE_SIZE); | 175 | . = ALIGN(PAGE_SIZE); |
170 | __init_end = .; | 176 | __init_end = .; |
171 | 177 | ||
@@ -198,11 +204,6 @@ SECTIONS | |||
198 | data : { } :data | 204 | data : { } :data |
199 | .data : AT(ADDR(.data) - LOAD_OFFSET) | 205 | .data : AT(ADDR(.data) - LOAD_OFFSET) |
200 | { | 206 | { |
201 | #ifdef CONFIG_SMP | ||
202 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
203 | __cpu0_per_cpu = .; | ||
204 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
205 | #endif | ||
206 | INIT_TASK_DATA(PAGE_SIZE) | 207 | INIT_TASK_DATA(PAGE_SIZE) |
207 | CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) | 208 | CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) |
208 | READ_MOSTLY_DATA(SMP_CACHE_BYTES) | 209 | READ_MOSTLY_DATA(SMP_CACHE_BYTES) |
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index 0bb99b732908..1089b3e918ac 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile | |||
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | |||
49 | EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | 49 | EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ |
50 | 50 | ||
51 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ | 51 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ |
52 | coalesced_mmio.o irq_comm.o) | 52 | coalesced_mmio.o irq_comm.o assigned-dev.o) |
53 | 53 | ||
54 | ifeq ($(CONFIG_IOMMU_API),y) | 54 | ifeq ($(CONFIG_IOMMU_API),y) |
55 | common-objs += $(addprefix ../../../virt/kvm/, iommu.o) | 55 | common-objs += $(addprefix ../../../virt/kvm/, iommu.o) |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0ad09f05efa9..5fdeec5fddcf 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | |||
124 | 124 | ||
125 | static DEFINE_SPINLOCK(vp_lock); | 125 | static DEFINE_SPINLOCK(vp_lock); |
126 | 126 | ||
127 | void kvm_arch_hardware_enable(void *garbage) | 127 | int kvm_arch_hardware_enable(void *garbage) |
128 | { | 128 | { |
129 | long status; | 129 | long status; |
130 | long tmp_base; | 130 | long tmp_base; |
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage) | |||
137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | 137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); |
138 | local_irq_restore(saved_psr); | 138 | local_irq_restore(saved_psr); |
139 | if (slot < 0) | 139 | if (slot < 0) |
140 | return; | 140 | return -EINVAL; |
141 | 141 | ||
142 | spin_lock(&vp_lock); | 142 | spin_lock(&vp_lock); |
143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | 143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? |
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage) | |||
145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | 145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); |
146 | if (status != 0) { | 146 | if (status != 0) { |
147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | 147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); |
148 | return ; | 148 | return -EINVAL; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (!kvm_vsa_base) { | 151 | if (!kvm_vsa_base) { |
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage) | |||
154 | } | 154 | } |
155 | spin_unlock(&vp_lock); | 155 | spin_unlock(&vp_lock); |
156 | ia64_ptr_entry(0x3, slot); | 156 | ia64_ptr_entry(0x3, slot); |
157 | |||
158 | return 0; | ||
157 | } | 159 | } |
158 | 160 | ||
159 | void kvm_arch_hardware_disable(void *garbage) | 161 | void kvm_arch_hardware_disable(void *garbage) |
@@ -851,8 +853,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | |||
851 | r = 0; | 853 | r = 0; |
852 | switch (chip->chip_id) { | 854 | switch (chip->chip_id) { |
853 | case KVM_IRQCHIP_IOAPIC: | 855 | case KVM_IRQCHIP_IOAPIC: |
854 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | 856 | r = kvm_get_ioapic(kvm, &chip->chip.ioapic); |
855 | sizeof(struct kvm_ioapic_state)); | ||
856 | break; | 857 | break; |
857 | default: | 858 | default: |
858 | r = -EINVAL; | 859 | r = -EINVAL; |
@@ -868,9 +869,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
868 | r = 0; | 869 | r = 0; |
869 | switch (chip->chip_id) { | 870 | switch (chip->chip_id) { |
870 | case KVM_IRQCHIP_IOAPIC: | 871 | case KVM_IRQCHIP_IOAPIC: |
871 | memcpy(ioapic_irqchip(kvm), | 872 | r = kvm_set_ioapic(kvm, &chip->chip.ioapic); |
872 | &chip->chip.ioapic, | ||
873 | sizeof(struct kvm_ioapic_state)); | ||
874 | break; | 873 | break; |
875 | default: | 874 | default: |
876 | r = -EINVAL; | 875 | r = -EINVAL; |
@@ -944,7 +943,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
944 | { | 943 | { |
945 | struct kvm *kvm = filp->private_data; | 944 | struct kvm *kvm = filp->private_data; |
946 | void __user *argp = (void __user *)arg; | 945 | void __user *argp = (void __user *)arg; |
947 | int r = -EINVAL; | 946 | int r = -ENOTTY; |
948 | 947 | ||
949 | switch (ioctl) { | 948 | switch (ioctl) { |
950 | case KVM_SET_MEMORY_REGION: { | 949 | case KVM_SET_MEMORY_REGION: { |
@@ -985,10 +984,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
985 | goto out; | 984 | goto out; |
986 | if (irqchip_in_kernel(kvm)) { | 985 | if (irqchip_in_kernel(kvm)) { |
987 | __s32 status; | 986 | __s32 status; |
988 | mutex_lock(&kvm->irq_lock); | ||
989 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | 987 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
990 | irq_event.irq, irq_event.level); | 988 | irq_event.irq, irq_event.level); |
991 | mutex_unlock(&kvm->irq_lock); | ||
992 | if (ioctl == KVM_IRQ_LINE_STATUS) { | 989 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
993 | irq_event.status = status; | 990 | irq_event.status = status; |
994 | if (copy_to_user(argp, &irq_event, | 991 | if (copy_to_user(argp, &irq_event, |
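With kvm_arch_hardware_enable() returning int, a failure to insert the TR entry or to initialize the PAL VP environment can now propagate to the caller instead of leaving the CPU silently unusable for guests. A hypothetical sketch of the aggregate-the-errors pattern this enables (all names invented):

/* Hypothetical sketch: a per-cpu enable hook whose return value is
 * aggregated by the caller instead of being ignored. */
#include <stdio.h>

static int hardware_enable(int cpu)
{
	if (cpu == 2)		/* pretend VT setup failed on cpu2 */
		return -22;	/* -EINVAL, as in the patch above */
	return 0;
}

int main(void)
{
	int err = 0;

	for (int cpu = 0; cpu < 4; cpu++) {
		int r = hardware_enable(cpu);

		if (r) {
			fprintf(stderr, "enable failed on cpu%d: %d\n", cpu, r);
			err = r;	/* remember the failure */
		}
	}
	return err ? 1 : 0;
}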
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 2f724d2bf299..54bf54059811 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -154,38 +154,99 @@ static void *cpu_data; | |||
154 | void * __cpuinit | 154 | void * __cpuinit |
155 | per_cpu_init (void) | 155 | per_cpu_init (void) |
156 | { | 156 | { |
157 | int cpu; | 157 | static bool first_time = true; |
158 | static int first_time=1; | 158 | void *cpu0_data = __cpu0_per_cpu; |
159 | unsigned int cpu; | ||
160 | |||
161 | if (!first_time) | ||
162 | goto skip; | ||
163 | first_time = false; | ||
159 | 164 | ||
160 | /* | 165 | /* |
161 | * get_free_pages() cannot be used before cpu_init() done. BSP | 166 | * get_free_pages() cannot be used before cpu_init() is done. |
162 | * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls | 167 | * The BSP allocates PERCPU_PAGE_SIZE bytes for each possible CPU |
163 | * get_zeroed_page(). | 168 | * so that the APs need not call get_zeroed_page(). |
164 | */ | 169 | */ |
165 | if (first_time) { | 170 | for_each_possible_cpu(cpu) { |
166 | void *cpu0_data = __cpu0_per_cpu; | 171 | void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start; |
167 | 172 | ||
168 | first_time=0; | 173 | memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start); |
174 | __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start; | ||
175 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | ||
169 | 176 | ||
170 | __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start; | 177 | /* |
171 | per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0]; | 178 | * percpu area for cpu0 is moved from the __init area |
179 | * which is set up by head.S and used till this point. | ||
180 | * Update ar.k3. This move ensures that the percpu | ||
181 | * area for cpu0 is on the correct node and that its | ||
182 | * virtual address isn't insanely far from the other | ||
183 | * percpu areas, which is important for the congruent | ||
184 | * percpu allocator. | ||
185 | */ | ||
186 | if (cpu == 0) | ||
187 | ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) - | ||
188 | (unsigned long)__per_cpu_start); | ||
172 | 189 | ||
173 | for (cpu = 1; cpu < NR_CPUS; cpu++) { | 190 | cpu_data += PERCPU_PAGE_SIZE; |
174 | memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
175 | __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start; | ||
176 | cpu_data += PERCPU_PAGE_SIZE; | ||
177 | per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; | ||
178 | } | ||
179 | } | 191 | } |
192 | skip: | ||
180 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; | 193 | return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; |
181 | } | 194 | } |
182 | 195 | ||
183 | static inline void | 196 | static inline void |
184 | alloc_per_cpu_data(void) | 197 | alloc_per_cpu_data(void) |
185 | { | 198 | { |
186 | cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1, | 199 | cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(), |
187 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | 200 | PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); |
188 | } | 201 | } |
202 | |||
203 | /** | ||
204 | * setup_per_cpu_areas - setup percpu areas | ||
205 | * | ||
206 | * Arch code has already allocated and initialized percpu areas. All | ||
207 | * this function has to do is to teach the determined layout to the | ||
208 | * dynamic percpu allocator, which happens to be more complex than | ||
209 | * creating whole new areas using the helpers. | ||
210 | */ | ||
211 | void __init | ||
212 | setup_per_cpu_areas(void) | ||
213 | { | ||
214 | struct pcpu_alloc_info *ai; | ||
215 | struct pcpu_group_info *gi; | ||
216 | unsigned int cpu; | ||
217 | ssize_t static_size, reserved_size, dyn_size; | ||
218 | int rc; | ||
219 | |||
220 | ai = pcpu_alloc_alloc_info(1, num_possible_cpus()); | ||
221 | if (!ai) | ||
222 | panic("failed to allocate pcpu_alloc_info"); | ||
223 | gi = &ai->groups[0]; | ||
224 | |||
225 | /* units are assigned consecutively to possible cpus */ | ||
226 | for_each_possible_cpu(cpu) | ||
227 | gi->cpu_map[gi->nr_units++] = cpu; | ||
228 | |||
229 | /* set parameters */ | ||
230 | static_size = __per_cpu_end - __per_cpu_start; | ||
231 | reserved_size = PERCPU_MODULE_RESERVE; | ||
232 | dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; | ||
233 | if (dyn_size < 0) | ||
234 | panic("percpu area overflow static=%zd reserved=%zd\n", | ||
235 | static_size, reserved_size); | ||
236 | |||
237 | ai->static_size = static_size; | ||
238 | ai->reserved_size = reserved_size; | ||
239 | ai->dyn_size = dyn_size; | ||
240 | ai->unit_size = PERCPU_PAGE_SIZE; | ||
241 | ai->atom_size = PAGE_SIZE; | ||
242 | ai->alloc_size = PERCPU_PAGE_SIZE; | ||
243 | |||
244 | rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]); | ||
245 | if (rc) | ||
246 | panic("failed to setup percpu area (err=%d)", rc); | ||
247 | |||
248 | pcpu_free_alloc_info(ai); | ||
249 | } | ||
189 | #else | 250 | #else |
190 | #define alloc_per_cpu_data() do { } while (0) | 251 | #define alloc_per_cpu_data() do { } while (0) |
191 | #endif /* CONFIG_SMP */ | 252 | #endif /* CONFIG_SMP */ |
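per_cpu_init() now walks every possible CPU, copies the static percpu template into each unit, and records the unit's distance from __per_cpu_start, so that a per_cpu() access is simply "static address + per-cpu offset". A hypothetical userspace model of that layout (sizes and names invented):

/* Hypothetical model of the layout per_cpu_init() builds: one static
 * template, one unit per CPU, and a recorded offset per CPU. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define NCPUS		4
#define UNIT_SIZE	64		/* stands in for PERCPU_PAGE_SIZE */

static char template_area[UNIT_SIZE];	/* the static percpu section */
static long pcpu_offset[NCPUS];		/* __per_cpu_offset[] */

static int *per_cpu_counter(int cpu)
{
	/* what per_cpu() expands to: static address + recorded offset */
	return (int *)(template_area + pcpu_offset[cpu]);
}

int main(void)
{
	char *base = malloc((size_t)UNIT_SIZE * NCPUS);

	*(int *)template_area = 42;	/* initializer in the template */
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		char *unit = base + (size_t)cpu * UNIT_SIZE;

		memcpy(unit, template_area, UNIT_SIZE);	 /* copy static data */
		pcpu_offset[cpu] = unit - template_area; /* record the offset */
	}

	*per_cpu_counter(3) += 1;	/* touches cpu3's copy only */
	printf("cpu0=%d cpu3=%d\n", *per_cpu_counter(0), *per_cpu_counter(3));
	free(base);
	return 0;
}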
@@ -270,8 +331,8 @@ paging_init (void) | |||
270 | 331 | ||
271 | map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * | 332 | map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * |
272 | sizeof(struct page)); | 333 | sizeof(struct page)); |
273 | vmalloc_end -= map_size; | 334 | VMALLOC_END -= map_size; |
274 | vmem_map = (struct page *) vmalloc_end; | 335 | vmem_map = (struct page *) VMALLOC_END; |
275 | efi_memmap_walk(create_mem_map_page_table, NULL); | 336 | efi_memmap_walk(create_mem_map_page_table, NULL); |
276 | 337 | ||
277 | /* | 338 | /* |
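The setup_per_cpu_areas() added above carves each PERCPU_PAGE_SIZE unit into static, reserved, and dynamic regions and panics when they cannot fit. The sizing arithmetic, with invented numbers (real values come from the linker and Kconfig):

/* Sketch of the unit sizing check in setup_per_cpu_areas() above. */
#include <stdio.h>

int main(void)
{
	long unit_size     = 64 * 1024;	/* PERCPU_PAGE_SIZE */
	long static_size   = 20 * 1024;	/* __per_cpu_end - __per_cpu_start */
	long reserved_size =  8 * 1024;	/* PERCPU_MODULE_RESERVE */
	long dyn_size = unit_size - static_size - reserved_size;

	if (dyn_size < 0) {
		fprintf(stderr, "percpu area overflow static=%ld reserved=%ld\n",
			static_size, reserved_size);
		return 1;
	}
	printf("unit=%ld static=%ld reserved=%ld dynamic=%ld\n",
	       unit_size, static_size, reserved_size, dyn_size);
	return 0;
}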
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index d85ba98d9008..19c4b2195dce 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node) | |||
143 | int cpu; | 143 | int cpu; |
144 | 144 | ||
145 | for_each_possible_early_cpu(cpu) { | 145 | for_each_possible_early_cpu(cpu) { |
146 | if (cpu == 0) { | 146 | void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start; |
147 | void *cpu0_data = __cpu0_per_cpu; | 147 | |
148 | __per_cpu_offset[cpu] = (char*)cpu0_data - | 148 | if (node != node_cpuid[cpu].nid) |
149 | __per_cpu_start; | 149 | continue; |
150 | } else if (node == node_cpuid[cpu].nid) { | 150 | |
151 | memcpy(__va(cpu_data), __phys_per_cpu_start, | 151 | memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start); |
152 | __per_cpu_end - __per_cpu_start); | 152 | __per_cpu_offset[cpu] = (char *)__va(cpu_data) - |
153 | __per_cpu_offset[cpu] = (char*)__va(cpu_data) - | 153 | __per_cpu_start; |
154 | __per_cpu_start; | 154 | |
155 | cpu_data += PERCPU_PAGE_SIZE; | 155 | /* |
156 | } | 156 | * percpu area for cpu0 is moved from the __init area |
157 | * which is set up by head.S and used till this point. | ||
158 | * Update ar.k3. This move ensures that the percpu | ||
159 | * area for cpu0 is on the correct node and that its | ||
160 | * virtual address isn't insanely far from the other | ||
161 | * percpu areas, which is important for the congruent | ||
162 | * percpu allocator. | ||
163 | */ | ||
164 | if (cpu == 0) | ||
165 | ia64_set_kr(IA64_KR_PER_CPU_DATA, | ||
166 | (unsigned long)cpu_data - | ||
167 | (unsigned long)__per_cpu_start); | ||
168 | |||
169 | cpu_data += PERCPU_PAGE_SIZE; | ||
157 | } | 170 | } |
158 | #endif | 171 | #endif |
159 | return cpu_data; | 172 | return cpu_data; |
160 | } | 173 | } |
161 | 174 | ||
175 | #ifdef CONFIG_SMP | ||
176 | /** | ||
177 | * setup_per_cpu_areas - setup percpu areas | ||
178 | * | ||
179 | * Arch code has already allocated and initialized percpu areas. All | ||
180 | * this function has to do is to teach the determined layout to the | ||
181 | * dynamic percpu allocator, which happens to be more complex than | ||
182 | * creating whole new areas using the helpers. | ||
183 | */ | ||
184 | void __init setup_per_cpu_areas(void) | ||
185 | { | ||
186 | struct pcpu_alloc_info *ai; | ||
187 | struct pcpu_group_info *uninitialized_var(gi); | ||
188 | unsigned int *cpu_map; | ||
189 | void *base; | ||
190 | unsigned long base_offset; | ||
191 | unsigned int cpu; | ||
192 | ssize_t static_size, reserved_size, dyn_size; | ||
193 | int node, prev_node, unit, nr_units, rc; | ||
194 | |||
195 | ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); | ||
196 | if (!ai) | ||
197 | panic("failed to allocate pcpu_alloc_info"); | ||
198 | cpu_map = ai->groups[0].cpu_map; | ||
199 | |||
200 | /* determine base */ | ||
201 | base = (void *)ULONG_MAX; | ||
202 | for_each_possible_cpu(cpu) | ||
203 | base = min(base, | ||
204 | (void *)(__per_cpu_offset[cpu] + __per_cpu_start)); | ||
205 | base_offset = (void *)__per_cpu_start - base; | ||
206 | |||
207 | /* build cpu_map, units are grouped by node */ | ||
208 | unit = 0; | ||
209 | for_each_node(node) | ||
210 | for_each_possible_cpu(cpu) | ||
211 | if (node == node_cpuid[cpu].nid) | ||
212 | cpu_map[unit++] = cpu; | ||
213 | nr_units = unit; | ||
214 | |||
215 | /* set basic parameters */ | ||
216 | static_size = __per_cpu_end - __per_cpu_start; | ||
217 | reserved_size = PERCPU_MODULE_RESERVE; | ||
218 | dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; | ||
219 | if (dyn_size < 0) | ||
220 | panic("percpu area overflow static=%zd reserved=%zd\n", | ||
221 | static_size, reserved_size); | ||
222 | |||
223 | ai->static_size = static_size; | ||
224 | ai->reserved_size = reserved_size; | ||
225 | ai->dyn_size = dyn_size; | ||
226 | ai->unit_size = PERCPU_PAGE_SIZE; | ||
227 | ai->atom_size = PAGE_SIZE; | ||
228 | ai->alloc_size = PERCPU_PAGE_SIZE; | ||
229 | |||
230 | /* | ||
231 | * CPUs are put into groups according to node. Walk cpu_map | ||
232 | * and create new groups at node boundaries. | ||
233 | */ | ||
234 | prev_node = -1; | ||
235 | ai->nr_groups = 0; | ||
236 | for (unit = 0; unit < nr_units; unit++) { | ||
237 | cpu = cpu_map[unit]; | ||
238 | node = node_cpuid[cpu].nid; | ||
239 | |||
240 | if (node == prev_node) { | ||
241 | gi->nr_units++; | ||
242 | continue; | ||
243 | } | ||
244 | prev_node = node; | ||
245 | |||
246 | gi = &ai->groups[ai->nr_groups++]; | ||
247 | gi->nr_units = 1; | ||
248 | gi->base_offset = __per_cpu_offset[cpu] + base_offset; | ||
249 | gi->cpu_map = &cpu_map[unit]; | ||
250 | } | ||
251 | |||
252 | rc = pcpu_setup_first_chunk(ai, base); | ||
253 | if (rc) | ||
254 | panic("failed to setup percpu area (err=%d)", rc); | ||
255 | |||
256 | pcpu_free_alloc_info(ai); | ||
257 | } | ||
258 | #endif | ||
259 | |||
162 | /** | 260 | /** |
163 | * fill_pernode - initialize pernode data. | 261 | * fill_pernode - initialize pernode data. |
164 | * @node: the node id. | 262 | * @node: the node id. |
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void) | |||
352 | /* Set the node_data pointer for each per-cpu struct */ | 450 | /* Set the node_data pointer for each per-cpu struct */ |
353 | for_each_possible_early_cpu(cpu) { | 451 | for_each_possible_early_cpu(cpu) { |
354 | node = node_cpuid[cpu].nid; | 452 | node = node_cpuid[cpu].nid; |
355 | per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data; | 453 | per_cpu(ia64_cpu_info, cpu).node_data = |
454 | mem_data[node].node_data; | ||
356 | } | 455 | } |
357 | #else | 456 | #else |
358 | { | 457 | { |
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void) | |||
360 | cpu = 0; | 459 | cpu = 0; |
361 | node = node_cpuid[cpu].nid; | 460 | node = node_cpuid[cpu].nid; |
362 | cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + | 461 | cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + |
363 | ((char *)&per_cpu__cpu_info - __per_cpu_start)); | 462 | ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start)); |
364 | cpu0_cpu_info->node_data = mem_data[node].node_data; | 463 | cpu0_cpu_info->node_data = mem_data[node].node_data; |
365 | } | 464 | } |
366 | #endif /* CONFIG_SMP */ | 465 | #endif /* CONFIG_SMP */ |
@@ -666,9 +765,9 @@ void __init paging_init(void) | |||
666 | sparse_init(); | 765 | sparse_init(); |
667 | 766 | ||
668 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 767 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
669 | vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * | 768 | VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * |
670 | sizeof(struct page)); | 769 | sizeof(struct page)); |
671 | vmem_map = (struct page *) vmalloc_end; | 770 | vmem_map = (struct page *) VMALLOC_END; |
672 | efi_memmap_walk(create_mem_map_page_table, NULL); | 771 | efi_memmap_walk(create_mem_map_page_table, NULL); |
673 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); | 772 | printk("Virtual mem_map starts at 0x%p\n", vmem_map); |
674 | #endif | 773 | #endif |
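In the NUMA variant of setup_per_cpu_areas() above, cpu_map is built sorted by node, so group construction is a single walk that opens a new pcpu_group_info whenever the node id changes. A sketch of that boundary walk (the node array is invented):

/* Hypothetical sketch of the node-boundary walk: units are pre-sorted
 * by node, so a new group opens whenever the node id changes. */
#include <stdio.h>

int main(void)
{
	int unit_node[] = { 0, 0, 0, 1, 1, 2 };	/* node of each unit, sorted */
	int nr_units = sizeof(unit_node) / sizeof(unit_node[0]);
	int prev_node = -1, nr_groups = 0;

	for (int unit = 0; unit < nr_units; unit++) {
		int node = unit_node[unit];

		if (node == prev_node) {
			printf("  unit %d joins group %d\n", unit, nr_groups - 1);
			continue;
		}
		prev_node = node;
		printf("group %d starts at unit %d (node %d)\n",
		       nr_groups++, unit, node);
	}
	return 0;
}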
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1857766a63c1..b9609c69343a 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void); | |||
44 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; | 44 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; |
45 | 45 | ||
46 | #ifdef CONFIG_VIRTUAL_MEM_MAP | 46 | #ifdef CONFIG_VIRTUAL_MEM_MAP |
47 | unsigned long vmalloc_end = VMALLOC_END_INIT; | 47 | unsigned long VMALLOC_END = VMALLOC_END_INIT; |
48 | EXPORT_SYMBOL(vmalloc_end); | 48 | EXPORT_SYMBOL(VMALLOC_END); |
49 | struct page *vmem_map; | 49 | struct page *vmem_map; |
50 | EXPORT_SYMBOL(vmem_map); | 50 | EXPORT_SYMBOL(vmem_map); |
51 | #endif | 51 | #endif |
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index c0fca2c1c858..df639db779f9 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c | |||
@@ -131,6 +131,7 @@ alloc_pci_controller (int seg) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | struct pci_root_info { | 133 | struct pci_root_info { |
134 | struct acpi_device *bridge; | ||
134 | struct pci_controller *controller; | 135 | struct pci_controller *controller; |
135 | char *name; | 136 | char *name; |
136 | }; | 137 | }; |
@@ -297,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data) | |||
297 | window->offset = offset; | 298 | window->offset = offset; |
298 | 299 | ||
299 | if (insert_resource(root, &window->resource)) { | 300 | if (insert_resource(root, &window->resource)) { |
300 | printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n", | 301 | dev_err(&info->bridge->dev, |
301 | window->resource.start, window->resource.end, | 302 | "can't allocate host bridge window %pR\n", |
302 | root->name, info->name); | 303 | &window->resource); |
304 | } else { | ||
305 | if (offset) | ||
306 | dev_info(&info->bridge->dev, "host bridge window %pR " | ||
307 | "(PCI address [%#llx-%#llx])\n", | ||
308 | &window->resource, | ||
309 | window->resource.start - offset, | ||
310 | window->resource.end - offset); | ||
311 | else | ||
312 | dev_info(&info->bridge->dev, | ||
313 | "host bridge window %pR\n", | ||
314 | &window->resource); | ||
303 | } | 315 | } |
304 | 316 | ||
305 | return AE_OK; | 317 | return AE_OK; |
@@ -319,8 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) | |||
319 | (res->end - res->start < 16)) | 331 | (res->end - res->start < 16)) |
320 | continue; | 332 | continue; |
321 | if (j >= PCI_BUS_NUM_RESOURCES) { | 333 | if (j >= PCI_BUS_NUM_RESOURCES) { |
322 | printk("Ignoring range [%#llx-%#llx] (%lx)\n", | 334 | dev_warn(&bus->dev, |
323 | res->start, res->end, res->flags); | 335 | "ignoring host bridge window %pR (no space)\n", |
336 | res); | ||
324 | continue; | 337 | continue; |
325 | } | 338 | } |
326 | bus->resource[j++] = res; | 339 | bus->resource[j++] = res; |
@@ -364,6 +377,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) | |||
364 | goto out3; | 377 | goto out3; |
365 | 378 | ||
366 | sprintf(name, "PCI Bus %04x:%02x", domain, bus); | 379 | sprintf(name, "PCI Bus %04x:%02x", domain, bus); |
380 | info.bridge = device; | ||
367 | info.controller = controller; | 381 | info.controller = controller; |
368 | info.name = name; | 382 | info.name = name; |
369 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, | 383 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, |
@@ -720,9 +734,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | |||
720 | return ret; | 734 | return ret; |
721 | } | 735 | } |
722 | 736 | ||
723 | /* It's defined in drivers/pci/pci.c */ | ||
724 | extern u8 pci_cache_line_size; | ||
725 | |||
726 | /** | 737 | /** |
727 | * set_pci_cacheline_size - determine cacheline size for PCI devices | 738 | * set_pci_cacheline_size - determine cacheline size for PCI devices |
728 | * | 739 | * |
@@ -731,7 +742,7 @@ extern u8 pci_cache_line_size; | |||
731 | * | 742 | * |
732 | * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). | 743 | * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). |
733 | */ | 744 | */ |
734 | static void __init set_pci_cacheline_size(void) | 745 | static void __init set_pci_dfl_cacheline_size(void) |
735 | { | 746 | { |
736 | unsigned long levels, unique_caches; | 747 | unsigned long levels, unique_caches; |
737 | long status; | 748 | long status; |
@@ -751,7 +762,7 @@ static void __init set_pci_cacheline_size(void) | |||
751 | "(status=%ld)\n", __func__, status); | 762 | "(status=%ld)\n", __func__, status); |
752 | return; | 763 | return; |
753 | } | 764 | } |
754 | pci_cache_line_size = (1 << cci.pcci_line_size) / 4; | 765 | pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4; |
755 | } | 766 | } |
756 | 767 | ||
757 | u64 ia64_dma_get_required_mask(struct device *dev) | 768 | u64 ia64_dma_get_required_mask(struct device *dev) |
@@ -782,7 +793,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask); | |||
782 | 793 | ||
783 | static int __init pcibios_init(void) | 794 | static int __init pcibios_init(void) |
784 | { | 795 | { |
785 | set_pci_cacheline_size(); | 796 | set_pci_dfl_cacheline_size(); |
786 | return 0; | 797 | return 0; |
787 | } | 798 | } |
788 | 799 | ||
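PAL_CACHE_INFO reports the line size as log2(bytes), while pci_dfl_cache_line_size is kept in 32-bit-word units, hence the (1 << pcci_line_size) / 4 conversion in set_pci_dfl_cacheline_size(). With an assumed PAL value:

/* Sketch of the conversion above; pcci_line_size = 7 is an assumed
 * PAL_CACHE_INFO result, not a value from the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int pcci_line_size = 7;		/* 2^7 = 128-byte lines */
	unsigned int bytes  = 1u << pcci_line_size;
	unsigned int dwords = bytes / 4;		/* value stored above */

	printf("cache line: %u bytes -> %u dwords\n", bytes, dwords);
	return 0;
}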
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index fd50ff94302b..66f633bff059 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c | |||
@@ -390,7 +390,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, | |||
390 | pcidev_match.handle = NULL; | 390 | pcidev_match.handle = NULL; |
391 | 391 | ||
392 | acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX, | 392 | acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX, |
393 | find_matching_device, &pcidev_match, NULL); | 393 | find_matching_device, NULL, &pcidev_match, NULL); |
394 | 394 | ||
395 | if (!pcidev_match.handle) { | 395 | if (!pcidev_match.handle) { |
396 | printk(KERN_ERR | 396 | printk(KERN_ERR |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index 1176506b2bae..e884ba4e031d 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) | |||
496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | 496 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, |
497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | 497 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, |
498 | stat->deadlocks, | 498 | stat->deadlocks, |
499 | 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | 499 | 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, |
500 | 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | 500 | 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, |
501 | 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec, | 501 | 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, |
502 | stat->shub_ptc_flushes_not_my_mm, | 502 | stat->shub_ptc_flushes_not_my_mm, |
503 | stat->deadlocks2, | 503 | stat->deadlocks2, |
504 | stat->shub_ipi_flushes, | 504 | stat->shub_ipi_flushes, |
505 | 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec); | 505 | 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); |
506 | } | 506 | } |
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 35b2a27d2e77..efb454534e52 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/bitmap.h> | ||
12 | #include <asm/sn/sn_sal.h> | 13 | #include <asm/sn/sn_sal.h> |
13 | #include <asm/sn/addrs.h> | 14 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/io.h> | 15 | #include <asm/sn/io.h> |
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) | |||
369 | static dma_addr_t | 370 | static dma_addr_t |
370 | tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) | 371 | tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) |
371 | { | 372 | { |
372 | int i, ps, ps_shift, entry, entries, mapsize, last_entry; | 373 | int ps, ps_shift, entry, entries, mapsize; |
373 | u64 xio_addr, end_xio_addr; | 374 | u64 xio_addr, end_xio_addr; |
374 | struct tioca_common *tioca_common; | 375 | struct tioca_common *tioca_common; |
375 | struct tioca_kernel *tioca_kern; | 376 | struct tioca_kernel *tioca_kern; |
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) | |||
410 | map = tioca_kern->ca_pcigart_pagemap; | 411 | map = tioca_kern->ca_pcigart_pagemap; |
411 | mapsize = tioca_kern->ca_pcigart_entries; | 412 | mapsize = tioca_kern->ca_pcigart_entries; |
412 | 413 | ||
413 | entry = find_first_zero_bit(map, mapsize); | 414 | entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0); |
414 | while (entry < mapsize) { | 415 | if (entry >= mapsize) { |
415 | last_entry = find_next_bit(map, mapsize, entry); | ||
416 | |||
417 | if (last_entry - entry >= entries) | ||
418 | break; | ||
419 | |||
420 | entry = find_next_zero_bit(map, mapsize, last_entry); | ||
421 | } | ||
422 | |||
423 | if (entry > mapsize) { | ||
424 | kfree(ca_dmamap); | 416 | kfree(ca_dmamap); |
425 | goto map_return; | 417 | goto map_return; |
426 | } | 418 | } |
427 | 419 | ||
428 | for (i = 0; i < entries; i++) | 420 | bitmap_set(map, entry, entries); |
429 | set_bit(entry + i, map); | ||
430 | 421 | ||
431 | bus_addr = tioca_kern->ca_pciap_base + (entry * ps); | 422 | bus_addr = tioca_kern->ca_pciap_base + (entry * ps); |
432 | 423 | ||
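The open-coded GART search is replaced by bitmap_find_next_zero_area() plus bitmap_set(), which also fixes the old off-by-one ("entry > mapsize" let entry == mapsize slip through unhandled). A userspace model of find-a-run-then-mark, using a byte per bit for simplicity:

/* Userspace model of bitmap_find_next_zero_area() + bitmap_set():
 * find a run of 'entries' clear bits, then mark them allocated. */
#include <stdio.h>

#define MAPSIZE 16

static int find_zero_area(unsigned char *map, int size, int entries)
{
	for (int start = 0; start + entries <= size; start++) {
		int i;

		for (i = 0; i < entries && !map[start + i]; i++)
			;
		if (i == entries)
			return start;		/* run found */
		start += i;			/* skip past the set bit */
	}
	return size;				/* mimic "entry >= mapsize" */
}

int main(void)
{
	unsigned char map[MAPSIZE] = { 1, 1, 0, 1, 0, 0, 0, 1 };
	int entries = 3;
	int entry = find_zero_area(map, MAPSIZE, entries);

	if (entry >= MAPSIZE) {
		fprintf(stderr, "no room for %d entries\n", entries);
		return 1;
	}
	for (int i = 0; i < entries; i++)	/* bitmap_set(map, entry, entries) */
		map[entry + i] = 1;
	printf("allocated entries [%d..%d]\n", entry, entry + entries - 1);
	return 0;
}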
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c index f042e192d2fe..a3fb7cf9ae1d 100644 --- a/arch/ia64/xen/irq_xen.c +++ b/arch/ia64/xen/irq_xen.c | |||
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | 65 | ||
66 | static DEFINE_PER_CPU(int, timer_irq) = -1; | 66 | static DEFINE_PER_CPU(int, xen_timer_irq) = -1; |
67 | static DEFINE_PER_CPU(int, ipi_irq) = -1; | 67 | static DEFINE_PER_CPU(int, xen_ipi_irq) = -1; |
68 | static DEFINE_PER_CPU(int, resched_irq) = -1; | 68 | static DEFINE_PER_CPU(int, xen_resched_irq) = -1; |
69 | static DEFINE_PER_CPU(int, cmc_irq) = -1; | 69 | static DEFINE_PER_CPU(int, xen_cmc_irq) = -1; |
70 | static DEFINE_PER_CPU(int, cmcp_irq) = -1; | 70 | static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1; |
71 | static DEFINE_PER_CPU(int, cpep_irq) = -1; | 71 | static DEFINE_PER_CPU(int, xen_cpep_irq) = -1; |
72 | #define NAME_SIZE 15 | 72 | #define NAME_SIZE 15 |
73 | static DEFINE_PER_CPU(char[NAME_SIZE], timer_name); | 73 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name); |
74 | static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name); | 74 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name); |
75 | static DEFINE_PER_CPU(char[NAME_SIZE], resched_name); | 75 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name); |
76 | static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name); | 76 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name); |
77 | static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name); | 77 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name); |
78 | static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name); | 78 | static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name); |
79 | #undef NAME_SIZE | 79 | #undef NAME_SIZE |
80 | 80 | ||
81 | struct saved_irq { | 81 | struct saved_irq { |
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, | |||
144 | if (xen_slab_ready) { | 144 | if (xen_slab_ready) { |
145 | switch (vec) { | 145 | switch (vec) { |
146 | case IA64_TIMER_VECTOR: | 146 | case IA64_TIMER_VECTOR: |
147 | snprintf(per_cpu(timer_name, cpu), | 147 | snprintf(per_cpu(xen_timer_name, cpu), |
148 | sizeof(per_cpu(timer_name, cpu)), | 148 | sizeof(per_cpu(xen_timer_name, cpu)), |
149 | "%s%d", action->name, cpu); | 149 | "%s%d", action->name, cpu); |
150 | irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, | 150 | irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu, |
151 | action->handler, action->flags, | 151 | action->handler, action->flags, |
152 | per_cpu(timer_name, cpu), action->dev_id); | 152 | per_cpu(xen_timer_name, cpu), action->dev_id); |
153 | per_cpu(timer_irq, cpu) = irq; | 153 | per_cpu(xen_timer_irq, cpu) = irq; |
154 | break; | 154 | break; |
155 | case IA64_IPI_RESCHEDULE: | 155 | case IA64_IPI_RESCHEDULE: |
156 | snprintf(per_cpu(resched_name, cpu), | 156 | snprintf(per_cpu(xen_resched_name, cpu), |
157 | sizeof(per_cpu(resched_name, cpu)), | 157 | sizeof(per_cpu(xen_resched_name, cpu)), |
158 | "%s%d", action->name, cpu); | 158 | "%s%d", action->name, cpu); |
159 | irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, | 159 | irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, |
160 | action->handler, action->flags, | 160 | action->handler, action->flags, |
161 | per_cpu(resched_name, cpu), action->dev_id); | 161 | per_cpu(xen_resched_name, cpu), action->dev_id); |
162 | per_cpu(resched_irq, cpu) = irq; | 162 | per_cpu(xen_resched_irq, cpu) = irq; |
163 | break; | 163 | break; |
164 | case IA64_IPI_VECTOR: | 164 | case IA64_IPI_VECTOR: |
165 | snprintf(per_cpu(ipi_name, cpu), | 165 | snprintf(per_cpu(xen_ipi_name, cpu), |
166 | sizeof(per_cpu(ipi_name, cpu)), | 166 | sizeof(per_cpu(xen_ipi_name, cpu)), |
167 | "%s%d", action->name, cpu); | 167 | "%s%d", action->name, cpu); |
168 | irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, | 168 | irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu, |
169 | action->handler, action->flags, | 169 | action->handler, action->flags, |
170 | per_cpu(ipi_name, cpu), action->dev_id); | 170 | per_cpu(xen_ipi_name, cpu), action->dev_id); |
171 | per_cpu(ipi_irq, cpu) = irq; | 171 | per_cpu(xen_ipi_irq, cpu) = irq; |
172 | break; | 172 | break; |
173 | case IA64_CMC_VECTOR: | 173 | case IA64_CMC_VECTOR: |
174 | snprintf(per_cpu(cmc_name, cpu), | 174 | snprintf(per_cpu(xen_cmc_name, cpu), |
175 | sizeof(per_cpu(cmc_name, cpu)), | 175 | sizeof(per_cpu(xen_cmc_name, cpu)), |
176 | "%s%d", action->name, cpu); | 176 | "%s%d", action->name, cpu); |
177 | irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, | 177 | irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu, |
178 | action->handler, | 178 | action->handler, |
179 | action->flags, | 179 | action->flags, |
180 | per_cpu(cmc_name, cpu), | 180 | per_cpu(xen_cmc_name, cpu), |
181 | action->dev_id); | 181 | action->dev_id); |
182 | per_cpu(cmc_irq, cpu) = irq; | 182 | per_cpu(xen_cmc_irq, cpu) = irq; |
183 | break; | 183 | break; |
184 | case IA64_CMCP_VECTOR: | 184 | case IA64_CMCP_VECTOR: |
185 | snprintf(per_cpu(cmcp_name, cpu), | 185 | snprintf(per_cpu(xen_cmcp_name, cpu), |
186 | sizeof(per_cpu(cmcp_name, cpu)), | 186 | sizeof(per_cpu(xen_cmcp_name, cpu)), |
187 | "%s%d", action->name, cpu); | 187 | "%s%d", action->name, cpu); |
188 | irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, | 188 | irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu, |
189 | action->handler, | 189 | action->handler, |
190 | action->flags, | 190 | action->flags, |
191 | per_cpu(cmcp_name, cpu), | 191 | per_cpu(xen_cmcp_name, cpu), |
192 | action->dev_id); | 192 | action->dev_id); |
193 | per_cpu(cmcp_irq, cpu) = irq; | 193 | per_cpu(xen_cmcp_irq, cpu) = irq; |
194 | break; | 194 | break; |
195 | case IA64_CPEP_VECTOR: | 195 | case IA64_CPEP_VECTOR: |
196 | snprintf(per_cpu(cpep_name, cpu), | 196 | snprintf(per_cpu(xen_cpep_name, cpu), |
197 | sizeof(per_cpu(cpep_name, cpu)), | 197 | sizeof(per_cpu(xen_cpep_name, cpu)), |
198 | "%s%d", action->name, cpu); | 198 | "%s%d", action->name, cpu); |
199 | irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, | 199 | irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu, |
200 | action->handler, | 200 | action->handler, |
201 | action->flags, | 201 | action->flags, |
202 | per_cpu(cpep_name, cpu), | 202 | per_cpu(xen_cpep_name, cpu), |
203 | action->dev_id); | 203 | action->dev_id); |
204 | per_cpu(cpep_irq, cpu) = irq; | 204 | per_cpu(xen_cpep_irq, cpu) = irq; |
205 | break; | 205 | break; |
206 | case IA64_CPE_VECTOR: | 206 | case IA64_CPE_VECTOR: |
207 | case IA64_MCA_RENDEZ_VECTOR: | 207 | case IA64_MCA_RENDEZ_VECTOR: |
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb, | |||
275 | 275 | ||
276 | if (action == CPU_DEAD) { | 276 | if (action == CPU_DEAD) { |
277 | /* Unregister evtchn. */ | 277 | /* Unregister evtchn. */ |
278 | if (per_cpu(cpep_irq, cpu) >= 0) { | 278 | if (per_cpu(xen_cpep_irq, cpu) >= 0) { |
279 | unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL); | 279 | unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu), |
280 | per_cpu(cpep_irq, cpu) = -1; | 280 | NULL); |
281 | per_cpu(xen_cpep_irq, cpu) = -1; | ||
281 | } | 282 | } |
282 | if (per_cpu(cmcp_irq, cpu) >= 0) { | 283 | if (per_cpu(xen_cmcp_irq, cpu) >= 0) { |
283 | unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL); | 284 | unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu), |
284 | per_cpu(cmcp_irq, cpu) = -1; | 285 | NULL); |
286 | per_cpu(xen_cmcp_irq, cpu) = -1; | ||
285 | } | 287 | } |
286 | if (per_cpu(cmc_irq, cpu) >= 0) { | 288 | if (per_cpu(xen_cmc_irq, cpu) >= 0) { |
287 | unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL); | 289 | unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL); |
288 | per_cpu(cmc_irq, cpu) = -1; | 290 | per_cpu(xen_cmc_irq, cpu) = -1; |
289 | } | 291 | } |
290 | if (per_cpu(ipi_irq, cpu) >= 0) { | 292 | if (per_cpu(xen_ipi_irq, cpu) >= 0) { |
291 | unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL); | 293 | unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL); |
292 | per_cpu(ipi_irq, cpu) = -1; | 294 | per_cpu(xen_ipi_irq, cpu) = -1; |
293 | } | 295 | } |
294 | if (per_cpu(resched_irq, cpu) >= 0) { | 296 | if (per_cpu(xen_resched_irq, cpu) >= 0) { |
295 | unbind_from_irqhandler(per_cpu(resched_irq, cpu), | 297 | unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), |
296 | NULL); | 298 | NULL); |
297 | per_cpu(resched_irq, cpu) = -1; | 299 | per_cpu(xen_resched_irq, cpu) = -1; |
298 | } | 300 | } |
299 | if (per_cpu(timer_irq, cpu) >= 0) { | 301 | if (per_cpu(xen_timer_irq, cpu) >= 0) { |
300 | unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL); | 302 | unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu), |
301 | per_cpu(timer_irq, cpu) = -1; | 303 | NULL); |
304 | per_cpu(xen_timer_irq, cpu) = -1; | ||
302 | } | 305 | } |
303 | } | 306 | } |
304 | return NOTIFY_OK; | 307 | return NOTIFY_OK; |
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c index dbeadb9c8e20..c1c544513e8d 100644 --- a/arch/ia64/xen/time.c +++ b/arch/ia64/xen/time.c | |||
@@ -34,15 +34,15 @@ | |||
34 | 34 | ||
35 | #include "../kernel/fsyscall_gtod_data.h" | 35 | #include "../kernel/fsyscall_gtod_data.h" |
36 | 36 | ||
37 | DEFINE_PER_CPU(struct vcpu_runstate_info, runstate); | 37 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); |
38 | DEFINE_PER_CPU(unsigned long, processed_stolen_time); | 38 | static DEFINE_PER_CPU(unsigned long, xen_stolen_time); |
39 | DEFINE_PER_CPU(unsigned long, processed_blocked_time); | 39 | static DEFINE_PER_CPU(unsigned long, xen_blocked_time); |
40 | 40 | ||
41 | /* taken from i386/kernel/time-xen.c */ | 41 | /* taken from i386/kernel/time-xen.c */ |
42 | static void xen_init_missing_ticks_accounting(int cpu) | 42 | static void xen_init_missing_ticks_accounting(int cpu) |
43 | { | 43 | { |
44 | struct vcpu_register_runstate_memory_area area; | 44 | struct vcpu_register_runstate_memory_area area; |
45 | struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu); | 45 | struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu); |
46 | int rc; | 46 | int rc; |
47 | 47 | ||
48 | memset(runstate, 0, sizeof(*runstate)); | 48 | memset(runstate, 0, sizeof(*runstate)); |
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu) | |||
52 | &area); | 52 | &area); |
53 | WARN_ON(rc && rc != -ENOSYS); | 53 | WARN_ON(rc && rc != -ENOSYS); |
54 | 54 | ||
55 | per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; | 55 | per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked]; |
56 | per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] | 56 | per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] |
57 | + runstate->time[RUNSTATE_offline]; | 57 | + runstate->time[RUNSTATE_offline]; |
58 | } | 58 | } |
59 | 59 | ||
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res) | |||
68 | 68 | ||
69 | BUG_ON(preemptible()); | 69 | BUG_ON(preemptible()); |
70 | 70 | ||
71 | state = &__get_cpu_var(runstate); | 71 | state = &__get_cpu_var(xen_runstate); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * The runstate info is always updated by the hypervisor on | 74 | * The runstate info is always updated by the hypervisor on |
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm) | |||
103 | * This function just checks and rejects this effect. | 103 | * This function just checks and rejects this effect. |
104 | */ | 104 | */ |
105 | if (!time_after_eq(runstate.time[RUNSTATE_blocked], | 105 | if (!time_after_eq(runstate.time[RUNSTATE_blocked], |
106 | per_cpu(processed_blocked_time, cpu))) | 106 | per_cpu(xen_blocked_time, cpu))) |
107 | blocked = 0; | 107 | blocked = 0; |
108 | 108 | ||
109 | if (!time_after_eq(runstate.time[RUNSTATE_runnable] + | 109 | if (!time_after_eq(runstate.time[RUNSTATE_runnable] + |
110 | runstate.time[RUNSTATE_offline], | 110 | runstate.time[RUNSTATE_offline], |
111 | per_cpu(processed_stolen_time, cpu))) | 111 | per_cpu(xen_stolen_time, cpu))) |
112 | stolen = 0; | 112 | stolen = 0; |
113 | 113 | ||
114 | if (!time_after(delta_itm + new_itm, ia64_get_itc())) | 114 | if (!time_after(delta_itm + new_itm, ia64_get_itc())) |
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm) | |||
147 | } else { | 147 | } else { |
148 | local_cpu_data->itm_next = delta_itm + new_itm; | 148 | local_cpu_data->itm_next = delta_itm + new_itm; |
149 | } | 149 | } |
150 | per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen; | 150 | per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; |
151 | per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked; | 151 | per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; |
152 | } | 152 | } |
153 | return delta_itm; | 153 | return delta_itm; |
154 | } | 154 | } |
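consider_steal_time() compares the hypervisor's cumulative runstate nanoseconds against the per-cpu processed counters (now xen_stolen_time/xen_blocked_time) and converts the delta into whole ticks, carrying the remainder forward. A sketch of that step with invented numbers, assuming HZ=1000:

/* Sketch of the stolen-time step: cumulative runstate ns minus what was
 * already processed, converted to whole ticks.  Values are invented. */
#include <stdio.h>
#include <stdint.h>

#define NS_PER_TICK	1000000ULL	/* 1 ms tick, assumes HZ=1000 */

int main(void)
{
	uint64_t runnable_plus_offline = 7500000;	/* from the hypervisor */
	uint64_t xen_stolen_time       = 5000000;	/* already accounted */

	uint64_t stolen = (runnable_plus_offline - xen_stolen_time) / NS_PER_TICK;

	xen_stolen_time += stolen * NS_PER_TICK;	/* keep the remainder */
	printf("account %llu stolen ticks, %llu ns carried over\n",
	       (unsigned long long)stolen,
	       (unsigned long long)(runnable_plus_offline - xen_stolen_time));
	return 0;
}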