author     Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
committer  Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
commit     32032df6c2f6c9c6b2ada2ce42322231824f70c2
tree       b1ce838a37044bb38dfc128e2116ca35630e629a
parent     22b737f4c75197372d64afc6ed1bccd58c00e549
parent     c5974b835a909ff15c3b7e6cf6789b5eb919f419
Merge branch 'master' into percpu
Conflicts:
	arch/powerpc/platforms/pseries/hvCall.S
	include/linux/percpu.h
Diffstat (limited to 'arch/ia64/include')
 arch/ia64/include/asm/acpi.h                 |   6
 arch/ia64/include/asm/asm-offsets.h          |   1
 arch/ia64/include/asm/bitops.h               |   2
 arch/ia64/include/asm/cacheflush.h           |   1
 arch/ia64/include/asm/dma-mapping.h          |   2
 arch/ia64/include/asm/elf.h                  |   1
 arch/ia64/include/asm/hw_irq.h               |   6
 arch/ia64/include/asm/io.h                   |   2
 arch/ia64/include/asm/irq.h                  |   2
 arch/ia64/include/asm/kvm.h                  |   1
 arch/ia64/include/asm/kvm_host.h             |   1
 arch/ia64/include/asm/mca.h                  |   5
 arch/ia64/include/asm/numa.h                 |   2
 arch/ia64/include/asm/perfmon_default_smpl.h |   2
 arch/ia64/include/asm/rwsem.h                |   2
 arch/ia64/include/asm/sn/shubio.h            |   2
 arch/ia64/include/asm/socket.h               |   2
 arch/ia64/include/asm/spinlock.h             | 136
 arch/ia64/include/asm/spinlock_types.h       |  10
 arch/ia64/include/asm/swiotlb.h              |   2
 arch/ia64/include/asm/unistd.h               |   3
 arch/ia64/include/asm/xen/hypervisor.h       |  28
 22 files changed, 116 insertions(+), 103 deletions(-)
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 91df9686a0da..7ae58892ba8d 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -132,6 +132,12 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif
 
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
 #define acpi_unlazy_tlb(x)
 
 #ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index c8ce2719fee8..429eefc93ee7 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -25,6 +25,7 @@
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)			\
 do {						\
 	clear_bit(PG_arch_1, &(page)->flags);	\
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e7..7d09a09cdaad 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
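
For illustration only (not part of this commit): the fix above compares the address of the buffer's *last* byte against the DMA mask rather than one-past-the-end, so a buffer ending exactly at the top of the mask is no longer rejected. A standalone sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

static int old_test(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size <= mask;	/* off by one at the top of the range */
}

static int new_test(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size - 1 <= mask;	/* last byte must lie within the mask */
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* 32-bit-capable device */
	uint64_t addr = 0xfffff000ULL;	/* hypothetical buffer ... */
	uint64_t size = 0x1000;		/* ... ending at 0xffffffff */

	/* prints old=0 new=1: the old test falsely rejects this buffer */
	printf("old=%d new=%d\n", old_test(addr, size, mask),
	       new_test(addr, size, mask));
	return 0;
}
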
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029cb..e14108b19c09 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_IA_64
 
-#define USE_ELF_CORE_DUMP
 #define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf5..bf2e37493e04 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
 extern int ia64_first_device_vector;
 extern int ia64_last_device_vector;
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR		0x30	/* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR	0x31
+#else
 #define IA64_DEF_FIRST_DEVICE_VECTOR	0x30
+#endif
 #define IA64_DEF_LAST_DEVICE_VECTOR	0xe7
 #define IA64_FIRST_DEVICE_VECTOR	ia64_first_device_vector
 #define IA64_LAST_DEVICE_VECTOR		ia64_last_device_vector
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d949..cc8335eb3110 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5282546cdf82..91b920fd7d53 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -13,7 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/cpumask.h>
-#include <asm-ia64/nr-irqs.h>
+#include <generated/nr-irqs.h>
 
 static __inline__ int
 irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 18a7e49abbc5..bc90c75adf67 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_PIC_MASTER   0
 #define KVM_IRQCHIP_PIC_SLAVE    1
 #define KVM_IRQCHIP_IOAPIC       2
+#define KVM_NR_IRQCHIPS          3
 
 #define KVM_CONTEXT_SIZE 8*1024
 
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index d9b6325a9328..a362e67e0ca6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -475,7 +475,6 @@ struct kvm_arch {
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	int iommu_flags;
-	struct hlist_head irq_ack_notifier_list;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a789..43f96ab18fa0 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
 	unsigned long os_status;	/* OS status to SAL, enum below */
 	unsigned long context;		/* 0 if return to same context
 					   1 if return to new context */
+
+	/* I-resources */
+	unsigned long iip;
+	unsigned long ipsr;
+	unsigned long ifs;
 };
 
 enum {
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
 
 #include <asm/mmzone.h>
 
-#define NUMA_NO_NODE	-1
-
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 48822c0811d8..74724b24c2b7 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,7 +67,7 @@ typedef struct {
 	unsigned long	ip;		/* where did the overflow interrupt happened */
 	unsigned long	tstamp;		/* ar.itc when entering perfmon intr. handler */
 
-	unsigned short  cpu;		/* cpu on which the overfow occured */
+	unsigned short  cpu;		/* cpu on which the overflow occured */
 	unsigned short  set;		/* event set active when overflow ocurred */
 	int		tgid;		/* thread group id (for NPTL, this is getpid()) */
 } pfm_default_smpl_entry_t;
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b15782..e8762688e8e3 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 #define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
 	  LIST_HEAD_INIT((name).wait_list) }
 
 #define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 22a6f18a5313..6052422a22b3 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t;
 #define IIO_IIDSR_LVL_SHIFT     0
 #define IIO_IIDSR_LVL_MASK      0x000000ff
 
-/* Xtalk timeout threshhold register (IIO_IXTT) */
+/* Xtalk timeout threshold register (IIO_IXTT) */
 #define IXTT_RRSP_TO_SHFT	55	/* read response timeout */
 #define IXTT_RRSP_TO_MASK	(0x1FULL << IXTT_RRSP_TO_SHFT)
 #define IXTT_RRSP_PS_SHFT	32	/* read responsed TO prescalar */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 0b0d5ff062e5..51427eaa51ba 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -69,4 +69,6 @@
 #define SO_PROTOCOL	38
 #define SO_DOMAIN	39
 
+#define SO_RXQ_OVFL             40
+
 #endif /* _ASM_IA64_SOCKET_H */
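
For illustration only (not part of this commit): SO_RXQ_OVFL is the then-new socket option that makes the kernel attach a cumulative receive-queue drop counter to each datagram as ancillary data. A user-space sketch of how it is consumed, with error handling trimmed:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Sketch: enable SO_RXQ_OVFL, then read the drop counter the kernel
 * delivers as a SOL_SOCKET/SO_RXQ_OVFL control message on recvmsg(). */
static uint32_t recv_and_count_drops(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	uint32_t drops = 0;
	int one = 1;

	/* normally done once at socket setup, not per receive */
	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return 0;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SO_RXQ_OVFL)
			memcpy(&drops, CMSG_DATA(cmsg), sizeof(drops));
	return drops;
}
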
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930e1111..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x)			((x)->lock = 0)
+#define arch_spin_lock_init(x)			((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -25,108 +25,128 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *  63                     32  31                      0
+ *  31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |     now_serving         |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
-
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
+	int tmp = ACCESS_ONCE(lock->lock);
 
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
+}
+
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -149,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw)								\
+#define arch_read_lock(rw)								\
 do {											\
-	raw_rwlock_t *__read_lock_ptr = (rw);						\
+	arch_rwlock_t *__read_lock_ptr = (rw);						\
 											\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
@@ -168,16 +188,16 @@ do { \
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw)					\
+#define arch_read_unlock(rw)					\
 do {								\
-	raw_rwlock_t *__read_lock_ptr = (rw);			\
+	arch_rwlock_t *__read_lock_ptr = (rw);			\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -201,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw)							\
+#define arch_write_trylock(rw)							\
 ({										\
 	register long result;							\
 										\
@@ -215,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 	(result == 0);								\
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -224,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l)								\
+#define arch_write_lock(l)								\
 ({											\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
@@ -237,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	} while (ia64_val);								\
 })
 
-#define __raw_write_trylock(rw)						\
+#define arch_write_trylock(rw)						\
 ({									\
 	__u64 ia64_val;							\
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);			\
@@ -245,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	(ia64_val == 0);						\
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -253,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
-		raw_rwlock_t lock;
+		arch_rwlock_t lock;
 		__u32 word;
 	} old, new;
 	old.lock = new.lock = *x;
@@ -265,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ASM_IA64_SPINLOCK_H */
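
For illustration only (not part of this commit): the new encoding packs both halves of the ticket lock into one 32-bit word — next_ticket in bits 14..0, now_serving in bits 31..17 — with pad bits 16..15 keeping next_ticket carries from reaching now_serving. A portable user-space sketch of the same layout, with GCC builtins standing in for ia64_fetchadd() and the speculative ld4.c.nc reload:

#include <stdint.h>

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

/* Illustrative ticket lock with the layout described above;
 * not the kernel implementation. */
typedef struct { volatile uint32_t lock; } sketch_lock_t;

static void sketch_lock(sketch_lock_t *l)
{
	/* grab a ticket: atomic increment of the low 15-bit field */
	uint32_t ticket = __sync_fetch_and_add(&l->lock, 1) & TICKET_MASK;

	/* wait until now_serving (bits 31..17) reaches our ticket */
	while (((l->lock >> TICKET_SHIFT) & TICKET_MASK) != ticket)
		;	/* the kernel uses ld4.c.nc + cpu_relax() here */
}

static void sketch_unlock(sketch_lock_t *l)
{
	/* advance now_serving; the kernel instead does a 2-byte store
	 * of (tmp + 2) & ~1 on the upper half-word */
	__sync_fetch_and_add(&l->lock, 1u << TICKET_SHIFT);
}

static int sketch_is_locked(sketch_lock_t *l)
{
	/* lock is free exactly when the two 15-bit fields agree */
	uint32_t v = l->lock;
	return !!(((v >> TICKET_SHIFT) ^ v) & TICKET_MASK);
}
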
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136d9bc2..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,16 +6,16 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
-} raw_spinlock_t;
+	volatile unsigned int lock;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int read_counter	: 31;
 	volatile unsigned int write_lock	:  1;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0, 0 }
 
 #endif
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
 
-extern int swiotlb_force;
-
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
 extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 5a5347f5c4e4..10a8f21ca9e3 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -311,11 +311,12 @@
 #define __NR_preadv			1319
 #define __NR_pwritev			1320
 #define __NR_rt_tgsigqueueinfo		1321
+#define __NR_recvmmsg			1322
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls			298 /* length of syscall table */
+#define NR_syscalls			299 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
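
For illustration only (not part of this commit): __NR_recvmmsg (1322) is the sole new table entry, which is why NR_syscalls grows from 298 to 299. Before libc wrappers existed, the call could be issued directly; a minimal sketch assuming only the numbering shown above, with a hypothetical mirror of the generic struct mmsghdr:

#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

#define __NR_recvmmsg_ia64 1322		/* from the diff above */

struct sketch_mmsghdr {			/* mirrors struct mmsghdr */
	struct msghdr msg_hdr;
	unsigned int  msg_len;		/* bytes received, filled in by the kernel */
};

/* Batch-receive up to vlen datagrams in one kernel entry. */
static int recvmmsg_direct(int fd, struct sketch_mmsghdr *vec,
			   unsigned int vlen, unsigned int flags)
{
	return syscall(__NR_recvmmsg_ia64, fd, vec, vlen, flags, NULL);
}
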
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e4..67455c2ed2b1 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>	/* to compile feature.c */
 #include <xen/features.h>		/* to comiple xen-netfront.c */
+#include <xen/xen.h>
 #include <asm/xen/hypercall.h>
 
-/* xen_domain_type is set before executing any C code by early_xen_setup */
-enum xen_domain_type {
-	XEN_NATIVE,	/* running on bare hardware */
-	XEN_PV_DOMAIN,	/* running in a PV domain */
-	XEN_HVM_DOMAIN,	/* running in a Xen hvm domain*/
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#define xen_initial_domain()	(xen_pv_domain() &&			\
-				 (xen_start_info->flags & SIF_INITDOMAIN))
-#else
-#define xen_initial_domain()	(0)
-#endif
-
-
 #ifdef CONFIG_XEN
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;