Diffstat (limited to 'arch/ia64')
30 files changed, 197 insertions, 308 deletions
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 674a8374c6d9..f332e3fe4237 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -1381,7 +1381,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
1381 | #endif | 1381 | #endif |
1382 | 1382 | ||
1383 | /* | 1383 | /* |
1384 | ** Not virtually contigous. | 1384 | ** Not virtually contiguous. |
1385 | ** Terminate prev chunk. | 1385 | ** Terminate prev chunk. |
1386 | ** Start a new chunk. | 1386 | ** Start a new chunk. |
1387 | ** | 1387 | ** |
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index af9405cd70e5..2fd7479aa216 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S | |||
@@ -79,7 +79,7 @@ GLOBAL_ENTRY(ia32_ret_from_clone) | |||
79 | (p6) br.cond.spnt .ia32_strace_check_retval | 79 | (p6) br.cond.spnt .ia32_strace_check_retval |
80 | ;; // prevent RAW on r8 | 80 | ;; // prevent RAW on r8 |
81 | END(ia32_ret_from_clone) | 81 | END(ia32_ret_from_clone) |
82 | // fall thrugh | 82 | // fall through |
83 | GLOBAL_ENTRY(ia32_ret_from_syscall) | 83 | GLOBAL_ENTRY(ia32_ret_from_syscall) |
84 | PT_REGS_UNWIND_INFO(0) | 84 | PT_REGS_UNWIND_INFO(0) |
85 | 85 | ||
@@ -327,7 +327,7 @@ ia32_syscall_table: | |||
327 | data8 compat_sys_writev | 327 | data8 compat_sys_writev |
328 | data8 sys_getsid | 328 | data8 sys_getsid |
329 | data8 sys_fdatasync | 329 | data8 sys_fdatasync |
330 | data8 sys32_sysctl | 330 | data8 compat_sys_sysctl |
331 | data8 sys_mlock /* 150 */ | 331 | data8 sys_mlock /* 150 */ |
332 | data8 sys_munlock | 332 | data8 sys_munlock |
333 | data8 sys_mlockall | 333 | data8 sys_mlockall |
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 625ed8f76fce..045b746b9808 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c | |||
@@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot | |||
858 | 858 | ||
859 | prot = get_prot32(prot); | 859 | prot = get_prot32(prot); |
860 | 860 | ||
861 | if (flags & MAP_HUGETLB) | ||
862 | return -ENOMEM; | ||
863 | |||
861 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 864 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
862 | mutex_lock(&ia32_mmap_mutex); | 865 | mutex_lock(&ia32_mmap_mutex); |
863 | { | 866 | { |
@@ -1628,61 +1631,6 @@ sys32_msync (unsigned int start, unsigned int len, int flags) | |||
1628 | return sys_msync(addr, len + (start - addr), flags); | 1631 | return sys_msync(addr, len + (start - addr), flags); |
1629 | } | 1632 | } |
1630 | 1633 | ||
1631 | struct sysctl32 { | ||
1632 | unsigned int name; | ||
1633 | int nlen; | ||
1634 | unsigned int oldval; | ||
1635 | unsigned int oldlenp; | ||
1636 | unsigned int newval; | ||
1637 | unsigned int newlen; | ||
1638 | unsigned int __unused[4]; | ||
1639 | }; | ||
1640 | |||
1641 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
1642 | asmlinkage long | ||
1643 | sys32_sysctl (struct sysctl32 __user *args) | ||
1644 | { | ||
1645 | struct sysctl32 a32; | ||
1646 | mm_segment_t old_fs = get_fs (); | ||
1647 | void __user *oldvalp, *newvalp; | ||
1648 | size_t oldlen; | ||
1649 | int __user *namep; | ||
1650 | long ret; | ||
1651 | |||
1652 | if (copy_from_user(&a32, args, sizeof(a32))) | ||
1653 | return -EFAULT; | ||
1654 | |||
1655 | /* | ||
1656 | * We need to pre-validate these because we have to disable address checking | ||
1657 | * before calling do_sysctl() because of OLDLEN but we can't run the risk of the | ||
1658 | * user specifying bad addresses here. Well, since we're dealing with 32 bit | ||
1659 | * addresses, we KNOW that access_ok() will always succeed, so this is an | ||
1660 | * expensive NOP, but so what... | ||
1661 | */ | ||
1662 | namep = (int __user *) compat_ptr(a32.name); | ||
1663 | oldvalp = compat_ptr(a32.oldval); | ||
1664 | newvalp = compat_ptr(a32.newval); | ||
1665 | |||
1666 | if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp))) | ||
1667 | || !access_ok(VERIFY_WRITE, namep, 0) | ||
1668 | || !access_ok(VERIFY_WRITE, oldvalp, 0) | ||
1669 | || !access_ok(VERIFY_WRITE, newvalp, 0)) | ||
1670 | return -EFAULT; | ||
1671 | |||
1672 | set_fs(KERNEL_DS); | ||
1673 | lock_kernel(); | ||
1674 | ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen, | ||
1675 | newvalp, (size_t) a32.newlen); | ||
1676 | unlock_kernel(); | ||
1677 | set_fs(old_fs); | ||
1678 | |||
1679 | if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp))) | ||
1680 | return -EFAULT; | ||
1681 | |||
1682 | return ret; | ||
1683 | } | ||
1684 | #endif | ||
1685 | |||
1686 | asmlinkage long | 1634 | asmlinkage long |
1687 | sys32_newuname (struct new_utsname __user *name) | 1635 | sys32_newuname (struct new_utsname __user *name) |
1688 | { | 1636 | { |
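Note on the hunk above: the hand-rolled sys32_sysctl() wrapper existed only to widen the 32-bit user pointers packed into struct sysctl32 before calling do_sysctl(); the generic compat_sys_sysctl() that replaces it in the syscall table does the same conversion in common code. A minimal user-space sketch of just the pointer-widening step; the struct layout is taken from the removed code, while compat_ptr_sketch() is a hypothetical stand-in for the kernel's compat_ptr():

#include <stdint.h>
#include <stdio.h>

/* 32-bit userspace view: "pointers" are plain 32-bit integers */
struct sysctl32 {
	uint32_t name;
	int32_t  nlen;
	uint32_t oldval;
	uint32_t oldlenp;
	uint32_t newval;
	uint32_t newlen;
	uint32_t __unused[4];
};

/* on ia64, compat_ptr() amounts to zero-extending the 32-bit value */
static void *compat_ptr_sketch(uint32_t uptr)
{
	return (void *)(uintptr_t)uptr;
}

int main(void)
{
	struct sysctl32 a32 = { .oldval = 0x1000, .newval = 0 };

	printf("oldvalp=%p newvalp=%p\n",
	       compat_ptr_sketch(a32.oldval), compat_ptr_sketch(a32.newval));
	return 0;
}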
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h index c8ce2719fee8..429eefc93ee7 100644 --- a/arch/ia64/include/asm/cacheflush.h +++ b/arch/ia64/include/asm/cacheflush.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #define flush_cache_vmap(start, end) do { } while (0) | 25 | #define flush_cache_vmap(start, end) do { } while (0) |
26 | #define flush_cache_vunmap(start, end) do { } while (0) | 26 | #define flush_cache_vunmap(start, end) do { } while (0) |
27 | 27 | ||
28 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 | ||
28 | #define flush_dcache_page(page) \ | 29 | #define flush_dcache_page(page) \ |
29 | do { \ | 30 | do { \ |
30 | clear_bit(PG_arch_1, &(page)->flags); \ | 31 | clear_bit(PG_arch_1, &(page)->flags); \ |
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index 18a7e49abbc5..bc90c75adf67 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h | |||
@@ -60,6 +60,7 @@ struct kvm_ioapic_state { | |||
60 | #define KVM_IRQCHIP_PIC_MASTER 0 | 60 | #define KVM_IRQCHIP_PIC_MASTER 0 |
61 | #define KVM_IRQCHIP_PIC_SLAVE 1 | 61 | #define KVM_IRQCHIP_PIC_SLAVE 1 |
62 | #define KVM_IRQCHIP_IOAPIC 2 | 62 | #define KVM_IRQCHIP_IOAPIC 2 |
63 | #define KVM_NR_IRQCHIPS 3 | ||
63 | 64 | ||
64 | #define KVM_CONTEXT_SIZE 8*1024 | 65 | #define KVM_CONTEXT_SIZE 8*1024 |
65 | 66 | ||
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index d9b6325a9328..a362e67e0ca6 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h | |||
@@ -475,7 +475,6 @@ struct kvm_arch { | |||
475 | struct list_head assigned_dev_head; | 475 | struct list_head assigned_dev_head; |
476 | struct iommu_domain *iommu_domain; | 476 | struct iommu_domain *iommu_domain; |
477 | int iommu_flags; | 477 | int iommu_flags; |
478 | struct hlist_head irq_ack_notifier_list; | ||
479 | 478 | ||
480 | unsigned long irq_sources_bitmap; | 479 | unsigned long irq_sources_bitmap; |
481 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | 480 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; |
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h index 48822c0811d8..74724b24c2b7 100644 --- a/arch/ia64/include/asm/perfmon_default_smpl.h +++ b/arch/ia64/include/asm/perfmon_default_smpl.h | |||
@@ -67,7 +67,7 @@ typedef struct { | |||
67 | unsigned long ip; /* where did the overflow interrupt happened */ | 67 | unsigned long ip; /* where did the overflow interrupt happened */ |
68 | unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */ | 68 | unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */ |
69 | 69 | ||
70 | unsigned short cpu; /* cpu on which the overfow occured */ | 70 | unsigned short cpu; /* cpu on which the overflow occured */ |
71 | unsigned short set; /* event set active when overflow ocurred */ | 71 | unsigned short set; /* event set active when overflow ocurred */ |
72 | int tgid; /* thread group id (for NPTL, this is getpid()) */ | 72 | int tgid; /* thread group id (for NPTL, this is getpid()) */ |
73 | } pfm_default_smpl_entry_t; | 73 | } pfm_default_smpl_entry_t; |
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h index 22a6f18a5313..6052422a22b3 100644 --- a/arch/ia64/include/asm/sn/shubio.h +++ b/arch/ia64/include/asm/sn/shubio.h | |||
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t; | |||
3289 | #define IIO_IIDSR_LVL_SHIFT 0 | 3289 | #define IIO_IIDSR_LVL_SHIFT 0 |
3290 | #define IIO_IIDSR_LVL_MASK 0x000000ff | 3290 | #define IIO_IIDSR_LVL_MASK 0x000000ff |
3291 | 3291 | ||
3292 | /* Xtalk timeout threshhold register (IIO_IXTT) */ | 3292 | /* Xtalk timeout threshold register (IIO_IXTT) */ |
3293 | #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ | 3293 | #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ |
3294 | #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) | 3294 | #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) |
3295 | #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ | 3295 | #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ |
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index 0b0d5ff062e5..51427eaa51ba 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h | |||
@@ -69,4 +69,6 @@ | |||
69 | #define SO_PROTOCOL 38 | 69 | #define SO_PROTOCOL 38 |
70 | #define SO_DOMAIN 39 | 70 | #define SO_DOMAIN 39 |
71 | 71 | ||
72 | #define SO_RXQ_OVFL 40 | ||
73 | |||
72 | #endif /* _ASM_IA64_SOCKET_H */ | 74 | #endif /* _ASM_IA64_SOCKET_H */ |
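For context, SO_RXQ_OVFL is the new option number assigned above (40 on ia64, the same value most architectures use); when enabled, the kernel attaches a datagram drop counter to received packets as ancillary data. A minimal user-space sketch that only enables the option on a UDP socket, assuming a libc that already exposes SO_RXQ_OVFL (otherwise the fallback define mirrors this header):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40	/* value added to this header by the patch */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one)) < 0)
		perror("SO_RXQ_OVFL");
	else
		printf("drop-count ancillary data enabled\n");
	if (fd >= 0)
		close(fd);
	return 0;
}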
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 30bb930e1111..239ecdc9516d 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h | |||
@@ -25,61 +25,82 @@ | |||
25 | * by atomically noting the tail and incrementing it by one (thus adding | 25 | * by atomically noting the tail and incrementing it by one (thus adding |
26 | * ourself to the queue and noting our position), then waiting until the head | 26 | * ourself to the queue and noting our position), then waiting until the head |
27 | * becomes equal to the the initial value of the tail. | 27 | * becomes equal to the the initial value of the tail. |
28 | * The pad bits in the middle are used to prevent the next_ticket number | ||
29 | * overflowing into the now_serving number. | ||
28 | * | 30 | * |
29 | * 63 32 31 0 | 31 | * 31 17 16 15 14 0 |
30 | * +----------------------------------------------------+ | 32 | * +----------------------------------------------------+ |
31 | * | next_ticket_number | now_serving | | 33 | * | now_serving | padding | next_ticket | |
32 | * +----------------------------------------------------+ | 34 | * +----------------------------------------------------+ |
33 | */ | 35 | */ |
34 | 36 | ||
35 | #define TICKET_SHIFT 32 | 37 | #define TICKET_SHIFT 17 |
38 | #define TICKET_BITS 15 | ||
39 | #define TICKET_MASK ((1 << TICKET_BITS) - 1) | ||
36 | 40 | ||
37 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 41 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) |
38 | { | 42 | { |
39 | int *p = (int *)&lock->lock, turn, now_serving; | 43 | int *p = (int *)&lock->lock, ticket, serve; |
40 | 44 | ||
41 | now_serving = *p; | 45 | ticket = ia64_fetchadd(1, p, acq); |
42 | turn = ia64_fetchadd(1, p+1, acq); | ||
43 | 46 | ||
44 | if (turn == now_serving) | 47 | if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) |
45 | return; | 48 | return; |
46 | 49 | ||
47 | do { | 50 | ia64_invala(); |
51 | |||
52 | for (;;) { | ||
53 | asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory"); | ||
54 | |||
55 | if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) | ||
56 | return; | ||
48 | cpu_relax(); | 57 | cpu_relax(); |
49 | } while (ACCESS_ONCE(*p) != turn); | 58 | } |
50 | } | 59 | } |
51 | 60 | ||
52 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 61 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) |
53 | { | 62 | { |
54 | long tmp = ACCESS_ONCE(lock->lock), try; | 63 | int tmp = ACCESS_ONCE(lock->lock); |
55 | |||
56 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) { | ||
57 | try = tmp + (1L << TICKET_SHIFT); | ||
58 | 64 | ||
59 | return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp; | 65 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) |
60 | } | 66 | return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; |
61 | return 0; | 67 | return 0; |
62 | } | 68 | } |
63 | 69 | ||
64 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 70 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) |
65 | { | 71 | { |
66 | int *p = (int *)&lock->lock; | 72 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; |
67 | 73 | ||
68 | (void)ia64_fetchadd(1, p, rel); | 74 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); |
75 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; | ||
76 | } | ||
77 | |||
78 | static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) | ||
79 | { | ||
80 | int *p = (int *)&lock->lock, ticket; | ||
81 | |||
82 | ia64_invala(); | ||
83 | |||
84 | for (;;) { | ||
85 | asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory"); | ||
86 | if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) | ||
87 | return; | ||
88 | cpu_relax(); | ||
89 | } | ||
69 | } | 90 | } |
70 | 91 | ||
71 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) | 92 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) |
72 | { | 93 | { |
73 | long tmp = ACCESS_ONCE(lock->lock); | 94 | long tmp = ACCESS_ONCE(lock->lock); |
74 | 95 | ||
75 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1)); | 96 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); |
76 | } | 97 | } |
77 | 98 | ||
78 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | 99 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) |
79 | { | 100 | { |
80 | long tmp = ACCESS_ONCE(lock->lock); | 101 | long tmp = ACCESS_ONCE(lock->lock); |
81 | 102 | ||
82 | return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1; | 103 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
83 | } | 104 | } |
84 | 105 | ||
85 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 106 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) |
@@ -116,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, | |||
116 | 137 | ||
117 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 138 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) |
118 | { | 139 | { |
119 | while (__raw_spin_is_locked(lock)) | 140 | __ticket_spin_unlock_wait(lock); |
120 | cpu_relax(); | ||
121 | } | 141 | } |
122 | 142 | ||
123 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) | 143 | #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) |
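To make the new lock layout above easier to follow: one 32-bit word now holds both halves of the ticket lock, with bits 0-14 as the next ticket to hand out, bits 17-31 as the ticket currently being served, and the pad bits in between keeping a wrapping next_ticket out of now_serving. Below is a minimal user-space sketch of the same arithmetic, with GCC __sync builtins standing in for ia64_fetchadd() and the 16-bit unlock store, and without the ld4.c.nc speculation loop; it illustrates the scheme rather than replacing it:

#include <stdio.h>

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

static unsigned int lock;	/* [ now_serving | pad | next_ticket ] */

static void ticket_lock(void)
{
	/* take a ticket: atomically post-increment the low half */
	unsigned int ticket = __sync_fetch_and_add(&lock, 1);

	/* spin until now_serving (high half) equals our ticket (low half) */
	while (((*(volatile unsigned int *)&lock >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)
		;	/* cpu_relax() in the kernel version */
}

static void ticket_unlock(void)
{
	/*
	 * Advance now_serving by one.  The kernel does this with a 16-bit
	 * load/store of the upper halfword, (tmp + 2) & ~1, which also keeps
	 * the low pad bit clear.
	 */
	__sync_fetch_and_add(&lock, 1 << TICKET_SHIFT);
}

int main(void)
{
	ticket_lock();
	printf("lock word after lock:   %#x\n", lock);
	ticket_unlock();
	printf("lock word after unlock: %#x\n", lock);
	return 0;
}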
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index b61d136d9bc2..474e46f1ab4a 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned long lock; | 9 | volatile unsigned int lock; |
10 | } raw_spinlock_t; | 10 | } raw_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h index dcbaea7ce128..f0acde68aaea 100644 --- a/arch/ia64/include/asm/swiotlb.h +++ b/arch/ia64/include/asm/swiotlb.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #include <linux/dma-mapping.h> | 4 | #include <linux/dma-mapping.h> |
5 | #include <linux/swiotlb.h> | 5 | #include <linux/swiotlb.h> |
6 | 6 | ||
7 | extern int swiotlb_force; | ||
8 | |||
9 | #ifdef CONFIG_SWIOTLB | 7 | #ifdef CONFIG_SWIOTLB |
10 | extern int swiotlb; | 8 | extern int swiotlb; |
11 | extern void pci_swiotlb_init(void); | 9 | extern void pci_swiotlb_init(void); |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 5a5347f5c4e4..10a8f21ca9e3 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
@@ -311,11 +311,12 @@ | |||
311 | #define __NR_preadv 1319 | 311 | #define __NR_preadv 1319 |
312 | #define __NR_pwritev 1320 | 312 | #define __NR_pwritev 1320 |
313 | #define __NR_rt_tgsigqueueinfo 1321 | 313 | #define __NR_rt_tgsigqueueinfo 1321 |
314 | #define __NR_recvmmsg 1322 | ||
314 | 315 | ||
315 | #ifdef __KERNEL__ | 316 | #ifdef __KERNEL__ |
316 | 317 | ||
317 | 318 | ||
318 | #define NR_syscalls 298 /* length of syscall table */ | 319 | #define NR_syscalls 299 /* length of syscall table */ |
319 | 320 | ||
320 | /* | 321 | /* |
321 | * The following defines stop scripts/checksyscalls.sh from complaining about | 322 | * The following defines stop scripts/checksyscalls.sh from complaining about |
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h index 88afb54501e4..67455c2ed2b1 100644 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ b/arch/ia64/include/asm/xen/hypervisor.h | |||
@@ -37,35 +37,9 @@ | |||
37 | #include <xen/interface/xen.h> | 37 | #include <xen/interface/xen.h> |
38 | #include <xen/interface/version.h> /* to compile feature.c */ | 38 | #include <xen/interface/version.h> /* to compile feature.c */ |
39 | #include <xen/features.h> /* to comiple xen-netfront.c */ | 39 | #include <xen/features.h> /* to comiple xen-netfront.c */ |
40 | #include <xen/xen.h> | ||
40 | #include <asm/xen/hypercall.h> | 41 | #include <asm/xen/hypercall.h> |
41 | 42 | ||
42 | /* xen_domain_type is set before executing any C code by early_xen_setup */ | ||
43 | enum xen_domain_type { | ||
44 | XEN_NATIVE, /* running on bare hardware */ | ||
45 | XEN_PV_DOMAIN, /* running in a PV domain */ | ||
46 | XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/ | ||
47 | }; | ||
48 | |||
49 | #ifdef CONFIG_XEN | ||
50 | extern enum xen_domain_type xen_domain_type; | ||
51 | #else | ||
52 | #define xen_domain_type XEN_NATIVE | ||
53 | #endif | ||
54 | |||
55 | #define xen_domain() (xen_domain_type != XEN_NATIVE) | ||
56 | #define xen_pv_domain() (xen_domain() && \ | ||
57 | xen_domain_type == XEN_PV_DOMAIN) | ||
58 | #define xen_hvm_domain() (xen_domain() && \ | ||
59 | xen_domain_type == XEN_HVM_DOMAIN) | ||
60 | |||
61 | #ifdef CONFIG_XEN_DOM0 | ||
62 | #define xen_initial_domain() (xen_pv_domain() && \ | ||
63 | (xen_start_info->flags & SIF_INITDOMAIN)) | ||
64 | #else | ||
65 | #define xen_initial_domain() (0) | ||
66 | #endif | ||
67 | |||
68 | |||
69 | #ifdef CONFIG_XEN | 43 | #ifdef CONFIG_XEN |
70 | extern struct shared_info *HYPERVISOR_shared_info; | 44 | extern struct shared_info *HYPERVISOR_shared_info; |
71 | extern struct start_info *xen_start_info; | 45 | extern struct start_info *xen_start_info; |
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 6631a9dfafdc..b942f4032d7a 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c | |||
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) | |||
239 | #ifdef CONFIG_SYSCTL | 239 | #ifdef CONFIG_SYSCTL |
240 | static ctl_table kdump_ctl_table[] = { | 240 | static ctl_table kdump_ctl_table[] = { |
241 | { | 241 | { |
242 | .ctl_name = CTL_UNNUMBERED, | ||
243 | .procname = "kdump_on_init", | 242 | .procname = "kdump_on_init", |
244 | .data = &kdump_on_init, | 243 | .data = &kdump_on_init, |
245 | .maxlen = sizeof(int), | 244 | .maxlen = sizeof(int), |
246 | .mode = 0644, | 245 | .mode = 0644, |
247 | .proc_handler = &proc_dointvec, | 246 | .proc_handler = proc_dointvec, |
248 | }, | 247 | }, |
249 | { | 248 | { |
250 | .ctl_name = CTL_UNNUMBERED, | ||
251 | .procname = "kdump_on_fatal_mca", | 249 | .procname = "kdump_on_fatal_mca", |
252 | .data = &kdump_on_fatal_mca, | 250 | .data = &kdump_on_fatal_mca, |
253 | .maxlen = sizeof(int), | 251 | .maxlen = sizeof(int), |
254 | .mode = 0644, | 252 | .mode = 0644, |
255 | .proc_handler = &proc_dointvec, | 253 | .proc_handler = proc_dointvec, |
256 | }, | 254 | }, |
257 | { .ctl_name = 0 } | 255 | { } |
258 | }; | 256 | }; |
259 | 257 | ||
260 | static ctl_table sys_table[] = { | 258 | static ctl_table sys_table[] = { |
261 | { | 259 | { |
262 | .ctl_name = CTL_KERN, | ||
263 | .procname = "kernel", | 260 | .procname = "kernel", |
264 | .mode = 0555, | 261 | .mode = 0555, |
265 | .child = kdump_ctl_table, | 262 | .child = kdump_ctl_table, |
266 | }, | 263 | }, |
267 | { .ctl_name = 0 } | 264 | { } |
268 | }; | 265 | }; |
269 | #endif | 266 | #endif |
270 | 267 | ||
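The two ctl_table conversions in this commit (here and in perfmon.c below) follow the tree-wide removal of binary sysctl numbers: the CTL_UNNUMBERED/.ctl_name fields go away, proc_handler loses the redundant address-of, and the terminating sentinel becomes an empty initializer. A hedged sketch of a complete table in the new style, written as a stand-alone module fragment; the variable and table names are placeholders, and registration uses the register_sysctl_table() interface of this kernel generation:

#include <linux/module.h>
#include <linux/sysctl.h>

static int kdump_on_init;	/* placeholder knob for the sketch */

static ctl_table example_table[] = {
	{
		.procname	= "kdump_on_init",
		.data		= &kdump_on_init,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,	/* no &: already a function pointer */
	},
	{ }						/* empty sentinel replaces { .ctl_name = 0 } */
};

static ctl_table example_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= example_table,
	},
	{ }
};

static struct ctl_table_header *hdr;

static int __init example_init(void)
{
	hdr = register_sysctl_table(example_root);
	return hdr ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(hdr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");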
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d0e7d37017b4..d75b872ca4dc 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -1806,6 +1806,7 @@ sys_call_table: | |||
1806 | data8 sys_preadv | 1806 | data8 sys_preadv |
1807 | data8 sys_pwritev // 1320 | 1807 | data8 sys_pwritev // 1320 |
1808 | data8 sys_rt_tgsigqueueinfo | 1808 | data8 sys_rt_tgsigqueueinfo |
1809 | data8 sys_recvmmsg | ||
1809 | 1810 | ||
1810 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1811 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
1811 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | 1812 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ |
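Slot 1322 above wires sys_recvmmsg() into the ia64 syscall table, matching __NR_recvmmsg in unistd.h and the NR_syscalls bump to 299. As a reminder of what the new call provides, here is a small user-space sketch that receives several UDP datagrams with a single system call; it assumes a libc recent enough to expose the recvmmsg() wrapper and struct mmsghdr:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

#define VLEN	8
#define BUFSZ	1500

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(5000),
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	struct mmsghdr msgs[VLEN];
	struct iovec iov[VLEN];
	char bufs[VLEN][BUFSZ];
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("socket/bind");
		return 1;
	}

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < VLEN; i++) {
		iov[i].iov_base = bufs[i];
		iov[i].iov_len = BUFSZ;
		msgs[i].msg_hdr.msg_iov = &iov[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* MSG_DONTWAIT: return at once for the demo; drop it to wait for traffic */
	n = recvmmsg(fd, msgs, VLEN, MSG_DONTWAIT, NULL);
	if (n < 0)
		perror("recvmmsg");
	else
		for (i = 0; i < n; i++)
			printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);

	close(fd);
	return 0;
}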
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c index d5764a3d74af..b091111270cb 100644 --- a/arch/ia64/kernel/esi.c +++ b/arch/ia64/kernel/esi.c | |||
@@ -84,7 +84,7 @@ static int __init esi_init (void) | |||
84 | case ESI_DESC_ENTRY_POINT: | 84 | case ESI_DESC_ENTRY_POINT: |
85 | break; | 85 | break; |
86 | default: | 86 | default: |
87 | printk(KERN_WARNING "Unkown table type %d found in " | 87 | printk(KERN_WARNING "Unknown table type %d found in " |
88 | "ESI table, ignoring rest of table\n", *p); | 88 | "ESI table, ignoring rest of table\n", *p); |
89 | return -ENODEV; | 89 | return -ENODEV; |
90 | } | 90 | } |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index d2877a7bfe2e..496ac7a99488 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -887,6 +887,60 @@ ia64_mca_modify_comm(const struct task_struct *previous_current) | |||
887 | memcpy(current->comm, comm, sizeof(current->comm)); | 887 | memcpy(current->comm, comm, sizeof(current->comm)); |
888 | } | 888 | } |
889 | 889 | ||
890 | static void | ||
891 | finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, | ||
892 | unsigned long *nat) | ||
893 | { | ||
894 | const u64 *bank; | ||
895 | |||
896 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
897 | * pmsa_{xip,xpsr,xfs} | ||
898 | */ | ||
899 | if (ia64_psr(regs)->ic) { | ||
900 | regs->cr_iip = ms->pmsa_iip; | ||
901 | regs->cr_ipsr = ms->pmsa_ipsr; | ||
902 | regs->cr_ifs = ms->pmsa_ifs; | ||
903 | } else { | ||
904 | regs->cr_iip = ms->pmsa_xip; | ||
905 | regs->cr_ipsr = ms->pmsa_xpsr; | ||
906 | regs->cr_ifs = ms->pmsa_xfs; | ||
907 | } | ||
908 | regs->pr = ms->pmsa_pr; | ||
909 | regs->b0 = ms->pmsa_br0; | ||
910 | regs->ar_rsc = ms->pmsa_rsc; | ||
911 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, ®s->r1, nat); | ||
912 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, ®s->r2, nat); | ||
913 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, ®s->r3, nat); | ||
914 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, ®s->r8, nat); | ||
915 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, ®s->r9, nat); | ||
916 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, ®s->r10, nat); | ||
917 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, ®s->r11, nat); | ||
918 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, ®s->r12, nat); | ||
919 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, ®s->r13, nat); | ||
920 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, ®s->r14, nat); | ||
921 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, ®s->r15, nat); | ||
922 | if (ia64_psr(regs)->bn) | ||
923 | bank = ms->pmsa_bank1_gr; | ||
924 | else | ||
925 | bank = ms->pmsa_bank0_gr; | ||
926 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, ®s->r16, nat); | ||
927 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, ®s->r17, nat); | ||
928 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, ®s->r18, nat); | ||
929 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, ®s->r19, nat); | ||
930 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, ®s->r20, nat); | ||
931 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, ®s->r21, nat); | ||
932 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, ®s->r22, nat); | ||
933 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, ®s->r23, nat); | ||
934 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, ®s->r24, nat); | ||
935 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, ®s->r25, nat); | ||
936 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, ®s->r26, nat); | ||
937 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, ®s->r27, nat); | ||
938 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, ®s->r28, nat); | ||
939 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, ®s->r29, nat); | ||
940 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, ®s->r30, nat); | ||
941 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, ®s->r31, nat); | ||
942 | } | ||
943 | |||
890 | /* On entry to this routine, we are running on the per cpu stack, see | 944 | /* On entry to this routine, we are running on the per cpu stack, see |
891 | * mca_asm.h. The original stack has not been touched by this event. Some of | 945 | * mca_asm.h. The original stack has not been touched by this event. Some of |
892 | * the original stack's registers will be in the RBS on this stack. This stack | 946 | * the original stack's registers will be in the RBS on this stack. This stack |
@@ -921,7 +975,6 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, | |||
921 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; | 975 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; |
922 | u64 ar_bspstore = regs->ar_bspstore; | 976 | u64 ar_bspstore = regs->ar_bspstore; |
923 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); | 977 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); |
924 | const u64 *bank; | ||
925 | const char *msg; | 978 | const char *msg; |
926 | int cpu = smp_processor_id(); | 979 | int cpu = smp_processor_id(); |
927 | 980 | ||
@@ -1024,54 +1077,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, | |||
1024 | p = (char *)r12 - sizeof(*regs); | 1077 | p = (char *)r12 - sizeof(*regs); |
1025 | old_regs = (struct pt_regs *)p; | 1078 | old_regs = (struct pt_regs *)p; |
1026 | memcpy(old_regs, regs, sizeof(*regs)); | 1079 | memcpy(old_regs, regs, sizeof(*regs)); |
1027 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
1028 | * pmsa_{xip,xpsr,xfs} | ||
1029 | */ | ||
1030 | if (ia64_psr(regs)->ic) { | ||
1031 | old_regs->cr_iip = ms->pmsa_iip; | ||
1032 | old_regs->cr_ipsr = ms->pmsa_ipsr; | ||
1033 | old_regs->cr_ifs = ms->pmsa_ifs; | ||
1034 | } else { | ||
1035 | old_regs->cr_iip = ms->pmsa_xip; | ||
1036 | old_regs->cr_ipsr = ms->pmsa_xpsr; | ||
1037 | old_regs->cr_ifs = ms->pmsa_xfs; | ||
1038 | } | ||
1039 | old_regs->pr = ms->pmsa_pr; | ||
1040 | old_regs->b0 = ms->pmsa_br0; | ||
1041 | old_regs->loadrs = loadrs; | 1080 | old_regs->loadrs = loadrs; |
1042 | old_regs->ar_rsc = ms->pmsa_rsc; | ||
1043 | old_unat = old_regs->ar_unat; | 1081 | old_unat = old_regs->ar_unat; |
1044 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); | 1082 | finish_pt_regs(old_regs, ms, &old_unat); |
1045 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); | ||
1046 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); | ||
1047 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); | ||
1048 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); | ||
1049 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); | ||
1050 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); | ||
1051 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); | ||
1052 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); | ||
1053 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); | ||
1054 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); | ||
1055 | if (ia64_psr(old_regs)->bn) | ||
1056 | bank = ms->pmsa_bank1_gr; | ||
1057 | else | ||
1058 | bank = ms->pmsa_bank0_gr; | ||
1059 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); | ||
1060 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); | ||
1061 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); | ||
1062 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); | ||
1063 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); | ||
1064 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); | ||
1065 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); | ||
1066 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); | ||
1067 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); | ||
1068 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); | ||
1069 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); | ||
1070 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); | ||
1071 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); | ||
1072 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); | ||
1073 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); | ||
1074 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); | ||
1075 | 1083 | ||
1076 | /* Next stack a struct switch_stack. mca_asm.S built a partial | 1084 | /* Next stack a struct switch_stack. mca_asm.S built a partial |
1077 | * switch_stack, copy it and fill in the blanks using pt_regs and | 1085 | * switch_stack, copy it and fill in the blanks using pt_regs and |
@@ -1141,6 +1149,8 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, | |||
1141 | no_mod: | 1149 | no_mod: |
1142 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | 1150 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", |
1143 | smp_processor_id(), type, msg); | 1151 | smp_processor_id(), type, msg); |
1152 | old_unat = regs->ar_unat; | ||
1153 | finish_pt_regs(regs, ms, &old_unat); | ||
1144 | return previous_current; | 1154 | return previous_current; |
1145 | } | 1155 | } |
1146 | 1156 | ||
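The mca.c change above is mostly mechanical: the long block of copy_reg() calls that rebuilt a struct pt_regs from the PAL min-state area is factored into finish_pt_regs(), and the one behavioural difference is that the no_mod error path now calls the helper too, so the registers are filled in even when the original stack cannot be modified. A toy user-space sketch of the same shape of refactor; the struct names here are invented and only the pattern matches the kernel code:

#include <stdio.h>

/* invented stand-ins for pal_min_state_area_t and struct pt_regs */
struct min_state { unsigned long iip, ipsr, gr[32]; };
struct regs      { unsigned long cr_iip, cr_ipsr, r[32]; };

/* one helper instead of two copies of the same block */
static void finish_regs(struct regs *r, const struct min_state *ms)
{
	int i;

	r->cr_iip = ms->iip;
	r->cr_ipsr = ms->ipsr;
	for (i = 1; i < 32; i++)
		r->r[i] = ms->gr[i - 1];	/* gr[N-1] holds rN, as in copy_reg() */
}

static void modify_stack(struct regs *old, const struct min_state *ms)
{
	finish_regs(old, ms);		/* normal path */
}

static void no_mod_path(struct regs *cur, const struct min_state *ms)
{
	finish_regs(cur, ms);		/* error path now fills the regs too */
}

int main(void)
{
	struct min_state ms = { .iip = 0x1000, .gr = { [0] = 42 } };
	struct regs r = { 0 };

	modify_stack(&r, &ms);
	no_mod_path(&r, &ms);
	printf("cr_iip=%#lx r1=%lu\n", r.cr_iip, r.r[1]);
	return 0;
}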
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 285aae8431c6..53292abf846c 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c | |||
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = { | |||
41 | void __init swiotlb_dma_init(void) | 41 | void __init swiotlb_dma_init(void) |
42 | { | 42 | { |
43 | dma_ops = &swiotlb_dma_ops; | 43 | dma_ops = &swiotlb_dma_ops; |
44 | swiotlb_init(); | 44 | swiotlb_init(1); |
45 | } | 45 | } |
46 | 46 | ||
47 | void __init pci_swiotlb_init(void) | 47 | void __init pci_swiotlb_init(void) |
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void) | |||
51 | swiotlb = 1; | 51 | swiotlb = 1; |
52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); | 52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); |
53 | machvec_init("dig"); | 53 | machvec_init("dig"); |
54 | swiotlb_init(); | 54 | swiotlb_init(1); |
55 | dma_ops = &swiotlb_dma_ops; | 55 | dma_ops = &swiotlb_dma_ops; |
56 | #else | 56 | #else |
57 | panic("Unable to find Intel IOMMU"); | 57 | panic("Unable to find Intel IOMMU"); |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1782705b1f7..599b233bef75 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -522,42 +522,37 @@ EXPORT_SYMBOL(pfm_sysctl); | |||
522 | 522 | ||
523 | static ctl_table pfm_ctl_table[]={ | 523 | static ctl_table pfm_ctl_table[]={ |
524 | { | 524 | { |
525 | .ctl_name = CTL_UNNUMBERED, | ||
526 | .procname = "debug", | 525 | .procname = "debug", |
527 | .data = &pfm_sysctl.debug, | 526 | .data = &pfm_sysctl.debug, |
528 | .maxlen = sizeof(int), | 527 | .maxlen = sizeof(int), |
529 | .mode = 0666, | 528 | .mode = 0666, |
530 | .proc_handler = &proc_dointvec, | 529 | .proc_handler = proc_dointvec, |
531 | }, | 530 | }, |
532 | { | 531 | { |
533 | .ctl_name = CTL_UNNUMBERED, | ||
534 | .procname = "debug_ovfl", | 532 | .procname = "debug_ovfl", |
535 | .data = &pfm_sysctl.debug_ovfl, | 533 | .data = &pfm_sysctl.debug_ovfl, |
536 | .maxlen = sizeof(int), | 534 | .maxlen = sizeof(int), |
537 | .mode = 0666, | 535 | .mode = 0666, |
538 | .proc_handler = &proc_dointvec, | 536 | .proc_handler = proc_dointvec, |
539 | }, | 537 | }, |
540 | { | 538 | { |
541 | .ctl_name = CTL_UNNUMBERED, | ||
542 | .procname = "fastctxsw", | 539 | .procname = "fastctxsw", |
543 | .data = &pfm_sysctl.fastctxsw, | 540 | .data = &pfm_sysctl.fastctxsw, |
544 | .maxlen = sizeof(int), | 541 | .maxlen = sizeof(int), |
545 | .mode = 0600, | 542 | .mode = 0600, |
546 | .proc_handler = &proc_dointvec, | 543 | .proc_handler = proc_dointvec, |
547 | }, | 544 | }, |
548 | { | 545 | { |
549 | .ctl_name = CTL_UNNUMBERED, | ||
550 | .procname = "expert_mode", | 546 | .procname = "expert_mode", |
551 | .data = &pfm_sysctl.expert_mode, | 547 | .data = &pfm_sysctl.expert_mode, |
552 | .maxlen = sizeof(int), | 548 | .maxlen = sizeof(int), |
553 | .mode = 0600, | 549 | .mode = 0600, |
554 | .proc_handler = &proc_dointvec, | 550 | .proc_handler = proc_dointvec, |
555 | }, | 551 | }, |
556 | {} | 552 | {} |
557 | }; | 553 | }; |
558 | static ctl_table pfm_sysctl_dir[] = { | 554 | static ctl_table pfm_sysctl_dir[] = { |
559 | { | 555 | { |
560 | .ctl_name = CTL_UNNUMBERED, | ||
561 | .procname = "perfmon", | 556 | .procname = "perfmon", |
562 | .mode = 0555, | 557 | .mode = 0555, |
563 | .child = pfm_ctl_table, | 558 | .child = pfm_ctl_table, |
@@ -566,7 +561,6 @@ static ctl_table pfm_sysctl_dir[] = { | |||
566 | }; | 561 | }; |
567 | static ctl_table pfm_sysctl_root[] = { | 562 | static ctl_table pfm_sysctl_root[] = { |
568 | { | 563 | { |
569 | .ctl_name = CTL_KERN, | ||
570 | .procname = "kernel", | 564 | .procname = "kernel", |
571 | .mode = 0555, | 565 | .mode = 0555, |
572 | .child = pfm_sysctl_dir, | 566 | .child = pfm_sysctl_dir, |
@@ -3523,7 +3517,7 @@ pfm_use_debug_registers(struct task_struct *task) | |||
3523 | * IA64_THREAD_DBG_VALID set. This indicates a task which was | 3517 | * IA64_THREAD_DBG_VALID set. This indicates a task which was |
3524 | * able to use the debug registers for debugging purposes via | 3518 | * able to use the debug registers for debugging purposes via |
3525 | * ptrace(). Therefore we know it was not using them for | 3519 | * ptrace(). Therefore we know it was not using them for |
3526 | * perfmormance monitoring, so we only decrement the number | 3520 | * performance monitoring, so we only decrement the number |
3527 | * of "ptraced" debug register users to keep the count up to date | 3521 | * of "ptraced" debug register users to keep the count up to date |
3528 | */ | 3522 | */ |
3529 | int | 3523 | int |
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 92ed83f34036..609d50056a6c 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c | |||
@@ -100,51 +100,7 @@ sys_getpagesize (void) | |||
100 | asmlinkage unsigned long | 100 | asmlinkage unsigned long |
101 | ia64_brk (unsigned long brk) | 101 | ia64_brk (unsigned long brk) |
102 | { | 102 | { |
103 | unsigned long rlim, retval, newbrk, oldbrk; | 103 | unsigned long retval = sys_brk(brk); |
104 | struct mm_struct *mm = current->mm; | ||
105 | |||
106 | /* | ||
107 | * Most of this replicates the code in sys_brk() except for an additional safety | ||
108 | * check and the clearing of r8. However, we can't call sys_brk() because we need | ||
109 | * to acquire the mmap_sem before we can do the test... | ||
110 | */ | ||
111 | down_write(&mm->mmap_sem); | ||
112 | |||
113 | if (brk < mm->end_code) | ||
114 | goto out; | ||
115 | newbrk = PAGE_ALIGN(brk); | ||
116 | oldbrk = PAGE_ALIGN(mm->brk); | ||
117 | if (oldbrk == newbrk) | ||
118 | goto set_brk; | ||
119 | |||
120 | /* Always allow shrinking brk. */ | ||
121 | if (brk <= mm->brk) { | ||
122 | if (!do_munmap(mm, newbrk, oldbrk-newbrk)) | ||
123 | goto set_brk; | ||
124 | goto out; | ||
125 | } | ||
126 | |||
127 | /* Check against unimplemented/unmapped addresses: */ | ||
128 | if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT) | ||
129 | goto out; | ||
130 | |||
131 | /* Check against rlimit.. */ | ||
132 | rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; | ||
133 | if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) | ||
134 | goto out; | ||
135 | |||
136 | /* Check against existing mmap mappings. */ | ||
137 | if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) | ||
138 | goto out; | ||
139 | |||
140 | /* Ok, looks good - let it rip. */ | ||
141 | if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) | ||
142 | goto out; | ||
143 | set_brk: | ||
144 | mm->brk = brk; | ||
145 | out: | ||
146 | retval = mm->brk; | ||
147 | up_write(&mm->mmap_sem); | ||
148 | force_successful_syscall_return(); | 104 | force_successful_syscall_return(); |
149 | return retval; | 105 | return retval; |
150 | } | 106 | } |
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, | |||
185 | return 0; | 141 | return 0; |
186 | } | 142 | } |
187 | 143 | ||
188 | static inline unsigned long | ||
189 | do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) | ||
190 | { | ||
191 | struct file *file = NULL; | ||
192 | |||
193 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
194 | if (!(flags & MAP_ANONYMOUS)) { | ||
195 | file = fget(fd); | ||
196 | if (!file) | ||
197 | return -EBADF; | ||
198 | |||
199 | if (!file->f_op || !file->f_op->mmap) { | ||
200 | addr = -ENODEV; | ||
201 | goto out; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | /* Careful about overflows.. */ | ||
206 | len = PAGE_ALIGN(len); | ||
207 | if (!len || len > TASK_SIZE) { | ||
208 | addr = -EINVAL; | ||
209 | goto out; | ||
210 | } | ||
211 | |||
212 | down_write(¤t->mm->mmap_sem); | ||
213 | addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
214 | up_write(¤t->mm->mmap_sem); | ||
215 | |||
216 | out: if (file) | ||
217 | fput(file); | ||
218 | return addr; | ||
219 | } | ||
220 | |||
221 | /* | 144 | /* |
222 | * mmap2() is like mmap() except that the offset is expressed in units | 145 | * mmap2() is like mmap() except that the offset is expressed in units |
223 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces | 146 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces |
@@ -226,7 +149,7 @@ out: if (file) | |||
226 | asmlinkage unsigned long | 149 | asmlinkage unsigned long |
227 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) | 150 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) |
228 | { | 151 | { |
229 | addr = do_mmap2(addr, len, prot, flags, fd, pgoff); | 152 | addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); |
230 | if (!IS_ERR((void *) addr)) | 153 | if (!IS_ERR((void *) addr)) |
231 | force_successful_syscall_return(); | 154 | force_successful_syscall_return(); |
232 | return addr; | 155 | return addr; |
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo | |||
238 | if (offset_in_page(off) != 0) | 161 | if (offset_in_page(off) != 0) |
239 | return -EINVAL; | 162 | return -EINVAL; |
240 | 163 | ||
241 | addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | 164 | addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); |
242 | if (!IS_ERR((void *) addr)) | 165 | if (!IS_ERR((void *) addr)) |
243 | force_successful_syscall_return(); | 166 | force_successful_syscall_return(); |
244 | return addr; | 167 | return addr; |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 4990495d7531..a35c661e5e89 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void) | |||
473 | { | 473 | { |
474 | } | 474 | } |
475 | 475 | ||
476 | void update_vsyscall(struct timespec *wall, struct clocksource *c) | 476 | void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult) |
477 | { | 477 | { |
478 | unsigned long flags; | 478 | unsigned long flags; |
479 | 479 | ||
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c) | |||
481 | 481 | ||
482 | /* copy fsyscall clock data */ | 482 | /* copy fsyscall clock data */ |
483 | fsyscall_gtod_data.clk_mask = c->mask; | 483 | fsyscall_gtod_data.clk_mask = c->mask; |
484 | fsyscall_gtod_data.clk_mult = c->mult; | 484 | fsyscall_gtod_data.clk_mult = mult; |
485 | fsyscall_gtod_data.clk_shift = c->shift; | 485 | fsyscall_gtod_data.clk_shift = c->shift; |
486 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; | 486 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; |
487 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; | 487 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; |
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 6db08599ebbc..776dd40397e2 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c | |||
@@ -60,7 +60,6 @@ dump (const char *str, void *vp, size_t len) | |||
60 | */ | 60 | */ |
61 | int no_unaligned_warning; | 61 | int no_unaligned_warning; |
62 | int unaligned_dump_stack; | 62 | int unaligned_dump_stack; |
63 | static int noprint_warning; | ||
64 | 63 | ||
65 | /* | 64 | /* |
66 | * For M-unit: | 65 | * For M-unit: |
@@ -1357,9 +1356,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) | |||
1357 | /* watch for command names containing %s */ | 1356 | /* watch for command names containing %s */ |
1358 | printk(KERN_WARNING "%s", buf); | 1357 | printk(KERN_WARNING "%s", buf); |
1359 | } else { | 1358 | } else { |
1360 | if (no_unaligned_warning && !noprint_warning) { | 1359 | if (no_unaligned_warning) { |
1361 | noprint_warning = 1; | 1360 | printk_once(KERN_WARNING "%s(%d) encountered an " |
1362 | printk(KERN_WARNING "%s(%d) encountered an " | ||
1363 | "unaligned exception which required\n" | 1361 | "unaligned exception which required\n" |
1364 | "kernel assistance, which degrades " | 1362 | "kernel assistance, which degrades " |
1365 | "the performance of the application.\n" | 1363 | "the performance of the application.\n" |
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index 0bb99b732908..1089b3e918ac 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile | |||
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | |||
49 | EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ | 49 | EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ |
50 | 50 | ||
51 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ | 51 | common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ |
52 | coalesced_mmio.o irq_comm.o) | 52 | coalesced_mmio.o irq_comm.o assigned-dev.o) |
53 | 53 | ||
54 | ifeq ($(CONFIG_IOMMU_API),y) | 54 | ifeq ($(CONFIG_IOMMU_API),y) |
55 | common-objs += $(addprefix ../../../virt/kvm/, iommu.o) | 55 | common-objs += $(addprefix ../../../virt/kvm/, iommu.o) |
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0ad09f05efa9..5fdeec5fddcf 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | |||
124 | 124 | ||
125 | static DEFINE_SPINLOCK(vp_lock); | 125 | static DEFINE_SPINLOCK(vp_lock); |
126 | 126 | ||
127 | void kvm_arch_hardware_enable(void *garbage) | 127 | int kvm_arch_hardware_enable(void *garbage) |
128 | { | 128 | { |
129 | long status; | 129 | long status; |
130 | long tmp_base; | 130 | long tmp_base; |
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage) | |||
137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | 137 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); |
138 | local_irq_restore(saved_psr); | 138 | local_irq_restore(saved_psr); |
139 | if (slot < 0) | 139 | if (slot < 0) |
140 | return; | 140 | return -EINVAL; |
141 | 141 | ||
142 | spin_lock(&vp_lock); | 142 | spin_lock(&vp_lock); |
143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | 143 | status = ia64_pal_vp_init_env(kvm_vsa_base ? |
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage) | |||
145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | 145 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); |
146 | if (status != 0) { | 146 | if (status != 0) { |
147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | 147 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); |
148 | return ; | 148 | return -EINVAL; |
149 | } | 149 | } |
150 | 150 | ||
151 | if (!kvm_vsa_base) { | 151 | if (!kvm_vsa_base) { |
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage) | |||
154 | } | 154 | } |
155 | spin_unlock(&vp_lock); | 155 | spin_unlock(&vp_lock); |
156 | ia64_ptr_entry(0x3, slot); | 156 | ia64_ptr_entry(0x3, slot); |
157 | |||
158 | return 0; | ||
157 | } | 159 | } |
158 | 160 | ||
159 | void kvm_arch_hardware_disable(void *garbage) | 161 | void kvm_arch_hardware_disable(void *garbage) |
@@ -851,8 +853,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | |||
851 | r = 0; | 853 | r = 0; |
852 | switch (chip->chip_id) { | 854 | switch (chip->chip_id) { |
853 | case KVM_IRQCHIP_IOAPIC: | 855 | case KVM_IRQCHIP_IOAPIC: |
854 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | 856 | r = kvm_get_ioapic(kvm, &chip->chip.ioapic); |
855 | sizeof(struct kvm_ioapic_state)); | ||
856 | break; | 857 | break; |
857 | default: | 858 | default: |
858 | r = -EINVAL; | 859 | r = -EINVAL; |
@@ -868,9 +869,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
868 | r = 0; | 869 | r = 0; |
869 | switch (chip->chip_id) { | 870 | switch (chip->chip_id) { |
870 | case KVM_IRQCHIP_IOAPIC: | 871 | case KVM_IRQCHIP_IOAPIC: |
871 | memcpy(ioapic_irqchip(kvm), | 872 | r = kvm_set_ioapic(kvm, &chip->chip.ioapic); |
872 | &chip->chip.ioapic, | ||
873 | sizeof(struct kvm_ioapic_state)); | ||
874 | break; | 873 | break; |
875 | default: | 874 | default: |
876 | r = -EINVAL; | 875 | r = -EINVAL; |
@@ -944,7 +943,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
944 | { | 943 | { |
945 | struct kvm *kvm = filp->private_data; | 944 | struct kvm *kvm = filp->private_data; |
946 | void __user *argp = (void __user *)arg; | 945 | void __user *argp = (void __user *)arg; |
947 | int r = -EINVAL; | 946 | int r = -ENOTTY; |
948 | 947 | ||
949 | switch (ioctl) { | 948 | switch (ioctl) { |
950 | case KVM_SET_MEMORY_REGION: { | 949 | case KVM_SET_MEMORY_REGION: { |
@@ -985,10 +984,8 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
985 | goto out; | 984 | goto out; |
986 | if (irqchip_in_kernel(kvm)) { | 985 | if (irqchip_in_kernel(kvm)) { |
987 | __s32 status; | 986 | __s32 status; |
988 | mutex_lock(&kvm->irq_lock); | ||
989 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | 987 | status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
990 | irq_event.irq, irq_event.level); | 988 | irq_event.irq, irq_event.level); |
991 | mutex_unlock(&kvm->irq_lock); | ||
992 | if (ioctl == KVM_IRQ_LINE_STATUS) { | 989 | if (ioctl == KVM_IRQ_LINE_STATUS) { |
993 | irq_event.status = status; | 990 | irq_event.status = status; |
994 | if (copy_to_user(argp, &irq_event, | 991 | if (copy_to_user(argp, &irq_event, |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index f426dc78d959..ee09d261f2e6 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c | |||
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm) | |||
100 | * this primitive it can be moved up to a spinaphore.h header. | 100 | * this primitive it can be moved up to a spinaphore.h header. |
101 | */ | 101 | */ |
102 | struct spinaphore { | 102 | struct spinaphore { |
103 | atomic_t cur; | 103 | unsigned long ticket; |
104 | unsigned long serve; | ||
104 | }; | 105 | }; |
105 | 106 | ||
106 | static inline void spinaphore_init(struct spinaphore *ss, int val) | 107 | static inline void spinaphore_init(struct spinaphore *ss, int val) |
107 | { | 108 | { |
108 | atomic_set(&ss->cur, val); | 109 | ss->ticket = 0; |
110 | ss->serve = val; | ||
109 | } | 111 | } |
110 | 112 | ||
111 | static inline void down_spin(struct spinaphore *ss) | 113 | static inline void down_spin(struct spinaphore *ss) |
112 | { | 114 | { |
113 | while (unlikely(!atomic_add_unless(&ss->cur, -1, 0))) | 115 | unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve; |
114 | while (atomic_read(&ss->cur) == 0) | 116 | |
115 | cpu_relax(); | 117 | if (time_before(t, ss->serve)) |
118 | return; | ||
119 | |||
120 | ia64_invala(); | ||
121 | |||
122 | for (;;) { | ||
123 | asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); | ||
124 | if (time_before(t, serve)) | ||
125 | return; | ||
126 | cpu_relax(); | ||
127 | } | ||
116 | } | 128 | } |
117 | 129 | ||
118 | static inline void up_spin(struct spinaphore *ss) | 130 | static inline void up_spin(struct spinaphore *ss) |
119 | { | 131 | { |
120 | atomic_add(1, &ss->cur); | 132 | ia64_fetchadd(1, &ss->serve, rel); |
121 | } | 133 | } |
122 | 134 | ||
123 | static struct spinaphore ptcg_sem; | 135 | static struct spinaphore ptcg_sem; |
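The spinaphore in tlb.c is converted to the same ticket scheme as the spinlock above: down_spin() grabs a ticket and waits until the serve counter moves past it, up_spin() advances serve, and initializing serve to val allows val concurrent holders. A minimal user-space sketch with C11 atomics in place of ia64_fetchadd() and the ld4.c.nc retry loop; note that the kernel compares tickets with time_before() so the logic survives counter wrap, while the sketch uses a plain comparison:

#include <stdatomic.h>
#include <stdio.h>

struct spinaphore {
	atomic_ulong ticket;	/* next ticket to hand out */
	atomic_ulong serve;	/* tickets below this value may proceed */
};

static void spinaphore_init(struct spinaphore *ss, unsigned long val)
{
	atomic_init(&ss->ticket, 0);
	atomic_init(&ss->serve, val);	/* 'val' holders may be inside at once */
}

static void down_spin(struct spinaphore *ss)
{
	unsigned long t = atomic_fetch_add(&ss->ticket, 1);

	/* wait until the serve counter has moved past our ticket */
	while (!(t < atomic_load(&ss->serve)))
		;	/* cpu_relax() in the kernel version */
}

static void up_spin(struct spinaphore *ss)
{
	atomic_fetch_add(&ss->serve, 1);	/* admit the next ticket */
}

int main(void)
{
	struct spinaphore ss;

	spinaphore_init(&ss, 2);
	down_spin(&ss);
	down_spin(&ss);		/* both fit: two slots were configured */
	up_spin(&ss);
	down_spin(&ss);		/* third holder enters once a slot is freed */
	printf("ticket=%lu serve=%lu\n",
	       atomic_load(&ss.ticket), atomic_load(&ss.serve));
	return 0;
}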
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 7de76dd352fe..df639db779f9 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c | |||
@@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn, | |||
56 | if ((seg | reg) <= 255) { | 56 | if ((seg | reg) <= 255) { |
57 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); | 57 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); |
58 | mode = 0; | 58 | mode = 0; |
59 | } else { | 59 | } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { |
60 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); | 60 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); |
61 | mode = 1; | 61 | mode = 1; |
62 | } else { | ||
63 | return -EINVAL; | ||
62 | } | 64 | } |
65 | |||
63 | result = ia64_sal_pci_config_read(addr, mode, len, &data); | 66 | result = ia64_sal_pci_config_read(addr, mode, len, &data); |
64 | if (result != 0) | 67 | if (result != 0) |
65 | return -EINVAL; | 68 | return -EINVAL; |
@@ -80,9 +83,11 @@ int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn, | |||
80 | if ((seg | reg) <= 255) { | 83 | if ((seg | reg) <= 255) { |
81 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); | 84 | addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); |
82 | mode = 0; | 85 | mode = 0; |
83 | } else { | 86 | } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { |
84 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); | 87 | addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); |
85 | mode = 1; | 88 | mode = 1; |
89 | } else { | ||
90 | return -EINVAL; | ||
86 | } | 91 | } |
87 | result = ia64_sal_pci_config_write(addr, mode, len, value); | 92 | result = ia64_sal_pci_config_write(addr, mode, len, value); |
88 | if (result != 0) | 93 | if (result != 0) |
@@ -126,6 +131,7 @@ alloc_pci_controller (int seg) | |||
126 | } | 131 | } |
127 | 132 | ||
128 | struct pci_root_info { | 133 | struct pci_root_info { |
134 | struct acpi_device *bridge; | ||
129 | struct pci_controller *controller; | 135 | struct pci_controller *controller; |
130 | char *name; | 136 | char *name; |
131 | }; | 137 | }; |
@@ -292,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data) | |||
292 | window->offset = offset; | 298 | window->offset = offset; |
293 | 299 | ||
294 | if (insert_resource(root, &window->resource)) { | 300 | if (insert_resource(root, &window->resource)) { |
295 | printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n", | 301 | dev_err(&info->bridge->dev, |
296 | window->resource.start, window->resource.end, | 302 | "can't allocate host bridge window %pR\n", |
297 | root->name, info->name); | 303 | &window->resource); |
304 | } else { | ||
305 | if (offset) | ||
306 | dev_info(&info->bridge->dev, "host bridge window %pR " | ||
307 | "(PCI address [%#llx-%#llx])\n", | ||
308 | &window->resource, | ||
309 | window->resource.start - offset, | ||
310 | window->resource.end - offset); | ||
311 | else | ||
312 | dev_info(&info->bridge->dev, | ||
313 | "host bridge window %pR\n", | ||
314 | &window->resource); | ||
298 | } | 315 | } |
299 | 316 | ||
300 | return AE_OK; | 317 | return AE_OK; |
@@ -314,8 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) | |||
314 | (res->end - res->start < 16)) | 331 | (res->end - res->start < 16)) |
315 | continue; | 332 | continue; |
316 | if (j >= PCI_BUS_NUM_RESOURCES) { | 333 | if (j >= PCI_BUS_NUM_RESOURCES) { |
317 | printk("Ignoring range [%#llx-%#llx] (%lx)\n", | 334 | dev_warn(&bus->dev, |
318 | res->start, res->end, res->flags); | 335 | "ignoring host bridge window %pR (no space)\n", |
336 | res); | ||
319 | continue; | 337 | continue; |
320 | } | 338 | } |
321 | bus->resource[j++] = res; | 339 | bus->resource[j++] = res; |
@@ -359,6 +377,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) | |||
359 | goto out3; | 377 | goto out3; |
360 | 378 | ||
361 | sprintf(name, "PCI Bus %04x:%02x", domain, bus); | 379 | sprintf(name, "PCI Bus %04x:%02x", domain, bus); |
380 | info.bridge = device; | ||
362 | info.controller = controller; | 381 | info.controller = controller; |
363 | info.name = name; | 382 | info.name = name; |
364 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, | 383 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, |
@@ -715,9 +734,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) | |||
715 | return ret; | 734 | return ret; |
716 | } | 735 | } |
717 | 736 | ||
718 | /* It's defined in drivers/pci/pci.c */ | ||
719 | extern u8 pci_cache_line_size; | ||
720 | |||
721 | /** | 737 | /** |
722 | * set_pci_cacheline_size - determine cacheline size for PCI devices | 738 | * set_pci_cacheline_size - determine cacheline size for PCI devices |
723 | * | 739 | * |
@@ -726,7 +742,7 @@ extern u8 pci_cache_line_size; | |||
726 | * | 742 | * |
727 | * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). | 743 | * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). |
728 | */ | 744 | */ |
729 | static void __init set_pci_cacheline_size(void) | 745 | static void __init set_pci_dfl_cacheline_size(void) |
730 | { | 746 | { |
731 | unsigned long levels, unique_caches; | 747 | unsigned long levels, unique_caches; |
732 | long status; | 748 | long status; |
@@ -746,7 +762,7 @@ static void __init set_pci_cacheline_size(void) | |||
746 | "(status=%ld)\n", __func__, status); | 762 | "(status=%ld)\n", __func__, status); |
747 | return; | 763 | return; |
748 | } | 764 | } |
749 | pci_cache_line_size = (1 << cci.pcci_line_size) / 4; | 765 | pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4; |
750 | } | 766 | } |
751 | 767 | ||
752 | u64 ia64_dma_get_required_mask(struct device *dev) | 768 | u64 ia64_dma_get_required_mask(struct device *dev) |
@@ -777,7 +793,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask); | |||
777 | 793 | ||
778 | static int __init pcibios_init(void) | 794 | static int __init pcibios_init(void) |
779 | { | 795 | { |
780 | set_pci_cacheline_size(); | 796 | set_pci_dfl_cacheline_size(); |
781 | return 0; | 797 | return 0; |
782 | } | 798 | } |
783 | 799 | ||
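Three separate cleanups land in pci.c: extended (seg/reg above 255) SAL config-space accesses are now refused unless the firmware reports SAL revision 3.2 or later, presumably because older SAL implementations do not handle the extended calling convention; host-bridge window messages switch to dev_err()/dev_info() with the %pR resource format; and the PCI core default is set through pci_dfl_cache_line_size instead of overriding pci_cache_line_size. A small user-space sketch of just the version gate, with VERSION_CODE() as a hypothetical stand-in for the kernel's SAL_VERSION_CODE():

#include <stdio.h>

/* hypothetical stand-in for the encoding used by sal_revision */
#define VERSION_CODE(major, minor)	(((major) << 8) | (minor))

static int sal_revision = VERSION_CODE(3, 1);	/* pretend the firmware is too old */

static int config_read(unsigned int seg, unsigned int reg)
{
	if ((seg | reg) <= 255) {
		printf("legacy SAL config access\n");
		return 0;
	} else if (sal_revision >= VERSION_CODE(3, 2)) {
		printf("extended SAL config access\n");
		return 0;
	}
	return -22;	/* -EINVAL: firmware too old for extended accesses */
}

int main(void)
{
	printf("ret = %d\n", config_read(0, 0x400));
	return 0;
}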
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index fd50ff94302b..66f633bff059 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c | |||
@@ -390,7 +390,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, | |||
390 | pcidev_match.handle = NULL; | 390 | pcidev_match.handle = NULL; |
391 | 391 | ||
392 | acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX, | 392 | acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX, |
393 | find_matching_device, &pcidev_match, NULL); | 393 | find_matching_device, NULL, &pcidev_match, NULL); |
394 | 394 | ||
395 | if (!pcidev_match.handle) { | 395 | if (!pcidev_match.handle) { |
396 | printk(KERN_ERR | 396 | printk(KERN_ERR |
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 25831c47c579..308e6595110e 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
@@ -119,7 +119,6 @@ sn_pcidev_info_get(struct pci_dev *dev) | |||
119 | * Additionally note that the struct sn_flush_device_war also has to be | 119 | * Additionally note that the struct sn_flush_device_war also has to be |
120 | * removed from arch/ia64/sn/include/xtalk/hubdev.h | 120 | * removed from arch/ia64/sn/include/xtalk/hubdev.h |
121 | */ | 121 | */ |
122 | static u8 war_implemented = 0; | ||
123 | 122 | ||
124 | static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, | 123 | static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, |
125 | struct sn_flush_device_common *common) | 124 | struct sn_flush_device_common *common) |
@@ -128,11 +127,8 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, | |||
128 | struct sn_flush_device_war *dev_entry; | 127 | struct sn_flush_device_war *dev_entry; |
129 | struct ia64_sal_retval isrv = {0,0,0,0}; | 128 | struct ia64_sal_retval isrv = {0,0,0,0}; |
130 | 129 | ||
131 | if (!war_implemented) { | 130 | printk_once(KERN_WARNING |
132 | printk(KERN_WARNING "PROM version < 4.50 -- implementing old " | 131 | "PROM version < 4.50 -- implementing old PROM flush WAR\n"); |
133 | "PROM flush WAR\n"); | ||
134 | war_implemented = 1; | ||
135 | } | ||
136 | 132 | ||
137 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); | 133 | war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); |
138 | BUG_ON(!war_list); | 134 | BUG_ON(!war_list); |
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 4c7e74790958..55ac3c4e11d2 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
@@ -786,17 +786,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) | |||
786 | break; | 786 | break; |
787 | 787 | ||
788 | case SN_HWPERF_GET_OBJ_NODE: | 788 | case SN_HWPERF_GET_OBJ_NODE: |
789 | if (a.sz != sizeof(u64) || a.arg < 0) { | 789 | i = a.arg; |
790 | if (a.sz != sizeof(u64) || i < 0) { | ||
790 | r = -EINVAL; | 791 | r = -EINVAL; |
791 | goto error; | 792 | goto error; |
792 | } | 793 | } |
793 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { | 794 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { |
794 | if (a.arg >= nobj) { | 795 | if (i >= nobj) { |
795 | r = -EINVAL; | 796 | r = -EINVAL; |
796 | vfree(objs); | 797 | vfree(objs); |
797 | goto error; | 798 | goto error; |
798 | } | 799 | } |
799 | if (objs[(i = a.arg)].id != a.arg) { | 800 | if (objs[i].id != a.arg) { |
800 | for (i = 0; i < nobj; i++) { | 801 | for (i = 0; i < nobj; i++) { |
801 | if (objs[i].id == a.arg) | 802 | if (objs[i].id == a.arg) |
802 | break; | 803 | break; |
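The sn_hwperf.c hunk shown last copies a.arg into the signed int i before any range checks, so a negative index is caught up front (with an unsigned 64-bit a.arg, the old 'a.arg < 0' comparison could never fire) and the later objs[i] lookups use one consistent variable. A tiny user-space illustration of the unsigned-comparison pitfall:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t arg = (uint64_t)-5;	/* caller passed a "negative" index */
	int i = arg;			/* convert to a signed int first */

	printf("arg < 0 -> %d (always 0 for an unsigned type)\n", arg < 0);
	printf("i   < 0 -> %d (the check the code actually wants)\n", i < 0);
	return 0;
}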