author     Tejun Heo <tj@kernel.org>    2010-01-04 19:17:33 -0500
committer  Tejun Heo <tj@kernel.org>    2010-01-04 19:17:33 -0500
commit     32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree       b1ce838a37044bb38dfc128e2116ca35630e629a /arch/ia64
parent     22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent     c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu
Conflicts:
	arch/powerpc/platforms/pseries/hvCall.S
	include/linux/percpu.h
Diffstat (limited to 'arch/ia64')
 arch/ia64/Makefile                           |   2
 arch/ia64/hp/common/sba_iommu.c              |  40
 arch/ia64/ia32/elfcore32.h                   |   2
 arch/ia64/ia32/ia32_entry.S                  |   4
 arch/ia64/ia32/sys_ia32.c                    |  58
 arch/ia64/include/asm/acpi.h                 |   6
 arch/ia64/include/asm/asm-offsets.h          |   1
 arch/ia64/include/asm/bitops.h               |   2
 arch/ia64/include/asm/cacheflush.h           |   1
 arch/ia64/include/asm/dma-mapping.h          |   2
 arch/ia64/include/asm/elf.h                  |   1
 arch/ia64/include/asm/hw_irq.h               |   6
 arch/ia64/include/asm/io.h                   |   2
 arch/ia64/include/asm/irq.h                  |   2
 arch/ia64/include/asm/kvm.h                  |   1
 arch/ia64/include/asm/kvm_host.h             |   1
 arch/ia64/include/asm/mca.h                  |   5
 arch/ia64/include/asm/numa.h                 |   2
 arch/ia64/include/asm/perfmon_default_smpl.h |   2
 arch/ia64/include/asm/rwsem.h                |   2
 arch/ia64/include/asm/sn/shubio.h            |   2
 arch/ia64/include/asm/socket.h               |   2
 arch/ia64/include/asm/spinlock.h             | 136
 arch/ia64/include/asm/spinlock_types.h       |  10
 arch/ia64/include/asm/swiotlb.h              |   2
 arch/ia64/include/asm/unistd.h               |   3
 arch/ia64/include/asm/xen/hypervisor.h       |  28
 arch/ia64/kernel/Makefile                    |  11
 arch/ia64/kernel/acpi-processor.c            |  85
 arch/ia64/kernel/crash.c                     |  11
 arch/ia64/kernel/entry.S                     |   1
 arch/ia64/kernel/esi.c                       |   2
 arch/ia64/kernel/iosapic.c                   |   6
 arch/ia64/kernel/irq.c                       |   4
 arch/ia64/kernel/irq_ia64.c                  |  10
 arch/ia64/kernel/mca.c                       | 109
 arch/ia64/kernel/pci-swiotlb.c               |   4
 arch/ia64/kernel/perfmon.c                   |  31
 arch/ia64/kernel/sys_ia64.c                  |  83
 arch/ia64/kernel/time.c                      |   4
 arch/ia64/kernel/unaligned.c                 |   6
 arch/ia64/kvm/Makefile                       |   2
 arch/ia64/kvm/asm-offsets.c                  |   1
 arch/ia64/kvm/kvm-ia64.c                     |  19
 arch/ia64/kvm/vcpu.h                         |   9
 arch/ia64/kvm/vmm.c                          |   4
 arch/ia64/kvm/vtlb.c                         |   2
 arch/ia64/mm/ioremap.c                       |  11
 arch/ia64/mm/tlb.c                           |  24
 arch/ia64/pci/pci.c                          |  42
 arch/ia64/sn/kernel/io_acpi_init.c           |   2
 arch/ia64/sn/kernel/io_common.c              |   8
 arch/ia64/sn/kernel/sn2/sn_hwperf.c          |   7
 arch/ia64/sn/pci/tioca_provider.c            |  19
 54 files changed, 340 insertions(+), 502 deletions(-)
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e7cbaa02cd0b..475e2725fbde 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -103,4 +103,4 @@ archprepare: make_nr_irqs_h FORCE
 PHONY += make_nr_irqs_h FORCE
 
 make_nr_irqs_h: FORCE
-	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
+	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 674a8374c6d9..e14c492a8a93 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 		spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
 		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
-		if (unlikely(pide >= (ioc->res_size << 3)))
-			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-			      ioc->ioc_hpa);
+		if (unlikely(pide >= (ioc->res_size << 3))) {
+			printk(KERN_WARNING "%s: I/O MMU @ %p is"
+			       "out of mapping resources, %u %u %lx\n",
+			       __func__, ioc->ioc_hpa, ioc->res_size,
+			       pages_needed, dma_get_seg_boundary(dev));
+			return -1;
+		}
 #else
-		panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-		      ioc->ioc_hpa);
+		printk(KERN_WARNING "%s: I/O MMU @ %p is"
+		       "out of mapping resources, %u %u %lx\n",
+		       __func__, ioc->ioc_hpa, ioc->res_size,
+		       pages_needed, dma_get_seg_boundary(dev));
+		return -1;
 #endif
 	}
 }
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #endif
 
 	pide = sba_alloc_range(ioc, dev, size);
+	if (pide < 0)
+		return 0;
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	int n_mappings = 0;
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+	int idx;
 
 	while (nents > 0) {
 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1381,7 +1391,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 #endif
 
 		/*
-		** Not virtually contigous.
+		** Not virtually contiguous.
 		** Terminate prev chunk.
 		** Start a new chunk.
 		**
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		vcontig_sg->dma_length = vcontig_len;
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
-			| dma_offset);
+		idx = sba_alloc_range(ioc, dev, dma_len);
+		if (idx < 0) {
+			dma_sg->dma_length = 0;
+			return -1;
+		}
+		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+						   | dma_offset);
 		n_mappings++;
 	}
 
 	return n_mappings;
 }
 
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+	if (coalesced < 0) {
+		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+		return 0;
+	}
 
 	/*
 	** Program the I/O Pdir
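
The sba_iommu.c hunks above share one goal: exhausting the IOVA bitmap no longer panics the machine. sba_alloc_range() now reports failure with a negative return, sba_map_page() converts that into a 0 DMA address that callers observe through dma_mapping_error(), and the scatter-gather path unwinds any partial work via sba_unmap_sg_attrs(). A minimal user-space sketch of that error-propagation shape, with simplified stand-in signatures rather than the driver's real ones:

    #include <stdio.h>

    /* Stand-in for sba_alloc_range(): hands out a page index, or -1 when
     * the IOVA bitmap is exhausted -- the case that used to panic(). */
    static int alloc_range(unsigned int pages_free, unsigned int pages_needed)
    {
            if (pages_needed > pages_free) {
                    fprintf(stderr, "warning: IOMMU out of mapping resources\n");
                    return -1;              /* report, don't panic */
            }
            return 1;                       /* a free page index; index 0 is
                                               kept reserved here so that a
                                               0 address can mean failure */
    }

    /* Stand-in for sba_map_page(): a 0 DMA address signals failure. */
    static unsigned long map_page(unsigned int pages_free)
    {
            int pide = alloc_range(pages_free, 4);

            if (pide < 0)
                    return 0;
            return (unsigned long)pide << 12;   /* page index -> I/O address */
    }

    int main(void)
    {
            printf("ok:   %#lx\n", map_page(16));
            printf("fail: %#lx\n", map_page(2));
            return 0;
    }
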
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea3..657725742617 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
 #include <asm/intrinsics.h>
 #include <asm/uaccess.h>
 
-#define USE_ELF_CORE_DUMP 1
-
 /* Override elfcore.h */
 #define _LINUX_ELFCORE_H 1
 typedef unsigned int elf_greg_t;
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index af9405cd70e5..2fd7479aa216 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -79,7 +79,7 @@ GLOBAL_ENTRY(ia32_ret_from_clone)
 (p6)	br.cond.spnt .ia32_strace_check_retval
 	;;			// prevent RAW on r8
 END(ia32_ret_from_clone)
-	// fall thrugh
+	// fall through
 GLOBAL_ENTRY(ia32_ret_from_syscall)
 	PT_REGS_UNWIND_INFO(0)
 
@@ -327,7 +327,7 @@ ia32_syscall_table:
 	data8 compat_sys_writev
 	data8 sys_getsid
 	data8 sys_fdatasync
-	data8 sys32_sysctl
+	data8 compat_sys_sysctl
 	data8 sys_mlock	  /* 150 */
 	data8 sys_munlock
 	data8 sys_mlockall
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 625ed8f76fce..045b746b9808 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
 
 	prot = get_prot32(prot);
 
+	if (flags & MAP_HUGETLB)
+		return -ENOMEM;
+
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	mutex_lock(&ia32_mmap_mutex);
 	{
@@ -1628,61 +1631,6 @@ sys32_msync (unsigned int start, unsigned int len, int flags)
 	return sys_msync(addr, len + (start - addr), flags);
 }
 
-struct sysctl32 {
-	unsigned int	name;
-	int		nlen;
-	unsigned int	oldval;
-	unsigned int	oldlenp;
-	unsigned int	newval;
-	unsigned int	newlen;
-	unsigned int	__unused[4];
-};
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-asmlinkage long
-sys32_sysctl (struct sysctl32 __user *args)
-{
-	struct sysctl32 a32;
-	mm_segment_t old_fs = get_fs ();
-	void __user *oldvalp, *newvalp;
-	size_t oldlen;
-	int __user *namep;
-	long ret;
-
-	if (copy_from_user(&a32, args, sizeof(a32)))
-		return -EFAULT;
-
-	/*
-	 * We need to pre-validate these because we have to disable address checking
-	 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
-	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
-	 * addresses, we KNOW that access_ok() will always succeed, so this is an
-	 * expensive NOP, but so what...
-	 */
-	namep = (int __user *) compat_ptr(a32.name);
-	oldvalp = compat_ptr(a32.oldval);
-	newvalp = compat_ptr(a32.newval);
-
-	if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
-	    || !access_ok(VERIFY_WRITE, namep, 0)
-	    || !access_ok(VERIFY_WRITE, oldvalp, 0)
-	    || !access_ok(VERIFY_WRITE, newvalp, 0))
-		return -EFAULT;
-
-	set_fs(KERNEL_DS);
-	lock_kernel();
-	ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
-			newvalp, (size_t) a32.newlen);
-	unlock_kernel();
-	set_fs(old_fs);
-
-	if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
-		return -EFAULT;
-
-	return ret;
-}
-#endif
-
 asmlinkage long
 sys32_newuname (struct new_utsname __user *name)
 {
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 91df9686a0da..7ae58892ba8d 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -132,6 +132,12 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif
 
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
 #define acpi_unlazy_tlb(x)
 
 #ifdef CONFIG_ACPI_NUMA
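
This hunk is the ia64 half of a tree-wide _PDC cleanup (the deletion of arch/ia64/kernel/acpi-processor.c further down is the other half): rather than each architecture allocating and filling the whole _PDC object list, the generic ACPI code now owns the buffer and asks the architecture to OR in its capability bits. A rough sketch of that inversion of control, assuming a hypothetical core-side helper (the real one lives in drivers/acpi and is not shown in this diff):

    #include <stdio.h>

    typedef unsigned int u32;

    #define ACPI_PDC_REVISION_ID        1
    #define ACPI_PDC_EST_CAPABILITY_SMP (1 << 3)   /* illustrative bit value */

    /* The per-arch hook this hunk adds: it only ORs in extra bits. */
    static void arch_acpi_set_pdc_bits(u32 *buf)
    {
            buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
    }

    /* Hypothetical stand-in for the generic code that now builds the
     * buffer and calls the arch hook. */
    static void acpi_processor_init_pdc(u32 *buf)
    {
            buf[0] = ACPI_PDC_REVISION_ID;
            buf[1] = 1;
            buf[2] = 0;
            arch_acpi_set_pdc_bits(buf);   /* arch contributes its bits */
    }

    int main(void)
    {
            u32 buf[3];

            acpi_processor_init_pdc(buf);
            printf("_PDC capability word: %#x\n", buf[2]);
            return 0;
    }
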
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index c8ce2719fee8..429eefc93ee7 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -25,6 +25,7 @@
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
 
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)			\
 do {						\
 	clear_bit(PG_arch_1, &(page)->flags);	\
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e7..7d09a09cdaad 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
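
The one-liner above fixes an off-by-one: addr + size is one past the last byte of the buffer, so the old test wrongly rejected a buffer whose final byte sits exactly at the DMA mask limit (and the sum can even wrap to zero at the top of the address space). A standalone demonstration of the boundary case:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Old check: compares one-past-the-end against the mask. */
    static bool dma_capable_old(uint64_t addr, uint64_t size, uint64_t mask)
    {
            return addr + size <= mask;
    }

    /* New check from the hunk above: compare the last byte itself. */
    static bool dma_capable_new(uint64_t addr, uint64_t size, uint64_t mask)
    {
            return addr + size - 1 <= mask;
    }

    int main(void)
    {
            uint64_t mask = 0xffffffffULL;   /* 32-bit DMA mask */
            uint64_t addr = 0xfffff000ULL;   /* last 4 KiB under the mask */

            /* The buffer's final byte is 0xffffffff, which the device can
             * address, yet the old test returns false. */
            printf("old: %d  new: %d\n",
                   dma_capable_old(addr, 4096, mask),
                   dma_capable_new(addr, 4096, mask));
            return 0;
    }
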
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029cb..e14108b19c09 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_IA_64
 
-#define USE_ELF_CORE_DUMP
 #define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf5..bf2e37493e04 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
 extern int ia64_first_device_vector;
 extern int ia64_last_device_vector;
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR		0x30	/* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR	0x31
+#else
 #define IA64_DEF_FIRST_DEVICE_VECTOR	0x30
+#endif
 #define IA64_DEF_LAST_DEVICE_VECTOR	0xe7
 #define IA64_FIRST_DEVICE_VECTOR	ia64_first_device_vector
 #define IA64_LAST_DEVICE_VECTOR		ia64_last_device_vector
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d949..cc8335eb3110 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5282546cdf82..91b920fd7d53 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -13,7 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/cpumask.h>
-#include <asm-ia64/nr-irqs.h>
+#include <generated/nr-irqs.h>
 
 static __inline__ int
 irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 18a7e49abbc5..bc90c75adf67 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_PIC_MASTER   0
 #define KVM_IRQCHIP_PIC_SLAVE    1
 #define KVM_IRQCHIP_IOAPIC       2
+#define KVM_NR_IRQCHIPS          3
 
 #define KVM_CONTEXT_SIZE 8*1024
 
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index d9b6325a9328..a362e67e0ca6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -475,7 +475,6 @@ struct kvm_arch {
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	int iommu_flags;
-	struct hlist_head irq_ack_notifier_list;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a789..43f96ab18fa0 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
 	unsigned long os_status;		/* OS status to SAL, enum below */
 	unsigned long context;			/* 0 if return to same context
 						   1 if return to new context */
+
+	/* I-resources */
+	unsigned long iip;
+	unsigned long ipsr;
+	unsigned long ifs;
 };
 
 enum {
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
 
 #include <asm/mmzone.h>
 
-#define NUMA_NO_NODE	-1
-
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 48822c0811d8..74724b24c2b7 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,7 +67,7 @@ typedef struct {
 	unsigned long	ip;		/* where did the overflow interrupt happened */
 	unsigned long	tstamp;		/* ar.itc when entering perfmon intr. handler */
 
-	unsigned short	cpu;		/* cpu on which the overfow occured */
+	unsigned short	cpu;		/* cpu on which the overflow occured */
 	unsigned short	set;		/* event set active when overflow ocurred */
 	int		tgid;		/* thread group id (for NPTL, this is getpid()) */
 } pfm_default_smpl_entry_t;
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b15782..e8762688e8e3 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 #define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
 	  LIST_HEAD_INIT((name).wait_list) }
 
 #define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 22a6f18a5313..6052422a22b3 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t;
 #define IIO_IIDSR_LVL_SHIFT	0
 #define IIO_IIDSR_LVL_MASK	0x000000ff
 
-/* Xtalk timeout threshhold register (IIO_IXTT) */
+/* Xtalk timeout threshold register (IIO_IXTT) */
 #define IXTT_RRSP_TO_SHFT	55	/* read response timeout */
 #define IXTT_RRSP_TO_MASK	(0x1FULL << IXTT_RRSP_TO_SHFT)
 #define IXTT_RRSP_PS_SHFT	32	/* read responsed TO prescalar */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 0b0d5ff062e5..51427eaa51ba 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -69,4 +69,6 @@
 #define SO_PROTOCOL	38
 #define SO_DOMAIN	39
 
+#define SO_RXQ_OVFL	40
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930e1111..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x)		((x)->lock = 0)
+#define arch_spin_lock_init(x)		((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -25,108 +25,128 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *  63                     32  31                      0
+ *   31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |  now_serving             |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
-
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
+	int tmp = ACCESS_ONCE(lock->lock);
 
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
+}
+
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -149,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw)								\
+#define arch_read_lock(rw)								\
 do {											\
-	raw_rwlock_t *__read_lock_ptr = (rw);						\
+	arch_rwlock_t *__read_lock_ptr = (rw);						\
 											\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
@@ -168,16 +188,16 @@ do { \
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw)					\
+#define arch_read_unlock(rw)					\
 do {								\
-	raw_rwlock_t *__read_lock_ptr = (rw);			\
+	arch_rwlock_t *__read_lock_ptr = (rw);			\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -201,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw)							\
+#define arch_write_trylock(rw)							\
 ({										\
 	register long result;							\
 										\
@@ -215,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 	(result == 0);								\
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -224,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l)								\
+#define arch_write_lock(l)								\
 ({											\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
@@ -237,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	} while (ia64_val);								\
 })
 
-#define __raw_write_trylock(rw)						\
+#define arch_write_trylock(rw)						\
 ({										\
 	__u64 ia64_val;								\
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);				\
@@ -245,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	(ia64_val == 0);							\
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -253,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
-		raw_rwlock_t lock;
+		arch_rwlock_t lock;
 		__u32 word;
 	} old, new;
 	old.lock = new.lock = *x;
@@ -265,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ASM_IA64_SPINLOCK_H */
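
The rewritten ticket lock packs both halves of the ticket into one 32-bit word: bits 0-14 hold next_ticket, bits 17-31 hold now_serving, and bits 15-16 pad the fields apart so a fetchadd on next_ticket cannot carry into now_serving. The user-space model below exercises only that field arithmetic; the kernel's atomics (ia64_fetchadd, ia64_cmpxchg) and the speculative ld4.c.nc wait loop are deliberately elided:

    #include <stdint.h>
    #include <stdio.h>

    #define TICKET_SHIFT 17
    #define TICKET_BITS  15
    #define TICKET_MASK  ((1 << TICKET_BITS) - 1)

    /* Locked when the two 15-bit fields disagree. */
    static int is_locked(uint32_t v)
    {
            return !!(((v >> TICKET_SHIFT) ^ v) & TICKET_MASK);
    }

    static uint32_t lock(uint32_t v)     /* take a ticket (fetchadd 1) */
    {
            return v + 1;
    }

    static uint32_t unlock(uint32_t v)   /* advance now_serving by one */
    {
            uint16_t hi = (uint16_t)(v >> 16);

            hi = (hi + 2) & ~1;          /* +2 lands on bit 17 of the word;
                                            &~1 keeps the pad bit clear */
            return (v & 0xffff) | ((uint32_t)hi << 16);
    }

    int main(void)
    {
            uint32_t v = 0;

            printf("unlocked: %d\n", is_locked(v));
            v = lock(v);
            printf("locked:   %d\n", is_locked(v));
            v = unlock(v);
            printf("released: %d\n", is_locked(v));
            return 0;
    }

The unlock path explains the odd (tmp + 2) & ~1 in the real code: the upper halfword starts at bit 16, so adding 2 increments bit 17 — the low bit of now_serving — while the mask keeps the padding bit from leaking into the count.
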
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136d9bc2..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,16 +6,16 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
-} raw_spinlock_t;
+	volatile unsigned int lock;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int read_counter	: 31;
 	volatile unsigned int write_lock	:  1;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0, 0 }
 
 #endif
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/swiotlb.h>
 
-extern int swiotlb_force;
-
 #ifdef CONFIG_SWIOTLB
 extern int swiotlb;
 extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 5a5347f5c4e4..10a8f21ca9e3 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -311,11 +311,12 @@
 #define __NR_preadv			1319
 #define __NR_pwritev			1320
 #define __NR_rt_tgsigqueueinfo		1321
+#define __NR_recvmmsg			1322
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls			298 /* length of syscall table */
+#define NR_syscalls			299 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
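
The two numbers in this hunk move together: ia64 syscall numbering starts at 1024, so defining __NR_recvmmsg as 1322 makes the table 1322 - 1024 + 1 = 299 entries long, which is why NR_syscalls is bumped in the same patch (entry.S below grows the matching sys_recvmmsg slot, and its .org guard would break the build if the two ever diverged). A quick check of that arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define IA64_SYSCALL_BASE 1024   /* first ia64 syscall number */
    #define NR_RECVMMSG       1322   /* newly added, highest number */
    #define NR_SYSCALLS       299    /* table length after this patch */

    int main(void)
    {
            /* one slot for every number from the base up to and
             * including the newest syscall */
            assert(NR_RECVMMSG - IA64_SYSCALL_BASE + 1 == NR_SYSCALLS);
            printf("table length checks out: %d entries\n", NR_SYSCALLS);
            return 0;
    }
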
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e4..67455c2ed2b1 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>	/* to compile feature.c */
 #include <xen/features.h>		/* to comiple xen-netfront.c */
+#include <xen/xen.h>
 #include <asm/xen/hypercall.h>
 
-/* xen_domain_type is set before executing any C code by early_xen_setup */
-enum xen_domain_type {
-	XEN_NATIVE,	/* running on bare hardware */
-	XEN_PV_DOMAIN,	/* running in a PV domain */
-	XEN_HVM_DOMAIN,	/* running in a Xen hvm domain*/
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#define xen_initial_domain()	(xen_pv_domain() &&			\
-				 (xen_start_info->flags & SIF_INITDOMAIN))
-#else
-#define xen_initial_domain()	(0)
-#endif
-
-
 #ifdef CONFIG_XEN
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 6b7edcab0cb5..e1236349c99f 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -18,10 +18,6 @@ obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1)	+= acpi-ext.o
 obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
 
-ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y				+= acpi-processor.o
-endif
-
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
 obj-$(CONFIG_MODULES)		+= module.o
@@ -81,17 +77,14 @@ define cmd_nr_irqs
 endef
 
 # We use internal kbuild rules to avoid the "is up to date" message from make
-arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
-	$(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
 	$(Q)mkdir -p $(dir $@)
 	$(call if_changed_dep,cc_s_c)
 
-include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
 	$(Q)mkdir -p $(dir $@)
 	$(call cmd,nr_irqs)
 
-clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
-
 #
 # native ivt.S, entry.S and fsys.S
 #
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
deleted file mode 100644
index dbda7bde6112..000000000000
--- a/arch/ia64/kernel/acpi-processor.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * arch/ia64/kernel/acpi-processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * 	- Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-	/*
-	 * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so
-	 * that OSPM is capable of native ACPI throttling software
-	 * coordination using BIOS supplied _TSD info.
-	 */
-	buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pr->pdc = obj_list;
-
-	return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
-	pr->pdc = NULL;
-	init_intel_pdc(pr);
-	return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
-
-void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
-{
-	if (pr->pdc) {
-		kfree(pr->pdc->pointer->buffer.pointer);
-		kfree(pr->pdc->pointer);
-		kfree(pr->pdc);
-		pr->pdc = NULL;
-	}
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 6631a9dfafdc..b942f4032d7a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 #ifdef CONFIG_SYSCTL
 static ctl_table kdump_ctl_table[] = {
 	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
 		.data = &kdump_on_init,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
 	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_fatal_mca",
 		.data = &kdump_on_fatal_mca,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = &proc_dointvec,
+		.proc_handler = proc_dointvec,
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 
 static ctl_table sys_table[] = {
 	{
-		.ctl_name = CTL_KERN,
 		.procname = "kernel",
 		.mode = 0555,
 		.child = kdump_ctl_table,
 	},
-	{ .ctl_name = 0 }
+	{ }
 };
 #endif
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d0e7d37017b4..d75b872ca4dc 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1806,6 +1806,7 @@ sys_call_table:
 	data8 sys_preadv
 	data8 sys_pwritev			// 1320
 	data8 sys_rt_tgsigqueueinfo
+	data8 sys_recvmmsg
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index d5764a3d74af..b091111270cb 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -84,7 +84,7 @@ static int __init esi_init (void)
 		case ESI_DESC_ENTRY_POINT:
 			break;
 		default:
-			printk(KERN_WARNING "Unkown table type %d found in "
+			printk(KERN_WARNING "Unknown table type %d found in "
 			       "ESI table, ignoring rest of table\n", *p);
 			return -ENODEV;
 		}
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
 		goto unlock_iosapic_lock;
 	}
 
-	spin_lock(&irq_desc[irq].lock);
+	raw_spin_lock(&irq_desc[irq].lock);
 	dest = get_target_cpu(gsi, irq);
 	dmode = choose_dmode();
 	err = register_intr(gsi, irq, dmode, polarity, trigger);
 	if (err < 0) {
-		spin_unlock(&irq_desc[irq].lock);
+		raw_spin_unlock(&irq_desc[irq].lock);
 		irq = err;
 		goto unlock_iosapic_lock;
 	}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
 		  (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 		  cpu_logical_id(dest), dest, irq_to_vector(irq));
 
-	spin_unlock(&irq_desc[irq].lock);
+	raw_spin_unlock(&irq_desc[irq].lock);
  unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS)
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..d4093a173a3e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
-#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
 
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
@@ -345,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 		desc = irq_desc + irq;
 		cfg = irq_cfg + irq;
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
@@ -358,7 +357,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		spin_unlock_irqrestore(&vector_lock, flags);
 		cfg->move_cleanup_count--;
 unlock:
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 	}
 	return IRQ_HANDLED;
 }
@@ -659,11 +658,8 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
-	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
-		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
-		IA64_FIRST_DEVICE_VECTOR++;
+	if (vector_domain_type != VECTOR_DOMAIN_NONE)
 		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
-	}
 #endif
 #endif
 #ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d2877a7bfe2e..32f2639e9b0a 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -887,6 +887,65 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
 	memcpy(current->comm, comm, sizeof(current->comm));
 }
 
+static void
+finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
+		unsigned long *nat)
+{
+	const pal_min_state_area_t *ms = sos->pal_min_state;
+	const u64 *bank;
+
+	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
+	 * pmsa_{xip,xpsr,xfs}
+	 */
+	if (ia64_psr(regs)->ic) {
+		regs->cr_iip = ms->pmsa_iip;
+		regs->cr_ipsr = ms->pmsa_ipsr;
+		regs->cr_ifs = ms->pmsa_ifs;
+	} else {
+		regs->cr_iip = ms->pmsa_xip;
+		regs->cr_ipsr = ms->pmsa_xpsr;
+		regs->cr_ifs = ms->pmsa_xfs;
+
+		sos->iip = ms->pmsa_iip;
+		sos->ipsr = ms->pmsa_ipsr;
+		sos->ifs = ms->pmsa_ifs;
+	}
+	regs->pr = ms->pmsa_pr;
+	regs->b0 = ms->pmsa_br0;
+	regs->ar_rsc = ms->pmsa_rsc;
+	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
+	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
+	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
+	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
+	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
+	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
+	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
+	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
+	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
+	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
+	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
+	if (ia64_psr(regs)->bn)
+		bank = ms->pmsa_bank1_gr;
+	else
+		bank = ms->pmsa_bank0_gr;
+	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
+	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
+	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
+	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
+	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
+	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
+	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
+	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
+	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
+	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
+	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
+	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
+	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
+	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
+	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
+	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
+}
+
 /* On entry to this routine, we are running on the per cpu stack, see
  * mca_asm.h.  The original stack has not been touched by this event.  Some of
  * the original stack's registers will be in the RBS on this stack.  This stack
@@ -921,7 +980,6 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
 	u64 ar_bspstore = regs->ar_bspstore;
 	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
-	const u64 *bank;
 	const char *msg;
 	int cpu = smp_processor_id();
 
@@ -1024,54 +1082,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	p = (char *)r12 - sizeof(*regs);
 	old_regs = (struct pt_regs *)p;
 	memcpy(old_regs, regs, sizeof(*regs));
-	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
-	 * pmsa_{xip,xpsr,xfs}
-	 */
-	if (ia64_psr(regs)->ic) {
-		old_regs->cr_iip = ms->pmsa_iip;
-		old_regs->cr_ipsr = ms->pmsa_ipsr;
-		old_regs->cr_ifs = ms->pmsa_ifs;
-	} else {
-		old_regs->cr_iip = ms->pmsa_xip;
-		old_regs->cr_ipsr = ms->pmsa_xpsr;
-		old_regs->cr_ifs = ms->pmsa_xfs;
-	}
-	old_regs->pr = ms->pmsa_pr;
-	old_regs->b0 = ms->pmsa_br0;
 	old_regs->loadrs = loadrs;
-	old_regs->ar_rsc = ms->pmsa_rsc;
 	old_unat = old_regs->ar_unat;
-	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
-	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
-	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
-	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
-	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
-	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
-	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
-	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
-	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
-	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
-	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
-	if (ia64_psr(old_regs)->bn)
-		bank = ms->pmsa_bank1_gr;
-	else
-		bank = ms->pmsa_bank0_gr;
-	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
-	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
-	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
-	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
-	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
-	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
-	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
-	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
-	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
-	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
-	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
-	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
-	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
-	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
-	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
-	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
+	finish_pt_regs(old_regs, sos, &old_unat);
 
 	/* Next stack a struct switch_stack.  mca_asm.S built a partial
 	 * switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1141,6 +1154,8 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
1141no_mod: 1154no_mod:
1142 mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", 1155 mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
1143 smp_processor_id(), type, msg); 1156 smp_processor_id(), type, msg);
1157 old_unat = regs->ar_unat;
1158 finish_pt_regs(regs, sos, &old_unat);
1144 return previous_current; 1159 return previous_current;
1145} 1160}
1146 1161
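
Taken together, the mca.c hunks fold roughly fifty open-coded copy_reg() calls into a single finish_pt_regs() helper, and the no_mod error path now runs it too, so cr_iip/cr_ipsr/cr_ifs are filled in even when the original stack is left alone. A condensed sketch of the helper's shape, reconstructed from the removed lines above (not the verbatim kernel source):

static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
	       unsigned long *nat)
{
	const pal_min_state_area_t *ms = sos->pal_min_state;
	const u64 *bank;

	/* ipsr.ic set: interruption state is in pmsa_{iip,ipsr,ifs};
	 * otherwise it was captured in pmsa_{xip,xpsr,xfs} instead. */
	if (ia64_psr(regs)->ic) {
		regs->cr_iip = ms->pmsa_iip;
		regs->cr_ipsr = ms->pmsa_ipsr;
		regs->cr_ifs = ms->pmsa_ifs;
	} else {
		regs->cr_iip = ms->pmsa_xip;
		regs->cr_ipsr = ms->pmsa_xpsr;
		regs->cr_ifs = ms->pmsa_xfs;
	}
	regs->pr = ms->pmsa_pr;
	regs->b0 = ms->pmsa_br0;
	regs->ar_rsc = ms->pmsa_rsc;

	/* static registers r1-r15, then r16-r31 from the active bank */
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
	/* ... r2-r15 exactly as in the removed lines above ... */
	bank = ia64_psr(regs)->bn ? ms->pmsa_bank1_gr : ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
	/* ... r17-r31 as shown at the top of this hunk ... */
}
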
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..53292abf846c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = {
 void __init swiotlb_dma_init(void)
 {
 	dma_ops = &swiotlb_dma_ops;
-	swiotlb_init();
+	swiotlb_init(1);
 }
 
 void __init pci_swiotlb_init(void)
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void)
 	swiotlb = 1;
 	printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
 	machvec_init("dig");
-	swiotlb_init();
+	swiotlb_init(1);
 	dma_ops = &swiotlb_dma_ops;
 #else
 	panic("Unable to find Intel IOMMU");
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f1782705b1f7..5246285a95fb 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -522,42 +522,37 @@ EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "debug",
 		.data		= &pfm_sysctl.debug,
 		.maxlen		= sizeof(int),
 		.mode		= 0666,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "debug_ovfl",
 		.data		= &pfm_sysctl.debug_ovfl,
 		.maxlen		= sizeof(int),
 		.mode		= 0666,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "fastctxsw",
 		.data		= &pfm_sysctl.fastctxsw,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "expert_mode",
 		.data		= &pfm_sysctl.expert_mode,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{}
 };
 static ctl_table pfm_sysctl_dir[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "perfmon",
 		.mode		= 0555,
 		.child		= pfm_ctl_table,
@@ -566,7 +561,6 @@ static ctl_table pfm_sysctl_dir[] = {
 };
 static ctl_table pfm_sysctl_root[] = {
 	{
-		.ctl_name	= CTL_KERN,
 		.procname	= "kernel",
 		.mode		= 0555,
 		.child		= pfm_sysctl_dir,
@@ -2206,7 +2200,7 @@ pfm_alloc_file(pfm_context_t *ctx)
 {
 	struct file *file;
 	struct inode *inode;
-	struct dentry *dentry;
+	struct path path;
 	char name[32];
 	struct qstr this;
 
@@ -2231,18 +2225,19 @@ pfm_alloc_file(pfm_context_t *ctx)
 	/*
 	 * allocate a new dcache entry
 	 */
-	dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
-	if (!dentry) {
+	path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+	if (!path.dentry) {
 		iput(inode);
 		return ERR_PTR(-ENOMEM);
 	}
+	path.mnt = mntget(pfmfs_mnt);
 
-	dentry->d_op = &pfmfs_dentry_operations;
-	d_add(dentry, inode);
+	path.dentry->d_op = &pfmfs_dentry_operations;
+	d_add(path.dentry, inode);
 
-	file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops);
+	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
 	if (!file) {
-		dput(dentry);
+		path_put(&path);
 		return ERR_PTR(-ENFILE);
 	}
 
@@ -3523,7 +3518,7 @@ pfm_use_debug_registers(struct task_struct *task)
 	 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 	 * able to use the debug registers for debugging purposes via
 	 * ptrace(). Therefore we know it was not using them for
-	 * perfmormance monitoring, so we only decrement the number
+	 * performance monitoring, so we only decrement the number
 	 * of "ptraced" debug register users to keep the count up to date
 	 */
 int
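
Two API migrations meet in this file. The sysctl tables drop their .ctl_name entries because binary sysctl numbering (CTL_UNNUMBERED and friends) went away in this release, leaving procname-only registration. And alloc_file() now takes a struct path, pairing the dentry with a vfsmount reference so a single path_put() releases both on failure. A minimal sketch of the new allocation pattern for a pseudo-filesystem file, error handling trimmed (the helper name is illustrative, not from the patch):

static struct file *pseudo_file_open(struct vfsmount *mnt,
				     struct inode *inode, struct qstr *name,
				     const struct file_operations *fops)
{
	struct path path;
	struct file *file;

	path.dentry = d_alloc(mnt->mnt_sb->s_root, name);
	if (!path.dentry)
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(mnt);		/* the path now owns a mount ref */
	d_add(path.dentry, inode);

	file = alloc_file(&path, FMODE_READ, fops);
	if (!file) {
		path_put(&path);	/* drops dentry and mount refs together */
		return ERR_PTR(-ENFILE);
	}
	return file;
}
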
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f34036..609d50056a6c 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@ sys_getpagesize (void)
 asmlinkage unsigned long
 ia64_brk (unsigned long brk)
 {
-	unsigned long rlim, retval, newbrk, oldbrk;
-	struct mm_struct *mm = current->mm;
-
-	/*
-	 * Most of this replicates the code in sys_brk() except for an additional safety
-	 * check and the clearing of r8. However, we can't call sys_brk() because we need
-	 * to acquire the mmap_sem before we can do the test...
-	 */
-	down_write(&mm->mmap_sem);
-
-	if (brk < mm->end_code)
-		goto out;
-	newbrk = PAGE_ALIGN(brk);
-	oldbrk = PAGE_ALIGN(mm->brk);
-	if (oldbrk == newbrk)
-		goto set_brk;
-
-	/* Always allow shrinking brk. */
-	if (brk <= mm->brk) {
-		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
-			goto set_brk;
-		goto out;
-	}
-
-	/* Check against unimplemented/unmapped addresses: */
-	if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
-		goto out;
-
-	/* Check against rlimit.. */
-	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-		goto out;
-
-	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
-		goto out;
-
-	/* Ok, looks good - let it rip. */
-	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
-		goto out;
-set_brk:
-	mm->brk = brk;
-out:
-	retval = mm->brk;
-	up_write(&mm->mmap_sem);
+	unsigned long retval = sys_brk(brk);
 	force_successful_syscall_return();
 	return retval;
 }
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
 	return 0;
 }
 
-static inline unsigned long
-do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
-{
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-
-		if (!file->f_op || !file->f_op->mmap) {
-			addr = -ENODEV;
-			goto out;
-		}
-	}
-
-	/* Careful about overflows.. */
-	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE) {
-		addr = -EINVAL;
-		goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-out:	if (file)
-		fput(file);
-	return addr;
-}
-
 /*
  * mmap2() is like mmap() except that the offset is expressed in units
  * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
@@ -226,7 +149,7 @@ out:	if (file)
 asmlinkage unsigned long
 sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
 {
-	addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
+	addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 	if (!IS_ERR((void *) addr))
 		force_successful_syscall_return();
 	return addr;
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo
 	if (offset_in_page(off) != 0)
 		return -EINVAL;
 
-	addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+	addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 	if (!IS_ERR((void *) addr))
 		force_successful_syscall_return();
 	return addr;
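
Both wrappers now delegate to generic code: sys_brk() grew the equivalent locking and limit checks long ago, and this cycle added sys_mmap_pgoff() so each architecture stops duplicating the fget/PAGE_ALIGN/mmap_sem sequence seen in the removed do_mmap2(). Roughly, the generic helper does the following (a hedged reconstruction based on the removed ia64 copy; the real one lives in mm/ and also applies per-arch hooks such as ia64_mmap_check()):

/* Sketch of the generic sys_mmap_pgoff() shape, not verbatim kernel code. */
unsigned long mmap_pgoff_sketch(unsigned long addr, unsigned long len,
				unsigned long prot, unsigned long flags,
				unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long ret;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			return -EBADF;
	}

	down_write(&current->mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
	return ret;
}
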
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 4990495d7531..a35c661e5e89 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall(struct timespec *wall, struct clocksource *c)
+void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
 {
 	unsigned long flags;
 
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
 
 	/* copy fsyscall clock data */
 	fsyscall_gtod_data.clk_mask = c->mask;
-	fsyscall_gtod_data.clk_mult = c->mult;
+	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
 	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
 	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
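
update_vsyscall() now receives the multiplier as its own argument because timekeeping maintains an NTP-adjusted mult separate from the clocksource's base c->mult; the fsyscall reader has to scale cycle deltas with the adjusted value or its gettimeofday() drifts from the kernel's view. The scaling itself, in compilable toy form (all numbers invented for the demo):

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	u64 cycle_last = 1000, mask = 0xffffffffffffffffULL;
	unsigned int mult = 416666667, shift = 22;	/* example values */
	u64 now = 1000 + 4194304;			/* some cycles later */

	/* what the vsyscall/fsyscall path computes from these fields */
	u64 ns = ((now - cycle_last) & mask) * mult >> shift;
	printf("elapsed: %llu ns\n", ns);
	return 0;
}
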
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6db08599ebbc..776dd40397e2 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -60,7 +60,6 @@ dump (const char *str, void *vp, size_t len)
  */
 int no_unaligned_warning;
 int unaligned_dump_stack;
-static int noprint_warning;
 
 /*
  * For M-unit:
@@ -1357,9 +1356,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 		/* watch for command names containing %s */
 		printk(KERN_WARNING "%s", buf);
 	} else {
-		if (no_unaligned_warning && !noprint_warning) {
-			noprint_warning = 1;
-			printk(KERN_WARNING "%s(%d) encountered an "
+		if (no_unaligned_warning) {
+			printk_once(KERN_WARNING "%s(%d) encountered an "
 				"unaligned exception which required\n"
 				"kernel assistance, which degrades "
 				"the performance of the application.\n"
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 0bb99b732908..1089b3e918ac 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o irq_comm.o)
+		coalesced_mmio.o irq_comm.o assigned-dev.o)
 
 ifeq ($(CONFIG_IOMMU_API),y)
 common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 0c3564a7a033..9324c875caf5 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -22,7 +22,6 @@
  *
  */
 
-#include <linux/autoconf.h>
 #include <linux/kvm_host.h>
 #include <linux/kbuild.h>
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0ad09f05efa9..5fdeec5fddcf 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
 
 static DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
 	long  status;
 	long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
 	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
 	local_irq_restore(saved_psr);
 	if (slot < 0)
-		return;
+		return -EINVAL;
 
 	spin_lock(&vp_lock);
 	status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
 			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
 	if (status != 0) {
 		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
-		return ;
+		return -EINVAL;
 	}
 
 	if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
 	}
 	spin_unlock(&vp_lock);
 	ia64_ptr_entry(0x3, slot);
+
+	return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
@@ -851,8 +853,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -868,9 +869,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(ioapic_irqchip(kvm),
-				&chip->chip.ioapic,
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -944,7 +943,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	int r = -EINVAL;
+	int r = -ENOTTY;
 
 	switch (ioctl) {
 	case KVM_SET_MEMORY_REGION: {
@@ -985,10 +984,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 				    irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
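
Several cross-architecture KVM API changes land here at once: kvm_arch_hardware_enable() returns an int so a CPU that fails VT bring-up can abort enabling instead of failing silently, unrecognized arch ioctls report -ENOTTY (the conventional errno for an unknown ioctl) so generic code can fall through cleanly, and kvm_get_ioapic()/kvm_set_ioapic() plus kvm_set_irq()'s internal locking replace open-coded memcpy and irq_lock usage. The enable-side contract, roughly (the function name comes from the diff; the surrounding wrapper is illustrative, not the actual virt/kvm internals):

/* Sketch: how a caller can use the new int return. The real caller
 * lives in generic KVM code; this wrapper only shows the contract. */
static int try_enable_virt_on_this_cpu(void)
{
	int r = kvm_arch_hardware_enable(NULL);

	if (r)
		printk(KERN_ERR "kvm: hardware enable failed on CPU %d (%d)\n",
		       smp_processor_id(), r);
	return r;
}
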
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae69..988911b4cc7a 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)	 do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)						\
 	do {								\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 
 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();					\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }	\
 while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
 	I_TLB = 1,
 	D_TLB = 2
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6c..7a62f75778c5 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6e..4332f7ee5203 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
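
The KVM VMM module runs in its own isolated mapping and was poking directly at spinlock_t's raw_lock field; with this release's move to arch_spinlock_t and ticket spinlocks, that layout is no longer something module-side code can assume. Defining a private vmm_spinlock_t (a bare 32-bit word, unlocked by a plain store after a barrier) decouples the VMM from kernel lock internals. A compilable userspace analog of the resulting lock (the real lock body is ia64 assembly; GCC builtins stand in here):

#include <stdio.h>
#include <pthread.h>

typedef struct { volatile unsigned int lock; } vmm_spinlock_t;

static void vmm_spin_lock(vmm_spinlock_t *l)
{
	while (__sync_lock_test_and_set(&l->lock, 1))	/* acquire */
		while (l->lock)
			;	/* spin, like cpu_relax() */
}

static void vmm_spin_unlock(vmm_spinlock_t *l)
{
	__sync_synchronize();	/* barrier() before the releasing store */
	l->lock = 0;
}

static vmm_spinlock_t sl;
static int counter;

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		vmm_spin_lock(&sl);
		counter++;
		vmm_spin_unlock(&sl);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("counter = %d (expect 200000)\n", counter);
	return 0;
}
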
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 2a140627dfd6..3dccdd8eb275 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr)
 }
 
 void __iomem *
+early_ioremap (unsigned long phys_addr, unsigned long size)
+{
+	return __ioremap(phys_addr);
+}
+
+void __iomem *
 ioremap (unsigned long phys_addr, unsigned long size)
 {
 	void __iomem *addr;
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
 EXPORT_SYMBOL(ioremap_nocache);
 
 void
+early_iounmap (volatile void __iomem *addr, unsigned long size)
+{
+}
+
+void
 iounmap (volatile void __iomem *addr)
 {
 	if (REGION_NUMBER(addr) == RGN_GATE)
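
Generic code increasingly expects early_ioremap()/early_iounmap() for mapping firmware tables before the normal ioremap machinery is up. On ia64, __ioremap() is just an offset into the uncached identity region and needs no allocation, so the early variant can delegate to it and the early unmap is a no-op. Typical caller shape, hedged (the concrete caller varies; names here are placeholders, not from this patch):

void __init parse_firmware_table(unsigned long phys, unsigned long len)
{
	void __iomem *map = early_ioremap(phys, len);

	if (!map)
		return;
	/* ... read the table through 'map' ... */
	early_iounmap(map, len);
}
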
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index f426dc78d959..ee09d261f2e6 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
  * this primitive it can be moved up to a spinaphore.h header.
  */
 struct spinaphore {
-	atomic_t	cur;
+	unsigned long	ticket;
+	unsigned long	serve;
 };
 
 static inline void spinaphore_init(struct spinaphore *ss, int val)
 {
-	atomic_set(&ss->cur, val);
+	ss->ticket = 0;
+	ss->serve = val;
 }
 
 static inline void down_spin(struct spinaphore *ss)
 {
-	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
-		while (atomic_read(&ss->cur) == 0)
-			cpu_relax();
+	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
+
+	if (time_before(t, ss->serve))
+		return;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+		if (time_before(t, serve))
+			return;
+		cpu_relax();
+	}
 }
 
 static inline void up_spin(struct spinaphore *ss)
 {
-	atomic_add(1, &ss->cur);
+	ia64_fetchadd(1, &ss->serve, rel);
 }
 
 static struct spinaphore ptcg_sem;
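
This converts the spinaphore (a spinning counting semaphore that throttles concurrent global TLB purges) from a plain atomic counter to a ticket scheme: each acquirer takes a ticket, and serve admits ticket holders in FIFO order, so under heavy contention on large machines no CPU can starve the way it could when everyone raced on atomic_add_unless(). A compilable userspace analog of the algorithm (ia64_fetchadd and the ld4.c.nc speculation trick become GCC builtins; before() mirrors the kernel's wraparound-safe time_before()):

#include <stdio.h>

struct spinaphore {
	unsigned long ticket;
	unsigned long serve;
};

/* signed difference handles counter wraparound, like time_before() */
static int before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

static void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;	/* the first 'val' tickets get in immediately */
}

static void down_spin(struct spinaphore *ss)
{
	unsigned long t = __sync_fetch_and_add(&ss->ticket, 1);

	while (!before(t, __sync_fetch_and_add(&ss->serve, 0)))
		;	/* spin until our ticket number is being served */
}

static void up_spin(struct spinaphore *ss)
{
	__sync_fetch_and_add(&ss->serve, 1);	/* admit the next waiter */
}

int main(void)
{
	struct spinaphore ss;

	spinaphore_init(&ss, 2);	/* at most 2 holders, like ptcg_sem */
	down_spin(&ss);
	down_spin(&ss);			/* both admitted immediately */
	up_spin(&ss);
	down_spin(&ss);			/* admitted by the up_spin() above */
	puts("ticket spinaphore ok");
	return 0;
}
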
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7de76dd352fe..df639db779f9 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
 	if ((seg | reg) <= 255) {
 		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
 		mode = 0;
-	} else {
+	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
 		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
 		mode = 1;
+	} else {
+		return -EINVAL;
 	}
+
 	result = ia64_sal_pci_config_read(addr, mode, len, &data);
 	if (result != 0)
 		return -EINVAL;
@@ -80,9 +83,11 @@ int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
 	if ((seg | reg) <= 255) {
 		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
 		mode = 0;
-	} else {
+	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
 		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
 		mode = 1;
+	} else {
+		return -EINVAL;
 	}
 	result = ia64_sal_pci_config_write(addr, mode, len, value);
 	if (result != 0)
@@ -126,6 +131,7 @@ alloc_pci_controller (int seg)
 }
 
 struct pci_root_info {
+	struct acpi_device *bridge;
 	struct pci_controller *controller;
 	char *name;
 };
@@ -292,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
 	window->offset = offset;
 
 	if (insert_resource(root, &window->resource)) {
-		printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n",
-			window->resource.start, window->resource.end,
-			root->name, info->name);
+		dev_err(&info->bridge->dev,
+			"can't allocate host bridge window %pR\n",
+			&window->resource);
+	} else {
+		if (offset)
+			dev_info(&info->bridge->dev, "host bridge window %pR "
+				 "(PCI address [%#llx-%#llx])\n",
+				 &window->resource,
+				 window->resource.start - offset,
+				 window->resource.end - offset);
+		else
+			dev_info(&info->bridge->dev,
+				 "host bridge window %pR\n",
+				 &window->resource);
 	}
 
 	return AE_OK;
@@ -314,8 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
 		    (res->end - res->start < 16))
 			continue;
 		if (j >= PCI_BUS_NUM_RESOURCES) {
-			printk("Ignoring range [%#llx-%#llx] (%lx)\n",
-			       res->start, res->end, res->flags);
+			dev_warn(&bus->dev,
+				 "ignoring host bridge window %pR (no space)\n",
+				 res);
 			continue;
 		}
 		bus->resource[j++] = res;
@@ -359,6 +377,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
 		goto out3;
 
 	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
+	info.bridge = device;
 	info.controller = controller;
 	info.name = name;
 	acpi_walk_resources(device->handle, METHOD_NAME__CRS,
@@ -715,9 +734,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	return ret;
 }
 
-/* It's defined in drivers/pci/pci.c */
-extern u8 pci_cache_line_size;
-
 /**
  * set_pci_cacheline_size - determine cacheline size for PCI devices
  *
@@ -726,7 +742,7 @@ extern u8 pci_cache_line_size;
  *
  * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
  */
-static void __init set_pci_cacheline_size(void)
+static void __init set_pci_dfl_cacheline_size(void)
 {
 	unsigned long levels, unique_caches;
 	long status;
@@ -746,7 +762,7 @@ static void __init set_pci_cacheline_size(void)
 		       "(status=%ld)\n", __func__, status);
 		return;
 	}
-	pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
+	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
 u64 ia64_dma_get_required_mask(struct device *dev)
@@ -777,7 +793,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
 
 static int __init pcibios_init(void)
 {
-	set_pci_cacheline_size();
+	set_pci_dfl_cacheline_size();
 	return 0;
 }
 
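
Three independent fixes share this file: extended (>255) segment/register config accesses are only attempted when the SAL revision is at least 3.2, since older firmware does not implement the extended method; host bridge window messages move to dev_err/dev_info with the %pR printk extension, which formats a struct resource and identifies the bridge device; and the architecture now sets pci_dfl_cache_line_size, the renamed default the PCI core applies to devices that do not override it. The logging idiom, in brief (a sketch restating the pattern from the hunks above, not new kernel code):

/* dev_*() prefixes the message with the device's name;
 * %pR prints a struct resource's address range. */
static void report_window(struct acpi_device *bridge,
			  struct resource *root, struct resource *win)
{
	if (insert_resource(root, win))
		dev_err(&bridge->dev,
			"can't allocate host bridge window %pR\n", win);
	else
		dev_info(&bridge->dev, "host bridge window %pR\n", win);
}
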
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index fd50ff94302b..66f633bff059 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -390,7 +390,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
 	pcidev_match.handle = NULL;
 
 	acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
-			    find_matching_device, &pcidev_match, NULL);
+			    find_matching_device, NULL, &pcidev_match, NULL);
 
 	if (!pcidev_match.handle) {
 		printk(KERN_ERR
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 25831c47c579..308e6595110e 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -119,7 +119,6 @@ sn_pcidev_info_get(struct pci_dev *dev)
  * Additionally note that the struct sn_flush_device_war also has to be
  * removed from arch/ia64/sn/include/xtalk/hubdev.h
  */
-static u8 war_implemented = 0;
 
 static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
 			       struct sn_flush_device_common *common)
@@ -128,11 +127,8 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
 	struct sn_flush_device_war *dev_entry;
 	struct ia64_sal_retval isrv = {0,0,0,0};
 
-	if (!war_implemented) {
-		printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
-		       "PROM flush WAR\n");
-		war_implemented = 1;
-	}
+	printk_once(KERN_WARNING
+		"PROM version < 4.50 -- implementing old PROM flush WAR\n");
 
 	war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
 	BUG_ON(!war_list);
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 4c7e74790958..55ac3c4e11d2 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -786,17 +786,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
 		break;
 
 	case SN_HWPERF_GET_OBJ_NODE:
-		if (a.sz != sizeof(u64) || a.arg < 0) {
+		i = a.arg;
+		if (a.sz != sizeof(u64) || i < 0) {
 			r = -EINVAL;
 			goto error;
 		}
 		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
-			if (a.arg >= nobj) {
+			if (i >= nobj) {
 				r = -EINVAL;
 				vfree(objs);
 				goto error;
 			}
-			if (objs[(i = a.arg)].id != a.arg) {
+			if (objs[i].id != a.arg) {
 				for (i = 0; i < nobj; i++) {
 					if (objs[i].id == a.arg)
 						break;
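
The point of the reshuffle is the range check: a.arg is a 64-bit unsigned value, so the old "a.arg < 0" comparison could never fire, and the value was only truncated into the signed index i further down. Copying into i first makes both the sign test and the "i >= nobj" bound meaningful. A compilable illustration of the pitfall:

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	u64 arg = (u64)-1;	/* userspace passes "-1" */
	int i;

	/* old order: unsigned comparison, always false */
	printf("arg < 0        -> %d\n", arg < 0);

	/* new order: truncate into the signed index first, then test */
	i = arg;
	printf("i = arg; i < 0 -> %d (i = %d)\n", i < 0, i);
	return 0;
}
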
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 35b2a27d2e77..efb454534e52 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/bitmap.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/io.h>
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
 static dma_addr_t
 tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
 {
-	int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+	int ps, ps_shift, entry, entries, mapsize;
 	u64 xio_addr, end_xio_addr;
 	struct tioca_common *tioca_common;
 	struct tioca_kernel *tioca_kern;
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
 	map = tioca_kern->ca_pcigart_pagemap;
 	mapsize = tioca_kern->ca_pcigart_entries;
 
-	entry = find_first_zero_bit(map, mapsize);
-	while (entry < mapsize) {
-		last_entry = find_next_bit(map, mapsize, entry);
-
-		if (last_entry - entry >= entries)
-			break;
-
-		entry = find_next_zero_bit(map, mapsize, last_entry);
-	}
-
-	if (entry > mapsize) {
+	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+	if (entry >= mapsize) {
 		kfree(ca_dmamap);
 		goto map_return;
 	}
 
-	for (i = 0; i < entries; i++)
-		set_bit(entry + i, map);
+	bitmap_set(map, entry, entries);
 
 	bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
 
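
The open-coded search also hid an off-by-one: find_first_zero_bit() returns mapsize when the map is full, so the old "entry > mapsize" failure test let a full map slip through. bitmap_find_next_zero_area() centralizes the search for a run of clear bits, returns a value at or past the map size on failure, and pairs with bitmap_set() to claim the run. A compilable userspace analog of the allocation step (naive stand-ins for the kernel bitmap helpers):

#include <stdio.h>

/* naive stand-in for bitmap_find_next_zero_area() on one 64-bit word */
static int find_zero_area(unsigned long long map, int size, int nr)
{
	for (int start = 0; start + nr <= size; start++) {
		int busy = 0;
		for (int i = 0; i < nr; i++)
			if (map & (1ULL << (start + i)))
				busy = 1;
		if (!busy)
			return start;
	}
	return size;	/* failure: at or past the end, like the kernel API */
}

int main(void)
{
	unsigned long long map = 0xf;	/* GART entries 0-3 already in use */
	int mapsize = 64, entries = 3;

	int entry = find_zero_area(map, mapsize, entries);
	if (entry >= mapsize)
		return 1;	/* the kfree(ca_dmamap); goto map_return; path */

	for (int i = 0; i < entries; i++)	/* bitmap_set(map, entry, entries) */
		map |= 1ULL << (entry + i);

	printf("mapped at entries %d-%d, map = %#llx\n",
	       entry, entry + entries - 1, map);
	return 0;
}
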