Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile         |  22
-rw-r--r--  arch/ia64/kernel/acpi.c           |  22
-rw-r--r--  arch/ia64/kernel/asm-offsets.c    |  31
-rw-r--r--  arch/ia64/kernel/entry.S          |   5
-rw-r--r--  arch/ia64/kernel/ivt.S            |   6
-rw-r--r--  arch/ia64/kernel/msi_ia64.c       |  80
-rw-r--r--  arch/ia64/kernel/nr-irqs.c        |   1
-rw-r--r--  arch/ia64/kernel/paravirt.c       |   2
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h  |   4
-rw-r--r--  arch/ia64/kernel/pci-dma.c        | 129
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c    |  46
-rw-r--r--  arch/ia64/kernel/perfmon.c        |   7
-rw-r--r--  arch/ia64/kernel/process.c        |  22
-rw-r--r--  arch/ia64/kernel/ptrace.c         | 112
-rw-r--r--  arch/ia64/kernel/setup.c          |  42
-rw-r--r--  arch/ia64/kernel/signal.c         |   8
16 files changed, 468 insertions(+), 71 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 87fea11aecb7..c381ea954892 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -42,6 +42,10 @@ obj-$(CONFIG_IA64_ESI) += esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 obj-y += esi_stub.o	# must be in kernel proper
 endif
+obj-$(CONFIG_DMAR) += pci-dma.o
+ifeq ($(CONFIG_DMAR), y)
+obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
+endif
 
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
@@ -112,5 +116,23 @@ clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
 ASM_PARAVIRT_OBJS = ivt.o entry.o
 define paravirtualized_native
 AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
+AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
+extra-y += pvchk-$(1)
 endef
 $(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
+
+#
+# Checker for paravirtualizations of privileged operations.
+#
+quiet_cmd_pv_check_sed = PVCHK $@
+define cmd_pv_check_sed
+	sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
+endef
+
+$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
+	$(call if_changed_dep,as_s_S)
+$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
+	$(call if_changed,pv_check_sed)
+$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
+	$(call if_changed,as_o_S)
+.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5d1eb7ee2bf6..0635015d0aaa 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -52,6 +52,7 @@
 #include <asm/numa.h>
 #include <asm/sal.h>
 #include <asm/cyclone.h>
+#include <asm/xen/hypervisor.h>
 
 #define BAD_MADT_ENTRY(entry, end) (					\
 		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||	\
@@ -91,6 +92,9 @@ acpi_get_sysname(void)
 	struct acpi_table_rsdp *rsdp;
 	struct acpi_table_xsdt *xsdt;
 	struct acpi_table_header *hdr;
+#ifdef CONFIG_DMAR
+	u64 i, nentries;
+#endif
 
 	rsdp_phys = acpi_find_rsdp();
 	if (!rsdp_phys) {
@@ -121,7 +125,21 @@ acpi_get_sysname(void)
 			return "uv";
 		else
 			return "sn2";
+	} else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
+		return "xen";
+	}
+
+#ifdef CONFIG_DMAR
+	/* Look for Intel IOMMU */
+	nentries = (hdr->length - sizeof(*hdr)) /
+		sizeof(xsdt->table_offset_entry[0]);
+	for (i = 0; i < nentries; i++) {
+		hdr = __va(xsdt->table_offset_entry[i]);
+		if (strncmp(hdr->signature, ACPI_SIG_DMAR,
+			    sizeof(ACPI_SIG_DMAR) - 1) == 0)
+			return "dig_vtd";
 	}
+#endif
 
 	return "dig";
 #else
@@ -137,6 +155,10 @@ acpi_get_sysname(void)
 	return "uv";
 # elif defined (CONFIG_IA64_DIG)
 	return "dig";
+# elif defined (CONFIG_IA64_XEN_GUEST)
+	return "xen";
+# elif defined(CONFIG_IA64_DIG_VTD)
+	return "dig_vtd";
 # else
 #	error Unknown platform.  Fix acpi.c.
 # endif
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 94c44b1ccfd0..742dbb1d5a4f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,6 +16,9 @@
 #include <asm/sigcontext.h>
 #include <asm/mca.h>
 
+#include <asm/xen/interface.h>
+#include <asm/xen/hypervisor.h>
+
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
@@ -286,4 +289,32 @@ void foo(void)
 	       offsetof (struct itc_jitter_data_t, itc_jitter));
 	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
 	       offsetof (struct itc_jitter_data_t, itc_lastcycle));
+
+#ifdef CONFIG_XEN
+	BLANK();
+
+	DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
+	DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
+
+#define DEFINE_MAPPED_REG_OFS(sym, field) \
+	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
+
+	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
+	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
+	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
+	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
+	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
+	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
+	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
+	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
+	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
+	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
+	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
+	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
+	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
+	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
+	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
+	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
+#endif /* CONFIG_XEN */
 }
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0dd6c1419d8d..7ef0c594f5ed 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -534,6 +534,11 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 	stf.spill [r16]=f10
 	stf.spill [r17]=f11
 	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
+	cmp.lt p6,p0=r8,r0			// check tracehook
+	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
+	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
+	mov r10=0
+(p6)	br.cond.sptk strace_error		// syscall failed ->
 	adds r16=PT(F6)+16,sp
 	adds r17=PT(F7)+16,sp
 	;;
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 416a952b19bd..f675d8e33853 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -580,7 +580,7 @@ ENTRY(dirty_bit)
 	mov b0=r29		// restore b0
 	;;
 	st8 [r17]=r18		// store back updated PTE
-	itc.d r18		// install updated PTE
+	ITC_D(p0, r18, r16)	// install updated PTE
 #endif
 	mov pr=r31,-1		// restore pr
 	RFI
@@ -646,7 +646,7 @@ ENTRY(iaccess_bit)
 	mov b0=r29		// restore b0
 	;;
 	st8 [r17]=r18		// store back updated PTE
-	itc.i r18		// install updated PTE
+	ITC_I(p0, r18, r16)	// install updated PTE
 #endif /* !CONFIG_SMP */
 	mov pr=r31,-1
 	RFI
@@ -698,7 +698,7 @@ ENTRY(daccess_bit)
 	or r18=_PAGE_A,r18	// set the accessed bit
 	;;
 	st8 [r17]=r18		// store back updated PTE
-	itc.d r18		// install updated PTE
+	ITC_D(p0, r18, r16)	// install updated PTE
 #endif
 	mov b0=r29		// restore b0
 	mov pr=r31,-1
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 60c6ef67ebb2..702a09c13238 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
+#include <linux/dmar.h>
 #include <asm/smp.h>
 
 /*
@@ -162,3 +163,82 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 	return ia64_teardown_msi_irq(irq);
 }
+
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+	struct irq_cfg *cfg = irq_cfg + irq;
+	struct msi_msg msg;
+	int cpu = first_cpu(mask);
+
+
+	if (!cpu_online(cpu))
+		return;
+
+	if (irq_prepare_move(irq, cpu))
+		return;
+
+	dmar_msi_read(irq, &msg);
+
+	msg.data &= ~MSI_DATA_VECTOR_MASK;
+	msg.data |= MSI_DATA_VECTOR(cfg->vector);
+	msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
+	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+
+	dmar_msi_write(irq, &msg);
+	irq_desc[irq].affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+	.name = "DMAR_MSI",
+	.unmask = dmar_msi_unmask,
+	.mask = dmar_msi_mask,
+	.ack = ia64_ack_msi_irq,
+#ifdef CONFIG_SMP
+	.set_affinity = dmar_msi_set_affinity,
+#endif
+	.retrigger = ia64_msi_retrigger_irq,
+};
+
+static int
+msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+{
+	struct irq_cfg *cfg = irq_cfg + irq;
+	unsigned dest;
+	cpumask_t mask;
+
+	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	dest = cpu_physical_id(first_cpu(mask));
+
+	msg->address_hi = 0;
+	msg->address_lo =
+		MSI_ADDR_HEADER |
+		MSI_ADDR_DESTMODE_PHYS |
+		MSI_ADDR_REDIRECTION_CPU |
+		MSI_ADDR_DESTID_CPU(dest);
+
+	msg->data =
+		MSI_DATA_TRIGGER_EDGE |
+		MSI_DATA_LEVEL_ASSERT |
+		MSI_DATA_DELIVERY_FIXED |
+		MSI_DATA_VECTOR(cfg->vector);
+	return 0;
+}
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+	int ret;
+	struct msi_msg msg;
+
+	ret = msi_compose_msg(NULL, irq, &msg);
+	if (ret < 0)
+		return ret;
+	dmar_msi_write(irq, &msg);
+	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+				      "edge");
+	return 0;
+}
+#endif /* CONFIG_DMAR */
+
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index 8273afc32db8..ee564575148e 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,6 +10,7 @@
 #include <linux/kbuild.h>
 #include <linux/threads.h>
 #include <asm/native/irq.h>
+#include <asm/xen/irq.h>
 
 void foo(void)
 {
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index afaf5b9a2cf0..de35d8e8b7d2 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -332,7 +332,7 @@ ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 
 struct pv_iosapic_ops pv_iosapic_ops = {
 	.pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
-	.get_irq_chip = ia64_native_iosapic_get_irq_chip,
+	.__get_irq_chip = ia64_native_iosapic_get_irq_chip,
 
 	.__read = ia64_native_iosapic_read,
 	.__write = ia64_native_iosapic_write,
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 5cad6fb2ed19..64d6d810c64b 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -20,7 +20,9 @@
  *
  */
 
-#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
+#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
+#include <asm/native/pvchk_inst.h>
+#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
 #include <asm/xen/inst.h>
 #include <asm/xen/minstate.h>
 #else
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
new file mode 100644
index 000000000000..10a75b557650
--- /dev/null
+++ b/arch/ia64/kernel/pci-dma.c
@@ -0,0 +1,129 @@
+/*
+ * Dynamic DMA mapping support.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/dmar.h>
+#include <asm/iommu.h>
+#include <asm/machvec.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/machvec.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_DMAR
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/iommu.h>
+
+dma_addr_t bad_dma_address __read_mostly;
+EXPORT_SYMBOL(bad_dma_address);
+
+static int iommu_sac_force __read_mostly;
+
+int no_iommu __read_mostly;
+#ifdef CONFIG_IOMMU_DEBUG
+int force_iommu __read_mostly = 1;
+#else
+int force_iommu __read_mostly;
+#endif
+
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly;
+
+/* Dummy device used for NULL arguments (normally ISA). Better would
+   be probably a smaller DMA mask, but this is bug-to-bug compatible
+   to i386. */
+struct device fallback_dev = {
+	.bus_id = "fallback device",
+	.coherent_dma_mask = DMA_32BIT_MASK,
+	.dma_mask = &fallback_dev.coherent_dma_mask,
+};
+
+void __init pci_iommu_alloc(void)
+{
+	/*
+	 * The order of these functions is important for
+	 * fall-back/fail-over reasons
+	 */
+	detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+	pci_swiotlb_init();
+#endif
+}
+
+static int __init pci_iommu_init(void)
+{
+	if (iommu_detected)
+		intel_iommu_init();
+
+	return 0;
+}
+
+/* Must execute after PCI subsystem */
+fs_initcall(pci_iommu_init);
+
+void pci_iommu_shutdown(void)
+{
+	return;
+}
+
+void __init
+iommu_dma_init(void)
+{
+	return;
+}
+
+struct dma_mapping_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+int iommu_dma_supported(struct device *dev, u64 mask)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+#ifdef CONFIG_PCI
+	if (mask > 0xffffffff && forbid_dac > 0) {
+		dev_info(dev, "Disallowing DAC for device\n");
+		return 0;
+	}
+#endif
+
+	if (ops->dma_supported_op)
+		return ops->dma_supported_op(dev, mask);
+
+	/* Copied from i386. Doesn't make much sense, because it will
+	   only work for pci_alloc_coherent.
+	   The caller just has to use GFP_DMA in this case. */
+	if (mask < DMA_24BIT_MASK)
+		return 0;
+
+	/* Tell the device to use SAC when IOMMU force is on.  This
+	   allows the driver to use cheaper accesses in some cases.
+
+	   Problem with this is that if we overflow the IOMMU area and
+	   return DAC as fallback address the device may not handle it
+	   correctly.
+
+	   As a special case some controllers have a 39bit address
+	   mode that is as efficient as 32bit (aic79xx). Don't force
+	   SAC for these.  Assume all masks <= 40 bits are of this
+	   type. Normally this doesn't make any difference, but gives
+	   more gentle handling of IOMMU overflow. */
+	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
+		dev_info(dev, "Force SAC with mask %lx\n", mask);
+		return 0;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(iommu_dma_supported);
+
+#endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
new file mode 100644
index 000000000000..16c50516dbc1
--- /dev/null
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -0,0 +1,46 @@
+/* Glue code to lib/swiotlb.c */
+
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+#include <asm/iommu.h>
+#include <asm/machvec.h>
+
+int swiotlb __read_mostly;
+EXPORT_SYMBOL(swiotlb);
+
+struct dma_mapping_ops swiotlb_dma_ops = {
+	.mapping_error = swiotlb_dma_mapping_error,
+	.alloc_coherent = swiotlb_alloc_coherent,
+	.free_coherent = swiotlb_free_coherent,
+	.map_single = swiotlb_map_single,
+	.unmap_single = swiotlb_unmap_single,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
+	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = swiotlb_sync_sg_for_device,
+	.map_sg = swiotlb_map_sg,
+	.unmap_sg = swiotlb_unmap_sg,
+	.dma_supported_op = swiotlb_dma_supported,
+};
+
+void __init pci_swiotlb_init(void)
+{
+	if (!iommu_detected) {
+#ifdef CONFIG_IA64_GENERIC
+		swiotlb = 1;
+		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
+		machvec_init("dig");
+		swiotlb_init();
+		dma_ops = &swiotlb_dma_ops;
+#else
+		panic("Unable to find Intel IOMMU");
+#endif
+	}
+}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index fc8f3509df27..ada4605d1223 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,7 @@
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
 #include <linux/completion.h>
+#include <linux/tracehook.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -3684,7 +3685,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		PFM_SET_WORK_PENDING(task, 1);
 
-		tsk_set_notify_resume(task);
+		set_notify_resume(task);
 
 		/*
 		 * XXX: send reschedule if task runs on another CPU
@@ -5044,8 +5045,6 @@ pfm_handle_work(void)
 
 	PFM_SET_WORK_PENDING(current, 0);
 
-	tsk_clear_notify_resume(current);
-
 	regs = task_pt_regs(current);
 
 	/*
@@ -5414,7 +5413,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		 * when coming from ctxsw, current still points to the
 		 * previous task, therefore we must work with task and not current.
 		 */
-		tsk_set_notify_resume(task);
+		set_notify_resume(task);
 	}
 	/*
 	 * defer until state is changed (shorten spin window). the context is locked
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 3ab8373103ec..c57162705147 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/kdebug.h>
 #include <linux/utsname.h>
+#include <linux/tracehook.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -160,21 +161,6 @@ show_regs (struct pt_regs *regs)
 		show_stack(NULL, NULL);
 }
 
-void tsk_clear_notify_resume(struct task_struct *tsk)
-{
-#ifdef CONFIG_PERFMON
-	if (tsk->thread.pfm_needs_checking)
-		return;
-#endif
-	if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE))
-		return;
-	clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
-}
-
-/*
- * do_notify_resume_user():
- *	Called from notify_resume_user at entry.S, with interrupts disabled.
- */
 void
 do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
@@ -203,6 +189,11 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
 		ia64_do_signal(scr, in_syscall);
 	}
 
+	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(&scr->pt);
+	}
+
 	/* copy user rbs to kernel rbs */
 	if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
 		local_irq_enable();	/* force interrupt enable */
@@ -251,7 +242,6 @@ default_idle (void)
 /* We don't actually take CPU down, just spin without interrupts. */
 static inline void play_dead(void)
 {
-	extern void ia64_cpu_local_tick (void);
 	unsigned int this_cpu = smp_processor_id();
 
 	/* Ack it */
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 2a9943b5947f..92c9689b7d97 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -22,6 +22,7 @@
 #include <linux/signal.h>
 #include <linux/regset.h>
 #include <linux/elf.h>
+#include <linux/tracehook.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -603,7 +604,7 @@ void ia64_ptrace_stop(void)
 {
 	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
 		return;
-	tsk_set_notify_resume(current);
+	set_notify_resume(current);
 	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
 }
 
@@ -613,7 +614,6 @@ void ia64_ptrace_stop(void)
 void ia64_sync_krbs(void)
 {
 	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
-	tsk_clear_notify_resume(current);
 
 	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 }
@@ -644,7 +644,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	spin_lock_irq(&child->sighand->siglock);
 	if (child->state == TASK_STOPPED &&
 	    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
-		tsk_set_notify_resume(child);
+		set_notify_resume(child);
 
 		child->state = TASK_TRACED;
 		stopped = 1;
@@ -1232,37 +1232,16 @@ arch_ptrace (struct task_struct *child, long request, long addr, long data)
 }
 
 
-static void
-syscall_trace (void)
-{
-	/*
-	 * The 0x80 provides a way for the tracing parent to
-	 * distinguish between a syscall stop and SIGTRAP delivery.
-	 */
-	ptrace_notify(SIGTRAP
-		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
-
-	/*
-	 * This isn't the same as continuing with a signal, but it
-	 * will do for normal use.  strace only continues with a
-	 * signal if the stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
-}
-
 /* "asmlinkage" so the input arguments are preserved... */
 
-asmlinkage void
+asmlinkage long
 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 		     long arg4, long arg5, long arg6, long arg7,
 		     struct pt_regs regs)
 {
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		if (tracehook_report_syscall_entry(&regs))
+			return -ENOSYS;
 
 	/* copy user rbs to kernel rbs */
 	if (test_thread_flag(TIF_RESTORE_RSE))
@@ -1283,6 +1262,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
 	}
 
+	return 0;
 }
 
 /* "asmlinkage" so the input arguments are preserved... */
@@ -1292,6 +1272,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		     long arg4, long arg5, long arg6, long arg7,
 		     struct pt_regs regs)
 {
+	int step;
+
 	if (unlikely(current->audit_context)) {
 		int success = AUDITSC_RESULT(regs.r10);
 		long result = regs.r8;
@@ -1301,10 +1283,9 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		audit_syscall_exit(success, result);
 	}
 
-	if ((test_thread_flag(TIF_SYSCALL_TRACE)
-	    || test_thread_flag(TIF_SINGLESTEP))
-	    && (current->ptrace & PT_PTRACED))
-		syscall_trace();
+	step = test_thread_flag(TIF_SINGLESTEP);
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(&regs, step);
 
 	/* copy user rbs to kernel rbs */
 	if (test_thread_flag(TIF_RESTORE_RSE))
@@ -1940,7 +1921,7 @@ gpregs_writeback(struct task_struct *target,
 {
 	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
 		return 0;
-	tsk_set_notify_resume(target);
+	set_notify_resume(target);
 	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
 			      NULL, NULL);
 }
@@ -2199,3 +2180,68 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
 #endif
 	return &user_ia64_view;
 }
+
+struct syscall_get_set_args {
+	unsigned int i;
+	unsigned int n;
+	unsigned long *args;
+	struct pt_regs *regs;
+	int rw;
+};
+
+static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
+{
+	struct syscall_get_set_args *args = data;
+	struct pt_regs *pt = args->regs;
+	unsigned long *krbs, cfm, ndirty;
+	int i, count;
+
+	if (unw_unwind_to_user(info) < 0)
+		return;
+
+	cfm = pt->cr_ifs;
+	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
+	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
+
+	count = 0;
+	if (in_syscall(pt))
+		count = min_t(int, args->n, cfm & 0x7f);
+
+	for (i = 0; i < count; i++) {
+		if (args->rw)
+			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
+				args->args[i];
+		else
+			args->args[i] = *ia64_rse_skip_regs(krbs,
+				ndirty + i + args->i);
+	}
+
+	if (!args->rw) {
+		while (i < args->n) {
+			args->args[i] = 0;
+			i++;
+		}
+	}
+}
+
+void ia64_syscall_get_set_arguments(struct task_struct *task,
+	struct pt_regs *regs, unsigned int i, unsigned int n,
+	unsigned long *args, int rw)
+{
+	struct syscall_get_set_args data = {
+		.i = i,
+		.n = n,
+		.args = args,
+		.regs = regs,
+		.rw = rw,
+	};
+
+	if (task == current)
+		unw_init_running(syscall_get_set_args_cb, &data);
+	else {
+		struct unw_frame_info ufi;
+		memset(&ufi, 0, sizeof(ufi));
+		unw_init_from_blocked_task(&ufi, task);
+		syscall_get_set_args_cb(&ufi, &data);
+	}
+}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 916ba898237f..ae7911702bf8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -116,6 +116,13 @@ unsigned int num_io_spaces;
  */
 #define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
 unsigned long ia64_i_cache_stride_shift = ~0;
+/*
+ * "clflush_cache_range()" needs to know what processor dependent stride size to
+ * use when it flushes cache lines including both d-cache and i-cache.
+ */
+/* Safest way to go: 32 bytes by 32 bytes */
+#define	CACHE_STRIDE_SHIFT	5
+unsigned long ia64_cache_stride_shift = ~0;
 
 /*
  * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
@@ -852,13 +859,14 @@ setup_per_cpu_areas (void)
 }
 
 /*
- * Calculate the max. cache line size.
+ * Do the following calculations:
  *
- * In addition, the minimum of the i-cache stride sizes is calculated for
- * "flush_icache_range()".
+ * 1. the max. cache line size.
+ * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
+ * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
  */
 static void __cpuinit
-get_max_cacheline_size (void)
+get_cache_info(void)
 {
 	unsigned long line_size, max = 1;
 	u64 l, levels, unique_caches;
@@ -872,12 +880,14 @@ get_max_cacheline_size (void)
 		max = SMP_CACHE_BYTES;
 		/* Safest setup for "flush_icache_range()" */
 		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
+		/* Safest setup for "clflush_cache_range()" */
+		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
 		goto out;
 	}
 
 	for (l = 0; l < levels; ++l) {
-		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
-						    &cci);
+		/* cache_type (data_or_unified)=2 */
+		status = ia64_pal_cache_config_info(l, 2, &cci);
 		if (status != 0) {
 			printk(KERN_ERR
 			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
@@ -885,15 +895,21 @@ get_max_cacheline_size (void)
 			max = SMP_CACHE_BYTES;
 			/* The safest setup for "flush_icache_range()" */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
+			/* The safest setup for "clflush_cache_range()" */
+			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
 			cci.pcci_unified = 1;
+		} else {
+			if (cci.pcci_stride < ia64_cache_stride_shift)
+				ia64_cache_stride_shift = cci.pcci_stride;
+
+			line_size = 1 << cci.pcci_line_size;
+			if (line_size > max)
+				max = line_size;
 		}
-		line_size = 1 << cci.pcci_line_size;
-		if (line_size > max)
-			max = line_size;
+
 		if (!cci.pcci_unified) {
-			status = ia64_pal_cache_config_info(l,
-				/* cache_type (instruction)= */ 1,
-				&cci);
+			/* cache_type (instruction)=1*/
+			status = ia64_pal_cache_config_info(l, 1, &cci);
 			if (status != 0) {
 				printk(KERN_ERR
 				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
@@ -947,7 +963,7 @@ cpu_init (void)
 	}
 #endif
 
-	get_max_cacheline_size();
+	get_cache_info();
 
 	/*
 	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 19c5a78636fc..e12500a9c443 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
@@ -439,6 +440,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
 	sigaddset(&current->blocked, sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	/*
+	 * Let tracing know that we've done the handler setup.
+	 */
+	tracehook_signal_handler(sig, info, ka, &scr->pt,
+				 test_thread_flag(TIF_SINGLESTEP));
+
 	return 1;
 }
444 452