Diffstat (limited to 'arch/ia64/kernel')
35 files changed, 214 insertions(+), 614 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 6b7edcab0cb5..db10b1e378b0 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -8,19 +8,13 @@ endif | |||
8 | 8 | ||
9 | extra-y := head.o init_task.o vmlinux.lds | 9 | extra-y := head.o init_task.o vmlinux.lds |
10 | 10 | ||
11 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 11 | obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
12 | irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ | 12 | irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ |
13 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 13 | salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
14 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o | 14 | unwind.o mca.o mca_asm.o topology.o dma-mapping.o |
15 | 15 | ||
16 | obj-$(CONFIG_ACPI) += acpi.o acpi-ext.o | ||
16 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o | 17 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o |
17 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o | ||
18 | obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o | ||
19 | obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o | ||
20 | |||
21 | ifneq ($(CONFIG_ACPI_PROCESSOR),) | ||
22 | obj-y += acpi-processor.o | ||
23 | endif | ||
24 | 18 | ||
25 | obj-$(CONFIG_IA64_PALINFO) += palinfo.o | 19 | obj-$(CONFIG_IA64_PALINFO) += palinfo.o |
26 | obj-$(CONFIG_IOSAPIC) += iosapic.o | 20 | obj-$(CONFIG_IOSAPIC) += iosapic.o |
@@ -51,6 +45,8 @@ endif | |||
51 | obj-$(CONFIG_DMAR) += pci-dma.o | 45 | obj-$(CONFIG_DMAR) += pci-dma.o |
52 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | 46 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o |
53 | 47 | ||
48 | obj-$(CONFIG_BINFMT_ELF) += elfcore.o | ||
49 | |||
54 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. | 50 | # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. |
55 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 | 51 | CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 |
56 | 52 | ||
@@ -81,17 +77,14 @@ define cmd_nr_irqs | |||
81 | endef | 77 | endef |
82 | 78 | ||
83 | # We use internal kbuild rules to avoid the "is up to date" message from make | 79 | # We use internal kbuild rules to avoid the "is up to date" message from make |
84 | arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \ | 80 | arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c |
85 | $(wildcard $(srctree)/include/asm-ia64/*/irq.h) | ||
86 | $(Q)mkdir -p $(dir $@) | 81 | $(Q)mkdir -p $(dir $@) |
87 | $(call if_changed_dep,cc_s_c) | 82 | $(call if_changed_dep,cc_s_c) |
88 | 83 | ||
89 | include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s | 84 | include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s |
90 | $(Q)mkdir -p $(dir $@) | 85 | $(Q)mkdir -p $(dir $@) |
91 | $(call cmd,nr_irqs) | 86 | $(call cmd,nr_irqs) |
92 | 87 | ||
93 | clean-files += $(objtree)/include/asm-ia64/nr-irqs.h | ||
94 | |||
95 | # | 88 | # |
96 | # native ivt.S, entry.S and fsys.S | 89 | # native ivt.S, entry.S and fsys.S |
97 | # | 90 | # |
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index b7515bc808a8..8b9318d311a0 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/acpi.h> | 14 | #include <linux/acpi.h> |
14 | 15 | ||
15 | #include <asm/acpi-ext.h> | 16 | #include <asm/acpi-ext.h> |
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
deleted file mode 100644
index dbda7bde6112..000000000000
--- a/arch/ia64/kernel/acpi-processor.c
+++ /dev/null
@@ -1,85 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ia64/kernel/acpi-processor.c | ||
3 | * | ||
4 | * Copyright (C) 2005 Intel Corporation | ||
5 | * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
6 | * - Added _PDC for platforms with Intel CPUs | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/acpi.h> | ||
13 | |||
14 | #include <acpi/processor.h> | ||
15 | #include <asm/acpi.h> | ||
16 | |||
17 | static void init_intel_pdc(struct acpi_processor *pr) | ||
18 | { | ||
19 | struct acpi_object_list *obj_list; | ||
20 | union acpi_object *obj; | ||
21 | u32 *buf; | ||
22 | |||
23 | /* allocate and initialize pdc. It will be used later. */ | ||
24 | obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL); | ||
25 | if (!obj_list) { | ||
26 | printk(KERN_ERR "Memory allocation error\n"); | ||
27 | return; | ||
28 | } | ||
29 | |||
30 | obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL); | ||
31 | if (!obj) { | ||
32 | printk(KERN_ERR "Memory allocation error\n"); | ||
33 | kfree(obj_list); | ||
34 | return; | ||
35 | } | ||
36 | |||
37 | buf = kmalloc(12, GFP_KERNEL); | ||
38 | if (!buf) { | ||
39 | printk(KERN_ERR "Memory allocation error\n"); | ||
40 | kfree(obj); | ||
41 | kfree(obj_list); | ||
42 | return; | ||
43 | } | ||
44 | |||
45 | buf[0] = ACPI_PDC_REVISION_ID; | ||
46 | buf[1] = 1; | ||
47 | buf[2] = ACPI_PDC_EST_CAPABILITY_SMP; | ||
48 | /* | ||
49 | * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so | ||
50 | * that OSPM is capable of native ACPI throttling software | ||
51 | * coordination using BIOS supplied _TSD info. | ||
52 | */ | ||
53 | buf[2] |= ACPI_PDC_SMP_T_SWCOORD; | ||
54 | |||
55 | obj->type = ACPI_TYPE_BUFFER; | ||
56 | obj->buffer.length = 12; | ||
57 | obj->buffer.pointer = (u8 *) buf; | ||
58 | obj_list->count = 1; | ||
59 | obj_list->pointer = obj; | ||
60 | pr->pdc = obj_list; | ||
61 | |||
62 | return; | ||
63 | } | ||
64 | |||
65 | /* Initialize _PDC data based on the CPU vendor */ | ||
66 | void arch_acpi_processor_init_pdc(struct acpi_processor *pr) | ||
67 | { | ||
68 | pr->pdc = NULL; | ||
69 | init_intel_pdc(pr); | ||
70 | return; | ||
71 | } | ||
72 | |||
73 | EXPORT_SYMBOL(arch_acpi_processor_init_pdc); | ||
74 | |||
75 | void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr) | ||
76 | { | ||
77 | if (pr->pdc) { | ||
78 | kfree(pr->pdc->pointer->buffer.pointer); | ||
79 | kfree(pr->pdc->pointer); | ||
80 | kfree(pr->pdc); | ||
81 | pr->pdc = NULL; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc); | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..4d1a7e9314cf 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -44,6 +44,8 @@ | |||
44 | #include <linux/efi.h> | 44 | #include <linux/efi.h> |
45 | #include <linux/mmzone.h> | 45 | #include <linux/mmzone.h> |
46 | #include <linux/nodemask.h> | 46 | #include <linux/nodemask.h> |
47 | #include <linux/slab.h> | ||
48 | #include <acpi/processor.h> | ||
47 | #include <asm/io.h> | 49 | #include <asm/io.h> |
48 | #include <asm/iosapic.h> | 50 | #include <asm/iosapic.h> |
49 | #include <asm/machvec.h> | 51 | #include <asm/machvec.h> |
@@ -60,11 +62,6 @@ | |||
60 | 62 | ||
61 | #define PREFIX "ACPI: " | 63 | #define PREFIX "ACPI: " |
62 | 64 | ||
63 | void (*pm_idle) (void); | ||
64 | EXPORT_SYMBOL(pm_idle); | ||
65 | void (*pm_power_off) (void); | ||
66 | EXPORT_SYMBOL(pm_power_off); | ||
67 | |||
68 | u32 acpi_rsdt_forced; | 65 | u32 acpi_rsdt_forced; |
69 | unsigned int acpi_cpei_override; | 66 | unsigned int acpi_cpei_override; |
70 | unsigned int acpi_cpei_phys_cpuid; | 67 | unsigned int acpi_cpei_phys_cpuid; |
@@ -83,12 +80,10 @@ static unsigned long __init acpi_find_rsdp(void) | |||
83 | "v1.0/r0.71 tables no longer supported\n"); | 80 | "v1.0/r0.71 tables no longer supported\n"); |
84 | return rsdp_phys; | 81 | return rsdp_phys; |
85 | } | 82 | } |
86 | #endif | ||
87 | 83 | ||
88 | const char __init * | 84 | const char __init * |
89 | acpi_get_sysname(void) | 85 | acpi_get_sysname(void) |
90 | { | 86 | { |
91 | #ifdef CONFIG_IA64_GENERIC | ||
92 | unsigned long rsdp_phys; | 87 | unsigned long rsdp_phys; |
93 | struct acpi_table_rsdp *rsdp; | 88 | struct acpi_table_rsdp *rsdp; |
94 | struct acpi_table_xsdt *xsdt; | 89 | struct acpi_table_xsdt *xsdt; |
@@ -143,30 +138,8 @@ acpi_get_sysname(void) | |||
143 | #endif | 138 | #endif |
144 | 139 | ||
145 | return "dig"; | 140 | return "dig"; |
146 | #else | ||
147 | # if defined (CONFIG_IA64_HP_SIM) | ||
148 | return "hpsim"; | ||
149 | # elif defined (CONFIG_IA64_HP_ZX1) | ||
150 | return "hpzx1"; | ||
151 | # elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) | ||
152 | return "hpzx1_swiotlb"; | ||
153 | # elif defined (CONFIG_IA64_SGI_SN2) | ||
154 | return "sn2"; | ||
155 | # elif defined (CONFIG_IA64_SGI_UV) | ||
156 | return "uv"; | ||
157 | # elif defined (CONFIG_IA64_DIG) | ||
158 | return "dig"; | ||
159 | # elif defined (CONFIG_IA64_XEN_GUEST) | ||
160 | return "xen"; | ||
161 | # elif defined(CONFIG_IA64_DIG_VTD) | ||
162 | return "dig_vtd"; | ||
163 | # else | ||
164 | # error Unknown platform. Fix acpi.c. | ||
165 | # endif | ||
166 | #endif | ||
167 | } | 141 | } |
168 | 142 | #endif /* CONFIG_IA64_GENERIC */ | |
169 | #ifdef CONFIG_ACPI | ||
170 | 143 | ||
171 | #define ACPI_MAX_PLATFORM_INTERRUPTS 256 | 144 | #define ACPI_MAX_PLATFORM_INTERRUPTS 256 |
172 | 145 | ||
@@ -702,11 +675,23 @@ int __init early_acpi_boot_init(void) | |||
702 | printk(KERN_ERR PREFIX | 675 | printk(KERN_ERR PREFIX |
703 | "Error parsing MADT - no LAPIC entries\n"); | 676 | "Error parsing MADT - no LAPIC entries\n"); |
704 | 677 | ||
678 | #ifdef CONFIG_SMP | ||
679 | if (available_cpus == 0) { | ||
680 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | ||
681 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | ||
682 | smp_boot_data.cpu_phys_id[available_cpus] = | ||
683 | hard_smp_processor_id(); | ||
684 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
685 | } | ||
686 | smp_boot_data.cpu_count = available_cpus; | ||
687 | #endif | ||
688 | /* Make boot-up look pretty */ | ||
689 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, | ||
690 | total_cpus); | ||
691 | |||
705 | return 0; | 692 | return 0; |
706 | } | 693 | } |
707 | 694 | ||
708 | |||
709 | |||
710 | int __init acpi_boot_init(void) | 695 | int __init acpi_boot_init(void) |
711 | { | 696 | { |
712 | 697 | ||
@@ -769,18 +754,8 @@ int __init acpi_boot_init(void) | |||
769 | if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) | 754 | if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) |
770 | printk(KERN_ERR PREFIX "Can't find FADT\n"); | 755 | printk(KERN_ERR PREFIX "Can't find FADT\n"); |
771 | 756 | ||
757 | #ifdef CONFIG_ACPI_NUMA | ||
772 | #ifdef CONFIG_SMP | 758 | #ifdef CONFIG_SMP |
773 | if (available_cpus == 0) { | ||
774 | printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); | ||
775 | printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); | ||
776 | smp_boot_data.cpu_phys_id[available_cpus] = | ||
777 | hard_smp_processor_id(); | ||
778 | available_cpus = 1; /* We've got at least one of these, no? */ | ||
779 | } | ||
780 | smp_boot_data.cpu_count = available_cpus; | ||
781 | |||
782 | smp_build_cpu_map(); | ||
783 | # ifdef CONFIG_ACPI_NUMA | ||
784 | if (srat_num_cpus == 0) { | 759 | if (srat_num_cpus == 0) { |
785 | int cpu, i = 1; | 760 | int cpu, i = 1; |
786 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) | 761 | for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) |
@@ -789,14 +764,9 @@ int __init acpi_boot_init(void) | |||
789 | node_cpuid[i++].phys_id = | 764 | node_cpuid[i++].phys_id = |
790 | smp_boot_data.cpu_phys_id[cpu]; | 765 | smp_boot_data.cpu_phys_id[cpu]; |
791 | } | 766 | } |
792 | # endif | ||
793 | #endif | 767 | #endif |
794 | #ifdef CONFIG_ACPI_NUMA | ||
795 | build_cpu_to_node_map(); | 768 | build_cpu_to_node_map(); |
796 | #endif | 769 | #endif |
797 | /* Make boot-up look pretty */ | ||
798 | printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, | ||
799 | total_cpus); | ||
800 | return 0; | 770 | return 0; |
801 | } | 771 | } |
802 | 772 | ||
@@ -884,8 +854,8 @@ __init void prefill_possible_map(void) | |||
884 | 854 | ||
885 | possible = available_cpus + additional_cpus; | 855 | possible = available_cpus + additional_cpus; |
886 | 856 | ||
887 | if (possible > NR_CPUS) | 857 | if (possible > nr_cpu_ids) |
888 | possible = NR_CPUS; | 858 | possible = nr_cpu_ids; |
889 | 859 | ||
890 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | 860 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", |
891 | possible, max((possible - available_cpus), 0)); | 861 | possible, max((possible - available_cpus), 0)); |
@@ -939,6 +909,8 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
939 | cpu_set(cpu, cpu_present_map); | 909 | cpu_set(cpu, cpu_present_map); |
940 | ia64_cpu_to_sapicid[cpu] = physid; | 910 | ia64_cpu_to_sapicid[cpu] = physid; |
941 | 911 | ||
912 | acpi_processor_set_pdc(handle); | ||
913 | |||
942 | *pcpu = cpu; | 914 | *pcpu = cpu; |
943 | return (0); | 915 | return (0); |
944 | } | 916 | } |
@@ -1063,5 +1035,3 @@ void acpi_restore_state_mem(void) {} | |||
1063 | * do_suspend_lowlevel() | 1035 | * do_suspend_lowlevel() |
1064 | */ | 1036 | */ |
1065 | void do_suspend_lowlevel(void) {} | 1037 | void do_suspend_lowlevel(void) {} |
1066 | |||
1067 | #endif /* CONFIG_ACPI */ | ||
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c
index f3802ae89b10..96a9d18ff4c4 100644
--- a/arch/ia64/kernel/audit.c
+++ b/arch/ia64/kernel/audit.c
@@ -30,20 +30,11 @@ static unsigned signal_class[] = { | |||
30 | 30 | ||
31 | int audit_classify_arch(int arch) | 31 | int audit_classify_arch(int arch) |
32 | { | 32 | { |
33 | #ifdef CONFIG_IA32_SUPPORT | ||
34 | if (arch == AUDIT_ARCH_I386) | ||
35 | return 1; | ||
36 | #endif | ||
37 | return 0; | 33 | return 0; |
38 | } | 34 | } |
39 | 35 | ||
40 | int audit_classify_syscall(int abi, unsigned syscall) | 36 | int audit_classify_syscall(int abi, unsigned syscall) |
41 | { | 37 | { |
42 | #ifdef CONFIG_IA32_SUPPORT | ||
43 | extern int ia32_classify_syscall(unsigned); | ||
44 | if (abi == AUDIT_ARCH_I386) | ||
45 | return ia32_classify_syscall(syscall); | ||
46 | #endif | ||
47 | switch(syscall) { | 38 | switch(syscall) { |
48 | case __NR_open: | 39 | case __NR_open: |
49 | return 2; | 40 | return 2; |
@@ -58,18 +49,6 @@ int audit_classify_syscall(int abi, unsigned syscall) | |||
58 | 49 | ||
59 | static int __init audit_classes_init(void) | 50 | static int __init audit_classes_init(void) |
60 | { | 51 | { |
61 | #ifdef CONFIG_IA32_SUPPORT | ||
62 | extern __u32 ia32_dir_class[]; | ||
63 | extern __u32 ia32_write_class[]; | ||
64 | extern __u32 ia32_read_class[]; | ||
65 | extern __u32 ia32_chattr_class[]; | ||
66 | extern __u32 ia32_signal_class[]; | ||
67 | audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); | ||
68 | audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); | ||
69 | audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); | ||
70 | audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); | ||
71 | audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); | ||
72 | #endif | ||
73 | audit_register_class(AUDIT_CLASS_WRITE, write_class); | 52 | audit_register_class(AUDIT_CLASS_WRITE, write_class); |
74 | audit_register_class(AUDIT_CLASS_READ, read_class); | 53 | audit_register_class(AUDIT_CLASS_READ, read_class); |
75 | audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); | 54 | audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); |
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 7b435451b3dc..b0b4e6e710f2 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/cpufreq.h> | 16 | #include <linux/cpufreq.h> |
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 6631a9dfafdc..b942f4032d7a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) | |||
239 | #ifdef CONFIG_SYSCTL | 239 | #ifdef CONFIG_SYSCTL |
240 | static ctl_table kdump_ctl_table[] = { | 240 | static ctl_table kdump_ctl_table[] = { |
241 | { | 241 | { |
242 | .ctl_name = CTL_UNNUMBERED, | ||
243 | .procname = "kdump_on_init", | 242 | .procname = "kdump_on_init", |
244 | .data = &kdump_on_init, | 243 | .data = &kdump_on_init, |
245 | .maxlen = sizeof(int), | 244 | .maxlen = sizeof(int), |
246 | .mode = 0644, | 245 | .mode = 0644, |
247 | .proc_handler = &proc_dointvec, | 246 | .proc_handler = proc_dointvec, |
248 | }, | 247 | }, |
249 | { | 248 | { |
250 | .ctl_name = CTL_UNNUMBERED, | ||
251 | .procname = "kdump_on_fatal_mca", | 249 | .procname = "kdump_on_fatal_mca", |
252 | .data = &kdump_on_fatal_mca, | 250 | .data = &kdump_on_fatal_mca, |
253 | .maxlen = sizeof(int), | 251 | .maxlen = sizeof(int), |
254 | .mode = 0644, | 252 | .mode = 0644, |
255 | .proc_handler = &proc_dointvec, | 253 | .proc_handler = proc_dointvec, |
256 | }, | 254 | }, |
257 | { .ctl_name = 0 } | 255 | { } |
258 | }; | 256 | }; |
259 | 257 | ||
260 | static ctl_table sys_table[] = { | 258 | static ctl_table sys_table[] = { |
261 | { | 259 | { |
262 | .ctl_name = CTL_KERN, | ||
263 | .procname = "kernel", | 260 | .procname = "kernel", |
264 | .mode = 0555, | 261 | .mode = 0555, |
265 | .child = kdump_ctl_table, | 262 | .child = kdump_ctl_table, |
266 | }, | 263 | }, |
267 | { .ctl_name = 0 } | 264 | { } |
268 | }; | 265 | }; |
269 | #endif | 266 | #endif |
270 | 267 | ||
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index c745d0aeb6e0..a0f001928502 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/slab.h> | ||
29 | #include <linux/time.h> | 30 | #include <linux/time.h> |
30 | #include <linux/efi.h> | 31 | #include <linux/efi.h> |
31 | #include <linux/kexec.h> | 32 | #include <linux/kexec.h> |
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
new file mode 100644
index 000000000000..bac1639bc320
--- /dev/null
+++ b/arch/ia64/kernel/elfcore.c
@@ -0,0 +1,80 @@ | |||
1 | #include <linux/elf.h> | ||
2 | #include <linux/coredump.h> | ||
3 | #include <linux/fs.h> | ||
4 | #include <linux/mm.h> | ||
5 | |||
6 | #include <asm/elf.h> | ||
7 | |||
8 | |||
9 | Elf64_Half elf_core_extra_phdrs(void) | ||
10 | { | ||
11 | return GATE_EHDR->e_phnum; | ||
12 | } | ||
13 | |||
14 | int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, | ||
15 | unsigned long limit) | ||
16 | { | ||
17 | const struct elf_phdr *const gate_phdrs = | ||
18 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); | ||
19 | int i; | ||
20 | Elf64_Off ofs = 0; | ||
21 | |||
22 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { | ||
23 | struct elf_phdr phdr = gate_phdrs[i]; | ||
24 | |||
25 | if (phdr.p_type == PT_LOAD) { | ||
26 | phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); | ||
27 | phdr.p_filesz = phdr.p_memsz; | ||
28 | if (ofs == 0) { | ||
29 | ofs = phdr.p_offset = offset; | ||
30 | offset += phdr.p_filesz; | ||
31 | } else { | ||
32 | phdr.p_offset = ofs; | ||
33 | } | ||
34 | } else { | ||
35 | phdr.p_offset += ofs; | ||
36 | } | ||
37 | phdr.p_paddr = 0; /* match other core phdrs */ | ||
38 | *size += sizeof(phdr); | ||
39 | if (*size > limit || !dump_write(file, &phdr, sizeof(phdr))) | ||
40 | return 0; | ||
41 | } | ||
42 | return 1; | ||
43 | } | ||
44 | |||
45 | int elf_core_write_extra_data(struct file *file, size_t *size, | ||
46 | unsigned long limit) | ||
47 | { | ||
48 | const struct elf_phdr *const gate_phdrs = | ||
49 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { | ||
53 | if (gate_phdrs[i].p_type == PT_LOAD) { | ||
54 | void *addr = (void *)gate_phdrs[i].p_vaddr; | ||
55 | size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz); | ||
56 | |||
57 | *size += memsz; | ||
58 | if (*size > limit || !dump_write(file, addr, memsz)) | ||
59 | return 0; | ||
60 | break; | ||
61 | } | ||
62 | } | ||
63 | return 1; | ||
64 | } | ||
65 | |||
66 | size_t elf_core_extra_data_size(void) | ||
67 | { | ||
68 | const struct elf_phdr *const gate_phdrs = | ||
69 | (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); | ||
70 | int i; | ||
71 | size_t size = 0; | ||
72 | |||
73 | for (i = 0; i < GATE_EHDR->e_phnum; ++i) { | ||
74 | if (gate_phdrs[i].p_type == PT_LOAD) { | ||
75 | size += PAGE_ALIGN(gate_phdrs[i].p_memsz); | ||
76 | break; | ||
77 | } | ||
78 | } | ||
79 | return size; | ||
80 | } | ||
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d0e7d37017b4..9a260b317d8d 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -71,15 +71,6 @@ ENTRY(ia64_execve) | |||
71 | add out3=16,sp // regs | 71 | add out3=16,sp // regs |
72 | br.call.sptk.many rp=sys_execve | 72 | br.call.sptk.many rp=sys_execve |
73 | .ret0: | 73 | .ret0: |
74 | #ifdef CONFIG_IA32_SUPPORT | ||
75 | /* | ||
76 | * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers | ||
77 | * from pt_regs. | ||
78 | */ | ||
79 | adds r16=PT(CR_IPSR)+16,sp | ||
80 | ;; | ||
81 | ld8 r16=[r16] | ||
82 | #endif | ||
83 | cmp4.ge p6,p7=r8,r0 | 74 | cmp4.ge p6,p7=r8,r0 |
84 | mov ar.pfs=loc1 // restore ar.pfs | 75 | mov ar.pfs=loc1 // restore ar.pfs |
85 | sxt4 r8=r8 // return 64-bit result | 76 | sxt4 r8=r8 // return 64-bit result |
@@ -108,12 +99,6 @@ ENTRY(ia64_execve) | |||
108 | ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 | 99 | ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 |
109 | ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 | 100 | ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 |
110 | ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 | 101 | ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 |
111 | #ifdef CONFIG_IA32_SUPPORT | ||
112 | tbit.nz p6,p0=r16, IA64_PSR_IS_BIT | ||
113 | movl loc0=ia64_ret_from_ia32_execve | ||
114 | ;; | ||
115 | (p6) mov rp=loc0 | ||
116 | #endif | ||
117 | br.ret.sptk.many rp | 102 | br.ret.sptk.many rp |
118 | END(ia64_execve) | 103 | END(ia64_execve) |
119 | 104 | ||
@@ -848,30 +833,6 @@ __paravirt_work_processed_syscall: | |||
848 | br.cond.sptk.many rbs_switch // B | 833 | br.cond.sptk.many rbs_switch // B |
849 | END(__paravirt_leave_syscall) | 834 | END(__paravirt_leave_syscall) |
850 | 835 | ||
851 | #ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE | ||
852 | #ifdef CONFIG_IA32_SUPPORT | ||
853 | GLOBAL_ENTRY(ia64_ret_from_ia32_execve) | ||
854 | PT_REGS_UNWIND_INFO(0) | ||
855 | adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8 | ||
856 | adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10 | ||
857 | ;; | ||
858 | .mem.offset 0,0 | ||
859 | st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit | ||
860 | .mem.offset 8,0 | ||
861 | st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit | ||
862 | #ifdef CONFIG_PARAVIRT | ||
863 | ;; | ||
864 | // don't fall through, ia64_leave_kernel may be #define'd | ||
865 | br.cond.sptk.few ia64_leave_kernel | ||
866 | ;; | ||
867 | #endif /* CONFIG_PARAVIRT */ | ||
868 | END(ia64_ret_from_ia32_execve) | ||
869 | #ifndef CONFIG_PARAVIRT | ||
870 | // fall through | ||
871 | #endif | ||
872 | #endif /* CONFIG_IA32_SUPPORT */ | ||
873 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | ||
874 | |||
875 | GLOBAL_ENTRY(__paravirt_leave_kernel) | 836 | GLOBAL_ENTRY(__paravirt_leave_kernel) |
876 | PT_REGS_UNWIND_INFO(0) | 837 | PT_REGS_UNWIND_INFO(0) |
877 | /* | 838 | /* |
@@ -1806,6 +1767,7 @@ sys_call_table: | |||
1806 | data8 sys_preadv | 1767 | data8 sys_preadv |
1807 | data8 sys_pwritev // 1320 | 1768 | data8 sys_pwritev // 1320 |
1808 | data8 sys_rt_tgsigqueueinfo | 1769 | data8 sys_rt_tgsigqueueinfo |
1770 | data8 sys_recvmmsg | ||
1809 | 1771 | ||
1810 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1772 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
1811 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ | 1773 | #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ |
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index d5764a3d74af..b091111270cb 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -84,7 +84,7 @@ static int __init esi_init (void) | |||
84 | case ESI_DESC_ENTRY_POINT: | 84 | case ESI_DESC_ENTRY_POINT: |
85 | break; | 85 | break; |
86 | default: | 86 | default: |
87 | printk(KERN_WARNING "Unkown table type %d found in " | 87 | printk(KERN_WARNING "Unknown table type %d found in " |
88 | "ESI table, ignoring rest of table\n", *p); | 88 | "ESI table, ignoring rest of table\n", *p); |
89 | return -ENODEV; | 89 | return -ENODEV; |
90 | } | 90 | } |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop) | |||
1051 | * intermediate precision so that we can produce a full 64-bit result. | 1051 | * intermediate precision so that we can produce a full 64-bit result. |
1052 | */ | 1052 | */ |
1053 | GLOBAL_ENTRY(ia64_native_sched_clock) | 1053 | GLOBAL_ENTRY(ia64_native_sched_clock) |
1054 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1054 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) | 1055 | mov.m r9=ar.itc // fetch cycle-counter (35 cyc) |
1056 | ;; | 1056 | ;; |
1057 | ldf8 f8=[r8] | 1057 | ldf8 f8=[r8] |
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock | |||
1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 1077 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
1078 | GLOBAL_ENTRY(cycle_to_cputime) | 1078 | GLOBAL_ENTRY(cycle_to_cputime) |
1079 | alloc r16=ar.pfs,1,0,0,0 | 1079 | alloc r16=ar.pfs,1,0,0,0 |
1080 | addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 | 1080 | addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 |
1081 | ;; | 1081 | ;; |
1082 | ldf8 f8=[r8] | 1082 | ldf8 f8=[r8] |
1083 | ;; | 1083 | ;; |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..7f4a0ed24152 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic | |||
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #include <asm/processor.h> | 32 | #include <asm/processor.h> |
33 | EXPORT_SYMBOL(per_cpu__cpu_info); | 33 | EXPORT_SYMBOL(ia64_cpu_info); |
34 | #ifdef CONFIG_SMP | 34 | #ifdef CONFIG_SMP |
35 | EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); | 35 | EXPORT_SYMBOL(local_per_cpu_offset); |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..7ded76658d2d 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -86,6 +86,7 @@ | |||
86 | #include <linux/kernel.h> | 86 | #include <linux/kernel.h> |
87 | #include <linux/list.h> | 87 | #include <linux/list.h> |
88 | #include <linux/pci.h> | 88 | #include <linux/pci.h> |
89 | #include <linux/slab.h> | ||
89 | #include <linux/smp.h> | 90 | #include <linux/smp.h> |
90 | #include <linux/string.h> | 91 | #include <linux/string.h> |
91 | #include <linux/bootmem.h> | 92 | #include <linux/bootmem.h> |
@@ -793,12 +794,12 @@ iosapic_register_intr (unsigned int gsi, | |||
793 | goto unlock_iosapic_lock; | 794 | goto unlock_iosapic_lock; |
794 | } | 795 | } |
795 | 796 | ||
796 | spin_lock(&irq_desc[irq].lock); | 797 | raw_spin_lock(&irq_desc[irq].lock); |
797 | dest = get_target_cpu(gsi, irq); | 798 | dest = get_target_cpu(gsi, irq); |
798 | dmode = choose_dmode(); | 799 | dmode = choose_dmode(); |
799 | err = register_intr(gsi, irq, dmode, polarity, trigger); | 800 | err = register_intr(gsi, irq, dmode, polarity, trigger); |
800 | if (err < 0) { | 801 | if (err < 0) { |
801 | spin_unlock(&irq_desc[irq].lock); | 802 | raw_spin_unlock(&irq_desc[irq].lock); |
802 | irq = err; | 803 | irq = err; |
803 | goto unlock_iosapic_lock; | 804 | goto unlock_iosapic_lock; |
804 | } | 805 | } |
@@ -817,7 +818,7 @@ iosapic_register_intr (unsigned int gsi, | |||
817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | 818 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), |
818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); | 819 | cpu_logical_id(dest), dest, irq_to_vector(irq)); |
819 | 820 | ||
820 | spin_unlock(&irq_desc[irq].lock); | 821 | raw_spin_unlock(&irq_desc[irq].lock); |
821 | unlock_iosapic_lock: | 822 | unlock_iosapic_lock: |
822 | spin_unlock_irqrestore(&iosapic_lock, flags); | 823 | spin_unlock_irqrestore(&iosapic_lock, flags); |
823 | return irq; | 824 | return irq; |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
71 | } | 71 | } |
72 | 72 | ||
73 | if (i < NR_IRQS) { | 73 | if (i < NR_IRQS) { |
74 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 74 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
75 | action = irq_desc[i].action; | 75 | action = irq_desc[i].action; |
76 | if (!action) | 76 | if (!action) |
77 | goto skip; | 77 | goto skip; |
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
91 | 91 | ||
92 | seq_putc(p, '\n'); | 92 | seq_putc(p, '\n'); |
93 | skip: | 93 | skip: |
94 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 94 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
95 | } else if (i == NR_IRQS) | 95 | } else if (i == NR_IRQS) |
96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
97 | return 0; | 97 | return 0; |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..640479304ac0 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/kernel_stat.h> | 24 | #include <linux/kernel_stat.h> |
25 | #include <linux/slab.h> | ||
26 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
27 | #include <linux/random.h> /* for rand_initialize_irq() */ | 26 | #include <linux/random.h> /* for rand_initialize_irq() */ |
28 | #include <linux/signal.h> | 27 | #include <linux/signal.h> |
@@ -260,7 +259,6 @@ void __setup_vector_irq(int cpu) | |||
260 | } | 259 | } |
261 | 260 | ||
262 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) | 261 | #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) |
263 | #define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR | ||
264 | 262 | ||
265 | static enum vector_domain_type { | 263 | static enum vector_domain_type { |
266 | VECTOR_DOMAIN_NONE, | 264 | VECTOR_DOMAIN_NONE, |
@@ -345,7 +343,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) | |||
345 | 343 | ||
346 | desc = irq_desc + irq; | 344 | desc = irq_desc + irq; |
347 | cfg = irq_cfg + irq; | 345 | cfg = irq_cfg + irq; |
348 | spin_lock(&desc->lock); | 346 | raw_spin_lock(&desc->lock); |
349 | if (!cfg->move_cleanup_count) | 347 | if (!cfg->move_cleanup_count) |
350 | goto unlock; | 348 | goto unlock; |
351 | 349 | ||
@@ -358,7 +356,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) | |||
358 | spin_unlock_irqrestore(&vector_lock, flags); | 356 | spin_unlock_irqrestore(&vector_lock, flags); |
359 | cfg->move_cleanup_count--; | 357 | cfg->move_cleanup_count--; |
360 | unlock: | 358 | unlock: |
361 | spin_unlock(&desc->lock); | 359 | raw_spin_unlock(&desc->lock); |
362 | } | 360 | } |
363 | return IRQ_HANDLED; | 361 | return IRQ_HANDLED; |
364 | } | 362 | } |
@@ -659,11 +657,8 @@ init_IRQ (void) | |||
659 | register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); | 657 | register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); |
660 | #ifdef CONFIG_SMP | 658 | #ifdef CONFIG_SMP |
661 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) | 659 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) |
662 | if (vector_domain_type != VECTOR_DOMAIN_NONE) { | 660 | if (vector_domain_type != VECTOR_DOMAIN_NONE) |
663 | BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR); | ||
664 | IA64_FIRST_DEVICE_VECTOR++; | ||
665 | register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); | 661 | register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); |
666 | } | ||
667 | #endif | 662 | #endif |
668 | #endif | 663 | #endif |
669 | #ifdef CONFIG_PERFMON | 664 | #ifdef CONFIG_PERFMON |
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index ec9a5fdfa1b9..179fd122e837 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -49,7 +49,6 @@ | |||
49 | 49 | ||
50 | #include <asm/asmmacro.h> | 50 | #include <asm/asmmacro.h> |
51 | #include <asm/break.h> | 51 | #include <asm/break.h> |
52 | #include <asm/ia32.h> | ||
53 | #include <asm/kregs.h> | 52 | #include <asm/kregs.h> |
54 | #include <asm/asm-offsets.h> | 53 | #include <asm/asm-offsets.h> |
55 | #include <asm/pgtable.h> | 54 | #include <asm/pgtable.h> |
@@ -1386,28 +1385,6 @@ END(ia32_exception) | |||
1386 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | 1385 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) |
1387 | ENTRY(ia32_intercept) | 1386 | ENTRY(ia32_intercept) |
1388 | DBG_FAULT(46) | 1387 | DBG_FAULT(46) |
1389 | #ifdef CONFIG_IA32_SUPPORT | ||
1390 | mov r31=pr | ||
1391 | MOV_FROM_ISR(r16) | ||
1392 | ;; | ||
1393 | extr.u r17=r16,16,8 // get ISR.code | ||
1394 | mov r18=ar.eflag | ||
1395 | MOV_FROM_IIM(r19) // old eflag value | ||
1396 | ;; | ||
1397 | cmp.ne p6,p0=2,r17 | ||
1398 | (p6) br.cond.spnt 1f // not a system flag fault | ||
1399 | xor r16=r18,r19 | ||
1400 | ;; | ||
1401 | extr.u r17=r16,18,1 // get the eflags.ac bit | ||
1402 | ;; | ||
1403 | cmp.eq p6,p0=0,r17 | ||
1404 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change | ||
1405 | ;; | ||
1406 | mov pr=r31,-1 // restore predicate registers | ||
1407 | RFI | ||
1408 | |||
1409 | 1: | ||
1410 | #endif // CONFIG_IA32_SUPPORT | ||
1411 | FAULT(46) | 1388 | FAULT(46) |
1412 | END(ia32_intercept) | 1389 | END(ia32_intercept) |
1413 | 1390 | ||
@@ -1416,12 +1393,7 @@ END(ia32_intercept) | |||
1416 | // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) | 1393 | // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) |
1417 | ENTRY(ia32_interrupt) | 1394 | ENTRY(ia32_interrupt) |
1418 | DBG_FAULT(47) | 1395 | DBG_FAULT(47) |
1419 | #ifdef CONFIG_IA32_SUPPORT | ||
1420 | mov r31=pr | ||
1421 | br.sptk.many dispatch_to_ia32_handler | ||
1422 | #else | ||
1423 | FAULT(47) | 1396 | FAULT(47) |
1424 | #endif | ||
1425 | END(ia32_interrupt) | 1397 | END(ia32_interrupt) |
1426 | 1398 | ||
1427 | .org ia64_ivt+0x6c00 | 1399 | .org ia64_ivt+0x6c00 |
@@ -1715,89 +1687,3 @@ ENTRY(dispatch_illegal_op_fault) | |||
1715 | (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel | 1687 | (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel |
1716 | br.sptk.many ia64_leave_kernel | 1688 | br.sptk.many ia64_leave_kernel |
1717 | END(dispatch_illegal_op_fault) | 1689 | END(dispatch_illegal_op_fault) |
1718 | |||
1719 | #ifdef CONFIG_IA32_SUPPORT | ||
1720 | |||
1721 | /* | ||
1722 | * There is no particular reason for this code to be here, other than that | ||
1723 | * there happens to be space here that would go unused otherwise. If this | ||
1724 | * fault ever gets "unreserved", simply moved the following code to a more | ||
1725 | * suitable spot... | ||
1726 | */ | ||
1727 | |||
1728 | // IA32 interrupt entry point | ||
1729 | |||
1730 | ENTRY(dispatch_to_ia32_handler) | ||
1731 | SAVE_MIN | ||
1732 | ;; | ||
1733 | MOV_FROM_ISR(r14) | ||
1734 | SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24) | ||
1735 | // guarantee that interruption collection is on | ||
1736 | ;; | ||
1737 | SSM_PSR_I(p15, p15, r3) | ||
1738 | adds r3=8,r2 // Base pointer for SAVE_REST | ||
1739 | ;; | ||
1740 | SAVE_REST | ||
1741 | ;; | ||
1742 | mov r15=0x80 | ||
1743 | shr r14=r14,16 // Get interrupt number | ||
1744 | ;; | ||
1745 | cmp.ne p6,p0=r14,r15 | ||
1746 | (p6) br.call.dpnt.many b6=non_ia32_syscall | ||
1747 | |||
1748 | adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions | ||
1749 | adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp | ||
1750 | ;; | ||
1751 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 | ||
1752 | ld8 r8=[r14] // get r8 | ||
1753 | ;; | ||
1754 | st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP) | ||
1755 | ;; | ||
1756 | alloc r15=ar.pfs,0,0,6,0 // must first in an insn group | ||
1757 | ;; | ||
1758 | ld4 r8=[r14],8 // r8 == eax (syscall number) | ||
1759 | mov r15=IA32_NR_syscalls | ||
1760 | ;; | ||
1761 | cmp.ltu.unc p6,p7=r8,r15 | ||
1762 | ld4 out1=[r14],8 // r9 == ecx | ||
1763 | ;; | ||
1764 | ld4 out2=[r14],8 // r10 == edx | ||
1765 | ;; | ||
1766 | ld4 out0=[r14] // r11 == ebx | ||
1767 | adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp | ||
1768 | ;; | ||
1769 | ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp | ||
1770 | ;; | ||
1771 | ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi | ||
1772 | adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 | ||
1773 | ;; | ||
1774 | ld4 out4=[r14] // r15 == edi | ||
1775 | movl r16=ia32_syscall_table | ||
1776 | ;; | ||
1777 | (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number | ||
1778 | ld4 r2=[r2] // r2 = current_thread_info()->flags | ||
1779 | ;; | ||
1780 | ld8 r16=[r16] | ||
1781 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit | ||
1782 | ;; | ||
1783 | mov b6=r16 | ||
1784 | movl r15=ia32_ret_from_syscall | ||
1785 | cmp.eq p8,p0=r2,r0 | ||
1786 | ;; | ||
1787 | mov rp=r15 | ||
1788 | (p8) br.call.sptk.many b6=b6 | ||
1789 | br.cond.sptk ia32_trace_syscall | ||
1790 | |||
1791 | non_ia32_syscall: | ||
1792 | alloc r15=ar.pfs,0,0,2,0 | ||
1793 | mov out0=r14 // interrupt # | ||
1794 | add out1=16,sp // pointer to pt_regs | ||
1795 | ;; // avoid WAW on CFM | ||
1796 | br.call.sptk.many rp=ia32_bad_interrupt | ||
1797 | .ret1: movl r15=ia64_leave_kernel | ||
1798 | ;; | ||
1799 | mov rp=r15 | ||
1800 | br.ret.sptk.many rp | ||
1801 | END(dispatch_to_ia32_handler) | ||
1802 | |||
1803 | #endif /* CONFIG_IA32_SUPPORT */ | ||
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 9adac441ac9b..7026b29e277a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) | |||
870 | return 1; | 870 | return 1; |
871 | 871 | ||
872 | ss_probe: | 872 | ss_probe: |
873 | #if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) | 873 | #if !defined(CONFIG_PREEMPT) |
874 | if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { | 874 | if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { |
875 | /* Boost up -- we can execute copied instructions directly */ | 875 | /* Boost up -- we can execute copied instructions directly */ |
876 | ia64_psr(regs)->ri = p->ainsn.slot; | 876 | ia64_psr(regs)->ri = p->ainsn.slot; |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a99488..a0220dc5ff42 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -85,6 +85,7 @@ | |||
85 | #include <linux/cpumask.h> | 85 | #include <linux/cpumask.h> |
86 | #include <linux/kdebug.h> | 86 | #include <linux/kdebug.h> |
87 | #include <linux/cpu.h> | 87 | #include <linux/cpu.h> |
88 | #include <linux/gfp.h> | ||
88 | 89 | ||
89 | #include <asm/delay.h> | 90 | #include <asm/delay.h> |
90 | #include <asm/machvec.h> | 91 | #include <asm/machvec.h> |
@@ -888,9 +889,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current) | |||
888 | } | 889 | } |
889 | 890 | ||
890 | static void | 891 | static void |
891 | finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, | 892 | finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos, |
892 | unsigned long *nat) | 893 | unsigned long *nat) |
893 | { | 894 | { |
895 | const pal_min_state_area_t *ms = sos->pal_min_state; | ||
894 | const u64 *bank; | 896 | const u64 *bank; |
895 | 897 | ||
896 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | 898 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use |
@@ -904,6 +906,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, | |||
904 | regs->cr_iip = ms->pmsa_xip; | 906 | regs->cr_iip = ms->pmsa_xip; |
905 | regs->cr_ipsr = ms->pmsa_xpsr; | 907 | regs->cr_ipsr = ms->pmsa_xpsr; |
906 | regs->cr_ifs = ms->pmsa_xfs; | 908 | regs->cr_ifs = ms->pmsa_xfs; |
909 | |||
910 | sos->iip = ms->pmsa_iip; | ||
911 | sos->ipsr = ms->pmsa_ipsr; | ||
912 | sos->ifs = ms->pmsa_ifs; | ||
907 | } | 913 | } |
908 | regs->pr = ms->pmsa_pr; | 914 | regs->pr = ms->pmsa_pr; |
909 | regs->b0 = ms->pmsa_br0; | 915 | regs->b0 = ms->pmsa_br0; |
@@ -1079,7 +1085,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, | |||
1079 | memcpy(old_regs, regs, sizeof(*regs)); | 1085 | memcpy(old_regs, regs, sizeof(*regs)); |
1080 | old_regs->loadrs = loadrs; | 1086 | old_regs->loadrs = loadrs; |
1081 | old_unat = old_regs->ar_unat; | 1087 | old_unat = old_regs->ar_unat; |
1082 | finish_pt_regs(old_regs, ms, &old_unat); | 1088 | finish_pt_regs(old_regs, sos, &old_unat); |
1083 | 1089 | ||
1084 | /* Next stack a struct switch_stack. mca_asm.S built a partial | 1090 | /* Next stack a struct switch_stack. mca_asm.S built a partial |
1085 | * switch_stack, copy it and fill in the blanks using pt_regs and | 1091 | * switch_stack, copy it and fill in the blanks using pt_regs and |
@@ -1150,7 +1156,7 @@ no_mod: | |||
1150 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | 1156 | mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", |
1151 | smp_processor_id(), type, msg); | 1157 | smp_processor_id(), type, msg); |
1152 | old_unat = regs->ar_unat; | 1158 | old_unat = regs->ar_unat; |
1153 | finish_pt_regs(regs, ms, &old_unat); | 1159 | finish_pt_regs(regs, sos, &old_unat); |
1154 | return previous_current; | 1160 | return previous_current; |
1155 | } | 1161 | } |
1156 | 1162 | ||
@@ -1220,9 +1226,12 @@ static void mca_insert_tr(u64 iord) | |||
1220 | unsigned long psr; | 1226 | unsigned long psr; |
1221 | int cpu = smp_processor_id(); | 1227 | int cpu = smp_processor_id(); |
1222 | 1228 | ||
1229 | if (!ia64_idtrs[cpu]) | ||
1230 | return; | ||
1231 | |||
1223 | psr = ia64_clear_ic(); | 1232 | psr = ia64_clear_ic(); |
1224 | for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { | 1233 | for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { |
1225 | p = &__per_cpu_idtrs[cpu][iord-1][i]; | 1234 | p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX; |
1226 | if (p->pte & 0x1) { | 1235 | if (p->pte & 0x1) { |
1227 | old_rr = ia64_get_rr(p->ifa); | 1236 | old_rr = ia64_get_rr(p->ifa); |
1228 | if (old_rr != p->rr) { | 1237 | if (old_rr != p->rr) { |
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@ | |||
59 | ia64_do_tlb_purge: | 59 | ia64_do_tlb_purge: |
60 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 60 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
61 | 61 | ||
62 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 62 | GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 |
63 | ;; | 63 | ;; |
64 | addl r17=O(PTCE_STRIDE),r2 | 64 | addl r17=O(PTCE_STRIDE),r2 |
65 | addl r2=O(PTCE_BASE),r2 | 65 | addl r2=O(PTCE_BASE),r2 |
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index f94aaa86933f..09b4d6828c45 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/workqueue.h> | 23 | #include <linux/workqueue.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/slab.h> | ||
25 | 26 | ||
26 | #include <asm/delay.h> | 27 | #include <asm/delay.h> |
27 | #include <asm/machvec.h> | 28 | #include <asm/machvec.h> |
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..3095654f9ab3 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -1,6 +1,7 @@ | |||
1 | /* Glue code to lib/swiotlb.c */ | 1 | /* Glue code to lib/swiotlb.c */ |
2 | 2 | ||
3 | #include <linux/pci.h> | 3 | #include <linux/pci.h> |
4 | #include <linux/gfp.h> | ||
4 | #include <linux/cache.h> | 5 | #include <linux/cache.h> |
5 | #include <linux/module.h> | 6 | #include <linux/module.h> |
6 | #include <linux/dma-mapping.h> | 7 | #include <linux/dma-mapping.h> |
@@ -41,7 +42,7 @@ struct dma_map_ops swiotlb_dma_ops = { | |||
41 | void __init swiotlb_dma_init(void) | 42 | void __init swiotlb_dma_init(void) |
42 | { | 43 | { |
43 | dma_ops = &swiotlb_dma_ops; | 44 | dma_ops = &swiotlb_dma_ops; |
44 | swiotlb_init(); | 45 | swiotlb_init(1); |
45 | } | 46 | } |
46 | 47 | ||
47 | void __init pci_swiotlb_init(void) | 48 | void __init pci_swiotlb_init(void) |
@@ -51,7 +52,7 @@ void __init pci_swiotlb_init(void) | |||
51 | swiotlb = 1; | 52 | swiotlb = 1; |
52 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); | 53 | printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); |
53 | machvec_init("dig"); | 54 | machvec_init("dig"); |
54 | swiotlb_init(); | 55 | swiotlb_init(1); |
55 | dma_ops = &swiotlb_dma_ops; | 56 | dma_ops = &swiotlb_dma_ops; |
56 | #else | 57 | #else |
57 | panic("Unable to find Intel IOMMU"); | 58 | panic("Unable to find Intel IOMMU"); |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f1782705b1f7..ab985f785c14 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/rcupdate.h> | 41 | #include <linux/rcupdate.h> |
42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
43 | #include <linux/tracehook.h> | 43 | #include <linux/tracehook.h> |
44 | #include <linux/slab.h> | ||
44 | 45 | ||
45 | #include <asm/errno.h> | 46 | #include <asm/errno.h> |
46 | #include <asm/intrinsics.h> | 47 | #include <asm/intrinsics.h> |
@@ -522,42 +523,37 @@ EXPORT_SYMBOL(pfm_sysctl); | |||
522 | 523 | ||
523 | static ctl_table pfm_ctl_table[]={ | 524 | static ctl_table pfm_ctl_table[]={ |
524 | { | 525 | { |
525 | .ctl_name = CTL_UNNUMBERED, | ||
526 | .procname = "debug", | 526 | .procname = "debug", |
527 | .data = &pfm_sysctl.debug, | 527 | .data = &pfm_sysctl.debug, |
528 | .maxlen = sizeof(int), | 528 | .maxlen = sizeof(int), |
529 | .mode = 0666, | 529 | .mode = 0666, |
530 | .proc_handler = &proc_dointvec, | 530 | .proc_handler = proc_dointvec, |
531 | }, | 531 | }, |
532 | { | 532 | { |
533 | .ctl_name = CTL_UNNUMBERED, | ||
534 | .procname = "debug_ovfl", | 533 | .procname = "debug_ovfl", |
535 | .data = &pfm_sysctl.debug_ovfl, | 534 | .data = &pfm_sysctl.debug_ovfl, |
536 | .maxlen = sizeof(int), | 535 | .maxlen = sizeof(int), |
537 | .mode = 0666, | 536 | .mode = 0666, |
538 | .proc_handler = &proc_dointvec, | 537 | .proc_handler = proc_dointvec, |
539 | }, | 538 | }, |
540 | { | 539 | { |
541 | .ctl_name = CTL_UNNUMBERED, | ||
542 | .procname = "fastctxsw", | 540 | .procname = "fastctxsw", |
543 | .data = &pfm_sysctl.fastctxsw, | 541 | .data = &pfm_sysctl.fastctxsw, |
544 | .maxlen = sizeof(int), | 542 | .maxlen = sizeof(int), |
545 | .mode = 0600, | 543 | .mode = 0600, |
546 | .proc_handler = &proc_dointvec, | 544 | .proc_handler = proc_dointvec, |
547 | }, | 545 | }, |
548 | { | 546 | { |
549 | .ctl_name = CTL_UNNUMBERED, | ||
550 | .procname = "expert_mode", | 547 | .procname = "expert_mode", |
551 | .data = &pfm_sysctl.expert_mode, | 548 | .data = &pfm_sysctl.expert_mode, |
552 | .maxlen = sizeof(int), | 549 | .maxlen = sizeof(int), |
553 | .mode = 0600, | 550 | .mode = 0600, |
554 | .proc_handler = &proc_dointvec, | 551 | .proc_handler = proc_dointvec, |
555 | }, | 552 | }, |
556 | {} | 553 | {} |
557 | }; | 554 | }; |
558 | static ctl_table pfm_sysctl_dir[] = { | 555 | static ctl_table pfm_sysctl_dir[] = { |
559 | { | 556 | { |
560 | .ctl_name = CTL_UNNUMBERED, | ||
561 | .procname = "perfmon", | 557 | .procname = "perfmon", |
562 | .mode = 0555, | 558 | .mode = 0555, |
563 | .child = pfm_ctl_table, | 559 | .child = pfm_ctl_table, |
@@ -566,7 +562,6 @@ static ctl_table pfm_sysctl_dir[] = { | |||
566 | }; | 562 | }; |
567 | static ctl_table pfm_sysctl_root[] = { | 563 | static ctl_table pfm_sysctl_root[] = { |
568 | { | 564 | { |
569 | .ctl_name = CTL_KERN, | ||
570 | .procname = "kernel", | 565 | .procname = "kernel", |
571 | .mode = 0555, | 566 | .mode = 0555, |
572 | .child = pfm_sysctl_dir, | 567 | .child = pfm_sysctl_dir, |
@@ -2206,7 +2201,7 @@ pfm_alloc_file(pfm_context_t *ctx) | |||
2206 | { | 2201 | { |
2207 | struct file *file; | 2202 | struct file *file; |
2208 | struct inode *inode; | 2203 | struct inode *inode; |
2209 | struct dentry *dentry; | 2204 | struct path path; |
2210 | char name[32]; | 2205 | char name[32]; |
2211 | struct qstr this; | 2206 | struct qstr this; |
2212 | 2207 | ||
@@ -2231,18 +2226,19 @@ pfm_alloc_file(pfm_context_t *ctx) | |||
2231 | /* | 2226 | /* |
2232 | * allocate a new dcache entry | 2227 | * allocate a new dcache entry |
2233 | */ | 2228 | */ |
2234 | dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); | 2229 | path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); |
2235 | if (!dentry) { | 2230 | if (!path.dentry) { |
2236 | iput(inode); | 2231 | iput(inode); |
2237 | return ERR_PTR(-ENOMEM); | 2232 | return ERR_PTR(-ENOMEM); |
2238 | } | 2233 | } |
2234 | path.mnt = mntget(pfmfs_mnt); | ||
2239 | 2235 | ||
2240 | dentry->d_op = &pfmfs_dentry_operations; | 2236 | path.dentry->d_op = &pfmfs_dentry_operations; |
2241 | d_add(dentry, inode); | 2237 | d_add(path.dentry, inode); |
2242 | 2238 | ||
2243 | file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops); | 2239 | file = alloc_file(&path, FMODE_READ, &pfm_file_ops); |
2244 | if (!file) { | 2240 | if (!file) { |
2245 | dput(dentry); | 2241 | path_put(&path); |
2246 | return ERR_PTR(-ENFILE); | 2242 | return ERR_PTR(-ENFILE); |
2247 | } | 2243 | } |
2248 | 2244 | ||
@@ -2298,7 +2294,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t | |||
2298 | * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) | 2294 | * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) |
2299 | * return -ENOMEM; | 2295 | * return -ENOMEM; |
2300 | */ | 2296 | */ |
2301 | if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) | 2297 | if (size > task_rlimit(task, RLIMIT_MEMLOCK)) |
2302 | return -ENOMEM; | 2298 | return -ENOMEM; |
2303 | 2299 | ||
2304 | /* | 2300 | /* |
@@ -2320,6 +2316,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t | |||
2320 | DPRINT(("Cannot allocate vma\n")); | 2316 | DPRINT(("Cannot allocate vma\n")); |
2321 | goto error_kmem; | 2317 | goto error_kmem; |
2322 | } | 2318 | } |
2319 | INIT_LIST_HEAD(&vma->anon_vma_chain); | ||
2323 | 2320 | ||
2324 | /* | 2321 | /* |
2325 | * partially initialize the vma for the sampling buffer | 2322 | * partially initialize the vma for the sampling buffer |
@@ -2718,7 +2715,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg | |||
2718 | goto buffer_error; | 2715 | goto buffer_error; |
2719 | } | 2716 | } |
2720 | 2717 | ||
2721 | DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n", | 2718 | DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n", |
2722 | ctx, | 2719 | ctx, |
2723 | ctx_flags, | 2720 | ctx_flags, |
2724 | ctx->ctx_fl_system, | 2721 | ctx->ctx_fl_system, |
@@ -3523,7 +3520,7 @@ pfm_use_debug_registers(struct task_struct *task) | |||
3523 | * IA64_THREAD_DBG_VALID set. This indicates a task which was | 3520 | * IA64_THREAD_DBG_VALID set. This indicates a task which was |
3524 | * able to use the debug registers for debugging purposes via | 3521 | * able to use the debug registers for debugging purposes via |
3525 | * ptrace(). Therefore we know it was not using them for | 3522 | * ptrace(). Therefore we know it was not using them for |
3526 | * perfmormance monitoring, so we only decrement the number | 3523 | * performance monitoring, so we only decrement the number |
3527 | * of "ptraced" debug register users to keep the count up to date | 3524 | * of "ptraced" debug register users to keep the count up to date |
3528 | */ | 3525 | */ |
3529 | int | 3526 | int |
@@ -3682,7 +3679,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |||
3682 | * "self-monitoring". | 3679 | * "self-monitoring". |
3683 | */ | 3680 | */ |
3684 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { | 3681 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { |
3685 | DPRINT(("unblocking [%d] \n", task_pid_nr(task))); | 3682 | DPRINT(("unblocking [%d]\n", task_pid_nr(task))); |
3686 | complete(&ctx->ctx_restart_done); | 3683 | complete(&ctx->ctx_restart_done); |
3687 | } else { | 3684 | } else { |
3688 | DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); | 3685 | DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 9bcec9945c12..53f1648c8b81 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -15,11 +15,11 @@ | |||
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/slab.h> | ||
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/notifier.h> | 20 | #include <linux/notifier.h> |
20 | #include <linux/personality.h> | 21 | #include <linux/personality.h> |
21 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | ||
23 | #include <linux/stddef.h> | 23 | #include <linux/stddef.h> |
24 | #include <linux/thread_info.h> | 24 | #include <linux/thread_info.h> |
25 | #include <linux/unistd.h> | 25 | #include <linux/unistd.h> |
@@ -33,7 +33,6 @@ | |||
33 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
34 | #include <asm/delay.h> | 34 | #include <asm/delay.h> |
35 | #include <asm/elf.h> | 35 | #include <asm/elf.h> |
36 | #include <asm/ia32.h> | ||
37 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
38 | #include <asm/kexec.h> | 37 | #include <asm/kexec.h> |
39 | #include <asm/pgalloc.h> | 38 | #include <asm/pgalloc.h> |
@@ -60,6 +59,10 @@ unsigned long idle_halt; | |||
60 | EXPORT_SYMBOL(idle_halt); | 59 | EXPORT_SYMBOL(idle_halt); |
61 | unsigned long idle_nomwait; | 60 | unsigned long idle_nomwait; |
62 | EXPORT_SYMBOL(idle_nomwait); | 61 | EXPORT_SYMBOL(idle_nomwait); |
62 | void (*pm_idle) (void); | ||
63 | EXPORT_SYMBOL(pm_idle); | ||
64 | void (*pm_power_off) (void); | ||
65 | EXPORT_SYMBOL(pm_power_off); | ||
63 | 66 | ||
64 | void | 67 | void |
65 | ia64_do_show_stack (struct unw_frame_info *info, void *arg) | 68 | ia64_do_show_stack (struct unw_frame_info *info, void *arg) |
@@ -358,11 +361,6 @@ ia64_save_extra (struct task_struct *task) | |||
358 | if (info & PFM_CPUINFO_SYST_WIDE) | 361 | if (info & PFM_CPUINFO_SYST_WIDE) |
359 | pfm_syst_wide_update_task(task, info, 0); | 362 | pfm_syst_wide_update_task(task, info, 0); |
360 | #endif | 363 | #endif |
361 | |||
362 | #ifdef CONFIG_IA32_SUPPORT | ||
363 | if (IS_IA32_PROCESS(task_pt_regs(task))) | ||
364 | ia32_save_state(task); | ||
365 | #endif | ||
366 | } | 364 | } |
367 | 365 | ||
368 | void | 366 | void |
@@ -383,11 +381,6 @@ ia64_load_extra (struct task_struct *task) | |||
383 | if (info & PFM_CPUINFO_SYST_WIDE) | 381 | if (info & PFM_CPUINFO_SYST_WIDE) |
384 | pfm_syst_wide_update_task(task, info, 1); | 382 | pfm_syst_wide_update_task(task, info, 1); |
385 | #endif | 383 | #endif |
386 | |||
387 | #ifdef CONFIG_IA32_SUPPORT | ||
388 | if (IS_IA32_PROCESS(task_pt_regs(task))) | ||
389 | ia32_load_state(task); | ||
390 | #endif | ||
391 | } | 384 | } |
392 | 385 | ||
393 | /* | 386 | /* |
@@ -426,7 +419,7 @@ copy_thread(unsigned long clone_flags, | |||
426 | unsigned long user_stack_base, unsigned long user_stack_size, | 419 | unsigned long user_stack_base, unsigned long user_stack_size, |
427 | struct task_struct *p, struct pt_regs *regs) | 420 | struct task_struct *p, struct pt_regs *regs) |
428 | { | 421 | { |
429 | extern char ia64_ret_from_clone, ia32_ret_from_clone; | 422 | extern char ia64_ret_from_clone; |
430 | struct switch_stack *child_stack, *stack; | 423 | struct switch_stack *child_stack, *stack; |
431 | unsigned long rbs, child_rbs, rbs_size; | 424 | unsigned long rbs, child_rbs, rbs_size; |
432 | struct pt_regs *child_ptregs; | 425 | struct pt_regs *child_ptregs; |
@@ -457,7 +450,7 @@ copy_thread(unsigned long clone_flags, | |||
457 | memcpy((void *) child_rbs, (void *) rbs, rbs_size); | 450 | memcpy((void *) child_rbs, (void *) rbs, rbs_size); |
458 | 451 | ||
459 | if (likely(user_mode(child_ptregs))) { | 452 | if (likely(user_mode(child_ptregs))) { |
460 | if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) | 453 | if (clone_flags & CLONE_SETTLS) |
461 | child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ | 454 | child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ |
462 | if (user_stack_base) { | 455 | if (user_stack_base) { |
463 | child_ptregs->r12 = user_stack_base + user_stack_size - 16; | 456 | child_ptregs->r12 = user_stack_base + user_stack_size - 16; |
@@ -477,10 +470,7 @@ copy_thread(unsigned long clone_flags, | |||
477 | child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ | 470 | child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ |
478 | } | 471 | } |
479 | child_stack->ar_bspstore = child_rbs + rbs_size; | 472 | child_stack->ar_bspstore = child_rbs + rbs_size; |
480 | if (IS_IA32_PROCESS(regs)) | 473 | child_stack->b0 = (unsigned long) &ia64_ret_from_clone; |
481 | child_stack->b0 = (unsigned long) &ia32_ret_from_clone; | ||
482 | else | ||
483 | child_stack->b0 = (unsigned long) &ia64_ret_from_clone; | ||
484 | 474 | ||
485 | /* copy parts of thread_struct: */ | 475 | /* copy parts of thread_struct: */ |
486 | p->thread.ksp = (unsigned long) child_stack - 16; | 476 | p->thread.ksp = (unsigned long) child_stack - 16; |
@@ -515,22 +505,6 @@ copy_thread(unsigned long clone_flags, | |||
515 | p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | 505 | p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) |
516 | | THREAD_FLAGS_TO_SET); | 506 | | THREAD_FLAGS_TO_SET); |
517 | ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ | 507 | ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ |
518 | #ifdef CONFIG_IA32_SUPPORT | ||
519 | /* | ||
520 | * If we're cloning an IA32 task then save the IA32 extra | ||
521 | * state from the current task to the new task | ||
522 | */ | ||
523 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | ||
524 | ia32_save_state(p); | ||
525 | if (clone_flags & CLONE_SETTLS) | ||
526 | retval = ia32_clone_tls(p, child_ptregs); | ||
527 | |||
528 | /* Copy partially mapped page list */ | ||
529 | if (!retval) | ||
530 | retval = ia32_copy_ia64_partial_page_list(p, | ||
531 | clone_flags); | ||
532 | } | ||
533 | #endif | ||
534 | 508 | ||
535 | #ifdef CONFIG_PERFMON | 509 | #ifdef CONFIG_PERFMON |
536 | if (current->thread.pfm_context) | 510 | if (current->thread.pfm_context) |
@@ -704,15 +678,6 @@ EXPORT_SYMBOL(kernel_thread); | |||
704 | int | 678 | int |
705 | kernel_thread_helper (int (*fn)(void *), void *arg) | 679 | kernel_thread_helper (int (*fn)(void *), void *arg) |
706 | { | 680 | { |
707 | #ifdef CONFIG_IA32_SUPPORT | ||
708 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | ||
709 | /* A kernel thread is always a 64-bit process. */ | ||
710 | current->thread.map_base = DEFAULT_MAP_BASE; | ||
711 | current->thread.task_size = DEFAULT_TASK_SIZE; | ||
712 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); | ||
713 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); | ||
714 | } | ||
715 | #endif | ||
716 | return (*fn)(arg); | 681 | return (*fn)(arg); |
717 | } | 682 | } |
718 | 683 | ||
@@ -725,14 +690,6 @@ flush_thread (void) | |||
725 | /* drop floating-point and debug-register state if it exists: */ | 690 | /* drop floating-point and debug-register state if it exists: */ |
726 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); | 691 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); |
727 | ia64_drop_fpu(current); | 692 | ia64_drop_fpu(current); |
728 | #ifdef CONFIG_IA32_SUPPORT | ||
729 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | ||
730 | ia32_drop_ia64_partial_page_list(current); | ||
731 | current->thread.task_size = IA32_PAGE_OFFSET; | ||
732 | set_fs(USER_DS); | ||
733 | memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array)); | ||
734 | } | ||
735 | #endif | ||
736 | } | 693 | } |
737 | 694 | ||
738 | /* | 695 | /* |
@@ -753,8 +710,6 @@ exit_thread (void) | |||
753 | if (current->thread.flags & IA64_THREAD_DBG_VALID) | 710 | if (current->thread.flags & IA64_THREAD_DBG_VALID) |
754 | pfm_release_debug_registers(current); | 711 | pfm_release_debug_registers(current); |
755 | #endif | 712 | #endif |
756 | if (IS_IA32_PROCESS(task_pt_regs(current))) | ||
757 | ia32_drop_ia64_partial_page_list(current); | ||
758 | } | 713 | } |
759 | 714 | ||
760 | unsigned long | 715 | unsigned long |
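Beyond the CONFIG_IA32_SUPPORT removals, the process.c diff above also moves the pm_idle and pm_power_off hooks into this file and exports them. As a minimal sketch of how the exported pm_power_off pointer is typically consumed (my_board_power_off and my_board_pm_init are hypothetical names, not part of this patch):

	#include <linux/init.h>
	#include <linux/pm.h>		/* declares pm_power_off */

	/* Hypothetical board-specific power-off routine. */
	static void my_board_power_off(void)
	{
		/* assert the platform's power-off line here */
	}

	static int __init my_board_pm_init(void)
	{
		/* machine_power_off() invokes this pointer if it is set */
		pm_power_off = my_board_power_off;
		return 0;
	}
	arch_initcall(my_board_pm_init);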
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 9daa87fdb018..0dec7f702448 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | ||
15 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
16 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
17 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
@@ -1250,13 +1249,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | |||
1250 | long syscall; | 1249 | long syscall; |
1251 | int arch; | 1250 | int arch; |
1252 | 1251 | ||
1253 | if (IS_IA32_PROCESS(®s)) { | 1252 | syscall = regs.r15; |
1254 | syscall = regs.r1; | 1253 | arch = AUDIT_ARCH_IA64; |
1255 | arch = AUDIT_ARCH_I386; | ||
1256 | } else { | ||
1257 | syscall = regs.r15; | ||
1258 | arch = AUDIT_ARCH_IA64; | ||
1259 | } | ||
1260 | 1254 | ||
1261 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); | 1255 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); |
1262 | } | 1256 | } |
@@ -2172,11 +2166,6 @@ static const struct user_regset_view user_ia64_view = { | |||
2172 | 2166 | ||
2173 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | 2167 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) |
2174 | { | 2168 | { |
2175 | #ifdef CONFIG_IA32_SUPPORT | ||
2176 | extern const struct user_regset_view user_ia32_view; | ||
2177 | if (IS_IA32_PROCESS(task_pt_regs(tsk))) | ||
2178 | return &user_ia32_view; | ||
2179 | #endif | ||
2180 | return &user_ia64_view; | 2169 | return &user_ia64_view; |
2181 | } | 2170 | } |
2182 | 2171 | ||
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S index 32f6fc131fbe..c370e02f0061 100644 --- a/arch/ia64/kernel/relocate_kernel.S +++ b/arch/ia64/kernel/relocate_kernel.S | |||
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel) | |||
61 | 61 | ||
62 | // purge all TC entries | 62 | // purge all TC entries |
63 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 63 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
64 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 64 | GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2 |
65 | ;; | 65 | ;; |
66 | addl r17=O(PTCE_STRIDE),r2 | 66 | addl r17=O(PTCE_STRIDE),r2 |
67 | addl r2=O(PTCE_BASE),r2 | 67 | addl r2=O(PTCE_BASE),r2 |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 1de86c96801d..41ae6a596b50 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/kexec.h> | 46 | #include <linux/kexec.h> |
47 | #include <linux/crash_dump.h> | 47 | #include <linux/crash_dump.h> |
48 | 48 | ||
49 | #include <asm/ia32.h> | ||
50 | #include <asm/machvec.h> | 49 | #include <asm/machvec.h> |
51 | #include <asm/mca.h> | 50 | #include <asm/mca.h> |
52 | #include <asm/meminit.h> | 51 | #include <asm/meminit.h> |
@@ -74,7 +73,7 @@ unsigned long __per_cpu_offset[NR_CPUS]; | |||
74 | EXPORT_SYMBOL(__per_cpu_offset); | 73 | EXPORT_SYMBOL(__per_cpu_offset); |
75 | #endif | 74 | #endif |
76 | 75 | ||
77 | DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 76 | DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); |
78 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); | 77 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); |
79 | unsigned long ia64_cycles_per_usec; | 78 | unsigned long ia64_cycles_per_usec; |
80 | struct ia64_boot_param *ia64_boot_param; | 79 | struct ia64_boot_param *ia64_boot_param; |
@@ -566,19 +565,18 @@ setup_arch (char **cmdline_p) | |||
566 | early_acpi_boot_init(); | 565 | early_acpi_boot_init(); |
567 | # ifdef CONFIG_ACPI_NUMA | 566 | # ifdef CONFIG_ACPI_NUMA |
568 | acpi_numa_init(); | 567 | acpi_numa_init(); |
569 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 568 | # ifdef CONFIG_ACPI_HOTPLUG_CPU |
570 | prefill_possible_map(); | 569 | prefill_possible_map(); |
571 | #endif | 570 | # endif |
572 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? | 571 | per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? |
573 | 32 : cpus_weight(early_cpu_possible_map)), | 572 | 32 : cpus_weight(early_cpu_possible_map)), |
574 | additional_cpus > 0 ? additional_cpus : 0); | 573 | additional_cpus > 0 ? additional_cpus : 0); |
575 | # endif | 574 | # endif |
576 | #else | ||
577 | # ifdef CONFIG_SMP | ||
578 | smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */ | ||
579 | # endif | ||
580 | #endif /* CONFIG_APCI_BOOT */ | 575 | #endif /* CONFIG_APCI_BOOT */ |
581 | 576 | ||
577 | #ifdef CONFIG_SMP | ||
578 | smp_build_cpu_map(); | ||
579 | #endif | ||
582 | find_memory(); | 580 | find_memory(); |
583 | 581 | ||
584 | /* process SAL system table: */ | 582 | /* process SAL system table: */ |
@@ -856,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c) | |||
856 | } | 854 | } |
857 | 855 | ||
858 | /* | 856 | /* |
859 | * In UP configuration, setup_per_cpu_areas() is defined in | ||
860 | * include/linux/percpu.h | ||
861 | */ | ||
862 | #ifdef CONFIG_SMP | ||
863 | void __init | ||
864 | setup_per_cpu_areas (void) | ||
865 | { | ||
866 | /* start_kernel() requires this... */ | ||
867 | } | ||
868 | #endif | ||
869 | |||
870 | /* | ||
871 | * Do the following calculations: | 857 | * Do the following calculations: |
872 | * | 858 | * |
873 | * 1. the max. cache line size. | 859 | * 1. the max. cache line size. |
@@ -980,7 +966,7 @@ cpu_init (void) | |||
980 | * depends on the data returned by identify_cpu(). We break the dependency by | 966 | * depends on the data returned by identify_cpu(). We break the dependency by |
981 | * accessing cpu_data() through the canonical per-CPU address. | 967 | * accessing cpu_data() through the canonical per-CPU address. |
982 | */ | 968 | */ |
983 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start); | 969 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); |
984 | identify_cpu(cpu_info); | 970 | identify_cpu(cpu_info); |
985 | 971 | ||
986 | #ifdef CONFIG_MCKINLEY | 972 | #ifdef CONFIG_MCKINLEY |
@@ -1029,10 +1015,6 @@ cpu_init (void) | |||
1029 | ia64_mmu_init(ia64_imva(cpu_data)); | 1015 | ia64_mmu_init(ia64_imva(cpu_data)); |
1030 | ia64_mca_cpu_init(ia64_imva(cpu_data)); | 1016 | ia64_mca_cpu_init(ia64_imva(cpu_data)); |
1031 | 1017 | ||
1032 | #ifdef CONFIG_IA32_SUPPORT | ||
1033 | ia32_cpu_init(); | ||
1034 | #endif | ||
1035 | |||
1036 | /* Clear ITC to eliminate sched_clock() overflows in human time. */ | 1018 | /* Clear ITC to eliminate sched_clock() overflows in human time. */ |
1037 | ia64_set_itc(0); | 1019 | ia64_set_itc(0); |
1038 | 1020 | ||
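The setup.c and relocate_kernel.S hunks rename the per-CPU cpu_info variable to ia64_cpu_info; only the symbol name changes, so accessors keep the same shape. A small sketch, assuming the usual ia64 per-CPU helpers:

	#include <asm/percpu.h>		/* __ia64_per_cpu_var() */
	#include <asm/processor.h>	/* struct cpuinfo_ia64 */

	/* Sketch only: reach the renamed per-CPU cpuinfo the same way
	 * cpu_init() does in the hunk above. */
	static struct cpuinfo_ia64 *this_cpu_info(void)
	{
		return &__ia64_per_cpu_var(ia64_cpu_info);
	}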
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index e1821ca4c7df..7bdafc8788bd 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/unistd.h> | 21 | #include <linux/unistd.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | 23 | ||
24 | #include <asm/ia32.h> | ||
25 | #include <asm/intrinsics.h> | 24 | #include <asm/intrinsics.h> |
26 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
27 | #include <asm/rse.h> | 26 | #include <asm/rse.h> |
@@ -425,14 +424,8 @@ static long | |||
425 | handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, | 424 | handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, |
426 | struct sigscratch *scr) | 425 | struct sigscratch *scr) |
427 | { | 426 | { |
428 | if (IS_IA32_PROCESS(&scr->pt)) { | 427 | if (!setup_frame(sig, ka, info, oldset, scr)) |
429 | /* send signal to IA-32 process */ | 428 | return 0; |
430 | if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt)) | ||
431 | return 0; | ||
432 | } else | ||
433 | /* send signal to IA-64 process */ | ||
434 | if (!setup_frame(sig, ka, info, oldset, scr)) | ||
435 | return 0; | ||
436 | 429 | ||
437 | spin_lock_irq(¤t->sighand->siglock); | 430 | spin_lock_irq(¤t->sighand->siglock); |
438 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); | 431 | sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); |
@@ -462,7 +455,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
462 | siginfo_t info; | 455 | siginfo_t info; |
463 | long restart = in_syscall; | 456 | long restart = in_syscall; |
464 | long errno = scr->pt.r8; | 457 | long errno = scr->pt.r8; |
465 | # define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c)) | ||
466 | 458 | ||
467 | /* | 459 | /* |
468 | * In the ia64_leave_kernel code path, we want the common case to go fast, which | 460 | * In the ia64_leave_kernel code path, we want the common case to go fast, which |
@@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
490 | * inferior call), thus it's important to check for restarting _after_ | 482 | * inferior call), thus it's important to check for restarting _after_ |
491 | * get_signal_to_deliver(). | 483 | * get_signal_to_deliver(). |
492 | */ | 484 | */ |
493 | if (IS_IA32_PROCESS(&scr->pt)) { | 485 | if ((long) scr->pt.r10 != -1) |
494 | if (in_syscall) { | ||
495 | if (errno >= 0) | ||
496 | restart = 0; | ||
497 | else | ||
498 | errno = -errno; | ||
499 | } | ||
500 | } else if ((long) scr->pt.r10 != -1) | ||
501 | /* | 486 | /* |
502 | * A system calls has to be restarted only if one of the error codes | 487 | * A system calls has to be restarted only if one of the error codes |
503 | * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 | 488 | * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 |
@@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
513 | switch (errno) { | 498 | switch (errno) { |
514 | case ERESTART_RESTARTBLOCK: | 499 | case ERESTART_RESTARTBLOCK: |
515 | case ERESTARTNOHAND: | 500 | case ERESTARTNOHAND: |
516 | scr->pt.r8 = ERR_CODE(EINTR); | 501 | scr->pt.r8 = EINTR; |
517 | /* note: scr->pt.r10 is already -1 */ | 502 | /* note: scr->pt.r10 is already -1 */ |
518 | break; | 503 | break; |
519 | 504 | ||
520 | case ERESTARTSYS: | 505 | case ERESTARTSYS: |
521 | if ((ka.sa.sa_flags & SA_RESTART) == 0) { | 506 | if ((ka.sa.sa_flags & SA_RESTART) == 0) { |
522 | scr->pt.r8 = ERR_CODE(EINTR); | 507 | scr->pt.r8 = EINTR; |
523 | /* note: scr->pt.r10 is already -1 */ | 508 | /* note: scr->pt.r10 is already -1 */ |
524 | break; | 509 | break; |
525 | } | 510 | } |
526 | case ERESTARTNOINTR: | 511 | case ERESTARTNOINTR: |
527 | if (IS_IA32_PROCESS(&scr->pt)) { | 512 | ia64_decrement_ip(&scr->pt); |
528 | scr->pt.r8 = scr->pt.r1; | ||
529 | scr->pt.cr_iip -= 2; | ||
530 | } else | ||
531 | ia64_decrement_ip(&scr->pt); | ||
532 | restart = 0; /* don't restart twice if handle_signal() fails... */ | 513 | restart = 0; /* don't restart twice if handle_signal() fails... */ |
533 | } | 514 | } |
534 | } | 515 | } |
@@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) | |||
555 | if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR | 536 | if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR |
556 | || errno == ERESTART_RESTARTBLOCK) | 537 | || errno == ERESTART_RESTARTBLOCK) |
557 | { | 538 | { |
558 | if (IS_IA32_PROCESS(&scr->pt)) { | 539 | /* |
559 | scr->pt.r8 = scr->pt.r1; | 540 | * Note: the syscall number is in r15 which is saved in |
560 | scr->pt.cr_iip -= 2; | 541 | * pt_regs so all we need to do here is adjust ip so that |
561 | if (errno == ERESTART_RESTARTBLOCK) | 542 | * the "break" instruction gets re-executed. |
562 | scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */ | 543 | */ |
563 | } else { | 544 | ia64_decrement_ip(&scr->pt); |
564 | /* | 545 | if (errno == ERESTART_RESTARTBLOCK) |
565 | * Note: the syscall number is in r15 which is saved in | 546 | scr->pt.r15 = __NR_restart_syscall; |
566 | * pt_regs so all we need to do here is adjust ip so that | ||
567 | * the "break" instruction gets re-executed. | ||
568 | */ | ||
569 | ia64_decrement_ip(&scr->pt); | ||
570 | if (errno == ERESTART_RESTARTBLOCK) | ||
571 | scr->pt.r15 = __NR_restart_syscall; | ||
572 | } | ||
573 | } | 547 | } |
574 | } | 548 | } |
575 | 549 | ||
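With the IA-32 branches gone, the restart handling above is purely the IA-64 path: the syscall number lives in r15 (saved in pt_regs), so a restart only needs the instruction pointer backed up over the break instruction, plus a redirect to __NR_restart_syscall for ERESTART_RESTARTBLOCK. Condensed from the hunk above:

	/* Reconstructed sketch of the surviving restart logic. */
	if (errno == ERESTARTNOHAND || errno == ERESTARTSYS ||
	    errno == ERESTARTNOINTR || errno == ERESTART_RESTARTBLOCK) {
		ia64_decrement_ip(&scr->pt);		/* re-execute the break */
		if (errno == ERESTART_RESTARTBLOCK)
			scr->pt.r15 = __NR_restart_syscall;
	}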
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index de100aa7ff03..e5230b2ff2c5 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <asm/cache.h> | 44 | #include <asm/cache.h> |
45 | #include <asm/current.h> | 45 | #include <asm/current.h> |
46 | #include <asm/delay.h> | 46 | #include <asm/delay.h> |
47 | #include <asm/ia32.h> | ||
48 | #include <asm/io.h> | 47 | #include <asm/io.h> |
49 | #include <asm/irq.h> | 48 | #include <asm/irq.h> |
50 | #include <asm/machvec.h> | 49 | #include <asm/machvec.h> |
@@ -443,10 +442,6 @@ smp_callin (void) | |||
443 | calibrate_delay(); | 442 | calibrate_delay(); |
444 | local_cpu_data->loops_per_jiffy = loops_per_jiffy; | 443 | local_cpu_data->loops_per_jiffy = loops_per_jiffy; |
445 | 444 | ||
446 | #ifdef CONFIG_IA32_SUPPORT | ||
447 | ia32_gdt_init(); | ||
448 | #endif | ||
449 | |||
450 | /* | 445 | /* |
451 | * Allow the master to continue. | 446 | * Allow the master to continue. |
452 | */ | 447 | */ |
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 92ed83f34036..609d50056a6c 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c | |||
@@ -100,51 +100,7 @@ sys_getpagesize (void) | |||
100 | asmlinkage unsigned long | 100 | asmlinkage unsigned long |
101 | ia64_brk (unsigned long brk) | 101 | ia64_brk (unsigned long brk) |
102 | { | 102 | { |
103 | unsigned long rlim, retval, newbrk, oldbrk; | 103 | unsigned long retval = sys_brk(brk); |
104 | struct mm_struct *mm = current->mm; | ||
105 | |||
106 | /* | ||
107 | * Most of this replicates the code in sys_brk() except for an additional safety | ||
108 | * check and the clearing of r8. However, we can't call sys_brk() because we need | ||
109 | * to acquire the mmap_sem before we can do the test... | ||
110 | */ | ||
111 | down_write(&mm->mmap_sem); | ||
112 | |||
113 | if (brk < mm->end_code) | ||
114 | goto out; | ||
115 | newbrk = PAGE_ALIGN(brk); | ||
116 | oldbrk = PAGE_ALIGN(mm->brk); | ||
117 | if (oldbrk == newbrk) | ||
118 | goto set_brk; | ||
119 | |||
120 | /* Always allow shrinking brk. */ | ||
121 | if (brk <= mm->brk) { | ||
122 | if (!do_munmap(mm, newbrk, oldbrk-newbrk)) | ||
123 | goto set_brk; | ||
124 | goto out; | ||
125 | } | ||
126 | |||
127 | /* Check against unimplemented/unmapped addresses: */ | ||
128 | if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT) | ||
129 | goto out; | ||
130 | |||
131 | /* Check against rlimit.. */ | ||
132 | rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; | ||
133 | if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) | ||
134 | goto out; | ||
135 | |||
136 | /* Check against existing mmap mappings. */ | ||
137 | if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) | ||
138 | goto out; | ||
139 | |||
140 | /* Ok, looks good - let it rip. */ | ||
141 | if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) | ||
142 | goto out; | ||
143 | set_brk: | ||
144 | mm->brk = brk; | ||
145 | out: | ||
146 | retval = mm->brk; | ||
147 | up_write(&mm->mmap_sem); | ||
148 | force_successful_syscall_return(); | 104 | force_successful_syscall_return(); |
149 | return retval; | 105 | return retval; |
150 | } | 106 | } |
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, | |||
185 | return 0; | 141 | return 0; |
186 | } | 142 | } |
187 | 143 | ||
188 | static inline unsigned long | ||
189 | do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) | ||
190 | { | ||
191 | struct file *file = NULL; | ||
192 | |||
193 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
194 | if (!(flags & MAP_ANONYMOUS)) { | ||
195 | file = fget(fd); | ||
196 | if (!file) | ||
197 | return -EBADF; | ||
198 | |||
199 | if (!file->f_op || !file->f_op->mmap) { | ||
200 | addr = -ENODEV; | ||
201 | goto out; | ||
202 | } | ||
203 | } | ||
204 | |||
205 | /* Careful about overflows.. */ | ||
206 | len = PAGE_ALIGN(len); | ||
207 | if (!len || len > TASK_SIZE) { | ||
208 | addr = -EINVAL; | ||
209 | goto out; | ||
210 | } | ||
211 | |||
212 | down_write(¤t->mm->mmap_sem); | ||
213 | addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
214 | up_write(¤t->mm->mmap_sem); | ||
215 | |||
216 | out: if (file) | ||
217 | fput(file); | ||
218 | return addr; | ||
219 | } | ||
220 | |||
221 | /* | 144 | /* |
222 | * mmap2() is like mmap() except that the offset is expressed in units | 145 | * mmap2() is like mmap() except that the offset is expressed in units |
223 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces | 146 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces |
@@ -226,7 +149,7 @@ out: if (file) | |||
226 | asmlinkage unsigned long | 149 | asmlinkage unsigned long |
227 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) | 150 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) |
228 | { | 151 | { |
229 | addr = do_mmap2(addr, len, prot, flags, fd, pgoff); | 152 | addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); |
230 | if (!IS_ERR((void *) addr)) | 153 | if (!IS_ERR((void *) addr)) |
231 | force_successful_syscall_return(); | 154 | force_successful_syscall_return(); |
232 | return addr; | 155 | return addr; |
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo | |||
238 | if (offset_in_page(off) != 0) | 161 | if (offset_in_page(off) != 0) |
239 | return -EINVAL; | 162 | return -EINVAL; |
240 | 163 | ||
241 | addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | 164 | addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); |
242 | if (!IS_ERR((void *) addr)) | 165 | if (!IS_ERR((void *) addr)) |
243 | force_successful_syscall_return(); | 166 | force_successful_syscall_return(); |
244 | return addr; | 167 | return addr; |
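Both sys_ia64.c simplifications defer to generic helpers: the open-coded brk logic is replaced by a call to sys_brk(), and the local do_mmap2() duplicate by sys_mmap_pgoff(). For reference, the brk wrapper that results from the hunk above reads:

	asmlinkage unsigned long
	ia64_brk (unsigned long brk)
	{
		unsigned long retval = sys_brk(brk);

		force_successful_syscall_return();
		return retval;
	}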
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 4990495d7531..47a192781b0a 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -61,7 +61,7 @@ unsigned long long sched_clock(void) | |||
61 | 61 | ||
62 | #ifdef CONFIG_PARAVIRT | 62 | #ifdef CONFIG_PARAVIRT |
63 | static void | 63 | static void |
64 | paravirt_clocksource_resume(void) | 64 | paravirt_clocksource_resume(struct clocksource *cs) |
65 | { | 65 | { |
66 | if (pv_time_ops.clocksource_resume) | 66 | if (pv_time_ops.clocksource_resume) |
67 | pv_time_ops.clocksource_resume(); | 67 | pv_time_ops.clocksource_resume(); |
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void) | |||
473 | { | 473 | { |
474 | } | 474 | } |
475 | 475 | ||
476 | void update_vsyscall(struct timespec *wall, struct clocksource *c) | 476 | void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult) |
477 | { | 477 | { |
478 | unsigned long flags; | 478 | unsigned long flags; |
479 | 479 | ||
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c) | |||
481 | 481 | ||
482 | /* copy fsyscall clock data */ | 482 | /* copy fsyscall clock data */ |
483 | fsyscall_gtod_data.clk_mask = c->mask; | 483 | fsyscall_gtod_data.clk_mask = c->mask; |
484 | fsyscall_gtod_data.clk_mult = c->mult; | 484 | fsyscall_gtod_data.clk_mult = mult; |
485 | fsyscall_gtod_data.clk_shift = c->shift; | 485 | fsyscall_gtod_data.clk_shift = c->shift; |
486 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; | 486 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; |
487 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; | 487 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; |
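The time.c hunks track two clocksource API changes: the resume callback now receives the struct clocksource being resumed, and update_vsyscall() takes the multiplier as an explicit argument so the caller can pass an adjusted value instead of having c->mult read directly. Excerpt of the new-side code from the hunk above (unchanged lines elided):

	void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
	{
		/* ... locking and the other clk_* copies unchanged ... */
		fsyscall_gtod_data.clk_mult = mult;	/* caller-supplied, not c->mult */
		/* ... */
	}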
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 8f060352e129..28f299de2903 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/node.h> | 19 | #include <linux/node.h> |
20 | #include <linux/slab.h> | ||
20 | #include <linux/init.h> | 21 | #include <linux/init.h> |
21 | #include <linux/bootmem.h> | 22 | #include <linux/bootmem.h> |
22 | #include <linux/nodemask.h> | 23 | #include <linux/nodemask.h> |
@@ -282,7 +283,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * | |||
282 | return ret; | 283 | return ret; |
283 | } | 284 | } |
284 | 285 | ||
285 | static struct sysfs_ops cache_sysfs_ops = { | 286 | static const struct sysfs_ops cache_sysfs_ops = { |
286 | .show = cache_show | 287 | .show = cache_show |
287 | }; | 288 | }; |
288 | 289 | ||
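Marking cache_sysfs_ops const follows the tree-wide sysfs_ops constification; the kobj_type referencing it is unchanged apart from the qualifier it now accepts. A self-contained sketch of the pattern (names other than cache_sysfs_ops and cache_show are illustrative):

	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	static ssize_t cache_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
	{
		return 0;	/* placeholder body for the sketch */
	}

	static const struct sysfs_ops cache_sysfs_ops = {
		.show = cache_show,
	};

	/* kobj_type.sysfs_ops takes a const pointer after the constification. */
	static struct kobj_type cache_ktype = {
		.sysfs_ops = &cache_sysfs_ops,
	};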
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index f0cda765e681..fd80e70018a9 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/kdebug.h> | 19 | #include <linux/kdebug.h> |
20 | 20 | ||
21 | #include <asm/fpswa.h> | 21 | #include <asm/fpswa.h> |
22 | #include <asm/ia32.h> | ||
23 | #include <asm/intrinsics.h> | 22 | #include <asm/intrinsics.h> |
24 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
25 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
@@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | |||
626 | break; | 625 | break; |
627 | 626 | ||
628 | case 45: | 627 | case 45: |
629 | #ifdef CONFIG_IA32_SUPPORT | ||
630 | if (ia32_exception(®s, isr) == 0) | ||
631 | return; | ||
632 | #endif | ||
633 | printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); | 628 | printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); |
634 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", | 629 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", |
635 | iip, ifa, isr); | 630 | iip, ifa, isr); |
@@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | |||
637 | break; | 632 | break; |
638 | 633 | ||
639 | case 46: | 634 | case 46: |
640 | #ifdef CONFIG_IA32_SUPPORT | ||
641 | if (ia32_intercept(®s, isr) == 0) | ||
642 | return; | ||
643 | #endif | ||
644 | printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); | 635 | printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); |
645 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", | 636 | printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", |
646 | iip, ifa, isr, iim); | 637 | iip, ifa, isr, iim); |
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a595823582d9..c4696d217ce0 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/slab.h> | ||
22 | #include <linux/efi.h> | 21 | #include <linux/efi.h> |
23 | #include <linux/genalloc.h> | 22 | #include <linux/genalloc.h> |
23 | #include <linux/gfp.h> | ||
24 | #include <asm/page.h> | 24 | #include <asm/page.h> |
25 | #include <asm/pal.h> | 25 | #include <asm/pal.h> |
26 | #include <asm/system.h> | 26 | #include <asm/system.h> |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 0a0c77b2c988..1295ba327f6f 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -166,6 +166,12 @@ SECTIONS | |||
166 | } | 166 | } |
167 | #endif | 167 | #endif |
168 | 168 | ||
169 | #ifdef CONFIG_SMP | ||
170 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
171 | __cpu0_per_cpu = .; | ||
172 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
173 | #endif | ||
174 | |||
169 | . = ALIGN(PAGE_SIZE); | 175 | . = ALIGN(PAGE_SIZE); |
170 | __init_end = .; | 176 | __init_end = .; |
171 | 177 | ||
@@ -198,11 +204,6 @@ SECTIONS | |||
198 | data : { } :data | 204 | data : { } :data |
199 | .data : AT(ADDR(.data) - LOAD_OFFSET) | 205 | .data : AT(ADDR(.data) - LOAD_OFFSET) |
200 | { | 206 | { |
201 | #ifdef CONFIG_SMP | ||
202 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
203 | __cpu0_per_cpu = .; | ||
204 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
205 | #endif | ||
206 | INIT_TASK_DATA(PAGE_SIZE) | 207 | INIT_TASK_DATA(PAGE_SIZE) |
207 | CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) | 208 | CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) |
208 | READ_MOSTLY_DATA(SMP_CACHE_BYTES) | 209 | READ_MOSTLY_DATA(SMP_CACHE_BYTES) |