Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c  | 8
-rw-r--r--  arch/ia64/ia32/ia32_support.c  | 2
-rw-r--r--  arch/ia64/ia32/ia32priv.h      | 2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c      | 2
-rw-r--r--  arch/ia64/kernel/kprobes.c     | 2
-rw-r--r--  arch/ia64/kernel/palinfo.c     | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c     | 2
-rw-r--r--  arch/ia64/kernel/salinfo.c     | 2
-rw-r--r--  arch/ia64/kernel/topology.c    | 8
-rw-r--r--  arch/ia64/mm/hugetlbpage.c     | 5
-rw-r--r--  arch/ia64/mm/init.c            | 4
11 files changed, 19 insertions(+), 20 deletions(-)
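
Most of the hunks below are one mechanical sweep: the legacy SLAB_KERNEL allocation flag becomes its gfp_t equivalent GFP_KERNEL, and the kmem_cache_t typedef becomes struct kmem_cache. A minimal sketch of the allocation pattern shared by the binfmt_elf32.c, perfmon.c, and init.c hunks after the rename (the helper name alloc_zeroed_vma is hypothetical, used here only to frame the pattern; vm_area_cachep and the explicit memset come straight from the hunks):

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper sketching the shared pattern.  SLAB_KERNEL was an
 * alias for GFP_KERNEL, so only the spelling changes, not the behaviour. */
static struct vm_area_struct *alloc_zeroed_vma(void)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);	/* was SLAB_KERNEL */
	if (!vma)
		return NULL;
	memset(vma, 0, sizeof(*vma));	/* the patch keeps the explicit zeroing */
	return vma;
}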
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91bc921..578737ec7629 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743965a0..6af400a12ca1 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern kmem_cache_t *partial_page_cachep;
+		extern struct kmem_cache *partial_page_cachep;
 
 		partial_page_cachep = kmem_cache_create("partial_page_cache",
 							sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c934f8..cfa0bc0026b5 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@ struct old_linux32_dirent {
 void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLAT_INIT(_r, load_addr)	ia64_elf32_init(_r)
 
-#define elf_addr_t u32
-
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports. */
 #define ELF_HWCAP	0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f210148..a4a6e1463af8 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 }
 
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 
 /*
  * init partial_page_list.
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 898a07d1d6f1..76e778951e20 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
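
The free_insn_slot() change tracks an interface update in the generic kprobes code: the helper grew a second argument indicating whether the slot still holds a live (dirty) instruction and must not be recycled immediately. Passing 0 here keeps the old behaviour of freeing the slot outright. A hedged sketch of the assumed prototype (the parameter name dirty is inferred from that usage, not quoted from this patch):

	void free_insn_slot(kprobe_opcode_t *slot, int dirty);	/* assumed prototype */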
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 32f81b38ae48..a71df9ae0397 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -968,7 +968,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -990,7 +989,6 @@ static struct notifier_block palinfo_cpu_notifier =
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index fa4a1a7eb67d..dbb28164b19b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2301,7 +2301,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca5344a..fd607ca51a8d 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
 	.write = salinfo_log_write,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init
 salinfo_init(void)
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e89c6..687500ddb4b8 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
 {
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
 	/*
-	 * If CPEI cannot be re-targetted, and this is
-	 * CPEI target, then dont create the control file
+	 * If CPEI can be re-targetted or if this is not
+	 * CPEI target, then it is hotpluggable
 	 */
-	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
-		sysfs_cpus[num].cpu.no_control = 1;
+	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
+		sysfs_cpus[num].cpu.hotpluggable = 1;
 	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
 
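
The flipped test is the De Morgan complement of the old one — !(!can_cpei_retarget() && is_cpu_cpei_target(num)) is exactly can_cpei_retarget() || !is_cpu_cpei_target(num) — so the hunk inverts the flag's polarity (no_control marked the non-removable exception, hotpluggable marks the removable case) without changing which CPUs fall on which side. A self-contained check of that equivalence, with the two predicates reduced to plain booleans:

#include <assert.h>

int main(void)
{
	/* Enumerate all four input combinations and confirm that the new
	 * guard is the exact negation of the old one. */
	for (int retarget = 0; retarget <= 1; retarget++)
		for (int cpei_target = 0; cpei_target <= 1; cpei_target++) {
			int old_no_control   = !retarget && cpei_target;
			int new_hotpluggable = retarget || !cpei_target;
			assert(new_hotpluggable == !old_no_control);
		}
	return 0;
}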
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f3a9585e98a8..0c7e94edc20e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry)	{ pte_val(entry) |= _PAGE_P; }
 
 /*
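
The new huge_pmd_unshare() is a stub: an unconditional 0 tells the generic hugetlb code that ia64 never shares huge-page PMDs, so there is never anything to unshare. An illustrative fragment of how a generic caller is presumed to consume that return value (the caller shape is an assumption for illustration, not part of this patch):

	/* Assumed generic-mm usage: a nonzero return means a shared PMD was
	 * just unhooked, so the page-table walk must restart. */
	if (huge_pmd_unshare(mm, &address, ptep))
		continue;	/* never taken on ia64: the stub returns 0 */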
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba399..56dc2024220e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;