path: root/arch/ia64
author	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-07 16:35:17 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-07 16:35:17 -0500
commit	21b4e736922f546e0f1aa7b9d6c442f309a2444a (patch)
tree	e1be8645297f8ebe87445251743ebcc52081a20d /arch/ia64
parent	34161db6b14d984fb9b06c735b7b42f8803f6851 (diff)
parent	68380b581383c028830f79ec2670f4a193854aa6 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/ia32/binfmt_elf32.c	8
-rw-r--r--	arch/ia64/ia32/ia32_support.c	2
-rw-r--r--	arch/ia64/ia32/ia32priv.h	2
-rw-r--r--	arch/ia64/ia32/sys_ia32.c	2
-rw-r--r--	arch/ia64/kernel/kprobes.c	2
-rw-r--r--	arch/ia64/kernel/palinfo.c	2
-rw-r--r--	arch/ia64/kernel/perfmon.c	2
-rw-r--r--	arch/ia64/kernel/salinfo.c	2
-rw-r--r--	arch/ia64/kernel/topology.c	8
-rw-r--r--	arch/ia64/mm/hugetlbpage.c	5
-rw-r--r--	arch/ia64/mm/init.c	4
11 files changed, 19 insertions, 20 deletions
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91bc921..578737ec7629 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743965a0..6af400a12ca1 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern kmem_cache_t *partial_page_cachep;
+		extern struct kmem_cache *partial_page_cachep;
 
 		partial_page_cachep = kmem_cache_create("partial_page_cache",
 							sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c934f8..cfa0bc0026b5 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@ struct old_linux32_dirent {
 void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
 
-#define elf_addr_t u32
-
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports. */
 #define ELF_HWCAP 0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f210148..a4a6e1463af8 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 }
 
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 
 /*
  * init partial_page_list.
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d63285e..4d592ee9300b 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2b36ac..c4c10a0b99d9 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -952,7 +952,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
@@ -974,7 +973,6 @@ static struct notifier_block palinfo_cpu_notifier =
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3aaede0d6981..e2321536ee4c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2302,7 +2302,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca5344a..fd607ca51a8d 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
 	.write = salinfo_log_write,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init
 salinfo_init(void)
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e89c6..687500ddb4b8 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
 {
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
 	/*
-	 * If CPEI cannot be re-targetted, and this is
-	 * CPEI target, then dont create the control file
+	 * If CPEI can be re-targetted or if this is not
+	 * CPEI target, then it is hotpluggable
 	 */
-	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
-		sysfs_cpus[num].cpu.no_control = 1;
+	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
+		sysfs_cpus[num].cpu.hotpluggable = 1;
 	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
 
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f3a9585e98a8..0c7e94edc20e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba399..56dc2024220e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;