Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/hp/sim/simserial.c     4
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c    8
-rw-r--r--  arch/ia64/ia32/ia32_support.c    2
-rw-r--r--  arch/ia64/ia32/ia32priv.h        2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c        2
-rw-r--r--  arch/ia64/kernel/kprobes.c       2
-rw-r--r--  arch/ia64/kernel/mca.c           8
-rw-r--r--  arch/ia64/kernel/palinfo.c       2
-rw-r--r--  arch/ia64/kernel/perfmon.c       2
-rw-r--r--  arch/ia64/kernel/salinfo.c       2
-rw-r--r--  arch/ia64/kernel/smpboot.c      12
-rw-r--r--  arch/ia64/kernel/topology.c      8
-rw-r--r--  arch/ia64/mm/hugetlbpage.c       5
-rw-r--r--  arch/ia64/mm/init.c              4
-rw-r--r--  arch/ia64/pci/pci.c              4
15 files changed, 34 insertions, 33 deletions
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70c..b62f0c4d2c7c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
 	printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
 	info->flags = sstate->flags;
 	info->xmit_fifo_size = sstate->xmit_fifo_size;
 	info->line = line;
-	INIT_WORK(&info->work, do_softint, info);
+	INIT_WORK(&info->work, do_softint);
 	info->state = sstate;
 	if (sstate->info) {
 		kfree(info);
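
(Aside, not part of the patch: the simserial hunks track the workqueue API rework in which INIT_WORK() dropped its data argument and work handlers now receive the struct work_struct pointer itself. A minimal sketch of the new-style usage follows; softint_work and example_setup are illustrative names, only do_softint comes from the file above.)

#include <linux/workqueue.h>

/* New-style handler: takes the work item, not a void * cookie. */
static void do_softint(struct work_struct *work)
{
	printk(KERN_ERR "simserial: do_softint called\n");
}

static struct work_struct softint_work;		/* hypothetical standalone item */

static void example_setup(void)
{
	INIT_WORK(&softint_work, do_softint);	/* two-argument form */
	schedule_work(&softint_work);		/* handler runs later in keventd */
}
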
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91bc921..578737ec7629 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743965a0..6af400a12ca1 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern kmem_cache_t *partial_page_cachep;
+		extern struct kmem_cache *partial_page_cachep;
 
 		partial_page_cachep = kmem_cache_create("partial_page_cache",
 							sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c934f8..cfa0bc0026b5 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@ struct old_linux32_dirent {
 void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLAT_INIT(_r, load_addr)	ia64_elf32_init(_r)
 
-#define elf_addr_t	u32
-
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports. */
 #define ELF_HWCAP	0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f210148..a4a6e1463af8 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 }
 
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 
 /*
  * init partial_page_list.
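
(Aside, not part of the patch: the ia32 hunks also track the removal of the kmem_cache_t typedef and of the old SLAB_* allocation aliases; caches are declared as struct kmem_cache and allocations pass plain GFP_ flags. A hedged sketch of the updated idiom, assuming partial_page_cachep has already been created as in ia32_init(); example_alloc is an illustrative name:)

#include <linux/slab.h>
#include <linux/string.h>

extern struct kmem_cache *partial_page_cachep;	/* was: kmem_cache_t * */

static struct partial_page *example_alloc(void)
{
	/* SLAB_KERNEL was simply an alias for GFP_KERNEL and is gone. */
	struct partial_page *pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);

	if (pp)
		memset(pp, 0, sizeof(*pp));
	return pp;
}
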
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d63285e..4d592ee9300b 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..6bedd97570ca 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2b36ac..c4c10a0b99d9 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -952,7 +952,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
@@ -974,7 +973,6 @@ static struct notifier_block palinfo_cpu_notifier =
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3aaede0d6981..e2321536ee4c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2302,7 +2302,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca5344a..fd607ca51a8d 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
 	.write = salinfo_log_write,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init
 salinfo_init(void)
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
 	if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
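
(Aside, not part of the patch: the smpboot.c hunks show the other half of the workqueue conversion — when a handler needs per-item data, the work_struct is embedded in the containing structure and recovered with container_of(). A condensed sketch of the pattern used above; example_boot is an illustrative name and the fork_idle() call is elided:)

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct create_idle {
	struct work_struct work;	/* embedded work item */
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void do_fork_idle(struct work_struct *work)
{
	/* Recover the containing structure from the work pointer. */
	struct create_idle *c_idle = container_of(work, struct create_idle, work);

	/* ... fork the idle task for c_idle->cpu here ... */
	complete(&c_idle->done);
}

static void example_boot(int cpu)
{
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu  = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};

	schedule_work(&c_idle.work);		/* run do_fork_idle() in keventd */
	wait_for_completion(&c_idle.done);	/* wait for it to signal completion */
}
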
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e89c6..687500ddb4b8 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
 {
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
 	/*
-	 * If CPEI cannot be re-targetted, and this is
-	 * CPEI target, then dont create the control file
+	 * If CPEI can be re-targetted or if this is not
+	 * CPEI target, then it is hotpluggable
 	 */
-	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
-		sysfs_cpus[num].cpu.no_control = 1;
+	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
+		sysfs_cpus[num].cpu.hotpluggable = 1;
 	map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
 
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index f3a9585e98a8..0c7e94edc20e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,6 +64,11 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba399..56dc2024220e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
 	 * the problem.  When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index f4edfbf27134..eb92cef9cd0d 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -564,8 +564,8 @@ pcibios_enable_device (struct pci_dev *dev, int mask)
 void
 pcibios_disable_device (struct pci_dev *dev)
 {
-	if (dev->is_enabled)
-		acpi_pci_irq_disable(dev);
+	BUG_ON(atomic_read(&dev->enable_cnt));
+	acpi_pci_irq_disable(dev);
 }
 
 void