Diffstat (limited to 'arch/ia64')

-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 13
-rw-r--r--  arch/ia64/kernel/perfmon.c    |  3
-rw-r--r--  arch/ia64/mm/init.c           |  6

3 files changed, 7 insertions, 15 deletions
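Every hunk below makes the same substitution: an open-coded kmem_cache_alloc() followed by a memset() of the whole object is collapsed into a single kmem_cache_zalloc(), which returns the object from the cache already zero-filled. A minimal before/after sketch of the pattern (the vma variable and GFP_KERNEL flag are taken from the call sites below; surrounding code is omitted):

	/* before: allocate from the slab cache, then clear the object by hand */
	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		memset(vma, 0, sizeof(*vma));

	/* after: one call that hands back a zero-filled object */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);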
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 578737ec7629..c05bda662364 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,9 +91,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -117,9 +116,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GATE_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -142,9 +140,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_LDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
@@ -214,12 +211,10 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&current->mm->mmap_sem);
 	{
 		mpnt->vm_mm = current->mm;
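For reference, kmem_cache_zalloc() is a thin wrapper around the ordinary cache allocator that guarantees the returned object is zeroed; in recent kernels it boils down to passing __GFP_ZERO, roughly as sketched here (an illustration, not necessarily the exact slab.h definition in this tree):

	static inline void *kmem_cache_zalloc(struct kmem_cache *cachep, gfp_t flags)
	{
		return kmem_cache_alloc(cachep, flags | __GFP_ZERO);
	}

Either way, callers such as ia64_elf32_init() and ia32_setup_arg_pages() above no longer need their own memset().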
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index aa94f60fa8e7..86e144f321ff 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2301,12 +2301,11 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
 	}
-	memset(vma, 0, sizeof(*vma));
 
 	/*
 	 * partially initialize the vma for the sampling buffer
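The perfmon.c hunk applies the same substitution on an error-handling path; after the patch the allocation reads roughly as follows (identifiers as in the hunk above, surrounding code omitted):

	/* allocate vma; kmem_cache_zalloc() returns it already zeroed */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}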
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 760dda4d5b6e..f225dd72968b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -176,9 +176,8 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -195,9 +194,8 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);