Diffstat (limited to 'arch')
 arch/s390/appldata/appldata_base.c |  3 +--
 arch/s390/kernel/debug.c           | 11 +++--------
 2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index d06a8d71c71d..54d35c130907 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -531,12 +531,11 @@ int appldata_register_ops(struct appldata_ops *ops)
 		P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
 		return -EBUSY;
 	}
-	ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
+	ops->ctl_table = kzalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
 	if (ops->ctl_table == NULL) {
 		P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
 		return -ENOMEM;
 	}
-	memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));
 
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
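
For reference, the appldata hunk above folds an open-coded kmalloc()/memset() pair into a single kzalloc() call, which returns already zero-filled memory. A minimal sketch of the pattern outside the patch context (the struct and function names here are hypothetical, not from the file):

#include <linux/slab.h>
#include <linux/string.h>

struct demo_entry {		/* hypothetical example struct */
	int id;
	void *data;
};

/* Before: kmalloc() leaves the buffer uninitialized, so a separate
 * memset() is needed to hand back a zeroed table. */
static struct demo_entry *demo_alloc_old(void)
{
	struct demo_entry *t = kmalloc(4 * sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	memset(t, 0, 4 * sizeof(*t));
	return t;
}

/* After: kzalloc() allocates and zero-fills in one call. */
static struct demo_entry *demo_alloc_new(void)
{
	return kzalloc(4 * sizeof(struct demo_entry), GFP_KERNEL);
}

The behavior is identical on success and failure; dropping the memset() only removes a redundant second pass over the memory.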
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 896d39d0e4ce..06a3fbc12536 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -204,16 +204,13 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
 			goto fail_malloc_areas2;
 		}
 		for(j = 0; j < pages_per_area; j++) {
-			areas[i][j] = (debug_entry_t*)kmalloc(PAGE_SIZE,
-								GFP_KERNEL);
+			areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
 			if(!areas[i][j]) {
 				for(j--; j >=0 ; j--) {
 					kfree(areas[i][j]);
 				}
 				kfree(areas[i]);
 				goto fail_malloc_areas2;
-			} else {
-				memset(areas[i][j],0,PAGE_SIZE);
 			}
 		}
 	}
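
The kzalloc() conversion in the hunk above leaves the existing unwind path untouched: when one page allocation fails, the pages already allocated for that area are freed in reverse order before giving up. A self-contained sketch of that cleanup pattern (function name and layout are illustrative only, assuming GFP_KERNEL allocations):

#include <linux/mm.h>
#include <linux/slab.h>

/* Allocate n zeroed page-sized buffers; on failure, free the ones
 * already allocated (same unwind style as debug_areas_alloc()). */
static void **demo_pages_alloc(int n)
{
	void **pages;
	int j;

	pages = kmalloc(n * sizeof(void *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (j = 0; j < n; j++) {
		pages[j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pages[j]) {
			for (j--; j >= 0; j--)
				kfree(pages[j]);
			kfree(pages);
			return NULL;
		}
	}
	return pages;
}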
@@ -249,14 +246,12 @@ debug_info_alloc(char *name, int pages_per_area, int nr_areas, int buf_size,
 	rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_KERNEL);
 	if(!rc)
 		goto fail_malloc_rc;
-	rc->active_entries = (int*)kmalloc(nr_areas * sizeof(int), GFP_KERNEL);
+	rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
 	if(!rc->active_entries)
 		goto fail_malloc_active_entries;
-	memset(rc->active_entries, 0, nr_areas * sizeof(int));
-	rc->active_pages = (int*)kmalloc(nr_areas * sizeof(int), GFP_KERNEL);
+	rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
 	if(!rc->active_pages)
 		goto fail_malloc_active_pages;
-	memset(rc->active_pages, 0, nr_areas * sizeof(int));
 	if((mode == ALL_AREAS) && (pages_per_area != 0)){
 		rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
 		if(!rc->areas)
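
The second debug.c hunk uses kcalloc() for the two int arrays: it takes the element count and element size as separate arguments, zeroes the memory, and also guards the count * size multiplication against overflow, which the open-coded kmalloc(nr_areas * sizeof(int), ...) did not. A before/after sketch with a hypothetical helper name:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: manual size arithmetic plus a follow-up memset(). */
static int *demo_counters_old(int nr)
{
	int *p = kmalloc(nr * sizeof(int), GFP_KERNEL);

	if (!p)
		return NULL;
	memset(p, 0, nr * sizeof(int));
	return p;
}

/* After: kcalloc(nr, size, flags) returns a zeroed array and fails
 * cleanly if nr * size would overflow. */
static int *demo_counters_new(int nr)
{
	return kcalloc(nr, sizeof(int), GFP_KERNEL);
}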