author	Chris Metcalf <cmetcalf@tilera.com>	2010-06-25 17:04:17 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-07-06 13:41:51 -0400
commit	0707ad30d10110aebc01a5a64fb63f4b32d20b73 (patch)
tree	64d8ba73e605ac26e56808d1d77701b3f83cf8b2 /arch/tile/mm
parent	c78095bd8c77fca2619769ff8efb639fd100e373 (diff)
arch/tile: Miscellaneous cleanup changes.
This commit primarily consists of changes made while reviewing "sparse"
and "checkpatch" output on our sources, so it is somewhat noisy, since
mechanical changes like "printk() -> pr_err()" repeated throughout the
codebase get tedious to read. Rather than trying to tease apart precisely
which things changed due to which type of code review, this commit rolls
the various cleanups together (a brief before/after sketch follows the list):
- sparse: Add declarations in headers for globals.
- sparse: Fix __user annotations.
- sparse: Use gfp_t consistently instead of int.
- sparse: Remove functions not actually used.
- checkpatch: Clean up printk() warnings by using pr_info(), etc.;
also avoid partial-line printks except in bootup code.
- checkpatch: Use exposed structs rather than typedefs.
- checkpatch: Change some C99 comments to C89 comments.
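For reference, a minimal before/after sketch of the sparse and checkpatch
cleanups listed above (the alloc_one_page() helper is hypothetical, not
code taken from this diff):

	/* Before: C99 comment, int for GFP flags, raw printk() */
	// allocate and zero one page
	static void *alloc_one_page(int flags)
	{
		void *page = (void *)get_zeroed_page(flags);
		if (page == NULL)
			printk(KERN_ERR "alloc_one_page: out of memory\n");
		return page;
	}

	/* After: C89 comment, gfp_t for GFP flags, pr_err() */
	/* allocate and zero one page */
	static void *alloc_one_page(gfp_t flags)
	{
		void *page = (void *)get_zeroed_page(flags);
		if (page == NULL)
			pr_err("alloc_one_page: out of memory\n");
		return page;
	}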
In addition, a few other minor changes are rolled into this commit:
- Add support for a "raise" instruction to cause SIGFPE, etc., to be
raised (a brief user-space sketch of the visible behavior follows this list).
- Remove some compat code that is unnecessary when we fully eliminate
some of the deprecated syscalls from the generic syscall ABI.
- Update the tile_defconfig to reflect current config contents.
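As context for the "raise" bullet above: what user space observes is
ordinary signal delivery. A minimal user-space sketch of that visible
behavior, using the standard raise(3) library call rather than the tile
instruction itself:

	#include <signal.h>
	#include <unistd.h>

	static void fpe_handler(int sig)
	{
		/* async-signal-safe reporting, then exit */
		static const char msg[] = "caught SIGFPE\n";
		write(STDOUT_FILENO, msg, sizeof(msg) - 1);
		_exit(0);
	}

	int main(void)
	{
		signal(SIGFPE, fpe_handler);
		raise(SIGFPE);	/* the kernel delivers SIGFPE to this process */
		return 1;	/* not reached if the handler runs */
	}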
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/tile/mm')
-rw-r--r--	arch/tile/mm/elf.c		 4
-rw-r--r--	arch/tile/mm/fault.c		64
-rw-r--r--	arch/tile/mm/highmem.c		 2
-rw-r--r--	arch/tile/mm/homecache.c	18
-rw-r--r--	arch/tile/mm/hugetlbpage.c	 2
-rw-r--r--	arch/tile/mm/init.c		99
-rw-r--r--	arch/tile/mm/pgtable.c		46
7 files changed, 76 insertions, 159 deletions
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 818c9bef060c..55e58e93bfc5 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -20,6 +20,7 @@
 #include <linux/elf.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/sections.h>
 
 /* Notify a running simulator, if any, that an exec just occurred. */
 static void sim_notify_exec(const char *binary_name)
@@ -77,9 +78,8 @@ static void *vdso_page;
 /* One-entry array used for install_special_mapping. */
 static struct page *vdso_pages[1];
 
-int __init vdso_setup(void)
+static int __init vdso_setup(void)
 {
-	extern char __rt_sigreturn[], __rt_sigreturn_end[];
 	vdso_page = (void *)get_zeroed_page(GFP_ATOMIC);
 	memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn);
 	vdso_pages[0] = virt_to_page(vdso_page);
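The vdso_setup() hunk above is the shape of the "declarations in headers"
cleanup: the function-local extern for __rt_sigreturn[] and
__rt_sigreturn_end[] is dropped, and the newly added
#include <asm/sections.h> is assumed to supply the shared declaration
instead, along the lines of:

	/* in <asm/sections.h> (assumed location for the declaration) */
	extern char __rt_sigreturn[], __rt_sigreturn_end[];

With one declaration visible to both the definition and all users, sparse
can check that they agree.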
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 9b6b92f07def..0011f06b4fe2 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -39,32 +39,11 @@
 #include <asm/system.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
+#include <asm/traps.h>
+#include <asm/syscalls.h>
 
 #include <arch/interrupts.h>
 
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out
- */
-void bust_spinlocks(int yes)
-{
-	int loglevel_save = console_loglevel;
-
-	if (yes) {
-		oops_in_progress = 1;
-		return;
-	}
-	oops_in_progress = 0;
-	/*
-	 * OK, the message is on the console.  Now we call printk()
-	 * without oops_in_progress set so that printk will give klogd
-	 * a poke.  Hold onto your hats...
-	 */
-	console_loglevel = 15;	/* NMI oopser may have shut the console up */
-	printk(" ");
-	console_loglevel = loglevel_save;
-}
-
 static noinline void force_sig_info_fault(int si_signo, int si_code,
 	unsigned long address, int fault_num, struct task_struct *tsk)
 {
@@ -301,10 +280,10 @@ static int handle_page_fault(struct pt_regs *regs,
 	 */
 	stack_offset = stack_pointer & (THREAD_SIZE-1);
 	if (stack_offset < THREAD_SIZE / 8) {
-		printk(KERN_ALERT "Potential stack overrun: sp %#lx\n",
+		pr_alert("Potential stack overrun: sp %#lx\n",
 		       stack_pointer);
 		show_regs(regs);
-		printk(KERN_ALERT "Killing current process %d/%s\n",
+		pr_alert("Killing current process %d/%s\n",
 		       tsk->pid, tsk->comm);
 		do_group_exit(SIGKILL);
 	}
@@ -422,7 +401,7 @@ good_area:
 	} else if (write) {
 #ifdef TEST_VERIFY_AREA
 		if (!is_page_fault && regs->cs == KERNEL_CS)
-			printk("WP fault at "REGFMT"\n", regs->eip);
+			pr_err("WP fault at "REGFMT"\n", regs->eip);
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -450,6 +429,7 @@ good_area:
 	else
 		tsk->min_flt++;
 
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 	/*
 	 * If this was an asynchronous fault,
 	 * restart the appropriate engine.
@@ -472,6 +452,7 @@ good_area:
 		break;
 #endif
 	}
+#endif
 
 	up_read(&mm->mmap_sem);
 	return 1;
@@ -514,17 +495,17 @@ no_context:
 	pte_t *pte = lookup_address(address);
 
 	if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-		printk(KERN_CRIT "kernel tried to execute"
+		pr_crit("kernel tried to execute"
 		       " non-executable page - exploit attempt?"
 		       " (uid: %d)\n", current->uid);
 	}
 #endif
 	if (address < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n");
+		pr_alert("Unable to handle kernel NULL pointer dereference\n");
 	else
-		printk(KERN_ALERT "Unable to handle kernel paging request\n");
-	printk(" at virtual address "REGFMT", pc "REGFMT"\n",
+		pr_alert("Unable to handle kernel paging request\n");
+	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
 	       address, regs->pc);
 
 	show_regs(regs);
 
@@ -555,7 +536,7 @@ out_of_memory:
 		down_read(&mm->mmap_sem);
 		goto survive;
 	}
-	printk("VM: killing process %s\n", tsk->comm);
+	pr_alert("VM: killing process %s\n", tsk->comm);
 	if (!is_kernel_mode)
 		do_group_exit(SIGKILL);
 	goto no_context;
@@ -573,31 +554,12 @@ do_sigbus:
 
 #ifndef __tilegx__
 
-extern char sys_cmpxchg[], __sys_cmpxchg_end[];
-extern char __sys_cmpxchg_grab_lock[];
-extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
-
-/*
- * We return this structure in registers to avoid having to write
- * additional save/restore code in the intvec.S caller.
- */
-struct intvec_state {
-	void *handler;
-	unsigned long vecnum;
-	unsigned long fault_num;
-	unsigned long info;
-	unsigned long retval;
-};
-
 /* We must release ICS before panicking or we won't get anywhere. */
 #define ics_panic(fmt, ...) do { \
 	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
 	panic(fmt, __VA_ARGS__); \
 } while (0)
 
-void do_page_fault(struct pt_regs *regs, int fault_num,
-		unsigned long address, unsigned long write);
-
 /*
  * When we take an ITLB or DTLB fault or access violation in the
  * supervisor while the critical section bit is set, the hypervisor is
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 1fcecc5b9e03..ff1cdff5114d 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -121,7 +121,7 @@ static struct list_head amp_list = LIST_HEAD_INIT(amp_list);
 struct kmap_amps {
 	struct atomic_mapped_page per_type[KM_TYPE_NR];
 };
-DEFINE_PER_CPU(struct kmap_amps, amps);
+static DEFINE_PER_CPU(struct kmap_amps, amps);
 
 /*
  * Add a page and va, on this cpu, to the list of kmap_atomic pages,
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 52feb77133ce..97c478e7be27 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -46,7 +46,7 @@
  * locally from a remote home.  There's no point in using it if we
  * don't have coherent local caching, though.
  */
-int __write_once noallocl2;
+static int __write_once noallocl2;
 static int __init set_noallocl2(char *str)
 {
 	noallocl2 = 1;
@@ -60,15 +60,11 @@ early_param("noallocl2", set_noallocl2);
 
 #endif
 
-
-
 /* Provide no-op versions of these routines to keep flush_remote() cleaner. */
 #define mark_caches_evicted_start() 0
 #define mark_caches_evicted_finish(mask, timestamp) do {} while (0)
 
 
-
-
 /*
  * Update the irq_stat for cpus that we are going to interrupt
  * with TLB or cache flushes.  Also handle removing dataplane cpus
@@ -171,20 +167,12 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
 	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
 	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
 
-	printk("hv_flush_remote(%#llx, %#lx, %p [%s],"
+	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
 	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
 	       cache_pa, cache_control, cache_cpumask, cache_buf,
 	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
 	       tlb_cpumask, tlb_buf,
 	       asids, asidcount, rc);
-	if (asidcount > 0) {
-		int i;
-		printk(" asids:");
-		for (i = 0; i < asidcount; ++i)
-			printk(" %d,%d,%d",
-			       asids[i].x, asids[i].y, asids[i].asid);
-		printk("\n");
-	}
 	panic("Unsafe to continue.");
 }
 
@@ -293,7 +281,7 @@ pte_t pte_set_home(pte_t pte, int home)
 	 */
 	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
 		pte = hv_pte_clear_nc(pte);
-		printk("non-immutable page incoherently referenced: %#llx\n",
+		pr_err("non-immutable page incoherently referenced: %#llx\n",
 		       pte.val);
 	}
 
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index c38570f8f0d0..24688b697a8d 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -332,7 +332,7 @@ static __init int setup_hugepagesz(char *opt)
 	} else if (ps == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	} else {
-		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
+		pr_err("hugepagesz: Unsupported page size %lu M\n",
 		       ps >> 20);
 		return 0;
 	}
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 125ac53b60fc..d89c9eacd162 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -67,7 +67,9 @@
 
 #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
 
+#ifndef __tilegx__
 unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
+#endif
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -282,9 +284,9 @@ static pgprot_t __init init_pgprot(ulong address)
 	/*
 	 * Everything else that isn't data or bss is heap, so mark it
 	 * with the initial heap home (hash-for-home, or this cpu).  This
-	 * includes any addresses after the loaded image; any address before
-	 * _einittext (since we already captured the case of text before
-	 * _sinittext); and any init-data pages.
+	 * includes any addresses after the loaded image and any address before
+	 * _einitdata, since we already captured the case of text before
+	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
 	 *
 	 * All the LOWMEM pages that we mark this way will get their
 	 * struct page homecache properly marked later, in set_page_homes().
@@ -292,9 +294,7 @@ static pgprot_t __init init_pgprot(ulong address)
 	 * homes, but with a zero free_time we don't have to actually
 	 * do a flush action the first time we use them, either.
 	 */
-	if (address >= (ulong) _end || address < (ulong) _sdata ||
-	    (address >= (ulong) _sinitdata &&
-	     address < (ulong) _einitdata))
+	if (address >= (ulong) _end || address < (ulong) _einitdata)
 		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
 #if CHIP_HAS_CBOX_HOME_MAP()
@@ -304,35 +304,38 @@ static pgprot_t __init init_pgprot(ulong address)
 #endif
 
 	/*
+	 * Make the w1data homed like heap to start with, to avoid
+	 * making it part of the page-striped data area when we're just
+	 * going to convert it to read-only soon anyway.
+	 */
+	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
+		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
+
+	/*
 	 * Otherwise we just hand out consecutive cpus.  To avoid
 	 * requiring this function to hold state, we just walk forward from
 	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
 	 * the requested address, while walking cpu home around kdata_mask.
 	 * This is typically no more than a dozen or so iterations.
 	 */
-	BUG_ON(_einitdata != __bss_start);
-	for (page = (ulong)_sdata, cpu = NR_CPUS; ; ) {
-		cpu = cpumask_next(cpu, &kdata_mask);
-		if (cpu == NR_CPUS)
-			cpu = cpumask_first(&kdata_mask);
-		if (page >= address)
-			break;
-		page += PAGE_SIZE;
-		if (page == (ulong)__start_rodata)
-			page = (ulong)__end_rodata;
-		if (page == (ulong)&init_thread_union)
-			page += THREAD_SIZE;
-		if (page == (ulong)_sinitdata)
-			page = (ulong)_einitdata;
+	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
+	BUG_ON(address < page || address >= (ulong)_end);
+	cpu = cpumask_first(&kdata_mask);
+	for (; page < address; page += PAGE_SIZE) {
+		if (page >= (ulong)&init_thread_union &&
+		    page < (ulong)&init_thread_union + THREAD_SIZE)
+			continue;
 		if (page == (ulong)empty_zero_page)
-			page += PAGE_SIZE;
+			continue;
 #ifndef __tilegx__
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 		if (page == (ulong)atomic_locks)
-			page += PAGE_SIZE;
+			continue;
 #endif
 #endif
-
+		cpu = cpumask_next(cpu, &kdata_mask);
+		if (cpu == NR_CPUS)
+			cpu = cpumask_first(&kdata_mask);
 	}
 	return construct_pgprot(PAGE_KERNEL, cpu);
 }
@@ -362,7 +365,7 @@ static int __init setup_ktext(char *str)
 	/* If you have a leading "nocache", turn off ktext caching */
 	if (strncmp(str, "nocache", 7) == 0) {
 		ktext_nocache = 1;
-		printk("ktext: disabling local caching of kernel text\n");
+		pr_info("ktext: disabling local caching of kernel text\n");
 		str += 7;
 		if (*str == ',')
 			++str;
@@ -374,20 +377,20 @@ static int __init setup_ktext(char *str)
 
 	/* Default setting on Tile64: use a huge page */
 	if (strcmp(str, "huge") == 0)
-		printk("ktext: using one huge locally cached page\n");
+		pr_info("ktext: using one huge locally cached page\n");
 
 	/* Pay TLB cost but get no cache benefit: cache small pages locally */
 	else if (strcmp(str, "local") == 0) {
 		ktext_small = 1;
 		ktext_local = 1;
-		printk("ktext: using small pages with local caching\n");
+		pr_info("ktext: using small pages with local caching\n");
 	}
 
 	/* Neighborhood cache ktext pages on all cpus. */
 	else if (strcmp(str, "all") == 0) {
 		ktext_small = 1;
 		ktext_all = 1;
-		printk("ktext: using maximal caching neighborhood\n");
+		pr_info("ktext: using maximal caching neighborhood\n");
 	}
 
 
@@ -397,10 +400,10 @@ static int __init setup_ktext(char *str)
 		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
 		if (cpumask_weight(&ktext_mask) > 1) {
 			ktext_small = 1;
-			printk("ktext: using caching neighborhood %s "
+			pr_info("ktext: using caching neighborhood %s "
 			       "with small pages\n", buf);
 		} else {
-			printk("ktext: caching on cpu %s with one huge page\n",
+			pr_info("ktext: caching on cpu %s with one huge page\n",
 			       buf);
 		}
 	}
@@ -470,19 +473,19 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
 #if CHIP_HAS_CBOX_HOME_MAP()
 	if (ktext_arg_seen && ktext_hash) {
-		printk("warning: \"ktext\" boot argument ignored"
+		pr_warning("warning: \"ktext\" boot argument ignored"
 		       " if \"kcache_hash\" sets up text hash-for-home\n");
 		ktext_small = 0;
 	}
 
 	if (kdata_arg_seen && kdata_hash) {
-		printk("warning: \"kdata\" boot argument ignored"
+		pr_warning("warning: \"kdata\" boot argument ignored"
 		       " if \"kcache_hash\" sets up data hash-for-home\n");
 	}
 
 	if (kdata_huge && !hash_default) {
-		printk("warning: disabling \"kdata=huge\"; requires"
+		pr_warning("warning: disabling \"kdata=huge\"; requires"
 		       " kcache_hash=all or =allbutstack\n");
 		kdata_huge = 0;
 	}
 #endif
@@ -556,11 +559,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	if (!cpumask_empty(&bad)) {
 		char buf[NR_CPUS * 5];
 		cpulist_scnprintf(buf, sizeof(buf), &bad);
-		printk("ktext: not using unavailable cpus %s\n", buf);
+		pr_info("ktext: not using unavailable cpus %s\n", buf);
 	}
 	if (cpumask_empty(&ktext_mask)) {
-		printk("ktext: no valid cpus; caching on %d.\n",
+		pr_warning("ktext: no valid cpus; caching on %d.\n",
 		       smp_processor_id());
 		cpumask_copy(&ktext_mask,
 			     cpumask_of(smp_processor_id()));
 	}
@@ -737,17 +740,18 @@ static void __init set_non_bootmem_pages_init(void)
 	for_each_zone(z) {
 		unsigned long start, end;
 		int nid = z->zone_pgdat->node_id;
+		int idx = zone_idx(z);
 
 		start = z->zone_start_pfn;
 		if (start == 0)
 			continue;  /* bootmem */
 		end = start + z->spanned_pages;
-		if (zone_idx(z) == ZONE_NORMAL) {
+		if (idx == ZONE_NORMAL) {
 			BUG_ON(start != node_start_pfn[nid]);
 			start = node_free_pfn[nid];
 		}
#ifdef CONFIG_HIGHMEM
-		if (zone_idx(z) == ZONE_HIGHMEM)
+		if (idx == ZONE_HIGHMEM)
 			totalhigh_pages += z->spanned_pages;
 #endif
 		if (kdata_huge) {
@@ -841,9 +845,9 @@ void __init mem_init(void)
 #ifdef CONFIG_HIGHMEM
 	/* check that fixmap and pkmap do not overlap */
 	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
-		printk(KERN_ERR "fixmap and kmap areas overlap"
+		pr_err("fixmap and kmap areas overlap"
 		       " - this will crash\n");
-		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
 		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
 		       FIXADDR_START);
 		BUG();
@@ -863,7 +867,7 @@ void __init mem_init(void)
 	initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
 	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
+	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
 	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 	       num_physpages << (PAGE_SHIFT-10),
 	       codesize >> 10,
@@ -968,7 +972,6 @@ static void mark_w1data_ro(void)
 	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
 	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
 		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		struct page *page = pfn_to_page(pfn);
 		pte_t *ptep = virt_to_pte(NULL, addr);
 		BUG_ON(pte_huge(*ptep));	/* not relevant for kdata_huge */
 		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
@@ -986,7 +989,7 @@ static long __write_once initfree = 1;
 static int __init set_initfree(char *str)
 {
 	strict_strtol(str, 0, &initfree);
-	printk("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
 	return 1;
 }
 __setup("initfree=", set_initfree);
@@ -996,8 +999,8 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	unsigned long addr = (unsigned long) begin;
 
 	if (kdata_huge && !initfree) {
-		printk("Warning: ignoring initfree=0:"
+		pr_warning("Warning: ignoring initfree=0:"
 		       " incompatible with kdata=huge\n");
 		initfree = 1;
 	}
 	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
@@ -1033,7 +1036,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
 void free_initmem(void)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 289e729bbd76..28c23140c947 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -46,7 +46,7 @@ void show_mem(void)
 {
 	struct zone *zone;
 
-	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
+	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
 	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
 	       " pagecache:%lu swap:%lu\n",
 	       (global_page_state(NR_ACTIVE_ANON) +
@@ -71,7 +71,6 @@ void show_mem(void)
 		if (!populated_zone(zone))
 			continue;
 
-		printk("Node %d %7s: ", zone_to_nid(zone), zone->name);
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			int nr = zone->free_area[order].nr_free;
@@ -80,7 +79,8 @@ void show_mem(void)
 				largest_order = order;
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
-		printk("%lukB (largest %luKb)\n",
+		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
+		       zone_to_nid(zone), zone->name,
 		       K(total), largest_order ? K(1UL) << largest_order : 0);
 	}
 }
@@ -123,42 +123,6 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
 }
 
-/*
- * Associate a huge virtual page frame with a given physical page frame
- * and protection flags for that frame.  pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned.
- * The pmd must already be instantiated.
- */
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-
-	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
-		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
-		return; /* BUG(); */
-	}
-	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
-		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
-		return; /* BUG(); */
-	}
-	pgd = swapper_pg_dir + pgd_index(vaddr);
-	if (pgd_none(*pgd)) {
-		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
-		return; /* BUG(); */
-	}
-	pud = pud_offset(pgd, vaddr);
-	pmd = pmd_offset(pud, vaddr);
-	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(pfn), flags));
-	/*
-	 * It's enough to flush this one mapping.
-	 * We flush both small and huge TSBs to be sure.
-	 */
-	local_flush_tlb_page(NULL, vaddr, HPAGE_SIZE);
-	local_flush_tlb_pages(NULL, vaddr, PAGE_SIZE, HPAGE_SIZE);
-}
-
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 {
 	unsigned long address = __fix_to_virt(idx);
@@ -257,7 +221,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	int flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
+	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP;
 	struct page *p;
 
 #ifdef CONFIG_HIGHPTE
@@ -550,7 +514,7 @@ void iounmap(volatile void __iomem *addr_in)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		pr_err("iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}