| author | Joe Perches <joe@perches.com> | 2014-10-31 13:50:46 -0400 |
|---|---|---|
| committer | Chris Metcalf <cmetcalf@tilera.com> | 2014-11-11 15:51:42 -0500 |
| commit | f47436734dc89ece62654d4db8d08163a89dd7ca | |
| tree | 1977a1e352588c026c87cc1fc34c93fabba6f2b7 | |
| parent | ebd25caf7d511312d1a9724ab5752e9e661dfe60 | |
tile: Use the more common pr_warn instead of pr_warning
And other message logging neatening.
Other miscellanea:
o coalesce formats
o realign arguments
o standardize a couple of macros
o use __func__ instead of embedding the function name
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
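The cleanups listed above follow a few mechanical patterns, so one hedged sketch is enough to show them. The sketch below is illustrative only, not a hunk from this patch; `example_panic()` and `example_prepare()` are hypothetical names. It shows pr_warning() becoming the more common pr_warn(), a user-visible format string coalesced onto one line so it stays greppable, __func__ used instead of an embedded function name, and a panic-style macro standardized on ##__VA_ARGS__ so it also accepts a bare format string.

```c
/*
 * Illustrative sketch only -- not a hunk from this patch.
 * example_panic() and example_prepare() are hypothetical names.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/*
 * Standardized macro form: "##__VA_ARGS__" swallows the preceding comma,
 * so the macro also works when called with only a format string.
 */
#define example_panic(fmt, ...)					\
do {								\
	/* e.g. release some critical state first */		\
	panic(fmt, ##__VA_ARGS__);				\
} while (0)

static int example_prepare(int nr_online)
{
	if (nr_online > 1) {
		/*
		 * Before: pr_warning() with the string split across two
		 * source lines.  After: pr_warn(), one greppable format
		 * string, and __func__ instead of an embedded name.
		 */
		pr_warn("%s: refusing to continue with %d cpus online\n",
			__func__, nr_online);
		return -ENOSYS;
	}
	return 0;
}
```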
| -rw-r--r-- | arch/tile/include/asm/io.h | 5 |
| -rw-r--r-- | arch/tile/include/asm/pgtable.h | 4 |
| -rw-r--r-- | arch/tile/include/asm/pgtable_64.h | 2 |
| -rw-r--r-- | arch/tile/kernel/hardwall.c | 6 |
| -rw-r--r-- | arch/tile/kernel/irq.c | 5 |
| -rw-r--r-- | arch/tile/kernel/kprobes.c | 3 |
| -rw-r--r-- | arch/tile/kernel/machine_kexec.c | 28 |
| -rw-r--r-- | arch/tile/kernel/messaging.c | 5 |
| -rw-r--r-- | arch/tile/kernel/module.c | 12 |
| -rw-r--r-- | arch/tile/kernel/pci.c | 7 |
| -rw-r--r-- | arch/tile/kernel/pci_gx.c | 95 |
| -rw-r--r-- | arch/tile/kernel/process.c | 16 |
| -rw-r--r-- | arch/tile/kernel/setup.c | 36 |
| -rw-r--r-- | arch/tile/kernel/signal.c | 6 |
| -rw-r--r-- | arch/tile/kernel/single_step.c | 6 |
| -rw-r--r-- | arch/tile/kernel/smpboot.c | 5 |
| -rw-r--r-- | arch/tile/kernel/stack.c | 7 |
| -rw-r--r-- | arch/tile/kernel/time.c | 4 |
| -rw-r--r-- | arch/tile/kernel/traps.c | 10 |
| -rw-r--r-- | arch/tile/kernel/unaligned.c | 22 |
| -rw-r--r-- | arch/tile/mm/fault.c | 34 |
| -rw-r--r-- | arch/tile/mm/homecache.c | 6 |
| -rw-r--r-- | arch/tile/mm/hugetlbpage.c | 18 |
| -rw-r--r-- | arch/tile/mm/init.c | 32 |
| -rw-r--r-- | arch/tile/mm/pgtable.c | 4 |
25 files changed, 158 insertions(+), 220 deletions(-)
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 9fe434969fab..4353539fb887 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
| @@ -392,8 +392,7 @@ extern void ioport_unmap(void __iomem *addr); | |||
| 392 | static inline long ioport_panic(void) | 392 | static inline long ioport_panic(void) |
| 393 | { | 393 | { |
| 394 | #ifdef __tilegx__ | 394 | #ifdef __tilegx__ |
| 395 | panic("PCI IO space support is disabled. Configure the kernel with" | 395 | panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it"); |
| 396 | " CONFIG_TILE_PCI_IO to enable it"); | ||
| 397 | #else | 396 | #else |
| 398 | panic("inb/outb and friends do not exist on tile"); | 397 | panic("inb/outb and friends do not exist on tile"); |
| 399 | #endif | 398 | #endif |
| @@ -402,7 +401,7 @@ static inline long ioport_panic(void) | |||
| 402 | 401 | ||
| 403 | static inline void __iomem *ioport_map(unsigned long port, unsigned int len) | 402 | static inline void __iomem *ioport_map(unsigned long port, unsigned int len) |
| 404 | { | 403 | { |
| 405 | pr_info("ioport_map: mapping IO resources is unsupported on tile.\n"); | 404 | pr_info("ioport_map: mapping IO resources is unsupported on tile\n"); |
| 406 | return NULL; | 405 | return NULL; |
| 407 | } | 406 | } |
| 408 | 407 | ||
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h index 33587f16c152..5d1950788c69 100644 --- a/arch/tile/include/asm/pgtable.h +++ b/arch/tile/include/asm/pgtable.h | |||
| @@ -235,9 +235,9 @@ static inline void __pte_clear(pte_t *ptep) | |||
| 235 | #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) | 235 | #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) |
| 236 | 236 | ||
| 237 | #define pte_ERROR(e) \ | 237 | #define pte_ERROR(e) \ |
| 238 | pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) | 238 | pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e)) |
| 239 | #define pgd_ERROR(e) \ | 239 | #define pgd_ERROR(e) \ |
| 240 | pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) | 240 | pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e)) |
| 241 | 241 | ||
| 242 | /* Return PA and protection info for a given kernel VA. */ | 242 | /* Return PA and protection info for a given kernel VA. */ |
| 243 | int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte); | 243 | int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte); |
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index 2c8a9cd102d3..e96cec52f6d8 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h | |||
| @@ -86,7 +86,7 @@ static inline int pud_huge_page(pud_t pud) | |||
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | #define pmd_ERROR(e) \ | 88 | #define pmd_ERROR(e) \ |
| 89 | pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e)) | 89 | pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e)) |
| 90 | 90 | ||
| 91 | static inline void pud_clear(pud_t *pudp) | 91 | static inline void pud_clear(pud_t *pudp) |
| 92 | { | 92 | { |
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c index aca6000bca75..c4646bb99342 100644 --- a/arch/tile/kernel/hardwall.c +++ b/arch/tile/kernel/hardwall.c | |||
| @@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) | |||
| 365 | * to quiesce. | 365 | * to quiesce. |
| 366 | */ | 366 | */ |
| 367 | if (rect->teardown_in_progress) { | 367 | if (rect->teardown_in_progress) { |
| 368 | pr_notice("cpu %d: detected %s hardwall violation %#lx" | 368 | pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n", |
| 369 | " while teardown already in progress\n", | ||
| 370 | cpu, hwt->name, | 369 | cpu, hwt->name, |
| 371 | (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); | 370 | (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); |
| 372 | goto done; | 371 | goto done; |
| @@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt, | |||
| 630 | struct thread_struct *ts = &task->thread; | 629 | struct thread_struct *ts = &task->thread; |
| 631 | 630 | ||
| 632 | if (cpumask_weight(&task->cpus_allowed) != 1) { | 631 | if (cpumask_weight(&task->cpus_allowed) != 1) { |
| 633 | pr_err("pid %d (%s) releasing %s hardwall with" | 632 | pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n", |
| 634 | " an affinity mask containing %d cpus!\n", | ||
| 635 | task->pid, task->comm, hwt->name, | 633 | task->pid, task->comm, hwt->name, |
| 636 | cpumask_weight(&task->cpus_allowed)); | 634 | cpumask_weight(&task->cpus_allowed)); |
| 637 | BUG(); | 635 | BUG(); |
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index ba85765e1436..22044fc691ef 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c | |||
| @@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) | |||
| 107 | { | 107 | { |
| 108 | long sp = stack_pointer - (long) current_thread_info(); | 108 | long sp = stack_pointer - (long) current_thread_info(); |
| 109 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | 109 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { |
| 110 | pr_emerg("tile_dev_intr: " | 110 | pr_emerg("%s: stack overflow: %ld\n", |
| 111 | "stack overflow: %ld\n", | 111 | __func__, sp - sizeof(struct thread_info)); |
| 112 | sp - sizeof(struct thread_info)); | ||
| 113 | dump_stack(); | 112 | dump_stack(); |
| 114 | } | 113 | } |
| 115 | } | 114 | } |
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c index 27cdcacbe81d..f8a45c51e9e4 100644 --- a/arch/tile/kernel/kprobes.c +++ b/arch/tile/kernel/kprobes.c | |||
| @@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
| 90 | return -EINVAL; | 90 | return -EINVAL; |
| 91 | 91 | ||
| 92 | if (insn_has_control(*p->addr)) { | 92 | if (insn_has_control(*p->addr)) { |
| 93 | pr_notice("Kprobes for control instructions are not " | 93 | pr_notice("Kprobes for control instructions are not supported\n"); |
| 94 | "supported\n"); | ||
| 95 | return -EINVAL; | 94 | return -EINVAL; |
| 96 | } | 95 | } |
| 97 | 96 | ||
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c index f0b54a934712..008aa2faef55 100644 --- a/arch/tile/kernel/machine_kexec.c +++ b/arch/tile/kernel/machine_kexec.c | |||
| @@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs) | |||
| 77 | int machine_kexec_prepare(struct kimage *image) | 77 | int machine_kexec_prepare(struct kimage *image) |
| 78 | { | 78 | { |
| 79 | if (num_online_cpus() > 1) { | 79 | if (num_online_cpus() > 1) { |
| 80 | pr_warning("%s: detected attempt to kexec " | 80 | pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n", |
| 81 | "with num_online_cpus() > 1\n", | 81 | __func__); |
| 82 | __func__); | ||
| 83 | return -ENOSYS; | 82 | return -ENOSYS; |
| 84 | } | 83 | } |
| 85 | if (image->type != KEXEC_TYPE_DEFAULT) { | 84 | if (image->type != KEXEC_TYPE_DEFAULT) { |
| 86 | pr_warning("%s: detected attempt to kexec " | 85 | pr_warn("%s: detected attempt to kexec with unsupported type: %d\n", |
| 87 | "with unsupported type: %d\n", | 86 | __func__, image->type); |
| 88 | __func__, | ||
| 89 | image->type); | ||
| 90 | return -ENOSYS; | 87 | return -ENOSYS; |
| 91 | } | 88 | } |
| 92 | return 0; | 89 | return 0; |
| @@ -131,8 +128,8 @@ static unsigned char *kexec_bn2cl(void *pg) | |||
| 131 | */ | 128 | */ |
| 132 | csum = ip_compute_csum(pg, bhdrp->b_size); | 129 | csum = ip_compute_csum(pg, bhdrp->b_size); |
| 133 | if (csum != 0) { | 130 | if (csum != 0) { |
| 134 | pr_warning("%s: bad checksum %#x (size %d)\n", | 131 | pr_warn("%s: bad checksum %#x (size %d)\n", |
| 135 | __func__, csum, bhdrp->b_size); | 132 | __func__, csum, bhdrp->b_size); |
| 136 | return 0; | 133 | return 0; |
| 137 | } | 134 | } |
| 138 | 135 | ||
| @@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg) | |||
| 160 | while (*desc != '\0') { | 157 | while (*desc != '\0') { |
| 161 | desc++; | 158 | desc++; |
| 162 | if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { | 159 | if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { |
| 163 | pr_info("%s: ran off end of page\n", | 160 | pr_info("%s: ran off end of page\n", __func__); |
| 164 | __func__); | ||
| 165 | return 0; | 161 | return 0; |
| 166 | } | 162 | } |
| 167 | } | 163 | } |
| @@ -195,20 +191,18 @@ static void kexec_find_and_set_command_line(struct kimage *image) | |||
| 195 | } | 191 | } |
| 196 | 192 | ||
| 197 | if (command_line != 0) { | 193 | if (command_line != 0) { |
| 198 | pr_info("setting new command line to \"%s\"\n", | 194 | pr_info("setting new command line to \"%s\"\n", command_line); |
| 199 | command_line); | ||
| 200 | 195 | ||
| 201 | hverr = hv_set_command_line( | 196 | hverr = hv_set_command_line( |
| 202 | (HV_VirtAddr) command_line, strlen(command_line)); | 197 | (HV_VirtAddr) command_line, strlen(command_line)); |
| 203 | kunmap_atomic(command_line); | 198 | kunmap_atomic(command_line); |
| 204 | } else { | 199 | } else { |
| 205 | pr_info("%s: no command line found; making empty\n", | 200 | pr_info("%s: no command line found; making empty\n", __func__); |
| 206 | __func__); | ||
| 207 | hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); | 201 | hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); |
| 208 | } | 202 | } |
| 209 | if (hverr) | 203 | if (hverr) |
| 210 | pr_warning("%s: hv_set_command_line returned error: %d\n", | 204 | pr_warn("%s: hv_set_command_line returned error: %d\n", |
| 211 | __func__, hverr); | 205 | __func__, hverr); |
| 212 | } | 206 | } |
| 213 | 207 | ||
| 214 | /* | 208 | /* |
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index ac950be1318e..7475af3aacec 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c | |||
| @@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum) | |||
| 59 | { | 59 | { |
| 60 | long sp = stack_pointer - (long) current_thread_info(); | 60 | long sp = stack_pointer - (long) current_thread_info(); |
| 61 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | 61 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { |
| 62 | pr_emerg("hv_message_intr: " | 62 | pr_emerg("%s: stack overflow: %ld\n", |
| 63 | "stack overflow: %ld\n", | 63 | __func__, sp - sizeof(struct thread_info)); |
| 64 | sp - sizeof(struct thread_info)); | ||
| 65 | dump_stack(); | 64 | dump_stack(); |
| 66 | } | 65 | } |
| 67 | } | 66 | } |
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c index d19b13e3a59f..96447c9160a0 100644 --- a/arch/tile/kernel/module.c +++ b/arch/tile/kernel/module.c | |||
| @@ -96,8 +96,8 @@ void module_free(struct module *mod, void *module_region) | |||
| 96 | static int validate_hw2_last(long value, struct module *me) | 96 | static int validate_hw2_last(long value, struct module *me) |
| 97 | { | 97 | { |
| 98 | if (((value << 16) >> 16) != value) { | 98 | if (((value << 16) >> 16) != value) { |
| 99 | pr_warning("module %s: Out of range HW2_LAST value %#lx\n", | 99 | pr_warn("module %s: Out of range HW2_LAST value %#lx\n", |
| 100 | me->name, value); | 100 | me->name, value); |
| 101 | return 0; | 101 | return 0; |
| 102 | } | 102 | } |
| 103 | return 1; | 103 | return 1; |
| @@ -210,10 +210,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, | |||
| 210 | value -= (unsigned long) location; /* pc-relative */ | 210 | value -= (unsigned long) location; /* pc-relative */ |
| 211 | value = (long) value >> 3; /* count by instrs */ | 211 | value = (long) value >> 3; /* count by instrs */ |
| 212 | if (!validate_jumpoff(value)) { | 212 | if (!validate_jumpoff(value)) { |
| 213 | pr_warning("module %s: Out of range jump to" | 213 | pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n", |
| 214 | " %#llx at %#llx (%p)\n", me->name, | 214 | me->name, |
| 215 | sym->st_value + rel[i].r_addend, | 215 | sym->st_value + rel[i].r_addend, |
| 216 | rel[i].r_offset, location); | 216 | rel[i].r_offset, location); |
| 217 | return -ENOEXEC; | 217 | return -ENOEXEC; |
| 218 | } | 218 | } |
| 219 | MUNGE(create_JumpOff_X1); | 219 | MUNGE(create_JumpOff_X1); |
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index 1f80a88c75a6..f70c7892fa25 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c | |||
| @@ -178,8 +178,8 @@ int __init tile_pci_init(void) | |||
| 178 | continue; | 178 | continue; |
| 179 | hv_cfg_fd1 = tile_pcie_open(i, 1); | 179 | hv_cfg_fd1 = tile_pcie_open(i, 1); |
| 180 | if (hv_cfg_fd1 < 0) { | 180 | if (hv_cfg_fd1 < 0) { |
| 181 | pr_err("PCI: Couldn't open config fd to HV " | 181 | pr_err("PCI: Couldn't open config fd to HV for controller %d\n", |
| 182 | "for controller %d\n", i); | 182 | i); |
| 183 | goto err_cont; | 183 | goto err_cont; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| @@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
| 423 | for (i = 0; i < 6; i++) { | 423 | for (i = 0; i < 6; i++) { |
| 424 | r = &dev->resource[i]; | 424 | r = &dev->resource[i]; |
| 425 | if (r->flags & IORESOURCE_UNSET) { | 425 | if (r->flags & IORESOURCE_UNSET) { |
| 426 | pr_err("PCI: Device %s not available " | 426 | pr_err("PCI: Device %s not available because of resource collisions\n", |
| 427 | "because of resource collisions\n", | ||
| 428 | pci_name(dev)); | 427 | pci_name(dev)); |
| 429 | return -EINVAL; | 428 | return -EINVAL; |
| 430 | } | 429 | } |
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index e39f9c542807..47e048e31641 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c | |||
| @@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq) | |||
| 131 | 131 | ||
| 132 | count = cpumask_weight(&intr_cpus_map); | 132 | count = cpumask_weight(&intr_cpus_map); |
| 133 | if (unlikely(count == 0)) { | 133 | if (unlikely(count == 0)) { |
| 134 | pr_warning("intr_cpus_map empty, interrupts will be" | 134 | pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n"); |
| 135 | " delievered to dataplane tiles\n"); | ||
| 136 | return irq % (smp_height * smp_width); | 135 | return irq % (smp_height * smp_width); |
| 137 | } | 136 | } |
| 138 | 137 | ||
| @@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index) | |||
| 197 | /* Get the properties of the PCIe ports on this TRIO instance. */ | 196 | /* Get the properties of the PCIe ports on this TRIO instance. */ |
| 198 | ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]); | 197 | ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]); |
| 199 | if (ret < 0) { | 198 | if (ret < 0) { |
| 200 | pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," | 199 | pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n", |
| 201 | " on TRIO %d\n", ret, trio_index); | 200 | ret, trio_index); |
| 202 | goto get_port_property_failure; | 201 | goto get_port_property_failure; |
| 203 | } | 202 | } |
| 204 | 203 | ||
| 205 | context->mmio_base_mac = | 204 | context->mmio_base_mac = |
| 206 | iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE); | 205 | iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE); |
| 207 | if (context->mmio_base_mac == NULL) { | 206 | if (context->mmio_base_mac == NULL) { |
| 208 | pr_err("PCI: TRIO config space mapping failure, error %d," | 207 | pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n", |
| 209 | " on TRIO %d\n", ret, trio_index); | 208 | ret, trio_index); |
| 210 | ret = -ENOMEM; | 209 | ret = -ENOMEM; |
| 211 | 210 | ||
| 212 | goto trio_mmio_mapping_failure; | 211 | goto trio_mmio_mapping_failure; |
| @@ -622,9 +621,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller) | |||
| 622 | dev_control.max_read_req_sz, | 621 | dev_control.max_read_req_sz, |
| 623 | mac); | 622 | mac); |
| 624 | if (err < 0) { | 623 | if (err < 0) { |
| 625 | pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " | 624 | pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n", |
| 626 | "MAC %d on TRIO %d\n", | 625 | mac, controller->trio_index); |
| 627 | mac, controller->trio_index); | ||
| 628 | } | 626 | } |
| 629 | } | 627 | } |
| 630 | 628 | ||
| @@ -720,27 +718,24 @@ int __init pcibios_init(void) | |||
| 720 | reg_offset); | 718 | reg_offset); |
| 721 | if (!port_status.dl_up) { | 719 | if (!port_status.dl_up) { |
| 722 | if (rc_delay[trio_index][mac]) { | 720 | if (rc_delay[trio_index][mac]) { |
| 723 | pr_info("Delaying PCIe RC TRIO init %d sec" | 721 | pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n", |
| 724 | " on MAC %d on TRIO %d\n", | ||
| 725 | rc_delay[trio_index][mac], mac, | 722 | rc_delay[trio_index][mac], mac, |
| 726 | trio_index); | 723 | trio_index); |
| 727 | msleep(rc_delay[trio_index][mac] * 1000); | 724 | msleep(rc_delay[trio_index][mac] * 1000); |
| 728 | } | 725 | } |
| 729 | ret = gxio_trio_force_rc_link_up(trio_context, mac); | 726 | ret = gxio_trio_force_rc_link_up(trio_context, mac); |
| 730 | if (ret < 0) | 727 | if (ret < 0) |
| 731 | pr_err("PCI: PCIE_FORCE_LINK_UP failure, " | 728 | pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n", |
| 732 | "MAC %d on TRIO %d\n", mac, trio_index); | 729 | mac, trio_index); |
| 733 | } | 730 | } |
| 734 | 731 | ||
| 735 | pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, | 732 | pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", |
| 736 | trio_index, controller->mac); | 733 | i, trio_index, controller->mac); |
| 737 | 734 | ||
| 738 | /* Delay the bus probe if needed. */ | 735 | /* Delay the bus probe if needed. */ |
| 739 | if (rc_delay[trio_index][mac]) { | 736 | if (rc_delay[trio_index][mac]) { |
| 740 | pr_info("Delaying PCIe RC bus enumerating %d sec" | 737 | pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n", |
| 741 | " on MAC %d on TRIO %d\n", | 738 | rc_delay[trio_index][mac], mac, trio_index); |
| 742 | rc_delay[trio_index][mac], mac, | ||
| 743 | trio_index); | ||
| 744 | msleep(rc_delay[trio_index][mac] * 1000); | 739 | msleep(rc_delay[trio_index][mac] * 1000); |
| 745 | } else { | 740 | } else { |
| 746 | /* | 741 | /* |
| @@ -758,11 +753,10 @@ int __init pcibios_init(void) | |||
| 758 | if (pcie_ports[trio_index].ports[mac].removable) { | 753 | if (pcie_ports[trio_index].ports[mac].removable) { |
| 759 | pr_info("PCI: link is down, MAC %d on TRIO %d\n", | 754 | pr_info("PCI: link is down, MAC %d on TRIO %d\n", |
| 760 | mac, trio_index); | 755 | mac, trio_index); |
| 761 | pr_info("This is expected if no PCIe card" | 756 | pr_info("This is expected if no PCIe card is connected to this link\n"); |
| 762 | " is connected to this link\n"); | ||
| 763 | } else | 757 | } else |
| 764 | pr_err("PCI: link is down, MAC %d on TRIO %d\n", | 758 | pr_err("PCI: link is down, MAC %d on TRIO %d\n", |
| 765 | mac, trio_index); | 759 | mac, trio_index); |
| 766 | continue; | 760 | continue; |
| 767 | } | 761 | } |
| 768 | 762 | ||
| @@ -829,8 +823,8 @@ int __init pcibios_init(void) | |||
| 829 | /* Alloc a PIO region for PCI config access per MAC. */ | 823 | /* Alloc a PIO region for PCI config access per MAC. */ |
| 830 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); | 824 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); |
| 831 | if (ret < 0) { | 825 | if (ret < 0) { |
| 832 | pr_err("PCI: PCI CFG PIO alloc failure for mac %d " | 826 | pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n", |
| 833 | "on TRIO %d, give up\n", mac, trio_index); | 827 | mac, trio_index); |
| 834 | 828 | ||
| 835 | continue; | 829 | continue; |
| 836 | } | 830 | } |
| @@ -842,8 +836,8 @@ int __init pcibios_init(void) | |||
| 842 | trio_context->pio_cfg_index[mac], | 836 | trio_context->pio_cfg_index[mac], |
| 843 | mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); | 837 | mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); |
| 844 | if (ret < 0) { | 838 | if (ret < 0) { |
| 845 | pr_err("PCI: PCI CFG PIO init failure for mac %d " | 839 | pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n", |
| 846 | "on TRIO %d, give up\n", mac, trio_index); | 840 | mac, trio_index); |
| 847 | 841 | ||
| 848 | continue; | 842 | continue; |
| 849 | } | 843 | } |
| @@ -865,7 +859,7 @@ int __init pcibios_init(void) | |||
| 865 | (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1))); | 859 | (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1))); |
| 866 | if (trio_context->mmio_base_pio_cfg[mac] == NULL) { | 860 | if (trio_context->mmio_base_pio_cfg[mac] == NULL) { |
| 867 | pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", | 861 | pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", |
| 868 | mac, trio_index); | 862 | mac, trio_index); |
| 869 | 863 | ||
| 870 | continue; | 864 | continue; |
| 871 | } | 865 | } |
| @@ -925,9 +919,8 @@ int __init pcibios_init(void) | |||
| 925 | /* Alloc a PIO region for PCI memory access for each RC port. */ | 919 | /* Alloc a PIO region for PCI memory access for each RC port. */ |
| 926 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); | 920 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); |
| 927 | if (ret < 0) { | 921 | if (ret < 0) { |
| 928 | pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, " | 922 | pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n", |
| 929 | "give up\n", controller->trio_index, | 923 | controller->trio_index, controller->mac); |
| 930 | controller->mac); | ||
| 931 | 924 | ||
| 932 | continue; | 925 | continue; |
| 933 | } | 926 | } |
| @@ -944,9 +937,8 @@ int __init pcibios_init(void) | |||
| 944 | 0, | 937 | 0, |
| 945 | 0); | 938 | 0); |
| 946 | if (ret < 0) { | 939 | if (ret < 0) { |
| 947 | pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, " | 940 | pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n", |
| 948 | "give up\n", controller->trio_index, | 941 | controller->trio_index, controller->mac); |
| 949 | controller->mac); | ||
| 950 | 942 | ||
| 951 | continue; | 943 | continue; |
| 952 | } | 944 | } |
| @@ -957,9 +949,8 @@ int __init pcibios_init(void) | |||
| 957 | */ | 949 | */ |
| 958 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); | 950 | ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); |
| 959 | if (ret < 0) { | 951 | if (ret < 0) { |
| 960 | pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, " | 952 | pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n", |
| 961 | "give up\n", controller->trio_index, | 953 | controller->trio_index, controller->mac); |
| 962 | controller->mac); | ||
| 963 | 954 | ||
| 964 | continue; | 955 | continue; |
| 965 | } | 956 | } |
| @@ -976,9 +967,8 @@ int __init pcibios_init(void) | |||
| 976 | 0, | 967 | 0, |
| 977 | HV_TRIO_PIO_FLAG_IO_SPACE); | 968 | HV_TRIO_PIO_FLAG_IO_SPACE); |
| 978 | if (ret < 0) { | 969 | if (ret < 0) { |
| 979 | pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, " | 970 | pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n", |
| 980 | "give up\n", controller->trio_index, | 971 | controller->trio_index, controller->mac); |
| 981 | controller->mac); | ||
| 982 | 972 | ||
| 983 | continue; | 973 | continue; |
| 984 | } | 974 | } |
| @@ -997,10 +987,9 @@ int __init pcibios_init(void) | |||
| 997 | ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0, | 987 | ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0, |
| 998 | 0); | 988 | 0); |
| 999 | if (ret < 0) { | 989 | if (ret < 0) { |
| 1000 | pr_err("PCI: Mem-Map alloc failure on TRIO %d " | 990 | pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n", |
| 1001 | "mac %d for MC %d, give up\n", | 991 | controller->trio_index, controller->mac, |
| 1002 | controller->trio_index, | 992 | j); |
| 1003 | controller->mac, j); | ||
| 1004 | 993 | ||
| 1005 | goto alloc_mem_map_failed; | 994 | goto alloc_mem_map_failed; |
| 1006 | } | 995 | } |
| @@ -1030,10 +1019,9 @@ int __init pcibios_init(void) | |||
| 1030 | j, | 1019 | j, |
| 1031 | GXIO_TRIO_ORDER_MODE_UNORDERED); | 1020 | GXIO_TRIO_ORDER_MODE_UNORDERED); |
| 1032 | if (ret < 0) { | 1021 | if (ret < 0) { |
| 1033 | pr_err("PCI: Mem-Map init failure on TRIO %d " | 1022 | pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n", |
| 1034 | "mac %d for MC %d, give up\n", | 1023 | controller->trio_index, controller->mac, |
| 1035 | controller->trio_index, | 1024 | j); |
| 1036 | controller->mac, j); | ||
| 1037 | 1025 | ||
| 1038 | goto alloc_mem_map_failed; | 1026 | goto alloc_mem_map_failed; |
| 1039 | } | 1027 | } |
| @@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
| 1510 | * Most PCIe endpoint devices do support 64-bit message addressing. | 1498 | * Most PCIe endpoint devices do support 64-bit message addressing. |
| 1511 | */ | 1499 | */ |
| 1512 | if (desc->msi_attrib.is_64 == 0) { | 1500 | if (desc->msi_attrib.is_64 == 0) { |
| 1513 | dev_printk(KERN_INFO, &pdev->dev, | 1501 | dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n"); |
| 1514 | "64-bit MSI message address not supported, " | ||
| 1515 | "falling back to legacy interrupts.\n"); | ||
| 1516 | 1502 | ||
| 1517 | ret = -ENOMEM; | 1503 | ret = -ENOMEM; |
| 1518 | goto is_64_failure; | 1504 | goto is_64_failure; |
| @@ -1549,11 +1535,8 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
| 1549 | /* SQ regions are out, allocate from map mem regions. */ | 1535 | /* SQ regions are out, allocate from map mem regions. */ |
| 1550 | mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); | 1536 | mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); |
| 1551 | if (mem_map < 0) { | 1537 | if (mem_map < 0) { |
| 1552 | dev_printk(KERN_INFO, &pdev->dev, | 1538 | dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n", |
| 1553 | "%s Mem-Map alloc failure. " | 1539 | desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); |
| 1554 | "Failed to initialize MSI interrupts. " | ||
| 1555 | "Falling back to legacy interrupts.\n", | ||
| 1556 | desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); | ||
| 1557 | ret = -ENOMEM; | 1540 | ret = -ENOMEM; |
| 1558 | goto msi_mem_map_alloc_failure; | 1541 | goto msi_mem_map_alloc_failure; |
| 1559 | } | 1542 | } |
| @@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) | |||
| 1580 | mem_map, mem_map_base, mem_map_limit, | 1563 | mem_map, mem_map_base, mem_map_limit, |
| 1581 | trio_context->asid); | 1564 | trio_context->asid); |
| 1582 | if (ret < 0) { | 1565 | if (ret < 0) { |
| 1583 | dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n"); | 1566 | dev_info(&pdev->dev, "HV MSI config failed\n"); |
| 1584 | 1567 | ||
| 1585 | goto hv_msi_config_failure; | 1568 | goto hv_msi_config_failure; |
| 1586 | } | 1569 | } |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0050cbc1d9de..48e5773dd0b7 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
| @@ -52,7 +52,7 @@ static int __init idle_setup(char *str) | |||
| 52 | return -EINVAL; | 52 | return -EINVAL; |
| 53 | 53 | ||
| 54 | if (!strcmp(str, "poll")) { | 54 | if (!strcmp(str, "poll")) { |
| 55 | pr_info("using polling idle threads.\n"); | 55 | pr_info("using polling idle threads\n"); |
| 56 | cpu_idle_poll_ctrl(true); | 56 | cpu_idle_poll_ctrl(true); |
| 57 | return 0; | 57 | return 0; |
| 58 | } else if (!strcmp(str, "halt")) { | 58 | } else if (!strcmp(str, "halt")) { |
| @@ -547,27 +547,25 @@ void show_regs(struct pt_regs *regs) | |||
| 547 | struct task_struct *tsk = validate_current(); | 547 | struct task_struct *tsk = validate_current(); |
| 548 | int i; | 548 | int i; |
| 549 | 549 | ||
| 550 | pr_err("\n"); | ||
| 551 | if (tsk != &corrupt_current) | 550 | if (tsk != &corrupt_current) |
| 552 | show_regs_print_info(KERN_ERR); | 551 | show_regs_print_info(KERN_ERR); |
| 553 | #ifdef __tilegx__ | 552 | #ifdef __tilegx__ |
| 554 | for (i = 0; i < 17; i++) | 553 | for (i = 0; i < 17; i++) |
| 555 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | 554 | pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n", |
| 556 | i, regs->regs[i], i+18, regs->regs[i+18], | 555 | i, regs->regs[i], i+18, regs->regs[i+18], |
| 557 | i+36, regs->regs[i+36]); | 556 | i+36, regs->regs[i+36]); |
| 558 | pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n", | 557 | pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n", |
| 559 | regs->regs[17], regs->regs[35], regs->tp); | 558 | regs->regs[17], regs->regs[35], regs->tp); |
| 560 | pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); | 559 | pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr); |
| 561 | #else | 560 | #else |
| 562 | for (i = 0; i < 13; i++) | 561 | for (i = 0; i < 13; i++) |
| 563 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT | 562 | pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n", |
| 564 | " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | ||
| 565 | i, regs->regs[i], i+14, regs->regs[i+14], | 563 | i, regs->regs[i], i+14, regs->regs[i+14], |
| 566 | i+27, regs->regs[i+27], i+40, regs->regs[i+40]); | 564 | i+27, regs->regs[i+27], i+40, regs->regs[i+40]); |
| 567 | pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", | 565 | pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n", |
| 568 | regs->regs[13], regs->tp, regs->sp, regs->lr); | 566 | regs->regs[13], regs->tp, regs->sp, regs->lr); |
| 569 | #endif | 567 | #endif |
| 570 | pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", | 568 | pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n", |
| 571 | regs->pc, regs->ex1, regs->faultnum); | 569 | regs->pc, regs->ex1, regs->faultnum); |
| 572 | 570 | ||
| 573 | dump_stack_regs(regs); | 571 | dump_stack_regs(regs); |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index b9736ded06f2..f183f1c92b4f 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
| @@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str) | |||
| 130 | 130 | ||
| 131 | maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); | 131 | maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); |
| 132 | pr_info("Forcing RAM used to no more than %dMB\n", | 132 | pr_info("Forcing RAM used to no more than %dMB\n", |
| 133 | maxmem_pfn >> (20 - PAGE_SHIFT)); | 133 | maxmem_pfn >> (20 - PAGE_SHIFT)); |
| 134 | return 0; | 134 | return 0; |
| 135 | } | 135 | } |
| 136 | early_param("maxmem", setup_maxmem); | 136 | early_param("maxmem", setup_maxmem); |
| @@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str) | |||
| 149 | maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << | 149 | maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << |
| 150 | (HPAGE_SHIFT - PAGE_SHIFT); | 150 | (HPAGE_SHIFT - PAGE_SHIFT); |
| 151 | pr_info("Forcing RAM used on node %ld to no more than %dMB\n", | 151 | pr_info("Forcing RAM used on node %ld to no more than %dMB\n", |
| 152 | node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); | 152 | node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); |
| 153 | return 0; | 153 | return 0; |
| 154 | } | 154 | } |
| 155 | early_param("maxnodemem", setup_maxnodemem); | 155 | early_param("maxnodemem", setup_maxnodemem); |
| @@ -417,8 +417,7 @@ static void __init setup_memory(void) | |||
| 417 | range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; | 417 | range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; |
| 418 | range.size -= (range.start - start_pa); | 418 | range.size -= (range.start - start_pa); |
| 419 | range.size &= HPAGE_MASK; | 419 | range.size &= HPAGE_MASK; |
| 420 | pr_err("Range not hugepage-aligned: %#llx..%#llx:" | 420 | pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n", |
| 421 | " now %#llx-%#llx\n", | ||
| 422 | start_pa, start_pa + orig_size, | 421 | start_pa, start_pa + orig_size, |
| 423 | range.start, range.start + range.size); | 422 | range.start, range.start + range.size); |
| 424 | } | 423 | } |
| @@ -437,8 +436,8 @@ static void __init setup_memory(void) | |||
| 437 | if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { | 436 | if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { |
| 438 | int max_size = maxnodemem_pfn[i]; | 437 | int max_size = maxnodemem_pfn[i]; |
| 439 | if (max_size > 0) { | 438 | if (max_size > 0) { |
| 440 | pr_err("Maxnodemem reduced node %d to" | 439 | pr_err("Maxnodemem reduced node %d to %d pages\n", |
| 441 | " %d pages\n", i, max_size); | 440 | i, max_size); |
| 442 | range.size = PFN_PHYS(max_size); | 441 | range.size = PFN_PHYS(max_size); |
| 443 | } else { | 442 | } else { |
| 444 | pr_err("Maxnodemem disabled node %d\n", i); | 443 | pr_err("Maxnodemem disabled node %d\n", i); |
| @@ -490,8 +489,8 @@ static void __init setup_memory(void) | |||
| 490 | NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); | 489 | NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); |
| 491 | if (end < pci_reserve_end_pfn + percpu_pages) { | 490 | if (end < pci_reserve_end_pfn + percpu_pages) { |
| 492 | end = pci_reserve_start_pfn; | 491 | end = pci_reserve_start_pfn; |
| 493 | pr_err("PCI mapping region reduced node %d to" | 492 | pr_err("PCI mapping region reduced node %d to %ld pages\n", |
| 494 | " %ld pages\n", i, end - start); | 493 | i, end - start); |
| 495 | } | 494 | } |
| 496 | } | 495 | } |
| 497 | #endif | 496 | #endif |
| @@ -556,10 +555,9 @@ static void __init setup_memory(void) | |||
| 556 | MAXMEM_PFN : mappable_physpages; | 555 | MAXMEM_PFN : mappable_physpages; |
| 557 | highmem_pages = (long) (physpages - lowmem_pages); | 556 | highmem_pages = (long) (physpages - lowmem_pages); |
| 558 | 557 | ||
| 559 | pr_notice("%ldMB HIGHMEM available.\n", | 558 | pr_notice("%ldMB HIGHMEM available\n", |
| 560 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); | 559 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); |
| 561 | pr_notice("%ldMB LOWMEM available.\n", | 560 | pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages)); |
| 562 | pages_to_mb(lowmem_pages)); | ||
| 563 | #else | 561 | #else |
| 564 | /* Set max_low_pfn based on what node 0 can directly address. */ | 562 | /* Set max_low_pfn based on what node 0 can directly address. */ |
| 565 | max_low_pfn = node_end_pfn[0]; | 563 | max_low_pfn = node_end_pfn[0]; |
| @@ -573,8 +571,8 @@ static void __init setup_memory(void) | |||
| 573 | max_pfn = MAXMEM_PFN; | 571 | max_pfn = MAXMEM_PFN; |
| 574 | node_end_pfn[0] = MAXMEM_PFN; | 572 | node_end_pfn[0] = MAXMEM_PFN; |
| 575 | } else { | 573 | } else { |
| 576 | pr_notice("%ldMB memory available.\n", | 574 | pr_notice("%ldMB memory available\n", |
| 577 | pages_to_mb(node_end_pfn[0])); | 575 | pages_to_mb(node_end_pfn[0])); |
| 578 | } | 576 | } |
| 579 | for (i = 1; i < MAX_NUMNODES; ++i) { | 577 | for (i = 1; i < MAX_NUMNODES; ++i) { |
| 580 | node_start_pfn[i] = 0; | 578 | node_start_pfn[i] = 0; |
| @@ -589,8 +587,7 @@ static void __init setup_memory(void) | |||
| 589 | if (pages) | 587 | if (pages) |
| 590 | high_memory = pfn_to_kaddr(node_end_pfn[i]); | 588 | high_memory = pfn_to_kaddr(node_end_pfn[i]); |
| 591 | } | 589 | } |
| 592 | pr_notice("%ldMB memory available.\n", | 590 | pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages)); |
| 593 | pages_to_mb(lowmem_pages)); | ||
| 594 | #endif | 591 | #endif |
| 595 | #endif | 592 | #endif |
| 596 | } | 593 | } |
| @@ -1540,8 +1537,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr) | |||
| 1540 | 1537 | ||
| 1541 | BUG_ON(pgd_addr_invalid(addr)); | 1538 | BUG_ON(pgd_addr_invalid(addr)); |
| 1542 | if (addr < VMALLOC_START || addr >= VMALLOC_END) | 1539 | if (addr < VMALLOC_START || addr >= VMALLOC_END) |
| 1543 | panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;" | 1540 | panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n", |
| 1544 | " try increasing CONFIG_VMALLOC_RESERVE\n", | ||
| 1545 | addr, VMALLOC_START, VMALLOC_END); | 1541 | addr, VMALLOC_START, VMALLOC_END); |
| 1546 | 1542 | ||
| 1547 | pgd = swapper_pg_dir + pgd_index(addr); | 1543 | pgd = swapper_pg_dir + pgd_index(addr); |
| @@ -1596,8 +1592,8 @@ void __init setup_per_cpu_areas(void) | |||
| 1596 | lowmem_va = (unsigned long)pfn_to_kaddr(pfn); | 1592 | lowmem_va = (unsigned long)pfn_to_kaddr(pfn); |
| 1597 | ptep = virt_to_kpte(lowmem_va); | 1593 | ptep = virt_to_kpte(lowmem_va); |
| 1598 | if (pte_huge(*ptep)) { | 1594 | if (pte_huge(*ptep)) { |
| 1599 | printk(KERN_DEBUG "early shatter of huge page" | 1595 | printk(KERN_DEBUG "early shatter of huge page at %#lx\n", |
| 1600 | " at %#lx\n", lowmem_va); | 1596 | lowmem_va); |
| 1601 | shatter_pmd((pmd_t *)ptep); | 1597 | shatter_pmd((pmd_t *)ptep); |
| 1602 | ptep = virt_to_kpte(lowmem_va); | 1598 | ptep = virt_to_kpte(lowmem_va); |
| 1603 | BUG_ON(pte_huge(*ptep)); | 1599 | BUG_ON(pte_huge(*ptep)); |
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 491669065ffb..bb0a9ce7ae23 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c | |||
| @@ -337,7 +337,6 @@ static void dump_mem(void __user *address) | |||
| 337 | int i, j, k; | 337 | int i, j, k; |
| 338 | int found_readable_mem = 0; | 338 | int found_readable_mem = 0; |
| 339 | 339 | ||
| 340 | pr_err("\n"); | ||
| 341 | if (!access_ok(VERIFY_READ, address, 1)) { | 340 | if (!access_ok(VERIFY_READ, address, 1)) { |
| 342 | pr_err("Not dumping at address 0x%lx (kernel address)\n", | 341 | pr_err("Not dumping at address 0x%lx (kernel address)\n", |
| 343 | (unsigned long)address); | 342 | (unsigned long)address); |
| @@ -359,7 +358,7 @@ static void dump_mem(void __user *address) | |||
| 359 | (unsigned long)address); | 358 | (unsigned long)address); |
| 360 | found_readable_mem = 1; | 359 | found_readable_mem = 1; |
| 361 | } | 360 | } |
| 362 | j = sprintf(line, REGFMT":", (unsigned long)addr); | 361 | j = sprintf(line, REGFMT ":", (unsigned long)addr); |
| 363 | for (k = 0; k < bytes_per_line; ++k) | 362 | for (k = 0; k < bytes_per_line; ++k) |
| 364 | j += sprintf(&line[j], " %02x", buf[k]); | 363 | j += sprintf(&line[j], " %02x", buf[k]); |
| 365 | pr_err("%s\n", line); | 364 | pr_err("%s\n", line); |
| @@ -403,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs, | |||
| 403 | case SIGFPE: | 402 | case SIGFPE: |
| 404 | case SIGSEGV: | 403 | case SIGSEGV: |
| 405 | case SIGBUS: | 404 | case SIGBUS: |
| 406 | pr_err("User crash: signal %d," | 405 | pr_err("User crash: signal %d, trap %ld, address 0x%lx\n", |
| 407 | " trap %ld, address 0x%lx\n", | ||
| 408 | sig, regs->faultnum, address); | 406 | sig, regs->faultnum, address); |
| 409 | show_regs(regs); | 407 | show_regs(regs); |
| 410 | dump_mem((void __user *)address); | 408 | dump_mem((void __user *)address); |
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index 6cb2ce31b5a2..862973074bf9 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c | |||
| @@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned( | |||
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | if (unaligned_printk || unaligned_fixup_count == 0) { | 224 | if (unaligned_printk || unaligned_fixup_count == 0) { |
| 225 | pr_info("Process %d/%s: PC %#lx: Fixup of" | 225 | pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n", |
| 226 | " unaligned %s at %#lx.\n", | ||
| 227 | current->pid, current->comm, regs->pc, | 226 | current->pid, current->comm, regs->pc, |
| 228 | (mem_op == MEMOP_LOAD || | 227 | mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ? |
| 229 | mem_op == MEMOP_LOAD_POSTINCR) ? | ||
| 230 | "load" : "store", | 228 | "load" : "store", |
| 231 | (unsigned long)addr); | 229 | (unsigned long)addr); |
| 232 | if (!unaligned_printk) { | 230 | if (!unaligned_printk) { |
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 0d59a1b60c74..20d52a98e171 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c | |||
| @@ -127,8 +127,7 @@ static __init int reset_init_affinity(void) | |||
| 127 | { | 127 | { |
| 128 | long rc = sched_setaffinity(current->pid, &init_affinity); | 128 | long rc = sched_setaffinity(current->pid, &init_affinity); |
| 129 | if (rc != 0) | 129 | if (rc != 0) |
| 130 | pr_warning("couldn't reset init affinity (%ld)\n", | 130 | pr_warn("couldn't reset init affinity (%ld)\n", rc); |
| 131 | rc); | ||
| 132 | return 0; | 131 | return 0; |
| 133 | } | 132 | } |
| 134 | late_initcall(reset_init_affinity); | 133 | late_initcall(reset_init_affinity); |
| @@ -174,7 +173,7 @@ static void start_secondary(void) | |||
| 174 | /* Indicate that we're ready to come up. */ | 173 | /* Indicate that we're ready to come up. */ |
| 175 | /* Must not do this before we're ready to receive messages */ | 174 | /* Must not do this before we're ready to receive messages */ |
| 176 | if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { | 175 | if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { |
| 177 | pr_warning("CPU#%d already started!\n", cpuid); | 176 | pr_warn("CPU#%d already started!\n", cpuid); |
| 178 | for (;;) | 177 | for (;;) |
| 179 | local_irq_enable(); | 178 | local_irq_enable(); |
| 180 | } | 179 | } |
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index c93977a62116..7ff5afdbd3aa 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c | |||
| @@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) | |||
| 387 | * then bust_spinlocks() spit out a space in front of us | 387 | * then bust_spinlocks() spit out a space in front of us |
| 388 | * and it will mess up our KERN_ERR. | 388 | * and it will mess up our KERN_ERR. |
| 389 | */ | 389 | */ |
| 390 | pr_err("\n"); | 390 | pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n", |
| 391 | pr_err("Starting stack dump of tid %d, pid %d (%s)" | ||
| 392 | " on cpu %d at cycle %lld\n", | ||
| 393 | kbt->task->pid, kbt->task->tgid, kbt->task->comm, | 391 | kbt->task->pid, kbt->task->tgid, kbt->task->comm, |
| 394 | raw_smp_processor_id(), get_cycles()); | 392 | raw_smp_processor_id(), get_cycles()); |
| 395 | } | 393 | } |
| @@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) | |||
| 411 | i++, address, namebuf, (unsigned long)(kbt->it.sp)); | 409 | i++, address, namebuf, (unsigned long)(kbt->it.sp)); |
| 412 | 410 | ||
| 413 | if (i >= 100) { | 411 | if (i >= 100) { |
| 414 | pr_err("Stack dump truncated" | 412 | pr_err("Stack dump truncated (%d frames)\n", i); |
| 415 | " (%d frames)\n", i); | ||
| 416 | break; | 413 | break; |
| 417 | } | 414 | } |
| 418 | } | 415 | } |
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index b854a1cd0079..d412b0856c0a 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c | |||
| @@ -98,8 +98,8 @@ void __init calibrate_delay(void) | |||
| 98 | { | 98 | { |
| 99 | loops_per_jiffy = get_clock_rate() / HZ; | 99 | loops_per_jiffy = get_clock_rate() / HZ; |
| 100 | pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", | 100 | pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", |
| 101 | loops_per_jiffy/(500000/HZ), | 101 | loops_per_jiffy / (500000 / HZ), |
| 102 | (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); | 102 | (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); |
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | /* Called fairly late in init/main.c, but before we go smp. */ | 105 | /* Called fairly late in init/main.c, but before we go smp. */ |
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 86900ccd4977..bf841ca517bb 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c | |||
| @@ -46,9 +46,9 @@ static int __init setup_unaligned_fixup(char *str) | |||
| 46 | return 0; | 46 | return 0; |
| 47 | 47 | ||
| 48 | pr_info("Fixups for unaligned data accesses are %s\n", | 48 | pr_info("Fixups for unaligned data accesses are %s\n", |
| 49 | unaligned_fixup >= 0 ? | 49 | unaligned_fixup >= 0 ? |
| 50 | (unaligned_fixup ? "enabled" : "disabled") : | 50 | (unaligned_fixup ? "enabled" : "disabled") : |
| 51 | "completely disabled"); | 51 | "completely disabled"); |
| 52 | return 1; | 52 | return 1; |
| 53 | } | 53 | } |
| 54 | __setup("unaligned_fixup=", setup_unaligned_fixup); | 54 | __setup("unaligned_fixup=", setup_unaligned_fixup); |
| @@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, | |||
| 305 | case INT_ILL: | 305 | case INT_ILL: |
| 306 | if (copy_from_user(&instr, (void __user *)regs->pc, | 306 | if (copy_from_user(&instr, (void __user *)regs->pc, |
| 307 | sizeof(instr))) { | 307 | sizeof(instr))) { |
| 308 | pr_err("Unreadable instruction for INT_ILL:" | 308 | pr_err("Unreadable instruction for INT_ILL: %#lx\n", |
| 309 | " %#lx\n", regs->pc); | 309 | regs->pc); |
| 310 | do_exit(SIGKILL); | 310 | do_exit(SIGKILL); |
| 311 | return; | 311 | return; |
| 312 | } | 312 | } |
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index c02ea2a45f67..7d9a83be0aca 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c | |||
| @@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, | |||
| 969 | unaligned_fixup_count++; | 969 | unaligned_fixup_count++; |
| 970 | 970 | ||
| 971 | if (unaligned_printk) { | 971 | if (unaligned_printk) { |
| 972 | pr_info("%s/%d. Unalign fixup for kernel access " | 972 | pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n", |
| 973 | "to userspace %lx.", | ||
| 974 | current->comm, current->pid, regs->regs[ra]); | 973 | current->comm, current->pid, regs->regs[ra]); |
| 975 | } | 974 | } |
| 976 | 975 | ||
| @@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, | |||
| 985 | .si_addr = (unsigned char __user *)0 | 984 | .si_addr = (unsigned char __user *)0 |
| 986 | }; | 985 | }; |
| 987 | if (unaligned_printk) | 986 | if (unaligned_printk) |
| 988 | pr_info("Unalign bundle: unexp @%llx, %llx", | 987 | pr_info("Unalign bundle: unexp @%llx, %llx\n", |
| 989 | (unsigned long long)regs->pc, | 988 | (unsigned long long)regs->pc, |
| 990 | (unsigned long long)bundle); | 989 | (unsigned long long)bundle); |
| 991 | 990 | ||
| @@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, | |||
| 1370 | frag.bundle = bundle; | 1369 | frag.bundle = bundle; |
| 1371 | 1370 | ||
| 1372 | if (unaligned_printk) { | 1371 | if (unaligned_printk) { |
| 1373 | pr_info("%s/%d, Unalign fixup: pc=%lx " | 1372 | pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n", |
| 1374 | "bundle=%lx %d %d %d %d %d %d %d %d.", | ||
| 1375 | current->comm, current->pid, | 1373 | current->comm, current->pid, |
| 1376 | (unsigned long)frag.pc, | 1374 | (unsigned long)frag.pc, |
| 1377 | (unsigned long)frag.bundle, | 1375 | (unsigned long)frag.bundle, |
| @@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, | |||
| 1380 | (int)y1_lr, (int)y1_br, (int)x1_add); | 1378 | (int)y1_lr, (int)y1_br, (int)x1_add); |
| 1381 | 1379 | ||
| 1382 | for (k = 0; k < n; k += 2) | 1380 | for (k = 0; k < n; k += 2) |
| 1383 | pr_info("[%d] %016llx %016llx", k, | 1381 | pr_info("[%d] %016llx %016llx\n", |
| 1384 | (unsigned long long)frag.insn[k], | 1382 | k, (unsigned long long)frag.insn[k], |
| 1385 | (unsigned long long)frag.insn[k+1]); | 1383 | (unsigned long long)frag.insn[k+1]); |
| 1386 | } | 1384 | } |
| 1387 | 1385 | ||
| @@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, | |||
| 1402 | .si_addr = (void __user *)&jit_code_area[idx] | 1400 | .si_addr = (void __user *)&jit_code_area[idx] |
| 1403 | }; | 1401 | }; |
| 1404 | 1402 | ||
| 1405 | pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx", | 1403 | pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n", |
| 1406 | current->pid, current->comm, | 1404 | current->pid, current->comm, |
| 1407 | (unsigned long long)&jit_code_area[idx]); | 1405 | (unsigned long long)&jit_code_area[idx]); |
| 1408 | 1406 | ||
| @@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum) | |||
| 1485 | /* If exception came from kernel, try fix it up. */ | 1483 | /* If exception came from kernel, try fix it up. */ |
| 1486 | if (fixup_exception(regs)) { | 1484 | if (fixup_exception(regs)) { |
| 1487 | if (unaligned_printk) | 1485 | if (unaligned_printk) |
| 1488 | pr_info("Unalign fixup: %d %llx @%llx", | 1486 | pr_info("Unalign fixup: %d %llx @%llx\n", |
| 1489 | (int)unaligned_fixup, | 1487 | (int)unaligned_fixup, |
| 1490 | (unsigned long long)regs->ex1, | 1488 | (unsigned long long)regs->ex1, |
| 1491 | (unsigned long long)regs->pc); | 1489 | (unsigned long long)regs->pc); |
| @@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum) | |||
| 1519 | }; | 1517 | }; |
| 1520 | 1518 | ||
| 1521 | if (unaligned_printk) | 1519 | if (unaligned_printk) |
| 1522 | pr_info("Unalign fixup: %d %llx @%llx", | 1520 | pr_info("Unalign fixup: %d %llx @%llx\n", |
| 1523 | (int)unaligned_fixup, | 1521 | (int)unaligned_fixup, |
| 1524 | (unsigned long long)regs->ex1, | 1522 | (unsigned long long)regs->ex1, |
| 1525 | (unsigned long long)regs->pc); | 1523 | (unsigned long long)regs->pc); |
| @@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum) | |||
| 1579 | 0); | 1577 | 0); |
| 1580 | 1578 | ||
| 1581 | if (IS_ERR((void __force *)user_page)) { | 1579 | if (IS_ERR((void __force *)user_page)) { |
| 1582 | pr_err("Out of kernel pages trying do_mmap.\n"); | 1580 | pr_err("Out of kernel pages trying do_mmap\n"); |
| 1583 | return; | 1581 | return; |
| 1584 | } | 1582 | } |
| 1585 | 1583 | ||
| 1586 | /* Save the address in the thread_info struct */ | 1584 | /* Save the address in the thread_info struct */ |
| 1587 | info->unalign_jit_base = user_page; | 1585 | info->unalign_jit_base = user_page; |
| 1588 | if (unaligned_printk) | 1586 | if (unaligned_printk) |
| 1589 | pr_info("Unalign bundle: %d:%d, allocate page @%llx", | 1587 | pr_info("Unalign bundle: %d:%d, allocate page @%llx\n", |
| 1590 | raw_smp_processor_id(), current->pid, | 1588 | raw_smp_processor_id(), current->pid, |
| 1591 | (unsigned long long)user_page); | 1589 | (unsigned long long)user_page); |
| 1592 | } | 1590 | } |
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 6c0571216a9d..565e25a98334 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c | |||
| @@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte) | |||
| 169 | while (pte_migrating(*pte)) { | 169 | while (pte_migrating(*pte)) { |
| 170 | barrier(); | 170 | barrier(); |
| 171 | if (++retries > bound) | 171 | if (++retries > bound) |
| 172 | panic("Hit migrating PTE (%#llx) and" | 172 | panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating", |
| 173 | " page PFN %#lx still migrating", | ||
| 174 | pte->val, pte_pfn(*pte)); | 173 | pte->val, pte_pfn(*pte)); |
| 175 | } | 174 | } |
| 176 | } | 175 | } |
| @@ -292,11 +291,10 @@ static int handle_page_fault(struct pt_regs *regs, | |||
| 292 | */ | 291 | */ |
| 293 | stack_offset = stack_pointer & (THREAD_SIZE-1); | 292 | stack_offset = stack_pointer & (THREAD_SIZE-1); |
| 294 | if (stack_offset < THREAD_SIZE / 8) { | 293 | if (stack_offset < THREAD_SIZE / 8) { |
| 295 | pr_alert("Potential stack overrun: sp %#lx\n", | 294 | pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer); |
| 296 | stack_pointer); | ||
| 297 | show_regs(regs); | 295 | show_regs(regs); |
| 298 | pr_alert("Killing current process %d/%s\n", | 296 | pr_alert("Killing current process %d/%s\n", |
| 299 | tsk->pid, tsk->comm); | 297 | tsk->pid, tsk->comm); |
| 300 | do_group_exit(SIGKILL); | 298 | do_group_exit(SIGKILL); |
| 301 | } | 299 | } |
| 302 | 300 | ||
| @@ -421,7 +419,7 @@ good_area: | |||
| 421 | } else if (write) { | 419 | } else if (write) { |
| 422 | #ifdef TEST_VERIFY_AREA | 420 | #ifdef TEST_VERIFY_AREA |
| 423 | if (!is_page_fault && regs->cs == KERNEL_CS) | 421 | if (!is_page_fault && regs->cs == KERNEL_CS) |
| 424 | pr_err("WP fault at "REGFMT"\n", regs->eip); | 422 | pr_err("WP fault at " REGFMT "\n", regs->eip); |
| 425 | #endif | 423 | #endif |
| 426 | if (!(vma->vm_flags & VM_WRITE)) | 424 | if (!(vma->vm_flags & VM_WRITE)) |
| 427 | goto bad_area; | 425 | goto bad_area; |
| @@ -519,16 +517,15 @@ no_context: | |||
| 519 | pte_t *pte = lookup_address(address); | 517 | pte_t *pte = lookup_address(address); |
| 520 | 518 | ||
| 521 | if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) | 519 | if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) |
| 522 | pr_crit("kernel tried to execute" | 520 | pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n", |
| 523 | " non-executable page - exploit attempt?" | 521 | current->uid); |
| 524 | " (uid: %d)\n", current->uid); | ||
| 525 | } | 522 | } |
| 526 | #endif | 523 | #endif |
| 527 | if (address < PAGE_SIZE) | 524 | if (address < PAGE_SIZE) |
| 528 | pr_alert("Unable to handle kernel NULL pointer dereference\n"); | 525 | pr_alert("Unable to handle kernel NULL pointer dereference\n"); |
| 529 | else | 526 | else |
| 530 | pr_alert("Unable to handle kernel paging request\n"); | 527 | pr_alert("Unable to handle kernel paging request\n"); |
| 531 | pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n", | 528 | pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n", |
| 532 | address, regs->pc); | 529 | address, regs->pc); |
| 533 | 530 | ||
| 534 | show_regs(regs); | 531 | show_regs(regs); |
| @@ -575,9 +572,10 @@ do_sigbus: | |||
| 575 | #ifndef __tilegx__ | 572 | #ifndef __tilegx__ |
| 576 | 573 | ||
| 577 | /* We must release ICS before panicking or we won't get anywhere. */ | 574 | /* We must release ICS before panicking or we won't get anywhere. */ |
| 578 | #define ics_panic(fmt, ...) do { \ | 575 | #define ics_panic(fmt, ...) \ |
| 579 | __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ | 576 | do { \ |
| 580 | panic(fmt, __VA_ARGS__); \ | 577 | __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ |
| 578 | panic(fmt, ##__VA_ARGS__); \ | ||
| 581 | } while (0) | 579 | } while (0) |
| 582 | 580 | ||
| 583 | /* | 581 | /* |
| @@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, | |||
| 615 | fault_num != INT_DTLB_ACCESS)) { | 613 | fault_num != INT_DTLB_ACCESS)) { |
| 616 | unsigned long old_pc = regs->pc; | 614 | unsigned long old_pc = regs->pc; |
| 617 | regs->pc = pc; | 615 | regs->pc = pc; |
| 618 | ics_panic("Bad ICS page fault args:" | 616 | ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx", |
| 619 | " old PC %#lx, fault %d/%d at %#lx\n", | ||
| 620 | old_pc, fault_num, write, address); | 617 | old_pc, fault_num, write, address); |
| 621 | } | 618 | } |
| 622 | 619 | ||
| @@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, | |||
| 669 | #endif | 666 | #endif |
| 670 | fixup = search_exception_tables(pc); | 667 | fixup = search_exception_tables(pc); |
| 671 | if (!fixup) | 668 | if (!fixup) |
| 672 | ics_panic("ICS atomic fault not in table:" | 669 | ics_panic("ICS atomic fault not in table: PC %#lx, fault %d", |
| 673 | " PC %#lx, fault %d", pc, fault_num); | 670 | pc, fault_num); |
| 674 | regs->pc = fixup->fixup; | 671 | regs->pc = fixup->fixup; |
| 675 | regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0); | 672 | regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0); |
| 676 | } | 673 | } |
| @@ -826,8 +823,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num, | |||
| 826 | 823 | ||
| 827 | set_thread_flag(TIF_ASYNC_TLB); | 824 | set_thread_flag(TIF_ASYNC_TLB); |
| 828 | if (async->fault_num != 0) { | 825 | if (async->fault_num != 0) { |
| 829 | panic("Second async fault %d;" | 826 | panic("Second async fault %d; old fault was %d (%#lx/%ld)", |
| 830 | " old fault was %d (%#lx/%ld)", | ||
| 831 | fault_num, async->fault_num, | 827 | fault_num, async->fault_num, |
| 832 | address, write); | 828 | address, write); |
| 833 | } | 829 | } |
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 33294fdc402e..cd3387370ebb 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c | |||
| @@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | |||
| 152 | cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); | 152 | cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); |
| 153 | cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); | 153 | cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); |
| 154 | 154 | ||
| 155 | pr_err("hv_flush_remote(%#llx, %#lx, %p [%s]," | 155 | pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", |
| 156 | " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", | ||
| 157 | cache_pa, cache_control, cache_cpumask, cache_buf, | 156 | cache_pa, cache_control, cache_cpumask, cache_buf, |
| 158 | (unsigned long)tlb_va, tlb_length, tlb_pgsize, | 157 | (unsigned long)tlb_va, tlb_length, tlb_pgsize, |
| 159 | tlb_cpumask, tlb_buf, | 158 | tlb_cpumask, tlb_buf, asids, asidcount, rc); |
| 160 | asids, asidcount, rc); | ||
| 161 | panic("Unsafe to continue."); | 159 | panic("Unsafe to continue."); |
| 162 | } | 160 | } |
| 163 | 161 | ||
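The hv_flush_remote() change above is the pattern applied throughout this patch: a printk format string that had been split across source lines is joined into a single literal, with the arguments realigned under it. The usual rationale is the kernel coding-style rule against breaking user-visible strings: with one literal, grepping the tree for a message seen in the log actually finds the call site. A stand-alone illustration (userspace, illustrative message text):

	#include <stdio.h>

	int main(void)
	{
		int rc = -5;

		/* split literal: prints the same text, but the message
		 * cannot be found with  grep -rn "flush failed with" . */
		fprintf(stderr, "flush failed "
				"with %d\n", rc);

		/* coalesced literal: the form this patch converts to */
		fprintf(stderr, "flush failed with %d\n", rc);
		return 0;
	}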
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index e514899e1100..3270e0019266 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c | |||
| @@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps) | |||
| 284 | int level, base_shift; | 284 | int level, base_shift; |
| 285 | 285 | ||
| 286 | if ((1UL << log_ps) != ps || (log_ps & 1) != 0) { | 286 | if ((1UL << log_ps) != ps || (log_ps & 1) != 0) { |
| 287 | pr_warn("Not enabling %ld byte huge pages;" | 287 | pr_warn("Not enabling %ld byte huge pages; must be a power of four\n", |
| 288 | " must be a power of four.\n", ps); | 288 | ps); |
| 289 | return -EINVAL; | 289 | return -EINVAL; |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | if (ps > 64*1024*1024*1024UL) { | 292 | if (ps > 64*1024*1024*1024UL) { |
| 293 | pr_warn("Not enabling %ld MB huge pages;" | 293 | pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n", |
| 294 | " largest legal value is 64 GB .\n", ps >> 20); | 294 | ps >> 20); |
| 295 | return -EINVAL; | 295 | return -EINVAL; |
| 296 | } else if (ps >= PUD_SIZE) { | 296 | } else if (ps >= PUD_SIZE) { |
| 297 | static long hv_jpage_size; | 297 | static long hv_jpage_size; |
| 298 | if (hv_jpage_size == 0) | 298 | if (hv_jpage_size == 0) |
| 299 | hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO); | 299 | hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO); |
| 300 | if (hv_jpage_size != PUD_SIZE) { | 300 | if (hv_jpage_size != PUD_SIZE) { |
| 301 | pr_warn("Not enabling >= %ld MB huge pages:" | 301 | pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n", |
| 302 | " hypervisor reports size %ld\n", | ||
| 303 | PUD_SIZE >> 20, hv_jpage_size); | 302 | PUD_SIZE >> 20, hv_jpage_size); |
| 304 | return -EINVAL; | 303 | return -EINVAL; |
| 305 | } | 304 | } |
| @@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps) | |||
| 320 | int shift_val = log_ps - base_shift; | 319 | int shift_val = log_ps - base_shift; |
| 321 | if (huge_shift[level] != 0) { | 320 | if (huge_shift[level] != 0) { |
| 322 | int old_shift = base_shift + huge_shift[level]; | 321 | int old_shift = base_shift + huge_shift[level]; |
| 323 | pr_warn("Not enabling %ld MB huge pages;" | 322 | pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n", |
| 324 | " already have size %ld MB.\n", | ||
| 325 | ps >> 20, (1UL << old_shift) >> 20); | 323 | ps >> 20, (1UL << old_shift) >> 20); |
| 326 | return -EINVAL; | 324 | return -EINVAL; |
| 327 | } | 325 | } |
| 328 | if (hv_set_pte_super_shift(level, shift_val) != 0) { | 326 | if (hv_set_pte_super_shift(level, shift_val) != 0) { |
| 329 | pr_warn("Not enabling %ld MB huge pages;" | 327 | pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n", |
| 330 | " no hypervisor support.\n", ps >> 20); | 328 | ps >> 20); |
| 331 | return -EINVAL; | 329 | return -EINVAL; |
| 332 | } | 330 | } |
| 333 | printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20); | 331 | printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20); |
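The first case rejected in __setup_hugepagesz() above, (1UL << log_ps) != ps || (log_ps & 1) != 0, is the "must be a power of four" test: the first clause requires exactly one set bit (a power of two) and the second requires that bit to sit at an even position, i.e. ps == 4^k. A small sketch of the same test, assuming log_ps is the index of the lowest set bit as computed earlier in that function (not visible in this hunk):

	#include <stdio.h>

	/* Returns 1 if ps is a power of four (4, 16, 64, ...), else 0. */
	static int is_power_of_four(unsigned long ps)
	{
		int log_ps;

		if (ps == 0)
			return 0;
		log_ps = __builtin_ctzl(ps);	/* index of lowest set bit */
		/* power of two, and the exponent is even */
		return (1UL << log_ps) == ps && (log_ps & 1) == 0;
	}

	int main(void)
	{
		printf("4MB:%d 16MB:%d 2MB:%d\n",
		       is_power_of_four(4UL << 20),	/* 2^22 -> 1 */
		       is_power_of_four(16UL << 20),	/* 2^24 -> 1 */
		       is_power_of_four(2UL << 20));	/* 2^21 -> 0 */
		return 0;
	}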
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index caa270165f86..be240cc4978d 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c | |||
| @@ -357,11 +357,11 @@ static int __init setup_ktext(char *str) | |||
| 357 | cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); | 357 | cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); |
| 358 | if (cpumask_weight(&ktext_mask) > 1) { | 358 | if (cpumask_weight(&ktext_mask) > 1) { |
| 359 | ktext_small = 1; | 359 | ktext_small = 1; |
| 360 | pr_info("ktext: using caching neighborhood %s " | 360 | pr_info("ktext: using caching neighborhood %s with small pages\n", |
| 361 | "with small pages\n", buf); | 361 | buf); |
| 362 | } else { | 362 | } else { |
| 363 | pr_info("ktext: caching on cpu %s with one huge page\n", | 363 | pr_info("ktext: caching on cpu %s with one huge page\n", |
| 364 | buf); | 364 | buf); |
| 365 | } | 365 | } |
| 366 | } | 366 | } |
| 367 | 367 | ||
| @@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
| 413 | int rc, i; | 413 | int rc, i; |
| 414 | 414 | ||
| 415 | if (ktext_arg_seen && ktext_hash) { | 415 | if (ktext_arg_seen && ktext_hash) { |
| 416 | pr_warning("warning: \"ktext\" boot argument ignored" | 416 | pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n"); |
| 417 | " if \"kcache_hash\" sets up text hash-for-home\n"); | ||
| 418 | ktext_small = 0; | 417 | ktext_small = 0; |
| 419 | } | 418 | } |
| 420 | 419 | ||
| 421 | if (kdata_arg_seen && kdata_hash) { | 420 | if (kdata_arg_seen && kdata_hash) { |
| 422 | pr_warning("warning: \"kdata\" boot argument ignored" | 421 | pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n"); |
| 423 | " if \"kcache_hash\" sets up data hash-for-home\n"); | ||
| 424 | } | 422 | } |
| 425 | 423 | ||
| 426 | if (kdata_huge && !hash_default) { | 424 | if (kdata_huge && !hash_default) { |
| 427 | pr_warning("warning: disabling \"kdata=huge\"; requires" | 425 | pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n"); |
| 428 | " kcache_hash=all or =allbutstack\n"); | ||
| 429 | kdata_huge = 0; | 426 | kdata_huge = 0; |
| 430 | } | 427 | } |
| 431 | 428 | ||
| @@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
| 470 | pte[pte_ofs] = pfn_pte(pfn, prot); | 467 | pte[pte_ofs] = pfn_pte(pfn, prot); |
| 471 | } else { | 468 | } else { |
| 472 | if (kdata_huge) | 469 | if (kdata_huge) |
| 473 | printk(KERN_DEBUG "pre-shattered huge" | 470 | printk(KERN_DEBUG "pre-shattered huge page at %#lx\n", |
| 474 | " page at %#lx\n", address); | 471 | address); |
| 475 | for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; | 472 | for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; |
| 476 | pfn++, pte_ofs++, address += PAGE_SIZE) { | 473 | pfn++, pte_ofs++, address += PAGE_SIZE) { |
| 477 | pgprot_t prot = init_pgprot(address); | 474 | pgprot_t prot = init_pgprot(address); |
| @@ -501,8 +498,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | |||
| 501 | pr_info("ktext: not using unavailable cpus %s\n", buf); | 498 | pr_info("ktext: not using unavailable cpus %s\n", buf); |
| 502 | } | 499 | } |
| 503 | if (cpumask_empty(&ktext_mask)) { | 500 | if (cpumask_empty(&ktext_mask)) { |
| 504 | pr_warning("ktext: no valid cpus; caching on %d.\n", | 501 | pr_warn("ktext: no valid cpus; caching on %d\n", |
| 505 | smp_processor_id()); | 502 | smp_processor_id()); |
| 506 | cpumask_copy(&ktext_mask, | 503 | cpumask_copy(&ktext_mask, |
| 507 | cpumask_of(smp_processor_id())); | 504 | cpumask_of(smp_processor_id())); |
| 508 | } | 505 | } |
| @@ -798,11 +795,9 @@ void __init mem_init(void) | |||
| 798 | #ifdef CONFIG_HIGHMEM | 795 | #ifdef CONFIG_HIGHMEM |
| 799 | /* check that fixmap and pkmap do not overlap */ | 796 | /* check that fixmap and pkmap do not overlap */ |
| 800 | if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) { | 797 | if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) { |
| 801 | pr_err("fixmap and kmap areas overlap" | 798 | pr_err("fixmap and kmap areas overlap - this will crash\n"); |
| 802 | " - this will crash\n"); | ||
| 803 | pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n", | 799 | pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n", |
| 804 | PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), | 800 | PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START); |
| 805 | FIXADDR_START); | ||
| 806 | BUG(); | 801 | BUG(); |
| 807 | } | 802 | } |
| 808 | #endif | 803 | #endif |
| @@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
| 926 | unsigned long addr = (unsigned long) begin; | 921 | unsigned long addr = (unsigned long) begin; |
| 927 | 922 | ||
| 928 | if (kdata_huge && !initfree) { | 923 | if (kdata_huge && !initfree) { |
| 929 | pr_warning("Warning: ignoring initfree=0:" | 924 | pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n"); |
| 930 | " incompatible with kdata=huge\n"); | ||
| 931 | initfree = 1; | 925 | initfree = 1; |
| 932 | } | 926 | } |
| 933 | end = (end + PAGE_SIZE - 1) & PAGE_MASK; | 927 | end = (end + PAGE_SIZE - 1) & PAGE_MASK; |
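The pr_warning to pr_warn conversions in this file, like those in the rest of the patch, are pure renames: in kernels of this era both names expand to the same printk(KERN_WARNING ...) call, with one defined as an alias of the other in include/linux/printk.h. Roughly (quoted from memory, so treat as approximate; pr_warning was later dropped from the tree entirely):

	#define pr_warning(fmt, ...) \
		printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_warn pr_warning	/* pr_warn is the preferred spelling */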
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 5e86eac4bfae..7bf2491a9c1f 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c | |||
| @@ -44,9 +44,7 @@ void show_mem(unsigned int filter) | |||
| 44 | { | 44 | { |
| 45 | struct zone *zone; | 45 | struct zone *zone; |
| 46 | 46 | ||
| 47 | pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu" | 47 | pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n", |
| 48 | " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu" | ||
| 49 | " pagecache:%lu swap:%lu\n", | ||
| 50 | (global_page_state(NR_ACTIVE_ANON) + | 48 | (global_page_state(NR_ACTIVE_ANON) + |
| 51 | global_page_state(NR_ACTIVE_FILE)), | 49 | global_page_state(NR_ACTIVE_FILE)), |
| 52 | (global_page_state(NR_INACTIVE_ANON) + | 50 | (global_page_state(NR_INACTIVE_ANON) + |
