author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 14:35:36 -0500
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 14:35:36 -0500
commit    4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree      a20c125b27740ec7b4c761b11d801108e1b316b2 /arch/tile/kernel
parent    47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent    98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/early_printk.c        |  19
-rw-r--r--  arch/tile/kernel/hardwall.c            |   6
-rw-r--r--  arch/tile/kernel/irq.c                 |  19
-rw-r--r--  arch/tile/kernel/kgdb.c                |   6
-rw-r--r--  arch/tile/kernel/kprobes.c             |   3
-rw-r--r--  arch/tile/kernel/machine_kexec.c       |  28
-rw-r--r--  arch/tile/kernel/messaging.c           |   9
-rw-r--r--  arch/tile/kernel/module.c              |  12
-rw-r--r--  arch/tile/kernel/pci.c                 |   7
-rw-r--r--  arch/tile/kernel/pci_gx.c              | 103
-rw-r--r--  arch/tile/kernel/perf_event.c          |  12
-rw-r--r--  arch/tile/kernel/process.c             |  18
-rw-r--r--  arch/tile/kernel/setup.c               |  84
-rw-r--r--  arch/tile/kernel/signal.c              |  20
-rw-r--r--  arch/tile/kernel/single_step.c         |  10
-rw-r--r--  arch/tile/kernel/smp.c                 |   2
-rw-r--r--  arch/tile/kernel/smpboot.c             |  11
-rw-r--r--  arch/tile/kernel/stack.c               |   7
-rw-r--r--  arch/tile/kernel/time.c                |  73
-rw-r--r--  arch/tile/kernel/traps.c               |  12
-rw-r--r--  arch/tile/kernel/unaligned.c           |  22
-rw-r--r--  arch/tile/kernel/vdso/vdso.lds.S       |   2
-rw-r--r--  arch/tile/kernel/vdso/vgettimeofday.c  | 176
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S         |   2
24 files changed, 359 insertions(+), 304 deletions(-)
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index b608e00e7f6d..aefb2c086726 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -43,13 +43,20 @@ static struct console early_hv_console = {
 
 void early_panic(const char *fmt, ...)
 {
-        va_list ap;
+        struct va_format vaf;
+        va_list args;
+
         arch_local_irq_disable_all();
-        va_start(ap, fmt);
-        early_printk("Kernel panic - not syncing: ");
-        early_vprintk(fmt, ap);
-        early_printk("\n");
-        va_end(ap);
+
+        va_start(args, fmt);
+
+        vaf.fmt = fmt;
+        vaf.va = &args;
+
+        early_printk("Kernel panic - not syncing: %pV", &vaf);
+
+        va_end(args);
+
         dump_stack();
         hv_halt();
 }
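The early_panic() rewrite above uses the kernel's %pV printk extension: the fmt/va_list pair is wrapped in a struct va_format so the "Kernel panic" prefix and the formatted message are emitted by one call instead of three. A minimal sketch of the same pattern (the helper name is hypothetical):

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void demo_note(const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            /* %pV expands vaf.fmt against vaf.va inside this single printk */
            printk(KERN_NOTICE "demo: %pV\n", &vaf);
            va_end(args);
    }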
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index aca6000bca75..c4646bb99342 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
          * to quiesce.
          */
         if (rect->teardown_in_progress) {
-                pr_notice("cpu %d: detected %s hardwall violation %#lx"
-                          " while teardown already in progress\n",
+                pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
                         cpu, hwt->name,
                         (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
                 goto done;
@@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt,
         struct thread_struct *ts = &task->thread;
 
         if (cpumask_weight(&task->cpus_allowed) != 1) {
-                pr_err("pid %d (%s) releasing %s hardwall with"
-                       " an affinity mask containing %d cpus!\n",
+                pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
                        task->pid, task->comm, hwt->name,
                        cpumask_weight(&task->cpus_allowed));
                 BUG();
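Both hardwall.c hunks are instances of the checkpatch rule against splitting user-visible strings across lines: adjacent string literals are concatenated at compile time, so the merged one-line form prints exactly the same bytes, but the full message can now be grepped for verbatim. A stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
            /* identical output: adjacent literals paste at translation time */
            printf("detected hardwall violation "
                   "while teardown already in progress\n");
            printf("detected hardwall violation while teardown already in progress\n");
            return 0;
    }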
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..22044fc691ef 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
-        int depth = __get_cpu_var(irq_depth)++;
+        int depth = __this_cpu_inc_return(irq_depth);
         unsigned long original_irqs;
         unsigned long remaining_irqs;
         struct pt_regs *old_regs;
@@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
         {
                 long sp = stack_pointer - (long) current_thread_info();
                 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-                        pr_emerg("tile_dev_intr: "
-                                 "stack overflow: %ld\n",
-                                 sp - sizeof(struct thread_info));
+                        pr_emerg("%s: stack overflow: %ld\n",
+                                 __func__, sp - sizeof(struct thread_info));
                         dump_stack();
                 }
         }
@@ -120,7 +119,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 
                 /* Count device irqs; Linux IPIs are counted elsewhere. */
                 if (irq != IRQ_RESCHEDULE)
-                        __get_cpu_var(irq_stat).irq_dev_intr_count++;
+                        __this_cpu_inc(irq_stat.irq_dev_intr_count);
 
                 generic_handle_irq(irq);
         }
@@ -130,10 +129,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
          * including any that were reenabled during interrupt
          * handling.
          */
-        if (depth == 0)
-                unmask_irqs(~__get_cpu_var(irq_disable_mask));
+        if (depth == 1)
+                unmask_irqs(~__this_cpu_read(irq_disable_mask));
 
-        __get_cpu_var(irq_depth)--;
+        __this_cpu_dec(irq_depth);
 
         /*
          * Track time spent against the current process again and
@@ -151,7 +150,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 static void tile_irq_chip_enable(struct irq_data *d)
 {
         get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
-        if (__get_cpu_var(irq_depth) == 0)
+        if (__this_cpu_read(irq_depth) == 0)
                 unmask_irqs(1UL << d->irq);
         put_cpu_var(irq_disable_mask);
 }
@@ -197,7 +196,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
197 */ 196 */
198static void tile_irq_chip_eoi(struct irq_data *d) 197static void tile_irq_chip_eoi(struct irq_data *d)
199{ 198{
200 if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq))) 199 if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
201 unmask_irqs(1UL << d->irq); 200 unmask_irqs(1UL << d->irq);
202} 201}
203 202
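Note the paired change in the tile_dev_intr() hunks above: __get_cpu_var(irq_depth)++ post-increments and yields the old value, while __this_cpu_inc_return() yields the new value, so the "outermost interrupt" test flips from depth == 0 to depth == 1 with unchanged behavior. A plain-C model of the equivalence (ignoring the per-cpu machinery):

    int irq_depth;

    int old_enter(void) { return irq_depth++; }  /* returns 0 on outermost entry */
    int new_enter(void) { return ++irq_depth; }  /* returns 1 on outermost entry */

    /* hence the old test "depth == 0" becomes the new test "depth == 1" */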
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
index 4cd88381a83e..ff5335ae050d 100644
--- a/arch/tile/kernel/kgdb.c
+++ b/arch/tile/kernel/kgdb.c
@@ -125,9 +125,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
-        int reg;
         struct pt_regs *thread_regs;
-        unsigned long *ptr = gdb_regs;
 
         if (task == NULL)
                 return;
@@ -136,9 +134,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
         memset(gdb_regs, 0, NUMREGBYTES);
 
         thread_regs = task_pt_regs(task);
-        for (reg = 0; reg <= TREG_LAST_GPR; reg++)
-                *(ptr++) = thread_regs->regs[reg];
-
+        memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
         gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
         gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
 }
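The memcpy() above relies on the general-purpose registers sitting as a contiguous array at the start of struct pt_regs, so one bulk copy can stand in for the removed per-register loop; pc and faultnum are still placed into their gdb_regs slots by hand. A build-time check of that layout assumption might look like this (hypothetical, not part of the patch):

    #include <linux/bug.h>
    #include <linux/ptrace.h>
    #include <linux/stddef.h>

    static inline void check_pt_regs_layout(void)
    {
            /* regs[] must be the leading member for the bulk copy to be valid */
            BUILD_BUG_ON(offsetof(struct pt_regs, regs) != 0);
    }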
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
index 27cdcacbe81d..f8a45c51e9e4 100644
--- a/arch/tile/kernel/kprobes.c
+++ b/arch/tile/kernel/kprobes.c
@@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
                 return -EINVAL;
 
         if (insn_has_control(*p->addr)) {
-                pr_notice("Kprobes for control instructions are not "
-                          "supported\n");
+                pr_notice("Kprobes for control instructions are not supported\n");
                 return -EINVAL;
         }
 
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index f0b54a934712..008aa2faef55 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs)
 int machine_kexec_prepare(struct kimage *image)
 {
         if (num_online_cpus() > 1) {
-                pr_warning("%s: detected attempt to kexec "
-                           "with num_online_cpus() > 1\n",
-                           __func__);
+                pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
+                        __func__);
                 return -ENOSYS;
         }
         if (image->type != KEXEC_TYPE_DEFAULT) {
-                pr_warning("%s: detected attempt to kexec "
-                           "with unsupported type: %d\n",
-                           __func__,
-                           image->type);
+                pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
+                        __func__, image->type);
                 return -ENOSYS;
         }
         return 0;
@@ -131,8 +128,8 @@ static unsigned char *kexec_bn2cl(void *pg)
          */
         csum = ip_compute_csum(pg, bhdrp->b_size);
         if (csum != 0) {
-                pr_warning("%s: bad checksum %#x (size %d)\n",
-                           __func__, csum, bhdrp->b_size);
+                pr_warn("%s: bad checksum %#x (size %d)\n",
+                        __func__, csum, bhdrp->b_size);
                 return 0;
         }
 
@@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg)
         while (*desc != '\0') {
                 desc++;
                 if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
-                        pr_info("%s: ran off end of page\n",
-                                __func__);
+                        pr_info("%s: ran off end of page\n", __func__);
                         return 0;
                 }
         }
@@ -195,20 +191,18 @@ static void kexec_find_and_set_command_line(struct kimage *image)
         }
 
         if (command_line != 0) {
-                pr_info("setting new command line to \"%s\"\n",
-                        command_line);
+                pr_info("setting new command line to \"%s\"\n", command_line);
 
                 hverr = hv_set_command_line(
                         (HV_VirtAddr) command_line, strlen(command_line));
                 kunmap_atomic(command_line);
         } else {
-                pr_info("%s: no command line found; making empty\n",
-                        __func__);
+                pr_info("%s: no command line found; making empty\n", __func__);
                 hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
         }
         if (hverr)
-                pr_warning("%s: hv_set_command_line returned error: %d\n",
-                           __func__, hverr);
+                pr_warn("%s: hv_set_command_line returned error: %d\n",
+                        __func__, hverr);
 }
 
 /*
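Every message change in this file is the same rename: pr_warning() and pr_warn() are the same printk(KERN_WARNING ...) macro, one being an alias of the other in include/linux/printk.h, and the tree is being standardized on the shorter spelling. Roughly (abridged sketch of the header, not a verbatim quote):

    /* include/linux/printk.h, abridged */
    #define pr_warning(fmt, ...) \
            printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_warn pr_warning   /* pr_warn is the preferred spelling */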
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..7475af3aacec 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
 void init_messaging(void)
 {
         /* Allocate storage for messages in kernel space */
-        HV_MsgState *state = &__get_cpu_var(msg_state);
+        HV_MsgState *state = this_cpu_ptr(&msg_state);
         int rc = hv_register_message_state(state);
         if (rc != HV_OK)
                 panic("hv_register_message_state: error %d", rc);
@@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
         {
                 long sp = stack_pointer - (long) current_thread_info();
                 if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-                        pr_emerg("hv_message_intr: "
-                                 "stack overflow: %ld\n",
-                                 sp - sizeof(struct thread_info));
+                        pr_emerg("%s: stack overflow: %ld\n",
+                                 __func__, sp - sizeof(struct thread_info));
                         dump_stack();
                 }
         }
@@ -96,7 +95,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
                         struct hv_driver_cb *cb =
                                 (struct hv_driver_cb *)him->intarg;
                         cb->callback(cb, him->intdata);
-                        __get_cpu_var(irq_stat).irq_hv_msg_count++;
+                        __this_cpu_inc(irq_stat.irq_hv_msg_count);
                 }
         }
 
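messaging.c shows the two shapes of the __get_cpu_var() conversion used throughout this series: taking the address of a per-cpu variable becomes this_cpu_ptr(&var), and a read-modify-write becomes a single __this_cpu_inc(), which the architecture can often implement as one instruction. A minimal sketch with a hypothetical per-cpu counter (callers must already have preemption disabled, as these interrupt paths do):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_count);  /* hypothetical */

    static void demo(void)
    {
            unsigned long *p = this_cpu_ptr(&demo_count); /* was &__get_cpu_var(demo_count) */
            __this_cpu_inc(demo_count);                   /* was __get_cpu_var(demo_count)++ */
            (void)p;
    }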
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index d19b13e3a59f..96447c9160a0 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -96,8 +96,8 @@ void module_free(struct module *mod, void *module_region)
 static int validate_hw2_last(long value, struct module *me)
 {
         if (((value << 16) >> 16) != value) {
-                pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
-                           me->name, value);
+                pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
+                        me->name, value);
                 return 0;
         }
         return 1;
@@ -210,10 +210,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
                 value -= (unsigned long) location; /* pc-relative */
                 value = (long) value >> 3;         /* count by instrs */
                 if (!validate_jumpoff(value)) {
-                        pr_warning("module %s: Out of range jump to"
-                                   " %#llx at %#llx (%p)\n", me->name,
-                                   sym->st_value + rel[i].r_addend,
-                                   rel[i].r_offset, location);
+                        pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
+                                me->name,
+                                sym->st_value + rel[i].r_addend,
+                                rel[i].r_offset, location);
                         return -ENOEXEC;
                 }
                 MUNGE(create_JumpOff_X1);
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 1f80a88c75a6..f70c7892fa25 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -178,8 +178,8 @@ int __init tile_pci_init(void)
                         continue;
                 hv_cfg_fd1 = tile_pcie_open(i, 1);
                 if (hv_cfg_fd1 < 0) {
-                        pr_err("PCI: Couldn't open config fd to HV "
-                                "for controller %d\n", i);
+                        pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
+                               i);
                         goto err_cont;
                 }
 
@@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
         for (i = 0; i < 6; i++) {
                 r = &dev->resource[i];
                 if (r->flags & IORESOURCE_UNSET) {
-                        pr_err("PCI: Device %s not available "
-                               "because of resource collisions\n",
+                        pr_err("PCI: Device %s not available because of resource collisions\n",
                                pci_name(dev));
                         return -EINVAL;
                 }
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index e39f9c542807..2c95f37ebbed 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq)
 
         count = cpumask_weight(&intr_cpus_map);
         if (unlikely(count == 0)) {
-                pr_warning("intr_cpus_map empty, interrupts will be"
-                           " delievered to dataplane tiles\n");
+                pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n");
                 return irq % (smp_height * smp_width);
         }
 
138 137
@@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index)
         /* Get the properties of the PCIe ports on this TRIO instance. */
         ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
         if (ret < 0) {
-                pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
-                       " on TRIO %d\n", ret, trio_index);
+                pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
+                       ret, trio_index);
                 goto get_port_property_failure;
         }
 
         context->mmio_base_mac =
                 iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
         if (context->mmio_base_mac == NULL) {
-                pr_err("PCI: TRIO config space mapping failure, error %d,"
-                       " on TRIO %d\n", ret, trio_index);
+                pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
+                       ret, trio_index);
                 ret = -ENOMEM;
 
                 goto trio_mmio_mapping_failure;
@@ -622,9 +621,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
                         dev_control.max_read_req_sz,
                         mac);
         if (err < 0) {
-                pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
-                        "MAC %d on TRIO %d\n",
-                        mac, controller->trio_index);
+                pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
+                       mac, controller->trio_index);
         }
 }
 
630 628
@@ -720,27 +718,24 @@ int __init pcibios_init(void)
                                 reg_offset);
                 if (!port_status.dl_up) {
                         if (rc_delay[trio_index][mac]) {
-                                pr_info("Delaying PCIe RC TRIO init %d sec"
-                                        " on MAC %d on TRIO %d\n",
+                                pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
                                         rc_delay[trio_index][mac], mac,
                                         trio_index);
                                 msleep(rc_delay[trio_index][mac] * 1000);
                         }
                         ret = gxio_trio_force_rc_link_up(trio_context, mac);
                         if (ret < 0)
-                                pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
-                                        "MAC %d on TRIO %d\n", mac, trio_index);
+                                pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
+                                       mac, trio_index);
                 }
 
-                pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
-                        trio_index, controller->mac);
+                pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
+                        i, trio_index, controller->mac);
 
                 /* Delay the bus probe if needed. */
                 if (rc_delay[trio_index][mac]) {
-                        pr_info("Delaying PCIe RC bus enumerating %d sec"
-                                " on MAC %d on TRIO %d\n",
-                                rc_delay[trio_index][mac], mac,
-                                trio_index);
+                        pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
+                                rc_delay[trio_index][mac], mac, trio_index);
                         msleep(rc_delay[trio_index][mac] * 1000);
                 } else {
                         /*
@@ -758,11 +753,10 @@ int __init pcibios_init(void)
                         if (pcie_ports[trio_index].ports[mac].removable) {
                                 pr_info("PCI: link is down, MAC %d on TRIO %d\n",
                                         mac, trio_index);
-                                pr_info("This is expected if no PCIe card"
-                                        " is connected to this link\n");
+                                pr_info("This is expected if no PCIe card is connected to this link\n");
                         } else
                                 pr_err("PCI: link is down, MAC %d on TRIO %d\n",
                                        mac, trio_index);
                         continue;
                 }
 
@@ -829,8 +823,8 @@ int __init pcibios_init(void)
                 /* Alloc a PIO region for PCI config access per MAC. */
                 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
                 if (ret < 0) {
-                        pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
-                                "on TRIO %d, give up\n", mac, trio_index);
+                        pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
+                               mac, trio_index);
 
                         continue;
                 }
@@ -842,8 +836,8 @@ int __init pcibios_init(void)
                                         trio_context->pio_cfg_index[mac],
                                         mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
                 if (ret < 0) {
-                        pr_err("PCI: PCI CFG PIO init failure for mac %d "
-                                "on TRIO %d, give up\n", mac, trio_index);
+                        pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
+                               mac, trio_index);
 
                         continue;
                 }
@@ -865,7 +859,7 @@ int __init pcibios_init(void)
                          (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
                 if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
                         pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
-                                mac, trio_index);
+                               mac, trio_index);
 
                         continue;
                 }
@@ -925,9 +919,8 @@ int __init pcibios_init(void)
                 /* Alloc a PIO region for PCI memory access for each RC port. */
                 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
                 if (ret < 0) {
-                        pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
-                                "give up\n", controller->trio_index,
-                                controller->mac);
+                        pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
+                               controller->trio_index, controller->mac);
 
                         continue;
                 }
@@ -944,9 +937,8 @@ int __init pcibios_init(void)
                                                     0,
                                                     0);
                 if (ret < 0) {
-                        pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
-                                "give up\n", controller->trio_index,
-                                controller->mac);
+                        pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
+                               controller->trio_index, controller->mac);
 
                         continue;
                 }
@@ -957,9 +949,8 @@ int __init pcibios_init(void)
                  */
                 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
                 if (ret < 0) {
-                        pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
-                                "give up\n", controller->trio_index,
-                                controller->mac);
+                        pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
+                               controller->trio_index, controller->mac);
 
                         continue;
                 }
@@ -976,9 +967,8 @@ int __init pcibios_init(void)
                                                     0,
                                                     HV_TRIO_PIO_FLAG_IO_SPACE);
                 if (ret < 0) {
-                        pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
-                                "give up\n", controller->trio_index,
-                                controller->mac);
+                        pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
+                               controller->trio_index, controller->mac);
 
                         continue;
                 }
@@ -997,10 +987,9 @@ int __init pcibios_init(void)
                         ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
                                                           0);
                         if (ret < 0) {
-                                pr_err("PCI: Mem-Map alloc failure on TRIO %d "
-                                        "mac %d for MC %d, give up\n",
-                                        controller->trio_index,
-                                        controller->mac, j);
+                                pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
+                                       controller->trio_index, controller->mac,
+                                       j);
 
                                 goto alloc_mem_map_failed;
                         }
@@ -1030,10 +1019,9 @@ int __init pcibios_init(void)
                                                           j,
                                                           GXIO_TRIO_ORDER_MODE_UNORDERED);
                         if (ret < 0) {
-                                pr_err("PCI: Mem-Map init failure on TRIO %d "
-                                        "mac %d for MC %d, give up\n",
-                                        controller->trio_index,
-                                        controller->mac, j);
+                                pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
+                                       controller->trio_index, controller->mac,
+                                       j);
 
                                 goto alloc_mem_map_failed;
                         }
@@ -1453,7 +1441,7 @@ static struct pci_ops tile_cfg_ops = {
 static unsigned int tilegx_msi_startup(struct irq_data *d)
 {
         if (d->msi_desc)
-                unmask_msi_irq(d);
+                pci_msi_unmask_irq(d);
 
         return 0;
 }
@@ -1465,14 +1453,14 @@ static void tilegx_msi_ack(struct irq_data *d)
 
 static void tilegx_msi_mask(struct irq_data *d)
 {
-        mask_msi_irq(d);
+        pci_msi_mask_irq(d);
         __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
 }
 
 static void tilegx_msi_unmask(struct irq_data *d)
 {
         __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
-        unmask_msi_irq(d);
+        pci_msi_unmask_irq(d);
 }
 
 static struct irq_chip tilegx_msi_chip = {
@@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
          * Most PCIe endpoint devices do support 64-bit message addressing.
          */
         if (desc->msi_attrib.is_64 == 0) {
-                dev_printk(KERN_INFO, &pdev->dev,
-                        "64-bit MSI message address not supported, "
-                        "falling back to legacy interrupts.\n");
+                dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");
 
                 ret = -ENOMEM;
                 goto is_64_failure;
@@ -1549,11 +1535,8 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
         /* SQ regions are out, allocate from map mem regions. */
         mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
         if (mem_map < 0) {
-                dev_printk(KERN_INFO, &pdev->dev,
-                        "%s Mem-Map alloc failure. "
-                        "Failed to initialize MSI interrupts. "
-                        "Falling back to legacy interrupts.\n",
-                        desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+                dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
+                         desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
                 ret = -ENOMEM;
                 goto msi_mem_map_alloc_failure;
         }
@@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
                                   mem_map, mem_map_base, mem_map_limit,
                                   trio_context->asid);
         if (ret < 0) {
-                dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
+                dev_info(&pdev->dev, "HV MSI config failed\n");
 
                 goto hv_msi_config_failure;
         }
@@ -1590,7 +1573,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
         msg.address_hi = msi_addr >> 32;
         msg.address_lo = msi_addr & 0xffffffff;
 
-        write_msi_msg(irq, &msg);
+        pci_write_msi_msg(irq, &msg);
         irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
         irq_set_handler_data(irq, controller);
 
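The MSI helper changes in this file track a 3.19-era rename in the PCI/MSI core: mask_msi_irq(), unmask_msi_irq() and write_msi_msg() became pci_msi_mask_irq(), pci_msi_unmask_irq() and pci_write_msi_msg(), with unchanged irq_data-based signatures, so an irq_chip can still point straight at them. Sketch of a chip wired to the new names (the chip itself is hypothetical):

    #include <linux/irq.h>
    #include <linux/msi.h>

    static struct irq_chip demo_msi_chip = {
            .name       = "demo-MSI",
            .irq_mask   = pci_msi_mask_irq,    /* was mask_msi_irq */
            .irq_unmask = pci_msi_unmask_irq,  /* was unmask_msi_irq */
    };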
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 2bf6c9c135c1..bb509cee3b59 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event)
  */
 static void tile_pmu_stop(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
 
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags)
  */
 static void tile_pmu_start(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int idx = event->hw.idx;
 
         if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags)
  */
 static int tile_pmu_add(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct hw_perf_event *hwc;
         unsigned long mask;
         int b, max_cnt;
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags)
  */
 static void tile_pmu_del(struct perf_event *event, int flags)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         int i;
 
         /*
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = {
 int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
 {
         struct perf_sample_data data;
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
         struct perf_event *event;
         struct hw_perf_event *hwc;
         u64 val;
         unsigned long status;
         int bit;
 
-        __get_cpu_var(perf_irqs)++;
+        __this_cpu_inc(perf_irqs);
 
         if (!atomic_read(&tile_active_events))
                 return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..48e5773dd0b7 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -52,7 +52,7 @@ static int __init idle_setup(char *str)
                 return -EINVAL;
 
         if (!strcmp(str, "poll")) {
-                pr_info("using polling idle threads.\n");
+                pr_info("using polling idle threads\n");
                 cpu_idle_poll_ctrl(true);
                 return 0;
         } else if (!strcmp(str, "halt")) {
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
 
 void arch_cpu_idle(void)
 {
-        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+        __this_cpu_write(irq_stat.idle_timestamp, jiffies);
         _cpu_idle();
 }
 
@@ -547,27 +547,25 @@ void show_regs(struct pt_regs *regs)
         struct task_struct *tsk = validate_current();
         int i;
 
-        pr_err("\n");
         if (tsk != &corrupt_current)
                 show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
         for (i = 0; i < 17; i++)
-                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+                pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
                        i, regs->regs[i], i+18, regs->regs[i+18],
                        i+36, regs->regs[i+36]);
-        pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
+        pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
                regs->regs[17], regs->regs[35], regs->tp);
-        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
+        pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
 #else
         for (i = 0; i < 13; i++)
-                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
-                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+                pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
                        i, regs->regs[i], i+14, regs->regs[i+14],
                        i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
-        pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
+        pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
                regs->regs[13], regs->tp, regs->sp, regs->lr);
 #endif
-        pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
+        pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n",
                regs->pc, regs->ex1, regs->faultnum);
 
         dump_stack_regs(regs);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..864eea69556d 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str)
 
         maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
         pr_info("Forcing RAM used to no more than %dMB\n",
-                maxmem_pfn >> (20 - PAGE_SHIFT));
+                maxmem_pfn >> (20 - PAGE_SHIFT));
         return 0;
 }
 early_param("maxmem", setup_maxmem);
@@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str)
         maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
                 (HPAGE_SHIFT - PAGE_SHIFT);
         pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
-                node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
+                node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
         return 0;
 }
 early_param("maxnodemem", setup_maxnodemem);
@@ -417,8 +417,7 @@ static void __init setup_memory(void)
                         range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
                         range.size -= (range.start - start_pa);
                         range.size &= HPAGE_MASK;
-                        pr_err("Range not hugepage-aligned: %#llx..%#llx:"
-                               " now %#llx-%#llx\n",
+                        pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
                                start_pa, start_pa + orig_size,
                                range.start, range.start + range.size);
                 }
@@ -437,8 +436,8 @@ static void __init setup_memory(void)
                 if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
                         int max_size = maxnodemem_pfn[i];
                         if (max_size > 0) {
-                                pr_err("Maxnodemem reduced node %d to"
-                                       " %d pages\n", i, max_size);
+                                pr_err("Maxnodemem reduced node %d to %d pages\n",
+                                       i, max_size);
                                 range.size = PFN_PHYS(max_size);
                         } else {
                                 pr_err("Maxnodemem disabled node %d\n", i);
@@ -490,8 +489,8 @@ static void __init setup_memory(void)
                         NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
                 if (end < pci_reserve_end_pfn + percpu_pages) {
                         end = pci_reserve_start_pfn;
-                        pr_err("PCI mapping region reduced node %d to"
-                               " %ld pages\n", i, end - start);
+                        pr_err("PCI mapping region reduced node %d to %ld pages\n",
+                               i, end - start);
                 }
         }
 #endif
@@ -534,11 +533,10 @@ static void __init setup_memory(void)
                 }
         }
         physpages -= dropped_pages;
-        pr_warning("Only using %ldMB memory;"
-                   " ignoring %ldMB.\n",
-                   physpages >> (20 - PAGE_SHIFT),
-                   dropped_pages >> (20 - PAGE_SHIFT));
-        pr_warning("Consider using a larger page size.\n");
+        pr_warn("Only using %ldMB memory - ignoring %ldMB\n",
+                physpages >> (20 - PAGE_SHIFT),
+                dropped_pages >> (20 - PAGE_SHIFT));
+        pr_warn("Consider using a larger page size\n");
         }
 #endif
 
556 MAXMEM_PFN : mappable_physpages; 554 MAXMEM_PFN : mappable_physpages;
557 highmem_pages = (long) (physpages - lowmem_pages); 555 highmem_pages = (long) (physpages - lowmem_pages);
558 556
559 pr_notice("%ldMB HIGHMEM available.\n", 557 pr_notice("%ldMB HIGHMEM available\n",
560 pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); 558 pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
561 pr_notice("%ldMB LOWMEM available.\n", 559 pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
562 pages_to_mb(lowmem_pages));
563#else 560#else
564 /* Set max_low_pfn based on what node 0 can directly address. */ 561 /* Set max_low_pfn based on what node 0 can directly address. */
565 max_low_pfn = node_end_pfn[0]; 562 max_low_pfn = node_end_pfn[0];
566 563
567#ifndef __tilegx__ 564#ifndef __tilegx__
568 if (node_end_pfn[0] > MAXMEM_PFN) { 565 if (node_end_pfn[0] > MAXMEM_PFN) {
569 pr_warning("Only using %ldMB LOWMEM.\n", 566 pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20);
570 MAXMEM>>20); 567 pr_warn("Use a HIGHMEM enabled kernel\n");
571 pr_warning("Use a HIGHMEM enabled kernel.\n");
572 max_low_pfn = MAXMEM_PFN; 568 max_low_pfn = MAXMEM_PFN;
573 max_pfn = MAXMEM_PFN; 569 max_pfn = MAXMEM_PFN;
574 node_end_pfn[0] = MAXMEM_PFN; 570 node_end_pfn[0] = MAXMEM_PFN;
575 } else { 571 } else {
576 pr_notice("%ldMB memory available.\n", 572 pr_notice("%ldMB memory available\n",
577 pages_to_mb(node_end_pfn[0])); 573 pages_to_mb(node_end_pfn[0]));
578 } 574 }
579 for (i = 1; i < MAX_NUMNODES; ++i) { 575 for (i = 1; i < MAX_NUMNODES; ++i) {
580 node_start_pfn[i] = 0; 576 node_start_pfn[i] = 0;
@@ -589,8 +585,7 @@ static void __init setup_memory(void)
                 if (pages)
                         high_memory = pfn_to_kaddr(node_end_pfn[i]);
         }
-        pr_notice("%ldMB memory available.\n",
-                  pages_to_mb(lowmem_pages));
+        pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
 #endif
 #endif
 }
@@ -1112,8 +1107,8 @@ static void __init load_hv_initrd(void)
         fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
         if (fd == HV_ENOENT) {
                 if (set_initramfs_file) {
-                        pr_warning("No such hvfs initramfs file '%s'\n",
-                                   initramfs_file);
+                        pr_warn("No such hvfs initramfs file '%s'\n",
+                                initramfs_file);
                         return;
                 } else {
                         /* Try old backwards-compatible name. */
@@ -1126,8 +1121,8 @@ static void __init load_hv_initrd(void)
         stat = hv_fs_fstat(fd);
         BUG_ON(stat.size < 0);
         if (stat.flags & HV_FS_ISDIR) {
-                pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
-                           initramfs_file);
+                pr_warn("Ignoring hvfs file '%s': it's a directory\n",
+                        initramfs_file);
                 return;
         }
         initrd = alloc_bootmem_pages(stat.size);
@@ -1185,9 +1180,8 @@ static void __init validate_hv(void)
         HV_Topology topology = hv_inquire_topology();
         BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
         if (topology.width != 1 || topology.height != 1) {
-                pr_warning("Warning: booting UP kernel on %dx%d grid;"
-                           " will ignore all but first tile.\n",
-                           topology.width, topology.height);
+                pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n",
+                        topology.width, topology.height);
         }
 #endif
 
@@ -1208,9 +1202,8 @@ static void __init validate_hv(void)
          * We use a struct cpumask for this, so it must be big enough.
          */
         if ((smp_height * smp_width) > nr_cpu_ids)
-                early_panic("Hypervisor %d x %d grid too big for Linux"
-                            " NR_CPUS %d\n", smp_height, smp_width,
-                            nr_cpu_ids);
+                early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
+                            smp_height, smp_width, nr_cpu_ids);
 #endif
 
         /*
@@ -1218,7 +1211,8 @@ static void __init validate_hv(void)
          * various asid variables to their appropriate initial states.
          */
         asid_range = hv_inquire_asid(0);
-        __get_cpu_var(current_asid) = min_asid = asid_range.start;
+        min_asid = asid_range.start;
+        __this_cpu_write(current_asid, min_asid);
         max_asid = asid_range.start + asid_range.size - 1;
 
         if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
@@ -1264,10 +1258,9 @@ static void __init validate_va(void)
 
         /* Kernel PCs must have their high bit set; see intvec.S. */
         if ((long)VMALLOC_START >= 0)
-                early_panic(
-                        "Linux VMALLOC region below the 2GB line (%#lx)!\n"
-                        "Reconfigure the kernel with smaller VMALLOC_RESERVE.\n",
-                        VMALLOC_START);
+                early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n"
+                            "Reconfigure the kernel with smaller VMALLOC_RESERVE\n",
+                            VMALLOC_START);
 #endif
 }
 
1273 1266
@@ -1394,7 +1387,7 @@ static void __init setup_cpu_maps(void)
 
 static int __init dataplane(char *str)
 {
-        pr_warning("WARNING: dataplane support disabled in this kernel\n");
+        pr_warn("WARNING: dataplane support disabled in this kernel\n");
         return 0;
 }
 
@@ -1412,8 +1405,8 @@ void __init setup_arch(char **cmdline_p)
         len = hv_get_command_line((HV_VirtAddr) boot_command_line,
                                   COMMAND_LINE_SIZE);
         if (boot_command_line[0])
-                pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
-                           boot_command_line);
+                pr_warn("WARNING: ignoring dynamic command line \"%s\"\n",
+                        boot_command_line);
         strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
         char *hv_cmdline;
@@ -1539,8 +1532,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
 
         BUG_ON(pgd_addr_invalid(addr));
         if (addr < VMALLOC_START || addr >= VMALLOC_END)
-                panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
-                      " try increasing CONFIG_VMALLOC_RESERVE\n",
+                panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
                       addr, VMALLOC_START, VMALLOC_END);
 
         pgd = swapper_pg_dir + pgd_index(addr);
@@ -1595,8 +1587,8 @@ void __init setup_per_cpu_areas(void)
                 lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
                 ptep = virt_to_kpte(lowmem_va);
                 if (pte_huge(*ptep)) {
-                        printk(KERN_DEBUG "early shatter of huge page"
-                               " at %#lx\n", lowmem_va);
+                        printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
+                               lowmem_va);
                         shatter_pmd((pmd_t *)ptep);
                         ptep = virt_to_kpte(lowmem_va);
                         BUG_ON(pte_huge(*ptep));
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 7c2fecc52177..bb0a9ce7ae23 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -45,8 +45,7 @@
 int restore_sigcontext(struct pt_regs *regs,
                        struct sigcontext __user *sc)
 {
-        int err = 0;
-        int i;
+        int err;
 
         /* Always make any pending restarted system calls return -EINTR */
         current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -57,9 +56,7 @@ int restore_sigcontext(struct pt_regs *regs,
          */
         BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
         BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
-
-        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-                err |= __get_user(regs->regs[i], &sc->gregs[i]);
+        err = __copy_from_user(regs, sc, sizeof(*regs));
 
         /* Ensure that the PL is always set to USER_PL. */
         regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
@@ -110,12 +107,7 @@ badframe:
 
 int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
 {
-        int i, err = 0;
-
-        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
-                err |= __put_user(regs->regs[i], &sc->gregs[i]);
-
-        return err;
+        return __copy_to_user(sc, regs, sizeof(*regs));
 }
 
 /*
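Both sigcontext hunks replace a per-register __get_user()/__put_user() loop with one bulk user copy; the BUILD_BUG_ON checks earlier in the file are what make this safe, since struct sigcontext must mirror struct pt_regs exactly for a byte copy to be equivalent. The save side reduces to a one-liner (sketch mirroring the patch; the __copy_to_user() return value is the number of uncopied bytes, 0 on success):

    #include <linux/bug.h>
    #include <linux/ptrace.h>
    #include <linux/uaccess.h>
    #include <asm/sigcontext.h>

    static int demo_setup_sigcontext(struct sigcontext __user *sc,
                                     struct pt_regs *regs)
    {
            BUILD_BUG_ON(sizeof(*sc) != sizeof(*regs)); /* layouts must match */
            return __copy_to_user(sc, regs, sizeof(*regs));
    }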
@@ -345,7 +337,6 @@ static void dump_mem(void __user *address)
         int i, j, k;
         int found_readable_mem = 0;
 
-        pr_err("\n");
         if (!access_ok(VERIFY_READ, address, 1)) {
                 pr_err("Not dumping at address 0x%lx (kernel address)\n",
                        (unsigned long)address);
@@ -367,7 +358,7 @@ static void dump_mem(void __user *address)
                                (unsigned long)address);
                         found_readable_mem = 1;
                 }
-                j = sprintf(line, REGFMT":", (unsigned long)addr);
+                j = sprintf(line, REGFMT ":", (unsigned long)addr);
                 for (k = 0; k < bytes_per_line; ++k)
                         j += sprintf(&line[j], " %02x", buf[k]);
                 pr_err("%s\n", line);
@@ -411,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs,
         case SIGFPE:
         case SIGSEGV:
         case SIGBUS:
-                pr_err("User crash: signal %d,"
-                       " trap %ld, address 0x%lx\n",
+                pr_err("User crash: signal %d, trap %ld, address 0x%lx\n",
                        sig, regs->faultnum, address);
                 show_regs(regs);
                 dump_mem((void __user *)address);
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..862973074bf9 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
         }
 
         if (unaligned_printk || unaligned_fixup_count == 0) {
-                pr_info("Process %d/%s: PC %#lx: Fixup of"
-                        " unaligned %s at %#lx.\n",
+                pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
                         current->pid, current->comm, regs->pc,
-                        (mem_op == MEMOP_LOAD ||
-                         mem_op == MEMOP_LOAD_POSTINCR) ?
+                        mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
                         "load" : "store",
                         (unsigned long)addr);
                 if (!unaligned_printk) {
@@ -740,7 +738,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
 
 void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 {
-        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+        unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
         struct thread_info *info = (void *)current_thread_info();
         int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
         unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +764,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 
 void single_step_once(struct pt_regs *regs)
 {
-        unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+        unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
         unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
 
         *ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 19eaa62d456a..d3c4ed780ce2 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(flush_icache_range);
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-        __get_cpu_var(irq_stat).irq_resched_count++;
+        __this_cpu_inc(irq_stat.irq_resched_count);
         scheduler_ipi();
 
         return IRQ_HANDLED;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..20d52a98e171 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
         int cpu = smp_processor_id();
         set_cpu_online(cpu, 1);
         set_cpu_present(cpu, 1);
-        __get_cpu_var(cpu_state) = CPU_ONLINE;
+        __this_cpu_write(cpu_state, CPU_ONLINE);
 
         init_messaging();
 }
@@ -127,8 +127,7 @@ static __init int reset_init_affinity(void)
 {
         long rc = sched_setaffinity(current->pid, &init_affinity);
         if (rc != 0)
-                pr_warning("couldn't reset init affinity (%ld)\n",
-                           rc);
+                pr_warn("couldn't reset init affinity (%ld)\n", rc);
         return 0;
 }
 late_initcall(reset_init_affinity);
@@ -158,7 +157,7 @@ static void start_secondary(void)
         /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
 
         /* Initialize the current asid for our first page table. */
-        __get_cpu_var(current_asid) = min_asid;
+        __this_cpu_write(current_asid, min_asid);
 
         /* Set up this thread as another owner of the init_mm */
         atomic_inc(&init_mm.mm_count);
@@ -174,7 +173,7 @@ static void start_secondary(void)
         /* Indicate that we're ready to come up. */
         /* Must not do this before we're ready to receive messages */
         if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
-                pr_warning("CPU#%d already started!\n", cpuid);
+                pr_warn("CPU#%d already started!\n", cpuid);
                 for (;;)
                         local_irq_enable();
         }
@@ -201,7 +200,7 @@ void online_secondary(void)
         notify_cpu_starting(smp_processor_id());
 
         set_cpu_online(smp_processor_id(), 1);
-        __get_cpu_var(cpu_state) = CPU_ONLINE;
+        __this_cpu_write(cpu_state, CPU_ONLINE);
 
         /* Set up tile-specific state for this cpu. */
         setup_cpu(0);
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c93977a62116..7ff5afdbd3aa 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
                  * then bust_spinlocks() spit out a space in front of us
                  * and it will mess up our KERN_ERR.
                  */
-                pr_err("\n");
-                pr_err("Starting stack dump of tid %d, pid %d (%s)"
-                       " on cpu %d at cycle %lld\n",
+                pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
                        kbt->task->pid, kbt->task->tgid, kbt->task->comm,
                        raw_smp_processor_id(), get_cycles());
         }
@@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
411 i++, address, namebuf, (unsigned long)(kbt->it.sp)); 409 i++, address, namebuf, (unsigned long)(kbt->it.sp));
412 410
413 if (i >= 100) { 411 if (i >= 100) {
414 pr_err("Stack dump truncated" 412 pr_err("Stack dump truncated (%d frames)\n", i);
415 " (%d frames)\n", i);
416 break; 413 break;
417 } 414 }
418 } 415 }
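
The stack.c hunks rejoin user-visible strings that had been wrapped to fit the 80-column limit. Kernel style keeps format strings on one physical line so messages stay greppable in the source; only the argument list wraps. A sketch of the convention:

#include <linux/printk.h>

static void demo_report_truncation(int frames)
{
	/* The string stays whole even if the line exceeds 80 columns;
	 * checkpatch exempts user-visible strings from the length limit. */
	pr_err("Stack dump truncated (%d frames)\n", frames);
}
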
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d8fbc289e680..d412b0856c0a 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -98,8 +98,8 @@ void __init calibrate_delay(void)
98{ 98{
99 loops_per_jiffy = get_clock_rate() / HZ; 99 loops_per_jiffy = get_clock_rate() / HZ;
100 pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", 100 pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n",
101 loops_per_jiffy/(500000/HZ), 101 loops_per_jiffy / (500000 / HZ),
102 (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); 102 (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
103} 103}
104 104
105/* Called fairly late in init/main.c, but before we go smp. */ 105/* Called fairly late in init/main.c, but before we go smp. */
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
162 162
163void setup_tile_timer(void) 163void setup_tile_timer(void)
164{ 164{
165 struct clock_event_device *evt = &__get_cpu_var(tile_timer); 165 struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
166 166
167 /* Fill in fields that are speed-specific. */ 167 /* Fill in fields that are speed-specific. */
168 clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC); 168 clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
182void do_timer_interrupt(struct pt_regs *regs, int fault_num) 182void do_timer_interrupt(struct pt_regs *regs, int fault_num)
183{ 183{
184 struct pt_regs *old_regs = set_irq_regs(regs); 184 struct pt_regs *old_regs = set_irq_regs(regs);
185 struct clock_event_device *evt = &__get_cpu_var(tile_timer); 185 struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
186 186
187 /* 187 /*
188 * Mask the timer interrupt here, since we are a oneshot timer 188 * Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
194 irq_enter(); 194 irq_enter();
195 195
196 /* Track interrupt count. */ 196 /* Track interrupt count. */
197 __get_cpu_var(irq_stat).irq_timer_count++; 197 __this_cpu_inc(irq_stat.irq_timer_count);
198 198
199 /* Call the generic timer handler */ 199 /* Call the generic timer handler */
200 evt->event_handler(evt); 200 evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
235 * We do not have to disable preemption here as each core has the same 235 * We do not have to disable preemption here as each core has the same
236 * clock frequency. 236 * clock frequency.
237 */ 237 */
238 struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); 238 struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
239 239
240 /* 240 /*
241 * as in clocksource.h and x86's timer.h, we split the calculation 241 * as in clocksource.h and x86's timer.h, we split the calculation
@@ -249,33 +249,52 @@ cycles_t ns2cycles(unsigned long nsecs)
249 249
250void update_vsyscall_tz(void) 250void update_vsyscall_tz(void)
251{ 251{
252 /* Userspace gettimeofday will spin while this value is odd. */ 252 write_seqcount_begin(&vdso_data->tz_seq);
253 ++vdso_data->tz_update_count;
254 smp_wmb();
255 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; 253 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
256 vdso_data->tz_dsttime = sys_tz.tz_dsttime; 254 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
257 smp_wmb(); 255 write_seqcount_end(&vdso_data->tz_seq);
258 ++vdso_data->tz_update_count;
259} 256}
260 257
261void update_vsyscall(struct timekeeper *tk) 258void update_vsyscall(struct timekeeper *tk)
262{ 259{
263 struct timespec *wtm = &tk->wall_to_monotonic; 260 if (tk->tkr.clock != &cycle_counter_cs)
264 struct clocksource *clock = tk->tkr.clock;
265
266 if (clock != &cycle_counter_cs)
267 return; 261 return;
268 262
269 /* Userspace gettimeofday will spin while this value is odd. */ 263 write_seqcount_begin(&vdso_data->tb_seq);
270 ++vdso_data->tb_update_count; 264
271 smp_wmb(); 265 vdso_data->cycle_last = tk->tkr.cycle_last;
272 vdso_data->xtime_tod_stamp = tk->tkr.cycle_last; 266 vdso_data->mask = tk->tkr.mask;
273 vdso_data->xtime_clock_sec = tk->xtime_sec; 267 vdso_data->mult = tk->tkr.mult;
274 vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec; 268 vdso_data->shift = tk->tkr.shift;
275 vdso_data->wtom_clock_sec = wtm->tv_sec; 269
276 vdso_data->wtom_clock_nsec = wtm->tv_nsec; 270 vdso_data->wall_time_sec = tk->xtime_sec;
277 vdso_data->mult = tk->tkr.mult; 271 vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
278 vdso_data->shift = tk->tkr.shift; 272
279 smp_wmb(); 273 vdso_data->monotonic_time_sec = tk->xtime_sec
280 ++vdso_data->tb_update_count; 274 + tk->wall_to_monotonic.tv_sec;
275 vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
276 + ((u64)tk->wall_to_monotonic.tv_nsec
277 << tk->tkr.shift);
278 while (vdso_data->monotonic_time_snsec >=
279 (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
280 vdso_data->monotonic_time_snsec -=
281 ((u64)NSEC_PER_SEC) << tk->tkr.shift;
282 vdso_data->monotonic_time_sec++;
283 }
284
285 vdso_data->wall_time_coarse_sec = tk->xtime_sec;
286 vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
287 tk->tkr.shift);
288
289 vdso_data->monotonic_time_coarse_sec =
290 vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
291 vdso_data->monotonic_time_coarse_nsec =
292 vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
293
294 while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
295 vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
296 vdso_data->monotonic_time_coarse_sec++;
297 }
298
299 write_seqcount_end(&vdso_data->tb_seq);
281} 300}
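
The update_vsyscall_tz()/update_vsyscall() rewrite above replaces hand-rolled odd/even update counters and explicit smp_wmb() pairs with a seqcount_t, whose begin/end helpers embed the barriers. A minimal sketch of the write side (demo_* names are illustrative; writers remain serialized externally, here by the timekeeping core):

#include <linux/seqlock.h>

static seqcount_t demo_tz_seq = SEQCNT_ZERO(demo_tz_seq);
static int demo_minuteswest, demo_dsttime;

static void demo_update_tz(int minuteswest, int dsttime)
{
	write_seqcount_begin(&demo_tz_seq);	/* count goes odd; readers spin */
	demo_minuteswest = minuteswest;
	demo_dsttime = dsttime;
	write_seqcount_end(&demo_tz_seq);	/* count goes even again */
}
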
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index f3ceb6308e42..bf841ca517bb 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -46,9 +46,9 @@ static int __init setup_unaligned_fixup(char *str)
46 return 0; 46 return 0;
47 47
48 pr_info("Fixups for unaligned data accesses are %s\n", 48 pr_info("Fixups for unaligned data accesses are %s\n",
49 unaligned_fixup >= 0 ? 49 unaligned_fixup >= 0 ?
50 (unaligned_fixup ? "enabled" : "disabled") : 50 (unaligned_fixup ? "enabled" : "disabled") :
51 "completely disabled"); 51 "completely disabled");
52 return 1; 52 return 1;
53} 53}
54__setup("unaligned_fixup=", setup_unaligned_fixup); 54__setup("unaligned_fixup=", setup_unaligned_fixup);
@@ -277,7 +277,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
277 if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */ 277 if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
278 return; 278 return;
279 if (fault_num >= 0 && 279 if (fault_num >= 0 &&
280 fault_num < sizeof(int_name)/sizeof(int_name[0]) && 280 fault_num < ARRAY_SIZE(int_name) &&
281 int_name[fault_num] != NULL) 281 int_name[fault_num] != NULL)
282 name = int_name[fault_num]; 282 name = int_name[fault_num];
283 else 283 else
@@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
305 case INT_ILL: 305 case INT_ILL:
306 if (copy_from_user(&instr, (void __user *)regs->pc, 306 if (copy_from_user(&instr, (void __user *)regs->pc,
307 sizeof(instr))) { 307 sizeof(instr))) {
308 pr_err("Unreadable instruction for INT_ILL:" 308 pr_err("Unreadable instruction for INT_ILL: %#lx\n",
309 " %#lx\n", regs->pc); 309 regs->pc);
310 do_exit(SIGKILL); 310 do_exit(SIGKILL);
311 return; 311 return;
312 } 312 }
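
ARRAY_SIZE() replaces the open-coded sizeof division in do_trap(). A sketch of the idiom with an illustrative table (demo_int_name is not the real one); the macro also fails to compile when handed a pointer instead of an array:

#include <linux/kernel.h>

static const char *const demo_int_name[] = { "ITLB_MISS", "DTLB_MISS", "ILL" };

static const char *demo_fault_name(int fault_num)
{
	/* ARRAY_SIZE(a) is sizeof(a) / sizeof((a)[0]) plus a compile-time
	 * check that 'a' really is an array and not a pointer. */
	if (fault_num >= 0 && fault_num < ARRAY_SIZE(demo_int_name) &&
	    demo_int_name[fault_num] != NULL)
		return demo_int_name[fault_num];
	return "Unknown interrupt";
}
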
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index c02ea2a45f67..7d9a83be0aca 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
969 unaligned_fixup_count++; 969 unaligned_fixup_count++;
970 970
971 if (unaligned_printk) { 971 if (unaligned_printk) {
972 pr_info("%s/%d. Unalign fixup for kernel access " 972 pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
973 "to userspace %lx.",
974 current->comm, current->pid, regs->regs[ra]); 973 current->comm, current->pid, regs->regs[ra]);
975 } 974 }
976 975
@@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
985 .si_addr = (unsigned char __user *)0 984 .si_addr = (unsigned char __user *)0
986 }; 985 };
987 if (unaligned_printk) 986 if (unaligned_printk)
988 pr_info("Unalign bundle: unexp @%llx, %llx", 987 pr_info("Unalign bundle: unexp @%llx, %llx\n",
989 (unsigned long long)regs->pc, 988 (unsigned long long)regs->pc,
990 (unsigned long long)bundle); 989 (unsigned long long)bundle);
991 990
@@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
1370 frag.bundle = bundle; 1369 frag.bundle = bundle;
1371 1370
1372 if (unaligned_printk) { 1371 if (unaligned_printk) {
1373 pr_info("%s/%d, Unalign fixup: pc=%lx " 1372 pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
1374 "bundle=%lx %d %d %d %d %d %d %d %d.",
1375 current->comm, current->pid, 1373 current->comm, current->pid,
1376 (unsigned long)frag.pc, 1374 (unsigned long)frag.pc,
1377 (unsigned long)frag.bundle, 1375 (unsigned long)frag.bundle,
@@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
1380 (int)y1_lr, (int)y1_br, (int)x1_add); 1378 (int)y1_lr, (int)y1_br, (int)x1_add);
1381 1379
1382 for (k = 0; k < n; k += 2) 1380 for (k = 0; k < n; k += 2)
1383 pr_info("[%d] %016llx %016llx", k, 1381 pr_info("[%d] %016llx %016llx\n",
1384 (unsigned long long)frag.insn[k], 1382 k, (unsigned long long)frag.insn[k],
1385 (unsigned long long)frag.insn[k+1]); 1383 (unsigned long long)frag.insn[k+1]);
1386 } 1384 }
1387 1385
@@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
1402 .si_addr = (void __user *)&jit_code_area[idx] 1400 .si_addr = (void __user *)&jit_code_area[idx]
1403 }; 1401 };
1404 1402
1405 pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx", 1403 pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
1406 current->pid, current->comm, 1404 current->pid, current->comm,
1407 (unsigned long long)&jit_code_area[idx]); 1405 (unsigned long long)&jit_code_area[idx]);
1408 1406
@@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
1485 /* If exception came from kernel, try fix it up. */ 1483 /* If exception came from kernel, try fix it up. */
1486 if (fixup_exception(regs)) { 1484 if (fixup_exception(regs)) {
1487 if (unaligned_printk) 1485 if (unaligned_printk)
1488 pr_info("Unalign fixup: %d %llx @%llx", 1486 pr_info("Unalign fixup: %d %llx @%llx\n",
1489 (int)unaligned_fixup, 1487 (int)unaligned_fixup,
1490 (unsigned long long)regs->ex1, 1488 (unsigned long long)regs->ex1,
1491 (unsigned long long)regs->pc); 1489 (unsigned long long)regs->pc);
@@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
1519 }; 1517 };
1520 1518
1521 if (unaligned_printk) 1519 if (unaligned_printk)
1522 pr_info("Unalign fixup: %d %llx @%llx", 1520 pr_info("Unalign fixup: %d %llx @%llx\n",
1523 (int)unaligned_fixup, 1521 (int)unaligned_fixup,
1524 (unsigned long long)regs->ex1, 1522 (unsigned long long)regs->ex1,
1525 (unsigned long long)regs->pc); 1523 (unsigned long long)regs->pc);
@@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
1579 0); 1577 0);
1580 1578
1581 if (IS_ERR((void __force *)user_page)) { 1579 if (IS_ERR((void __force *)user_page)) {
1582 pr_err("Out of kernel pages trying do_mmap.\n"); 1580 pr_err("Out of kernel pages trying do_mmap\n");
1583 return; 1581 return;
1584 } 1582 }
1585 1583
1586 /* Save the address in the thread_info struct */ 1584 /* Save the address in the thread_info struct */
1587 info->unalign_jit_base = user_page; 1585 info->unalign_jit_base = user_page;
1588 if (unaligned_printk) 1586 if (unaligned_printk)
1589 pr_info("Unalign bundle: %d:%d, allocate page @%llx", 1587 pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
1590 raw_smp_processor_id(), current->pid, 1588 raw_smp_processor_id(), current->pid,
1591 (unsigned long long)user_page); 1589 (unsigned long long)user_page);
1592 } 1590 }
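
The unaligned.c hunks mainly terminate pr_info()/pr_warn() format strings with '\n'. A one-line sketch of why the terminator matters (demo_log_fixup is illustrative):

#include <linux/printk.h>

static void demo_log_fixup(unsigned long pc)
{
	/* Terminate every message: without the '\n', a later printk from
	 * any context may be appended to this line in the log. */
	pr_info("Unalign fixup: pc=%#lx\n", pc);
}
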
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
index 041cd6c39c83..731529f3f06f 100644
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ b/arch/tile/kernel/vdso/vdso.lds.S
@@ -82,6 +82,8 @@ VERSION
82 __vdso_rt_sigreturn; 82 __vdso_rt_sigreturn;
83 __vdso_gettimeofday; 83 __vdso_gettimeofday;
84 gettimeofday; 84 gettimeofday;
85 __vdso_clock_gettime;
86 clock_gettime;
85 local:*; 87 local:*;
86 }; 88 };
87} 89}
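
With __vdso_clock_gettime and clock_gettime exported from the version script, the C library can bind clock_gettime() to the vDSO. A runnable userspace sketch of the effect (link with -lrt on pre-2.17 glibc); for the clock ids handled by the vDSO, no kernel entry is needed:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Resolved against the vDSO's clock_gettime when available. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
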
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index e933fb9fbf5c..8bb21eda07d8 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -15,6 +15,7 @@
15#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */ 15#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
16#include <linux/time.h> 16#include <linux/time.h>
17#include <asm/timex.h> 17#include <asm/timex.h>
18#include <asm/unistd.h>
18#include <asm/vdso.h> 19#include <asm/vdso.h>
19 20
20#if CHIP_HAS_SPLIT_CYCLE() 21#if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
35#define get_cycles get_cycles_inline 36#define get_cycles get_cycles_inline
36#endif 37#endif
37 38
39struct syscall_return_value {
40 long value;
41 long error;
42};
43
38/* 44/*
39 * Find out the vDSO data page address in the process address space. 45 * Find out the vDSO data page address in the process address space.
40 */ 46 */
@@ -50,59 +56,143 @@ inline unsigned long get_datapage(void)
50 return ret; 56 return ret;
51} 57}
52 58
53int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) 59static inline u64 vgetsns(struct vdso_data *vdso)
60{
61 return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
62}
63
64static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
65{
66 unsigned count;
67 u64 ns;
68
69 do {
70 count = read_seqcount_begin(&vdso->tb_seq);
71 ts->tv_sec = vdso->wall_time_sec;
72 ns = vdso->wall_time_snsec;
73 ns += vgetsns(vdso);
74 ns >>= vdso->shift;
75 } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
76
77 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
78 ts->tv_nsec = ns;
79
80 return 0;
81}
82
83static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
84{
85 unsigned count;
86 u64 ns;
87
88 do {
89 count = read_seqcount_begin(&vdso->tb_seq);
90 ts->tv_sec = vdso->monotonic_time_sec;
91 ns = vdso->monotonic_time_snsec;
92 ns += vgetsns(vdso);
93 ns >>= vdso->shift;
94 } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
95
96 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
97 ts->tv_nsec = ns;
98
99 return 0;
100}
101
102static inline int do_realtime_coarse(struct vdso_data *vdso,
103 struct timespec *ts)
104{
105 unsigned count;
106
107 do {
108 count = read_seqcount_begin(&vdso->tb_seq);
109 ts->tv_sec = vdso->wall_time_coarse_sec;
110 ts->tv_nsec = vdso->wall_time_coarse_nsec;
111 } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
112
113 return 0;
114}
115
116static inline int do_monotonic_coarse(struct vdso_data *vdso,
117 struct timespec *ts)
54{ 118{
55 cycles_t cycles; 119 unsigned count;
56 unsigned long count, sec, ns; 120
57 volatile struct vdso_data *vdso_data; 121 do {
122 count = read_seqcount_begin(&vdso->tb_seq);
123 ts->tv_sec = vdso->monotonic_time_coarse_sec;
124 ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
125 } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
126
127 return 0;
128}
129
130struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
131 struct timezone *tz)
132{
133 struct syscall_return_value ret = { 0, 0 };
134 unsigned count;
135 struct vdso_data *vdso = (struct vdso_data *)get_datapage();
58 136
59 vdso_data = (struct vdso_data *)get_datapage();
60 /* The use of the timezone is obsolete, normally tz is NULL. */ 137 /* The use of the timezone is obsolete, normally tz is NULL. */
61 if (unlikely(tz != NULL)) { 138 if (unlikely(tz != NULL)) {
62 while (1) { 139 do {
63 /* Spin until the update finish. */ 140 count = read_seqcount_begin(&vdso->tz_seq);
64 count = vdso_data->tz_update_count; 141 tz->tz_minuteswest = vdso->tz_minuteswest;
65 if (count & 1) 142 tz->tz_dsttime = vdso->tz_dsttime;
66 continue; 143 } while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
67
68 tz->tz_minuteswest = vdso_data->tz_minuteswest;
69 tz->tz_dsttime = vdso_data->tz_dsttime;
70
71 /* Check whether updated, read again if so. */
72 if (count == vdso_data->tz_update_count)
73 break;
74 }
75 } 144 }
76 145
77 if (unlikely(tv == NULL)) 146 if (unlikely(tv == NULL))
78 return 0; 147 return ret;
79
80 while (1) {
81 /* Spin until the update finish. */
82 count = vdso_data->tb_update_count;
83 if (count & 1)
84 continue;
85
86 sec = vdso_data->xtime_clock_sec;
87 cycles = get_cycles() - vdso_data->xtime_tod_stamp;
88 ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
89 ns >>= vdso_data->shift;
90
91 if (ns >= NSEC_PER_SEC) {
92 ns -= NSEC_PER_SEC;
93 sec += 1;
94 }
95
96 /* Check whether updated, read again if so. */
97 if (count == vdso_data->tb_update_count)
98 break;
99 }
100 148
101 tv->tv_sec = sec; 149 do_realtime(vdso, (struct timespec *)tv);
102 tv->tv_usec = ns / 1000; 150 tv->tv_usec /= 1000;
103 151
104 return 0; 152 return ret;
105} 153}
106 154
107int gettimeofday(struct timeval *tv, struct timezone *tz) 155int gettimeofday(struct timeval *tv, struct timezone *tz)
108 __attribute__((weak, alias("__vdso_gettimeofday"))); 156 __attribute__((weak, alias("__vdso_gettimeofday")));
157
158static struct syscall_return_value vdso_fallback_gettime(long clock,
159 struct timespec *ts)
160{
161 struct syscall_return_value ret;
162 __asm__ __volatile__ (
163 "swint1"
164 : "=R00" (ret.value), "=R01" (ret.error)
165 : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
166 : "r2", "r3", "r4", "r5", "r6", "r7",
167 "r8", "r9", "r11", "r12", "r13", "r14", "r15",
168 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
169 "r24", "r25", "r26", "r27", "r28", "r29", "memory");
170 return ret;
171}
172
173struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
174 struct timespec *ts)
175{
176 struct vdso_data *vdso = (struct vdso_data *)get_datapage();
177 struct syscall_return_value ret = { 0, 0 };
178
179 switch (clock) {
180 case CLOCK_REALTIME:
181 do_realtime(vdso, ts);
182 return ret;
183 case CLOCK_MONOTONIC:
184 do_monotonic(vdso, ts);
185 return ret;
186 case CLOCK_REALTIME_COARSE:
187 do_realtime_coarse(vdso, ts);
188 return ret;
189 case CLOCK_MONOTONIC_COARSE:
190 do_monotonic_coarse(vdso, ts);
191 return ret;
192 default:
193 return vdso_fallback_gettime(clock, ts);
194 }
195}
196
197int clock_gettime(clockid_t clock, struct timespec *ts)
198 __attribute__((weak, alias("__vdso_clock_gettime")));
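
The new reader loops in vgettimeofday.c pair with the seqcount writer in time.c: readers retry whenever a write overlapped their reads, and unknown clock ids fall back to the real syscall via swint1. A minimal sketch of the read side (demo_* names are illustrative):

#include <linux/seqlock.h>
#include <linux/types.h>

static seqcount_t demo_tb_seq = SEQCNT_ZERO(demo_tb_seq);
static u64 demo_sec, demo_snsec;

static void demo_read_time(u64 *sec, u64 *snsec)
{
	unsigned count;

	do {
		count = read_seqcount_begin(&demo_tb_seq);
		*sec = demo_sec;
		*snsec = demo_snsec;	/* nanoseconds, left-shifted by 'shift' */
	} while (read_seqcount_retry(&demo_tb_seq, count));
}
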
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index f1819423ffc9..0e059a0101ea 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -66,11 +66,9 @@ SECTIONS
66 66
67 . = ALIGN(PAGE_SIZE); 67 . = ALIGN(PAGE_SIZE);
68 __init_begin = .; 68 __init_begin = .;
69 VMLINUX_SYMBOL(_sinitdata) = .;
70 INIT_DATA_SECTION(16) :data =0 69 INIT_DATA_SECTION(16) :data =0
71 PERCPU_SECTION(L2_CACHE_BYTES) 70 PERCPU_SECTION(L2_CACHE_BYTES)
72 . = ALIGN(PAGE_SIZE); 71 . = ALIGN(PAGE_SIZE);
73 VMLINUX_SYMBOL(_einitdata) = .;
74 __init_end = .; 72 __init_end = .;
75 73
76 _sdata = .; /* Start of data section */ 74 _sdata = .; /* Start of data section */