author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 22:34:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 22:34:12 -0400
commit     08351fc6a75731226e1112fc7254542bd3a2912e (patch)
tree       8b25bd168e0663c766f0332c8be082aa7d6ed265 /arch/tile/kernel
parent     0df0914d414a504b975f3cc66ace0c16ef55b7f3 (diff)
parent     0dccb0489f9a5a13a33e828ab965aa49685d12f8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (27 commits)
  arch/tile: support newer binutils assembler shift semantics
  arch/tile: fix deadlock bugs in rwlock implementation
  drivers/edac: provide support for tile architecture
  tile on-chip network driver: sync up with latest fixes
  arch/tile: support 4KB page size as well as 64KB
  arch/tile: add some more VMSPLIT options and use consistent naming
  arch/tile: fix some comments and whitespace
  arch/tile: export some additional module symbols
  arch/tile: enhance existing finv_buffer_remote() routine
  arch/tile: fix two bugs in the backtracer code
  arch/tile: use extended assembly to inline __mb_incoherent()
  arch/tile: use a cleaner technique to enable interrupt for cpu_idle()
  arch/tile: sync up with <arch/sim.h> and <arch/sim_def.h> changes
  arch/tile: fix reversed test of strict_strtol() return value
  arch/tile: avoid a simulator warning during bootup
  arch/tile: export <asm/hardwall.h> to userspace
  arch/tile: warn and retry if an IPI is not accepted by the target cpu
  arch/tile: stop disabling INTCTRL_1 interrupts during hypervisor downcalls
  arch/tile: fix __ndelay etc to work better
  arch/tile: bug fix: exec'ed task thought it was still single-stepping
  ...

Fix up trivial conflict in arch/tile/kernel/vmlinux.lds.S (percpu alignment
vs section naming convention fix)
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/entry.S           22
-rw-r--r--  arch/tile/kernel/head_32.S         15
-rw-r--r--  arch/tile/kernel/intvec_32.S       74
-rw-r--r--  arch/tile/kernel/irq.c             38
-rw-r--r--  arch/tile/kernel/machine_kexec.c    7
-rw-r--r--  arch/tile/kernel/pci-dma.c         38
-rw-r--r--  arch/tile/kernel/process.c          6
-rw-r--r--  arch/tile/kernel/setup.c           20
-rw-r--r--  arch/tile/kernel/single_step.c     21
-rw-r--r--  arch/tile/kernel/smp.c             33
-rw-r--r--  arch/tile/kernel/stack.c           28
-rw-r--r--  arch/tile/kernel/time.c            10
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S      5
13 files changed, 163 insertions, 154 deletions
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index fd8dc42abdcb..431e9ae60488 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -38,12 +38,6 @@ STD_ENTRY(kernel_execve)
 	jrp lr
 	STD_ENDPROC(kernel_execve)
 
-/* Delay a fixed number of cycles. */
-STD_ENTRY(__delay)
-	{ addi r0, r0, -1; bnzt r0, . }
-	jrp lr
-	STD_ENDPROC(__delay)
-
 /*
  * We don't run this function directly, but instead copy it to a page
  * we map into every user process. See vdso_setup().
@@ -97,23 +91,17 @@ STD_ENTRY(smp_nap)
 
 /*
  * Enable interrupts racelessly and then nap until interrupted.
+ * Architecturally, we are guaranteed that enabling interrupts via
+ * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
  * This function's _cpu_idle_nap address is special; see intvec.S.
  * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
  * as a result return to the function that called _cpu_idle().
  */
 STD_ENTRY(_cpu_idle)
-	{
-	 lnk r0
-	 movei r1, KERNEL_PL
-	}
-	{
-	 addli r0, r0, _cpu_idle_nap - .
-	 mtspr INTERRUPT_CRITICAL_SECTION, r1
-	}
+	movei r1, 1
+	mtspr INTERRUPT_CRITICAL_SECTION, r1
 	IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
-	mtspr SPR_EX_CONTEXT_K_1, r1 /* Kernel PL, ICS clear */
-	mtspr SPR_EX_CONTEXT_K_0, r0
-	iret
+	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 90e7c4435693..1a39b7c1c87e 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -133,7 +133,7 @@ ENTRY(_start)
 	}
 	ENDPROC(_start)
 
-.section ".bss.page_aligned","w"
+__PAGE_ALIGNED_BSS
 	.align PAGE_SIZE
 ENTRY(empty_zero_page)
 	.fill PAGE_SIZE,1,0
@@ -145,10 +145,10 @@ ENTRY(empty_zero_page)
 	.endif
 	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
 	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
-	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
 	.endm
 
-.section ".data.page_aligned","wa"
+__PAGE_ALIGNED_DATA
 	.align PAGE_SIZE
 ENTRY(swapper_pg_dir)
 	/*
@@ -158,12 +158,14 @@ ENTRY(swapper_pg_dir)
 	 */
 	.set addr, 0
 	.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
-	PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE
+	PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+	      (1 << (HV_PTE_INDEX_WRITABLE - 32))
 	.set addr, addr + PGDIR_SIZE
 	.endr
 
 	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
-	PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE
+	PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+	      (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
 	.org swapper_pg_dir + HV_L1_SIZE
 	END(swapper_pg_dir)
 
@@ -176,6 +178,7 @@ ENTRY(swapper_pg_dir)
 	__INITDATA
 	.align CHIP_L2_LINE_SIZE()
 ENTRY(swapper_pgprot)
-	PTE 0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1
+	PTE 0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+	      (1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
 	.align CHIP_L2_LINE_SIZE()
 	END(swapper_pgprot)
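The (HV_PTE_INDEX_* - 32) expressions above reflect that each .word directive emits only the upper 32 bits of the 64-bit hypervisor PTE, so an attribute whose bit index is 32 or more lands in that word at its index minus 32. A small user-space sketch of the same arithmetic; the bit index used below is an assumed stand-in, not the real value from <hv/hypervisor.h>:

#include <stdint.h>
#include <stdio.h>

/* Assumed index, for illustration only; the real one comes from
 * <hv/hypervisor.h>. */
#define HV_PTE_INDEX_READABLE 35

int main(void)
{
	uint64_t pte = (uint64_t)1 << HV_PTE_INDEX_READABLE;
	uint32_t hi_word = (uint32_t)(pte >> 32);	/* what the second .word emits */

	printf("hi word %#x, 1 << (index - 32) = %#x\n",
	       hi_word, 1u << (HV_PTE_INDEX_READABLE - 32));
	return 0;
}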
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 5eed4a02bf62..fffcfa6b3a62 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,10 +32,6 @@
 # error "No support for kernel preemption currently"
 #endif
 
-#if INT_INTCTRL_K < 32 || INT_INTCTRL_K >= 48
-# error INT_INTCTRL_K coded to set high interrupt mask
-#endif
-
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
 
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -1199,46 +1195,6 @@ STD_ENTRY(interrupt_return)
 	STD_ENDPROC(interrupt_return)
 
 	/*
-	 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
-	 * before returning, so we can properly get more downcalls.
-	 */
-	.pushsection .text.handle_interrupt_downcall,"ax"
-handle_interrupt_downcall:
-	finish_interrupt_save handle_interrupt_downcall
-	check_single_stepping normal, .Ldispatch_downcall
-.Ldispatch_downcall:
-
-	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
-	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
-	{
-	 addi r30, r30, 4
-	 movei r31, INT_MASK(INT_INTCTRL_K)
-	}
-	{
-	 lw r20, r30
-	 nor r21, r31, zero
-	}
-	and r20, r20, r21
-	sw r30, r20
-
-	{
-	 jalr r0
-	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	}
-	FEEDBACK_REENTER(handle_interrupt_downcall)
-
-	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */
-	lw r20, r30
-	or r20, r20, r31
-	sw r30, r20
-
-	{
-	 movei r30, 0 /* not an NMI */
-	 j interrupt_return
-	}
-	STD_ENDPROC(handle_interrupt_downcall)
-
-	/*
 	 * Some interrupts don't check for single stepping
 	 */
 	.pushsection .text.handle_interrupt_no_single_step,"ax"
@@ -1600,7 +1556,10 @@ STD_ENTRY(_sys_clone)
 	.align 64
 	/* Align much later jump on the start of a cache line. */
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
-	nop; nop
+	nop
+#if PAGE_SIZE >= 0x10000
+	nop
+#endif
 #endif
 ENTRY(sys_cmpxchg)
 
@@ -1628,9 +1587,13 @@ ENTRY(sys_cmpxchg)
 	 * about aliasing among multiple mappings of the same physical page,
 	 * and we ignore the low 3 bits so we have one lock that covers
 	 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
-	 * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c.
+	 * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
 	 */
 
+#if (PAGE_OFFSET & 0xffff) != 0
+# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
+#endif
+
 #if ATOMIC_LOCKS_FOUND_VIA_TABLE()
 	{
 	 /* Check for unaligned input. */
@@ -1723,11 +1686,14 @@ ENTRY(sys_cmpxchg)
 	 lw r26, r0
 	}
 	{
-	 /* atomic_locks is page aligned so this suffices to get its addr. */
-	 auli r21, zero, hi16(atomic_locks)
+	 auli r21, zero, ha16(atomic_locks)
 
 	 bbns r23, .Lcmpxchg_badaddr
 	}
+#if PAGE_SIZE < 0x10000
+	/* atomic_locks is page-aligned so for big pages we don't need this. */
+	addli r21, r21, lo16(atomic_locks)
+#endif
 	{
 	 /*
 	  * Insert the hash bits into the page-aligned pointer.
@@ -1762,7 +1728,7 @@ ENTRY(sys_cmpxchg)
 
 	/*
 	 * Perform the actual cmpxchg or atomic_update.
-	 * Note that __futex_mark_unlocked() in uClibc relies on
+	 * Note that the system <arch/atomic.h> header relies on
 	 * atomic_update() to always perform an "mf", so don't make
 	 * it optional or conditional without modifying that code.
 	 */
@@ -2014,17 +1980,17 @@ int_unalign:
 #endif
 	int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
 	int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
-		 hv_message_intr, handle_interrupt_downcall
+		 hv_message_intr
 	int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
-		 tile_dev_intr, handle_interrupt_downcall
+		 tile_dev_intr
 	int_hand INT_I_ASID, I_ASID, bad_intr
 	int_hand INT_D_ASID, D_ASID, bad_intr
 	int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
-		 do_page_fault, handle_interrupt_downcall
+		 do_page_fault
 	int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
-		 do_page_fault, handle_interrupt_downcall
+		 do_page_fault
 	int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
-		 do_page_fault, handle_interrupt_downcall
+		 do_page_fault
 	int_hand INT_SN_CPL, SN_CPL, bad_intr
 	int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
 #if CHIP_HAS_AUX_PERF_COUNTERS()
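The comments in sys_cmpxchg tie the assembly to __atomic_hashed_lock() in lib/atomic_32.c: a lock is chosen from low address bits with the bottom 3 bits dropped, so a cmpxchg64() and a cmpxchg() on either 32-bit half of the same doubleword contend on one lock. A hedged user-space sketch of that idea only; the table size and hash below are assumed for illustration and differ in detail from the kernel's real formula:

#include <stdio.h>

#define NUM_LOCKS 1024			/* assumed table size, illustration only */
static int atomic_locks[NUM_LOCKS];	/* stand-in for the kernel's page-aligned array */

/* Drop the low 3 address bits, then index the lock table. */
static int *pick_lock(unsigned long uaddr)
{
	return &atomic_locks[(uaddr >> 3) & (NUM_LOCKS - 1)];
}

int main(void)
{
	unsigned long addr = 0x10007f8;

	/* The two 32-bit halves of one doubleword map to the same lock. */
	printf("same lock: %d\n", pick_lock(addr) == pick_lock(addr + 4));
	return 0;
}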
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 128805ef8f2c..0baa7580121f 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -176,43 +176,43 @@ void disable_percpu_irq(unsigned int irq)
 EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
-static void tile_irq_chip_mask(unsigned int irq)
+static void tile_irq_chip_mask(struct irq_data *d)
 {
-	mask_irqs(1UL << irq);
+	mask_irqs(1UL << d->irq);
 }
 
 /* Unmask an interrupt. */
-static void tile_irq_chip_unmask(unsigned int irq)
+static void tile_irq_chip_unmask(struct irq_data *d)
 {
-	unmask_irqs(1UL << irq);
+	unmask_irqs(1UL << d->irq);
 }
 
 /*
  * Clear an interrupt before processing it so that any new assertions
  * will trigger another irq.
  */
-static void tile_irq_chip_ack(unsigned int irq)
+static void tile_irq_chip_ack(struct irq_data *d)
 {
-	if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED)
-		clear_irqs(1UL << irq);
+	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
+		clear_irqs(1UL << d->irq);
 }
 
 /*
  * For per-cpu interrupts, we need to avoid unmasking any interrupts
  * that we disabled via disable_percpu_irq().
  */
-static void tile_irq_chip_eoi(unsigned int irq)
+static void tile_irq_chip_eoi(struct irq_data *d)
 {
-	if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq)))
-		unmask_irqs(1UL << irq);
+	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+		unmask_irqs(1UL << d->irq);
 }
 
 static struct irq_chip tile_irq_chip = {
 	.name = "tile_irq_chip",
-	.ack = tile_irq_chip_ack,
-	.eoi = tile_irq_chip_eoi,
-	.mask = tile_irq_chip_mask,
-	.unmask = tile_irq_chip_unmask,
+	.irq_ack = tile_irq_chip_ack,
+	.irq_eoi = tile_irq_chip_eoi,
+	.irq_mask = tile_irq_chip_mask,
+	.irq_unmask = tile_irq_chip_unmask,
 };
 
 void __init init_IRQ(void)
@@ -277,8 +277,10 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ", i);
@@ -288,7 +290,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action = action->next; action; action = action->next)
@@ -296,7 +298,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 	return 0;
 }
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 0d8b9e933487..e00d7179989e 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -240,8 +240,11 @@ static void setup_quasi_va_is_pa(void)
 	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
 	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
 
-	for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
-		pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
+	for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+		unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+		if (pfn_valid(pfn))
+			__set_pte(&pgtable[i], pfn_pte(pfn, pte));
+	}
 }
 
 
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 5ad5e13b0fa6..658752b2835e 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -86,6 +86,21 @@ EXPORT_SYMBOL(dma_free_coherent);
  * can count on nothing having been touched.
  */
 
+/* Flush a PA range from cache page by page. */
+static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
+	size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));
+
+	while ((ssize_t)size > 0) {
+		/* Flush the page. */
+		homecache_flush_cache(page++, 0);
+
+		/* Figure out if we need to continue on the next page. */
+		size -= bytesleft;
+		bytesleft = PAGE_SIZE;
+	}
+}
 
 /*
  * dma_map_single can be passed any memory address, and there appear
@@ -97,26 +112,12 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	struct page *page;
-	dma_addr_t dma_addr;
-	int thispage;
+	dma_addr_t dma_addr = __pa(ptr);
 
 	BUG_ON(!valid_dma_direction(direction));
 	WARN_ON(size == 0);
 
-	dma_addr = __pa(ptr);
-
-	/* We might have been handed a buffer that wraps a page boundary */
-	while ((int)size > 0) {
-		/* The amount to flush that's on this page */
-		thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
-		thispage = min((int)thispage, (int)size);
-		/* Is this valid for any page we could be handed? */
-		page = pfn_to_page(kaddr_to_pfn(ptr));
-		homecache_flush_cache(page, 0);
-		ptr += thispage;
-		size -= thispage;
-	}
+	__dma_map_pa_range(dma_addr, size);
 
 	return dma_addr;
 }
@@ -140,10 +141,8 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	WARN_ON(nents == 0 || sglist->length == 0);
 
 	for_each_sg(sglist, sg, nents, i) {
-		struct page *page;
 		sg->dma_address = sg_phys(sg);
-		page = pfn_to_page(sg->dma_address >> PAGE_SHIFT);
-		homecache_flush_cache(page, 0);
+		__dma_map_pa_range(sg->dma_address, sg->length);
 	}
 
 	return nents;
@@ -163,6 +162,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(direction));
 
+	BUG_ON(offset + size > PAGE_SIZE);
 	homecache_flush_cache(page, 0);
 
 	return page_to_pa(page) + offset;
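The new __dma_map_pa_range() helper walks every page a buffer touches: a possibly partial first page, then whole pages until the remaining size goes negative. A small user-space sketch of the same walk, with printf standing in for homecache_flush_cache() and an assumed 4 KB page size:

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

#define PAGE_SIZE 4096UL	/* assumed page size, for illustration */

/* Report each page number that the kernel helper would flush for
 * the range [addr, addr + size). */
static void walk_pages(unsigned long addr, size_t size)
{
	unsigned long pfn = addr / PAGE_SIZE;
	size_t bytesleft = PAGE_SIZE - (addr & (PAGE_SIZE - 1));

	while ((ssize_t)size > 0) {
		printf("flush page %lu\n", pfn++);
		size -= bytesleft;	/* first page may be partial */
		bytesleft = PAGE_SIZE;	/* every later page is whole */
	}
}

int main(void)
{
	/* A 0x300-byte buffer starting 0x100 bytes before a page
	 * boundary spans two pages, so two flushes are issued. */
	walk_pages(0x1f00, 0x300);
	return 0;
}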
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index e90eb53173b0..b9cd962e1d30 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -165,7 +165,7 @@ void free_thread_info(struct thread_info *info)
 		kfree(step_state);
 	}
 
-	free_page((unsigned long)info);
+	free_pages((unsigned long)info, THREAD_SIZE_ORDER);
 }
 
 static void save_arch_state(struct thread_struct *t);
@@ -574,6 +574,8 @@ SYSCALL_DEFINE4(execve, const char __user *, path,
 		goto out;
 	error = do_execve(filename, argv, envp, regs);
 	putname(filename);
+	if (error == 0)
+		single_step_execve();
 out:
 	return error;
 }
@@ -593,6 +595,8 @@ long compat_sys_execve(const char __user *path,
 		goto out;
 	error = compat_do_execve(filename, argv, envp, regs);
 	putname(filename);
+	if (error == 0)
+		single_step_execve();
 out:
 	return error;
 }
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index f18573643ed1..3696b1832566 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -59,6 +59,8 @@ unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
 unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
 unsigned long __initdata node_free_pfn[MAX_NUMNODES];
 
+static unsigned long __initdata node_percpu[MAX_NUMNODES];
+
 #ifdef CONFIG_HIGHMEM
 /* Page frame index of end of lowmem on each controller. */
 unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
@@ -554,7 +556,6 @@ static void __init setup_bootmem_allocator(void)
 	reserve_bootmem(crashk_res.start,
 			crashk_res.end - crashk_res.start + 1, 0);
 #endif
-
 }
 
 void *__init alloc_remap(int nid, unsigned long size)
@@ -568,11 +569,13 @@ void *__init alloc_remap(int nid, unsigned long size)
 
 static int __init percpu_size(void)
 {
-	int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	int size = __per_cpu_end - __per_cpu_start;
+	size += PERCPU_MODULE_RESERVE;
+	size += PERCPU_DYNAMIC_EARLY_SIZE;
+	if (size < PCPU_MIN_UNIT_SIZE)
+		size = PCPU_MIN_UNIT_SIZE;
+	size = roundup(size, PAGE_SIZE);
+
 	/* In several places we assume the per-cpu data fits on a huge page. */
 	BUG_ON(kdata_huge && size > HPAGE_SIZE);
 	return size;
@@ -589,7 +592,6 @@ static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
 static void __init zone_sizes_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
-	unsigned long node_percpu[MAX_NUMNODES] = { 0 };
 	int size = percpu_size();
 	int num_cpus = smp_height * smp_width;
 	int i;
@@ -674,7 +676,7 @@ static void __init zone_sizes_init(void)
 		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
 
 		free_area_init_node(i, zones_size, start, NULL);
-		printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n",
+		printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
 		       PFN_UP(node_percpu[i]));
 
 		/* Track the type of memory on each node */
@@ -1312,6 +1314,8 @@ static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 	BUG_ON(size % PAGE_SIZE != 0);
 	pfn_offset[nid] += size / PAGE_SIZE;
+	BUG_ON(node_percpu[nid] < size);
+	node_percpu[nid] -= size;
 	if (percpu_pfn[cpu] == 0)
 		percpu_pfn[cpu] = pfn;
 	return pfn_to_kaddr(pfn);
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 1eb3b39e36c7..84a729e06ec4 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -56,7 +56,7 @@ enum mem_op {
 	MEMOP_STORE_POSTINCR
 };
 
-static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset)
+static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
 {
 	tile_bundle_bits result;
 
@@ -254,6 +254,18 @@ P("\n");
 	return bundle;
 }
 
+/*
+ * Called after execve() has started the new image. This allows us
+ * to reset the info state. Note that the mmap'ed memory, if there
+ * was any, has already been unmapped by the exec.
+ */
+void single_step_execve(void)
+{
+	struct thread_info *ti = current_thread_info();
+	kfree(ti->step_state);
+	ti->step_state = NULL;
+}
+
 /**
  * single_step_once() - entry point when single stepping has been triggered.
  * @regs: The machine register state
@@ -373,7 +385,7 @@ void single_step_once(struct pt_regs *regs)
 	/* branches */
 	case BRANCH_OPCODE_X1:
 	{
-		int32_t offset = signExtend17(get_BrOff_X1(bundle));
+		s32 offset = signExtend17(get_BrOff_X1(bundle));
 
 		/*
 		 * For branches, we use a rewriting trick to let the
@@ -731,4 +743,9 @@ void single_step_once(struct pt_regs *regs)
 	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
 }
 
+void single_step_execve(void)
+{
+	/* Nothing */
+}
+
 #endif /* !__tilegx__ */
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 9575b37a8b75..a4293102ef81 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -36,6 +36,22 @@ static unsigned long __iomem *ipi_mappings[NR_CPUS];
 /* Set by smp_send_stop() to avoid recursive panics. */
 static int stopping_cpus;
 
+static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
+{
+	int sent = 0;
+	while (sent < nrecip) {
+		int rc = hv_send_message(recip, nrecip,
+					 (HV_VirtAddr)&tag, sizeof(tag));
+		if (rc < 0) {
+			if (!stopping_cpus)  /* avoid recursive panic */
+				panic("hv_send_message returned %d", rc);
+			break;
+		}
+		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
+		sent += rc;
+	}
+}
+
 void send_IPI_single(int cpu, int tag)
 {
 	HV_Recipient recip = {
@@ -43,14 +59,13 @@ void send_IPI_single(int cpu, int tag)
 		.x = cpu % smp_width,
 		.state = HV_TO_BE_SENT
 	};
-	int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
-	BUG_ON(rc <= 0);
+	__send_IPI_many(&recip, 1, tag);
 }
 
 void send_IPI_many(const struct cpumask *mask, int tag)
 {
 	HV_Recipient recip[NR_CPUS];
-	int cpu, sent;
+	int cpu;
 	int nrecip = 0;
 	int my_cpu = smp_processor_id();
 	for_each_cpu(cpu, mask) {
@@ -61,17 +76,7 @@ void send_IPI_many(const struct cpumask *mask, int tag)
 		r->x = cpu % smp_width;
 		r->state = HV_TO_BE_SENT;
 	}
-	sent = 0;
-	while (sent < nrecip) {
-		int rc = hv_send_message(recip, nrecip,
-					 (HV_VirtAddr)&tag, sizeof(tag));
-		if (rc <= 0) {
-			if (!stopping_cpus)  /* avoid recursive panic */
-				panic("hv_send_message returned %d", rc);
-			break;
-		}
-		sent += rc;
-	}
+	__send_IPI_many(recip, nrecip, tag);
 }
 
 void send_IPI_allbutself(int tag)
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 0d54106be3d6..dd81713a90dc 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -44,13 +44,6 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address in the specified kernel code? */
-static int in_kernel_text(VirtualAddress address)
-{
-	return (address >= MEM_SV_INTRPT &&
-		address < MEM_SV_INTRPT + HPAGE_SIZE);
-}
-
 /* Is address valid for reading? */
 static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 {
@@ -63,6 +56,23 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
 	if (l1_pgtable == NULL)
 		return 0;	/* can't read user space in other tasks */
 
+#ifdef CONFIG_64BIT
+	/* Find the real l1_pgtable by looking in the l0_pgtable. */
+	pte = l1_pgtable[HV_L0_INDEX(address)];
+	if (!hv_pte_get_present(pte))
+		return 0;
+	pfn = hv_pte_get_pfn(pte);
+	if (pte_huge(pte)) {
+		if (!pfn_valid(pfn)) {
+			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
+			return 0;
+		}
+		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
+	}
+	page = pfn_to_page(pfn);
+	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
+	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
+#endif
 	pte = l1_pgtable[HV_L1_INDEX(address)];
 	if (!hv_pte_get_present(pte))
 		return 0;
@@ -92,7 +102,7 @@ static bool read_memory_func(void *result, VirtualAddress address,
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
-	if (in_kernel_text(address)) {
+	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
@@ -132,7 +142,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		}
 	}
 	if (EX1_PL(p->ex1) == KERNEL_PL &&
-	    in_kernel_text(p->pc) &&
+	    __kernel_text_address(p->pc) &&
 	    in_kernel_stack(kbt, p->sp) &&
 	    p->sp >= sp) {
 		if (kbt->verbose)
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index f2e156e44692..49a605be94c5 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -224,3 +224,13 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+/*
+ * Use the tile timer to convert nsecs to core clock cycles, relying
+ * on it having the same frequency as SPR_CYCLE.
+ */
+cycles_t ns2cycles(unsigned long nsecs)
+{
+	struct clock_event_device *dev = &__get_cpu_var(tile_timer);
+	return ((u64)nsecs * dev->mult) >> dev->shift;
+}
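The conversion above is the usual clockevent fixed-point form: cycles = (nsecs * mult) >> shift, where mult and shift encode the timer frequency. A user-space sketch of the same arithmetic with an assumed 700 MHz clock (illustrative numbers only, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int shift = 32;
	const uint64_t freq_hz = 700000000ULL;	/* assumed core clock */
	/* mult chosen so that (ns * mult) >> shift == ns * freq / 1e9 */
	const uint64_t mult = (freq_hz << shift) / 1000000000ULL;

	uint64_t nsecs = 1000;			/* 1 microsecond */
	uint64_t cycles = (nsecs * mult) >> shift;

	printf("%llu ns ~= %llu cycles at %llu Hz\n",
	       (unsigned long long)nsecs, (unsigned long long)cycles,
	       (unsigned long long)freq_hz);
	return 0;
}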
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index c6ce378e0678..38f64fafdc10 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -59,10 +59,7 @@ SECTIONS
 
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_sinitdata) = .;
-  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
-    *(.init.page)
-  } :data =0
-  INIT_DATA_SECTION(16)
+  INIT_DATA_SECTION(16) :data =0
   PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;