Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/arch/chip_tile64.h   |   3
-rw-r--r--  arch/tile/include/arch/chip_tilepro.h  |   3
-rw-r--r--  arch/tile/include/asm/irq.h            |  62
-rw-r--r--  arch/tile/include/asm/smp.h            |  27
-rw-r--r--  arch/tile/include/hv/hypervisor.h      | 117
-rw-r--r--  arch/tile/include/hv/pagesize.h        |  32
-rw-r--r--  arch/tile/kernel/hvglue.lds            |  14
-rw-r--r--  arch/tile/kernel/irq.c                 | 259
-rw-r--r--  arch/tile/kernel/smp.c                 |  72
9 files changed, 435 insertions(+), 154 deletions(-)
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h
index 18b5bc8e563f..1246573be59e 100644
--- a/arch/tile/include/arch/chip_tile64.h
+++ b/arch/tile/include/arch/chip_tile64.h
@@ -248,5 +248,8 @@
 /** Does the chip support rev1 DMA packets? */
 #define CHIP_HAS_REV1_DMA_PACKETS() 0
 
+/** Does the chip have an IPI shim? */
+#define CHIP_HAS_IPI() 0
+
 #endif /* !__OPEN_SOURCE__ */
 #endif /* __ARCH_CHIP_H__ */
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h
index 9852af163862..e864c47fc89c 100644
--- a/arch/tile/include/arch/chip_tilepro.h
+++ b/arch/tile/include/arch/chip_tilepro.h
@@ -248,5 +248,8 @@
 /** Does the chip support rev1 DMA packets? */
 #define CHIP_HAS_REV1_DMA_PACKETS() 1
 
+/** Does the chip have an IPI shim? */
+#define CHIP_HAS_IPI() 0
+
 #endif /* !__OPEN_SOURCE__ */
 #endif /* __ARCH_CHIP_H__ */
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 9be1f849fac9..572fd3ef1d73 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -23,15 +23,65 @@
 /* IRQ numbers used for linux IPIs. */
 #define IRQ_RESCHEDULE 1
 
-/* The HV interrupt state object. */
-DECLARE_PER_CPU(HV_IntrState, dev_intr_state);
-
 void ack_bad_irq(unsigned int irq);
 
 /*
- * Paravirtualized drivers should call this when their init calls
- * discover a valid HV IRQ.
+ * Different ways of handling interrupts.  Tile interrupts are always
+ * per-cpu; there is no global interrupt controller to implement
+ * enable/disable.  Most onboard devices can send their interrupts to
+ * many tiles at the same time, and Tile-specific drivers know how to
+ * deal with this.
+ *
+ * However, generic devices (usually PCIE based, sometimes GPIO)
+ * expect that interrupts will fire on a single core at a time and
+ * that the irq can be enabled or disabled from any core at any time.
+ * We implement this by directing such interrupts to a single core.
+ *
+ * One added wrinkle is that PCI interrupts can be either
+ * hardware-cleared (legacy interrupts) or software cleared (MSI).
+ * Other generic device systems (GPIO) are always software-cleared.
+ *
+ * The enums below are used by drivers for onboard devices, including
+ * the internals of PCI root complex and GPIO.  They allow the driver
+ * to tell the generic irq code what kind of interrupt is mapped to a
+ * particular IRQ number.
+ */
+enum {
+	/* per-cpu interrupt; use enable/disable_percpu_irq() to mask */
+	TILE_IRQ_PERCPU,
+	/* global interrupt, hardware responsible for clearing. */
+	TILE_IRQ_HW_CLEAR,
+	/* global interrupt, software responsible for clearing. */
+	TILE_IRQ_SW_CLEAR,
+};
+
+
+/*
+ * Paravirtualized drivers should call this when they dynamically
+ * allocate a new IRQ or discover an IRQ that was pre-allocated by the
+ * hypervisor for use with their particular device.  This gives the
+ * IRQ subsystem an opportunity to do interrupt-type-specific
+ * initialization.
+ *
+ * ISSUE: We should modify this API so that registering anything
+ * except percpu interrupts also requires providing callback methods
+ * for enabling and disabling the interrupt.  This would allow the
+ * generic IRQ code to proxy enable/disable_irq() calls back into the
+ * PCI subsystem, which in turn could enable or disable the interrupt
+ * at the PCI shim.
  */
-void tile_irq_activate(unsigned int irq);
+void tile_irq_activate(unsigned int irq, int tile_irq_type);
+
+/*
+ * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
+ * how to use enable/disable_percpu_irq() to manage interrupts on each
+ * core.  We can't use the generic enable/disable_irq() because they
+ * use a single reference count per irq, rather than per cpu per irq.
+ */
+void enable_percpu_irq(unsigned int irq);
+void disable_percpu_irq(unsigned int irq);
+
+
+void setup_irq_regs(void);
 
 #endif /* _ASM_TILE_IRQ_H */
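For reference, a minimal sketch (not part of this patch) of how an on-chip device driver might use the reworked API above; MY_DEV_IRQ, my_dev_handler and my_dev_init are hypothetical names, and the IRQ number is assumed to have been supplied by the hypervisor:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq.h>

#define MY_DEV_IRQ 10	/* hypothetical IRQ number from the hypervisor */

static irqreturn_t my_dev_handler(int irq, void *dev_id)
{
	/* ... service the device ... */
	return IRQ_HANDLED;
}

static int __init my_dev_init(void)
{
	/* Tell the tile IRQ layer how this interrupt behaves. */
	tile_irq_activate(MY_DEV_IRQ, TILE_IRQ_PERCPU);

	if (request_irq(MY_DEV_IRQ, my_dev_handler, 0, "my_dev", NULL))
		return -EBUSY;

	/* Unmask the interrupt on the local core only. */
	enable_percpu_irq(MY_DEV_IRQ);
	return 0;
}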
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h
index da24858a7392..532124ae4b12 100644
--- a/arch/tile/include/asm/smp.h
+++ b/arch/tile/include/asm/smp.h
@@ -20,6 +20,7 @@
 #include <asm/processor.h>
 #include <linux/cpumask.h>
 #include <linux/irqreturn.h>
+#include <hv/hypervisor.h>
 
 /* Set up this tile to support receiving hypervisor messages */
 void init_messaging(void);
@@ -39,9 +40,6 @@ void send_IPI_single(int dest, int tag);
 /* Process an IPI message */
 void evaluate_message(int tag);
 
-/* Process an IRQ_RESCHEDULE IPI. */
-irqreturn_t handle_reschedule_ipi(int irq, void *token);
-
 /* Boot a secondary cpu */
 void online_secondary(void);
 
@@ -56,6 +54,20 @@ extern HV_Topology smp_topology;
 #define smp_height (smp_topology.height)
 #define smp_width (smp_topology.width)
 
+/* Convenience functions for converting cpu <-> coords. */
+static inline int cpu_x(int cpu)
+{
+	return cpu % smp_width;
+}
+static inline int cpu_y(int cpu)
+{
+	return cpu / smp_width;
+}
+static inline int xy_to_cpu(int x, int y)
+{
+	return y * smp_width + x;
+}
+
 /* Hypervisor message tags sent via the tile send_IPI*() routines. */
 #define MSG_TAG_START_CPU 1
 #define MSG_TAG_STOP_CPU 2
@@ -85,6 +97,9 @@ void print_disabled_cpus(void);
 #define smp_master_cpu 0
 #define smp_height 1
 #define smp_width 1
+#define cpu_x(cpu) 0
+#define cpu_y(cpu) 0
+#define xy_to_cpu(x, y) 0
 
 #endif /* !CONFIG_SMP */
 
@@ -123,4 +138,10 @@ static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp,
 	return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits);
 }
 
+/* Initialize the IPI subsystem. */
+void ipi_init(void);
+
+/* Function for start-cpu message to cause us to jump to. */
+extern unsigned long start_cpu_function_addr;
+
 #endif /* _ASM_TILE_SMP_H */
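As the inline helpers above imply, cpu numbering is row-major across the smp_width x smp_height grid. A small sketch (not part of this patch; check_cpu_coords is a hypothetical helper) of how the conversions compose:

#include <linux/bug.h>
#include <asm/smp.h>

/* Illustrative only: round-trip a cpu number through the new helpers. */
static void check_cpu_coords(int cpu)
{
	int x = cpu_x(cpu);	/* column within the tile grid */
	int y = cpu_y(cpu);	/* row within the tile grid */

	BUG_ON(x >= smp_width || y >= smp_height);
	BUG_ON(xy_to_cpu(x, y) != cpu);	/* y * smp_width + x == cpu */
}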
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 84b31551080a..a90d2989587a 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -20,12 +20,9 @@
 #ifndef _TILE_HV_H
 #define _TILE_HV_H
 
-#ifdef __tile__
 #include <arch/chip.h>
-#else
-/* HACK: Allow use by "tools/cpack/". */
-#include "install/include/arch/chip.h"
-#endif
+
+#include <hv/pagesize.h>
 
 /* Linux builds want unsigned long constants, but assembler wants numbers */
 #ifdef __ASSEMBLER__
@@ -39,7 +36,6 @@
 #define __HV_SIZE_ONE 1UL
 #endif
 
-
 /** The log2 of the span of a level-1 page table, in bytes.
  */
 #define HV_LOG2_L1_SPAN 32
@@ -48,21 +44,11 @@
  */
 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
 
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
- */
-#define HV_LOG2_PAGE_SIZE_SMALL 16
-
 /** The size of small pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
  */
 #define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
 
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
- */
-#define HV_LOG2_PAGE_SIZE_LARGE 24
-
 /** The size of large pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
  */
@@ -93,7 +79,7 @@
 #define HV_DISPATCH_ENTRY_SIZE 32
 
 /** Version of the hypervisor interface defined by this file */
-#define _HV_VERSION 10
+#define _HV_VERSION 11
 
 /* Index into hypervisor interface dispatch code blocks.
  *
@@ -253,8 +239,10 @@
 /** hv_set_command_line */
 #define HV_DISPATCH_SET_COMMAND_LINE 47
 
-/** hv_dev_register_intr_state */
-#define HV_DISPATCH_DEV_REGISTER_INTR_STATE 48
+#if !CHIP_HAS_IPI()
+
+/** hv_clear_intr */
+#define HV_DISPATCH_CLEAR_INTR 48
 
 /** hv_enable_intr */
 #define HV_DISPATCH_ENABLE_INTR 49
@@ -262,20 +250,30 @@
 /** hv_disable_intr */
 #define HV_DISPATCH_DISABLE_INTR 50
 
+/** hv_raise_intr */
+#define HV_DISPATCH_RAISE_INTR 51
+
 /** hv_trigger_ipi */
-#define HV_DISPATCH_TRIGGER_IPI 51
+#define HV_DISPATCH_TRIGGER_IPI 52
+
+#endif /* !CHIP_HAS_IPI() */
 
 /** hv_store_mapping */
-#define HV_DISPATCH_STORE_MAPPING 52
+#define HV_DISPATCH_STORE_MAPPING 53
 
 /** hv_inquire_realpa */
-#define HV_DISPATCH_INQUIRE_REALPA 53
+#define HV_DISPATCH_INQUIRE_REALPA 54
 
 /** hv_flush_all */
-#define HV_DISPATCH_FLUSH_ALL 54
+#define HV_DISPATCH_FLUSH_ALL 55
+
+#if CHIP_HAS_IPI()
+/** hv_get_ipi_pte */
+#define HV_DISPATCH_GET_IPI_PTE 56
+#endif
 
 /** One more than the largest dispatch value */
-#define _HV_DISPATCH_END 55
+#define _HV_DISPATCH_END 57
 
 
 #ifndef __ASSEMBLER__
@@ -484,21 +482,6 @@ typedef enum {
  */
 int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
 
-/** State object used to enable and disable one-shot and level-sensitive
- * interrupts. */
-typedef struct
-{
-#if CHIP_VA_WIDTH() > 32
-  __hv64 opaque[2]; /**< No user-serviceable parts inside */
-#else
-  __hv32 opaque[2]; /**< No user-serviceable parts inside */
-#endif
-}
-HV_IntrState;
-
-/** A set of interrupts. */
-typedef __hv32 HV_IntrMask;
-
 /** Tile coordinate */
 typedef struct
 {
@@ -509,34 +492,51 @@ typedef struct
   int y;
 } HV_Coord;
 
+
+#if CHIP_HAS_IPI()
+
+/** Get the PTE for sending an IPI to a particular tile.
+ *
+ * @param tile Tile which will receive the IPI.
+ * @param pl Indicates which IPI registers: 0 = IPI_0, 1 = IPI_1.
+ * @param pte Filled with resulting PTE.
+ * @result Zero if no error, non-zero for invalid parameters.
+ */
+int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte);
+
+#else /* !CHIP_HAS_IPI() */
+
+/** A set of interrupts. */
+typedef __hv32 HV_IntrMask;
+
 /** The low interrupt numbers are reserved for use by the client in
  * delivering IPIs.  Any interrupt numbers higher than this value are
  * reserved for use by HV device drivers. */
 #define HV_MAX_IPI_INTERRUPT 7
 
-/** Register an interrupt state object.  This object is used to enable and
- * disable one-shot and level-sensitive interrupts.  Once the state is
- * registered, the client must not read or write the state object; doing
- * so will cause undefined results.
+/** Enable a set of device interrupts.
  *
- * @param intr_state Pointer to interrupt state object.
- * @return HV_OK on success, or a hypervisor error code.
+ * @param enab_mask Bitmap of interrupts to enable.
  */
-HV_Errno hv_dev_register_intr_state(HV_IntrState* intr_state);
+void hv_enable_intr(HV_IntrMask enab_mask);
 
-/** Enable a set of one-shot and level-sensitive interrupts.
+/** Disable a set of device interrupts.
  *
- * @param intr_state Pointer to interrupt state object.
- * @param enab_mask Bitmap of interrupts to enable.
+ * @param disab_mask Bitmap of interrupts to disable.
  */
-void hv_enable_intr(HV_IntrState* intr_state, HV_IntrMask enab_mask);
+void hv_disable_intr(HV_IntrMask disab_mask);
 
-/** Disable a set of one-shot and level-sensitive interrupts.
+/** Clear a set of device interrupts.
  *
- * @param intr_state Pointer to interrupt state object.
- * @param disab_mask Bitmap of interrupts to disable.
+ * @param clear_mask Bitmap of interrupts to clear.
  */
-void hv_disable_intr(HV_IntrState* intr_state, HV_IntrMask disab_mask);
+void hv_clear_intr(HV_IntrMask clear_mask);
+
+/** Assert a set of device interrupts.
+ *
+ * @param assert_mask Bitmap of interrupts to clear.
+ */
+void hv_assert_intr(HV_IntrMask assert_mask);
 
 /** Trigger a one-shot interrupt on some tile
  *
@@ -547,6 +547,8 @@ void hv_disable_intr(HV_IntrState* intr_state, HV_IntrMask disab_mask);
  */
 HV_Errno hv_trigger_ipi(HV_Coord tile, int interrupt);
 
+#endif /* !CHIP_HAS_IPI() */
+
 /** Store memory mapping in debug memory so that external debugger can read it.
  * A maximum of 16 entries can be stored.
  *
@@ -1010,6 +1012,13 @@ int hv_console_write(HV_VirtAddr bytes, int len);
  * it will return to the code which was interrupted by the INTCTRL_1
  * interrupt.
  *
+ * Under some circumstances, the firing of INTCTRL_1 can race with
+ * the lowering of a device interrupt.  In such a case, the
+ * hv_downcall_dispatch service may issue an iret instruction instead
+ * of entering one of the client's actual downcall-handling interrupt
+ * vectors.  This will return execution to the location that was
+ * interrupted by INTCTRL_1.
+ *
  * Any saving of registers should be done by the actual handling
  * vectors; no registers should be changed by the INTCTRL_1 handler.
  * In particular, the client should not use a jal instruction to invoke
diff --git a/arch/tile/include/hv/pagesize.h b/arch/tile/include/hv/pagesize.h
new file mode 100644
index 000000000000..58bed114fedd
--- /dev/null
+++ b/arch/tile/include/hv/pagesize.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * @file pagesize.h
+ */
+
+#ifndef _HV_PAGESIZE_H
+#define _HV_PAGESIZE_H
+
+/** The log2 of the size of small pages, in bytes.  This value should
+ * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ */
+#define HV_LOG2_PAGE_SIZE_SMALL 16
+
+/** The log2 of the size of large pages, in bytes.  This value should be
+ * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ */
+#define HV_LOG2_PAGE_SIZE_LARGE 24
+
+#endif /* _HV_PAGESIZE_H */
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
index 698489b4c7ab..2b7cd0a659a9 100644
--- a/arch/tile/kernel/hvglue.lds
+++ b/arch/tile/kernel/hvglue.lds
@@ -46,11 +46,13 @@ hv_inquire_tiles = TEXT_OFFSET + 0x10580;
 hv_confstr = TEXT_OFFSET + 0x105a0;
 hv_reexec = TEXT_OFFSET + 0x105c0;
 hv_set_command_line = TEXT_OFFSET + 0x105e0;
-hv_dev_register_intr_state = TEXT_OFFSET + 0x10600;
+hv_clear_intr = TEXT_OFFSET + 0x10600;
 hv_enable_intr = TEXT_OFFSET + 0x10620;
 hv_disable_intr = TEXT_OFFSET + 0x10640;
-hv_trigger_ipi = TEXT_OFFSET + 0x10660;
-hv_store_mapping = TEXT_OFFSET + 0x10680;
-hv_inquire_realpa = TEXT_OFFSET + 0x106a0;
-hv_flush_all = TEXT_OFFSET + 0x106c0;
-hv_glue_internals = TEXT_OFFSET + 0x106e0;
+hv_raise_intr = TEXT_OFFSET + 0x10660;
+hv_trigger_ipi = TEXT_OFFSET + 0x10680;
+hv_store_mapping = TEXT_OFFSET + 0x106a0;
+hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
+hv_flush_all = TEXT_OFFSET + 0x106e0;
+hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
+hv_glue_internals = TEXT_OFFSET + 0x10720;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 24cc6b2abc2c..596c60086930 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -19,6 +19,11 @@
 #include <linux/kernel_stat.h>
 #include <linux/uaccess.h>
 #include <hv/drv_pcie_rc_intf.h>
+#include <arch/spr_def.h>
+#include <asm/traps.h>
+
+/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
+#define IS_HW_CLEARED 1
 
 /*
  * The set of interrupts we enable for raw_local_irq_enable().
@@ -31,30 +36,74 @@ DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
 	INITIAL_INTERRUPTS_ENABLED;
 EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);
 
-/* Define per-tile device interrupt state */
-DEFINE_PER_CPU(HV_IntrState, dev_intr_state);
-
+/* Define per-tile device interrupt statistics state. */
 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
+/*
+ * Define per-tile irq disable mask; the hardware/HV only has a single
+ * mask that we use to implement both masking and disabling.
+ */
+static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
+	____cacheline_internodealigned_in_smp;
+
+/*
+ * Per-tile IRQ nesting depth.  Used to make sure we enable newly
+ * enabled IRQs before exiting the outermost interrupt.
+ */
+static DEFINE_PER_CPU(int, irq_depth);
+
+/* State for allocating IRQs on Gx. */
+#if CHIP_HAS_IPI()
+static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
+static DEFINE_SPINLOCK(available_irqs_lock);
+#endif
 
+#if CHIP_HAS_IPI()
+/* Use SPRs to manipulate device interrupts. */
+#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
+#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
+#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
+#else
+/* Use HV to manipulate device interrupts. */
+#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
+#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
+#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
+#endif
 
 /*
- * Interrupt dispatcher, invoked upon a hypervisor device interrupt downcall
+ * The interrupt handling path, implemented in terms of HV interrupt
+ * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
-	int irq;
+	int depth = __get_cpu_var(irq_depth)++;
+	unsigned long original_irqs;
+	unsigned long remaining_irqs;
+	struct pt_regs *old_regs;
 
+#if CHIP_HAS_IPI()
 	/*
-	 * Get the device interrupt pending mask from where the hypervisor
-	 * has tucked it away for us.
+	 * Pending interrupts are listed in an SPR.  We might be
+	 * nested, so be sure to only handle irqs that weren't already
+	 * masked by a previous interrupt.  Then, mask out the ones
+	 * we're going to handle.
 	 */
-	unsigned long pending_dev_intr_mask = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
-
+	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1);
+	original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked;
+	__insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs);
+#else
+	/*
+	 * Hypervisor performs the equivalent of the Gx code above and
+	 * then puts the pending interrupt mask into a system save reg
+	 * for us to find.
+	 */
+	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3);
+#endif
+	remaining_irqs = original_irqs;
 
 	/* Track time spent here in an interrupt context. */
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	old_regs = set_irq_regs(regs);
 	irq_enter();
 
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -62,26 +111,35 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	{
 		long sp = stack_pointer - (long) current_thread_info();
 		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-			printk(KERN_EMERG "tile_dev_intr: "
+			pr_emerg("tile_dev_intr: "
 			       "stack overflow: %ld\n",
 			       sp - sizeof(struct thread_info));
 			dump_stack();
 		}
 	}
 #endif
+	while (remaining_irqs) {
+		unsigned long irq = __ffs(remaining_irqs);
+		remaining_irqs &= ~(1UL << irq);
 
-	for (irq = 0; pending_dev_intr_mask; ++irq) {
-		if (pending_dev_intr_mask & 0x1) {
-			generic_handle_irq(irq);
+		/* Count device irqs; Linux IPIs are counted elsewhere. */
+		if (irq != IRQ_RESCHEDULE)
+			__get_cpu_var(irq_stat).irq_dev_intr_count++;
 
-			/* Count device irqs; IPIs are counted elsewhere. */
-			if (irq > HV_MAX_IPI_INTERRUPT)
-				__get_cpu_var(irq_stat).irq_dev_intr_count++;
-		}
-		pending_dev_intr_mask >>= 1;
+		generic_handle_irq(irq);
 	}
 
 	/*
+	 * If we weren't nested, turn on all enabled interrupts,
+	 * including any that were reenabled during interrupt
+	 * handling.
+	 */
+	if (depth == 0)
+		unmask_irqs(~__get_cpu_var(irq_disable_mask));
+
+	__get_cpu_var(irq_depth)--;
+
+	/*
 	 * Track time spent against the current process again and
 	 * process any softirqs if they are waiting.
 	 */
@@ -90,97 +148,114 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 }
 
 
+/*
+ * Remove an irq from the disabled mask.  If we're in an interrupt
+ * context, defer enabling the HW interrupt until we leave.
+ */
+void enable_percpu_irq(unsigned int irq)
+{
+	get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+	if (__get_cpu_var(irq_depth) == 0)
+		unmask_irqs(1UL << irq);
+	put_cpu_var(irq_disable_mask);
+}
+EXPORT_SYMBOL(enable_percpu_irq);
+
+/*
+ * Add an irq to the disabled mask.  We disable the HW interrupt
+ * immediately so that there's no possibility of it firing.  If we're
+ * in an interrupt context, the return path is careful to avoid
+ * unmasking a newly disabled interrupt.
+ */
+void disable_percpu_irq(unsigned int irq)
+{
+	get_cpu_var(irq_disable_mask) |= (1UL << irq);
+	mask_irqs(1UL << irq);
+	put_cpu_var(irq_disable_mask);
+}
+EXPORT_SYMBOL(disable_percpu_irq);
+
 /* Mask an interrupt. */
-static void hv_dev_irq_mask(unsigned int irq)
+static void tile_irq_chip_mask(unsigned int irq)
 {
-	HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state);
-	hv_disable_intr(p_intr_state, 1 << irq);
+	mask_irqs(1UL << irq);
 }
 
 /* Unmask an interrupt. */
-static void hv_dev_irq_unmask(unsigned int irq)
+static void tile_irq_chip_unmask(unsigned int irq)
 {
-	/* Re-enable the hypervisor to generate interrupts. */
-	HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state);
-	hv_enable_intr(p_intr_state, 1 << irq);
+	unmask_irqs(1UL << irq);
 }
 
 /*
- * The HV doesn't latch incoming interrupts while an interrupt is
- * disabled, so we need to reenable interrupts before running the
- * handler.
- *
- * ISSUE: Enabling the interrupt this early avoids any race conditions
- * but introduces the possibility of nested interrupt stack overflow.
- * An imminent change to the HV IRQ model will fix this.
+ * Clear an interrupt before processing it so that any new assertions
+ * will trigger another irq.
  */
-static void hv_dev_irq_ack(unsigned int irq)
+static void tile_irq_chip_ack(unsigned int irq)
 {
-	hv_dev_irq_unmask(irq);
+	if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED)
+		clear_irqs(1UL << irq);
 }
 
 /*
- * Since ack() reenables interrupts, there's nothing to do at eoi().
+ * For per-cpu interrupts, we need to avoid unmasking any interrupts
+ * that we disabled via disable_percpu_irq().
  */
-static void hv_dev_irq_eoi(unsigned int irq)
+static void tile_irq_chip_eoi(unsigned int irq)
 {
+	if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq)))
+		unmask_irqs(1UL << irq);
 }
 
-static struct irq_chip hv_dev_irq_chip = {
-	.typename = "hv_dev_irq_chip",
-	.ack = hv_dev_irq_ack,
-	.mask = hv_dev_irq_mask,
-	.unmask = hv_dev_irq_unmask,
-	.eoi = hv_dev_irq_eoi,
-};
-
-static struct irqaction resched_action = {
-	.handler = handle_reschedule_ipi,
-	.name = "resched",
-	.dev_id = handle_reschedule_ipi /* unique token */,
+static struct irq_chip tile_irq_chip = {
+	.typename = "tile_irq_chip",
+	.ack = tile_irq_chip_ack,
+	.eoi = tile_irq_chip_eoi,
+	.mask = tile_irq_chip_mask,
+	.unmask = tile_irq_chip_unmask,
 };
 
 void __init init_IRQ(void)
 {
-	/* Bind IPI irqs. Does this belong somewhere else in init? */
-	tile_irq_activate(IRQ_RESCHEDULE);
-	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
+	ipi_init();
 }
 
-void __cpuinit init_per_tile_IRQs(void)
+void __cpuinit setup_irq_regs(void)
 {
-	int rc;
-
-	/* Set the pointer to the per-tile device interrupt state. */
-	HV_IntrState *sv_ptr = &__get_cpu_var(dev_intr_state);
-	rc = hv_dev_register_intr_state(sv_ptr);
-	if (rc != HV_OK)
-		panic("hv_dev_register_intr_state: error %d", rc);
-
+	/* Enable interrupt delivery. */
+	unmask_irqs(~0UL);
+#if CHIP_HAS_IPI()
+	raw_local_irq_unmask(INT_IPI_1);
+#endif
 }
 
-void tile_irq_activate(unsigned int irq)
+void tile_irq_activate(unsigned int irq, int tile_irq_type)
 {
 	/*
-	 * Paravirtualized drivers can call up to the HV to find out
-	 * which irq they're associated with.  The HV interface
-	 * doesn't provide a generic call for discovering all valid
-	 * IRQs, so drivers must call this method to initialize newly
-	 * discovered IRQs.
-	 *
-	 * We could also just initialize all 32 IRQs at startup, but
-	 * doing so would lead to a kernel fault if an unexpected
-	 * interrupt fires and jumps to a NULL action.  By defering
-	 * the set_irq_chip_and_handler() call, unexpected IRQs are
-	 * handled properly by handle_bad_irq().
+	 * We use handle_level_irq() by default because the pending
+	 * interrupt vector (whether modeled by the HV on TILE64 and
+	 * TILEPro or implemented in hardware on TILE-Gx) has
+	 * level-style semantics for each bit.  An interrupt fires
+	 * whenever a bit is high, not just at edges.
+	 */
+	irq_flow_handler_t handle = handle_level_irq;
+	if (tile_irq_type == TILE_IRQ_PERCPU)
+		handle = handle_percpu_irq;
+	set_irq_chip_and_handler(irq, &tile_irq_chip, handle);
+
+	/*
+	 * Flag interrupts that are hardware-cleared so that ack()
+	 * won't clear them.
 	 */
-	hv_dev_irq_mask(irq);
-	set_irq_chip_and_handler(irq, &hv_dev_irq_chip, handle_percpu_irq);
+	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
+		set_irq_chip_data(irq, (void *)IS_HW_CLEARED);
 }
+EXPORT_SYMBOL(tile_irq_activate);
+
 
 void ack_bad_irq(unsigned int irq)
 {
-	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
+	pr_err("unexpected IRQ trap at vector %02x\n", irq);
 }
 
 /*
@@ -225,3 +300,35 @@ skip:
 	}
 	return 0;
 }
+
+#if CHIP_HAS_IPI()
+int create_irq(void)
+{
+	unsigned long flags;
+	int result;
+
+	spin_lock_irqsave(&available_irqs_lock, flags);
+	if (available_irqs == 0)
+		result = -ENOMEM;
+	else {
+		result = __ffs(available_irqs);
+		available_irqs &= ~(1UL << result);
+		dynamic_irq_init(result);
+	}
+	spin_unlock_irqrestore(&available_irqs_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(create_irq);
+
+void destroy_irq(unsigned int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&available_irqs_lock, flags);
+	available_irqs |= (1UL << irq);
+	dynamic_irq_cleanup(irq);
+	spin_unlock_irqrestore(&available_irqs_lock, flags);
+}
+EXPORT_SYMBOL(destroy_irq);
+#endif
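For reference, a hedged sketch (not part of this patch) of how a generic driver on a CHIP_HAS_IPI() chip might pair the new allocator with tile_irq_activate(); my_msi_setup and the "my_msi" name are hypothetical, and the declarations of create_irq()/destroy_irq() are assumed to be visible via <linux/irq.h>:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq.h>

/* Hypothetical sketch: allocate and wire up a software-cleared IRQ. */
static int my_msi_setup(irq_handler_t handler, void *dev)
{
	int irq = create_irq();		/* grab a free dynamic IRQ number */
	if (irq < 0)
		return irq;

	/* MSI is software-cleared, so ack() must clear the event bit. */
	tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);

	if (request_irq(irq, handler, 0, "my_msi", dev)) {
		destroy_irq(irq);
		return -EBUSY;
	}
	return irq;
}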
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 782c1bfa6dfe..1cb5ec79de04 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -15,10 +15,18 @@
  */
 
 #include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 
 HV_Topology smp_topology __write_once;
+EXPORT_SYMBOL(smp_topology);
+
+#if CHIP_HAS_IPI()
+static unsigned long __iomem *ipi_mappings[NR_CPUS];
+#endif
 
 
 /*
@@ -100,7 +108,6 @@ void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
 /* Handler to start the current cpu. */
 static void smp_start_cpu_interrupt(void)
 {
-	extern unsigned long start_cpu_function_addr;
 	get_irq_regs()->pc = start_cpu_function_addr;
 }
 
@@ -174,12 +181,8 @@ void flush_icache_range(unsigned long start, unsigned long end)
 }
 
 
-/*
- * The smp_send_reschedule() path does not use the hv_message_intr()
- * path but instead the faster tile_dev_intr() path for interrupts.
- */
-
-irqreturn_t handle_reschedule_ipi(int irq, void *token)
+/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
+static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
 	/*
 	 * Nothing to do here; when we return from interrupt, the
@@ -191,12 +194,63 @@ irqreturn_t handle_reschedule_ipi(int irq, void *token)
 	return IRQ_HANDLED;
 }
 
+static struct irqaction resched_action = {
+	.handler = handle_reschedule_ipi,
+	.name = "resched",
+	.dev_id = handle_reschedule_ipi /* unique token */,
+};
+
+void __init ipi_init(void)
+{
+#if CHIP_HAS_IPI()
+	int cpu;
+	/* Map IPI trigger MMIO addresses. */
+	for_each_possible_cpu(cpu) {
+		HV_Coord tile;
+		HV_PTE pte;
+		unsigned long offset;
+
+		tile.x = cpu_x(cpu);
+		tile.y = cpu_y(cpu);
+		if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+			panic("Failed to initialize IPI for cpu %d\n", cpu);
+
+		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
+	}
+#endif
+
+	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
+	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
+	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
+}
+
+#if CHIP_HAS_IPI()
+
+void smp_send_reschedule(int cpu)
+{
+	WARN_ON(cpu_is_offline(cpu));
+
+	/*
+	 * We just want to do an MMIO store.  The traditional writeq()
+	 * functions aren't really correct here, since they're always
+	 * directed at the PCI shim.  For now, just do a raw store,
+	 * casting away the __iomem attribute.
+	 */
+	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
+}
+
+#else
+
 void smp_send_reschedule(int cpu)
 {
 	HV_Coord coord;
 
 	WARN_ON(cpu_is_offline(cpu));
-	coord.y = cpu / smp_width;
-	coord.x = cpu % smp_width;
+
+	coord.y = cpu_y(cpu);
+	coord.x = cpu_x(cpu);
 	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
 }
+
+#endif /* CHIP_HAS_IPI() */