author     Mark Nutter <mnutter@us.ibm.com>    2005-11-15 15:53:49 -0500
committer  Paul Mackerras <paulus@samba.org>   2006-01-08 22:49:16 -0500
commit     5473af049d8b3556874174e61ce1986c9b5e8fa6 (patch)
tree       53da74c13eb9125b85e85f9fc44981d3d1b41b49 /arch/powerpc/platforms/cell/spufs/switch.c
parent     67207b9664a8d603138ef1556141e6d0a102bea7 (diff)
[PATCH] spufs: switchable spu contexts
Add some infrastructure for saving and restoring the context of an SPE.
This patch creates a new structure that can hold the whole state of a
physical SPE in memory. It also contains code that avoids races during
the context switch and the binary code that is loaded to the SPU in
order to access its registers.

The actual PPE- and SPE-side context switch code are two separate patches.

Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
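
For orientation only, here is a rough sketch of the save-area layout implied
by the fields this file touches. The name spu_state_sketch and the member
widths are illustrative; the real struct spu_state and struct spu_lscsa are
defined in <asm/spu_csa.h> and carry considerably more state.

	/* Illustrative sketch -- not the actual header definition. */
	struct spu_state_sketch {
		struct spu_lscsa *lscsa;	/* local store + register image, allocated separately */
		struct {
			u32 spu_runcntl_RW;	/* problem-state run control */
		} prob;
		struct {
			u64 mfc_sr1_RW;		/* MFC state register 1 */
			u64 mfc_sdr_RW;		/* storage description (SDR1) */
			u64 int_mask_class0_RW;	/* interrupt masks, classes 0-2 */
			u64 int_mask_class1_RW;
			u64 int_mask_class2_RW;
		} priv1;
		struct {
			u64 spu_lslr_RW;	/* local store limit register */
			u64 mfc_control_RW;	/* MFC control */
		} priv2;
		u32 spu_chnlcnt_RW[32];		/* saved SPU channel counts */
	};
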
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/switch.c')
-rw-r--r--    arch/powerpc/platforms/cell/spufs/switch.c    174
1 file changed, 174 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
new file mode 100644
index 00000000000..6804342e99c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -0,0 +1,174 @@
/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of the SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources. SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes. When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	/* XXX missing */

	return 0;
}

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	/* XXX missing */

	return 0;
}

/**
 * spu_switch - SPU context switch (save + restore).
 * @prev: pointer to SPU context save area, to be saved.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform save, then restore. Only harvest if the
 * save fails, as cleanup is otherwise not needed.
 */
int spu_switch(struct spu_state *prev, struct spu_state *new, struct spu *spu)
{
	/* XXX missing */

	return 0;
}

/*
 * Initialize the saved problem-state view: default channel counts
 * (including the 16-deep MFC command queue) and the SPU run control
 * left in the stopped state.
 */
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Set storage description. */
	csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_MAILBOX_INTR |
	    CLASS2_ENABLE_SPU_STOP_INTR | CLASS2_ENABLE_SPU_HALT_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	/* Default local store limit register and MFC control settings. */
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
void spu_init_csa(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;

	if (!csa)
		return;
	memset(csa, 0, sizeof(struct spu_state));

	lscsa = vmalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return;

	memset(lscsa, 0, sizeof(struct spu_lscsa));
	csa->lscsa = lscsa;

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);
}

void spu_fini_csa(struct spu_state *csa)
{
	vfree(csa->lscsa);
}
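
For context, a minimal sketch of how a caller might drive this interface once
the later save/restore patches fill in spu_save() and spu_restore(). The
function spufs_run_on() is invented here for illustration only and is not part
of this patch.

	/* Hypothetical caller -- illustration only, not part of this patch. */
	static int spufs_run_on(struct spu *spu)
	{
		struct spu_state state;
		int ret;

		spu_init_csa(&state);
		if (!state.lscsa)	/* spu_init_csa() cannot report vmalloc failure itself */
			return -ENOMEM;

		ret = spu_restore(&state, spu);	/* harvest, then load the context */
		if (!ret) {
			/* ... let the SPU program run ... */
			ret = spu_save(&state, spu);	/* preemptively save it again */
		}

		spu_fini_csa(&state);	/* free the separately allocated lscsa */
		return ret;
	}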