aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/cell/spufs/spufs.h
diff options
context:
space:
mode:
author: Arnd Bergmann <arnd@arndb.de> 2005-11-15 15:53:52 -0500
committer: Paul Mackerras <paulus@samba.org> 2006-01-08 22:49:30 -0500
commit8b3d6663c6217e4f50cc3720935a96da9b984117 (patch)
tree5295c29787ac66c26ddf715868fda7fcd3ad5f97 /arch/powerpc/platforms/cell/spufs/spufs.h
parent05b841174c289ca62a6b42d883b8791d9ac3a4bd (diff)
[PATCH] spufs: cooperative scheduler support
This adds a scheduler for SPUs to make it possible to use more logical SPUs than physical ones are present in the system. Currently, there is no support for preempting a running SPU thread, they have to leave the SPU by either triggering an event on the SPU that causes it to return to the owning thread or by sending a signal to it. This patch also adds operations that enable accessing an SPU in either runnable or saved state. We use an RW semaphore to protect the state of the SPU from changing underneath us, while we are holding it readable. In order to change the state, it is acquired writeable and a context save or restore is executed before downgrading the semaphore to read-only. From: Mark Nutter <mnutter@us.ibm.com>, Uli Weigand <Ulrich.Weigand@de.ibm.com> Signed-off-by: Arnd Bergmann <arndb@de.ibm.com> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/spufs.h')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h  55
1 file changed, 52 insertions, 3 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 67aff57faf6..93c6a053756 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -35,15 +35,50 @@ enum {
 	SPUFS_MAGIC = 0x23c9b64e,
 };
 
+struct spu_context_ops;
+
 struct spu_context {
 	struct spu *spu; /* pointer to a physical SPU */
 	struct spu_state csa; /* SPU context save area. */
-	struct rw_semaphore backing_sema; /* protects the above */
 	spinlock_t mmio_lock; /* protects mmio access */
+	struct address_space *local_store;/* local store backing store */
+
+	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
+	struct rw_semaphore state_sema;
+
+	struct mm_struct *owner;
 
 	struct kref kref;
+	wait_queue_head_t ibox_wq;
+	wait_queue_head_t wbox_wq;
+	struct fasync_struct *ibox_fasync;
+	struct fasync_struct *wbox_fasync;
+	struct spu_context_ops *ops;
+};
+
+/* SPU context query/set operations. */
+struct spu_context_ops {
+	int (*mbox_read) (struct spu_context * ctx, u32 * data);
+	u32(*mbox_stat_read) (struct spu_context * ctx);
+	int (*ibox_read) (struct spu_context * ctx, u32 * data);
+	int (*wbox_write) (struct spu_context * ctx, u32 data);
+	u32(*signal1_read) (struct spu_context * ctx);
+	void (*signal1_write) (struct spu_context * ctx, u32 data);
+	u32(*signal2_read) (struct spu_context * ctx);
+	void (*signal2_write) (struct spu_context * ctx, u32 data);
+	void (*signal1_type_set) (struct spu_context * ctx, u64 val);
+	u64(*signal1_type_get) (struct spu_context * ctx);
+	void (*signal2_type_set) (struct spu_context * ctx, u64 val);
+	u64(*signal2_type_get) (struct spu_context * ctx);
+	u32(*npc_read) (struct spu_context * ctx);
+	void (*npc_write) (struct spu_context * ctx, u32 data);
+	u32(*status_read) (struct spu_context * ctx);
+	char*(*get_ls) (struct spu_context * ctx);
+};
+
+extern struct spu_context_ops spu_hw_ops;
+extern struct spu_context_ops spu_backing_ops;
+
 struct spufs_inode_info {
 	struct spu_context *i_ctx;
 	struct inode vfs_inode;
@@ -60,14 +95,28 @@ long spufs_create_thread(struct nameidata *nd, const char *name,
 		unsigned int flags, mode_t mode);
 
 /* context management */
-struct spu_context * alloc_spu_context(void);
+struct spu_context * alloc_spu_context(struct address_space *local_store);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
 int put_spu_context(struct spu_context *ctx);
 
+void spu_forget(struct spu_context *ctx);
 void spu_acquire(struct spu_context *ctx);
 void spu_release(struct spu_context *ctx);
-void spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx);
 void spu_acquire_saved(struct spu_context *ctx);
 
+int spu_activate(struct spu_context *ctx, u64 flags);
+void spu_deactivate(struct spu_context *ctx);
+void spu_yield(struct spu_context *ctx);
+int __init spu_sched_init(void);
+void __exit spu_sched_exit(void);
+
+size_t spu_wbox_write(struct spu_context *ctx, u32 data);
+size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
+
+/* irq callback funcs. */
+void spufs_ibox_callback(struct spu *spu);
+void spufs_wbox_callback(struct spu *spu);
+
 #endif