Diffstat (limited to 'include/asm-powerpc/spu.h')
-rw-r--r--  include/asm-powerpc/spu.h  62
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index eedc828cef2d..8836c0f1f2f7 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -107,10 +107,10 @@ struct spu_runqueue;
 struct device_node;
 
 enum spu_utilization_state {
-	SPU_UTIL_SYSTEM,
 	SPU_UTIL_USER,
+	SPU_UTIL_SYSTEM,
 	SPU_UTIL_IOWAIT,
-	SPU_UTIL_IDLE,
+	SPU_UTIL_IDLE_LOADED,
 	SPU_UTIL_MAX
 };
 
@@ -121,9 +121,9 @@ struct spu {
 	unsigned long problem_phys;
 	struct spu_problem __iomem *problem;
 	struct spu_priv2 __iomem *priv2;
-	struct list_head list;
-	struct list_head sched_list;
+	struct list_head cbe_list;
 	struct list_head full_list;
+	enum { SPU_FREE, SPU_USED } alloc_state;
 	int number;
 	unsigned int irqs[3];
 	u32 node;
@@ -137,6 +137,7 @@ struct spu {
 	struct spu_runqueue *rq;
 	unsigned long long timestamp;
 	pid_t pid;
+	pid_t tgid;
 	int class_0_pending;
 	spinlock_t register_lock;
 
@@ -165,11 +166,14 @@ struct spu {
 
 	struct sys_device sysdev;
 
+	int has_mem_affinity;
+	struct list_head aff_list;
+
 	struct {
 		/* protected by interrupt reentrancy */
-		enum spu_utilization_state utilization_state;
-		unsigned long tstamp; /* time of last ctx switch */
-		unsigned long times[SPU_UTIL_MAX];
+		enum spu_utilization_state util_state;
+		unsigned long long tstamp;
+		unsigned long long times[SPU_UTIL_MAX];
 		unsigned long long vol_ctx_switch;
 		unsigned long long invol_ctx_switch;
 		unsigned long long min_flt;
@@ -181,13 +185,29 @@ struct spu {
 	} stats;
 };
 
-struct spu *spu_alloc(void);
-struct spu *spu_alloc_node(int node);
-void spu_free(struct spu *spu);
+struct cbe_spu_info {
+	struct mutex list_mutex;
+	struct list_head spus;
+	int n_spus;
+	int nr_active;
+	atomic_t reserved_spus;
+};
+
+extern struct cbe_spu_info cbe_spu_info[];
+
+void spu_init_channels(struct spu *spu);
 int spu_irq_class_0_bottom(struct spu *spu);
 int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);
 
+#ifdef CONFIG_KEXEC
+void crash_register_spus(struct list_head *list);
+#else
+static inline void crash_register_spus(struct list_head *list)
+{
+}
+#endif
+
 extern void spu_invalidate_slbs(struct spu *spu);
 extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
 
@@ -195,6 +215,20 @@ extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
 struct mm_struct;
 extern void spu_flush_all_slbs(struct mm_struct *mm);
 
+/* This interface allows a profiler (e.g., OProfile) to store a ref
+ * to spu context information that it creates.  This caching technique
+ * avoids the need to recreate this information after a save/restore operation.
+ *
+ * Assumes the caller has already incremented the ref count to
+ * profile_info; then spu_context_destroy must call kref_put
+ * on prof_info_kref.
+ */
+void spu_set_profile_private_kref(struct spu_context *ctx,
+		struct kref *prof_info_kref,
+		void ( * prof_info_release) (struct kref *kref));
+
+void *spu_get_profile_private_kref(struct spu_context *ctx);
+
 /* system callbacks from the SPU */
 struct spu_syscall_block {
 	u64 nr_ret;
@@ -206,7 +240,8 @@ extern long spu_sys_callback(struct spu_syscall_block *s);
 struct file;
 extern struct spufs_calls {
 	asmlinkage long (*create_thread)(const char __user *name,
-			unsigned int flags, mode_t mode);
+			unsigned int flags, mode_t mode,
+			struct file *neighbor);
 	asmlinkage long (*spu_run)(struct file *filp, __u32 __user *unpc,
 			__u32 __user *ustatus);
 	struct module *owner;
@@ -233,8 +268,10 @@ struct spu_coredump_calls {
 #define SPU_CREATE_GANG			0x0002
 #define SPU_CREATE_NOSCHED		0x0004
 #define SPU_CREATE_ISOLATE		0x0008
+#define SPU_CREATE_AFFINITY_SPU		0x0010
+#define SPU_CREATE_AFFINITY_MEM		0x0020
 
-#define SPU_CREATE_FLAG_ALL		0x000f /* mask of all valid flags */
+#define SPU_CREATE_FLAG_ALL		0x003f /* mask of all valid flags */
 
 
 #ifdef CONFIG_SPU_FS_MODULE
@@ -403,6 +440,7 @@ struct spu_priv2 {
 #define MFC_CNTL_RESUME_DMA_QUEUE		(0ull << 0)
 #define MFC_CNTL_SUSPEND_DMA_QUEUE		(1ull << 0)
 #define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK		(1ull << 0)
+#define MFC_CNTL_SUSPEND_MASK			(1ull << 4)
 #define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION	(0ull << 8)
 #define MFC_CNTL_SUSPEND_IN_PROGRESS		(1ull << 8)
 #define MFC_CNTL_SUSPEND_COMPLETE		(3ull << 8)
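
Usage note: the profiler kref hooks added in the @@ -195,6 +215,20 @@ hunk are only declared by this header. The following is a minimal sketch of how a profiler might use them, assuming it embeds its cached data behind a struct kref as the comment describes; my_prof_data, my_prof_attach, my_prof_lookup and my_prof_data_release are hypothetical names for illustration, not symbols from this patch.

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <asm/spu.h>

/* Hypothetical per-context cache a profiler might hang off an SPU context. */
struct my_prof_data {
	struct kref kref;		/* refcount handed to spufs via the hook */
	unsigned long long samples;	/* whatever the profiler wants to cache */
};

static void my_prof_data_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_prof_data, kref));
}

/* Create the cache and hand a counted reference to spufs. */
static int my_prof_attach(struct spu_context *ctx)
{
	struct my_prof_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	kref_init(&d->kref);	/* caller starts out holding the reference */
	spu_set_profile_private_kref(ctx, &d->kref, my_prof_data_release);
	return 0;
}

/* Retrieve the previously stored cache, if any. */
static struct my_prof_data *my_prof_lookup(struct spu_context *ctx)
{
	struct kref *kref = spu_get_profile_private_kref(ctx);

	return kref ? container_of(kref, struct my_prof_data, kref) : NULL;
}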
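
Usage note: the new SPU_CREATE_AFFINITY_SPU and SPU_CREATE_AFFINITY_MEM flags widen SPU_CREATE_FLAG_ALL to 0x003f, and create_thread gains a struct file *neighbor argument. As a hedged sketch of the kind of validation a caller of that hook could perform, assuming it is handed a struct spufs_calls pointer: my_do_spu_create is an illustrative wrapper, not a symbol from this patch.

#include <linux/errno.h>
#include <linux/fs.h>
#include <asm/spu.h>

/*
 * Illustrative only: reject unknown flag bits against the widened mask and
 * require a neighbor file for SPU-affinity requests before calling into the
 * spufs create_thread hook.
 */
static long my_do_spu_create(struct spufs_calls *calls,
			     const char __user *name, unsigned int flags,
			     mode_t mode, struct file *neighbor)
{
	if (flags & ~SPU_CREATE_FLAG_ALL)	/* bits outside the valid set */
		return -EINVAL;

	if ((flags & SPU_CREATE_AFFINITY_SPU) && !neighbor)
		return -EBADF;			/* affinity needs an anchor context */

	return calls->create_thread(name, flags, mode, neighbor);
}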