Diffstat (limited to 'arch/parisc')
-rw-r--r--	arch/parisc/kernel/drivers.c	|   8
-rw-r--r--	arch/parisc/kernel/entry.S	|   1
-rw-r--r--	arch/parisc/kernel/inventory.c	|   2
-rw-r--r--	arch/parisc/kernel/ioctl32.c	| 546
-rw-r--r--	arch/parisc/kernel/irq.c	| 110
-rw-r--r--	arch/parisc/kernel/perf.c	|  33
-rw-r--r--	arch/parisc/kernel/ptrace.c	|   5
-rw-r--r--	arch/parisc/kernel/signal.c	|   1
-rw-r--r--	arch/parisc/kernel/smp.c	|  24
-rw-r--r--	arch/parisc/kernel/syscall.S	|   3
10 files changed, 143 insertions, 590 deletions
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 988844a169e6..d016d672ec2b 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -499,8 +499,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
 
 	dev = create_parisc_device(mod_path);
 	if (dev->id.hw_type != HPHW_FAULTY) {
-		printk("Two devices have hardware path %s. Please file a bug with HP.\n"
-			"In the meantime, you could try rearranging your cards.\n", parisc_pathname(dev));
+		printk(KERN_ERR "Two devices have hardware path [%s]. "
+				"IODC data for second device: "
+				"%02x%02x%02x%02x%02x%02x\n"
+				"Rearranging GSC cards sometimes helps\n",
+			parisc_pathname(dev), iodc_data[0], iodc_data[1],
+			iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
 		return NULL;
 	}
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c7e66ee5b083..9af4b22a6d77 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1846,6 +1846,7 @@ sys_clone_wrapper:
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
+	/* WARNING - Clobbers r19 and r21, userspace must save these! */
 	STREG	%r2,PT_GR19(%r1)	/* save for child */
 	STREG	%r30,PT_GR21(%r1)
 	BL	sys_clone,%r2
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 1a1c66422736..8f563871e83c 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
 	temp = pa_pdc_cell.cba;
 	dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
 	if (!dev) {
-		return PDC_NE_MOD;
+		return PDC_OK;
 	}
 
 	/* alloc_pa_dev sets dev->hpa */
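
The hunk above makes a failed alloc_pa_dev() non-fatal: the loop driving pat_query_module() treats any status other than PDC_OK as the end of the module list (as the change suggests), so returning PDC_OK skips the unusable module instead of ending the scan. A minimal user-space sketch of that control-flow choice; the probe/query names and the PDC_NE_MOD value are illustrative, not kernel APIs:

#include <stdio.h>

#define PDC_OK      0
#define PDC_NE_MOD  2	/* illustrative stand-in for "no such module" */

static int probe_module(int idx)	/* pretend module 2 is a duplicate */
{
	return idx == 2 ? -1 : 0;
}

static int query_module(int idx)
{
	if (probe_module(idx) < 0) {
		fprintf(stderr, "skipping module %d\n", idx);
		return PDC_OK;	/* old behaviour: PDC_NE_MOD, which ends the walk */
	}
	printf("registered module %d\n", idx);
	return PDC_OK;
}

int main(void)
{
	/* the caller stops at the first non-PDC_OK return */
	for (int i = 0; i < 5 && query_module(i) == PDC_OK; i++)
		;
	return 0;
}
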
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
index 0a331104ad56..4eada1bb27f0 100644
--- a/arch/parisc/kernel/ioctl32.c
+++ b/arch/parisc/kernel/ioctl32.c
@@ -19,536 +19,6 @@
 #define CODE
 #include "compat_ioctl.c"
 
-/* Use this to get at 32-bit user passed pointers.
-   See sys_sparc32.c for description about these. */
-#define A(__x) ((unsigned long)(__x))
-/* The same for use with copy_from_user() and copy_to_user(). */
-#define B(__x) ((void *)(unsigned long)(__x))
-
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-/* This really belongs in include/linux/drm.h -DaveM */
-#include "../../../drivers/char/drm/drm.h"
-
-typedef struct drm32_version {
-	int	version_major;		/* Major version */
-	int	version_minor;		/* Minor version */
-	int	version_patchlevel;	/* Patch level */
-	int	name_len;		/* Length of name buffer */
-	u32	name;			/* Name of driver */
-	int	date_len;		/* Length of date buffer */
-	u32	date;			/* User-space buffer to hold date */
-	int	desc_len;		/* Length of desc buffer */
-	u32	desc;			/* User-space buffer to hold desc */
-} drm32_version_t;
-#define DRM32_IOCTL_VERSION	DRM_IOWR(0x00, drm32_version_t)
-
-static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_version_t *uversion = (drm32_version_t *)arg;
-	char *name_ptr, *date_ptr, *desc_ptr;
-	u32 tmp1, tmp2, tmp3;
-	drm_version_t kversion;
-	mm_segment_t old_fs;
-	int ret;
-
-	memset(&kversion, 0, sizeof(kversion));
-	if (get_user(kversion.name_len, &uversion->name_len) ||
-	    get_user(kversion.date_len, &uversion->date_len) ||
-	    get_user(kversion.desc_len, &uversion->desc_len) ||
-	    get_user(tmp1, &uversion->name) ||
-	    get_user(tmp2, &uversion->date) ||
-	    get_user(tmp3, &uversion->desc))
-		return -EFAULT;
-
-	name_ptr = (char *) A(tmp1);
-	date_ptr = (char *) A(tmp2);
-	desc_ptr = (char *) A(tmp3);
-
-	ret = -ENOMEM;
-	if (kversion.name_len && name_ptr) {
-		kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
-		if (!kversion.name)
-			goto out;
-	}
-	if (kversion.date_len && date_ptr) {
-		kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
-		if (!kversion.date)
-			goto out;
-	}
-	if (kversion.desc_len && desc_ptr) {
-		kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
-		if (!kversion.desc)
-			goto out;
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if ((kversion.name &&
-		     copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
-		    (kversion.date &&
-		     copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
-		    (kversion.desc &&
-		     copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
-			ret = -EFAULT;
-		if (put_user(kversion.version_major, &uversion->version_major) ||
-		    put_user(kversion.version_minor, &uversion->version_minor) ||
-		    put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
-		    put_user(kversion.name_len, &uversion->name_len) ||
-		    put_user(kversion.date_len, &uversion->date_len) ||
-		    put_user(kversion.desc_len, &uversion->desc_len))
-			ret = -EFAULT;
-	}
-
-out:
-	kfree(kversion.name);
-	kfree(kversion.date);
-	kfree(kversion.desc);
-	return ret;
-}
-
-typedef struct drm32_unique {
-	int	unique_len;	/* Length of unique */
-	u32	unique;		/* Unique name for driver instantiation */
-} drm32_unique_t;
-#define DRM32_IOCTL_GET_UNIQUE	DRM_IOWR(0x01, drm32_unique_t)
-#define DRM32_IOCTL_SET_UNIQUE	DRM_IOW( 0x10, drm32_unique_t)
-
-static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_unique_t *uarg = (drm32_unique_t *)arg;
-	drm_unique_t karg;
-	mm_segment_t old_fs;
-	char *uptr;
-	u32 tmp;
-	int ret;
-
-	if (get_user(karg.unique_len, &uarg->unique_len))
-		return -EFAULT;
-	karg.unique = NULL;
-
-	if (get_user(tmp, &uarg->unique))
-		return -EFAULT;
-
-	uptr = (char *) A(tmp);
-
-	if (uptr) {
-		karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
-		if (!karg.unique)
-			return -ENOMEM;
-		if (cmd == DRM32_IOCTL_SET_UNIQUE &&
-		    copy_from_user(karg.unique, uptr, karg.unique_len)) {
-			kfree(karg.unique);
-			return -EFAULT;
-		}
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	if (cmd == DRM32_IOCTL_GET_UNIQUE)
-		ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
-	else
-		ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (cmd == DRM32_IOCTL_GET_UNIQUE &&
-		    uptr != NULL &&
-		    copy_to_user(uptr, karg.unique, karg.unique_len))
-			ret = -EFAULT;
-		if (put_user(karg.unique_len, &uarg->unique_len))
-			ret = -EFAULT;
-	}
-
-	kfree(karg.unique);
-	return ret;
-}
-
-typedef struct drm32_map {
-	u32		offset;	/* Requested physical address (0 for SAREA) */
-	u32		size;	/* Requested physical size (bytes) */
-	drm_map_type_t	type;	/* Type of memory to map */
-	drm_map_flags_t	flags;	/* Flags */
-	u32		handle;	/* User-space: "Handle" to pass to mmap */
-				/* Kernel-space: kernel-virtual address */
-	int		mtrr;	/* MTRR slot used */
-	/* Private data */
-} drm32_map_t;
-#define DRM32_IOCTL_ADD_MAP	DRM_IOWR(0x15, drm32_map_t)
-
-static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_map_t *uarg = (drm32_map_t *) arg;
-	drm_map_t karg;
-	mm_segment_t old_fs;
-	u32 tmp;
-	int ret;
-
-	ret = get_user(karg.offset, &uarg->offset);
-	ret |= get_user(karg.size, &uarg->size);
-	ret |= get_user(karg.type, &uarg->type);
-	ret |= get_user(karg.flags, &uarg->flags);
-	ret |= get_user(tmp, &uarg->handle);
-	ret |= get_user(karg.mtrr, &uarg->mtrr);
-	if (ret)
-		return -EFAULT;
-
-	karg.handle = (void *) A(tmp);
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		ret = put_user(karg.offset, &uarg->offset);
-		ret |= put_user(karg.size, &uarg->size);
-		ret |= put_user(karg.type, &uarg->type);
-		ret |= put_user(karg.flags, &uarg->flags);
-		tmp = (u32) (long)karg.handle;
-		ret |= put_user(tmp, &uarg->handle);
-		ret |= put_user(karg.mtrr, &uarg->mtrr);
-		if (ret)
-			ret = -EFAULT;
-	}
-
-	return ret;
-}
-
-typedef struct drm32_buf_info {
-	int	count;	/* Entries in list */
-	u32	list;	/* (drm_buf_desc_t *) */
-} drm32_buf_info_t;
-#define DRM32_IOCTL_INFO_BUFS	DRM_IOWR(0x18, drm32_buf_info_t)
-
-static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
-	drm_buf_desc_t *ulist;
-	drm_buf_info_t karg;
-	mm_segment_t old_fs;
-	int orig_count, ret;
-	u32 tmp;
-
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp, &uarg->list))
-		return -EFAULT;
-
-	ulist = (drm_buf_desc_t *) A(tmp);
-
-	orig_count = karg.count;
-
-	karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
-	if (!karg.list)
-		return -EFAULT;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (karg.count <= orig_count &&
-		    (copy_to_user(ulist, karg.list,
-				  karg.count * sizeof(drm_buf_desc_t))))
-			ret = -EFAULT;
-		if (put_user(karg.count, &uarg->count))
-			ret = -EFAULT;
-	}
-
-	kfree(karg.list);
-	return ret;
-}
-
-typedef struct drm32_buf_free {
-	int	count;
-	u32	list;	/* (int *) */
-} drm32_buf_free_t;
-#define DRM32_IOCTL_FREE_BUFS	DRM_IOW( 0x1a, drm32_buf_free_t)
-
-static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
-	drm_buf_free_t karg;
-	mm_segment_t old_fs;
-	int *ulist;
-	int ret;
-	u32 tmp;
-
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp, &uarg->list))
-		return -EFAULT;
-
-	ulist = (int *) A(tmp);
-
-	karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
-	if (!karg.list)
-		return -ENOMEM;
-
-	ret = -EFAULT;
-	if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
-		goto out;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
-	set_fs(old_fs);
-
-out:
-	kfree(karg.list);
-	return ret;
-}
-
-typedef struct drm32_buf_pub {
-	int	idx;		/* Index into master buflist */
-	int	total;		/* Buffer size */
-	int	used;		/* Amount of buffer in use (for DMA) */
-	u32	address;	/* Address of buffer (void *) */
-} drm32_buf_pub_t;
-
-typedef struct drm32_buf_map {
-	int	count;		/* Length of buflist */
-	u32	virtual;	/* Mmaped area in user-virtual (void *) */
-	u32	list;		/* Buffer information (drm_buf_pub_t *) */
-} drm32_buf_map_t;
-#define DRM32_IOCTL_MAP_BUFS	DRM_IOWR(0x19, drm32_buf_map_t)
-
-static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
-	drm32_buf_pub_t *ulist;
-	drm_buf_map_t karg;
-	mm_segment_t old_fs;
-	int orig_count, ret, i;
-	u32 tmp1, tmp2;
-
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp1, &uarg->virtual) ||
-	    get_user(tmp2, &uarg->list))
-		return -EFAULT;
-
-	karg.virtual = (void *) A(tmp1);
-	ulist = (drm32_buf_pub_t *) A(tmp2);
-
-	orig_count = karg.count;
-
-	karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
-	if (!karg.list)
-		return -ENOMEM;
-
-	ret = -EFAULT;
-	for (i = 0; i < karg.count; i++) {
-		if (get_user(karg.list[i].idx, &ulist[i].idx) ||
-		    get_user(karg.list[i].total, &ulist[i].total) ||
-		    get_user(karg.list[i].used, &ulist[i].used) ||
-		    get_user(tmp1, &ulist[i].address))
-			goto out;
-
-		karg.list[i].address = (void *) A(tmp1);
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		for (i = 0; i < orig_count; i++) {
-			tmp1 = (u32) (long) karg.list[i].address;
-			if (put_user(karg.list[i].idx, &ulist[i].idx) ||
-			    put_user(karg.list[i].total, &ulist[i].total) ||
-			    put_user(karg.list[i].used, &ulist[i].used) ||
-			    put_user(tmp1, &ulist[i].address)) {
-				ret = -EFAULT;
-				goto out;
-			}
-		}
-		if (put_user(karg.count, &uarg->count))
-			ret = -EFAULT;
-	}
-
-out:
-	kfree(karg.list);
-	return ret;
-}
-
-typedef struct drm32_dma {
-	/* Indices here refer to the offset into
-	   buflist in drm_buf_get_t. */
-	int		context;	  /* Context handle */
-	int		send_count;	  /* Number of buffers to send */
-	u32		send_indices;	  /* List of handles to buffers (int *) */
-	u32		send_sizes;	  /* Lengths of data to send (int *) */
-	drm_dma_flags_t	flags;		  /* Flags */
-	int		request_count;	  /* Number of buffers requested */
-	int		request_size;	  /* Desired size for buffers */
-	u32		request_indices;  /* Buffer information (int *) */
-	u32		request_sizes;	  /* (int *) */
-	int		granted_count;	  /* Number of buffers granted */
-} drm32_dma_t;
-#define DRM32_IOCTL_DMA		DRM_IOWR(0x29, drm32_dma_t)
-
-/* RED PEN	The DRM layer blindly dereferences the send/request
- *		indice/size arrays even though they are userland
- *		pointers.  -DaveM
- */
-static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_dma_t *uarg = (drm32_dma_t *) arg;
-	int *u_si, *u_ss, *u_ri, *u_rs;
-	drm_dma_t karg;
-	mm_segment_t old_fs;
-	int ret;
-	u32 tmp1, tmp2, tmp3, tmp4;
-
-	karg.send_indices = karg.send_sizes = NULL;
-	karg.request_indices = karg.request_sizes = NULL;
-
-	if (get_user(karg.context, &uarg->context) ||
-	    get_user(karg.send_count, &uarg->send_count) ||
-	    get_user(tmp1, &uarg->send_indices) ||
-	    get_user(tmp2, &uarg->send_sizes) ||
-	    get_user(karg.flags, &uarg->flags) ||
-	    get_user(karg.request_count, &uarg->request_count) ||
-	    get_user(karg.request_size, &uarg->request_size) ||
-	    get_user(tmp3, &uarg->request_indices) ||
-	    get_user(tmp4, &uarg->request_sizes) ||
-	    get_user(karg.granted_count, &uarg->granted_count))
-		return -EFAULT;
-
-	u_si = (int *) A(tmp1);
-	u_ss = (int *) A(tmp2);
-	u_ri = (int *) A(tmp3);
-	u_rs = (int *) A(tmp4);
-
-	if (karg.send_count) {
-		karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
-		karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
-
-		ret = -ENOMEM;
-		if (!karg.send_indices || !karg.send_sizes)
-			goto out;
-
-		ret = -EFAULT;
-		if (copy_from_user(karg.send_indices, u_si,
-				   (karg.send_count * sizeof(int))) ||
-		    copy_from_user(karg.send_sizes, u_ss,
-				   (karg.send_count * sizeof(int))))
-			goto out;
-	}
-
-	if (karg.request_count) {
-		karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
-		karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
-
-		ret = -ENOMEM;
-		if (!karg.request_indices || !karg.request_sizes)
-			goto out;
-
-		ret = -EFAULT;
-		if (copy_from_user(karg.request_indices, u_ri,
-				   (karg.request_count * sizeof(int))) ||
-		    copy_from_user(karg.request_sizes, u_rs,
-				   (karg.request_count * sizeof(int))))
-			goto out;
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (put_user(karg.context, &uarg->context) ||
-		    put_user(karg.send_count, &uarg->send_count) ||
-		    put_user(karg.flags, &uarg->flags) ||
-		    put_user(karg.request_count, &uarg->request_count) ||
-		    put_user(karg.request_size, &uarg->request_size) ||
-		    put_user(karg.granted_count, &uarg->granted_count))
-			ret = -EFAULT;
-
-		if (karg.send_count) {
-			if (copy_to_user(u_si, karg.send_indices,
-					 (karg.send_count * sizeof(int))) ||
-			    copy_to_user(u_ss, karg.send_sizes,
-					 (karg.send_count * sizeof(int))))
-				ret = -EFAULT;
-		}
-		if (karg.request_count) {
-			if (copy_to_user(u_ri, karg.request_indices,
-					 (karg.request_count * sizeof(int))) ||
-			    copy_to_user(u_rs, karg.request_sizes,
-					 (karg.request_count * sizeof(int))))
-				ret = -EFAULT;
-		}
-	}
-
-out:
-	kfree(karg.send_indices);
-	kfree(karg.send_sizes);
-	kfree(karg.request_indices);
-	kfree(karg.request_sizes);
-	return ret;
-}
-
-typedef struct drm32_ctx_res {
-	int	count;
-	u32	contexts;	/* (drm_ctx_t *) */
-} drm32_ctx_res_t;
-#define DRM32_IOCTL_RES_CTX	DRM_IOWR(0x26, drm32_ctx_res_t)
-
-static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
-	drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
-	drm_ctx_t *ulist;
-	drm_ctx_res_t karg;
-	mm_segment_t old_fs;
-	int orig_count, ret;
-	u32 tmp;
-
-	karg.contexts = NULL;
-	if (get_user(karg.count, &uarg->count) ||
-	    get_user(tmp, &uarg->contexts))
-		return -EFAULT;
-
-	ulist = (drm_ctx_t *) A(tmp);
-
-	orig_count = karg.count;
-	if (karg.count && ulist) {
-		karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
-		if (!karg.contexts)
-			return -ENOMEM;
-		if (copy_from_user(karg.contexts, ulist,
-				   (karg.count * sizeof(drm_ctx_t)))) {
-			kfree(karg.contexts);
-			return -EFAULT;
-		}
-	}
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
-	set_fs(old_fs);
-
-	if (!ret) {
-		if (orig_count) {
-			if (copy_to_user(ulist, karg.contexts,
-					 (orig_count * sizeof(drm_ctx_t))))
-				ret = -EFAULT;
-		}
-		if (put_user(karg.count, &uarg->count))
-			ret = -EFAULT;
-	}
-
-	kfree(karg.contexts);
-	return ret;
-}
-
-#endif
-
 #define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
 #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)
 
@@ -561,11 +31,6 @@ IOCTL_TABLE_START
 #define DECLARES
 #include "compat_ioctl.c"
 
-/* PA-specific ioctls */
-COMPATIBLE_IOCTL(PA_PERF_ON)
-COMPATIBLE_IOCTL(PA_PERF_OFF)
-COMPATIBLE_IOCTL(PA_PERF_VERSION)
-
 /* And these ioctls need translation */
 HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
 HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
@@ -590,17 +55,6 @@ HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
 COMPATIBLE_IOCTL(RTC_EPOCH_SET)
 #endif
 
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version);
-HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique);
-HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique);
-HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap);
-HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs);
-HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs);
-HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs);
-HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma);
-HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx);
-#endif /* DRM */
 IOCTL_TABLE_END
 
 int ioctl_table_size = ARRAY_SIZE(ioctl_start);
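
The ~530 lines deleted above all follow one translation recipe for a 64-bit kernel serving 32-bit callers: declare a shadow struct whose pointer fields are u32, pull it in with get_user(), widen each handle with the A() macro, and replay the native ioctl. A self-contained sketch of just that widening step; the struct names are illustrative, not the DRM ABI:

#include <stdint.h>
#include <stdio.h>

typedef struct {		/* 32-bit userland view */
	uint32_t offset;
	uint32_t size;
	uint32_t handle;	/* a pointer, squeezed into 32 bits */
} map32_t;

typedef struct {		/* native 64-bit kernel view */
	unsigned long offset;
	unsigned long size;
	void *handle;
} map_t;

static void map_from_compat(map_t *k, const map32_t *u)
{
	k->offset = u->offset;
	k->size   = u->size;
	k->handle = (void *)(unsigned long)u->handle;	/* the A() macro */
}

int main(void)
{
	map32_t u = { 0x1000, 0x2000, 0xdeadbeef };
	map_t k;

	map_from_compat(&k, &u);
	printf("sizes: compat=%zu native=%zu, handle=%p\n",
	       sizeof(u), sizeof(k), k.handle);
	return 0;
}
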
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 006385dbee66..197936d9359a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,9 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <asm/io.h>
+
+#include <asm/smp.h>
 
 #undef PARISC_IRQ_CR16_COUNTS
 
@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
 */
 static volatile unsigned long cpu_eiem = 0;
 
-static void cpu_set_eiem(void *info)
-{
-	set_eiem((unsigned long) info);
-}
-
-static inline void cpu_disable_irq(unsigned int irq)
+static void cpu_disable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
 	cpu_eiem &= ~eirr_bit;
-	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+	/* Do nothing on the other CPUs.  If they get this interrupt,
+	 * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
+	 * handle it, and the set_eiem() at the bottom will ensure it
+	 * then gets disabled */
 }
 
 static void cpu_enable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
-	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
 	cpu_eiem |= eirr_bit;
-	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+
+	/* FIXME: while our interrupts aren't nested, we cannot reset
+	 * the eiem mask if we're already in an interrupt.  Once we
+	 * implement nested interrupts, this can go away
+	 */
+	if (!in_interrupt())
+		set_eiem(cpu_eiem);
+
+	/* This is just a simple NOP IPI.  But what it does is cause
+	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
+	 * of the interrupt handler */
+	smp_send_all_nop();
 }
 
 static unsigned int cpu_startup_irq(unsigned int irq)
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+#ifdef CONFIG_SMP
+int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+{
+	int cpu_dest;
+
+	/* timer and ipi have to always be received on all CPUs */
+	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+		/* Bad linux design decision.  The mask has already
+		 * been set; we must reset it */
+		irq_affinity[irq] = CPU_MASK_ALL;
+		return -EINVAL;
+	}
+
+	/* whatever mask they set, we just allow one CPU */
+	cpu_dest = first_cpu(*dest);
+	*dest = cpumask_of_cpu(cpu_dest);
+
+	return 0;
+}
+
+static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+{
+	if (cpu_check_affinity(irq, &dest))
+		return;
+
+	irq_affinity[irq] = dest;
+}
+#endif
+
 static struct hw_interrupt_type cpu_interrupt_type = {
 	.typename	= "CPU",
 	.startup	= cpu_startup_irq,
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.disable	= cpu_disable_irq,
 	.ack		= no_ack_irq,
 	.end		= no_end_irq,
-//	.set_affinity	= cpu_set_affinity_irq,
+#ifdef CONFIG_SMP
+	.set_affinity	= cpu_set_affinity_irq,
+#endif
 };
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
 	return -1;
 }
 
+
+unsigned long txn_affinity_addr(unsigned int irq, int cpu)
+{
+#ifdef CONFIG_SMP
+	irq_affinity[irq] = cpumask_of_cpu(cpu);
+#endif
+
+	return cpu_data[cpu].txn_addr;
+}
+
+
 unsigned long txn_alloc_addr(unsigned int virt_irq)
 {
 	static int next_cpu = -1;
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
 	if (next_cpu >= NR_CPUS)
 		next_cpu = 0;	/* nothing else, assign monarch */
 
-	return cpu_data[next_cpu].txn_addr;
+	return txn_affinity_addr(virt_irq, next_cpu);
 }
 
 
@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq_enter();
 
 	/*
-	 * Only allow interrupt processing to be interrupted by the
-	 * timer tick
+	 * Don't allow TIMER or IPI nested interrupts.
+	 * Allowing any single interrupt to nest can lead to that CPU
+	 * handling interrupts with all enabled interrupts unmasked.
 	 */
-	set_eiem(EIEM_MASK(TIMER_IRQ));
+	set_eiem(0UL);
 
 	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
 	 * 2) We loop here on EIRR contents in order to avoid
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 		if (!eirr_val)
 			break;
 
-		if (eirr_val & EIEM_MASK(TIMER_IRQ))
-			set_eiem(0);
-
 		mtctl(eirr_val, 23); /* reset bits we are going to process */
 
 		/* Work our way from MSb to LSb...same order we alloc EIRs */
 		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+#ifdef CONFIG_SMP
+			cpumask_t dest = irq_affinity[irq];
+#endif
 			if (!(bit & eirr_val))
 				continue;
 
 			/* clear bit in mask - can exit loop sooner */
 			eirr_val &= ~bit;
 
+#ifdef CONFIG_SMP
+			/* FIXME: because generic set affinity mucks
+			 * with the affinity before sending it to us
+			 * we can get the situation where the affinity is
+			 * wrong for our CPU type interrupts */
+			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
+			    !cpu_isset(smp_processor_id(), dest)) {
+				int cpu = first_cpu(dest);
+
+				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+				       irq, smp_processor_id(), cpu);
+				gsc_writel(irq + CPU_IRQ_BASE,
+					   cpu_data[cpu].hpa);
+				continue;
+			}
+#endif
+
 			__do_IRQ(irq, regs);
 		}
 	}
-	set_eiem(cpu_eiem);
+
+	set_eiem(cpu_eiem);	/* restore original mask */
 	irq_exit();
 }
 
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
+	.flags = SA_INTERRUPT,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
 	.name = "IPI",
+	.flags = SA_INTERRUPT,
 };
 #endif
 
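
Taken together, these irq.c hunks replace an on_each_cpu() broadcast with a lazy scheme: cpu_disable_irq() only clears the bit in the software-global cpu_eiem, dispatch filters pending bits with "& cpu_eiem", and each CPU rewrites its EIEM register from cpu_eiem when it leaves do_cpu_irq_mask(); the NOP IPI exists purely to push idle CPUs through that resync. A toy single-threaded model of the invariant, in plain C with no kernel APIs:

#include <stdio.h>

#define NCPUS 4

static unsigned long cpu_eiem = ~0UL;	/* software master mask */
static unsigned long hw_eiem[NCPUS];	/* per-CPU EIEM register copies */

/* What every CPU does on leaving its interrupt handler; the NOP IPI
 * exists only to force the other CPUs through this resync. */
static void irq_exit_resync(int cpu)
{
	hw_eiem[cpu] = cpu_eiem;
}

static void cpu_disable_irq_model(unsigned int bit)
{
	cpu_eiem &= ~(1UL << bit);
	/* no cross-CPU call here: a stale hw_eiem copy is harmless,
	 * because dispatch filters pending bits with "& cpu_eiem" */
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		irq_exit_resync(cpu);

	cpu_disable_irq_model(3);

	/* CPU 1 still has bit 3 enabled in "hardware", but the
	 * dispatch filter already masks it out: */
	printf("dispatchable on cpu1: %#lx\n",
	       hw_eiem[1] & (1UL << 3) & cpu_eiem);

	irq_exit_resync(1);	/* NOP IPI arrived; register catches up */
	printf("hw_eiem[1] bit 3: %lu\n", (hw_eiem[1] >> 3) & 1);
	return 0;
}
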
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 44670d6e06f4..f6fec62b6a2f 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -196,8 +196,7 @@ static int perf_open(struct inode *inode, struct file *file);
 static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
 static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
 	loff_t *ppos);
-static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	unsigned long arg);
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 static void perf_start_counters(void);
 static int perf_stop_counters(uint32_t *raddr);
 static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
@@ -438,48 +437,56 @@ static void perf_patch_images(void)
  * must be running on the processor that you wish to change.
  */
 
-static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	unsigned long arg)
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long error_start;
 	uint32_t raddr[4];
+	int error = 0;
 
+	lock_kernel();
 	switch (cmd) {
 
 	case PA_PERF_ON:
 		/* Start the counters */
 		perf_start_counters();
-		return 0;
+		break;
 
 	case PA_PERF_OFF:
 		error_start = perf_stop_counters(raddr);
 		if (error_start != 0) {
 			printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
-			return -EFAULT;
+			error = -EFAULT;
+			break;
 		}
 
 		/* copy out the Counters */
 		if (copy_to_user((void __user *)arg, raddr,
 				sizeof (raddr)) != 0) {
-			return -EFAULT;
+			error = -EFAULT;
+			break;
 		}
-		return 0;
+		break;
 
 	case PA_PERF_VERSION:
 		/* Return the version # */
-		return put_user(PERF_VERSION, (int *)arg);
+		error = put_user(PERF_VERSION, (int *)arg);
+		break;
 
 	default:
-		break;
+		error = -ENOTTY;
 	}
-	return -ENOTTY;
+
+	unlock_kernel();
+
+	return error;
 }
 
 static struct file_operations perf_fops = {
 	.llseek = no_llseek,
 	.read = perf_read,
 	.write = perf_write,
-	.ioctl = perf_ioctl,
+	.unlocked_ioctl = perf_ioctl,
+	.compat_ioctl = perf_ioctl,
 	.open = perf_open,
 	.release = perf_release
 };
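
The rewrite above converts a .ioctl handler, which ran under the Big Kernel Lock, into an .unlocked_ioctl one, so it must take lock_kernel() itself and funnel every case to a single unlock point; that is why each early "return" became "error = ...; break;". A user-space sketch of the single-exit discipline, with a pthread mutex standing in for the BKL (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static long do_ioctl(unsigned int cmd)
{
	long error = 0;

	pthread_mutex_lock(&big_lock);	/* stands in for lock_kernel() */
	switch (cmd) {
	case 0:
		/* do the work */
		break;
	case 1:
		error = -EFAULT;	/* was: return -EFAULT (lock leak!) */
		break;
	default:
		error = -ENOTTY;
	}
	pthread_mutex_unlock(&big_lock);	/* single exit pairs the unlock */

	return error;
}

int main(void)
{
	printf("%ld %ld %ld\n", do_ioctl(0), do_ioctl(1), do_ioctl(9));
	return 0;
}
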
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index b6fe202a620d..27160e8bf15b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -264,6 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		 * sigkill. perhaps it should be put in the status
 		 * that it wants to exit.
 		 */
+		ret = 0;
 		DBG("sys_ptrace(KILL)\n");
 		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
 			goto out_tsk;
@@ -344,11 +345,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
 	case PTRACE_GETEVENTMSG:
 		ret = put_user(child->ptrace_message, (unsigned int __user *) data);
-		goto out;
+		goto out_tsk;
 
 	default:
 		ret = ptrace_request(child, request, addr, data);
-		goto out;
+		goto out_tsk;
 	}
 
 out_wake_notrap:
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 82c24e62ab63..3a25a7bd673e 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	struct rt_sigframe __user *frame;
 	unsigned long rp, usp;
 	unsigned long haddr, sigframe_size;
-	struct siginfo si;
 	int err = 0;
 #ifdef __LP64__
 	compat_int_t compat_val;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a9ecf6465784..ce89da0f654d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	while (ops) {
 		unsigned long which = ffz(~ops);
 
+		ops &= ~(1 << which);
+
 		switch (which) {
+		case IPI_NOP:
+#if (kDEBUG>=100)
+			printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
+#endif /* kDEBUG */
+			break;
+
 		case IPI_RESCHEDULE:
 #if (kDEBUG>=100)
 			printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
 #endif /* kDEBUG */
-			ops &= ~(1 << IPI_RESCHEDULE);
 			/*
 			 * Reschedule callback.  Everything to be
 			 * done is done by the interrupt return path.
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
 			printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
 #endif /* kDEBUG */
-			ops &= ~(1 << IPI_CALL_FUNC);
 			{
 				volatile struct smp_call_struct *data;
 				void (*func)(void *info);
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
 			printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
 #endif /* kDEBUG */
-			ops &= ~(1 << IPI_CPU_START);
 #ifdef ENTRY_SYS_CPUS
 			p->state = STATE_RUNNING;
 #endif
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
 			printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
 #endif /* kDEBUG */
-			ops &= ~(1 << IPI_CPU_STOP);
 #ifdef ENTRY_SYS_CPUS
 #else
 			halt_processor();
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 #if (kDEBUG>=100)
 			printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
 #endif /* kDEBUG */
-			ops &= ~(1 << IPI_CPU_TEST);
 			break;
 
 		default:
 			printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
 				this_cpu, which);
-			ops &= ~(1 << which);
 			return IRQ_NONE;
 		} /* Switch */
 	} /* while (ops) */
@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
 void
 smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
+void
+smp_send_all_nop(void)
+{
+	send_IPI_allbutself(IPI_NOP);
+}
+
 
 /**
  * Run a function on all other CPUs.
@@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
+
+	/* can also deadlock if IPIs are disabled */
+	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
+
 
 	data.func = func;
 	data.info = info;
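
The ipi_interrupt() hunks above hoist the per-case "ops &= ~(1 << ...)" up to a single spot before the switch, so every IPI type, including the new IPI_NOP, consumes its pending bit exactly once and a newly added case cannot forget to clear it. A compact user-space model of the ffz-style dispatch loop; the IPI names and numbers are illustrative:

#include <stdio.h>

/* ffz(~ops): index of the lowest set bit in ops (ops must be nonzero) */
static unsigned long lowest_bit(unsigned long ops)
{
	unsigned long n = 0;

	while (!(ops & (1UL << n)))
		n++;
	return n;
}

static void handle_ops(unsigned long ops)
{
	while (ops) {
		unsigned long which = lowest_bit(ops);

		ops &= ~(1UL << which);	/* hoisted: always consumed */

		switch (which) {
		case 0:
			printf("IPI_NOP\n");
			break;
		case 1:
			printf("IPI_RESCHEDULE\n");
			break;
		default:
			printf("unknown IPI %lu\n", which);
			return;	/* like the kernel's IRQ_NONE path */
		}
	}
}

int main(void)
{
	handle_ops(0x3);	/* NOP + RESCHEDULE */
	handle_ops(0x80);	/* unknown: consumed exactly once */
	return 0;
}
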
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index b29b76b42bb7..d66163492890 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -164,7 +164,7 @@ linux_gateway_entry:
 #endif
 	STREG	%r2, TASK_PT_GR30(%r1)	/* ... and save it */
 
-	STREG	%r20, TASK_PT_GR20(%r1)
+	STREG	%r20, TASK_PT_GR20(%r1)	/* Syscall number */
 	STREG	%r21, TASK_PT_GR21(%r1)
 	STREG	%r22, TASK_PT_GR22(%r1)
 	STREG	%r23, TASK_PT_GR23(%r1)	/* 4th argument */
@@ -527,6 +527,7 @@ lws_compare_and_swap:
 	   We *must* giveup this call and fail.
 	*/
 	ldw	4(%sr2,%r20), %r28	/* Load thread register */
+	/* WARNING: If cr27 cycles to the same value we have problems */
 	mfctl	%cr27, %r21		/* Get current thread register */
 	cmpb,<>,n	%r21, %r28, cas_lock	/* Called recursive? */
 	b	lws_exit		/* Return error! */