author	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-18 17:25:40 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-18 17:25:40 -0500
commit	62ae144f56b61f541193df4a6465c06ee7eb9096 (patch)
tree	913cc058b162c48e6c72d2b016b9e44201b16cee
parent	bcd039b230f738243193ef7dbb03298d967b8370 (diff)
parent	2161558fa5bebfeb272493ae91e836b497029023 (diff)
Merge branch 'parisc' of master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6
-rw-r--r--  CREDITS                        |  12
-rw-r--r--  arch/parisc/kernel/drivers.c   |   8
-rw-r--r--  arch/parisc/kernel/entry.S     |   1
-rw-r--r--  arch/parisc/kernel/inventory.c |   2
-rw-r--r--  arch/parisc/kernel/ioctl32.c   | 546
-rw-r--r--  arch/parisc/kernel/irq.c       | 110
-rw-r--r--  arch/parisc/kernel/perf.c      |  33
-rw-r--r--  arch/parisc/kernel/ptrace.c    |   5
-rw-r--r--  arch/parisc/kernel/signal.c    |   1
-rw-r--r--  arch/parisc/kernel/smp.c       |  24
-rw-r--r--  arch/parisc/kernel/syscall.S   |   3
-rw-r--r--  drivers/ide/Kconfig            |   2
-rw-r--r--  drivers/isdn/hisax/Kconfig     |  12
-rw-r--r--  drivers/isdn/pcbit/Kconfig     |   2
-rw-r--r--  drivers/parisc/iosapic.c       |  26
-rw-r--r--  drivers/parisc/superio.c       |  35
-rw-r--r--  drivers/serial/Kconfig         |   2
-rw-r--r--  drivers/serial/mux.c           |  19
-rw-r--r--  include/asm-parisc/irq.h       |   5
-rw-r--r--  include/asm-parisc/smp.h       |   7
-rw-r--r--  include/asm-parisc/spinlock.h  |  19
-rw-r--r--  include/asm-parisc/tlbflush.h  |  16
-rw-r--r--  include/linux/mm.h             |   2
-rw-r--r--  mm/mmap.c                      |   2
24 files changed, 245 insertions, 649 deletions
diff --git a/CREDITS b/CREDITS
index 192f749eba25..1b4f8694fa48 100644
--- a/CREDITS
+++ b/CREDITS
@@ -611,8 +611,7 @@ S: USA
611N: Randolph Chung 611N: Randolph Chung
612E: tausq@debian.org 612E: tausq@debian.org
613D: Linux/PA-RISC hacker 613D: Linux/PA-RISC hacker
614S: Los Altos, CA 94022 614S: Hong Kong
615S: USA
616 615
617N: Juan Jose Ciarlante 616N: Juan Jose Ciarlante
618W: http://juanjox.kernelnotes.org/ 617W: http://juanjox.kernelnotes.org/
@@ -3405,6 +3404,15 @@ S: Chudenicka 8
3405S: 10200 Prague 10, Hostivar 3404S: 10200 Prague 10, Hostivar
3406S: Czech Republic 3405S: Czech Republic
3407 3406
3407N: Thibaut Varene
3408E: T-Bone@parisc-linux.org
3409W: http://www.parisc-linux.org/
3410P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
3411D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
3412D: Some bits in an ARM port, S1D13XXX FB driver, random patches here and there
3413D: AD1889 sound driver
3414S: Paris, France
3415
3408N: Heikki Vatiainen 3416N: Heikki Vatiainen
3409E: hessu@cs.tut.fi 3417E: hessu@cs.tut.fi
3410D: Co-author of Multi-Protocol Over ATM (MPOA), some LANE hacks 3418D: Co-author of Multi-Protocol Over ATM (MPOA), some LANE hacks
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 988844a169e6..d016d672ec2b 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -499,8 +499,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
499 499
500 dev = create_parisc_device(mod_path); 500 dev = create_parisc_device(mod_path);
501 if (dev->id.hw_type != HPHW_FAULTY) { 501 if (dev->id.hw_type != HPHW_FAULTY) {
502 printk("Two devices have hardware path %s. Please file a bug with HP.\n" 502 printk(KERN_ERR "Two devices have hardware path [%s]. "
503 "In the meantime, you could try rearranging your cards.\n", parisc_pathname(dev)); 503 "IODC data for second device: "
504 "%02x%02x%02x%02x%02x%02x\n"
505 "Rearranging GSC cards sometimes helps\n",
506 parisc_pathname(dev), iodc_data[0], iodc_data[1],
507 iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
504 return NULL; 508 return NULL;
505 } 509 }
506 510
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c7e66ee5b083..9af4b22a6d77 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1846,6 +1846,7 @@ sys_clone_wrapper:
1846 ldo -16(%r30),%r29 /* Reference param save area */ 1846 ldo -16(%r30),%r29 /* Reference param save area */
1847#endif 1847#endif
1848 1848
1849 /* WARNING - Clobbers r19 and r21, userspace must save these! */
1849 STREG %r2,PT_GR19(%r1) /* save for child */ 1850 STREG %r2,PT_GR19(%r1) /* save for child */
1850 STREG %r30,PT_GR21(%r1) 1851 STREG %r30,PT_GR21(%r1)
1851 BL sys_clone,%r2 1852 BL sys_clone,%r2
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 1a1c66422736..8f563871e83c 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
188 temp = pa_pdc_cell.cba; 188 temp = pa_pdc_cell.cba;
189 dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path); 189 dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
190 if (!dev) { 190 if (!dev) {
191 return PDC_NE_MOD; 191 return PDC_OK;
192 } 192 }
193 193
194 /* alloc_pa_dev sets dev->hpa */ 194 /* alloc_pa_dev sets dev->hpa */
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
index 0a331104ad56..4eada1bb27f0 100644
--- a/arch/parisc/kernel/ioctl32.c
+++ b/arch/parisc/kernel/ioctl32.c
@@ -19,536 +19,6 @@
19#define CODE 19#define CODE
20#include "compat_ioctl.c" 20#include "compat_ioctl.c"
21 21
22/* Use this to get at 32-bit user passed pointers.
23 See sys_sparc32.c for description about these. */
24#define A(__x) ((unsigned long)(__x))
25/* The same for use with copy_from_user() and copy_to_user(). */
26#define B(__x) ((void *)(unsigned long)(__x))
27
28#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
29/* This really belongs in include/linux/drm.h -DaveM */
30#include "../../../drivers/char/drm/drm.h"
31
32typedef struct drm32_version {
33 int version_major; /* Major version */
34 int version_minor; /* Minor version */
35 int version_patchlevel;/* Patch level */
36 int name_len; /* Length of name buffer */
37 u32 name; /* Name of driver */
38 int date_len; /* Length of date buffer */
39 u32 date; /* User-space buffer to hold date */
40 int desc_len; /* Length of desc buffer */
41 u32 desc; /* User-space buffer to hold desc */
42} drm32_version_t;
43#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
44
45static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
46{
47 drm32_version_t *uversion = (drm32_version_t *)arg;
48 char *name_ptr, *date_ptr, *desc_ptr;
49 u32 tmp1, tmp2, tmp3;
50 drm_version_t kversion;
51 mm_segment_t old_fs;
52 int ret;
53
54 memset(&kversion, 0, sizeof(kversion));
55 if (get_user(kversion.name_len, &uversion->name_len) ||
56 get_user(kversion.date_len, &uversion->date_len) ||
57 get_user(kversion.desc_len, &uversion->desc_len) ||
58 get_user(tmp1, &uversion->name) ||
59 get_user(tmp2, &uversion->date) ||
60 get_user(tmp3, &uversion->desc))
61 return -EFAULT;
62
63 name_ptr = (char *) A(tmp1);
64 date_ptr = (char *) A(tmp2);
65 desc_ptr = (char *) A(tmp3);
66
67 ret = -ENOMEM;
68 if (kversion.name_len && name_ptr) {
69 kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
70 if (!kversion.name)
71 goto out;
72 }
73 if (kversion.date_len && date_ptr) {
74 kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
75 if (!kversion.date)
76 goto out;
77 }
78 if (kversion.desc_len && desc_ptr) {
79 kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
80 if (!kversion.desc)
81 goto out;
82 }
83
84 old_fs = get_fs();
85 set_fs(KERNEL_DS);
86 ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
87 set_fs(old_fs);
88
89 if (!ret) {
90 if ((kversion.name &&
91 copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
92 (kversion.date &&
93 copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
94 (kversion.desc &&
95 copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
96 ret = -EFAULT;
97 if (put_user(kversion.version_major, &uversion->version_major) ||
98 put_user(kversion.version_minor, &uversion->version_minor) ||
99 put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
100 put_user(kversion.name_len, &uversion->name_len) ||
101 put_user(kversion.date_len, &uversion->date_len) ||
102 put_user(kversion.desc_len, &uversion->desc_len))
103 ret = -EFAULT;
104 }
105
106out:
107 kfree(kversion.name);
108 kfree(kversion.date);
109 kfree(kversion.desc);
110 return ret;
111}
112
113typedef struct drm32_unique {
114 int unique_len; /* Length of unique */
115 u32 unique; /* Unique name for driver instantiation */
116} drm32_unique_t;
117#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
118#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
119
120static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
121{
122 drm32_unique_t *uarg = (drm32_unique_t *)arg;
123 drm_unique_t karg;
124 mm_segment_t old_fs;
125 char *uptr;
126 u32 tmp;
127 int ret;
128
129 if (get_user(karg.unique_len, &uarg->unique_len))
130 return -EFAULT;
131 karg.unique = NULL;
132
133 if (get_user(tmp, &uarg->unique))
134 return -EFAULT;
135
136 uptr = (char *) A(tmp);
137
138 if (uptr) {
139 karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
140 if (!karg.unique)
141 return -ENOMEM;
142 if (cmd == DRM32_IOCTL_SET_UNIQUE &&
143 copy_from_user(karg.unique, uptr, karg.unique_len)) {
144 kfree(karg.unique);
145 return -EFAULT;
146 }
147 }
148
149 old_fs = get_fs();
150 set_fs(KERNEL_DS);
151 if (cmd == DRM32_IOCTL_GET_UNIQUE)
152 ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
153 else
154 ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
155 set_fs(old_fs);
156
157 if (!ret) {
158 if (cmd == DRM32_IOCTL_GET_UNIQUE &&
159 uptr != NULL &&
160 copy_to_user(uptr, karg.unique, karg.unique_len))
161 ret = -EFAULT;
162 if (put_user(karg.unique_len, &uarg->unique_len))
163 ret = -EFAULT;
164 }
165
166 kfree(karg.unique);
167 return ret;
168}
169
170typedef struct drm32_map {
171 u32 offset; /* Requested physical address (0 for SAREA)*/
172 u32 size; /* Requested physical size (bytes) */
173 drm_map_type_t type; /* Type of memory to map */
174 drm_map_flags_t flags; /* Flags */
175 u32 handle; /* User-space: "Handle" to pass to mmap */
176 /* Kernel-space: kernel-virtual address */
177 int mtrr; /* MTRR slot used */
178 /* Private data */
179} drm32_map_t;
180#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
181
182static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
183{
184 drm32_map_t *uarg = (drm32_map_t *) arg;
185 drm_map_t karg;
186 mm_segment_t old_fs;
187 u32 tmp;
188 int ret;
189
190 ret = get_user(karg.offset, &uarg->offset);
191 ret |= get_user(karg.size, &uarg->size);
192 ret |= get_user(karg.type, &uarg->type);
193 ret |= get_user(karg.flags, &uarg->flags);
194 ret |= get_user(tmp, &uarg->handle);
195 ret |= get_user(karg.mtrr, &uarg->mtrr);
196 if (ret)
197 return -EFAULT;
198
199 karg.handle = (void *) A(tmp);
200
201 old_fs = get_fs();
202 set_fs(KERNEL_DS);
203 ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
204 set_fs(old_fs);
205
206 if (!ret) {
207 ret = put_user(karg.offset, &uarg->offset);
208 ret |= put_user(karg.size, &uarg->size);
209 ret |= put_user(karg.type, &uarg->type);
210 ret |= put_user(karg.flags, &uarg->flags);
211 tmp = (u32) (long)karg.handle;
212 ret |= put_user(tmp, &uarg->handle);
213 ret |= put_user(karg.mtrr, &uarg->mtrr);
214 if (ret)
215 ret = -EFAULT;
216 }
217
218 return ret;
219}
220
221typedef struct drm32_buf_info {
222 int count; /* Entries in list */
223 u32 list; /* (drm_buf_desc_t *) */
224} drm32_buf_info_t;
225#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
226
227static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
228{
229 drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
230 drm_buf_desc_t *ulist;
231 drm_buf_info_t karg;
232 mm_segment_t old_fs;
233 int orig_count, ret;
234 u32 tmp;
235
236 if (get_user(karg.count, &uarg->count) ||
237 get_user(tmp, &uarg->list))
238 return -EFAULT;
239
240 ulist = (drm_buf_desc_t *) A(tmp);
241
242 orig_count = karg.count;
243
244 karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
245 if (!karg.list)
246 return -EFAULT;
247
248 old_fs = get_fs();
249 set_fs(KERNEL_DS);
250 ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
251 set_fs(old_fs);
252
253 if (!ret) {
254 if (karg.count <= orig_count &&
255 (copy_to_user(ulist, karg.list,
256 karg.count * sizeof(drm_buf_desc_t))))
257 ret = -EFAULT;
258 if (put_user(karg.count, &uarg->count))
259 ret = -EFAULT;
260 }
261
262 kfree(karg.list);
263 return ret;
264}
265
266typedef struct drm32_buf_free {
267 int count;
268 u32 list; /* (int *) */
269} drm32_buf_free_t;
270#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
271
272static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
273{
274 drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
275 drm_buf_free_t karg;
276 mm_segment_t old_fs;
277 int *ulist;
278 int ret;
279 u32 tmp;
280
281 if (get_user(karg.count, &uarg->count) ||
282 get_user(tmp, &uarg->list))
283 return -EFAULT;
284
285 ulist = (int *) A(tmp);
286
287 karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
288 if (!karg.list)
289 return -ENOMEM;
290
291 ret = -EFAULT;
292 if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
293 goto out;
294
295 old_fs = get_fs();
296 set_fs(KERNEL_DS);
297 ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
298 set_fs(old_fs);
299
300out:
301 kfree(karg.list);
302 return ret;
303}
304
305typedef struct drm32_buf_pub {
306 int idx; /* Index into master buflist */
307 int total; /* Buffer size */
308 int used; /* Amount of buffer in use (for DMA) */
309 u32 address; /* Address of buffer (void *) */
310} drm32_buf_pub_t;
311
312typedef struct drm32_buf_map {
313 int count; /* Length of buflist */
314 u32 virtual; /* Mmaped area in user-virtual (void *) */
315 u32 list; /* Buffer information (drm_buf_pub_t *) */
316} drm32_buf_map_t;
317#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
318
319static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
320{
321 drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
322 drm32_buf_pub_t *ulist;
323 drm_buf_map_t karg;
324 mm_segment_t old_fs;
325 int orig_count, ret, i;
326 u32 tmp1, tmp2;
327
328 if (get_user(karg.count, &uarg->count) ||
329 get_user(tmp1, &uarg->virtual) ||
330 get_user(tmp2, &uarg->list))
331 return -EFAULT;
332
333 karg.virtual = (void *) A(tmp1);
334 ulist = (drm32_buf_pub_t *) A(tmp2);
335
336 orig_count = karg.count;
337
338 karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
339 if (!karg.list)
340 return -ENOMEM;
341
342 ret = -EFAULT;
343 for (i = 0; i < karg.count; i++) {
344 if (get_user(karg.list[i].idx, &ulist[i].idx) ||
345 get_user(karg.list[i].total, &ulist[i].total) ||
346 get_user(karg.list[i].used, &ulist[i].used) ||
347 get_user(tmp1, &ulist[i].address))
348 goto out;
349
350 karg.list[i].address = (void *) A(tmp1);
351 }
352
353 old_fs = get_fs();
354 set_fs(KERNEL_DS);
355 ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
356 set_fs(old_fs);
357
358 if (!ret) {
359 for (i = 0; i < orig_count; i++) {
360 tmp1 = (u32) (long) karg.list[i].address;
361 if (put_user(karg.list[i].idx, &ulist[i].idx) ||
362 put_user(karg.list[i].total, &ulist[i].total) ||
363 put_user(karg.list[i].used, &ulist[i].used) ||
364 put_user(tmp1, &ulist[i].address)) {
365 ret = -EFAULT;
366 goto out;
367 }
368 }
369 if (put_user(karg.count, &uarg->count))
370 ret = -EFAULT;
371 }
372
373out:
374 kfree(karg.list);
375 return ret;
376}
377
378typedef struct drm32_dma {
379 /* Indices here refer to the offset into
380 buflist in drm_buf_get_t. */
381 int context; /* Context handle */
382 int send_count; /* Number of buffers to send */
383 u32 send_indices; /* List of handles to buffers (int *) */
384 u32 send_sizes; /* Lengths of data to send (int *) */
385 drm_dma_flags_t flags; /* Flags */
386 int request_count; /* Number of buffers requested */
387 int request_size; /* Desired size for buffers */
388 u32 request_indices; /* Buffer information (int *) */
389 u32 request_sizes; /* (int *) */
390 int granted_count; /* Number of buffers granted */
391} drm32_dma_t;
392#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
393
394/* RED PEN The DRM layer blindly dereferences the send/request
395 * indice/size arrays even though they are userland
396 * pointers. -DaveM
397 */
398static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
399{
400 drm32_dma_t *uarg = (drm32_dma_t *) arg;
401 int *u_si, *u_ss, *u_ri, *u_rs;
402 drm_dma_t karg;
403 mm_segment_t old_fs;
404 int ret;
405 u32 tmp1, tmp2, tmp3, tmp4;
406
407 karg.send_indices = karg.send_sizes = NULL;
408 karg.request_indices = karg.request_sizes = NULL;
409
410 if (get_user(karg.context, &uarg->context) ||
411 get_user(karg.send_count, &uarg->send_count) ||
412 get_user(tmp1, &uarg->send_indices) ||
413 get_user(tmp2, &uarg->send_sizes) ||
414 get_user(karg.flags, &uarg->flags) ||
415 get_user(karg.request_count, &uarg->request_count) ||
416 get_user(karg.request_size, &uarg->request_size) ||
417 get_user(tmp3, &uarg->request_indices) ||
418 get_user(tmp4, &uarg->request_sizes) ||
419 get_user(karg.granted_count, &uarg->granted_count))
420 return -EFAULT;
421
422 u_si = (int *) A(tmp1);
423 u_ss = (int *) A(tmp2);
424 u_ri = (int *) A(tmp3);
425 u_rs = (int *) A(tmp4);
426
427 if (karg.send_count) {
428 karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
429 karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
430
431 ret = -ENOMEM;
432 if (!karg.send_indices || !karg.send_sizes)
433 goto out;
434
435 ret = -EFAULT;
436 if (copy_from_user(karg.send_indices, u_si,
437 (karg.send_count * sizeof(int))) ||
438 copy_from_user(karg.send_sizes, u_ss,
439 (karg.send_count * sizeof(int))))
440 goto out;
441 }
442
443 if (karg.request_count) {
444 karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
445 karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
446
447 ret = -ENOMEM;
448 if (!karg.request_indices || !karg.request_sizes)
449 goto out;
450
451 ret = -EFAULT;
452 if (copy_from_user(karg.request_indices, u_ri,
453 (karg.request_count * sizeof(int))) ||
454 copy_from_user(karg.request_sizes, u_rs,
455 (karg.request_count * sizeof(int))))
456 goto out;
457 }
458
459 old_fs = get_fs();
460 set_fs(KERNEL_DS);
461 ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
462 set_fs(old_fs);
463
464 if (!ret) {
465 if (put_user(karg.context, &uarg->context) ||
466 put_user(karg.send_count, &uarg->send_count) ||
467 put_user(karg.flags, &uarg->flags) ||
468 put_user(karg.request_count, &uarg->request_count) ||
469 put_user(karg.request_size, &uarg->request_size) ||
470 put_user(karg.granted_count, &uarg->granted_count))
471 ret = -EFAULT;
472
473 if (karg.send_count) {
474 if (copy_to_user(u_si, karg.send_indices,
475 (karg.send_count * sizeof(int))) ||
476 copy_to_user(u_ss, karg.send_sizes,
477 (karg.send_count * sizeof(int))))
478 ret = -EFAULT;
479 }
480 if (karg.request_count) {
481 if (copy_to_user(u_ri, karg.request_indices,
482 (karg.request_count * sizeof(int))) ||
483 copy_to_user(u_rs, karg.request_sizes,
484 (karg.request_count * sizeof(int))))
485 ret = -EFAULT;
486 }
487 }
488
489out:
490 kfree(karg.send_indices);
491 kfree(karg.send_sizes);
492 kfree(karg.request_indices);
493 kfree(karg.request_sizes);
494 return ret;
495}
496
497typedef struct drm32_ctx_res {
498 int count;
499 u32 contexts; /* (drm_ctx_t *) */
500} drm32_ctx_res_t;
501#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
502
503static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
504{
505 drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
506 drm_ctx_t *ulist;
507 drm_ctx_res_t karg;
508 mm_segment_t old_fs;
509 int orig_count, ret;
510 u32 tmp;
511
512 karg.contexts = NULL;
513 if (get_user(karg.count, &uarg->count) ||
514 get_user(tmp, &uarg->contexts))
515 return -EFAULT;
516
517 ulist = (drm_ctx_t *) A(tmp);
518
519 orig_count = karg.count;
520 if (karg.count && ulist) {
521 karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
522 if (!karg.contexts)
523 return -ENOMEM;
524 if (copy_from_user(karg.contexts, ulist,
525 (karg.count * sizeof(drm_ctx_t)))) {
526 kfree(karg.contexts);
527 return -EFAULT;
528 }
529 }
530
531 old_fs = get_fs();
532 set_fs(KERNEL_DS);
533 ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
534 set_fs(old_fs);
535
536 if (!ret) {
537 if (orig_count) {
538 if (copy_to_user(ulist, karg.contexts,
539 (orig_count * sizeof(drm_ctx_t))))
540 ret = -EFAULT;
541 }
542 if (put_user(karg.count, &uarg->count))
543 ret = -EFAULT;
544 }
545
546 kfree(karg.contexts);
547 return ret;
548}
549
550#endif
551
552#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL }, 22#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
553#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl) 23#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)
554 24
@@ -561,11 +31,6 @@ IOCTL_TABLE_START
561#define DECLARES 31#define DECLARES
562#include "compat_ioctl.c" 32#include "compat_ioctl.c"
563 33
564/* PA-specific ioctls */
565COMPATIBLE_IOCTL(PA_PERF_ON)
566COMPATIBLE_IOCTL(PA_PERF_OFF)
567COMPATIBLE_IOCTL(PA_PERF_VERSION)
568
569/* And these ioctls need translation */ 34/* And these ioctls need translation */
570HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc) 35HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
571HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc) 36HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
@@ -590,17 +55,6 @@ HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
590COMPATIBLE_IOCTL(RTC_EPOCH_SET) 55COMPATIBLE_IOCTL(RTC_EPOCH_SET)
591#endif 56#endif
592 57
593#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
594HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version);
595HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique);
596HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique);
597HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap);
598HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs);
599HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs);
600HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs);
601HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma);
602HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx);
603#endif /* DRM */
604IOCTL_TABLE_END 58IOCTL_TABLE_END
605 59
606int ioctl_table_size = ARRAY_SIZE(ioctl_start); 60int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 006385dbee66..197936d9359a 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -30,6 +30,9 @@
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <asm/io.h>
34
35#include <asm/smp.h>
33 36
34#undef PARISC_IRQ_CR16_COUNTS 37#undef PARISC_IRQ_CR16_COUNTS
35 38
@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
43*/ 46*/
44static volatile unsigned long cpu_eiem = 0; 47static volatile unsigned long cpu_eiem = 0;
45 48
46static void cpu_set_eiem(void *info) 49static void cpu_disable_irq(unsigned int irq)
47{
48 set_eiem((unsigned long) info);
49}
50
51static inline void cpu_disable_irq(unsigned int irq)
52{ 50{
53 unsigned long eirr_bit = EIEM_MASK(irq); 51 unsigned long eirr_bit = EIEM_MASK(irq);
54 52
55 cpu_eiem &= ~eirr_bit; 53 cpu_eiem &= ~eirr_bit;
56 on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); 54 /* Do nothing on the other CPUs. If they get this interrupt,
55 * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
56 * handle it, and the set_eiem() at the bottom will ensure it
57 * then gets disabled */
57} 58}
58 59
59static void cpu_enable_irq(unsigned int irq) 60static void cpu_enable_irq(unsigned int irq)
60{ 61{
61 unsigned long eirr_bit = EIEM_MASK(irq); 62 unsigned long eirr_bit = EIEM_MASK(irq);
62 63
63 mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
64 cpu_eiem |= eirr_bit; 64 cpu_eiem |= eirr_bit;
65 on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); 65
66 /* FIXME: while our interrupts aren't nested, we cannot reset
67 * the eiem mask if we're already in an interrupt. Once we
68 * implement nested interrupts, this can go away
69 */
70 if (!in_interrupt())
71 set_eiem(cpu_eiem);
72
73 /* This is just a simple NOP IPI. But what it does is cause
74 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
75 * of the interrupt handler */
76 smp_send_all_nop();
66} 77}
67 78
68static unsigned int cpu_startup_irq(unsigned int irq) 79static unsigned int cpu_startup_irq(unsigned int irq)
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
74void no_ack_irq(unsigned int irq) { } 85void no_ack_irq(unsigned int irq) { }
75void no_end_irq(unsigned int irq) { } 86void no_end_irq(unsigned int irq) { }
76 87
88#ifdef CONFIG_SMP
89int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
90{
91 int cpu_dest;
92
93 /* timer and ipi have to always be received on all CPUs */
94 if (irq == TIMER_IRQ || irq == IPI_IRQ) {
95 /* Bad linux design decision. The mask has already
96 * been set; we must reset it */
97 irq_affinity[irq] = CPU_MASK_ALL;
98 return -EINVAL;
99 }
100
101 /* whatever mask they set, we just allow one CPU */
102 cpu_dest = first_cpu(*dest);
103 *dest = cpumask_of_cpu(cpu_dest);
104
105 return 0;
106}
107
108static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
109{
110 if (cpu_check_affinity(irq, &dest))
111 return;
112
113 irq_affinity[irq] = dest;
114}
115#endif
116
77static struct hw_interrupt_type cpu_interrupt_type = { 117static struct hw_interrupt_type cpu_interrupt_type = {
78 .typename = "CPU", 118 .typename = "CPU",
79 .startup = cpu_startup_irq, 119 .startup = cpu_startup_irq,
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
82 .disable = cpu_disable_irq, 122 .disable = cpu_disable_irq,
83 .ack = no_ack_irq, 123 .ack = no_ack_irq,
84 .end = no_end_irq, 124 .end = no_end_irq,
85// .set_affinity = cpu_set_affinity_irq, 125#ifdef CONFIG_SMP
126 .set_affinity = cpu_set_affinity_irq,
127#endif
86}; 128};
87 129
88int show_interrupts(struct seq_file *p, void *v) 130int show_interrupts(struct seq_file *p, void *v)
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
219 return -1; 261 return -1;
220} 262}
221 263
264
265unsigned long txn_affinity_addr(unsigned int irq, int cpu)
266{
267#ifdef CONFIG_SMP
268 irq_affinity[irq] = cpumask_of_cpu(cpu);
269#endif
270
271 return cpu_data[cpu].txn_addr;
272}
273
274
222unsigned long txn_alloc_addr(unsigned int virt_irq) 275unsigned long txn_alloc_addr(unsigned int virt_irq)
223{ 276{
224 static int next_cpu = -1; 277 static int next_cpu = -1;
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
233 if (next_cpu >= NR_CPUS) 286 if (next_cpu >= NR_CPUS)
234 next_cpu = 0; /* nothing else, assign monarch */ 287 next_cpu = 0; /* nothing else, assign monarch */
235 288
236 return cpu_data[next_cpu].txn_addr; 289 return txn_affinity_addr(virt_irq, next_cpu);
237} 290}
238 291
239 292
@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
250 irq_enter(); 303 irq_enter();
251 304
252 /* 305 /*
253 * Only allow interrupt processing to be interrupted by the 306 * Don't allow TIMER or IPI nested interrupts.
254 * timer tick 307 * Allowing any single interrupt to nest can lead to that CPU
308 * handling interrupts with all enabled interrupts unmasked.
255 */ 309 */
256 set_eiem(EIEM_MASK(TIMER_IRQ)); 310 set_eiem(0UL);
257 311
258 /* 1) only process IRQs that are enabled/unmasked (cpu_eiem) 312 /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
259 * 2) We loop here on EIRR contents in order to avoid 313 * 2) We loop here on EIRR contents in order to avoid
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
267 if (!eirr_val) 321 if (!eirr_val)
268 break; 322 break;
269 323
270 if (eirr_val & EIEM_MASK(TIMER_IRQ))
271 set_eiem(0);
272
273 mtctl(eirr_val, 23); /* reset bits we are going to process */ 324 mtctl(eirr_val, 23); /* reset bits we are going to process */
274 325
275 /* Work our way from MSb to LSb...same order we alloc EIRs */ 326 /* Work our way from MSb to LSb...same order we alloc EIRs */
276 for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) { 327 for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
328#ifdef CONFIG_SMP
329 cpumask_t dest = irq_affinity[irq];
330#endif
277 if (!(bit & eirr_val)) 331 if (!(bit & eirr_val))
278 continue; 332 continue;
279 333
280 /* clear bit in mask - can exit loop sooner */ 334 /* clear bit in mask - can exit loop sooner */
281 eirr_val &= ~bit; 335 eirr_val &= ~bit;
282 336
337#ifdef CONFIG_SMP
338 /* FIXME: because generic set affinity mucks
339 * with the affinity before sending it to us
340 * we can get the situation where the affinity is
341 * wrong for our CPU type interrupts */
342 if (irq != TIMER_IRQ && irq != IPI_IRQ &&
343 !cpu_isset(smp_processor_id(), dest)) {
344 int cpu = first_cpu(dest);
345
346 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
347 irq, smp_processor_id(), cpu);
348 gsc_writel(irq + CPU_IRQ_BASE,
349 cpu_data[cpu].hpa);
350 continue;
351 }
352#endif
353
283 __do_IRQ(irq, regs); 354 __do_IRQ(irq, regs);
284 } 355 }
285 } 356 }
286 set_eiem(cpu_eiem); 357
358 set_eiem(cpu_eiem); /* restore original mask */
287 irq_exit(); 359 irq_exit();
288} 360}
289 361
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
291static struct irqaction timer_action = { 363static struct irqaction timer_action = {
292 .handler = timer_interrupt, 364 .handler = timer_interrupt,
293 .name = "timer", 365 .name = "timer",
366 .flags = SA_INTERRUPT,
294}; 367};
295 368
296#ifdef CONFIG_SMP 369#ifdef CONFIG_SMP
297static struct irqaction ipi_action = { 370static struct irqaction ipi_action = {
298 .handler = ipi_interrupt, 371 .handler = ipi_interrupt,
299 .name = "IPI", 372 .name = "IPI",
373 .flags = SA_INTERRUPT,
300}; 374};
301#endif 375#endif
302 376
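
Note on the irq.c rework above: disabling an interrupt no longer IPIs every CPU to reload EIEM; the bit is simply dropped from cpu_eiem, and each CPU picks that up when it restores the mask on leaving do_cpu_irq_mask(), while enabling sends a NOP IPI so the other CPUs re-run that restore path. The stand-alone sketch below models only that bookkeeping; CPU_IRQ_MAX, TIMER_IRQ and IPI_IRQ are illustrative values here, and EIEM_MASK() follows the same 1UL << (CPU_IRQ_MAX - irq) convention the smp.c hunk further down tests with (1UL<<(CPU_IRQ_MAX - IPI_IRQ)).

#include <stdio.h>

/* Model values: the real CPU_IRQ_MAX depends on BITS_PER_LONG and
 * CPU_IRQ_BASE, and TIMER_IRQ/IPI_IRQ sit inside that range. */
#define CPU_IRQ_MAX   31
#define TIMER_IRQ     0
#define IPI_IRQ       1
#define EIEM_MASK(i)  (1UL << (CPU_IRQ_MAX - (i)))

static unsigned long cpu_eiem;   /* software copy of the enable mask */
static unsigned long hw_eiem;    /* stands in for the EIEM control register */

static void set_eiem(unsigned long mask) { hw_eiem = mask; }

static void cpu_enable_irq(unsigned int irq)
{
	cpu_eiem |= EIEM_MASK(irq);
	set_eiem(cpu_eiem);      /* kernel: only if !in_interrupt(); other
	                          * CPUs catch up when the NOP IPI returns */
}

static void cpu_disable_irq(unsigned int irq)
{
	cpu_eiem &= ~EIEM_MASK(irq);   /* no IPI: the & cpu_eiem test in
	                                * do_cpu_irq_mask() already filters it,
	                                * and set_eiem() on exit masks it */
}

int main(void)
{
	cpu_enable_irq(TIMER_IRQ);
	cpu_enable_irq(IPI_IRQ);
	cpu_disable_irq(IPI_IRQ);
	set_eiem(cpu_eiem);      /* what every CPU does leaving do_cpu_irq_mask() */
	printf("eiem = %#lx\n", hw_eiem);
	return 0;
}
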
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 44670d6e06f4..f6fec62b6a2f 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -196,8 +196,7 @@ static int perf_open(struct inode *inode, struct file *file);
196static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); 196static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
197static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 197static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
198 loff_t *ppos); 198 loff_t *ppos);
199static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 199static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
200 unsigned long arg);
201static void perf_start_counters(void); 200static void perf_start_counters(void);
202static int perf_stop_counters(uint32_t *raddr); 201static int perf_stop_counters(uint32_t *raddr);
203static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num); 202static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
@@ -438,48 +437,56 @@ static void perf_patch_images(void)
438 * must be running on the processor that you wish to change. 437 * must be running on the processor that you wish to change.
439 */ 438 */
440 439
441static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 440static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
442 unsigned long arg)
443{ 441{
444 long error_start; 442 long error_start;
445 uint32_t raddr[4]; 443 uint32_t raddr[4];
444 int error = 0;
446 445
446 lock_kernel();
447 switch (cmd) { 447 switch (cmd) {
448 448
449 case PA_PERF_ON: 449 case PA_PERF_ON:
450 /* Start the counters */ 450 /* Start the counters */
451 perf_start_counters(); 451 perf_start_counters();
452 return 0; 452 break;
453 453
454 case PA_PERF_OFF: 454 case PA_PERF_OFF:
455 error_start = perf_stop_counters(raddr); 455 error_start = perf_stop_counters(raddr);
456 if (error_start != 0) { 456 if (error_start != 0) {
457 printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start); 457 printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
458 return -EFAULT; 458 error = -EFAULT;
459 break;
459 } 460 }
460 461
461 /* copy out the Counters */ 462 /* copy out the Counters */
462 if (copy_to_user((void __user *)arg, raddr, 463 if (copy_to_user((void __user *)arg, raddr,
463 sizeof (raddr)) != 0) { 464 sizeof (raddr)) != 0) {
464 return -EFAULT; 465 error = -EFAULT;
466 break;
465 } 467 }
466 return 0; 468 break;
467 469
468 case PA_PERF_VERSION: 470 case PA_PERF_VERSION:
469 /* Return the version # */ 471 /* Return the version # */
470 return put_user(PERF_VERSION, (int *)arg); 472 error = put_user(PERF_VERSION, (int *)arg);
473 break;
471 474
472 default: 475 default:
473 break; 476 error = -ENOTTY;
474 } 477 }
475 return -ENOTTY; 478
479 unlock_kernel();
480
481 return error;
476} 482}
477 483
478static struct file_operations perf_fops = { 484static struct file_operations perf_fops = {
479 .llseek = no_llseek, 485 .llseek = no_llseek,
480 .read = perf_read, 486 .read = perf_read,
481 .write = perf_write, 487 .write = perf_write,
482 .ioctl = perf_ioctl, 488 .unlocked_ioctl = perf_ioctl,
489 .compat_ioctl = perf_ioctl,
483 .open = perf_open, 490 .open = perf_open,
484 .release = perf_release 491 .release = perf_release
485}; 492};
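
Note on the perf.c hunk above: it pairs with the ioctl32.c change earlier in this merge — the PA_PERF_* COMPATIBLE_IOCTL table entries go away, presumably because the driver now registers the same handler as both .unlocked_ioctl and .compat_ioctl and takes the BKL itself, funnelling every case through a single exit. A minimal stand-alone sketch of that single-exit shape; the command numbers and the lock_kernel()/unlock_kernel() stubs are placeholders.

#include <errno.h>
#include <stdio.h>

#define PA_PERF_ON      1   /* placeholder command numbers */
#define PA_PERF_OFF     2
#define PA_PERF_VERSION 3

static void lock_kernel(void)   { /* BKL stand-in */ }
static void unlock_kernel(void) { /* BKL stand-in */ }

static long model_perf_ioctl(unsigned int cmd)
{
	int error = 0;

	lock_kernel();
	switch (cmd) {
	case PA_PERF_ON:
	case PA_PERF_OFF:
	case PA_PERF_VERSION:
		/* real work goes here; failures set error and break */
		break;
	default:
		error = -ENOTTY;
	}
	unlock_kernel();        /* reached on every path, unlike early returns */

	return error;
}

int main(void)
{
	printf("%ld %ld\n", model_perf_ioctl(PA_PERF_ON), model_perf_ioctl(99));
	return 0;
}
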
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index b6fe202a620d..27160e8bf15b 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -264,6 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
264 * sigkill. perhaps it should be put in the status 264 * sigkill. perhaps it should be put in the status
265 * that it wants to exit. 265 * that it wants to exit.
266 */ 266 */
267 ret = 0;
267 DBG("sys_ptrace(KILL)\n"); 268 DBG("sys_ptrace(KILL)\n");
268 if (child->exit_state == EXIT_ZOMBIE) /* already dead */ 269 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
269 goto out_tsk; 270 goto out_tsk;
@@ -344,11 +345,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
344 345
345 case PTRACE_GETEVENTMSG: 346 case PTRACE_GETEVENTMSG:
346 ret = put_user(child->ptrace_message, (unsigned int __user *) data); 347 ret = put_user(child->ptrace_message, (unsigned int __user *) data);
347 goto out; 348 goto out_tsk;
348 349
349 default: 350 default:
350 ret = ptrace_request(child, request, addr, data); 351 ret = ptrace_request(child, request, addr, data);
351 goto out; 352 goto out_tsk;
352 } 353 }
353 354
354out_wake_notrap: 355out_wake_notrap:
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 82c24e62ab63..3a25a7bd673e 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
296 struct rt_sigframe __user *frame; 296 struct rt_sigframe __user *frame;
297 unsigned long rp, usp; 297 unsigned long rp, usp;
298 unsigned long haddr, sigframe_size; 298 unsigned long haddr, sigframe_size;
299 struct siginfo si;
300 int err = 0; 299 int err = 0;
301#ifdef __LP64__ 300#ifdef __LP64__
302 compat_int_t compat_val; 301 compat_int_t compat_val;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a9ecf6465784..ce89da0f654d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
181 while (ops) { 181 while (ops) {
182 unsigned long which = ffz(~ops); 182 unsigned long which = ffz(~ops);
183 183
184 ops &= ~(1 << which);
185
184 switch (which) { 186 switch (which) {
187 case IPI_NOP:
188#if (kDEBUG>=100)
189 printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
190#endif /* kDEBUG */
191 break;
192
185 case IPI_RESCHEDULE: 193 case IPI_RESCHEDULE:
186#if (kDEBUG>=100) 194#if (kDEBUG>=100)
187 printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu); 195 printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
188#endif /* kDEBUG */ 196#endif /* kDEBUG */
189 ops &= ~(1 << IPI_RESCHEDULE);
190 /* 197 /*
191 * Reschedule callback. Everything to be 198 * Reschedule callback. Everything to be
192 * done is done by the interrupt return path. 199 * done is done by the interrupt return path.
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
197#if (kDEBUG>=100) 204#if (kDEBUG>=100)
198 printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu); 205 printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
199#endif /* kDEBUG */ 206#endif /* kDEBUG */
200 ops &= ~(1 << IPI_CALL_FUNC);
201 { 207 {
202 volatile struct smp_call_struct *data; 208 volatile struct smp_call_struct *data;
203 void (*func)(void *info); 209 void (*func)(void *info);
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
231#if (kDEBUG>=100) 237#if (kDEBUG>=100)
232 printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu); 238 printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
233#endif /* kDEBUG */ 239#endif /* kDEBUG */
234 ops &= ~(1 << IPI_CPU_START);
235#ifdef ENTRY_SYS_CPUS 240#ifdef ENTRY_SYS_CPUS
236 p->state = STATE_RUNNING; 241 p->state = STATE_RUNNING;
237#endif 242#endif
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
241#if (kDEBUG>=100) 246#if (kDEBUG>=100)
242 printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu); 247 printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
243#endif /* kDEBUG */ 248#endif /* kDEBUG */
244 ops &= ~(1 << IPI_CPU_STOP);
245#ifdef ENTRY_SYS_CPUS 249#ifdef ENTRY_SYS_CPUS
246#else 250#else
247 halt_processor(); 251 halt_processor();
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
252#if (kDEBUG>=100) 256#if (kDEBUG>=100)
253 printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu); 257 printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
254#endif /* kDEBUG */ 258#endif /* kDEBUG */
255 ops &= ~(1 << IPI_CPU_TEST);
256 break; 259 break;
257 260
258 default: 261 default:
259 printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", 262 printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
260 this_cpu, which); 263 this_cpu, which);
261 ops &= ~(1 << which);
262 return IRQ_NONE; 264 return IRQ_NONE;
263 } /* Switch */ 265 } /* Switch */
264 } /* while (ops) */ 266 } /* while (ops) */
@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
312void 314void
313smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } 315smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
314 316
317void
318smp_send_all_nop(void)
319{
320 send_IPI_allbutself(IPI_NOP);
321}
322
315 323
316/** 324/**
317 * Run a function on all other CPUs. 325 * Run a function on all other CPUs.
@@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
338 346
339 /* Can deadlock when called with interrupts disabled */ 347 /* Can deadlock when called with interrupts disabled */
340 WARN_ON(irqs_disabled()); 348 WARN_ON(irqs_disabled());
349
350 /* can also deadlock if IPIs are disabled */
351 WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
352
341 353
342 data.func = func; 354 data.func = func;
343 data.info = info; 355 data.info = info;
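
Note on the ipi_interrupt() change above: the per-case `ops &= ~(1 << which)` lines are hoisted ahead of the switch, so every IPI type — including the new IPI_NOP and the unknown-IPI default — consumes its pending bit exactly once. Below is a stand-alone model of that bit walk, using ffs() as a user-space stand-in for ffz(~ops) and illustrative IPI numbers.

#include <stdio.h>
#include <strings.h>   /* ffs(): stand-in for the kernel's ffz(~x) */

/* IPI numbers are illustrative; only their use as bit positions matters. */
enum { IPI_NOP, IPI_RESCHEDULE, IPI_CALL_FUNC,
       IPI_CPU_START, IPI_CPU_STOP, IPI_CPU_TEST };

static void handle_pending(unsigned int ops)
{
	while (ops) {
		int which = ffs(ops) - 1;   /* lowest pending IPI == ffz(~ops) */

		ops &= ~(1U << which);      /* cleared once, before the switch */

		switch (which) {
		case IPI_NOP:
			/* nothing to do here; the useful work is the
			 * set_eiem(cpu_eiem) on interrupt exit */
			break;
		case IPI_RESCHEDULE:
			printf("reschedule\n");
			break;
		default:
			printf("IPI %d\n", which);
			break;
		}
	}
}

int main(void)
{
	handle_pending((1U << IPI_NOP) | (1U << IPI_RESCHEDULE));
	return 0;
}
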
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index b29b76b42bb7..d66163492890 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -164,7 +164,7 @@ linux_gateway_entry:
164#endif 164#endif
165 STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */ 165 STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
166 166
167 STREG %r20, TASK_PT_GR20(%r1) 167 STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
168 STREG %r21, TASK_PT_GR21(%r1) 168 STREG %r21, TASK_PT_GR21(%r1)
169 STREG %r22, TASK_PT_GR22(%r1) 169 STREG %r22, TASK_PT_GR22(%r1)
170 STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */ 170 STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
@@ -527,6 +527,7 @@ lws_compare_and_swap:
527 We *must* giveup this call and fail. 527 We *must* giveup this call and fail.
528 */ 528 */
529 ldw 4(%sr2,%r20), %r28 /* Load thread register */ 529 ldw 4(%sr2,%r20), %r28 /* Load thread register */
530 /* WARNING: If cr27 cycles to the same value we have problems */
530 mfctl %cr27, %r21 /* Get current thread register */ 531 mfctl %cr27, %r21 /* Get current thread register */
531 cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */ 532 cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
532 b lws_exit /* Return error! */ 533 b lws_exit /* Return error! */
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index ed2bc87f475b..31e649a9ff71 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -625,7 +625,7 @@ config BLK_DEV_NS87415
625 tristate "NS87415 chipset support" 625 tristate "NS87415 chipset support"
626 help 626 help
627 This driver adds detection and support for the NS87415 chip 627 This driver adds detection and support for the NS87415 chip
628 (used in SPARC64, among others). 628 (used mainly on SPARC64 and PA-RISC machines).
629 629
630 Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>. 630 Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.
631 631
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 801c98f30e5c..c82105920d71 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -110,7 +110,7 @@ config HISAX_16_3
110 110
111config HISAX_TELESPCI 111config HISAX_TELESPCI
112 bool "Teles PCI" 112 bool "Teles PCI"
113 depends on PCI && (BROKEN || !(SPARC64 || PPC)) 113 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
114 help 114 help
115 This enables HiSax support for the Teles PCI. 115 This enables HiSax support for the Teles PCI.
116 See <file:Documentation/isdn/README.HiSax> on how to configure it. 116 See <file:Documentation/isdn/README.HiSax> on how to configure it.
@@ -238,7 +238,7 @@ config HISAX_MIC
238 238
239config HISAX_NETJET 239config HISAX_NETJET
240 bool "NETjet card" 240 bool "NETjet card"
241 depends on PCI && (BROKEN || !(SPARC64 || PPC)) 241 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
242 help 242 help
243 This enables HiSax support for the NetJet from Traverse 243 This enables HiSax support for the NetJet from Traverse
244 Technologies. 244 Technologies.
@@ -249,7 +249,7 @@ config HISAX_NETJET
249 249
250config HISAX_NETJET_U 250config HISAX_NETJET_U
251 bool "NETspider U card" 251 bool "NETspider U card"
252 depends on PCI && (BROKEN || !(SPARC64 || PPC)) 252 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
253 help 253 help
254 This enables HiSax support for the Netspider U interface ISDN card 254 This enables HiSax support for the Netspider U interface ISDN card
255 from Traverse Technologies. 255 from Traverse Technologies.
@@ -317,7 +317,7 @@ config HISAX_GAZEL
317 317
318config HISAX_HFC_PCI 318config HISAX_HFC_PCI
319 bool "HFC PCI-Bus cards" 319 bool "HFC PCI-Bus cards"
320 depends on PCI && (BROKEN || !(SPARC64 || PPC)) 320 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
321 help 321 help
322 This enables HiSax support for the HFC-S PCI 2BDS0 based cards. 322 This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
323 323
@@ -344,14 +344,14 @@ config HISAX_HFC_SX
344 344
345config HISAX_ENTERNOW_PCI 345config HISAX_ENTERNOW_PCI
346 bool "Formula-n enter:now PCI card" 346 bool "Formula-n enter:now PCI card"
347 depends on PCI && (BROKEN || !(SPARC64 || PPC)) 347 depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
348 help 348 help
349 This enables HiSax support for the Formula-n enter:now PCI 349 This enables HiSax support for the Formula-n enter:now PCI
350 ISDN card. 350 ISDN card.
351 351
352config HISAX_AMD7930 352config HISAX_AMD7930
353 bool "Am7930 (EXPERIMENTAL)" 353 bool "Am7930 (EXPERIMENTAL)"
354 depends on EXPERIMENTAL && (SPARC32 || SPARC64) 354 depends on EXPERIMENTAL && SPARC
355 help 355 help
356 This enables HiSax support for the AMD7930 chips on some SPARCs. 356 This enables HiSax support for the AMD7930 chips on some SPARCs.
357 This code is not finished yet. 357 This code is not finished yet.
diff --git a/drivers/isdn/pcbit/Kconfig b/drivers/isdn/pcbit/Kconfig
index f06997faef16..0933881ab0c2 100644
--- a/drivers/isdn/pcbit/Kconfig
+++ b/drivers/isdn/pcbit/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4config ISDN_DRV_PCBIT 4config ISDN_DRV_PCBIT
5 tristate "PCBIT-D support" 5 tristate "PCBIT-D support"
6 depends on ISDN_I4L && ISA && (BROKEN || !PPC) 6 depends on ISDN_I4L && ISA && (BROKEN || X86)
7 help 7 help
8 This enables support for the PCBIT ISDN-card. This card is 8 This enables support for the PCBIT ISDN-card. This card is
9 manufactured in Portugal by Octal. For running this card, 9 manufactured in Portugal by Octal. For running this card,
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index a39fbfef789a..19657efa8dc3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -700,6 +700,28 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
700 return 0; 700 return 0;
701} 701}
702 702
703#ifdef CONFIG_SMP
704static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest)
705{
706 struct vector_info *vi = iosapic_get_vector(irq);
707 u32 d0, d1, dummy_d0;
708 unsigned long flags;
709
710 if (cpu_check_affinity(irq, &dest))
711 return;
712
713 vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest));
714
715 spin_lock_irqsave(&iosapic_lock, flags);
716 /* d1 contains the destination CPU, so only want to set that
717 * entry */
718 iosapic_rd_irt_entry(vi, &d0, &d1);
719 iosapic_set_irt_data(vi, &dummy_d0, &d1);
720 iosapic_wr_irt_entry(vi, d0, d1);
721 spin_unlock_irqrestore(&iosapic_lock, flags);
722}
723#endif
724
703static struct hw_interrupt_type iosapic_interrupt_type = { 725static struct hw_interrupt_type iosapic_interrupt_type = {
704 .typename = "IO-SAPIC-level", 726 .typename = "IO-SAPIC-level",
705 .startup = iosapic_startup_irq, 727 .startup = iosapic_startup_irq,
@@ -708,7 +730,9 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
708 .disable = iosapic_disable_irq, 730 .disable = iosapic_disable_irq,
709 .ack = no_ack_irq, 731 .ack = no_ack_irq,
710 .end = iosapic_end_irq, 732 .end = iosapic_end_irq,
711// .set_affinity = iosapic_set_affinity_irq, 733#ifdef CONFIG_SMP
734 .set_affinity = iosapic_set_affinity_irq,
735#endif
712}; 736};
713 737
714int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) 738int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
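
Note on iosapic_set_affinity_irq() above: it leans on cpu_check_affinity() from the irq.c hunk earlier, which rejects moves of TIMER_IRQ/IPI_IRQ and otherwise collapses the requested mask to a single CPU before the IRT entry is rewritten. A stand-alone sketch of that clamp, with a plain 64-bit word standing in for cpumask_t and the mask assumed non-empty:

#include <stdio.h>
#include <stdint.h>

/* first_cpu(): lowest set bit of the mask. */
static int first_cpu(uint64_t mask)
{
	int cpu = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		cpu++;
	}
	return cpu;
}

/* Collapse whatever mask was requested to exactly one CPU, as
 * cpu_check_affinity() does for ordinary (non-timer, non-IPI) interrupts. */
static uint64_t clamp_affinity(uint64_t requested)
{
	return (uint64_t)1 << first_cpu(requested);
}

int main(void)
{
	uint64_t dest = 0x6;    /* CPUs 1 and 2 requested */

	printf("irq pinned to CPU %d\n", first_cpu(clamp_affinity(dest)));
	return 0;
}
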
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index bab3bcabcb6e..d14888e149bb 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -24,6 +24,9 @@
24 * Major changes to get basic interrupt infrastructure working to 24 * Major changes to get basic interrupt infrastructure working to
25 * hopefully be able to support all SuperIO devices. Currently 25 * hopefully be able to support all SuperIO devices. Currently
26 * works with serial. -- John Marvin <jsm@fc.hp.com> 26 * works with serial. -- John Marvin <jsm@fc.hp.com>
27 *
28 * Converted superio_init() to be a PCI_FIXUP_FINAL callee.
29 * -- Kyle McMartin <kyle@parisc-linux.org>
27 */ 30 */
28 31
29 32
@@ -141,10 +144,10 @@ superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
141} 144}
142 145
143/* Initialize Super I/O device */ 146/* Initialize Super I/O device */
144 147static void
145static void __devinit 148superio_init(struct pci_dev *pcidev)
146superio_init(struct superio_device *sio)
147{ 149{
150 struct superio_device *sio = &sio_dev;
148 struct pci_dev *pdev = sio->lio_pdev; 151 struct pci_dev *pdev = sio->lio_pdev;
149 u16 word; 152 u16 word;
150 153
@@ -160,8 +163,8 @@ superio_init(struct superio_device *sio)
160 /* ...then properly fixup the USB to point at suckyio PIC */ 163 /* ...then properly fixup the USB to point at suckyio PIC */
161 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev); 164 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
162 165
163 printk (KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n", 166 printk(KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
164 pci_name(pdev),pdev->irq); 167 pci_name(pdev), pdev->irq);
165 168
166 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base); 169 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
167 sio->sp1_base &= ~1; 170 sio->sp1_base &= ~1;
@@ -274,7 +277,7 @@ superio_init(struct superio_device *sio)
274 277
275 sio->suckyio_irq_enabled = 1; 278 sio->suckyio_irq_enabled = 1;
276} 279}
277 280DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
278 281
279static void superio_disable_irq(unsigned int irq) 282static void superio_disable_irq(unsigned int irq)
280{ 283{
@@ -452,8 +455,10 @@ static void superio_fixup_pci(struct pci_dev *pdev)
452DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci); 455DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci);
453 456
454 457
455static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_id *id) 458static int __devinit
459superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
456{ 460{
461 struct superio_device *sio = &sio_dev;
457 462
458 /* 463 /*
459 ** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a 464 ** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
@@ -466,7 +471,8 @@ static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_
466 dev->subsystem_vendor, dev->subsystem_device, 471 dev->subsystem_vendor, dev->subsystem_device,
467 dev->class); 472 dev->class);
468 473
469 superio_init(&sio_dev); 474 if (!sio->suckyio_irq_enabled)
475 BUG(); /* Enabled by PCI_FIXUP_FINAL */
470 476
471 if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */ 477 if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */
472 superio_parport_init(); 478 superio_parport_init();
@@ -481,19 +487,21 @@ static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_
481 DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n"); 487 DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n");
482 } 488 }
483 489
484 /* Let appropriate other driver claim this device. */ 490 /* Let appropriate other driver claim this device. */
485 return -ENODEV; 491 return -ENODEV;
486} 492}
487 493
488static struct pci_device_id superio_tbl[] = { 494static struct pci_device_id superio_tbl[] = {
489 { PCI_VENDOR_ID_NS, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 495 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO) },
496 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_USB) },
497 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415) },
490 { 0, } 498 { 0, }
491}; 499};
492 500
493static struct pci_driver superio_driver = { 501static struct pci_driver superio_driver = {
494 .name = "SuperIO", 502 .name = "SuperIO",
495 .id_table = superio_tbl, 503 .id_table = superio_tbl,
496 .probe = superio_probe, 504 .probe = superio_probe,
497}; 505};
498 506
499static int __init superio_modinit(void) 507static int __init superio_modinit(void)
@@ -506,6 +514,5 @@ static void __exit superio_exit(void)
506 pci_unregister_driver(&superio_driver); 514 pci_unregister_driver(&superio_driver);
507} 515}
508 516
509
510module_init(superio_modinit); 517module_init(superio_modinit);
511module_exit(superio_exit); 518module_exit(superio_exit);
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index ff36f0c9fdad..ad47c1b84c3f 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -507,7 +507,7 @@ config SERIAL_SUNSU_CONSOLE
507 507
508config SERIAL_MUX 508config SERIAL_MUX
509 tristate "Serial MUX support" 509 tristate "Serial MUX support"
510 depends on PARISC 510 depends on GSC
511 select SERIAL_CORE 511 select SERIAL_CORE
512 default y 512 default y
513 ---help--- 513 ---help---
diff --git a/drivers/serial/mux.c b/drivers/serial/mux.c
index 660bae5ba179..7633132a10aa 100644
--- a/drivers/serial/mux.c
+++ b/drivers/serial/mux.c
@@ -65,8 +65,8 @@ static struct uart_driver mux_driver = {
65 65
66static struct timer_list mux_timer; 66static struct timer_list mux_timer;
67 67
68#define UART_PUT_CHAR(p, c) __raw_writel((c), (unsigned long)(p)->membase + IO_DATA_REG_OFFSET) 68#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
69#define UART_GET_FIFO_CNT(p) __raw_readl((unsigned long)(p)->membase + IO_DCOUNT_REG_OFFSET) 69#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
70#define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8 70#define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8
71 71
72/** 72/**
@@ -79,10 +79,7 @@ static struct timer_list mux_timer;
79 */ 79 */
80static unsigned int mux_tx_empty(struct uart_port *port) 80static unsigned int mux_tx_empty(struct uart_port *port)
81{ 81{
82 unsigned int cnt = __raw_readl((unsigned long)port->membase 82 return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT;
83 + IO_DCOUNT_REG_OFFSET);
84
85 return cnt ? 0 : TIOCSER_TEMT;
86} 83}
87 84
88/** 85/**
@@ -218,8 +215,7 @@ static void mux_read(struct uart_port *port)
218 __u32 start_count = port->icount.rx; 215 __u32 start_count = port->icount.rx;
219 216
220 while(1) { 217 while(1) {
221 data = __raw_readl((unsigned long)port->membase 218 data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);
222 + IO_DATA_REG_OFFSET);
223 219
224 if (MUX_STATUS(data)) 220 if (MUX_STATUS(data))
225 continue; 221 continue;
@@ -481,6 +477,13 @@ static int __init mux_probe(struct parisc_device *dev)
481 port->ops = &mux_pops; 477 port->ops = &mux_pops;
482 port->flags = UPF_BOOT_AUTOCONF; 478 port->flags = UPF_BOOT_AUTOCONF;
483 port->line = port_cnt; 479 port->line = port_cnt;
480
481 /* The port->timeout needs to match what is present in
482 * uart_wait_until_sent in serial_core.c. Otherwise
483 * the time spent in msleep_interruptable will be very
484 * long, causing the appearance of a console hang.
485 */
486 port->timeout = HZ / 50;
484 spin_lock_init(&port->lock); 487 spin_lock_init(&port->lock);
485 status = uart_add_one_port(&mux_driver, port); 488 status = uart_add_one_port(&mux_driver, port);
486 BUG_ON(status); 489 BUG_ON(status);
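
Note on the mux.c timeout above: port->timeout is in jiffies, so HZ / 50 works out to 1/50 of a second — 20 ms — regardless of the configured HZ (2 jiffies at HZ=100, 5 at HZ=250, 20 at HZ=1000). uart_wait_until_sent() derives its sleep interval from port->timeout, which is why the comment in the hunk ties the value to that function: per that comment, leaving it unset makes the msleep in that path very long and the console appear to hang.
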
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf22056..b0a30e2c9813 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
8#define _ASM_PARISC_IRQ_H 8#define _ASM_PARISC_IRQ_H
9 9
10#include <linux/config.h> 10#include <linux/config.h>
11#include <linux/cpumask.h>
11#include <asm/types.h> 12#include <asm/types.h>
12 13
13#define NO_IRQ (-1) 14#define NO_IRQ (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
49extern int txn_claim_irq(int); 50extern int txn_claim_irq(int);
50extern unsigned int txn_alloc_data(unsigned int); 51extern unsigned int txn_alloc_data(unsigned int);
51extern unsigned long txn_alloc_addr(unsigned int); 52extern unsigned long txn_alloc_addr(unsigned int);
53extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
52 54
53extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *); 55extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
54 56extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
55extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
56 57
57/* soft power switch support (power.c) */ 58/* soft power switch support (power.c) */
58extern struct tasklet_struct power_tasklet; 59extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540b..dbdbd2e9fdf9 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
29#define cpu_logical_map(cpu) (cpu) 29#define cpu_logical_map(cpu) (cpu)
30 30
31extern void smp_send_reschedule(int cpu); 31extern void smp_send_reschedule(int cpu);
32extern void smp_send_all_nop(void);
32 33
33#endif /* !ASSEMBLY */ 34#endif /* !ASSEMBLY */
34 35
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
53 54
54#define raw_smp_processor_id() (current_thread_info()->cpu) 55#define raw_smp_processor_id() (current_thread_info()->cpu)
55 56
56#endif /* CONFIG_SMP */ 57#else /* CONFIG_SMP */
58
59static inline void smp_send_all_nop(void) { return; }
60
61#endif
57 62
58#define NO_PROC_ID 0xFF /* No processor magic marker */ 63#define NO_PROC_ID 0xFF /* No processor magic marker */
59#define ANY_PROC_ID 0xFF /* Any processor magic marker */ 64#define ANY_PROC_ID 0xFF /* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746a..16c2ac075fc5 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
11 return *a == 0; 11 return *a == 0;
12} 12}
13 13
14#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 14#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
15#define __raw_spin_unlock_wait(x) \ 15#define __raw_spin_unlock_wait(x) \
16 do { cpu_relax(); } while (__raw_spin_is_locked(x)) 16 do { cpu_relax(); } while (__raw_spin_is_locked(x))
17 17
18static inline void __raw_spin_lock(raw_spinlock_t *x) 18static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
19 unsigned long flags)
19{ 20{
20 volatile unsigned int *a; 21 volatile unsigned int *a;
21 22
22 mb(); 23 mb();
23 a = __ldcw_align(x); 24 a = __ldcw_align(x);
24 while (__ldcw(a) == 0) 25 while (__ldcw(a) == 0)
25 while (*a == 0); 26 while (*a == 0)
27 if (flags & PSW_SM_I) {
28 local_irq_enable();
29 cpu_relax();
30 local_irq_disable();
31 } else
32 cpu_relax();
26 mb(); 33 mb();
27} 34}
28 35
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
60 67
61static __inline__ void __raw_read_lock(raw_rwlock_t *rw) 68static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
62{ 69{
63 unsigned long flags;
64 local_irq_save(flags);
65 __raw_spin_lock(&rw->lock); 70 __raw_spin_lock(&rw->lock);
66 71
67 rw->counter++; 72 rw->counter++;
68 73
69 __raw_spin_unlock(&rw->lock); 74 __raw_spin_unlock(&rw->lock);
70 local_irq_restore(flags);
71} 75}
72 76
73static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) 77static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
74{ 78{
75 unsigned long flags;
76 local_irq_save(flags);
77 __raw_spin_lock(&rw->lock); 79 __raw_spin_lock(&rw->lock);
78 80
79 rw->counter--; 81 rw->counter--;
80 82
81 __raw_spin_unlock(&rw->lock); 83 __raw_spin_unlock(&rw->lock);
82 local_irq_restore(flags);
83} 84}
84 85
85/* write_lock is less trivial. We optimistically grab the lock and check 86/* write_lock is less trivial. We optimistically grab the lock and check
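
Note on the spinlock.h rework above: __raw_spin_lock() is now just the flags == 0 case of __raw_spin_lock_flags(), and the flags variant briefly re-enables interrupts while spinning whenever the caller's saved PSW had the I bit set, keeping interrupt latency bounded on a contended lock. Below is a stand-alone model of that spin loop; PSW_SM_I here is a placeholder value, the irq enable/disable calls are stubs, and a C11 atomic flag stands in for the ldcw word.

#include <stdatomic.h>

#define PSW_SM_I 0x1      /* placeholder for the PSW interrupt-enable bit */

static void local_irq_enable(void)  { /* stand-in */ }
static void local_irq_disable(void) { /* stand-in */ }
static void cpu_relax(void)         { /* stand-in */ }

static atomic_flag lock_word = ATOMIC_FLAG_INIT;   /* set == held */

static void spin_lock_flags(unsigned long flags)
{
	while (atomic_flag_test_and_set_explicit(&lock_word,
						 memory_order_acquire)) {
		if (flags & PSW_SM_I) {
			/* interrupts were on at the call site: let them in
			 * while we wait, then mask again before retrying */
			local_irq_enable();
			cpu_relax();
			local_irq_disable();
		} else {
			cpu_relax();
		}
	}
}

static void spin_unlock(void)
{
	atomic_flag_clear_explicit(&lock_word, memory_order_release);
}

int main(void)
{
	spin_lock_flags(PSW_SM_I);
	spin_unlock();
	return 0;
}
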
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff5..c9ec39c6fc6c 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
12 * N class systems, only one PxTLB inter processor broadcast can be 12 * N class systems, only one PxTLB inter processor broadcast can be
13 * active at any one time on the Merced bus. This tlb purge 13 * active at any one time on the Merced bus. This tlb purge
14 * synchronisation is fairly lightweight and harmless so we activate 14 * synchronisation is fairly lightweight and harmless so we activate
15 * it on all SMP systems not just the N class. */ 15 * it on all SMP systems not just the N class. We also need to have
16#ifdef CONFIG_SMP 16 * preemption disabled on uniprocessor machines, and spin_lock does that
17 * nicely.
18 */
17extern spinlock_t pa_tlb_lock; 19extern spinlock_t pa_tlb_lock;
18 20
19#define purge_tlb_start(x) spin_lock(&pa_tlb_lock) 21#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
20#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) 22#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
21 23
22#else
23
24#define purge_tlb_start(x) do { } while(0)
25#define purge_tlb_end(x) do { } while (0)
26
27#endif
28
29
30extern void flush_tlb_all(void); 24extern void flush_tlb_all(void);
31 25
32/* 26/*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
88 if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */ 82 if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
89 flush_tlb_all(); 83 flush_tlb_all();
90 else { 84 else {
91 preempt_disable();
92 mtsp(vma->vm_mm->context,1); 85 mtsp(vma->vm_mm->context,1);
93 purge_tlb_start(); 86 purge_tlb_start();
94 if (split_tlb) { 87 if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
102 pdtlb(start); 95 pdtlb(start);
103 start += PAGE_SIZE; 96 start += PAGE_SIZE;
104 } 97 }
105 preempt_enable();
106 } 98 }
107 purge_tlb_end(); 99 purge_tlb_end();
108 } 100 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1013a42d10b1..0986d19be0b7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
940 940
941/* Do stack extension */ 941/* Do stack extension */
942extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 942extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
943#ifdef CONFIG_IA64
943extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 944extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
945#endif
944 946
945/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 947/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
946extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 948extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
diff --git a/mm/mmap.c b/mm/mmap.c
index 6c997b159600..4f8def03428c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1501,7 +1501,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
1501 * PA-RISC uses this for its stack; IA64 for its Register Backing Store. 1501 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1502 * vma is the last one with address > vma->vm_end. Have to extend vma. 1502 * vma is the last one with address > vma->vm_end. Have to extend vma.
1503 */ 1503 */
1504#ifdef CONFIG_STACK_GROWSUP 1504#ifndef CONFIG_IA64
1505static inline 1505static inline
1506#endif 1506#endif
1507int expand_upwards(struct vm_area_struct *vma, unsigned long address) 1507int expand_upwards(struct vm_area_struct *vma, unsigned long address)