author     Jeff Garzik <jgarzik@pobox.com>    2005-11-07 22:51:47 -0500
committer  Jeff Garzik <jgarzik@pobox.com>    2005-11-07 22:51:47 -0500
commit     6b995751c2e851d2bc9c277b5884d0adb519e31d (patch)
tree       7a15b41b5d8ce612915584a0773c670d5c0ab5b8 /arch/sparc64
parent     6c2f4267833f453156f8f439cc32eb4c92f357b4 (diff)
parent     d27ba47e7e8c466c18983a1779d611f82d6a354f (diff)
Merge branch 'master'
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig                  13
-rw-r--r--  arch/sparc64/Kconfig.debug            10
-rw-r--r--  arch/sparc64/kernel/ioctl32.c        459
-rw-r--r--  arch/sparc64/kernel/kprobes.c        165
-rw-r--r--  arch/sparc64/kernel/setup.c           12
-rw-r--r--  arch/sparc64/kernel/signal32.c         6
-rw-r--r--  arch/sparc64/kernel/smp.c             79
-rw-r--r--  arch/sparc64/kernel/sunos_ioctl32.c    1
-rw-r--r--  arch/sparc64/kernel/time.c            13
-rw-r--r--  arch/sparc64/kernel/us2e_cpufreq.c     7
-rw-r--r--  arch/sparc64/kernel/us3_cpufreq.c      7
-rw-r--r--  arch/sparc64/oprofile/Kconfig          6
12 files changed, 135 insertions(+), 643 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 1e9d8638a28a..3fded69b1922 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -377,8 +377,21 @@ source "drivers/fc4/Kconfig"
377 377
378source "fs/Kconfig" 378source "fs/Kconfig"
379 379
380menu "Instrumentation Support"
381 depends on EXPERIMENTAL
382
380source "arch/sparc64/oprofile/Kconfig" 383source "arch/sparc64/oprofile/Kconfig"
381 384
385config KPROBES
386 bool "Kprobes (EXPERIMENTAL)"
387 help
388 Kprobes allows you to trap at almost any kernel address and
389 execute a callback function. register_kprobe() establishes
390 a probepoint and specifies the callback. Kprobes is useful
391 for kernel debugging, non-intrusive instrumentation and testing.
392 If in doubt, say "N".
393endmenu
394
382source "arch/sparc64/Kconfig.debug" 395source "arch/sparc64/Kconfig.debug"
383 396
384source "security/Kconfig" 397source "security/Kconfig"
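
The KPROBES help text added above names register_kprobe(), which plants a breakpoint at a kernel address and attaches a callback. As a rough sketch of the API it describes (not part of this patch; the probed symbol "do_fork", the "example_" identifiers, and the assumption that kallsyms_lookup_name() is usable from a module are all illustrative), a minimal kprobes client of this era looks roughly like this:

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/kallsyms.h>

    /* Called just before the probed instruction executes. */
    static int example_pre(struct kprobe *p, struct pt_regs *regs)
    {
            printk(KERN_INFO "kprobe hit at %p\n", p->addr);
            return 0;
    }

    static struct kprobe example_kp;

    static int __init example_init(void)
    {
            /* register_kprobe() establishes the probepoint at example_kp.addr
             * and wires up the pre_handler callback.
             */
            example_kp.pre_handler = example_pre;
            example_kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
            if (!example_kp.addr)
                    return -EINVAL;
            return register_kprobe(&example_kp);
    }

    static void __exit example_exit(void)
    {
            unregister_kprobe(&example_kp);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
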
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index fa06ea04837b..3e31be494e54 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -11,16 +11,6 @@ config DEBUG_STACK_USAGE
11 11
12 This option will slow down process creation somewhat. 12 This option will slow down process creation somewhat.
13 13
14config KPROBES
15 bool "Kprobes"
16 depends on DEBUG_KERNEL
17 help
18 Kprobes allows you to trap at almost any kernel address and
19 execute a callback function. register_kprobe() establishes
20 a probepoint and specifies the callback. Kprobes is useful
21 for kernel debugging, non-intrusive instrumentation and testing.
22 If in doubt, say "N".
23
24config DEBUG_DCFLUSH 14config DEBUG_DCFLUSH
25 bool "D-cache flush debugging" 15 bool "D-cache flush debugging"
26 depends on DEBUG_KERNEL 16 depends on DEBUG_KERNEL
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index e6a00325075a..92e26304de90 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -11,33 +11,14 @@
11 11
12#define INCLUDES 12#define INCLUDES
13#include "compat_ioctl.c" 13#include "compat_ioctl.c"
14#include <linux/ncp_fs.h>
15#include <linux/syscalls.h> 14#include <linux/syscalls.h>
16#include <asm/fbio.h> 15#include <asm/fbio.h>
17#include <asm/kbio.h>
18#include <asm/vuid_event.h>
19#include <asm/envctrl.h>
20#include <asm/display7seg.h>
21#include <asm/openpromio.h>
22#include <asm/audioio.h>
23#include <asm/watchdog.h>
24 16
25/* Use this to get at 32-bit user passed pointers. 17/* Use this to get at 32-bit user passed pointers.
26 * See sys_sparc32.c for description about it. 18 * See sys_sparc32.c for description about it.
27 */ 19 */
28#define A(__x) compat_ptr(__x) 20#define A(__x) compat_ptr(__x)
29 21
30static __inline__ void *alloc_user_space(long len)
31{
32 struct pt_regs *regs = current_thread_info()->kregs;
33 unsigned long usp = regs->u_regs[UREG_I6];
34
35 if (!(test_thread_flag(TIF_32BIT)))
36 usp += STACK_BIAS;
37
38 return (void *) (usp - len);
39}
40
41#define CODE 22#define CODE
42#include "compat_ioctl.c" 23#include "compat_ioctl.c"
43 24
@@ -111,357 +92,6 @@ static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
111 return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p); 92 return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
112} 93}
113 94
114#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
115/* This really belongs in include/linux/drm.h -DaveM */
116#include "../../../drivers/char/drm/drm.h"
117
118typedef struct drm32_version {
119 int version_major; /* Major version */
120 int version_minor; /* Minor version */
121 int version_patchlevel;/* Patch level */
122 int name_len; /* Length of name buffer */
123 u32 name; /* Name of driver */
124 int date_len; /* Length of date buffer */
125 u32 date; /* User-space buffer to hold date */
126 int desc_len; /* Length of desc buffer */
127 u32 desc; /* User-space buffer to hold desc */
128} drm32_version_t;
129#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
130
131static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
132{
133 drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
134 drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
135 compat_uptr_t addr;
136 int n;
137 int ret;
138
139 if (clear_user(p, 3 * sizeof(int)) ||
140 get_user(n, &uversion->name_len) ||
141 put_user(n, &p->name_len) ||
142 get_user(addr, &uversion->name) ||
143 put_user(compat_ptr(addr), &p->name) ||
144 get_user(n, &uversion->date_len) ||
145 put_user(n, &p->date_len) ||
146 get_user(addr, &uversion->date) ||
147 put_user(compat_ptr(addr), &p->date) ||
148 get_user(n, &uversion->desc_len) ||
149 put_user(n, &p->desc_len) ||
150 get_user(addr, &uversion->desc) ||
151 put_user(compat_ptr(addr), &p->desc))
152 return -EFAULT;
153
154 ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
155 if (ret)
156 return ret;
157
158 if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
159 get_user(n, &p->name_len) ||
160 put_user(n, &uversion->name_len) ||
161 get_user(n, &p->date_len) ||
162 put_user(n, &uversion->date_len) ||
163 get_user(n, &p->desc_len) ||
164 put_user(n, &uversion->desc_len))
165 return -EFAULT;
166
167 return 0;
168}
169
170typedef struct drm32_unique {
171 int unique_len; /* Length of unique */
172 u32 unique; /* Unique name for driver instantiation */
173} drm32_unique_t;
174#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
175#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
176
177static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
178{
179 drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
180 drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
181 compat_uptr_t addr;
182 int n;
183 int ret;
184
185 if (get_user(n, &uarg->unique_len) ||
186 put_user(n, &p->unique_len) ||
187 get_user(addr, &uarg->unique) ||
188 put_user(compat_ptr(addr), &p->unique))
189 return -EFAULT;
190
191 if (cmd == DRM32_IOCTL_GET_UNIQUE)
192 ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
193 else
194 ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
195
196 if (ret)
197 return ret;
198
199 if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
200 return -EFAULT;
201
202 return 0;
203}
204
205typedef struct drm32_map {
206 u32 offset; /* Requested physical address (0 for SAREA)*/
207 u32 size; /* Requested physical size (bytes) */
208 drm_map_type_t type; /* Type of memory to map */
209 drm_map_flags_t flags; /* Flags */
210 u32 handle; /* User-space: "Handle" to pass to mmap */
211 /* Kernel-space: kernel-virtual address */
212 int mtrr; /* MTRR slot used */
213 /* Private data */
214} drm32_map_t;
215#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
216
217static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
218{
219 drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
220 drm_map_t karg;
221 mm_segment_t old_fs;
222 u32 tmp;
223 int ret;
224
225 ret = get_user(karg.offset, &uarg->offset);
226 ret |= get_user(karg.size, &uarg->size);
227 ret |= get_user(karg.type, &uarg->type);
228 ret |= get_user(karg.flags, &uarg->flags);
229 ret |= get_user(tmp, &uarg->handle);
230 ret |= get_user(karg.mtrr, &uarg->mtrr);
231 if (ret)
232 return -EFAULT;
233
234 karg.handle = (void *) (unsigned long) tmp;
235
236 old_fs = get_fs();
237 set_fs(KERNEL_DS);
238 ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
239 set_fs(old_fs);
240
241 if (!ret) {
242 ret = put_user(karg.offset, &uarg->offset);
243 ret |= put_user(karg.size, &uarg->size);
244 ret |= put_user(karg.type, &uarg->type);
245 ret |= put_user(karg.flags, &uarg->flags);
246 tmp = (u32) (long)karg.handle;
247 ret |= put_user(tmp, &uarg->handle);
248 ret |= put_user(karg.mtrr, &uarg->mtrr);
249 if (ret)
250 ret = -EFAULT;
251 }
252
253 return ret;
254}
255
256typedef struct drm32_buf_info {
257 int count; /* Entries in list */
258 u32 list; /* (drm_buf_desc_t *) */
259} drm32_buf_info_t;
260#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
261
262static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
263{
264 drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
265 drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
266 compat_uptr_t addr;
267 int n;
268 int ret;
269
270 if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
271 get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
272 return -EFAULT;
273
274 ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
275 if (ret)
276 return ret;
277
278 if (get_user(n, &p->count) || put_user(n, &uarg->count))
279 return -EFAULT;
280
281 return 0;
282}
283
284typedef struct drm32_buf_free {
285 int count;
286 u32 list; /* (int *) */
287} drm32_buf_free_t;
288#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
289
290static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
291{
292 drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
293 drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
294 compat_uptr_t addr;
295 int n;
296
297 if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
298 get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
299 return -EFAULT;
300
301 return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
302}
303
304typedef struct drm32_buf_pub {
305 int idx; /* Index into master buflist */
306 int total; /* Buffer size */
307 int used; /* Amount of buffer in use (for DMA) */
308 u32 address; /* Address of buffer (void *) */
309} drm32_buf_pub_t;
310
311typedef struct drm32_buf_map {
312 int count; /* Length of buflist */
313 u32 virtual; /* Mmaped area in user-virtual (void *) */
314 u32 list; /* Buffer information (drm_buf_pub_t *) */
315} drm32_buf_map_t;
316#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
317
318static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
319{
320 drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
321 drm32_buf_pub_t __user *ulist;
322 drm_buf_map_t __user *arg64;
323 drm_buf_pub_t __user *list;
324 int orig_count, ret, i;
325 int n;
326 compat_uptr_t addr;
327
328 if (get_user(orig_count, &uarg->count))
329 return -EFAULT;
330
331 arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
332 (size_t)orig_count * sizeof(drm_buf_pub_t));
333 list = (void __user *)(arg64 + 1);
334
335 if (put_user(orig_count, &arg64->count) ||
336 put_user(list, &arg64->list) ||
337 get_user(addr, &uarg->virtual) ||
338 put_user(compat_ptr(addr), &arg64->virtual) ||
339 get_user(addr, &uarg->list))
340 return -EFAULT;
341
342 ulist = compat_ptr(addr);
343
344 for (i = 0; i < orig_count; i++) {
345 if (get_user(n, &ulist[i].idx) ||
346 put_user(n, &list[i].idx) ||
347 get_user(n, &ulist[i].total) ||
348 put_user(n, &list[i].total) ||
349 get_user(n, &ulist[i].used) ||
350 put_user(n, &list[i].used) ||
351 get_user(addr, &ulist[i].address) ||
352 put_user(compat_ptr(addr), &list[i].address))
353 return -EFAULT;
354 }
355
356 ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
357 if (ret)
358 return ret;
359
360 for (i = 0; i < orig_count; i++) {
361 void __user *p;
362 if (get_user(n, &list[i].idx) ||
363 put_user(n, &ulist[i].idx) ||
364 get_user(n, &list[i].total) ||
365 put_user(n, &ulist[i].total) ||
366 get_user(n, &list[i].used) ||
367 put_user(n, &ulist[i].used) ||
368 get_user(p, &list[i].address) ||
369 put_user((unsigned long)p, &ulist[i].address))
370 return -EFAULT;
371 }
372
373 if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
374 return -EFAULT;
375
376 return 0;
377}
378
379typedef struct drm32_dma {
380 /* Indices here refer to the offset into
381 buflist in drm_buf_get_t. */
382 int context; /* Context handle */
383 int send_count; /* Number of buffers to send */
384 u32 send_indices; /* List of handles to buffers (int *) */
385 u32 send_sizes; /* Lengths of data to send (int *) */
386 drm_dma_flags_t flags; /* Flags */
387 int request_count; /* Number of buffers requested */
388 int request_size; /* Desired size for buffers */
389 u32 request_indices; /* Buffer information (int *) */
390 u32 request_sizes; /* (int *) */
391 int granted_count; /* Number of buffers granted */
392} drm32_dma_t;
393#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
394
395/* RED PEN The DRM layer blindly dereferences the send/request
396 * index/size arrays even though they are userland
397 * pointers. -DaveM
398 */
399static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
400{
401 drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
402 drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
403 compat_uptr_t addr;
404 int ret;
405
406 if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
407 get_user(addr, &uarg->send_indices) ||
408 put_user(compat_ptr(addr), &p->send_indices) ||
409 get_user(addr, &uarg->send_sizes) ||
410 put_user(compat_ptr(addr), &p->send_sizes) ||
411 copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
412 copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
413 copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
414 get_user(addr, &uarg->request_indices) ||
415 put_user(compat_ptr(addr), &p->request_indices) ||
416 get_user(addr, &uarg->request_sizes) ||
417 put_user(compat_ptr(addr), &p->request_sizes) ||
418 copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
419 return -EFAULT;
420
421 ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
422 if (ret)
423 return ret;
424
425 if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
426 copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
427 copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
428 copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
429 copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
430 return -EFAULT;
431
432 return 0;
433}
434
435typedef struct drm32_ctx_res {
436 int count;
437 u32 contexts; /* (drm_ctx_t *) */
438} drm32_ctx_res_t;
439#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
440
441static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
442{
443 drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
444 drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
445 compat_uptr_t addr;
446 int ret;
447
448 if (copy_in_user(p, uarg, sizeof(int)) ||
449 get_user(addr, &uarg->contexts) ||
450 put_user(compat_ptr(addr), &p->contexts))
451 return -EFAULT;
452
453 ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
454 if (ret)
455 return ret;
456
457 if (copy_in_user(uarg, p, sizeof(int)))
458 return -EFAULT;
459
460 return 0;
461}
462
463#endif
464
465typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *); 95typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
466 96
467#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl) 97#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
@@ -485,103 +115,14 @@ COMPATIBLE_IOCTL(FBIOSCURPOS)
485COMPATIBLE_IOCTL(FBIOGCURPOS) 115COMPATIBLE_IOCTL(FBIOGCURPOS)
486COMPATIBLE_IOCTL(FBIOGCURMAX) 116COMPATIBLE_IOCTL(FBIOGCURMAX)
487/* Little k */ 117/* Little k */
488COMPATIBLE_IOCTL(KIOCTYPE)
489COMPATIBLE_IOCTL(KIOCLAYOUT)
490COMPATIBLE_IOCTL(KIOCGTRANS)
491COMPATIBLE_IOCTL(KIOCTRANS)
492COMPATIBLE_IOCTL(KIOCCMD)
493COMPATIBLE_IOCTL(KIOCSDIRECT)
494COMPATIBLE_IOCTL(KIOCSLED)
495COMPATIBLE_IOCTL(KIOCGLED)
496COMPATIBLE_IOCTL(KIOCSRATE)
497COMPATIBLE_IOCTL(KIOCGRATE)
498COMPATIBLE_IOCTL(VUIDSFORMAT)
499COMPATIBLE_IOCTL(VUIDGFORMAT)
500/* Little v, the video4linux ioctls */ 118/* Little v, the video4linux ioctls */
501COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ 119COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
502COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ 120COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
503COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
504COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
505COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
506COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
507COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
508COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
509COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
510COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
511COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
512COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
513/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
514COMPATIBLE_IOCTL(D7SIOCWR)
515COMPATIBLE_IOCTL(D7SIOCTM)
516/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
517 * embedded pointers in the arg which we'd need to clean up...
518 */
519COMPATIBLE_IOCTL(OPROMGETOPT)
520COMPATIBLE_IOCTL(OPROMSETOPT)
521COMPATIBLE_IOCTL(OPROMNXTOPT)
522COMPATIBLE_IOCTL(OPROMSETOPT2)
523COMPATIBLE_IOCTL(OPROMNEXT)
524COMPATIBLE_IOCTL(OPROMCHILD)
525COMPATIBLE_IOCTL(OPROMGETPROP)
526COMPATIBLE_IOCTL(OPROMNXTPROP)
527COMPATIBLE_IOCTL(OPROMU2P)
528COMPATIBLE_IOCTL(OPROMGETCONS)
529COMPATIBLE_IOCTL(OPROMGETFBNAME)
530COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
531COMPATIBLE_IOCTL(OPROMSETCUR)
532COMPATIBLE_IOCTL(OPROMPCI2NODE)
533COMPATIBLE_IOCTL(OPROMPATH2NODE)
534/* Big L */
535COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
536COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
537/* Big A */
538COMPATIBLE_IOCTL(AUDIO_GETINFO)
539COMPATIBLE_IOCTL(AUDIO_SETINFO)
540COMPATIBLE_IOCTL(AUDIO_DRAIN)
541COMPATIBLE_IOCTL(AUDIO_GETDEV)
542COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
543COMPATIBLE_IOCTL(AUDIO_FLUSH)
544COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
545#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
546COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
547COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
548COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
549COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
550COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
551COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
552COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
553COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
554COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
555COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
556COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
557COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
558COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
559COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
560COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
561COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
562COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
563COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
564COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
565#endif /* DRM */
566COMPATIBLE_IOCTL(WIOCSTART)
567COMPATIBLE_IOCTL(WIOCSTOP)
568COMPATIBLE_IOCTL(WIOCGSTAT)
569/* And these ioctls need translation */ 121/* And these ioctls need translation */
570/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */ 122/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
571HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap) 123HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
572HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap) 124HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
573HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor) 125HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
574#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
575HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
576HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
577HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
578HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
579HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
580HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
581HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
582HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
583HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
584#endif /* DRM */
585#if 0 126#if 0
586HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl) 127HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
587HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl) 128HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
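
The drm32_* handlers removed above all follow one translation pattern, which the generic compat ioctl code now covers: copy the 32-bit ioctl argument into a native-sized structure allocated with compat_alloc_user_space(), widening embedded 32-bit user pointers via compat_ptr(), then forward to sys_ioctl(). A condensed, hypothetical sketch of that pattern, modeled on the removed drm32_getsetunique() (the example_t/example32_t types and EXAMPLE_GET numbers are illustrative only), is:

    typedef struct example {
            int             len;    /* length of buf */
            void __user     *buf;
    } example_t;

    typedef struct example32 {
            int     len;
            u32     buf;            /* 32-bit user pointer */
    } example32_t;

    #define EXAMPLE_GET     _IOWR('E', 0x01, example_t)
    #define EXAMPLE_GET32   _IOWR('E', 0x01, example32_t)

    static int example_get32(unsigned int fd, unsigned int cmd, unsigned long arg)
    {
            example32_t __user *uarg = (example32_t __user *) arg;
            example_t __user *p = compat_alloc_user_space(sizeof(*p));
            compat_uptr_t addr;
            int n;

            /* Rebuild the native layout on the user stack, widening the
             * embedded 32-bit pointer with compat_ptr().
             */
            if (get_user(n, &uarg->len) || put_user(n, &p->len) ||
                get_user(addr, &uarg->buf) ||
                put_user(compat_ptr(addr), &p->buf))
                    return -EFAULT;

            return sys_ioctl(fd, EXAMPLE_GET, (unsigned long) p);
    }
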
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 0d66d07c8c6e..96bd09b098f4 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -38,6 +38,9 @@
38 * - Mark that we are no longer actively in a kprobe. 38 * - Mark that we are no longer actively in a kprobe.
39 */ 39 */
40 40
41DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
42DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
43
41int __kprobes arch_prepare_kprobe(struct kprobe *p) 44int __kprobes arch_prepare_kprobe(struct kprobe *p)
42{ 45{
43 return 0; 46 return 0;
@@ -66,46 +69,39 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
66{ 69{
67} 70}
68 71
69static struct kprobe *current_kprobe; 72static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
70static unsigned long current_kprobe_orig_tnpc;
71static unsigned long current_kprobe_orig_tstate_pil;
72static unsigned int kprobe_status;
73static struct kprobe *kprobe_prev;
74static unsigned long kprobe_orig_tnpc_prev;
75static unsigned long kprobe_orig_tstate_pil_prev;
76static unsigned int kprobe_status_prev;
77
78static inline void save_previous_kprobe(void)
79{ 73{
80 kprobe_status_prev = kprobe_status; 74 kcb->prev_kprobe.kp = kprobe_running();
81 kprobe_orig_tnpc_prev = current_kprobe_orig_tnpc; 75 kcb->prev_kprobe.status = kcb->kprobe_status;
82 kprobe_orig_tstate_pil_prev = current_kprobe_orig_tstate_pil; 76 kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
83 kprobe_prev = current_kprobe; 77 kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
84} 78}
85 79
86static inline void restore_previous_kprobe(void) 80static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
87{ 81{
88 kprobe_status = kprobe_status_prev; 82 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
89 current_kprobe_orig_tnpc = kprobe_orig_tnpc_prev; 83 kcb->kprobe_status = kcb->prev_kprobe.status;
90 current_kprobe_orig_tstate_pil = kprobe_orig_tstate_pil_prev; 84 kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
91 current_kprobe = kprobe_prev; 85 kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
92} 86}
93 87
94static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs) 88static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
89 struct kprobe_ctlblk *kcb)
95{ 90{
96 current_kprobe_orig_tnpc = regs->tnpc; 91 __get_cpu_var(current_kprobe) = p;
97 current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); 92 kcb->kprobe_orig_tnpc = regs->tnpc;
98 current_kprobe = p; 93 kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
99} 94}
100 95
101static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 96static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
97 struct kprobe_ctlblk *kcb)
102{ 98{
103 regs->tstate |= TSTATE_PIL; 99 regs->tstate |= TSTATE_PIL;
104 100
105 /*single step inline, if it a breakpoint instruction*/ 101 /*single step inline, if it a breakpoint instruction*/
106 if (p->opcode == BREAKPOINT_INSTRUCTION) { 102 if (p->opcode == BREAKPOINT_INSTRUCTION) {
107 regs->tpc = (unsigned long) p->addr; 103 regs->tpc = (unsigned long) p->addr;
108 regs->tnpc = current_kprobe_orig_tnpc; 104 regs->tnpc = kcb->kprobe_orig_tnpc;
109 } else { 105 } else {
110 regs->tpc = (unsigned long) &p->ainsn.insn[0]; 106 regs->tpc = (unsigned long) &p->ainsn.insn[0];
111 regs->tnpc = (unsigned long) &p->ainsn.insn[1]; 107 regs->tnpc = (unsigned long) &p->ainsn.insn[1];
@@ -117,19 +113,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
117 struct kprobe *p; 113 struct kprobe *p;
118 void *addr = (void *) regs->tpc; 114 void *addr = (void *) regs->tpc;
119 int ret = 0; 115 int ret = 0;
116 struct kprobe_ctlblk *kcb;
120 117
118 /*
119 * We don't want to be preempted for the entire
120 * duration of kprobe processing
121 */
121 preempt_disable(); 122 preempt_disable();
123 kcb = get_kprobe_ctlblk();
122 124
123 if (kprobe_running()) { 125 if (kprobe_running()) {
124 /* We *are* holding lock here, so this is safe.
125 * Disarm the probe we just hit, and ignore it.
126 */
127 p = get_kprobe(addr); 126 p = get_kprobe(addr);
128 if (p) { 127 if (p) {
129 if (kprobe_status == KPROBE_HIT_SS) { 128 if (kcb->kprobe_status == KPROBE_HIT_SS) {
130 regs->tstate = ((regs->tstate & ~TSTATE_PIL) | 129 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
131 current_kprobe_orig_tstate_pil); 130 kcb->kprobe_orig_tstate_pil);
132 unlock_kprobes();
133 goto no_kprobe; 131 goto no_kprobe;
134 } 132 }
135 /* We have reentered the kprobe_handler(), since 133 /* We have reentered the kprobe_handler(), since
@@ -138,25 +136,22 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
138 * just single step on the instruction of the new probe 136 * just single step on the instruction of the new probe
139 * without calling any user handlers. 137 * without calling any user handlers.
140 */ 138 */
141 save_previous_kprobe(); 139 save_previous_kprobe(kcb);
142 set_current_kprobe(p, regs); 140 set_current_kprobe(p, regs, kcb);
143 p->nmissed++; 141 p->nmissed++;
144 kprobe_status = KPROBE_REENTER; 142 kcb->kprobe_status = KPROBE_REENTER;
145 prepare_singlestep(p, regs); 143 prepare_singlestep(p, regs, kcb);
146 return 1; 144 return 1;
147 } else { 145 } else {
148 p = current_kprobe; 146 p = __get_cpu_var(current_kprobe);
149 if (p->break_handler && p->break_handler(p, regs)) 147 if (p->break_handler && p->break_handler(p, regs))
150 goto ss_probe; 148 goto ss_probe;
151 } 149 }
152 /* If it's not ours, can't be delete race, (we hold lock). */
153 goto no_kprobe; 150 goto no_kprobe;
154 } 151 }
155 152
156 lock_kprobes();
157 p = get_kprobe(addr); 153 p = get_kprobe(addr);
158 if (!p) { 154 if (!p) {
159 unlock_kprobes();
160 if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) { 155 if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
161 /* 156 /*
162 * The breakpoint instruction was removed right 157 * The breakpoint instruction was removed right
@@ -171,14 +166,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
171 goto no_kprobe; 166 goto no_kprobe;
172 } 167 }
173 168
174 set_current_kprobe(p, regs); 169 set_current_kprobe(p, regs, kcb);
175 kprobe_status = KPROBE_HIT_ACTIVE; 170 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
176 if (p->pre_handler && p->pre_handler(p, regs)) 171 if (p->pre_handler && p->pre_handler(p, regs))
177 return 1; 172 return 1;
178 173
179ss_probe: 174ss_probe:
180 prepare_singlestep(p, regs); 175 prepare_singlestep(p, regs, kcb);
181 kprobe_status = KPROBE_HIT_SS; 176 kcb->kprobe_status = KPROBE_HIT_SS;
182 return 1; 177 return 1;
183 178
184no_kprobe: 179no_kprobe:
@@ -260,11 +255,12 @@ static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
260 * This function prepares to return from the post-single-step 255 * This function prepares to return from the post-single-step
261 * breakpoint trap. 256 * breakpoint trap.
262 */ 257 */
263static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 258static void __kprobes resume_execution(struct kprobe *p,
259 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
264{ 260{
265 u32 insn = p->ainsn.insn[0]; 261 u32 insn = p->ainsn.insn[0];
266 262
267 regs->tpc = current_kprobe_orig_tnpc; 263 regs->tpc = kcb->kprobe_orig_tnpc;
268 regs->tnpc = relbranch_fixup(insn, 264 regs->tnpc = relbranch_fixup(insn,
269 (unsigned long) p->addr, 265 (unsigned long) p->addr,
270 (unsigned long) &p->ainsn.insn[0], 266 (unsigned long) &p->ainsn.insn[0],
@@ -272,44 +268,48 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
272 retpc_fixup(regs, insn, (unsigned long) p->addr); 268 retpc_fixup(regs, insn, (unsigned long) p->addr);
273 269
274 regs->tstate = ((regs->tstate & ~TSTATE_PIL) | 270 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
275 current_kprobe_orig_tstate_pil); 271 kcb->kprobe_orig_tstate_pil);
276} 272}
277 273
278static inline int post_kprobe_handler(struct pt_regs *regs) 274static inline int post_kprobe_handler(struct pt_regs *regs)
279{ 275{
280 if (!kprobe_running()) 276 struct kprobe *cur = kprobe_running();
277 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
278
279 if (!cur)
281 return 0; 280 return 0;
282 281
283 if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) { 282 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
284 kprobe_status = KPROBE_HIT_SSDONE; 283 kcb->kprobe_status = KPROBE_HIT_SSDONE;
285 current_kprobe->post_handler(current_kprobe, regs, 0); 284 cur->post_handler(cur, regs, 0);
286 } 285 }
287 286
288 resume_execution(current_kprobe, regs); 287 resume_execution(cur, regs, kcb);
289 288
290 /*Restore back the original saved kprobes variables and continue. */ 289 /*Restore back the original saved kprobes variables and continue. */
291 if (kprobe_status == KPROBE_REENTER) { 290 if (kcb->kprobe_status == KPROBE_REENTER) {
292 restore_previous_kprobe(); 291 restore_previous_kprobe(kcb);
293 goto out; 292 goto out;
294 } 293 }
295 unlock_kprobes(); 294 reset_current_kprobe();
296out: 295out:
297 preempt_enable_no_resched(); 296 preempt_enable_no_resched();
298 297
299 return 1; 298 return 1;
300} 299}
301 300
302/* Interrupts disabled, kprobe_lock held. */
303static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 301static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
304{ 302{
305 if (current_kprobe->fault_handler 303 struct kprobe *cur = kprobe_running();
306 && current_kprobe->fault_handler(current_kprobe, regs, trapnr)) 304 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
305
306 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
307 return 1; 307 return 1;
308 308
309 if (kprobe_status & KPROBE_HIT_SS) { 309 if (kcb->kprobe_status & KPROBE_HIT_SS) {
310 resume_execution(current_kprobe, regs); 310 resume_execution(cur, regs, kcb);
311 311
312 unlock_kprobes(); 312 reset_current_kprobe();
313 preempt_enable_no_resched(); 313 preempt_enable_no_resched();
314 } 314 }
315 return 0; 315 return 0;
@@ -322,29 +322,30 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
322 unsigned long val, void *data) 322 unsigned long val, void *data)
323{ 323{
324 struct die_args *args = (struct die_args *)data; 324 struct die_args *args = (struct die_args *)data;
325 int ret = NOTIFY_DONE;
326
325 switch (val) { 327 switch (val) {
326 case DIE_DEBUG: 328 case DIE_DEBUG:
327 if (kprobe_handler(args->regs)) 329 if (kprobe_handler(args->regs))
328 return NOTIFY_STOP; 330 ret = NOTIFY_STOP;
329 break; 331 break;
330 case DIE_DEBUG_2: 332 case DIE_DEBUG_2:
331 if (post_kprobe_handler(args->regs)) 333 if (post_kprobe_handler(args->regs))
332 return NOTIFY_STOP; 334 ret = NOTIFY_STOP;
333 break; 335 break;
334 case DIE_GPF: 336 case DIE_GPF:
335 if (kprobe_running() &&
336 kprobe_fault_handler(args->regs, args->trapnr))
337 return NOTIFY_STOP;
338 break;
339 case DIE_PAGE_FAULT: 337 case DIE_PAGE_FAULT:
338 /* kprobe_running() needs smp_processor_id() */
339 preempt_disable();
340 if (kprobe_running() && 340 if (kprobe_running() &&
341 kprobe_fault_handler(args->regs, args->trapnr)) 341 kprobe_fault_handler(args->regs, args->trapnr))
342 return NOTIFY_STOP; 342 ret = NOTIFY_STOP;
343 preempt_enable();
343 break; 344 break;
344 default: 345 default:
345 break; 346 break;
346 } 347 }
347 return NOTIFY_DONE; 348 return ret;
348} 349}
349 350
350asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, 351asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
@@ -368,24 +369,21 @@ asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
368} 369}
369 370
370/* Jprobes support. */ 371/* Jprobes support. */
371static struct pt_regs jprobe_saved_regs;
372static struct pt_regs *jprobe_saved_regs_location;
373static struct sparc_stackf jprobe_saved_stack;
374
375int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 372int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
376{ 373{
377 struct jprobe *jp = container_of(p, struct jprobe, kp); 374 struct jprobe *jp = container_of(p, struct jprobe, kp);
375 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
378 376
379 jprobe_saved_regs_location = regs; 377 kcb->jprobe_saved_regs_location = regs;
380 memcpy(&jprobe_saved_regs, regs, sizeof(*regs)); 378 memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
381 379
382 /* Save a whole stack frame, this gets arguments 380 /* Save a whole stack frame, this gets arguments
383 * pushed onto the stack after using up all the 381 * pushed onto the stack after using up all the
384 * arg registers. 382 * arg registers.
385 */ 383 */
386 memcpy(&jprobe_saved_stack, 384 memcpy(&(kcb->jprobe_saved_stack),
387 (char *) (regs->u_regs[UREG_FP] + STACK_BIAS), 385 (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
388 sizeof(jprobe_saved_stack)); 386 sizeof(kcb->jprobe_saved_stack));
389 387
390 regs->tpc = (unsigned long) jp->entry; 388 regs->tpc = (unsigned long) jp->entry;
391 regs->tnpc = ((unsigned long) jp->entry) + 0x4UL; 389 regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
@@ -396,7 +394,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
396 394
397void __kprobes jprobe_return(void) 395void __kprobes jprobe_return(void)
398{ 396{
399 preempt_enable_no_resched();
400 __asm__ __volatile__( 397 __asm__ __volatile__(
401 ".globl jprobe_return_trap_instruction\n" 398 ".globl jprobe_return_trap_instruction\n"
402"jprobe_return_trap_instruction:\n\t" 399"jprobe_return_trap_instruction:\n\t"
@@ -410,14 +407,15 @@ extern void __show_regs(struct pt_regs * regs);
410int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 407int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
411{ 408{
412 u32 *addr = (u32 *) regs->tpc; 409 u32 *addr = (u32 *) regs->tpc;
410 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
413 411
414 if (addr == (u32 *) jprobe_return_trap_instruction) { 412 if (addr == (u32 *) jprobe_return_trap_instruction) {
415 if (jprobe_saved_regs_location != regs) { 413 if (kcb->jprobe_saved_regs_location != regs) {
416 printk("JPROBE: Current regs (%p) does not match " 414 printk("JPROBE: Current regs (%p) does not match "
417 "saved regs (%p).\n", 415 "saved regs (%p).\n",
418 regs, jprobe_saved_regs_location); 416 regs, kcb->jprobe_saved_regs_location);
419 printk("JPROBE: Saved registers\n"); 417 printk("JPROBE: Saved registers\n");
420 __show_regs(jprobe_saved_regs_location); 418 __show_regs(kcb->jprobe_saved_regs_location);
421 printk("JPROBE: Current registers\n"); 419 printk("JPROBE: Current registers\n");
422 __show_regs(regs); 420 __show_regs(regs);
423 BUG(); 421 BUG();
@@ -426,12 +424,13 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
426 * first so that UREG_FP is the original one for 424 * first so that UREG_FP is the original one for
427 * the stack frame restore. 425 * the stack frame restore.
428 */ 426 */
429 memcpy(regs, &jprobe_saved_regs, sizeof(*regs)); 427 memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
430 428
431 memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS), 429 memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
432 &jprobe_saved_stack, 430 &(kcb->jprobe_saved_stack),
433 sizeof(jprobe_saved_stack)); 431 sizeof(kcb->jprobe_saved_stack));
434 432
433 preempt_enable_no_resched();
435 return 1; 434 return 1;
436 } 435 }
437 return 0; 436 return 0;
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index c1f34237cdf2..bf1849dd9c49 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -154,6 +154,7 @@ int prom_callback(long *args)
154 pud_t *pudp; 154 pud_t *pudp;
155 pmd_t *pmdp; 155 pmd_t *pmdp;
156 pte_t *ptep; 156 pte_t *ptep;
157 pte_t pte;
157 158
158 for_each_process(p) { 159 for_each_process(p) {
159 mm = p->mm; 160 mm = p->mm;
@@ -178,8 +179,9 @@ int prom_callback(long *args)
178 * being called from inside OBP. 179 * being called from inside OBP.
179 */ 180 */
180 ptep = pte_offset_map(pmdp, va); 181 ptep = pte_offset_map(pmdp, va);
181 if (pte_present(*ptep)) { 182 pte = *ptep;
182 tte = pte_val(*ptep); 183 if (pte_present(pte)) {
184 tte = pte_val(pte);
183 res = PROM_TRUE; 185 res = PROM_TRUE;
184 } 186 }
185 pte_unmap(ptep); 187 pte_unmap(ptep);
@@ -218,6 +220,7 @@ int prom_callback(long *args)
218 pud_t *pudp; 220 pud_t *pudp;
219 pmd_t *pmdp; 221 pmd_t *pmdp;
220 pte_t *ptep; 222 pte_t *ptep;
223 pte_t pte;
221 int error; 224 int error;
222 225
223 if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) { 226 if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
@@ -240,8 +243,9 @@ int prom_callback(long *args)
240 * being called from inside OBP. 243 * being called from inside OBP.
241 */ 244 */
242 ptep = pte_offset_kernel(pmdp, va); 245 ptep = pte_offset_kernel(pmdp, va);
243 if (pte_present(*ptep)) { 246 pte = *ptep;
244 tte = pte_val(*ptep); 247 if (pte_present(pte)) {
248 tte = pte_val(pte);
245 res = PROM_TRUE; 249 res = PROM_TRUE;
246 } 250 }
247 goto done; 251 goto done;
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index aecccd0df1d1..009a86e5ded4 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -863,6 +863,7 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
863 pud_t *pudp = pud_offset(pgdp, address); 863 pud_t *pudp = pud_offset(pgdp, address);
864 pmd_t *pmdp = pmd_offset(pudp, address); 864 pmd_t *pmdp = pmd_offset(pudp, address);
865 pte_t *ptep; 865 pte_t *ptep;
866 pte_t pte;
866 867
867 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); 868 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
868 869
@@ -873,9 +874,10 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
873 874
874 preempt_disable(); 875 preempt_disable();
875 ptep = pte_offset_map(pmdp, address); 876 ptep = pte_offset_map(pmdp, address);
876 if (pte_present(*ptep)) { 877 pte = *ptep;
878 if (pte_present(pte)) {
877 unsigned long page = (unsigned long) 879 unsigned long page = (unsigned long)
878 page_address(pte_page(*ptep)); 880 page_address(pte_page(pte));
879 881
880 wmb(); 882 wmb();
881 __asm__ __volatile__("flush %0 + %1" 883 __asm__ __volatile__("flush %0 + %1"
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b137fd63f5e1..5d90ee9aebf1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -839,43 +839,29 @@ void smp_flush_tlb_all(void)
839 * questionable (in theory the big win for threads is the massive sharing of 839 * questionable (in theory the big win for threads is the massive sharing of
840 * address space state across processors). 840 * address space state across processors).
841 */ 841 */
842
843/* This currently is only used by the hugetlb arch pre-fault
844 * hook on UltraSPARC-III+ and later when changing the pagesize
845 * bits of the context register for an address space.
846 */
842void smp_flush_tlb_mm(struct mm_struct *mm) 847void smp_flush_tlb_mm(struct mm_struct *mm)
843{ 848{
844 /* 849 u32 ctx = CTX_HWBITS(mm->context);
845 * This code is called from two places, dup_mmap and exit_mmap. In the 850 int cpu = get_cpu();
846 * former case, we really need a flush. In the later case, the callers
847 * are single threaded exec_mmap (really need a flush), multithreaded
848 * exec_mmap case (do not need to flush, since the caller gets a new
849 * context via activate_mm), and all other callers of mmput() whence
850 * the flush can be optimized since the associated threads are dead and
851 * the mm is being torn down (__exit_mm and other mmput callers) or the
852 * owning thread is dissociating itself from the mm. The
853 * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
854 * for single thread exec and dup_mmap cases. An alternate check might
855 * have been (current->mm != mm).
856 * Kanoj Sarcar
857 */
858 if (atomic_read(&mm->mm_users) == 0)
859 return;
860
861 {
862 u32 ctx = CTX_HWBITS(mm->context);
863 int cpu = get_cpu();
864 851
865 if (atomic_read(&mm->mm_users) == 1) { 852 if (atomic_read(&mm->mm_users) == 1) {
866 mm->cpu_vm_mask = cpumask_of_cpu(cpu); 853 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
867 goto local_flush_and_out; 854 goto local_flush_and_out;
868 } 855 }
869 856
870 smp_cross_call_masked(&xcall_flush_tlb_mm, 857 smp_cross_call_masked(&xcall_flush_tlb_mm,
871 ctx, 0, 0, 858 ctx, 0, 0,
872 mm->cpu_vm_mask); 859 mm->cpu_vm_mask);
873 860
874 local_flush_and_out: 861local_flush_and_out:
875 __flush_tlb_mm(ctx, SECONDARY_CONTEXT); 862 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
876 863
877 put_cpu(); 864 put_cpu();
878 }
879} 865}
880 866
881void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) 867void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
@@ -883,34 +869,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
883 u32 ctx = CTX_HWBITS(mm->context); 869 u32 ctx = CTX_HWBITS(mm->context);
884 int cpu = get_cpu(); 870 int cpu = get_cpu();
885 871
886 if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) { 872 if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
887 mm->cpu_vm_mask = cpumask_of_cpu(cpu); 873 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
888 goto local_flush_and_out; 874 else
889 } else { 875 smp_cross_call_masked(&xcall_flush_tlb_pending,
890 /* This optimization is not valid. Normally 876 ctx, nr, (unsigned long) vaddrs,
891 * we will be holding the page_table_lock, but 877 mm->cpu_vm_mask);
892 * there is an exception which is copy_page_range()
893 * when forking. The lock is held during the individual
894 * page table updates in the parent, but not at the
895 * top level, which is where we are invoked.
896 */
897 if (0) {
898 cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
899
900 /* By virtue of running under the mm->page_table_lock,
901 * and mmu_context.h:switch_mm doing the same, the
902 * following operation is safe.
903 */
904 if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
905 goto local_flush_and_out;
906 }
907 }
908
909 smp_cross_call_masked(&xcall_flush_tlb_pending,
910 ctx, nr, (unsigned long) vaddrs,
911 mm->cpu_vm_mask);
912 878
913local_flush_and_out:
914 __flush_tlb_pending(ctx, nr, vaddrs); 879 __flush_tlb_pending(ctx, nr, vaddrs);
915 880
916 put_cpu(); 881 put_cpu();
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
index 7654b8a7f03a..3f619ead22cc 100644
--- a/arch/sparc64/kernel/sunos_ioctl32.c
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -24,7 +24,6 @@
24#include <linux/smp_lock.h> 24#include <linux/smp_lock.h>
25#include <linux/syscalls.h> 25#include <linux/syscalls.h>
26#include <linux/compat.h> 26#include <linux/compat.h>
27#include <asm/kbio.h>
28 27
29#define SUNOS_NR_OPEN 256 28#define SUNOS_NR_OPEN 256
30 29
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 38c5525087a2..459c8fbe02b4 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -60,17 +60,6 @@ static void __iomem *mstk48t59_regs;
60 60
61static int set_rtc_mmss(unsigned long); 61static int set_rtc_mmss(unsigned long);
62 62
63static __init unsigned long dummy_get_tick(void)
64{
65 return 0;
66}
67
68static __initdata struct sparc64_tick_ops dummy_tick_ops = {
69 .get_tick = dummy_get_tick,
70};
71
72struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops;
73
74#define TICK_PRIV_BIT (1UL << 63) 63#define TICK_PRIV_BIT (1UL << 63)
75 64
76#ifdef CONFIG_SMP 65#ifdef CONFIG_SMP
@@ -200,6 +189,8 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
200 .softint_mask = 1UL << 0, 189 .softint_mask = 1UL << 0,
201}; 190};
202 191
192struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
193
203static void stick_init_tick(unsigned long offset) 194static void stick_init_tick(unsigned long offset)
204{ 195{
205 tick_disable_protection(); 196 tick_disable_protection();
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
index 686e526bec04..b35dc8dc995a 100644
--- a/arch/sparc64/kernel/us2e_cpufreq.c
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -388,10 +388,8 @@ err_out:
388 kfree(driver); 388 kfree(driver);
389 cpufreq_us2e_driver = NULL; 389 cpufreq_us2e_driver = NULL;
390 } 390 }
391 if (us2e_freq_table) { 391 kfree(us2e_freq_table);
392 kfree(us2e_freq_table); 392 us2e_freq_table = NULL;
393 us2e_freq_table = NULL;
394 }
395 return ret; 393 return ret;
396 } 394 }
397 395
@@ -402,7 +400,6 @@ static void __exit us2e_freq_exit(void)
402{ 400{
403 if (cpufreq_us2e_driver) { 401 if (cpufreq_us2e_driver) {
404 cpufreq_unregister_driver(cpufreq_us2e_driver); 402 cpufreq_unregister_driver(cpufreq_us2e_driver);
405
406 kfree(cpufreq_us2e_driver); 403 kfree(cpufreq_us2e_driver);
407 cpufreq_us2e_driver = NULL; 404 cpufreq_us2e_driver = NULL;
408 kfree(us2e_freq_table); 405 kfree(us2e_freq_table);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 0340041f6143..6d1f9a3c464f 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -249,10 +249,8 @@ err_out:
249 kfree(driver); 249 kfree(driver);
250 cpufreq_us3_driver = NULL; 250 cpufreq_us3_driver = NULL;
251 } 251 }
252 if (us3_freq_table) { 252 kfree(us3_freq_table);
253 kfree(us3_freq_table); 253 us3_freq_table = NULL;
254 us3_freq_table = NULL;
255 }
256 return ret; 254 return ret;
257 } 255 }
258 256
@@ -263,7 +261,6 @@ static void __exit us3_freq_exit(void)
263{ 261{
264 if (cpufreq_us3_driver) { 262 if (cpufreq_us3_driver) {
265 cpufreq_unregister_driver(cpufreq_us3_driver); 263 cpufreq_unregister_driver(cpufreq_us3_driver);
266
267 kfree(cpufreq_us3_driver); 264 kfree(cpufreq_us3_driver);
268 cpufreq_us3_driver = NULL; 265 cpufreq_us3_driver = NULL;
269 kfree(us3_freq_table); 266 kfree(us3_freq_table);
diff --git a/arch/sparc64/oprofile/Kconfig b/arch/sparc64/oprofile/Kconfig
index 5ade19801b97..d8a84088471a 100644
--- a/arch/sparc64/oprofile/Kconfig
+++ b/arch/sparc64/oprofile/Kconfig
@@ -1,7 +1,3 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING 1config PROFILING
6 bool "Profiling support (EXPERIMENTAL)" 2 bool "Profiling support (EXPERIMENTAL)"
7 help 3 help
@@ -19,5 +15,3 @@ config OPROFILE
19 15
20 If unsure, say N. 16 If unsure, say N.
21 17
22endmenu
23