author     Trond Myklebust <Trond.Myklebust@netapp.com>   2007-02-13 01:43:25 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2007-02-13 01:43:25 -0500
commit     d9bc125caf592b7d081021f32ce5b717efdf70c8 (patch)
tree       263b7066ba22ddce21db610c0300f6eaac6f2064 /arch/mips/kernel
parent     43d78ef2ba5bec26d0315859e8324bfc0be23766 (diff)
parent     ec2f9d1331f658433411c58077871e1eef4ee1b4 (diff)

Merge branch 'master' of /home/trondmy/kernel/linux-2.6/

Conflicts:
	net/sunrpc/auth_gss/gss_krb5_crypto.c
	net/sunrpc/auth_gss/gss_spkm3_token.c
	net/sunrpc/clnt.c

Merge with mainline and fix conflicts.

Diffstat (limited to 'arch/mips/kernel')
 32 files changed, 772 insertions, 1447 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index bbbb8d7cb89b..1bf2c8448912 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -14,8 +14,6 @@ binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
14 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 14 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
15 | obj-$(CONFIG_MODULES) += mips_ksyms.o module.o | 15 | obj-$(CONFIG_MODULES) += mips_ksyms.o module.o |
16 | 16 | ||
17 | obj-$(CONFIG_APM) += apm.o | ||
18 | |||
19 | obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o | 17 | obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o |
20 | obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o | 18 | obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o |
21 | obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o | 19 | obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o |
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
deleted file mode 100644
index ba16d07588cb..000000000000
--- a/arch/mips/kernel/apm.c
+++ /dev/null
@@ -1,604 +0,0 @@
1 | /* | ||
2 | * bios-less APM driver for MIPS Linux | ||
3 | * Jamey Hicks <jamey@crl.dec.com> | ||
4 | * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com) | ||
5 | * | ||
6 | * APM 1.2 Reference: | ||
7 | * Intel Corporation, Microsoft Corporation. Advanced Power Management | ||
8 | * (APM) BIOS Interface Specification, Revision 1.2, February 1996. | ||
9 | * | ||
10 | * [This document is available from Microsoft at: | ||
11 | * http://www.microsoft.com/hwdev/busbios/amp_12.htm] | ||
12 | */ | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/poll.h> | ||
15 | #include <linux/timer.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/miscdevice.h> | ||
19 | #include <linux/apm_bios.h> | ||
20 | #include <linux/capability.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/pm.h> | ||
23 | #include <linux/device.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/completion.h> | ||
28 | |||
29 | #include <asm/apm.h> /* apm_power_info */ | ||
30 | #include <asm/system.h> | ||
31 | |||
32 | /* | ||
33 | * The apm_bios device is one of the misc char devices. | ||
34 | * This is its minor number. | ||
35 | */ | ||
36 | #define APM_MINOR_DEV 134 | ||
37 | |||
38 | /* | ||
39 | * See Documentation/Config.help for the configuration options. | ||
40 | * | ||
41 | * Various options can be changed at boot time as follows: | ||
42 | * (We allow underscores for compatibility with the modules code) | ||
43 | * apm=on/off enable/disable APM | ||
44 | */ | ||
45 | |||
46 | /* | ||
47 | * Maximum number of events stored | ||
48 | */ | ||
49 | #define APM_MAX_EVENTS 16 | ||
50 | |||
51 | struct apm_queue { | ||
52 | unsigned int event_head; | ||
53 | unsigned int event_tail; | ||
54 | apm_event_t events[APM_MAX_EVENTS]; | ||
55 | }; | ||
56 | |||
57 | /* | ||
58 | * The per-file APM data | ||
59 | */ | ||
60 | struct apm_user { | ||
61 | struct list_head list; | ||
62 | |||
63 | unsigned int suser: 1; | ||
64 | unsigned int writer: 1; | ||
65 | unsigned int reader: 1; | ||
66 | |||
67 | int suspend_result; | ||
68 | unsigned int suspend_state; | ||
69 | #define SUSPEND_NONE 0 /* no suspend pending */ | ||
70 | #define SUSPEND_PENDING 1 /* suspend pending read */ | ||
71 | #define SUSPEND_READ 2 /* suspend read, pending ack */ | ||
72 | #define SUSPEND_ACKED 3 /* suspend acked */ | ||
73 | #define SUSPEND_DONE 4 /* suspend completed */ | ||
74 | |||
75 | struct apm_queue queue; | ||
76 | }; | ||
77 | |||
78 | /* | ||
79 | * Local variables | ||
80 | */ | ||
81 | static int suspends_pending; | ||
82 | static int apm_disabled; | ||
83 | static int mips_apm_active; | ||
84 | |||
85 | static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); | ||
86 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); | ||
87 | |||
88 | /* | ||
89 | * This is a list of everyone who has opened /dev/apm_bios | ||
90 | */ | ||
91 | static DECLARE_RWSEM(user_list_lock); | ||
92 | static LIST_HEAD(apm_user_list); | ||
93 | |||
94 | /* | ||
95 | * kapmd info. kapmd provides us a process context to handle | ||
96 | * "APM" events within - specifically necessary if we're going | ||
97 | * to be suspending the system. | ||
98 | */ | ||
99 | static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait); | ||
100 | static DECLARE_COMPLETION(kapmd_exit); | ||
101 | static DEFINE_SPINLOCK(kapmd_queue_lock); | ||
102 | static struct apm_queue kapmd_queue; | ||
103 | |||
104 | |||
105 | static const char driver_version[] = "1.13"; /* no spaces */ | ||
106 | |||
107 | |||
108 | |||
109 | /* | ||
110 | * Compatibility cruft until the IPAQ people move over to the new | ||
111 | * interface. | ||
112 | */ | ||
113 | static void __apm_get_power_status(struct apm_power_info *info) | ||
114 | { | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * This allows machines to provide their own "apm get power status" function. | ||
119 | */ | ||
120 | void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status; | ||
121 | EXPORT_SYMBOL(apm_get_power_status); | ||
122 | |||
123 | |||
124 | /* | ||
125 | * APM event queue management. | ||
126 | */ | ||
127 | static inline int queue_empty(struct apm_queue *q) | ||
128 | { | ||
129 | return q->event_head == q->event_tail; | ||
130 | } | ||
131 | |||
132 | static inline apm_event_t queue_get_event(struct apm_queue *q) | ||
133 | { | ||
134 | q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; | ||
135 | return q->events[q->event_tail]; | ||
136 | } | ||
137 | |||
138 | static void queue_add_event(struct apm_queue *q, apm_event_t event) | ||
139 | { | ||
140 | q->event_head = (q->event_head + 1) % APM_MAX_EVENTS; | ||
141 | if (q->event_head == q->event_tail) { | ||
142 | static int notified; | ||
143 | |||
144 | if (notified++ == 0) | ||
145 | printk(KERN_ERR "apm: an event queue overflowed\n"); | ||
146 | q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; | ||
147 | } | ||
148 | q->events[q->event_head] = event; | ||
149 | } | ||
150 | |||
151 | static void queue_event_one_user(struct apm_user *as, apm_event_t event) | ||
152 | { | ||
153 | if (as->suser && as->writer) { | ||
154 | switch (event) { | ||
155 | case APM_SYS_SUSPEND: | ||
156 | case APM_USER_SUSPEND: | ||
157 | /* | ||
158 | * If this user already has a suspend pending, | ||
159 | * don't queue another one. | ||
160 | */ | ||
161 | if (as->suspend_state != SUSPEND_NONE) | ||
162 | return; | ||
163 | |||
164 | as->suspend_state = SUSPEND_PENDING; | ||
165 | suspends_pending++; | ||
166 | break; | ||
167 | } | ||
168 | } | ||
169 | queue_add_event(&as->queue, event); | ||
170 | } | ||
171 | |||
172 | static void queue_event(apm_event_t event, struct apm_user *sender) | ||
173 | { | ||
174 | struct apm_user *as; | ||
175 | |||
176 | down_read(&user_list_lock); | ||
177 | list_for_each_entry(as, &apm_user_list, list) { | ||
178 | if (as != sender && as->reader) | ||
179 | queue_event_one_user(as, event); | ||
180 | } | ||
181 | up_read(&user_list_lock); | ||
182 | wake_up_interruptible(&apm_waitqueue); | ||
183 | } | ||
184 | |||
185 | static void apm_suspend(void) | ||
186 | { | ||
187 | struct apm_user *as; | ||
188 | int err = pm_suspend(PM_SUSPEND_MEM); | ||
189 | |||
190 | /* | ||
191 | * Anyone on the APM queues will think we're still suspended. | ||
192 | * Send a message so everyone knows we're now awake again. | ||
193 | */ | ||
194 | queue_event(APM_NORMAL_RESUME, NULL); | ||
195 | |||
196 | /* | ||
197 | * Finally, wake up anyone who is sleeping on the suspend. | ||
198 | */ | ||
199 | down_read(&user_list_lock); | ||
200 | list_for_each_entry(as, &apm_user_list, list) { | ||
201 | as->suspend_result = err; | ||
202 | as->suspend_state = SUSPEND_DONE; | ||
203 | } | ||
204 | up_read(&user_list_lock); | ||
205 | |||
206 | wake_up(&apm_suspend_waitqueue); | ||
207 | } | ||
208 | |||
209 | static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos) | ||
210 | { | ||
211 | struct apm_user *as = fp->private_data; | ||
212 | apm_event_t event; | ||
213 | int i = count, ret = 0; | ||
214 | |||
215 | if (count < sizeof(apm_event_t)) | ||
216 | return -EINVAL; | ||
217 | |||
218 | if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK) | ||
219 | return -EAGAIN; | ||
220 | |||
221 | wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue)); | ||
222 | |||
223 | while ((i >= sizeof(event)) && !queue_empty(&as->queue)) { | ||
224 | event = queue_get_event(&as->queue); | ||
225 | |||
226 | ret = -EFAULT; | ||
227 | if (copy_to_user(buf, &event, sizeof(event))) | ||
228 | break; | ||
229 | |||
230 | if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND) | ||
231 | as->suspend_state = SUSPEND_READ; | ||
232 | |||
233 | buf += sizeof(event); | ||
234 | i -= sizeof(event); | ||
235 | } | ||
236 | |||
237 | if (i < count) | ||
238 | ret = count - i; | ||
239 | |||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | static unsigned int apm_poll(struct file *fp, poll_table * wait) | ||
244 | { | ||
245 | struct apm_user *as = fp->private_data; | ||
246 | |||
247 | poll_wait(fp, &apm_waitqueue, wait); | ||
248 | return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM; | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * apm_ioctl - handle APM ioctl | ||
253 | * | ||
254 | * APM_IOC_SUSPEND | ||
255 | * This IOCTL is overloaded, and performs two functions. It is used to: | ||
256 | * - initiate a suspend | ||
257 | * - acknowledge a suspend read from /dev/apm_bios. | ||
258 | * Only when everyone who has opened /dev/apm_bios with write permission | ||
259 | * has acknowledge does the actual suspend happen. | ||
260 | */ | ||
261 | static int | ||
262 | apm_ioctl(struct inode * inode, struct file *filp, unsigned int cmd, unsigned long arg) | ||
263 | { | ||
264 | struct apm_user *as = filp->private_data; | ||
265 | unsigned long flags; | ||
266 | int err = -EINVAL; | ||
267 | |||
268 | if (!as->suser || !as->writer) | ||
269 | return -EPERM; | ||
270 | |||
271 | switch (cmd) { | ||
272 | case APM_IOC_SUSPEND: | ||
273 | as->suspend_result = -EINTR; | ||
274 | |||
275 | if (as->suspend_state == SUSPEND_READ) { | ||
276 | /* | ||
277 | * If we read a suspend command from /dev/apm_bios, | ||
278 | * then the corresponding APM_IOC_SUSPEND ioctl is | ||
279 | * interpreted as an acknowledge. | ||
280 | */ | ||
281 | as->suspend_state = SUSPEND_ACKED; | ||
282 | suspends_pending--; | ||
283 | } else { | ||
284 | /* | ||
285 | * Otherwise it is a request to suspend the system. | ||
286 | * Queue an event for all readers, and expect an | ||
287 | * acknowledge from all writers who haven't already | ||
288 | * acknowledged. | ||
289 | */ | ||
290 | queue_event(APM_USER_SUSPEND, as); | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * If there are no further acknowledges required, suspend | ||
295 | * the system. | ||
296 | */ | ||
297 | if (suspends_pending == 0) | ||
298 | apm_suspend(); | ||
299 | |||
300 | /* | ||
301 | * Wait for the suspend/resume to complete. If there are | ||
302 | * pending acknowledges, we wait here for them. | ||
303 | * | ||
304 | * Note that we need to ensure that the PM subsystem does | ||
305 | * not kick us out of the wait when it suspends the threads. | ||
306 | */ | ||
307 | flags = current->flags; | ||
308 | current->flags |= PF_NOFREEZE; | ||
309 | |||
310 | /* | ||
311 | * Note: do not allow a thread which is acking the suspend | ||
312 | * to escape until the resume is complete. | ||
313 | */ | ||
314 | if (as->suspend_state == SUSPEND_ACKED) | ||
315 | wait_event(apm_suspend_waitqueue, | ||
316 | as->suspend_state == SUSPEND_DONE); | ||
317 | else | ||
318 | wait_event_interruptible(apm_suspend_waitqueue, | ||
319 | as->suspend_state == SUSPEND_DONE); | ||
320 | |||
321 | current->flags = flags; | ||
322 | err = as->suspend_result; | ||
323 | as->suspend_state = SUSPEND_NONE; | ||
324 | break; | ||
325 | } | ||
326 | |||
327 | return err; | ||
328 | } | ||
329 | |||
330 | static int apm_release(struct inode * inode, struct file * filp) | ||
331 | { | ||
332 | struct apm_user *as = filp->private_data; | ||
333 | filp->private_data = NULL; | ||
334 | |||
335 | down_write(&user_list_lock); | ||
336 | list_del(&as->list); | ||
337 | up_write(&user_list_lock); | ||
338 | |||
339 | /* | ||
340 | * We are now unhooked from the chain. As far as new | ||
341 | * events are concerned, we no longer exist. However, we | ||
342 | * need to balance suspends_pending, which means the | ||
343 | * possibility of sleeping. | ||
344 | */ | ||
345 | if (as->suspend_state != SUSPEND_NONE) { | ||
346 | suspends_pending -= 1; | ||
347 | if (suspends_pending == 0) | ||
348 | apm_suspend(); | ||
349 | } | ||
350 | |||
351 | kfree(as); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static int apm_open(struct inode * inode, struct file * filp) | ||
356 | { | ||
357 | struct apm_user *as; | ||
358 | |||
359 | as = kzalloc(sizeof(*as), GFP_KERNEL); | ||
360 | if (as) { | ||
361 | /* | ||
362 | * XXX - this is a tiny bit broken, when we consider BSD | ||
363 | * process accounting. If the device is opened by root, we | ||
364 | * instantly flag that we used superuser privs. Who knows, | ||
365 | * we might close the device immediately without doing a | ||
366 | * privileged operation -- cevans | ||
367 | */ | ||
368 | as->suser = capable(CAP_SYS_ADMIN); | ||
369 | as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; | ||
370 | as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; | ||
371 | |||
372 | down_write(&user_list_lock); | ||
373 | list_add(&as->list, &apm_user_list); | ||
374 | up_write(&user_list_lock); | ||
375 | |||
376 | filp->private_data = as; | ||
377 | } | ||
378 | |||
379 | return as ? 0 : -ENOMEM; | ||
380 | } | ||
381 | |||
382 | static struct file_operations apm_bios_fops = { | ||
383 | .owner = THIS_MODULE, | ||
384 | .read = apm_read, | ||
385 | .poll = apm_poll, | ||
386 | .ioctl = apm_ioctl, | ||
387 | .open = apm_open, | ||
388 | .release = apm_release, | ||
389 | }; | ||
390 | |||
391 | static struct miscdevice apm_device = { | ||
392 | .minor = APM_MINOR_DEV, | ||
393 | .name = "apm_bios", | ||
394 | .fops = &apm_bios_fops | ||
395 | }; | ||
396 | |||
397 | |||
398 | #ifdef CONFIG_PROC_FS | ||
399 | /* | ||
400 | * Arguments, with symbols from linux/apm_bios.h. | ||
401 | * | ||
402 | * 0) Linux driver version (this will change if format changes) | ||
403 | * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. | ||
404 | * 2) APM flags from APM Installation Check (0x00): | ||
405 | * bit 0: APM_16_BIT_SUPPORT | ||
406 | * bit 1: APM_32_BIT_SUPPORT | ||
407 | * bit 2: APM_IDLE_SLOWS_CLOCK | ||
408 | * bit 3: APM_BIOS_DISABLED | ||
409 | * bit 4: APM_BIOS_DISENGAGED | ||
410 | * 3) AC line status | ||
411 | * 0x00: Off-line | ||
412 | * 0x01: On-line | ||
413 | * 0x02: On backup power (BIOS >= 1.1 only) | ||
414 | * 0xff: Unknown | ||
415 | * 4) Battery status | ||
416 | * 0x00: High | ||
417 | * 0x01: Low | ||
418 | * 0x02: Critical | ||
419 | * 0x03: Charging | ||
420 | * 0x04: Selected battery not present (BIOS >= 1.2 only) | ||
421 | * 0xff: Unknown | ||
422 | * 5) Battery flag | ||
423 | * bit 0: High | ||
424 | * bit 1: Low | ||
425 | * bit 2: Critical | ||
426 | * bit 3: Charging | ||
427 | * bit 7: No system battery | ||
428 | * 0xff: Unknown | ||
429 | * 6) Remaining battery life (percentage of charge): | ||
430 | * 0-100: valid | ||
431 | * -1: Unknown | ||
432 | * 7) Remaining battery life (time units): | ||
433 | * Number of remaining minutes or seconds | ||
434 | * -1: Unknown | ||
435 | * 8) min = minutes; sec = seconds | ||
436 | */ | ||
437 | static int apm_get_info(char *buf, char **start, off_t fpos, int length) | ||
438 | { | ||
439 | struct apm_power_info info; | ||
440 | char *units; | ||
441 | int ret; | ||
442 | |||
443 | info.ac_line_status = 0xff; | ||
444 | info.battery_status = 0xff; | ||
445 | info.battery_flag = 0xff; | ||
446 | info.battery_life = -1; | ||
447 | info.time = -1; | ||
448 | info.units = -1; | ||
449 | |||
450 | if (apm_get_power_status) | ||
451 | apm_get_power_status(&info); | ||
452 | |||
453 | switch (info.units) { | ||
454 | default: units = "?"; break; | ||
455 | case 0: units = "min"; break; | ||
456 | case 1: units = "sec"; break; | ||
457 | } | ||
458 | |||
459 | ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", | ||
460 | driver_version, APM_32_BIT_SUPPORT, | ||
461 | info.ac_line_status, info.battery_status, | ||
462 | info.battery_flag, info.battery_life, | ||
463 | info.time, units); | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | #endif | ||
468 | |||
469 | static int kapmd(void *arg) | ||
470 | { | ||
471 | daemonize("kapmd"); | ||
472 | current->flags |= PF_NOFREEZE; | ||
473 | |||
474 | do { | ||
475 | apm_event_t event; | ||
476 | |||
477 | wait_event_interruptible(kapmd_wait, | ||
478 | !queue_empty(&kapmd_queue) || !mips_apm_active); | ||
479 | |||
480 | if (!mips_apm_active) | ||
481 | break; | ||
482 | |||
483 | spin_lock_irq(&kapmd_queue_lock); | ||
484 | event = 0; | ||
485 | if (!queue_empty(&kapmd_queue)) | ||
486 | event = queue_get_event(&kapmd_queue); | ||
487 | spin_unlock_irq(&kapmd_queue_lock); | ||
488 | |||
489 | switch (event) { | ||
490 | case 0: | ||
491 | break; | ||
492 | |||
493 | case APM_LOW_BATTERY: | ||
494 | case APM_POWER_STATUS_CHANGE: | ||
495 | queue_event(event, NULL); | ||
496 | break; | ||
497 | |||
498 | case APM_USER_SUSPEND: | ||
499 | case APM_SYS_SUSPEND: | ||
500 | queue_event(event, NULL); | ||
501 | if (suspends_pending == 0) | ||
502 | apm_suspend(); | ||
503 | break; | ||
504 | |||
505 | case APM_CRITICAL_SUSPEND: | ||
506 | apm_suspend(); | ||
507 | break; | ||
508 | } | ||
509 | } while (1); | ||
510 | |||
511 | complete_and_exit(&kapmd_exit, 0); | ||
512 | } | ||
513 | |||
514 | static int __init apm_init(void) | ||
515 | { | ||
516 | int ret; | ||
517 | |||
518 | if (apm_disabled) { | ||
519 | printk(KERN_NOTICE "apm: disabled on user request.\n"); | ||
520 | return -ENODEV; | ||
521 | } | ||
522 | |||
523 | mips_apm_active = 1; | ||
524 | |||
525 | ret = kernel_thread(kapmd, NULL, CLONE_KERNEL); | ||
526 | if (ret < 0) { | ||
527 | mips_apm_active = 0; | ||
528 | return ret; | ||
529 | } | ||
530 | |||
531 | #ifdef CONFIG_PROC_FS | ||
532 | create_proc_info_entry("apm", 0, NULL, apm_get_info); | ||
533 | #endif | ||
534 | |||
535 | ret = misc_register(&apm_device); | ||
536 | if (ret != 0) { | ||
537 | remove_proc_entry("apm", NULL); | ||
538 | |||
539 | mips_apm_active = 0; | ||
540 | wake_up(&kapmd_wait); | ||
541 | wait_for_completion(&kapmd_exit); | ||
542 | } | ||
543 | |||
544 | return ret; | ||
545 | } | ||
546 | |||
547 | static void __exit apm_exit(void) | ||
548 | { | ||
549 | misc_deregister(&apm_device); | ||
550 | remove_proc_entry("apm", NULL); | ||
551 | |||
552 | mips_apm_active = 0; | ||
553 | wake_up(&kapmd_wait); | ||
554 | wait_for_completion(&kapmd_exit); | ||
555 | } | ||
556 | |||
557 | module_init(apm_init); | ||
558 | module_exit(apm_exit); | ||
559 | |||
560 | MODULE_AUTHOR("Stephen Rothwell"); | ||
561 | MODULE_DESCRIPTION("Advanced Power Management"); | ||
562 | MODULE_LICENSE("GPL"); | ||
563 | |||
564 | #ifndef MODULE | ||
565 | static int __init apm_setup(char *str) | ||
566 | { | ||
567 | while ((str != NULL) && (*str != '\0')) { | ||
568 | if (strncmp(str, "off", 3) == 0) | ||
569 | apm_disabled = 1; | ||
570 | if (strncmp(str, "on", 2) == 0) | ||
571 | apm_disabled = 0; | ||
572 | str = strchr(str, ','); | ||
573 | if (str != NULL) | ||
574 | str += strspn(str, ", \t"); | ||
575 | } | ||
576 | return 1; | ||
577 | } | ||
578 | |||
579 | __setup("apm=", apm_setup); | ||
580 | #endif | ||
581 | |||
582 | /** | ||
583 | * apm_queue_event - queue an APM event for kapmd | ||
584 | * @event: APM event | ||
585 | * | ||
586 | * Queue an APM event for kapmd to process and ultimately take the | ||
587 | * appropriate action. Only a subset of events are handled: | ||
588 | * %APM_LOW_BATTERY | ||
589 | * %APM_POWER_STATUS_CHANGE | ||
590 | * %APM_USER_SUSPEND | ||
591 | * %APM_SYS_SUSPEND | ||
592 | * %APM_CRITICAL_SUSPEND | ||
593 | */ | ||
594 | void apm_queue_event(apm_event_t event) | ||
595 | { | ||
596 | unsigned long flags; | ||
597 | |||
598 | spin_lock_irqsave(&kapmd_queue_lock, flags); | ||
599 | queue_add_event(&kapmd_queue, event); | ||
600 | spin_unlock_irqrestore(&kapmd_queue_lock, flags); | ||
601 | |||
602 | wake_up_interruptible(&kapmd_wait); | ||
603 | } | ||
604 | EXPORT_SYMBOL(apm_queue_event); | ||
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ff88b06f89df..c0b089d47181 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -234,10 +234,6 @@ void output_mm_defines(void)
234 | constant("#define _PMD_SHIFT ", PMD_SHIFT); | 234 | constant("#define _PMD_SHIFT ", PMD_SHIFT); |
235 | constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); | 235 | constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); |
236 | linefeed; | 236 | linefeed; |
237 | constant("#define _PGD_ORDER ", PGD_ORDER); | ||
238 | constant("#define _PMD_ORDER ", PMD_ORDER); | ||
239 | constant("#define _PTE_ORDER ", PTE_ORDER); | ||
240 | linefeed; | ||
241 | constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD); | 237 | constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD); |
242 | constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD); | 238 | constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD); |
243 | constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE); | 239 | constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE); |
@@ -253,7 +249,6 @@ void output_sc_defines(void)
253 | offset("#define SC_MDHI ", struct sigcontext, sc_mdhi); | 249 | offset("#define SC_MDHI ", struct sigcontext, sc_mdhi); |
254 | offset("#define SC_MDLO ", struct sigcontext, sc_mdlo); | 250 | offset("#define SC_MDLO ", struct sigcontext, sc_mdlo); |
255 | offset("#define SC_PC ", struct sigcontext, sc_pc); | 251 | offset("#define SC_PC ", struct sigcontext, sc_pc); |
256 | offset("#define SC_STATUS ", struct sigcontext, sc_status); | ||
257 | offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); | 252 | offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr); |
258 | offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir); | 253 | offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir); |
259 | offset("#define SC_HI1 ", struct sigcontext, sc_hi1); | 254 | offset("#define SC_HI1 ", struct sigcontext, sc_hi1); |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 442839e9578c..f59ef271d247 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -565,7 +565,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
565 | if (config3 & MIPS_CONF3_VEIC) | 565 | if (config3 & MIPS_CONF3_VEIC) |
566 | c->options |= MIPS_CPU_VEIC; | 566 | c->options |= MIPS_CPU_VEIC; |
567 | if (config3 & MIPS_CONF3_MT) | 567 | if (config3 & MIPS_CONF3_MT) |
568 | c->ases |= MIPS_ASE_MIPSMT; | 568 | c->ases |= MIPS_ASE_MIPSMT; |
569 | 569 | ||
570 | return config3 & MIPS_CONF_M; | 570 | return config3 & MIPS_CONF_M; |
571 | } | 571 | } |
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 719d26968cb2..7bc882049269 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -505,13 +505,13 @@ void show_gdbregs(struct gdb_regs * regs)
505 | */ | 505 | */ |
506 | printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 506 | printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
507 | regs->reg0, regs->reg1, regs->reg2, regs->reg3, | 507 | regs->reg0, regs->reg1, regs->reg2, regs->reg3, |
508 | regs->reg4, regs->reg5, regs->reg6, regs->reg7); | 508 | regs->reg4, regs->reg5, regs->reg6, regs->reg7); |
509 | printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 509 | printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
510 | regs->reg8, regs->reg9, regs->reg10, regs->reg11, | 510 | regs->reg8, regs->reg9, regs->reg10, regs->reg11, |
511 | regs->reg12, regs->reg13, regs->reg14, regs->reg15); | 511 | regs->reg12, regs->reg13, regs->reg14, regs->reg15); |
512 | printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 512 | printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
513 | regs->reg16, regs->reg17, regs->reg18, regs->reg19, | 513 | regs->reg16, regs->reg17, regs->reg18, regs->reg19, |
514 | regs->reg20, regs->reg21, regs->reg22, regs->reg23); | 514 | regs->reg20, regs->reg21, regs->reg22, regs->reg23); |
515 | printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", | 515 | printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", |
516 | regs->reg24, regs->reg25, regs->reg26, regs->reg27, | 516 | regs->reg24, regs->reg25, regs->reg26, regs->reg27, |
517 | regs->reg28, regs->reg29, regs->reg30, regs->reg31); | 517 | regs->reg28, regs->reg29, regs->reg30, regs->reg31); |
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 9a7811d13db2..6f57ca44291f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
231 | #endif /* CONFIG_SMP */ | 231 | #endif /* CONFIG_SMP */ |
232 | 232 | ||
233 | __FINIT | 233 | __FINIT |
234 | |||
235 | .comm kernelsp, NR_CPUS * 8, 8 | ||
236 | .comm pgd_current, NR_CPUS * 8, 8 | ||
237 | |||
238 | .comm fw_arg0, SZREG, SZREG # firmware arguments | ||
239 | .comm fw_arg1, SZREG, SZREG | ||
240 | .comm fw_arg2, SZREG, SZREG | ||
241 | .comm fw_arg3, SZREG, SZREG | ||
242 | |||
243 | .macro page name, order | ||
244 | .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order) | ||
245 | .endm | ||
246 | |||
247 | /* | ||
248 | * On 64-bit we've got three-level pagetables with a slightly | ||
249 | * different layout ... | ||
250 | */ | ||
251 | page swapper_pg_dir, _PGD_ORDER | ||
252 | #ifdef CONFIG_64BIT | ||
253 | #if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) | ||
254 | page module_pg_dir, _PGD_ORDER | ||
255 | #endif | ||
256 | page invalid_pmd_table, _PMD_ORDER | ||
257 | #endif | ||
258 | page invalid_pte_table, _PTE_ORDER | ||
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b59a676c6d0e..b33ba6cd7f5b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
54 | 54 | ||
55 | void disable_8259A_irq(unsigned int irq) | 55 | void disable_8259A_irq(unsigned int irq) |
56 | { | 56 | { |
57 | unsigned int mask = 1 << irq; | 57 | unsigned int mask; |
58 | unsigned long flags; | 58 | unsigned long flags; |
59 | 59 | ||
60 | irq -= I8259A_IRQ_BASE; | ||
61 | mask = 1 << irq; | ||
60 | spin_lock_irqsave(&i8259A_lock, flags); | 62 | spin_lock_irqsave(&i8259A_lock, flags); |
61 | cached_irq_mask |= mask; | 63 | cached_irq_mask |= mask; |
62 | if (irq & 8) | 64 | if (irq & 8) |
@@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
68 | 70 | ||
69 | void enable_8259A_irq(unsigned int irq) | 71 | void enable_8259A_irq(unsigned int irq) |
70 | { | 72 | { |
71 | unsigned int mask = ~(1 << irq); | 73 | unsigned int mask; |
72 | unsigned long flags; | 74 | unsigned long flags; |
73 | 75 | ||
76 | irq -= I8259A_IRQ_BASE; | ||
77 | mask = ~(1 << irq); | ||
74 | spin_lock_irqsave(&i8259A_lock, flags); | 78 | spin_lock_irqsave(&i8259A_lock, flags); |
75 | cached_irq_mask &= mask; | 79 | cached_irq_mask &= mask; |
76 | if (irq & 8) | 80 | if (irq & 8) |
@@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
82 | 86 | ||
83 | int i8259A_irq_pending(unsigned int irq) | 87 | int i8259A_irq_pending(unsigned int irq) |
84 | { | 88 | { |
85 | unsigned int mask = 1 << irq; | 89 | unsigned int mask; |
86 | unsigned long flags; | 90 | unsigned long flags; |
87 | int ret; | 91 | int ret; |
88 | 92 | ||
93 | irq -= I8259A_IRQ_BASE; | ||
94 | mask = 1 << irq; | ||
89 | spin_lock_irqsave(&i8259A_lock, flags); | 95 | spin_lock_irqsave(&i8259A_lock, flags); |
90 | if (irq < 8) | 96 | if (irq < 8) |
91 | ret = inb(PIC_MASTER_CMD) & mask; | 97 | ret = inb(PIC_MASTER_CMD) & mask; |
@@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
134 | */ | 140 | */ |
135 | void mask_and_ack_8259A(unsigned int irq) | 141 | void mask_and_ack_8259A(unsigned int irq) |
136 | { | 142 | { |
137 | unsigned int irqmask = 1 << irq; | 143 | unsigned int irqmask; |
138 | unsigned long flags; | 144 | unsigned long flags; |
139 | 145 | ||
146 | irq -= I8259A_IRQ_BASE; | ||
147 | irqmask = 1 << irq; | ||
140 | spin_lock_irqsave(&i8259A_lock, flags); | 148 | spin_lock_irqsave(&i8259A_lock, flags); |
141 | /* | 149 | /* |
142 | * Lightweight spurious IRQ detection. We do not want | 150 | * Lightweight spurious IRQ detection. We do not want |
@@ -169,8 +177,8 @@ handle_real_irq:
169 | outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ | 177 | outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ |
170 | } | 178 | } |
171 | #ifdef CONFIG_MIPS_MT_SMTC | 179 | #ifdef CONFIG_MIPS_MT_SMTC |
172 | if (irq_hwmask[irq] & ST0_IM) | 180 | if (irq_hwmask[irq] & ST0_IM) |
173 | set_c0_status(irq_hwmask[irq] & ST0_IM); | 181 | set_c0_status(irq_hwmask[irq] & ST0_IM); |
174 | #endif /* CONFIG_MIPS_MT_SMTC */ | 182 | #endif /* CONFIG_MIPS_MT_SMTC */ |
175 | spin_unlock_irqrestore(&i8259A_lock, flags); | 183 | spin_unlock_irqrestore(&i8259A_lock, flags); |
176 | return; | 184 | return; |
@@ -322,8 +330,8 @@ void __init init_i8259_irqs (void)
322 | 330 | ||
323 | init_8259A(0); | 331 | init_8259A(0); |
324 | 332 | ||
325 | for (i = 0; i < 16; i++) | 333 | for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) |
326 | set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); | 334 | set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); |
327 | 335 | ||
328 | setup_irq(PIC_CASCADE_IR, &irq2); | 336 | setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); |
329 | } | 337 | } |
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 37cad5de515c..3cc25c05d367 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -10,6 +10,8 @@
10 | * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> | 10 | * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> |
11 | * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> | 11 | * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> |
12 | */ | 12 | */ |
13 | #undef DEBUG | ||
14 | |||
13 | #include <linux/module.h> | 15 | #include <linux/module.h> |
14 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
15 | #include <linux/stat.h> | 17 | #include <linux/stat.h> |
@@ -40,8 +42,6 @@
40 | 42 | ||
41 | #include <linux/elf.h> | 43 | #include <linux/elf.h> |
42 | 44 | ||
43 | #undef DEBUG | ||
44 | |||
45 | static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); | 45 | static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); |
46 | static int load_irix_library(struct file *); | 46 | static int load_irix_library(struct file *); |
47 | static int irix_core_dump(long signr, struct pt_regs * regs, | 47 | static int irix_core_dump(long signr, struct pt_regs * regs, |
@@ -52,72 +52,102 @@ static struct linux_binfmt irix_format = {
52 | irix_core_dump, PAGE_SIZE | 52 | irix_core_dump, PAGE_SIZE |
53 | }; | 53 | }; |
54 | 54 | ||
55 | #ifdef DEBUG | ||
56 | /* Debugging routines. */ | 55 | /* Debugging routines. */ |
57 | static char *get_elf_p_type(Elf32_Word p_type) | 56 | static char *get_elf_p_type(Elf32_Word p_type) |
58 | { | 57 | { |
59 | int i = (int) p_type; | 58 | #ifdef DEBUG |
60 | 59 | switch (p_type) { | |
61 | switch(i) { | 60 | case PT_NULL: |
62 | case PT_NULL: return("PT_NULL"); break; | 61 | return "PT_NULL"; |
63 | case PT_LOAD: return("PT_LOAD"); break; | 62 | break; |
64 | case PT_DYNAMIC: return("PT_DYNAMIC"); break; | 63 | |
65 | case PT_INTERP: return("PT_INTERP"); break; | 64 | case PT_LOAD: |
66 | case PT_NOTE: return("PT_NOTE"); break; | 65 | return "PT_LOAD"; |
67 | case PT_SHLIB: return("PT_SHLIB"); break; | 66 | break; |
68 | case PT_PHDR: return("PT_PHDR"); break; | 67 | |
69 | case PT_LOPROC: return("PT_LOPROC/REGINFO"); break; | 68 | case PT_DYNAMIC: |
70 | case PT_HIPROC: return("PT_HIPROC"); break; | 69 | return "PT_DYNAMIC"; |
71 | default: return("PT_BOGUS"); break; | 70 | break; |
71 | |||
72 | case PT_INTERP: | ||
73 | return "PT_INTERP"; | ||
74 | break; | ||
75 | |||
76 | case PT_NOTE: | ||
77 | return "PT_NOTE"; | ||
78 | break; | ||
79 | |||
80 | case PT_SHLIB: | ||
81 | return "PT_SHLIB"; | ||
82 | break; | ||
83 | |||
84 | case PT_PHDR: | ||
85 | return "PT_PHDR"; | ||
86 | break; | ||
87 | |||
88 | case PT_LOPROC: | ||
89 | return "PT_LOPROC/REGINFO"; | ||
90 | break; | ||
91 | |||
92 | case PT_HIPROC: | ||
93 | return "PT_HIPROC"; | ||
94 | break; | ||
95 | |||
96 | default: | ||
97 | return "PT_BOGUS"; | ||
98 | break; | ||
72 | } | 99 | } |
100 | #endif | ||
73 | } | 101 | } |
74 | 102 | ||
75 | static void print_elfhdr(struct elfhdr *ehp) | 103 | static void print_elfhdr(struct elfhdr *ehp) |
76 | { | 104 | { |
77 | int i; | 105 | int i; |
78 | 106 | ||
79 | printk("ELFHDR: e_ident<"); | 107 | pr_debug("ELFHDR: e_ident<"); |
80 | for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]); | 108 | for (i = 0; i < (EI_NIDENT - 1); i++) |
81 | printk("%x>\n", ehp->e_ident[i]); | 109 | pr_debug("%x ", ehp->e_ident[i]); |
82 | printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", | 110 | pr_debug("%x>\n", ehp->e_ident[i]); |
83 | (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, | 111 | pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", |
84 | (unsigned long) ehp->e_version); | 112 | (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, |
85 | printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " | 113 | (unsigned long) ehp->e_version); |
86 | "e_flags[%08lx]\n", | 114 | pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " |
87 | (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, | 115 | "e_flags[%08lx]\n", |
88 | (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); | 116 | (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, |
89 | printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", | 117 | (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); |
90 | (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize, | 118 | pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", |
91 | (unsigned short) ehp->e_phnum); | 119 | (unsigned short) ehp->e_ehsize, |
92 | printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", | 120 | (unsigned short) ehp->e_phentsize, |
93 | (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum, | 121 | (unsigned short) ehp->e_phnum); |
94 | (unsigned short) ehp->e_shstrndx); | 122 | pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", |
123 | (unsigned short) ehp->e_shentsize, | ||
124 | (unsigned short) ehp->e_shnum, | ||
125 | (unsigned short) ehp->e_shstrndx); | ||
95 | } | 126 | } |
96 | 127 | ||
97 | static void print_phdr(int i, struct elf_phdr *ep) | 128 | static void print_phdr(int i, struct elf_phdr *ep) |
98 | { | 129 | { |
99 | printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " | 130 | pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " |
100 | "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), | 131 | "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), |
101 | (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, | 132 | (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, |
102 | (unsigned long) ep->p_paddr); | 133 | (unsigned long) ep->p_paddr); |
103 | printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " | 134 | pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " |
104 | "p_align[%08lx]\n", (unsigned long) ep->p_filesz, | 135 | "p_align[%08lx]\n", (unsigned long) ep->p_filesz, |
105 | (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, | 136 | (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, |
106 | (unsigned long) ep->p_align); | 137 | (unsigned long) ep->p_align); |
107 | } | 138 | } |
108 | 139 | ||
109 | static void dump_phdrs(struct elf_phdr *ep, int pnum) | 140 | static void dump_phdrs(struct elf_phdr *ep, int pnum) |
110 | { | 141 | { |
111 | int i; | 142 | int i; |
112 | 143 | ||
113 | for(i = 0; i < pnum; i++, ep++) { | 144 | for (i = 0; i < pnum; i++, ep++) { |
114 | if((ep->p_type == PT_LOAD) || | 145 | if ((ep->p_type == PT_LOAD) || |
115 | (ep->p_type == PT_INTERP) || | 146 | (ep->p_type == PT_INTERP) || |
116 | (ep->p_type == PT_PHDR)) | 147 | (ep->p_type == PT_PHDR)) |
117 | print_phdr(i, ep); | 148 | print_phdr(i, ep); |
118 | } | 149 | } |
119 | } | 150 | } |
120 | #endif /* DEBUG */ | ||
121 | 151 | ||
122 | static void set_brk(unsigned long start, unsigned long end) | 152 | static void set_brk(unsigned long start, unsigned long end) |
123 | { | 153 | { |
@@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
156 | elf_addr_t *envp; | 186 | elf_addr_t *envp; |
157 | elf_addr_t *sp, *csp; | 187 | elf_addr_t *sp, *csp; |
158 | 188 | ||
159 | #ifdef DEBUG | 189 | pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] " |
160 | printk("create_irix_tables: p[%p] argc[%d] envc[%d] " | 190 | "load_addr[%08x] interp_load_addr[%08x]\n", |
161 | "load_addr[%08x] interp_load_addr[%08x]\n", | 191 | p, argc, envc, load_addr, interp_load_addr); |
162 | p, argc, envc, load_addr, interp_load_addr); | 192 | |
163 | #endif | ||
164 | sp = (elf_addr_t *) (~15UL & (unsigned long) p); | 193 | sp = (elf_addr_t *) (~15UL & (unsigned long) p); |
165 | csp = sp; | 194 | csp = sp; |
166 | csp -= exec ? DLINFO_ITEMS*2 : 2; | 195 | csp -= exec ? DLINFO_ITEMS*2 : 2; |
@@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
181 | sp -= 2; | 210 | sp -= 2; |
182 | NEW_AUX_ENT(0, AT_NULL, 0); | 211 | NEW_AUX_ENT(0, AT_NULL, 0); |
183 | 212 | ||
184 | if(exec) { | 213 | if (exec) { |
185 | sp -= 11*2; | 214 | sp -= 11*2; |
186 | 215 | ||
187 | NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff); | 216 | NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff); |
@@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
245 | last_bss = 0; | 274 | last_bss = 0; |
246 | error = load_addr = 0; | 275 | error = load_addr = 0; |
247 | 276 | ||
248 | #ifdef DEBUG | ||
249 | print_elfhdr(interp_elf_ex); | 277 | print_elfhdr(interp_elf_ex); |
250 | #endif | ||
251 | 278 | ||
252 | /* First of all, some simple consistency checks */ | 279 | /* First of all, some simple consistency checks */ |
253 | if ((interp_elf_ex->e_type != ET_EXEC && | 280 | if ((interp_elf_ex->e_type != ET_EXEC && |
@@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
258 | } | 285 | } |
259 | 286 | ||
260 | /* Now read in all of the header information */ | 287 | /* Now read in all of the header information */ |
261 | if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { | 288 | if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { |
262 | printk("IRIX interp header bigger than a page (%d)\n", | 289 | printk("IRIX interp header bigger than a page (%d)\n", |
263 | (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); | 290 | (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); |
264 | return 0xffffffff; | 291 | return 0xffffffff; |
@@ -267,15 +294,15 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
267 | elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, | 294 | elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, |
268 | GFP_KERNEL); | 295 | GFP_KERNEL); |
269 | 296 | ||
270 | if(!elf_phdata) { | 297 | if (!elf_phdata) { |
271 | printk("Cannot kmalloc phdata for IRIX interp.\n"); | 298 | printk("Cannot kmalloc phdata for IRIX interp.\n"); |
272 | return 0xffffffff; | 299 | return 0xffffffff; |
273 | } | 300 | } |
274 | 301 | ||
275 | /* If the size of this structure has changed, then punt, since | 302 | /* If the size of this structure has changed, then punt, since |
276 | * we will be doing the wrong thing. | 303 | * we will be doing the wrong thing. |
277 | */ | 304 | */ |
278 | if(interp_elf_ex->e_phentsize != 32) { | 305 | if (interp_elf_ex->e_phentsize != 32) { |
279 | printk("IRIX interp e_phentsize == %d != 32 ", | 306 | printk("IRIX interp e_phentsize == %d != 32 ", |
280 | interp_elf_ex->e_phentsize); | 307 | interp_elf_ex->e_phentsize); |
281 | kfree(elf_phdata); | 308 | kfree(elf_phdata); |
@@ -286,61 +313,71 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
286 | (char *) elf_phdata, | 313 | (char *) elf_phdata, |
287 | sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); | 314 | sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); |
288 | 315 | ||
289 | #ifdef DEBUG | ||
290 | dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); | 316 | dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); |
291 | #endif | ||
292 | 317 | ||
293 | eppnt = elf_phdata; | 318 | eppnt = elf_phdata; |
294 | for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { | 319 | for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { |
295 | if(eppnt->p_type == PT_LOAD) { | 320 | if (eppnt->p_type == PT_LOAD) { |
296 | int elf_type = MAP_PRIVATE | MAP_DENYWRITE; | 321 | int elf_type = MAP_PRIVATE | MAP_DENYWRITE; |
297 | int elf_prot = 0; | 322 | int elf_prot = 0; |
298 | unsigned long vaddr = 0; | 323 | unsigned long vaddr = 0; |
299 | if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; | 324 | if (eppnt->p_flags & PF_R) |
300 | if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; | 325 | elf_prot = PROT_READ; |
301 | if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; | 326 | if (eppnt->p_flags & PF_W) |
302 | elf_type |= MAP_FIXED; | 327 | elf_prot |= PROT_WRITE; |
303 | vaddr = eppnt->p_vaddr; | 328 | if (eppnt->p_flags & PF_X) |
304 | 329 | elf_prot |= PROT_EXEC; | |
305 | pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", | 330 | elf_type |= MAP_FIXED; |
306 | interpreter, vaddr, | 331 | vaddr = eppnt->p_vaddr; |
307 | (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), | 332 | |
308 | (unsigned long) elf_prot, (unsigned long) elf_type, | 333 | pr_debug("INTERP do_mmap" |
309 | (unsigned long) (eppnt->p_offset & 0xfffff000)); | 334 | "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", |
310 | down_write(¤t->mm->mmap_sem); | 335 | interpreter, vaddr, |
311 | error = do_mmap(interpreter, vaddr, | 336 | (unsigned long) |
312 | eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), | 337 | (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), |
313 | elf_prot, elf_type, | 338 | (unsigned long) |
314 | eppnt->p_offset & 0xfffff000); | 339 | elf_prot, (unsigned long) elf_type, |
315 | up_write(¤t->mm->mmap_sem); | 340 | (unsigned long) |
316 | 341 | (eppnt->p_offset & 0xfffff000)); | |
317 | if(error < 0 && error > -1024) { | 342 | |
318 | printk("Aieee IRIX interp mmap error=%d\n", error); | 343 | down_write(¤t->mm->mmap_sem); |
319 | break; /* Real error */ | 344 | error = do_mmap(interpreter, vaddr, |
320 | } | 345 | eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), |
321 | pr_debug("error=%08lx ", (unsigned long) error); | 346 | elf_prot, elf_type, |
322 | if(!load_addr && interp_elf_ex->e_type == ET_DYN) { | 347 | eppnt->p_offset & 0xfffff000); |
323 | load_addr = error; | 348 | up_write(¤t->mm->mmap_sem); |
324 | pr_debug("load_addr = error "); | 349 | |
325 | } | 350 | if (error < 0 && error > -1024) { |
326 | 351 | printk("Aieee IRIX interp mmap error=%d\n", | |
327 | /* Find the end of the file mapping for this phdr, and keep | 352 | error); |
328 | * track of the largest address we see for this. | 353 | break; /* Real error */ |
329 | */ | 354 | } |
330 | k = eppnt->p_vaddr + eppnt->p_filesz; | 355 | pr_debug("error=%08lx ", (unsigned long) error); |
331 | if(k > elf_bss) elf_bss = k; | 356 | if (!load_addr && interp_elf_ex->e_type == ET_DYN) { |
332 | 357 | load_addr = error; | |
333 | /* Do the same thing for the memory mapping - between | 358 | pr_debug("load_addr = error "); |
334 | * elf_bss and last_bss is the bss section. | 359 | } |
335 | */ | 360 | |
336 | k = eppnt->p_memsz + eppnt->p_vaddr; | 361 | /* |
337 | if(k > last_bss) last_bss = k; | 362 | * Find the end of the file mapping for this phdr, and |
338 | pr_debug("\n"); | 363 | * keep track of the largest address we see for this. |
339 | } | 364 | */ |
365 | k = eppnt->p_vaddr + eppnt->p_filesz; | ||
366 | if (k > elf_bss) | ||
367 | elf_bss = k; | ||
368 | |||
369 | /* Do the same thing for the memory mapping - between | ||
370 | * elf_bss and last_bss is the bss section. | ||
371 | */ | ||
372 | k = eppnt->p_memsz + eppnt->p_vaddr; | ||
373 | if (k > last_bss) | ||
374 | last_bss = k; | ||
375 | pr_debug("\n"); | ||
376 | } | ||
340 | } | 377 | } |
341 | 378 | ||
342 | /* Now use mmap to map the library into memory. */ | 379 | /* Now use mmap to map the library into memory. */ |
343 | if(error < 0 && error > -1024) { | 380 | if (error < 0 && error > -1024) { |
344 | pr_debug("got error %d\n", error); | 381 | pr_debug("got error %d\n", error); |
345 | kfree(elf_phdata); | 382 | kfree(elf_phdata); |
346 | return 0xffffffff; | 383 | return 0xffffffff; |
@@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
377 | return -ENOEXEC; | 414 | return -ENOEXEC; |
378 | 415 | ||
379 | /* First of all, some simple consistency checks */ | 416 | /* First of all, some simple consistency checks */ |
380 | if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || | 417 | if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || |
381 | !bprm->file->f_op->mmap) { | 418 | !bprm->file->f_op->mmap) { |
382 | return -ENOEXEC; | 419 | return -ENOEXEC; |
383 | } | 420 | } |
@@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
388 | * XXX all registers as 64bits on cpu's capable of this at | 425 | * XXX all registers as 64bits on cpu's capable of this at |
389 | * XXX exception time plus frob the XTLB exception vector. | 426 | * XXX exception time plus frob the XTLB exception vector. |
390 | */ | 427 | */ |
391 | if((ehp->e_flags & EF_MIPS_ABI2)) | 428 | if ((ehp->e_flags & EF_MIPS_ABI2)) |
392 | return -ENOEXEC; | 429 | return -ENOEXEC; |
393 | 430 | ||
394 | return 0; | 431 | return 0; |
@@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
410 | struct file *file = NULL; | 447 | struct file *file = NULL; |
411 | 448 | ||
412 | *name = NULL; | 449 | *name = NULL; |
413 | for(i = 0; i < pnum; i++, epp++) { | 450 | for (i = 0; i < pnum; i++, epp++) { |
414 | if (epp->p_type != PT_INTERP) | 451 | if (epp->p_type != PT_INTERP) |
415 | continue; | 452 | continue; |
416 | 453 | ||
@@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
467 | unsigned int tmp; | 504 | unsigned int tmp; |
468 | int i, prot; | 505 | int i, prot; |
469 | 506 | ||
470 | for(i = 0; i < pnum; i++, epp++) { | 507 | for (i = 0; i < pnum; i++, epp++) { |
471 | if(epp->p_type != PT_LOAD) | 508 | if (epp->p_type != PT_LOAD) |
472 | continue; | 509 | continue; |
473 | 510 | ||
474 | /* Map it. */ | 511 | /* Map it. */ |
@@ -483,23 +520,23 @@
483 | up_write(¤t->mm->mmap_sem); | 520 | up_write(¤t->mm->mmap_sem); |
484 | 521 | ||
485 | /* Fixup location tracking vars. */ | 522 | /* Fixup location tracking vars. */ |
486 | if((epp->p_vaddr & 0xfffff000) < *estack) | 523 | if ((epp->p_vaddr & 0xfffff000) < *estack) |
487 | *estack = (epp->p_vaddr & 0xfffff000); | 524 | *estack = (epp->p_vaddr & 0xfffff000); |
488 | if(!*laddr) | 525 | if (!*laddr) |
489 | *laddr = epp->p_vaddr - epp->p_offset; | 526 | *laddr = epp->p_vaddr - epp->p_offset; |
490 | if(epp->p_vaddr < *scode) | 527 | if (epp->p_vaddr < *scode) |
491 | *scode = epp->p_vaddr; | 528 | *scode = epp->p_vaddr; |
492 | 529 | ||
493 | tmp = epp->p_vaddr + epp->p_filesz; | 530 | tmp = epp->p_vaddr + epp->p_filesz; |
494 | if(tmp > *ebss) | 531 | if (tmp > *ebss) |
495 | *ebss = tmp; | 532 | *ebss = tmp; |
496 | if((epp->p_flags & PF_X) && *ecode < tmp) | 533 | if ((epp->p_flags & PF_X) && *ecode < tmp) |
497 | *ecode = tmp; | 534 | *ecode = tmp; |
498 | if(*edata < tmp) | 535 | if (*edata < tmp) |
499 | *edata = tmp; | 536 | *edata = tmp; |
500 | 537 | ||
501 | tmp = epp->p_vaddr + epp->p_memsz; | 538 | tmp = epp->p_vaddr + epp->p_memsz; |
502 | if(tmp > *ebrk) | 539 | if (tmp > *ebrk) |
503 | *ebrk = tmp; | 540 | *ebrk = tmp; |
504 | } | 541 | } |
505 | 542 | ||
@@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
513 | int i; | 550 | int i; |
514 | 551 | ||
515 | *eentry = 0xffffffff; | 552 | *eentry = 0xffffffff; |
516 | for(i = 0; i < pnum; i++, epp++) { | 553 | for (i = 0; i < pnum; i++, epp++) { |
517 | if(epp->p_type != PT_INTERP) | 554 | if (epp->p_type != PT_INTERP) |
518 | continue; | 555 | continue; |
519 | 556 | ||
520 | /* We should have fielded this error elsewhere... */ | 557 | /* We should have fielded this error elsewhere... */ |
521 | if(*eentry != 0xffffffff) | 558 | if (*eentry != 0xffffffff) |
522 | return -1; | 559 | return -1; |
523 | 560 | ||
524 | set_fs(old_fs); | 561 | set_fs(old_fs); |
@@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
604 | if (elf_ex.e_shnum > 20) | 641 | if (elf_ex.e_shnum > 20) |
605 | goto out; | 642 | goto out; |
606 | 643 | ||
607 | #ifdef DEBUG | ||
608 | print_elfhdr(&elf_ex); | 644 | print_elfhdr(&elf_ex); |
609 | #endif | ||
610 | 645 | ||
611 | /* Now read in all of the header information */ | 646 | /* Now read in all of the header information */ |
612 | size = elf_ex.e_phentsize * elf_ex.e_phnum; | 647 | size = elf_ex.e_phentsize * elf_ex.e_phnum; |
@@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
622 | if (retval < 0) | 657 | if (retval < 0) |
623 | goto out_free_ph; | 658 | goto out_free_ph; |
624 | 659 | ||
625 | #ifdef DEBUG | ||
626 | dump_phdrs(elf_phdata, elf_ex.e_phnum); | 660 | dump_phdrs(elf_phdata, elf_ex.e_phnum); |
627 | #endif | ||
628 | 661 | ||
629 | /* Set some things for later. */ | 662 | /* Set some things for later. */ |
630 | for(i = 0; i < elf_ex.e_phnum; i++) { | 663 | for (i = 0; i < elf_ex.e_phnum; i++) { |
631 | switch(elf_phdata[i].p_type) { | 664 | switch (elf_phdata[i].p_type) { |
632 | case PT_INTERP: | 665 | case PT_INTERP: |
633 | has_interp = 1; | 666 | has_interp = 1; |
634 | elf_ihdr = &elf_phdata[i]; | 667 | elf_ihdr = &elf_phdata[i]; |
@@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
667 | 700 | ||
668 | if (elf_interpreter) { | 701 | if (elf_interpreter) { |
669 | retval = verify_irix_interpreter(&interp_elf_ex); | 702 | retval = verify_irix_interpreter(&interp_elf_ex); |
670 | if(retval) | 703 | if (retval) |
671 | goto out_free_interp; | 704 | goto out_free_interp; |
672 | } | 705 | } |
673 | 706 | ||
@@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
706 | &load_addr, &start_code, &elf_bss, &end_code, | 739 | &load_addr, &start_code, &elf_bss, &end_code, |
707 | &end_data, &elf_brk); | 740 | &end_data, &elf_brk); |
708 | 741 | ||
709 | if(elf_interpreter) { | 742 | if (elf_interpreter) { |
710 | retval = map_interpreter(elf_phdata, &interp_elf_ex, | 743 | retval = map_interpreter(elf_phdata, &interp_elf_ex, |
711 | interpreter, &interp_load_addr, | 744 | interpreter, &interp_load_addr, |
712 | elf_ex.e_phnum, old_fs, &elf_entry); | 745 | elf_ex.e_phnum, old_fs, &elf_entry); |
713 | kfree(elf_interpreter); | 746 | kfree(elf_interpreter); |
714 | if(retval) { | 747 | if (retval) { |
715 | set_fs(old_fs); | 748 | set_fs(old_fs); |
716 | printk("Unable to load IRIX ELF interpreter\n"); | 749 | printk("Unable to load IRIX ELF interpreter\n"); |
717 | send_sig(SIGSEGV, current, 0); | 750 | send_sig(SIGSEGV, current, 0); |
@@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
809 | return -ENOEXEC; | 842 | return -ENOEXEC; |
810 | 843 | ||
811 | /* First of all, some simple consistency checks. */ | 844 | /* First of all, some simple consistency checks. */ |
812 | if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || | 845 | if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || |
813 | !file->f_op->mmap) | 846 | !file->f_op->mmap) |
814 | return -ENOEXEC; | 847 | return -ENOEXEC; |
815 | 848 | ||
816 | /* Now read in all of the header information. */ | 849 | /* Now read in all of the header information. */ |
817 | if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) | 850 | if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) |
818 | return -ENOEXEC; | 851 | return -ENOEXEC; |
819 | 852 | ||
820 | elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); | 853 | elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); |
@@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
825 | sizeof(struct elf_phdr) * elf_ex.e_phnum); | 858 | sizeof(struct elf_phdr) * elf_ex.e_phnum); |
826 | 859 | ||
827 | j = 0; | 860 | j = 0; |
828 | for(i=0; i<elf_ex.e_phnum; i++) | 861 | for (i=0; i<elf_ex.e_phnum; i++) |
829 | if((elf_phdata + i)->p_type == PT_LOAD) j++; | 862 | if ((elf_phdata + i)->p_type == PT_LOAD) j++; |
830 | 863 | ||
831 | if(j != 1) { | 864 | if (j != 1) { |
832 | kfree(elf_phdata); | 865 | kfree(elf_phdata); |
833 | return -ENOEXEC; | 866 | return -ENOEXEC; |
834 | } | 867 | } |
835 | 868 | ||
836 | while(elf_phdata->p_type != PT_LOAD) elf_phdata++; | 869 | while (elf_phdata->p_type != PT_LOAD) elf_phdata++; |
837 | 870 | ||
838 | /* Now use mmap to map the library into memory. */ | 871 | /* Now use mmap to map the library into memory. */ |
839 | down_write(¤t->mm->mmap_sem); | 872 | down_write(¤t->mm->mmap_sem); |
@@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
889 | return -EFAULT; | 922 | return -EFAULT; |
890 | } | 923 | } |
891 | 924 | ||
892 | #ifdef DEBUG | ||
893 | dump_phdrs(user_phdrp, cnt); | 925 | dump_phdrs(user_phdrp, cnt); |
894 | #endif | ||
895 | 926 | ||
896 | for (i = 0; i < cnt; i++, hp++) { | 927 | for (i = 0; i < cnt; i++, hp++) { |
897 | if (__get_user(type, &hp->p_type)) | 928 | if (__get_user(type, &hp->p_type)) |
@@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
905 | filp = fget(fd); | 936 | filp = fget(fd); |
906 | if (!filp) | 937 | if (!filp) |
907 | return -EACCES; | 938 | return -EACCES; |
908 | if(!filp->f_op) { | 939 | if (!filp->f_op) { |
909 | printk("irix_mapelf: Bogon filp!\n"); | 940 | printk("irix_mapelf: Bogon filp!\n"); |
910 | fput(filp); | 941 | fput(filp); |
911 | return -EACCES; | 942 | return -EACCES; |
912 | } | 943 | } |
913 | 944 | ||
914 | hp = user_phdrp; | 945 | hp = user_phdrp; |
915 | for(i = 0; i < cnt; i++, hp++) { | 946 | for (i = 0; i < cnt; i++, hp++) { |
916 | int prot; | 947 | int prot; |
917 | 948 | ||
918 | retval = __get_user(vaddr, &hp->p_vaddr); | 949 | retval = __get_user(vaddr, &hp->p_vaddr); |
@@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
1015 | return sz; | 1046 | return sz; |
1016 | } | 1047 | } |
1017 | 1048 | ||
1018 | /* #define DEBUG */ | ||
1019 | |||
1020 | #define DUMP_WRITE(addr, nr) \ | 1049 | #define DUMP_WRITE(addr, nr) \ |
1021 | if (!dump_write(file, (addr), (nr))) \ | 1050 | if (!dump_write(file, (addr), (nr))) \ |
1022 | goto end_coredump; | 1051 | goto end_coredump; |
@@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1093 | 1122 | ||
1094 | segs++; | 1123 | segs++; |
1095 | } | 1124 | } |
1096 | #ifdef DEBUG | 1125 | pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size); |
1097 | printk("irix_core_dump: %d segs taking %d bytes\n", segs, size); | ||
1098 | #endif | ||
1099 | 1126 | ||
1100 | /* Set up header. */ | 1127 | /* Set up header. */ |
1101 | memcpy(elf.e_ident, ELFMAG, SELFMAG); | 1128 | memcpy(elf.e_ident, ELFMAG, SELFMAG); |
@@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1221 | struct elf_phdr phdr; | 1248 | struct elf_phdr phdr; |
1222 | int sz = 0; | 1249 | int sz = 0; |
1223 | 1250 | ||
1224 | for(i = 0; i < numnote; i++) | 1251 | for (i = 0; i < numnote; i++) |
1225 | sz += notesize(¬es[i]); | 1252 | sz += notesize(¬es[i]); |
1226 | 1253 | ||
1227 | phdr.p_type = PT_NOTE; | 1254 | phdr.p_type = PT_NOTE; |
@@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1241 | dataoff = offset = roundup(offset, PAGE_SIZE); | 1268 | dataoff = offset = roundup(offset, PAGE_SIZE); |
1242 | 1269 | ||
1243 | /* Write program headers for segments dump. */ | 1270 | /* Write program headers for segments dump. */ |
1244 | for(vma = current->mm->mmap, i = 0; | 1271 | for (vma = current->mm->mmap, i = 0; |
1245 | i < segs && vma != NULL; vma = vma->vm_next) { | 1272 | i < segs && vma != NULL; vma = vma->vm_next) { |
1246 | struct elf_phdr phdr; | 1273 | struct elf_phdr phdr; |
1247 | size_t sz; | 1274 | size_t sz; |
@@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1267 | DUMP_WRITE(&phdr, sizeof(phdr)); | 1294 | DUMP_WRITE(&phdr, sizeof(phdr)); |
1268 | } | 1295 | } |
1269 | 1296 | ||
1270 | for(i = 0; i < numnote; i++) | 1297 | for (i = 0; i < numnote; i++) |
1271 | if (!writenote(¬es[i], file)) | 1298 | if (!writenote(¬es[i], file)) |
1272 | goto end_coredump; | 1299 | goto end_coredump; |
1273 | 1300 | ||
@@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1275 | 1302 | ||
1276 | DUMP_SEEK(dataoff); | 1303 | DUMP_SEEK(dataoff); |
1277 | 1304 | ||
1278 | for(i = 0, vma = current->mm->mmap; | 1305 | for (i = 0, vma = current->mm->mmap; |
1279 | i < segs && vma != NULL; | 1306 | i < segs && vma != NULL; |
1280 | vma = vma->vm_next) { | 1307 | vma = vma->vm_next) { |
1281 | unsigned long addr = vma->vm_start; | 1308 | unsigned long addr = vma->vm_start; |
@@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file) | |||
1284 | if (!maydump(vma)) | 1311 | if (!maydump(vma)) |
1285 | continue; | 1312 | continue; |
1286 | i++; | 1313 | i++; |
1287 | #ifdef DEBUG | 1314 | pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len); |
1288 | printk("elf_core_dump: writing %08lx %lx\n", addr, len); | ||
1289 | #endif | ||
1290 | DUMP_WRITE((void __user *)addr, len); | 1315 | DUMP_WRITE((void __user *)addr, len); |
1291 | } | 1316 | } |
1292 | 1317 | ||
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index bcaad6696082..2967537221e2 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | struct irq_chip msc_levelirq_type = { | 114 | struct irq_chip msc_levelirq_type = { |
115 | .typename = "SOC-it-Level", | 115 | .name = "SOC-it-Level", |
116 | .ack = level_mask_and_ack_msc_irq, | 116 | .ack = level_mask_and_ack_msc_irq, |
117 | .mask = mask_msc_irq, | 117 | .mask = mask_msc_irq, |
118 | .mask_ack = level_mask_and_ack_msc_irq, | 118 | .mask_ack = level_mask_and_ack_msc_irq, |
@@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = { | |||
122 | }; | 122 | }; |
123 | 123 | ||
124 | struct irq_chip msc_edgeirq_type = { | 124 | struct irq_chip msc_edgeirq_type = { |
125 | .typename = "SOC-it-Edge", | 125 | .name = "SOC-it-Edge", |
126 | .ack = edge_mask_and_ack_msc_irq, | 126 | .ack = edge_mask_and_ack_msc_irq, |
127 | .mask = mask_msc_irq, | 127 | .mask = mask_msc_irq, |
128 | .mask_ack = edge_mask_and_ack_msc_irq, | 128 | .mask_ack = edge_mask_and_ack_msc_irq, |
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c index efbd219845b5..3dd561832e4c 100644 --- a/arch/mips/kernel/irq-mv6434x.c +++ b/arch/mips/kernel/irq-mv6434x.c | |||
@@ -23,13 +23,13 @@ static unsigned int irq_base; | |||
23 | 23 | ||
24 | static inline int ls1bit32(unsigned int x) | 24 | static inline int ls1bit32(unsigned int x) |
25 | { | 25 | { |
26 | int b = 31, s; | 26 | int b = 31, s; |
27 | 27 | ||
28 | s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s; | 28 | s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s; |
29 | s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s; | 29 | s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s; |
30 | s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s; | 30 | s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s; |
31 | s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s; | 31 | s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s; |
32 | s = 1; if (x << 1 == 0) s = 0; b -= s; | 32 | s = 1; if (x << 1 == 0) s = 0; b -= s; |
33 | 33 | ||
34 | return b; | 34 | return b; |
35 | } | 35 | } |
@@ -92,7 +92,7 @@ void ll_mv64340_irq(void) | |||
92 | } | 92 | } |
93 | 93 | ||
94 | struct irq_chip mv64340_irq_type = { | 94 | struct irq_chip mv64340_irq_type = { |
95 | .typename = "MV-64340", | 95 | .name = "MV-64340", |
96 | .ack = mask_mv64340_irq, | 96 | .ack = mask_mv64340_irq, |
97 | .mask = mask_mv64340_irq, | 97 | .mask = mask_mv64340_irq, |
98 | .mask_ack = mask_mv64340_irq, | 98 | .mask_ack = mask_mv64340_irq, |
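The ls1bit32() helper shown as context above is a binary search for the least significant set bit: at each step, if the low half of the remaining bits is zero the answer lies in the upper half, otherwise the low half is shifted up and the candidate index drops accordingly. A minimal user-space sketch of the same routine (illustration only, not part of the patch; the test values in main() are arbitrary), so the shift-and-test steps can be checked in isolation:

#include <assert.h>

static int ls1bit32(unsigned int x)
{
	int b = 31, s;

	/* If (x << 16) == 0 the low 16 bits are all zero, so the first set
	 * bit is in the upper half: keep b and x. Otherwise shift the low
	 * half up and subtract 16 from the candidate index. Repeat with
	 * narrower halves. */
	s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
	s =  8; if (x <<  8 == 0) s = 0; b -= s; x <<= s;
	s =  4; if (x <<  4 == 0) s = 0; b -= s; x <<= s;
	s =  2; if (x <<  2 == 0) s = 0; b -= s; x <<= s;
	s =  1; if (x <<  1 == 0) s = 0; b -= s;

	return b;
}

int main(void)
{
	assert(ls1bit32(0x00000001) == 0);	/* bit 0 is the lowest set bit */
	assert(ls1bit32(0x80000000) == 31);	/* only bit 31 set */
	assert(ls1bit32(0x00001400) == 10);	/* bits 10 and 12 set -> 10 */
	return 0;
}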
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index 123324ba8c14..250732883488 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -17,28 +17,27 @@ | |||
17 | #include <asm/mipsregs.h> | 17 | #include <asm/mipsregs.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | 19 | ||
20 | static int irq_base; | ||
21 | |||
22 | static inline void unmask_rm7k_irq(unsigned int irq) | 20 | static inline void unmask_rm7k_irq(unsigned int irq) |
23 | { | 21 | { |
24 | set_c0_intcontrol(0x100 << (irq - irq_base)); | 22 | set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); |
25 | } | 23 | } |
26 | 24 | ||
27 | static inline void mask_rm7k_irq(unsigned int irq) | 25 | static inline void mask_rm7k_irq(unsigned int irq) |
28 | { | 26 | { |
29 | clear_c0_intcontrol(0x100 << (irq - irq_base)); | 27 | clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); |
30 | } | 28 | } |
31 | 29 | ||
32 | static struct irq_chip rm7k_irq_controller = { | 30 | static struct irq_chip rm7k_irq_controller = { |
33 | .typename = "RM7000", | 31 | .name = "RM7000", |
34 | .ack = mask_rm7k_irq, | 32 | .ack = mask_rm7k_irq, |
35 | .mask = mask_rm7k_irq, | 33 | .mask = mask_rm7k_irq, |
36 | .mask_ack = mask_rm7k_irq, | 34 | .mask_ack = mask_rm7k_irq, |
37 | .unmask = unmask_rm7k_irq, | 35 | .unmask = unmask_rm7k_irq, |
38 | }; | 36 | }; |
39 | 37 | ||
40 | void __init rm7k_cpu_irq_init(int base) | 38 | void __init rm7k_cpu_irq_init(void) |
41 | { | 39 | { |
40 | int base = RM7K_CPU_IRQ_BASE; | ||
42 | int i; | 41 | int i; |
43 | 42 | ||
44 | clear_c0_intcontrol(0x00000f00); /* Mask all */ | 43 | clear_c0_intcontrol(0x00000f00); /* Mask all */ |
@@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base) | |||
46 | for (i = base; i < base + 4; i++) | 45 | for (i = base; i < base + 4; i++) |
47 | set_irq_chip_and_handler(i, &rm7k_irq_controller, | 46 | set_irq_chip_and_handler(i, &rm7k_irq_controller, |
48 | handle_level_irq); | 47 | handle_level_irq); |
49 | |||
50 | irq_base = base; | ||
51 | } | 48 | } |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index 0e6f4c5349d2..ae83d2df6f31 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
@@ -18,16 +18,14 @@ | |||
18 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
20 | 20 | ||
21 | static int irq_base; | ||
22 | |||
23 | static inline void unmask_rm9k_irq(unsigned int irq) | 21 | static inline void unmask_rm9k_irq(unsigned int irq) |
24 | { | 22 | { |
25 | set_c0_intcontrol(0x1000 << (irq - irq_base)); | 23 | set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); |
26 | } | 24 | } |
27 | 25 | ||
28 | static inline void mask_rm9k_irq(unsigned int irq) | 26 | static inline void mask_rm9k_irq(unsigned int irq) |
29 | { | 27 | { |
30 | clear_c0_intcontrol(0x1000 << (irq - irq_base)); | 28 | clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); |
31 | } | 29 | } |
32 | 30 | ||
33 | static inline void rm9k_cpu_irq_enable(unsigned int irq) | 31 | static inline void rm9k_cpu_irq_enable(unsigned int irq) |
@@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq) | |||
39 | local_irq_restore(flags); | 37 | local_irq_restore(flags); |
40 | } | 38 | } |
41 | 39 | ||
42 | static void rm9k_cpu_irq_disable(unsigned int irq) | ||
43 | { | ||
44 | unsigned long flags; | ||
45 | |||
46 | local_irq_save(flags); | ||
47 | mask_rm9k_irq(irq); | ||
48 | local_irq_restore(flags); | ||
49 | } | ||
50 | |||
51 | /* | 40 | /* |
52 | * Performance counter interrupts are global on all processors. | 41 | * Performance counter interrupts are global on all processors. |
53 | */ | 42 | */ |
@@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq) | |||
81 | } | 70 | } |
82 | 71 | ||
83 | static struct irq_chip rm9k_irq_controller = { | 72 | static struct irq_chip rm9k_irq_controller = { |
84 | .typename = "RM9000", | 73 | .name = "RM9000", |
85 | .ack = mask_rm9k_irq, | 74 | .ack = mask_rm9k_irq, |
86 | .mask = mask_rm9k_irq, | 75 | .mask = mask_rm9k_irq, |
87 | .mask_ack = mask_rm9k_irq, | 76 | .mask_ack = mask_rm9k_irq, |
@@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = { | |||
89 | }; | 78 | }; |
90 | 79 | ||
91 | static struct irq_chip rm9k_perfcounter_irq = { | 80 | static struct irq_chip rm9k_perfcounter_irq = { |
92 | .typename = "RM9000", | 81 | .name = "RM9000", |
93 | .startup = rm9k_perfcounter_irq_startup, | 82 | .startup = rm9k_perfcounter_irq_startup, |
94 | .shutdown = rm9k_perfcounter_irq_shutdown, | 83 | .shutdown = rm9k_perfcounter_irq_shutdown, |
95 | .ack = mask_rm9k_irq, | 84 | .ack = mask_rm9k_irq, |
@@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq; | |||
102 | 91 | ||
103 | EXPORT_SYMBOL(rm9000_perfcount_irq); | 92 | EXPORT_SYMBOL(rm9000_perfcount_irq); |
104 | 93 | ||
105 | void __init rm9k_cpu_irq_init(int base) | 94 | void __init rm9k_cpu_irq_init(void) |
106 | { | 95 | { |
96 | int base = RM9K_CPU_IRQ_BASE; | ||
107 | int i; | 97 | int i; |
108 | 98 | ||
109 | clear_c0_intcontrol(0x0000f000); /* Mask all */ | 99 | clear_c0_intcontrol(0x0000f000); /* Mask all */ |
@@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base) | |||
115 | rm9000_perfcount_irq = base + 1; | 105 | rm9000_perfcount_irq = base + 1; |
116 | set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, | 106 | set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, |
117 | handle_level_irq); | 107 | handle_level_irq); |
118 | |||
119 | irq_base = base; | ||
120 | } | 108 | } |
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index fcc86b96ccf6..7b66e03b5899 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * Don't even think about using this on SMP. You have been warned. | 25 | * Don't even think about using this on SMP. You have been warned. |
26 | * | 26 | * |
27 | * This file exports one global function: | 27 | * This file exports one global function: |
28 | * void mips_cpu_irq_init(int irq_base); | 28 | * void mips_cpu_irq_init(void); |
29 | */ | 29 | */ |
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
@@ -36,22 +36,20 @@ | |||
36 | #include <asm/mipsmtregs.h> | 36 | #include <asm/mipsmtregs.h> |
37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
38 | 38 | ||
39 | static int mips_cpu_irq_base; | ||
40 | |||
41 | static inline void unmask_mips_irq(unsigned int irq) | 39 | static inline void unmask_mips_irq(unsigned int irq) |
42 | { | 40 | { |
43 | set_c0_status(0x100 << (irq - mips_cpu_irq_base)); | 41 | set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); |
44 | irq_enable_hazard(); | 42 | irq_enable_hazard(); |
45 | } | 43 | } |
46 | 44 | ||
47 | static inline void mask_mips_irq(unsigned int irq) | 45 | static inline void mask_mips_irq(unsigned int irq) |
48 | { | 46 | { |
49 | clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); | 47 | clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); |
50 | irq_disable_hazard(); | 48 | irq_disable_hazard(); |
51 | } | 49 | } |
52 | 50 | ||
53 | static struct irq_chip mips_cpu_irq_controller = { | 51 | static struct irq_chip mips_cpu_irq_controller = { |
54 | .typename = "MIPS", | 52 | .name = "MIPS", |
55 | .ack = mask_mips_irq, | 53 | .ack = mask_mips_irq, |
56 | .mask = mask_mips_irq, | 54 | .mask = mask_mips_irq, |
57 | .mask_ack = mask_mips_irq, | 55 | .mask_ack = mask_mips_irq, |
@@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) | |||
70 | { | 68 | { |
71 | unsigned int vpflags = dvpe(); | 69 | unsigned int vpflags = dvpe(); |
72 | 70 | ||
73 | clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); | 71 | clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); |
74 | evpe(vpflags); | 72 | evpe(vpflags); |
75 | unmask_mips_mt_irq(irq); | 73 | unmask_mips_mt_irq(irq); |
76 | 74 | ||
@@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) | |||
84 | static void mips_mt_cpu_irq_ack(unsigned int irq) | 82 | static void mips_mt_cpu_irq_ack(unsigned int irq) |
85 | { | 83 | { |
86 | unsigned int vpflags = dvpe(); | 84 | unsigned int vpflags = dvpe(); |
87 | clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); | 85 | clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); |
88 | evpe(vpflags); | 86 | evpe(vpflags); |
89 | mask_mips_mt_irq(irq); | 87 | mask_mips_mt_irq(irq); |
90 | } | 88 | } |
91 | 89 | ||
92 | static struct irq_chip mips_mt_cpu_irq_controller = { | 90 | static struct irq_chip mips_mt_cpu_irq_controller = { |
93 | .typename = "MIPS", | 91 | .name = "MIPS", |
94 | .startup = mips_mt_cpu_irq_startup, | 92 | .startup = mips_mt_cpu_irq_startup, |
95 | .ack = mips_mt_cpu_irq_ack, | 93 | .ack = mips_mt_cpu_irq_ack, |
96 | .mask = mask_mips_mt_irq, | 94 | .mask = mask_mips_mt_irq, |
@@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = { | |||
99 | .eoi = unmask_mips_mt_irq, | 97 | .eoi = unmask_mips_mt_irq, |
100 | }; | 98 | }; |
101 | 99 | ||
102 | void __init mips_cpu_irq_init(int irq_base) | 100 | void __init mips_cpu_irq_init(void) |
103 | { | 101 | { |
102 | int irq_base = MIPS_CPU_IRQ_BASE; | ||
104 | int i; | 103 | int i; |
105 | 104 | ||
106 | /* Mask interrupts. */ | 105 | /* Mask interrupts. */ |
@@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base) | |||
118 | for (i = irq_base + 2; i < irq_base + 8; i++) | 117 | for (i = irq_base + 2; i < irq_base + 8; i++) |
119 | set_irq_chip_and_handler(i, &mips_cpu_irq_controller, | 118 | set_irq_chip_and_handler(i, &mips_cpu_irq_controller, |
120 | handle_level_irq); | 119 | handle_level_irq); |
121 | |||
122 | mips_cpu_irq_base = irq_base; | ||
123 | } | 120 | } |
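The irq-rm7000, irq-rm9000 and irq_cpu hunks above all apply the same transformation: the file-local irq_base variable disappears, the mask/unmask helpers compute their bit from a compile-time base (RM7K_CPU_IRQ_BASE, RM9K_CPU_IRQ_BASE, MIPS_CPU_IRQ_BASE), and the init functions lose their base argument. A user-space sketch of the idea (illustration only; DEMO_CPU_IRQ_BASE and status_reg are stand-ins invented for the demo, not kernel symbols):

#include <stdio.h>

#define DEMO_CPU_IRQ_BASE 16		/* stands in for MIPS_CPU_IRQ_BASE */

static unsigned int status_reg;		/* stands in for the CP0 Status IM bits */

static void unmask_demo_irq(unsigned int irq)
{
	/* Bit position derives from a constant, no init-time bookkeeping. */
	status_reg |= 0x100 << (irq - DEMO_CPU_IRQ_BASE);
}

static void mask_demo_irq(unsigned int irq)
{
	status_reg &= ~(0x100 << (irq - DEMO_CPU_IRQ_BASE));
}

int main(void)
{
	unmask_demo_irq(DEMO_CPU_IRQ_BASE + 2);
	printf("status after unmask: %#x\n", status_reg);	/* 0x400 */
	mask_demo_irq(DEMO_CPU_IRQ_BASE + 2);
	printf("status after mask:   %#x\n", status_reg);	/* 0 */
	return 0;
}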
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index de3fae260ff8..ca7ad78f4def 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -193,50 +193,6 @@ sysn32_waitid(int which, compat_pid_t pid, | |||
193 | return ret; | 193 | return ret; |
194 | } | 194 | } |
195 | 195 | ||
196 | struct sysinfo32 { | ||
197 | s32 uptime; | ||
198 | u32 loads[3]; | ||
199 | u32 totalram; | ||
200 | u32 freeram; | ||
201 | u32 sharedram; | ||
202 | u32 bufferram; | ||
203 | u32 totalswap; | ||
204 | u32 freeswap; | ||
205 | u16 procs; | ||
206 | u32 totalhigh; | ||
207 | u32 freehigh; | ||
208 | u32 mem_unit; | ||
209 | char _f[8]; | ||
210 | }; | ||
211 | |||
212 | asmlinkage int sys32_sysinfo(struct sysinfo32 __user *info) | ||
213 | { | ||
214 | struct sysinfo s; | ||
215 | int ret, err; | ||
216 | mm_segment_t old_fs = get_fs (); | ||
217 | |||
218 | set_fs (KERNEL_DS); | ||
219 | ret = sys_sysinfo((struct sysinfo __user *)&s); | ||
220 | set_fs (old_fs); | ||
221 | err = put_user (s.uptime, &info->uptime); | ||
222 | err |= __put_user (s.loads[0], &info->loads[0]); | ||
223 | err |= __put_user (s.loads[1], &info->loads[1]); | ||
224 | err |= __put_user (s.loads[2], &info->loads[2]); | ||
225 | err |= __put_user (s.totalram, &info->totalram); | ||
226 | err |= __put_user (s.freeram, &info->freeram); | ||
227 | err |= __put_user (s.sharedram, &info->sharedram); | ||
228 | err |= __put_user (s.bufferram, &info->bufferram); | ||
229 | err |= __put_user (s.totalswap, &info->totalswap); | ||
230 | err |= __put_user (s.freeswap, &info->freeswap); | ||
231 | err |= __put_user (s.procs, &info->procs); | ||
232 | err |= __put_user (s.totalhigh, &info->totalhigh); | ||
233 | err |= __put_user (s.freehigh, &info->freehigh); | ||
234 | err |= __put_user (s.mem_unit, &info->mem_unit); | ||
235 | if (err) | ||
236 | return -EFAULT; | ||
237 | return ret; | ||
238 | } | ||
239 | |||
240 | #define RLIM_INFINITY32 0x7fffffff | 196 | #define RLIM_INFINITY32 0x7fffffff |
241 | #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) | 197 | #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x) |
242 | 198 | ||
@@ -558,7 +514,7 @@ extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf); | |||
558 | asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) | 514 | asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) |
559 | { | 515 | { |
560 | int err; | 516 | int err; |
561 | struct ustat tmp; | 517 | struct ustat tmp; |
562 | struct ustat32 tmp32; | 518 | struct ustat32 tmp32; |
563 | mm_segment_t old_fs = get_fs(); | 519 | mm_segment_t old_fs = get_fs(); |
564 | 520 | ||
@@ -569,11 +525,11 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) | |||
569 | if (err) | 525 | if (err) |
570 | goto out; | 526 | goto out; |
571 | 527 | ||
572 | memset(&tmp32,0,sizeof(struct ustat32)); | 528 | memset(&tmp32,0,sizeof(struct ustat32)); |
573 | tmp32.f_tfree = tmp.f_tfree; | 529 | tmp32.f_tfree = tmp.f_tfree; |
574 | tmp32.f_tinode = tmp.f_tinode; | 530 | tmp32.f_tinode = tmp.f_tinode; |
575 | 531 | ||
576 | err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0; | 532 | err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0; |
577 | 533 | ||
578 | out: | 534 | out: |
579 | return err; | 535 | return err; |
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index c1373a6e668b..ba01800b6018 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c | |||
@@ -3,9 +3,11 @@ | |||
3 | * Copyright (C) 2005 Mips Technologies, Inc | 3 | * Copyright (C) 2005 Mips Technologies, Inc |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/device.h> | ||
6 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
7 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
8 | #include <linux/cpumask.h> | 9 | #include <linux/cpumask.h> |
10 | #include <linux/module.h> | ||
9 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
10 | #include <linux/security.h> | 12 | #include <linux/security.h> |
11 | 13 | ||
@@ -96,6 +98,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, | |||
96 | goto out_unlock; | 98 | goto out_unlock; |
97 | } | 99 | } |
98 | 100 | ||
101 | retval = security_task_setscheduler(p, 0, NULL); | ||
102 | if (retval) | ||
103 | goto out_unlock; | ||
104 | |||
99 | /* Record new user-specified CPU set for future reference */ | 105 | /* Record new user-specified CPU set for future reference */ |
100 | p->thread.user_cpus_allowed = new_mask; | 106 | p->thread.user_cpus_allowed = new_mask; |
101 | 107 | ||
@@ -141,8 +147,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
141 | p = find_process_by_pid(pid); | 147 | p = find_process_by_pid(pid); |
142 | if (!p) | 148 | if (!p) |
143 | goto out_unlock; | 149 | goto out_unlock; |
144 | 150 | retval = security_task_getscheduler(p); | |
145 | retval = 0; | 151 | if (retval) |
152 | goto out_unlock; | ||
146 | 153 | ||
147 | cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); | 154 | cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); |
148 | 155 | ||
@@ -448,3 +455,20 @@ void mt_cflush_release(void) | |||
448 | #endif /* CONFIG_MIPS_MT_SMTC */ | 455 | #endif /* CONFIG_MIPS_MT_SMTC */ |
449 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ | 456 | /* FILL IN VSMP and AP/SP VERSIONS HERE */ |
450 | } | 457 | } |
458 | |||
459 | struct class *mt_class; | ||
460 | |||
461 | static int __init mt_init(void) | ||
462 | { | ||
463 | struct class *mtc; | ||
464 | |||
465 | mtc = class_create(THIS_MODULE, "mt"); | ||
466 | if (IS_ERR(mtc)) | ||
467 | return PTR_ERR(mtc); | ||
468 | |||
469 | mt_class = mtc; | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | subsys_initcall(mt_init); | ||
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 4ed37ba19731..5ddc2e9deecf 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -31,13 +31,13 @@ static const char *cpu_name[] = { | |||
31 | [CPU_R4000PC] = "R4000PC", | 31 | [CPU_R4000PC] = "R4000PC", |
32 | [CPU_R4000SC] = "R4000SC", | 32 | [CPU_R4000SC] = "R4000SC", |
33 | [CPU_R4000MC] = "R4000MC", | 33 | [CPU_R4000MC] = "R4000MC", |
34 | [CPU_R4200] = "R4200", | 34 | [CPU_R4200] = "R4200", |
35 | [CPU_R4400PC] = "R4400PC", | 35 | [CPU_R4400PC] = "R4400PC", |
36 | [CPU_R4400SC] = "R4400SC", | 36 | [CPU_R4400SC] = "R4400SC", |
37 | [CPU_R4400MC] = "R4400MC", | 37 | [CPU_R4400MC] = "R4400MC", |
38 | [CPU_R4600] = "R4600", | 38 | [CPU_R4600] = "R4600", |
39 | [CPU_R6000] = "R6000", | 39 | [CPU_R6000] = "R6000", |
40 | [CPU_R6000A] = "R6000A", | 40 | [CPU_R6000A] = "R6000A", |
41 | [CPU_R8000] = "R8000", | 41 | [CPU_R8000] = "R8000", |
42 | [CPU_R10000] = "R10000", | 42 | [CPU_R10000] = "R10000", |
43 | [CPU_R12000] = "R12000", | 43 | [CPU_R12000] = "R12000", |
@@ -46,14 +46,14 @@ static const char *cpu_name[] = { | |||
46 | [CPU_R4650] = "R4650", | 46 | [CPU_R4650] = "R4650", |
47 | [CPU_R4700] = "R4700", | 47 | [CPU_R4700] = "R4700", |
48 | [CPU_R5000] = "R5000", | 48 | [CPU_R5000] = "R5000", |
49 | [CPU_R5000A] = "R5000A", | 49 | [CPU_R5000A] = "R5000A", |
50 | [CPU_R4640] = "R4640", | 50 | [CPU_R4640] = "R4640", |
51 | [CPU_NEVADA] = "Nevada", | 51 | [CPU_NEVADA] = "Nevada", |
52 | [CPU_RM7000] = "RM7000", | 52 | [CPU_RM7000] = "RM7000", |
53 | [CPU_RM9000] = "RM9000", | 53 | [CPU_RM9000] = "RM9000", |
54 | [CPU_R5432] = "R5432", | 54 | [CPU_R5432] = "R5432", |
55 | [CPU_4KC] = "MIPS 4Kc", | 55 | [CPU_4KC] = "MIPS 4Kc", |
56 | [CPU_5KC] = "MIPS 5Kc", | 56 | [CPU_5KC] = "MIPS 5Kc", |
57 | [CPU_R4310] = "R4310", | 57 | [CPU_R4310] = "R4310", |
58 | [CPU_SB1] = "SiByte SB1", | 58 | [CPU_SB1] = "SiByte SB1", |
59 | [CPU_SB1A] = "SiByte SB1A", | 59 | [CPU_SB1A] = "SiByte SB1A", |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ec8209f3a0c6..04e5b38d327d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -41,10 +41,6 @@ | |||
41 | #include <asm/isadep.h> | 41 | #include <asm/isadep.h> |
42 | #include <asm/inst.h> | 42 | #include <asm/inst.h> |
43 | #include <asm/stacktrace.h> | 43 | #include <asm/stacktrace.h> |
44 | #ifdef CONFIG_MIPS_MT_SMTC | ||
45 | #include <asm/mipsmtregs.h> | ||
46 | extern void smtc_idle_loop_hook(void); | ||
47 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
48 | 44 | ||
49 | /* | 45 | /* |
50 | * The idle thread. There's no useful work to be done, so just try to conserve | 46 | * The idle thread. There's no useful work to be done, so just try to conserve |
@@ -57,6 +53,8 @@ ATTRIB_NORET void cpu_idle(void) | |||
57 | while (1) { | 53 | while (1) { |
58 | while (!need_resched()) { | 54 | while (!need_resched()) { |
59 | #ifdef CONFIG_MIPS_MT_SMTC | 55 | #ifdef CONFIG_MIPS_MT_SMTC |
56 | extern void smtc_idle_loop_hook(void); | ||
57 | |||
60 | smtc_idle_loop_hook(); | 58 | smtc_idle_loop_hook(); |
61 | #endif /* CONFIG_MIPS_MT_SMTC */ | 59 | #endif /* CONFIG_MIPS_MT_SMTC */ |
62 | if (cpu_wait) | 60 | if (cpu_wait) |
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 880fa6e841ee..59c1577ecbb3 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S | |||
@@ -114,6 +114,14 @@ LEAF(_save_fp_context32) | |||
114 | */ | 114 | */ |
115 | LEAF(_restore_fp_context) | 115 | LEAF(_restore_fp_context) |
116 | EX lw t0, SC_FPC_CSR(a0) | 116 | EX lw t0, SC_FPC_CSR(a0) |
117 | |||
118 | /* Fail if the CSR has exceptions pending */ | ||
119 | srl t1, t0, 5 | ||
120 | and t1, t0 | ||
121 | andi t1, 0x1f << 7 | ||
122 | bnez t1, fault | ||
123 | nop | ||
124 | |||
117 | #ifdef CONFIG_64BIT | 125 | #ifdef CONFIG_64BIT |
118 | EX ldc1 $f1, SC_FPREGS+8(a0) | 126 | EX ldc1 $f1, SC_FPREGS+8(a0) |
119 | EX ldc1 $f3, SC_FPREGS+24(a0) | 127 | EX ldc1 $f3, SC_FPREGS+24(a0) |
@@ -157,6 +165,14 @@ LEAF(_restore_fp_context) | |||
157 | LEAF(_restore_fp_context32) | 165 | LEAF(_restore_fp_context32) |
158 | /* Restore an o32 sigcontext. */ | 166 | /* Restore an o32 sigcontext. */ |
159 | EX lw t0, SC32_FPC_CSR(a0) | 167 | EX lw t0, SC32_FPC_CSR(a0) |
168 | |||
169 | /* Fail if the CSR has exceptions pending */ | ||
170 | srl t1, t0, 5 | ||
171 | and t1, t0 | ||
172 | andi t1, 0x1f << 7 | ||
173 | bnez t1, fault | ||
174 | nop | ||
175 | |||
160 | EX ldc1 $f0, SC32_FPREGS+0(a0) | 176 | EX ldc1 $f0, SC32_FPREGS+0(a0) |
161 | EX ldc1 $f2, SC32_FPREGS+16(a0) | 177 | EX ldc1 $f2, SC32_FPREGS+16(a0) |
162 | EX ldc1 $f4, SC32_FPREGS+32(a0) | 178 | EX ldc1 $f4, SC32_FPREGS+32(a0) |
@@ -177,9 +193,10 @@ LEAF(_restore_fp_context32) | |||
177 | jr ra | 193 | jr ra |
178 | li v0, 0 # success | 194 | li v0, 0 # success |
179 | END(_restore_fp_context32) | 195 | END(_restore_fp_context32) |
180 | .set reorder | ||
181 | #endif | 196 | #endif |
182 | 197 | ||
198 | .set reorder | ||
199 | |||
183 | .type fault@function | 200 | .type fault@function |
184 | .ent fault | 201 | .ent fault |
185 | fault: li v0, -EFAULT # failure | 202 | fault: li v0, -EFAULT # failure |
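The new test added to both restore paths above rejects a saved FPU control/status word that would immediately re-raise an enabled exception once restored: shifting the CSR right by 5 lines the cause bits up with the corresponding enable bits (7-11), the AND keeps only exceptions that are both raised and enabled, and the mask 0x1f << 7 discards everything else. A small C rendering of the same check (illustration only, not part of the patch):

#include <assert.h>

/* Mirrors "srl t1, t0, 5; and t1, t0; andi t1, 0x1f << 7; bnez t1, fault":
 * nonzero means some enabled FP exception already has its cause bit set. */
static int fcsr_rejected(unsigned int fcsr)
{
	return ((fcsr >> 5) & fcsr & (0x1f << 7)) != 0;
}

int main(void)
{
	assert(fcsr_rejected((1u << 12) | (1u << 7)));	/* raised and enabled: fault */
	assert(!fcsr_rejected(1u << 12));		/* raised but masked: ok */
	assert(!fcsr_rejected(1u << 7));		/* enabled but not raised: ok */
	return 0;
}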
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 5a99e3e0c96d..d92c48e0d7a6 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c | |||
@@ -17,6 +17,7 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/device.h> | ||
20 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
@@ -34,6 +35,7 @@ | |||
34 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
35 | #include <linux/wait.h> | 36 | #include <linux/wait.h> |
36 | #include <asm/mipsmtregs.h> | 37 | #include <asm/mipsmtregs.h> |
38 | #include <asm/mips_mt.h> | ||
37 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
38 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
39 | #include <asm/cpu.h> | 41 | #include <asm/cpu.h> |
@@ -63,7 +65,7 @@ extern void *vpe_get_shared(int index); | |||
63 | 65 | ||
64 | static void rtlx_dispatch(void) | 66 | static void rtlx_dispatch(void) |
65 | { | 67 | { |
66 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ); | 68 | do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); |
67 | } | 69 | } |
68 | 70 | ||
69 | 71 | ||
@@ -476,7 +478,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer, | |||
476 | return rtlx_write(minor, (void *)buffer, count, 1); | 478 | return rtlx_write(minor, (void *)buffer, count, 1); |
477 | } | 479 | } |
478 | 480 | ||
479 | static struct file_operations rtlx_fops = { | 481 | static const struct file_operations rtlx_fops = { |
480 | .owner = THIS_MODULE, | 482 | .owner = THIS_MODULE, |
481 | .open = file_open, | 483 | .open = file_open, |
482 | .release = file_release, | 484 | .release = file_release, |
@@ -491,14 +493,15 @@ static struct irqaction rtlx_irq = { | |||
491 | .name = "RTLX", | 493 | .name = "RTLX", |
492 | }; | 494 | }; |
493 | 495 | ||
494 | static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; | 496 | static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; |
495 | 497 | ||
496 | static char register_chrdev_failed[] __initdata = | 498 | static char register_chrdev_failed[] __initdata = |
497 | KERN_ERR "rtlx_module_init: unable to register device\n"; | 499 | KERN_ERR "rtlx_module_init: unable to register device\n"; |
498 | 500 | ||
499 | static int rtlx_module_init(void) | 501 | static int rtlx_module_init(void) |
500 | { | 502 | { |
501 | int i; | 503 | struct device *dev; |
504 | int i, err; | ||
502 | 505 | ||
503 | major = register_chrdev(0, module_name, &rtlx_fops); | 506 | major = register_chrdev(0, module_name, &rtlx_fops); |
504 | if (major < 0) { | 507 | if (major < 0) { |
@@ -511,6 +514,13 @@ static int rtlx_module_init(void) | |||
511 | init_waitqueue_head(&channel_wqs[i].rt_queue); | 514 | init_waitqueue_head(&channel_wqs[i].rt_queue); |
512 | init_waitqueue_head(&channel_wqs[i].lx_queue); | 515 | init_waitqueue_head(&channel_wqs[i].lx_queue); |
513 | channel_wqs[i].in_open = 0; | 516 | channel_wqs[i].in_open = 0; |
517 | |||
518 | dev = device_create(mt_class, NULL, MKDEV(major, i), | ||
519 | "%s%d", module_name, i); | ||
520 | if (IS_ERR(dev)) { | ||
521 | err = PTR_ERR(dev); | ||
522 | goto out_chrdev; | ||
523 | } | ||
514 | } | 524 | } |
515 | 525 | ||
516 | /* set up notifiers */ | 526 | /* set up notifiers */ |
@@ -525,10 +535,21 @@ static int rtlx_module_init(void) | |||
525 | setup_irq(rtlx_irq_num, &rtlx_irq); | 535 | setup_irq(rtlx_irq_num, &rtlx_irq); |
526 | 536 | ||
527 | return 0; | 537 | return 0; |
538 | |||
539 | out_chrdev: | ||
540 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
541 | device_destroy(mt_class, MKDEV(major, i)); | ||
542 | |||
543 | return err; | ||
528 | } | 544 | } |
529 | 545 | ||
530 | static void __exit rtlx_module_exit(void) | 546 | static void __exit rtlx_module_exit(void) |
531 | { | 547 | { |
548 | int i; | ||
549 | |||
550 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
551 | device_destroy(mt_class, MKDEV(major, i)); | ||
552 | |||
532 | unregister_chrdev(major, module_name); | 553 | unregister_chrdev(major, module_name); |
533 | } | 554 | } |
534 | 555 | ||
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index a7bff2a54723..ee8802b59758 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -217,7 +217,7 @@ EXPORT(sysn32_call_table) | |||
217 | PTR sys32_gettimeofday | 217 | PTR sys32_gettimeofday |
218 | PTR compat_sys_getrlimit /* 6095 */ | 218 | PTR compat_sys_getrlimit /* 6095 */ |
219 | PTR compat_sys_getrusage | 219 | PTR compat_sys_getrusage |
220 | PTR sys32_sysinfo | 220 | PTR compat_sys_sysinfo |
221 | PTR compat_sys_times | 221 | PTR compat_sys_times |
222 | PTR sys32_ptrace | 222 | PTR sys32_ptrace |
223 | PTR sys_getuid /* 6100 */ | 223 | PTR sys_getuid /* 6100 */ |
@@ -384,7 +384,7 @@ EXPORT(sysn32_call_table) | |||
384 | PTR sys_readlinkat | 384 | PTR sys_readlinkat |
385 | PTR sys_fchmodat | 385 | PTR sys_fchmodat |
386 | PTR sys_faccessat | 386 | PTR sys_faccessat |
387 | PTR sys_pselect6 | 387 | PTR compat_sys_pselect6 |
388 | PTR sys_ppoll /* 6265 */ | 388 | PTR sys_ppoll /* 6265 */ |
389 | PTR sys_unshare | 389 | PTR sys_unshare |
390 | PTR sys_splice | 390 | PTR sys_splice |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index e91379c1be1d..c5f590ca99b0 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -321,7 +321,7 @@ sys_call_table: | |||
321 | PTR sys_ni_syscall /* sys_vm86 */ | 321 | PTR sys_ni_syscall /* sys_vm86 */ |
322 | PTR compat_sys_wait4 | 322 | PTR compat_sys_wait4 |
323 | PTR sys_swapoff /* 4115 */ | 323 | PTR sys_swapoff /* 4115 */ |
324 | PTR sys32_sysinfo | 324 | PTR compat_sys_sysinfo |
325 | PTR sys32_ipc | 325 | PTR sys32_ipc |
326 | PTR sys_fsync | 326 | PTR sys_fsync |
327 | PTR sys32_sigreturn | 327 | PTR sys32_sigreturn |
@@ -506,7 +506,7 @@ sys_call_table: | |||
506 | PTR sys_readlinkat | 506 | PTR sys_readlinkat |
507 | PTR sys_fchmodat | 507 | PTR sys_fchmodat |
508 | PTR sys_faccessat /* 4300 */ | 508 | PTR sys_faccessat /* 4300 */ |
509 | PTR sys_pselect6 | 509 | PTR compat_sys_pselect6 |
510 | PTR sys_ppoll | 510 | PTR sys_ppoll |
511 | PTR sys_unshare | 511 | PTR sys_unshare |
512 | PTR sys_splice | 512 | PTR sys_splice |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 89440a0d8528..394540fad769 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -271,8 +271,7 @@ static void __init bootmem_init(void) | |||
271 | static void __init bootmem_init(void) | 271 | static void __init bootmem_init(void) |
272 | { | 272 | { |
273 | unsigned long reserved_end; | 273 | unsigned long reserved_end; |
274 | unsigned long highest = 0; | 274 | unsigned long mapstart = ~0UL; |
275 | unsigned long mapstart = -1UL; | ||
276 | unsigned long bootmap_size; | 275 | unsigned long bootmap_size; |
277 | int i; | 276 | int i; |
278 | 277 | ||
@@ -284,6 +283,13 @@ static void __init bootmem_init(void) | |||
284 | reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); | 283 | reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); |
285 | 284 | ||
286 | /* | 285 | /* |
286 | * max_low_pfn is not a number of pages. The number of pages | ||
287 | * of the system is given by 'max_low_pfn - min_low_pfn'. | ||
288 | */ | ||
289 | min_low_pfn = ~0UL; | ||
290 | max_low_pfn = 0; | ||
291 | |||
292 | /* | ||
287 | * Find the highest page frame number we have available. | 293 | * Find the highest page frame number we have available. |
288 | */ | 294 | */ |
289 | for (i = 0; i < boot_mem_map.nr_map; i++) { | 295 | for (i = 0; i < boot_mem_map.nr_map; i++) { |
@@ -296,8 +302,10 @@ static void __init bootmem_init(void) | |||
296 | end = PFN_DOWN(boot_mem_map.map[i].addr | 302 | end = PFN_DOWN(boot_mem_map.map[i].addr |
297 | + boot_mem_map.map[i].size); | 303 | + boot_mem_map.map[i].size); |
298 | 304 | ||
299 | if (end > highest) | 305 | if (end > max_low_pfn) |
300 | highest = end; | 306 | max_low_pfn = end; |
307 | if (start < min_low_pfn) | ||
308 | min_low_pfn = start; | ||
301 | if (end <= reserved_end) | 309 | if (end <= reserved_end) |
302 | continue; | 310 | continue; |
303 | if (start >= mapstart) | 311 | if (start >= mapstart) |
@@ -305,22 +313,36 @@ static void __init bootmem_init(void) | |||
305 | mapstart = max(reserved_end, start); | 313 | mapstart = max(reserved_end, start); |
306 | } | 314 | } |
307 | 315 | ||
316 | if (min_low_pfn >= max_low_pfn) | ||
317 | panic("Incorrect memory mapping !!!"); | ||
318 | if (min_low_pfn > ARCH_PFN_OFFSET) { | ||
319 | printk(KERN_INFO | ||
320 | "Wasting %lu bytes for tracking %lu unused pages\n", | ||
321 | (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page), | ||
322 | min_low_pfn - ARCH_PFN_OFFSET); | ||
323 | } else if (min_low_pfn < ARCH_PFN_OFFSET) { | ||
324 | printk(KERN_INFO | ||
325 | "%lu free pages won't be used\n", | ||
326 | ARCH_PFN_OFFSET - min_low_pfn); | ||
327 | } | ||
328 | min_low_pfn = ARCH_PFN_OFFSET; | ||
329 | |||
308 | /* | 330 | /* |
309 | * Determine low and high memory ranges | 331 | * Determine low and high memory ranges |
310 | */ | 332 | */ |
311 | if (highest > PFN_DOWN(HIGHMEM_START)) { | 333 | if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) { |
312 | #ifdef CONFIG_HIGHMEM | 334 | #ifdef CONFIG_HIGHMEM |
313 | highstart_pfn = PFN_DOWN(HIGHMEM_START); | 335 | highstart_pfn = PFN_DOWN(HIGHMEM_START); |
314 | highend_pfn = highest; | 336 | highend_pfn = max_low_pfn; |
315 | #endif | 337 | #endif |
316 | highest = PFN_DOWN(HIGHMEM_START); | 338 | max_low_pfn = PFN_DOWN(HIGHMEM_START); |
317 | } | 339 | } |
318 | 340 | ||
319 | /* | 341 | /* |
320 | * Initialize the boot-time allocator with low memory only. | 342 | * Initialize the boot-time allocator with low memory only. |
321 | */ | 343 | */ |
322 | bootmap_size = init_bootmem(mapstart, highest); | 344 | bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart, |
323 | 345 | min_low_pfn, max_low_pfn); | |
324 | /* | 346 | /* |
325 | * Register fully available low RAM pages with the bootmem allocator. | 347 | * Register fully available low RAM pages with the bootmem allocator. |
326 | */ | 348 | */ |
@@ -430,7 +452,7 @@ static void __init arch_mem_init(char **cmdline_p) | |||
430 | print_memory_map(); | 452 | print_memory_map(); |
431 | 453 | ||
432 | strlcpy(command_line, arcs_cmdline, sizeof(command_line)); | 454 | strlcpy(command_line, arcs_cmdline, sizeof(command_line)); |
433 | strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); | 455 | strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); |
434 | 456 | ||
435 | *cmdline_p = command_line; | 457 | *cmdline_p = command_line; |
436 | 458 | ||
@@ -507,9 +529,9 @@ void __init setup_arch(char **cmdline_p) | |||
507 | 529 | ||
508 | #if defined(CONFIG_VT) | 530 | #if defined(CONFIG_VT) |
509 | #if defined(CONFIG_VGA_CONSOLE) | 531 | #if defined(CONFIG_VGA_CONSOLE) |
510 | conswitchp = &vga_con; | 532 | conswitchp = &vga_con; |
511 | #elif defined(CONFIG_DUMMY_CONSOLE) | 533 | #elif defined(CONFIG_DUMMY_CONSOLE) |
512 | conswitchp = &dummy_con; | 534 | conswitchp = &dummy_con; |
513 | #endif | 535 | #endif |
514 | #endif | 536 | #endif |
515 | 537 | ||
@@ -541,3 +563,6 @@ int __init dsp_disable(char *s) | |||
541 | } | 563 | } |
542 | 564 | ||
543 | __setup("nodsp", dsp_disable); | 565 | __setup("nodsp", dsp_disable); |
566 | |||
567 | unsigned long kernelsp[NR_CPUS]; | ||
568 | unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; | ||
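The bootmem_init() hunks above replace the local "highest" variable with real min_low_pfn/max_low_pfn bookkeeping, scanning the boot memory map for the lowest and highest page frame numbers and reporting how much struct page storage is spent on frames below ARCH_PFN_OFFSET. A user-space sketch of that accounting (illustration only; the memory map, the ARCH_PFN_OFFSET value and the page-struct size are made-up stand-ins for the demo):

#include <stdio.h>

#define ARCH_PFN_OFFSET		0UL	/* assumed value for the demo */
#define PAGE_STRUCT_SIZE	32UL	/* stand-in for sizeof(struct page) */

struct mem_range { unsigned long start_pfn, end_pfn; };

int main(void)
{
	struct mem_range map[] = { { 1, 256 }, { 512, 1024 } };
	unsigned long min_low_pfn = ~0UL, max_low_pfn = 0;
	unsigned int i;

	/* Track the lowest start and highest end over all map entries. */
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].end_pfn > max_low_pfn)
			max_low_pfn = map[i].end_pfn;
		if (map[i].start_pfn < min_low_pfn)
			min_low_pfn = map[i].start_pfn;
	}

	if (min_low_pfn > ARCH_PFN_OFFSET)
		printf("Wasting %lu bytes for tracking %lu unused pages\n",
		       (min_low_pfn - ARCH_PFN_OFFSET) * PAGE_STRUCT_SIZE,
		       min_low_pfn - ARCH_PFN_OFFSET);
	else if (min_low_pfn < ARCH_PFN_OFFSET)
		printf("%lu free pages won't be used\n",
		       ARCH_PFN_OFFSET - min_low_pfn);

	return 0;
}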
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index b1f09d54ebe6..9a8abd67ec5c 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h | |||
@@ -8,169 +8,57 @@ | |||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef __SIGNAL_COMMON_H | ||
12 | #define __SIGNAL_COMMON_H | ||
11 | 13 | ||
12 | static inline int | 14 | /* #define DEBUG_SIG */ |
13 | setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | ||
14 | { | ||
15 | int err = 0; | ||
16 | 15 | ||
17 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | 16 | #ifdef DEBUG_SIG |
17 | # define DEBUGP(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ##args) | ||
18 | #else | ||
19 | # define DEBUGP(fmt, args...) | ||
20 | #endif | ||
18 | 21 | ||
19 | #define save_gp_reg(i) do { \ | 22 | /* |
20 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \ | 23 | * Horribly complicated - with the bloody RM9000 workarounds enabled |
21 | } while(0) | 24 | * the signal trampolines is moving to the end of the structure so we can |
22 | __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2); | 25 | * increase the alignment without breaking software compatibility. |
23 | save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6); | 26 | */ |
24 | save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10); | 27 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 |
25 | save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); | ||
26 | save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18); | ||
27 | save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22); | ||
28 | save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); | ||
29 | save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30); | ||
30 | save_gp_reg(31); | ||
31 | #undef save_gp_reg | ||
32 | |||
33 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
34 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
35 | if (cpu_has_dsp) { | ||
36 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
37 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
38 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
39 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
40 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
41 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
42 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
43 | } | ||
44 | |||
45 | err |= __put_user(!!used_math(), &sc->sc_used_math); | ||
46 | |||
47 | if (!used_math()) | ||
48 | goto out; | ||
49 | |||
50 | /* | ||
51 | * Save FPU state to signal context. Signal handler will "inherit" | ||
52 | * current FPU state. | ||
53 | */ | ||
54 | preempt_disable(); | ||
55 | |||
56 | if (!is_fpu_owner()) { | ||
57 | own_fpu(); | ||
58 | restore_fp(current); | ||
59 | } | ||
60 | err |= save_fp_context(sc); | ||
61 | |||
62 | preempt_enable(); | ||
63 | |||
64 | out: | ||
65 | return err; | ||
66 | } | ||
67 | |||
68 | static inline int | ||
69 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | ||
70 | { | ||
71 | unsigned int used_math; | ||
72 | unsigned long treg; | ||
73 | int err = 0; | ||
74 | |||
75 | /* Always make any pending restarted system calls return -EINTR */ | ||
76 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
77 | |||
78 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
79 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
80 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
81 | if (cpu_has_dsp) { | ||
82 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
83 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
84 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
85 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
86 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
87 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
88 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
89 | } | ||
90 | |||
91 | #define restore_gp_reg(i) do { \ | ||
92 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \ | ||
93 | } while(0) | ||
94 | restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3); | ||
95 | restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6); | ||
96 | restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9); | ||
97 | restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12); | ||
98 | restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15); | ||
99 | restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18); | ||
100 | restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21); | ||
101 | restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24); | ||
102 | restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27); | ||
103 | restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30); | ||
104 | restore_gp_reg(31); | ||
105 | #undef restore_gp_reg | ||
106 | 28 | ||
107 | err |= __get_user(used_math, &sc->sc_used_math); | 29 | struct sigframe { |
108 | conditional_used_math(used_math); | 30 | u32 sf_ass[4]; /* argument save space for o32 */ |
31 | u32 sf_code[2]; /* signal trampoline */ | ||
32 | struct sigcontext sf_sc; | ||
33 | sigset_t sf_mask; | ||
34 | }; | ||
109 | 35 | ||
110 | preempt_disable(); | 36 | #else /* ICACHE_REFILLS_WORKAROUND_WAR */ |
111 | 37 | ||
112 | if (used_math()) { | 38 | struct sigframe { |
113 | /* restore fpu context if we have used it before */ | 39 | u32 sf_ass[4]; /* argument save space for o32 */ |
114 | own_fpu(); | 40 | u32 sf_pad[2]; |
115 | err |= restore_fp_context(sc); | 41 | struct sigcontext sf_sc; /* hw context */ |
116 | } else { | 42 | sigset_t sf_mask; |
117 | /* signal handler may have used FPU. Give it up. */ | 43 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ |
118 | lose_fpu(); | 44 | }; |
119 | } | ||
120 | 45 | ||
121 | preempt_enable(); | 46 | #endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ |
122 | 47 | ||
123 | return err; | 48 | /* |
124 | } | 49 | * handle hardware context |
50 | */ | ||
51 | extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); | ||
52 | extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); | ||
125 | 53 | ||
126 | /* | 54 | /* |
127 | * Determine which stack to use.. | 55 | * Determine which stack to use.. |
128 | */ | 56 | */ |
129 | static inline void __user * | 57 | extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
130 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) | 58 | size_t frame_size); |
131 | { | 59 | /* |
132 | unsigned long sp; | 60 | * install trampoline code to get back from the sig handler |
133 | 61 | */ | |
134 | /* Default to using normal stack */ | 62 | extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall); |
135 | sp = regs->regs[29]; | ||
136 | |||
137 | /* | ||
138 | * FPU emulator may have it's own trampoline active just | ||
139 | * above the user stack, 16-bytes before the next lowest | ||
140 | * 16 byte boundary. Try to avoid trashing it. | ||
141 | */ | ||
142 | sp -= 32; | ||
143 | |||
144 | /* This is the X/Open sanctioned signal stack switching. */ | ||
145 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) | ||
146 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
147 | |||
148 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); | ||
149 | } | ||
150 | |||
151 | static inline int install_sigtramp(unsigned int __user *tramp, | ||
152 | unsigned int syscall) | ||
153 | { | ||
154 | int err; | ||
155 | |||
156 | /* | ||
157 | * Set up the return code ... | ||
158 | * | ||
159 | * li v0, __NR__foo_sigreturn | ||
160 | * syscall | ||
161 | */ | ||
162 | |||
163 | err = __put_user(0x24020000 + syscall, tramp + 0); | ||
164 | err |= __put_user(0x0000000c , tramp + 1); | ||
165 | if (ICACHE_REFILLS_WORKAROUND_WAR) { | ||
166 | err |= __put_user(0, tramp + 2); | ||
167 | err |= __put_user(0, tramp + 3); | ||
168 | err |= __put_user(0, tramp + 4); | ||
169 | err |= __put_user(0, tramp + 5); | ||
170 | err |= __put_user(0, tramp + 6); | ||
171 | err |= __put_user(0, tramp + 7); | ||
172 | } | ||
173 | flush_cache_sigtramp((unsigned long) tramp); | ||
174 | 63 | ||
175 | return err; | 64 | #endif /* __SIGNAL_COMMON_H */ |
176 | } | ||
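The two sigframe layouts kept by the new signal-common.h (and mirrored by rt_sigframe in signal.c below) differ only in where the trampoline words live: under the RM9000 icache workaround the trampoline moves behind sf_sc/sf_mask and gets cacheline alignment, while sf_pad[2] keeps sf_sc and sf_mask at the same offsets as in the plain layout. A quick offset check (illustration only; the 32-byte alignment and the stand-in sigcontext/sigset sizes are assumptions for the demo, not the real ABI sizes):

#include <stdio.h>
#include <stddef.h>

typedef unsigned int u32;
struct sigcontext { unsigned long dummy[32]; };		/* stand-in */
typedef struct { unsigned long sig[2]; } demo_sigset_t;	/* stand-in */

struct sigframe_plain {
	u32 sf_ass[4];
	u32 sf_code[2];			/* trampoline right after the save area */
	struct sigcontext sf_sc;
	demo_sigset_t sf_mask;
};

struct sigframe_war {
	u32 sf_ass[4];
	u32 sf_pad[2];			/* keeps sf_sc/sf_mask offsets unchanged */
	struct sigcontext sf_sc;
	demo_sigset_t sf_mask;
	u32 sf_code[8] __attribute__((aligned(32)));	/* aligned trampoline */
};

int main(void)
{
	printf("plain: sf_sc@%zu sf_mask@%zu sf_code@%zu\n",
	       offsetof(struct sigframe_plain, sf_sc),
	       offsetof(struct sigframe_plain, sf_mask),
	       offsetof(struct sigframe_plain, sf_code));
	printf("war:   sf_sc@%zu sf_mask@%zu sf_code@%zu\n",
	       offsetof(struct sigframe_war, sf_sc),
	       offsetof(struct sigframe_war, sf_mask),
	       offsetof(struct sigframe_war, sf_code));
	return 0;
}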
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index b9d358e05214..54398af2371f 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -34,18 +34,174 @@ | |||
34 | 34 | ||
35 | #include "signal-common.h" | 35 | #include "signal-common.h" |
36 | 36 | ||
37 | #define DEBUG_SIG 0 | ||
38 | |||
39 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 37 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
40 | 38 | ||
39 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 | ||
40 | |||
41 | struct rt_sigframe { | ||
42 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
43 | u32 rs_code[2]; /* signal trampoline */ | ||
44 | struct siginfo rs_info; | ||
45 | struct ucontext rs_uc; | ||
46 | }; | ||
47 | |||
48 | #else | ||
49 | |||
50 | struct rt_sigframe { | ||
51 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
52 | u32 rs_pad[2]; | ||
53 | struct siginfo rs_info; | ||
54 | struct ucontext rs_uc; | ||
55 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
56 | }; | ||
57 | |||
58 | #endif | ||
59 | |||
60 | /* | ||
61 | * Helper routines | ||
62 | */ | ||
63 | int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | ||
64 | { | ||
65 | int err = 0; | ||
66 | int i; | ||
67 | |||
68 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | ||
69 | |||
70 | err |= __put_user(0, &sc->sc_regs[0]); | ||
71 | for (i = 1; i < 32; i++) | ||
72 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); | ||
73 | |||
74 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
75 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
76 | if (cpu_has_dsp) { | ||
77 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
78 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
79 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
80 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
81 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
82 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
83 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
84 | } | ||
85 | |||
86 | err |= __put_user(!!used_math(), &sc->sc_used_math); | ||
87 | |||
88 | if (used_math()) { | ||
89 | /* | ||
90 | * Save FPU state to signal context. Signal handler | ||
91 | * will "inherit" current FPU state. | ||
92 | */ | ||
93 | preempt_disable(); | ||
94 | |||
95 | if (!is_fpu_owner()) { | ||
96 | own_fpu(); | ||
97 | restore_fp(current); | ||
98 | } | ||
99 | err |= save_fp_context(sc); | ||
100 | |||
101 | preempt_enable(); | ||
102 | } | ||
103 | return err; | ||
104 | } | ||
105 | |||
106 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | ||
107 | { | ||
108 | unsigned int used_math; | ||
109 | unsigned long treg; | ||
110 | int err = 0; | ||
111 | int i; | ||
112 | |||
113 | /* Always make any pending restarted system calls return -EINTR */ | ||
114 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
115 | |||
116 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
117 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
118 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
119 | if (cpu_has_dsp) { | ||
120 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
121 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
122 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
123 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
124 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
125 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
126 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
127 | } | ||
128 | |||
129 | for (i = 1; i < 32; i++) | ||
130 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); | ||
131 | |||
132 | err |= __get_user(used_math, &sc->sc_used_math); | ||
133 | conditional_used_math(used_math); | ||
134 | |||
135 | preempt_disable(); | ||
136 | |||
137 | if (used_math()) { | ||
138 | /* restore fpu context if we have used it before */ | ||
139 | own_fpu(); | ||
140 | err |= restore_fp_context(sc); | ||
141 | } else { | ||
142 | /* signal handler may have used FPU. Give it up. */ | ||
143 | lose_fpu(); | ||
144 | } | ||
145 | |||
146 | preempt_enable(); | ||
147 | |||
148 | return err; | ||
149 | } | ||
150 | |||
151 | void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | ||
152 | size_t frame_size) | ||
153 | { | ||
154 | unsigned long sp; | ||
155 | |||
156 | /* Default to using normal stack */ | ||
157 | sp = regs->regs[29]; | ||
158 | |||
159 | /* | ||
160 | * FPU emulator may have it's own trampoline active just | ||
161 | * above the user stack, 16-bytes before the next lowest | ||
162 | * 16 byte boundary. Try to avoid trashing it. | ||
163 | */ | ||
164 | sp -= 32; | ||
165 | |||
166 | /* This is the X/Open sanctioned signal stack switching. */ | ||
167 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) | ||
168 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
169 | |||
170 | return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); | ||
171 | } | ||
172 | |||
173 | int install_sigtramp(unsigned int __user *tramp, unsigned int syscall) | ||
174 | { | ||
175 | int err; | ||
176 | |||
177 | /* | ||
178 | * Set up the return code ... | ||
179 | * | ||
180 | * li v0, __NR__foo_sigreturn | ||
181 | * syscall | ||
182 | */ | ||
183 | |||
184 | err = __put_user(0x24020000 + syscall, tramp + 0); | ||
185 | err |= __put_user(0x0000000c , tramp + 1); | ||
186 | if (ICACHE_REFILLS_WORKAROUND_WAR) { | ||
187 | err |= __put_user(0, tramp + 2); | ||
188 | err |= __put_user(0, tramp + 3); | ||
189 | err |= __put_user(0, tramp + 4); | ||
190 | err |= __put_user(0, tramp + 5); | ||
191 | err |= __put_user(0, tramp + 6); | ||
192 | err |= __put_user(0, tramp + 7); | ||
193 | } | ||
194 | flush_cache_sigtramp((unsigned long) tramp); | ||
195 | |||
196 | return err; | ||
197 | } | ||
198 | |||
41 | /* | 199 | /* |
42 | * Atomically swap in the new signal mask, and wait for a signal. | 200 | * Atomically swap in the new signal mask, and wait for a signal. |
43 | */ | 201 | */ |
44 | 202 | ||
45 | #ifdef CONFIG_TRAD_SIGNALS | 203 | #ifdef CONFIG_TRAD_SIGNALS |
46 | save_static_function(sys_sigsuspend); | 204 | asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) |
47 | __attribute_used__ noinline static int | ||
48 | _sys_sigsuspend(nabi_no_regargs struct pt_regs regs) | ||
49 | { | 205 | { |
50 | sigset_t newset; | 206 | sigset_t newset; |
51 | sigset_t __user *uset; | 207 | sigset_t __user *uset; |
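The install_sigtramp() body in the hunk above writes two instruction words onto the user stack. A decoding sketch (illustration only): 0x24020000 + nr encodes MIPS "addiu v0, zero, nr", i.e. the "li v0, nr" from the comment, and 0x0000000c is the "syscall" instruction; 4119 below is the o32 sigreturn slot implied by the /* 4115 */ swapoff marker in the scall64-o32.S hunk earlier in this diff.

#include <stdio.h>

int main(void)
{
	unsigned int nr = 4119;		/* o32 sigreturn slot, per the table above */
	unsigned int tramp[2] = { 0x24020000 + nr, 0x0000000c };

	/* addiu encoding: opcode 9 (001001), rs = 0 ($zero), rt = 2 ($v0). */
	printf("opcode=%u rs=%u rt=%u imm=%u\n",
	       tramp[0] >> 26, (tramp[0] >> 21) & 0x1f,
	       (tramp[0] >> 16) & 0x1f, tramp[0] & 0xffff);
	printf("second word is syscall: %s\n",
	       tramp[1] == 0x0000000c ? "yes" : "no");
	return 0;
}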
@@ -68,9 +224,7 @@ _sys_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
68 | } | 224 | } |
69 | #endif | 225 | #endif |
70 | 226 | ||
71 | save_static_function(sys_rt_sigsuspend); | 227 | asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) |
72 | __attribute_used__ noinline static int | ||
73 | _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | ||
74 | { | 228 | { |
75 | sigset_t newset; | 229 | sigset_t newset; |
76 | sigset_t __user *unewset; | 230 | sigset_t __user *unewset; |
@@ -89,7 +243,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
89 | spin_lock_irq(¤t->sighand->siglock); | 243 | spin_lock_irq(¤t->sighand->siglock); |
90 | current->saved_sigmask = current->blocked; | 244 | current->saved_sigmask = current->blocked; |
91 | current->blocked = newset; | 245 | current->blocked = newset; |
92 | recalc_sigpending(); | 246 | recalc_sigpending(); |
93 | spin_unlock_irq(¤t->sighand->siglock); | 247 | spin_unlock_irq(¤t->sighand->siglock); |
94 | 248 | ||
95 | current->state = TASK_INTERRUPTIBLE; | 249 | current->state = TASK_INTERRUPTIBLE; |
@@ -124,7 +278,7 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act, | |||
124 | 278 | ||
125 | if (!ret && oact) { | 279 | if (!ret && oact) { |
126 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) | 280 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) |
127 | return -EFAULT; | 281 | return -EFAULT; |
128 | err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | 282 | err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
129 | err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); | 283 | err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); |
130 | err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); | 284 | err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); |
@@ -148,45 +302,8 @@ asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs) | |||
148 | return do_sigaltstack(uss, uoss, usp); | 302 | return do_sigaltstack(uss, uoss, usp); |
149 | } | 303 | } |
150 | 304 | ||
151 | /* | ||
152 | * Horribly complicated - with the bloody RM9000 workarounds enabled | ||
153 | * the signal trampolines is moving to the end of the structure so we can | ||
154 | * increase the alignment without breaking software compatibility. | ||
155 | */ | ||
156 | #ifdef CONFIG_TRAD_SIGNALS | 305 | #ifdef CONFIG_TRAD_SIGNALS |
157 | struct sigframe { | 306 | asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) |
158 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
159 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
160 | u32 sf_pad[2]; | ||
161 | #else | ||
162 | u32 sf_code[2]; /* signal trampoline */ | ||
163 | #endif | ||
164 | struct sigcontext sf_sc; | ||
165 | sigset_t sf_mask; | ||
166 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
167 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
168 | #endif | ||
169 | }; | ||
170 | #endif | ||
171 | |||
172 | struct rt_sigframe { | ||
173 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
174 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
175 | u32 rs_pad[2]; | ||
176 | #else | ||
177 | u32 rs_code[2]; /* signal trampoline */ | ||
178 | #endif | ||
179 | struct siginfo rs_info; | ||
180 | struct ucontext rs_uc; | ||
181 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
182 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
183 | #endif | ||
184 | }; | ||
185 | |||
186 | #ifdef CONFIG_TRAD_SIGNALS | ||
187 | save_static_function(sys_sigreturn); | ||
188 | __attribute_used__ noinline static void | ||
189 | _sys_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
190 | { | 307 | { |
191 | struct sigframe __user *frame; | 308 | struct sigframe __user *frame; |
192 | sigset_t blocked; | 309 | sigset_t blocked; |
@@ -221,9 +338,7 @@ badframe: | |||
221 | } | 338 | } |
222 | #endif /* CONFIG_TRAD_SIGNALS */ | 339 | #endif /* CONFIG_TRAD_SIGNALS */ |
223 | 340 | ||
224 | save_static_function(sys_rt_sigreturn); | 341 | asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
225 | __attribute_used__ noinline static void | ||
226 | _sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
227 | { | 342 | { |
228 | struct rt_sigframe __user *frame; | 343 | struct rt_sigframe __user *frame; |
229 | sigset_t set; | 344 | sigset_t set; |
@@ -275,7 +390,7 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
275 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 390 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
276 | goto give_sigsegv; | 391 | goto give_sigsegv; |
277 | 392 | ||
278 | install_sigtramp(frame->sf_code, __NR_sigreturn); | 393 | err |= install_sigtramp(frame->sf_code, __NR_sigreturn); |
279 | 394 | ||
280 | err |= setup_sigcontext(regs, &frame->sf_sc); | 395 | err |= setup_sigcontext(regs, &frame->sf_sc); |
281 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); | 396 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); |
@@ -299,12 +414,10 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
299 | regs->regs[31] = (unsigned long) frame->sf_code; | 414 | regs->regs[31] = (unsigned long) frame->sf_code; |
300 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 415 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
301 | 416 | ||
302 | #if DEBUG_SIG | 417 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
303 | printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n", | ||
304 | current->comm, current->pid, | 418 | current->comm, current->pid, |
305 | frame, regs->cp0_epc, frame->regs[31]); | 419 | frame, regs->cp0_epc, regs->regs[31]); |
306 | #endif | 420 | return 0; |
307 | return 0; | ||
308 | 421 | ||
309 | give_sigsegv: | 422 | give_sigsegv: |
310 | force_sigsegv(signr, current); | 423 | force_sigsegv(signr, current); |
@@ -322,7 +435,7 @@ int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
322 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 435 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
323 | goto give_sigsegv; | 436 | goto give_sigsegv; |
324 | 437 | ||
325 | install_sigtramp(frame->rs_code, __NR_rt_sigreturn); | 438 | err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn); |
326 | 439 | ||
327 | /* Create siginfo. */ | 440 | /* Create siginfo. */ |
328 | err |= copy_siginfo_to_user(&frame->rs_info, info); | 441 | err |= copy_siginfo_to_user(&frame->rs_info, info); |
@@ -359,11 +472,10 @@ int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, | |||
359 | regs->regs[31] = (unsigned long) frame->rs_code; | 472 | regs->regs[31] = (unsigned long) frame->rs_code; |
360 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 473 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
361 | 474 | ||
362 | #if DEBUG_SIG | 475 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
363 | printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n", | ||
364 | current->comm, current->pid, | 476 | current->comm, current->pid, |
365 | frame, regs->cp0_epc, regs->regs[31]); | 477 | frame, regs->cp0_epc, regs->regs[31]); |
366 | #endif | 478 | |
367 | return 0; | 479 | return 0; |
368 | 480 | ||
369 | give_sigsegv: | 481 | give_sigsegv: |
@@ -371,7 +483,7 @@ give_sigsegv: | |||
371 | return -EFAULT; | 483 | return -EFAULT; |
372 | } | 484 | } |
373 | 485 | ||
374 | static inline int handle_signal(unsigned long sig, siginfo_t *info, | 486 | static int handle_signal(unsigned long sig, siginfo_t *info, |
375 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) | 487 | struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) |
376 | { | 488 | { |
377 | int ret; | 489 | int ret; |
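The signal.c hunks above collapse each #if DEBUG_SIG / printk() / #endif block into a single DEBUGP() call, and at the same time fix the debug output itself: the return address is now printed from regs->regs[31] with %lx instead of dereferencing the frame with %p. The macro definition is not part of this diff; it presumably comes from the shared signal-common.h header. A minimal sketch of such a macro, written as an assumption rather than a copy of the real header, so the call sites compile away when debugging is disabled:

	/* Hypothetical DEBUGP() helper; the real definition is assumed to live
	 * in signal-common.h and is not shown in this diff. */
	#ifdef DEBUG_SIG
	#define DEBUGP(fmt, args...)	printk(KERN_DEBUG "%s: " fmt, __func__, ##args)
	#else
	#define DEBUGP(fmt, args...)	do { } while (0)	/* no code emitted */
	#endif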
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index c86a5ddff050..183fc7e55f34 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <asm/fpu.h> | 33 | #include <asm/fpu.h> |
34 | #include <asm/war.h> | 34 | #include <asm/war.h> |
35 | 35 | ||
36 | #include "signal-common.h" | ||
37 | |||
36 | #define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) | 38 | #define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) |
37 | 39 | ||
38 | typedef struct compat_siginfo { | 40 | typedef struct compat_siginfo { |
@@ -102,8 +104,6 @@ typedef struct compat_siginfo { | |||
102 | #define __NR_O32_rt_sigreturn 4193 | 104 | #define __NR_O32_rt_sigreturn 4193 |
103 | #define __NR_O32_restart_syscall 4253 | 105 | #define __NR_O32_restart_syscall 4253 |
104 | 106 | ||
105 | #define DEBUG_SIG 0 | ||
106 | |||
107 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 107 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
108 | 108 | ||
109 | /* 32-bit compatibility types */ | 109 | /* 32-bit compatibility types */ |
@@ -139,6 +139,123 @@ struct ucontext32 { | |||
139 | sigset_t32 uc_sigmask; /* mask last for extensibility */ | 139 | sigset_t32 uc_sigmask; /* mask last for extensibility */ |
140 | }; | 140 | }; |
141 | 141 | ||
142 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 | ||
143 | |||
144 | struct rt_sigframe32 { | ||
145 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
146 | u32 rs_code[2]; /* signal trampoline */ | ||
147 | compat_siginfo_t rs_info; | ||
148 | struct ucontext32 rs_uc; | ||
149 | }; | ||
150 | |||
151 | #else /* ICACHE_REFILLS_WORKAROUND_WAR */ | ||
152 | |||
153 | struct rt_sigframe32 { | ||
154 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
155 | u32 rs_pad[2]; | ||
156 | compat_siginfo_t rs_info; | ||
157 | struct ucontext32 rs_uc; | ||
158 | u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */ | ||
159 | }; | ||
160 | |||
161 | #endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ | ||
162 | |||
163 | /* | ||
164 | * sigcontext handlers | ||
165 | */ | ||
166 | static int setup_sigcontext32(struct pt_regs *regs, | ||
167 | struct sigcontext32 __user *sc) | ||
168 | { | ||
169 | int err = 0; | ||
170 | int i; | ||
171 | |||
172 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | ||
173 | |||
174 | err |= __put_user(0, &sc->sc_regs[0]); | ||
175 | for (i = 1; i < 32; i++) | ||
176 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); | ||
177 | |||
178 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
179 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
180 | if (cpu_has_dsp) { | ||
181 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
182 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
183 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
184 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
185 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
186 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
187 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
188 | } | ||
189 | |||
190 | err |= __put_user(!!used_math(), &sc->sc_used_math); | ||
191 | |||
192 | if (used_math()) { | ||
193 | /* | ||
194 | * Save FPU state to signal context. Signal handler | ||
195 | * will "inherit" current FPU state. | ||
196 | */ | ||
197 | preempt_disable(); | ||
198 | |||
199 | if (!is_fpu_owner()) { | ||
200 | own_fpu(); | ||
201 | restore_fp(current); | ||
202 | } | ||
203 | err |= save_fp_context32(sc); | ||
204 | |||
205 | preempt_enable(); | ||
206 | } | ||
207 | return err; | ||
208 | } | ||
209 | |||
210 | static int restore_sigcontext32(struct pt_regs *regs, | ||
211 | struct sigcontext32 __user *sc) | ||
212 | { | ||
213 | u32 used_math; | ||
214 | int err = 0; | ||
215 | s32 treg; | ||
216 | int i; | ||
217 | |||
218 | /* Always make any pending restarted system calls return -EINTR */ | ||
219 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
220 | |||
221 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
222 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
223 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
224 | if (cpu_has_dsp) { | ||
225 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
226 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
227 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
228 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
229 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
230 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
231 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
232 | } | ||
233 | |||
234 | for (i = 1; i < 32; i++) | ||
235 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); | ||
236 | |||
237 | err |= __get_user(used_math, &sc->sc_used_math); | ||
238 | conditional_used_math(used_math); | ||
239 | |||
240 | preempt_disable(); | ||
241 | |||
242 | if (used_math()) { | ||
243 | /* restore fpu context if we have used it before */ | ||
244 | own_fpu(); | ||
245 | err |= restore_fp_context32(sc); | ||
246 | } else { | ||
247 | /* signal handler may have used FPU. Give it up. */ | ||
248 | lose_fpu(); | ||
249 | } | ||
250 | |||
251 | preempt_enable(); | ||
252 | |||
253 | return err; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * | ||
258 | */ | ||
142 | extern void __put_sigset_unknown_nsig(void); | 259 | extern void __put_sigset_unknown_nsig(void); |
143 | extern void __get_sigset_unknown_nsig(void); | 260 | extern void __get_sigset_unknown_nsig(void); |
144 | 261 | ||
@@ -191,9 +308,7 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf) | |||
191 | * Atomically swap in the new signal mask, and wait for a signal. | 308 | * Atomically swap in the new signal mask, and wait for a signal. |
192 | */ | 309 | */ |
193 | 310 | ||
194 | save_static_function(sys32_sigsuspend); | 311 | asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) |
195 | __attribute_used__ noinline static int | ||
196 | _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) | ||
197 | { | 312 | { |
198 | compat_sigset_t __user *uset; | 313 | compat_sigset_t __user *uset; |
199 | sigset_t newset; | 314 | sigset_t newset; |
@@ -215,9 +330,7 @@ _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
215 | return -ERESTARTNOHAND; | 330 | return -ERESTARTNOHAND; |
216 | } | 331 | } |
217 | 332 | ||
218 | save_static_function(sys32_rt_sigsuspend); | 333 | asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) |
219 | __attribute_used__ noinline static int | ||
220 | _sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | ||
221 | { | 334 | { |
222 | compat_sigset_t __user *uset; | 335 | compat_sigset_t __user *uset; |
223 | sigset_t newset; | 336 | sigset_t newset; |
@@ -326,91 +439,6 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs) | |||
326 | return ret; | 439 | return ret; |
327 | } | 440 | } |
328 | 441 | ||
329 | static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 __user *sc) | ||
330 | { | ||
331 | u32 used_math; | ||
332 | int err = 0; | ||
333 | s32 treg; | ||
334 | |||
335 | /* Always make any pending restarted system calls return -EINTR */ | ||
336 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
337 | |||
338 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
339 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
340 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
341 | if (cpu_has_dsp) { | ||
342 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
343 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
344 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
345 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
346 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
347 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
348 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
349 | } | ||
350 | |||
351 | #define restore_gp_reg(i) do { \ | ||
352 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \ | ||
353 | } while(0) | ||
354 | restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3); | ||
355 | restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6); | ||
356 | restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9); | ||
357 | restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12); | ||
358 | restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15); | ||
359 | restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18); | ||
360 | restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21); | ||
361 | restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24); | ||
362 | restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27); | ||
363 | restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30); | ||
364 | restore_gp_reg(31); | ||
365 | #undef restore_gp_reg | ||
366 | |||
367 | err |= __get_user(used_math, &sc->sc_used_math); | ||
368 | conditional_used_math(used_math); | ||
369 | |||
370 | preempt_disable(); | ||
371 | |||
372 | if (used_math()) { | ||
373 | /* restore fpu context if we have used it before */ | ||
374 | own_fpu(); | ||
375 | err |= restore_fp_context32(sc); | ||
376 | } else { | ||
377 | /* signal handler may have used FPU. Give it up. */ | ||
378 | lose_fpu(); | ||
379 | } | ||
380 | |||
381 | preempt_enable(); | ||
382 | |||
383 | return err; | ||
384 | } | ||
385 | |||
386 | struct sigframe { | ||
387 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
388 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
389 | u32 sf_pad[2]; | ||
390 | #else | ||
391 | u32 sf_code[2]; /* signal trampoline */ | ||
392 | #endif | ||
393 | struct sigcontext32 sf_sc; | ||
394 | sigset_t sf_mask; | ||
395 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
396 | u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */ | ||
397 | #endif | ||
398 | }; | ||
399 | |||
400 | struct rt_sigframe32 { | ||
401 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
402 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
403 | u32 rs_pad[2]; | ||
404 | #else | ||
405 | u32 rs_code[2]; /* signal trampoline */ | ||
406 | #endif | ||
407 | compat_siginfo_t rs_info; | ||
408 | struct ucontext32 rs_uc; | ||
409 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
410 | u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */ | ||
411 | #endif | ||
412 | }; | ||
413 | |||
414 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | 442 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) |
415 | { | 443 | { |
416 | int err; | 444 | int err; |
@@ -463,9 +491,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | |||
463 | return err; | 491 | return err; |
464 | } | 492 | } |
465 | 493 | ||
466 | save_static_function(sys32_sigreturn); | 494 | asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) |
467 | __attribute_used__ noinline static void | ||
468 | _sys32_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
469 | { | 495 | { |
470 | struct sigframe __user *frame; | 496 | struct sigframe __user *frame; |
471 | sigset_t blocked; | 497 | sigset_t blocked; |
@@ -499,9 +525,7 @@ badframe: | |||
499 | force_sig(SIGSEGV, current); | 525 | force_sig(SIGSEGV, current); |
500 | } | 526 | } |
501 | 527 | ||
502 | save_static_function(sys32_rt_sigreturn); | 528 | asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
503 | __attribute_used__ noinline static void | ||
504 | _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
505 | { | 529 | { |
506 | struct rt_sigframe32 __user *frame; | 530 | struct rt_sigframe32 __user *frame; |
507 | mm_segment_t old_fs; | 531 | mm_segment_t old_fs; |
@@ -554,89 +578,6 @@ badframe: | |||
554 | force_sig(SIGSEGV, current); | 578 | force_sig(SIGSEGV, current); |
555 | } | 579 | } |
556 | 580 | ||
557 | static inline int setup_sigcontext32(struct pt_regs *regs, | ||
558 | struct sigcontext32 __user *sc) | ||
559 | { | ||
560 | int err = 0; | ||
561 | |||
562 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | ||
563 | err |= __put_user(regs->cp0_status, &sc->sc_status); | ||
564 | |||
565 | #define save_gp_reg(i) { \ | ||
566 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \ | ||
567 | } while(0) | ||
568 | __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2); | ||
569 | save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6); | ||
570 | save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10); | ||
571 | save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14); | ||
572 | save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18); | ||
573 | save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22); | ||
574 | save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26); | ||
575 | save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30); | ||
576 | save_gp_reg(31); | ||
577 | #undef save_gp_reg | ||
578 | |||
579 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
580 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
581 | if (cpu_has_dsp) { | ||
582 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
583 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
584 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
585 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
586 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
587 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
588 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
589 | } | ||
590 | |||
591 | err |= __put_user(!!used_math(), &sc->sc_used_math); | ||
592 | |||
593 | if (!used_math()) | ||
594 | goto out; | ||
595 | |||
596 | /* | ||
597 | * Save FPU state to signal context. Signal handler will "inherit" | ||
598 | * current FPU state. | ||
599 | */ | ||
600 | preempt_disable(); | ||
601 | |||
602 | if (!is_fpu_owner()) { | ||
603 | own_fpu(); | ||
604 | restore_fp(current); | ||
605 | } | ||
606 | err |= save_fp_context32(sc); | ||
607 | |||
608 | preempt_enable(); | ||
609 | |||
610 | out: | ||
611 | return err; | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | * Determine which stack to use.. | ||
616 | */ | ||
617 | static inline void __user *get_sigframe(struct k_sigaction *ka, | ||
618 | struct pt_regs *regs, | ||
619 | size_t frame_size) | ||
620 | { | ||
621 | unsigned long sp; | ||
622 | |||
623 | /* Default to using normal stack */ | ||
624 | sp = regs->regs[29]; | ||
625 | |||
626 | /* | ||
627 | * FPU emulator may have it's own trampoline active just | ||
628 | * above the user stack, 16-bytes before the next lowest | ||
629 | * 16 byte boundary. Try to avoid trashing it. | ||
630 | */ | ||
631 | sp -= 32; | ||
632 | |||
633 | /* This is the X/Open sanctioned signal stack switching. */ | ||
634 | if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0)) | ||
635 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
636 | |||
637 | return (void __user *)((sp - frame_size) & ALMASK); | ||
638 | } | ||
639 | |||
640 | int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | 581 | int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, |
641 | int signr, sigset_t *set) | 582 | int signr, sigset_t *set) |
642 | { | 583 | { |
@@ -647,15 +588,7 @@ int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
647 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 588 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
648 | goto give_sigsegv; | 589 | goto give_sigsegv; |
649 | 590 | ||
650 | /* | 591 | err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn); |
651 | * Set up the return code ... | ||
652 | * | ||
653 | * li v0, __NR_O32_sigreturn | ||
654 | * syscall | ||
655 | */ | ||
656 | err |= __put_user(0x24020000 + __NR_O32_sigreturn, frame->sf_code + 0); | ||
657 | err |= __put_user(0x0000000c , frame->sf_code + 1); | ||
658 | flush_cache_sigtramp((unsigned long) frame->sf_code); | ||
659 | 592 | ||
660 | err |= setup_sigcontext32(regs, &frame->sf_sc); | 593 | err |= setup_sigcontext32(regs, &frame->sf_sc); |
661 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); | 594 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); |
@@ -679,11 +612,10 @@ int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
679 | regs->regs[31] = (unsigned long) frame->sf_code; | 612 | regs->regs[31] = (unsigned long) frame->sf_code; |
680 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 613 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
681 | 614 | ||
682 | #if DEBUG_SIG | 615 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
683 | printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n", | ||
684 | current->comm, current->pid, | 616 | current->comm, current->pid, |
685 | frame, regs->cp0_epc, frame->sf_code); | 617 | frame, regs->cp0_epc, regs->regs[31]); |
686 | #endif | 618 | |
687 | return 0; | 619 | return 0; |
688 | 620 | ||
689 | give_sigsegv: | 621 | give_sigsegv: |
@@ -702,17 +634,7 @@ int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
702 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) | 634 | if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) |
703 | goto give_sigsegv; | 635 | goto give_sigsegv; |
704 | 636 | ||
705 | /* Set up to return from userspace. If provided, use a stub already | 637 | err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn); |
706 | in userspace. */ | ||
707 | /* | ||
708 | * Set up the return code ... | ||
709 | * | ||
710 | * li v0, __NR_O32_rt_sigreturn | ||
711 | * syscall | ||
712 | */ | ||
713 | err |= __put_user(0x24020000 + __NR_O32_rt_sigreturn, frame->rs_code + 0); | ||
714 | err |= __put_user(0x0000000c , frame->rs_code + 1); | ||
715 | flush_cache_sigtramp((unsigned long) frame->rs_code); | ||
716 | 638 | ||
717 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ | 639 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ |
718 | err |= copy_siginfo_to_user32(&frame->rs_info, info); | 640 | err |= copy_siginfo_to_user32(&frame->rs_info, info); |
@@ -750,11 +672,10 @@ int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, | |||
750 | regs->regs[31] = (unsigned long) frame->rs_code; | 672 | regs->regs[31] = (unsigned long) frame->rs_code; |
751 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 673 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
752 | 674 | ||
753 | #if DEBUG_SIG | 675 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
754 | printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n", | ||
755 | current->comm, current->pid, | 676 | current->comm, current->pid, |
756 | frame, regs->cp0_epc, frame->rs_code); | 677 | frame, regs->cp0_epc, regs->regs[31]); |
757 | #endif | 678 | |
758 | return 0; | 679 | return 0; |
759 | 680 | ||
760 | give_sigsegv: | 681 | give_sigsegv: |
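In both setup_frame_32() and setup_rt_frame_32() the open-coded trampoline (the li v0, __NR_O32_*sigreturn / syscall pair written with __put_user() and flushed with flush_cache_sigtramp()) is replaced by install_sigtramp(), whose result is now folded into err. The helper itself is not shown in this diff; a sketch of what it presumably does, modelled directly on the removed lines (the real version, likely in signal-common.h, may additionally pad the enlarged trampoline when the RM9000 icache workaround is active):

	/* Sketch of a shared trampoline installer, modelled on the inline code
	 * removed above; treat the name and location as assumptions. */
	static int install_sigtramp(unsigned int __user *tramp, unsigned int syscall)
	{
		int err;

		/*
		 * li      v0, syscall	encoded as 0x24020000 | syscall number
		 * syscall		encoded as 0x0000000c
		 */
		err  = __put_user(0x24020000 + syscall, tramp + 0);
		err |= __put_user(0x0000000c, tramp + 1);
		flush_cache_sigtramp((unsigned long)tramp);

		return err;
	}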
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index a67c18555ed3..57456e6a0c62 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -47,8 +47,6 @@ | |||
47 | #define __NR_N32_rt_sigreturn 6211 | 47 | #define __NR_N32_rt_sigreturn 6211 |
48 | #define __NR_N32_restart_syscall 6214 | 48 | #define __NR_N32_restart_syscall 6214 |
49 | 49 | ||
50 | #define DEBUG_SIG 0 | ||
51 | |||
52 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 50 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
53 | 51 | ||
54 | /* IRIX compatible stack_t */ | 52 | /* IRIX compatible stack_t */ |
@@ -66,25 +64,30 @@ struct ucontextn32 { | |||
66 | sigset_t uc_sigmask; /* mask last for extensibility */ | 64 | sigset_t uc_sigmask; /* mask last for extensibility */ |
67 | }; | 65 | }; |
68 | 66 | ||
67 | #if ICACHE_REFILLS_WORKAROUND_WAR == 0 | ||
68 | |||
69 | struct rt_sigframe_n32 { | 69 | struct rt_sigframe_n32 { |
70 | u32 rs_ass[4]; /* argument save space for o32 */ | 70 | u32 rs_ass[4]; /* argument save space for o32 */ |
71 | #if ICACHE_REFILLS_WORKAROUND_WAR | ||
72 | u32 rs_pad[2]; | ||
73 | #else | ||
74 | u32 rs_code[2]; /* signal trampoline */ | 71 | u32 rs_code[2]; /* signal trampoline */ |
75 | #endif | ||
76 | struct siginfo rs_info; | 72 | struct siginfo rs_info; |
77 | struct ucontextn32 rs_uc; | 73 | struct ucontextn32 rs_uc; |
78 | #if ICACHE_REFILLS_WORKAROUND_WAR | 74 | }; |
75 | |||
76 | #else /* ICACHE_REFILLS_WORKAROUND_WAR */ | ||
77 | |||
78 | struct rt_sigframe_n32 { | ||
79 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
80 | u32 rs_pad[2]; | ||
81 | struct siginfo rs_info; | ||
82 | struct ucontextn32 rs_uc; | ||
79 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ | 83 | u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */ |
80 | #endif | ||
81 | }; | 84 | }; |
82 | 85 | ||
86 | #endif /* !ICACHE_REFILLS_WORKAROUND_WAR */ | ||
87 | |||
83 | extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat); | 88 | extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat); |
84 | 89 | ||
85 | save_static_function(sysn32_rt_sigsuspend); | 90 | asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) |
86 | __attribute_used__ noinline static int | ||
87 | _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | ||
88 | { | 91 | { |
89 | compat_sigset_t __user *unewset; | 92 | compat_sigset_t __user *unewset; |
90 | compat_sigset_t uset; | 93 | compat_sigset_t uset; |
@@ -105,7 +108,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
105 | spin_lock_irq(&current->sighand->siglock); | 108 | spin_lock_irq(&current->sighand->siglock); |

106 | current->saved_sigmask = current->blocked; | 109 | current->saved_sigmask = current->blocked; |
107 | current->blocked = newset; | 110 | current->blocked = newset; |
108 | recalc_sigpending(); | 111 | recalc_sigpending(); |
109 | spin_unlock_irq(&current->sighand->siglock); | 112 | spin_unlock_irq(&current->sighand->siglock); |
110 | 113 | ||
111 | current->state = TASK_INTERRUPTIBLE; | 114 | current->state = TASK_INTERRUPTIBLE; |
@@ -114,9 +117,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) | |||
114 | return -ERESTARTNOHAND; | 117 | return -ERESTARTNOHAND; |
115 | } | 118 | } |
116 | 119 | ||
117 | save_static_function(sysn32_rt_sigreturn); | 120 | asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) |
118 | __attribute_used__ noinline static void | ||
119 | _sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) | ||
120 | { | 121 | { |
121 | struct rt_sigframe_n32 __user *frame; | 122 | struct rt_sigframe_n32 __user *frame; |
122 | sigset_t set; | 123 | sigset_t set; |
@@ -184,7 +185,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka, | |||
184 | /* Create the ucontext. */ | 185 | /* Create the ucontext. */ |
185 | err |= __put_user(0, &frame->rs_uc.uc_flags); | 186 | err |= __put_user(0, &frame->rs_uc.uc_flags); |
186 | err |= __put_user(0, &frame->rs_uc.uc_link); | 187 | err |= __put_user(0, &frame->rs_uc.uc_link); |
187 | sp = (int) (long) current->sas_ss_sp; | 188 | sp = (int) (long) current->sas_ss_sp; |
188 | err |= __put_user(sp, | 189 | err |= __put_user(sp, |
189 | &frame->rs_uc.uc_stack.ss_sp); | 190 | &frame->rs_uc.uc_stack.ss_sp); |
190 | err |= __put_user(sas_ss_flags(regs->regs[29]), | 191 | err |= __put_user(sas_ss_flags(regs->regs[29]), |
@@ -214,11 +215,10 @@ int setup_rt_frame_n32(struct k_sigaction * ka, | |||
214 | regs->regs[31] = (unsigned long) frame->rs_code; | 215 | regs->regs[31] = (unsigned long) frame->rs_code; |
215 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; | 216 | regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; |
216 | 217 | ||
217 | #if DEBUG_SIG | 218 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", |
218 | printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n", | ||
219 | current->comm, current->pid, | 219 | current->comm, current->pid, |
220 | frame, regs->cp0_epc, regs->regs[31]); | 220 | frame, regs->cp0_epc, regs->regs[31]); |
221 | #endif | 221 | |
222 | return 0; | 222 | return 0; |
223 | 223 | ||
224 | give_sigsegv: | 224 | give_sigsegv: |
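The #if/#else blocks inside the frame structures are split into two complete struct definitions, selected once by ICACHE_REFILLS_WORKAROUND_WAR. The reasoning is the one spelled out in the comment removed from signal.c: with the workaround enabled the trampoline moves to the end of the frame, where its alignment can be raised, while rs_pad[2] keeps every other member at its original offset, so the layout userland sees is identical either way. A small stand-alone illustration of that invariant, using placeholder member types (the real siginfo and ucontext structures are much larger); everything in it is hypothetical and exists only to make the offset argument checkable:

	/* User-space illustration (C11) of the layout-compatibility argument;
	 * "struct payload" is a stand-in, not a kernel type. */
	#include <stddef.h>
	#include <stdint.h>

	struct payload { uint64_t words[16]; };

	struct frame_plain {			/* ICACHE_REFILLS_WORKAROUND_WAR == 0 */
		uint32_t rs_ass[4];
		uint32_t rs_code[2];		/* trampoline in the middle */
		struct payload rs_info;
		struct payload rs_uc;
	};

	struct frame_war {			/* workaround enabled */
		uint32_t rs_ass[4];
		uint32_t rs_pad[2];		/* same size as rs_code[2] above */
		struct payload rs_info;
		struct payload rs_uc;
		uint32_t rs_code[8] __attribute__((aligned(32)));	/* trampoline at the end */
	};

	_Static_assert(offsetof(struct frame_plain, rs_info) ==
		       offsetof(struct frame_war, rs_info), "rs_info offset changed");
	_Static_assert(offsetof(struct frame_plain, rs_uc) ==
		       offsetof(struct frame_war, rs_uc), "rs_uc offset changed");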
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 1ee689c0e0c9..64b62bdfb4f6 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <asm/mipsregs.h> | 35 | #include <asm/mipsregs.h> |
36 | #include <asm/mipsmtregs.h> | 36 | #include <asm/mipsmtregs.h> |
37 | #include <asm/mips_mt.h> | 37 | #include <asm/mips_mt.h> |
38 | #include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */ | ||
39 | 38 | ||
40 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 | 39 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 |
41 | #define MIPS_CPU_IPI_CALL_IRQ 1 | 40 | #define MIPS_CPU_IPI_CALL_IRQ 1 |
@@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void) | |||
108 | 107 | ||
109 | static void ipi_resched_dispatch(void) | 108 | static void ipi_resched_dispatch(void) |
110 | { | 109 | { |
111 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ); | 110 | do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ); |
112 | } | 111 | } |
113 | 112 | ||
114 | static void ipi_call_dispatch(void) | 113 | static void ipi_call_dispatch(void) |
115 | { | 114 | { |
116 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ); | 115 | do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ); |
117 | } | 116 | } |
118 | 117 | ||
119 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) | 118 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) |
@@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus) | |||
270 | set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); | 269 | set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); |
271 | } | 270 | } |
272 | 271 | ||
273 | cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; | 272 | cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ; |
274 | cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; | 273 | cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ; |
275 | 274 | ||
276 | setup_irq(cpu_ipi_resched_irq, &irq_resched); | 275 | setup_irq(cpu_ipi_resched_irq, &irq_resched); |
277 | setup_irq(cpu_ipi_call_irq, &irq_call); | 276 | setup_irq(cpu_ipi_call_irq, &irq_call); |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 6a857bf030b0..9251ea824937 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -26,16 +26,6 @@ | |||
26 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. | 26 | * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | /* | ||
30 | * MIPSCPU_INT_BASE is identically defined in both | ||
31 | * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h, | ||
32 | * but as yet there's no properly organized include structure that | ||
33 | * will ensure that the right *int.h file will be included for a | ||
34 | * given platform build. | ||
35 | */ | ||
36 | |||
37 | #define MIPSCPU_INT_BASE 16 | ||
38 | |||
39 | #define MIPS_CPU_IPI_IRQ 1 | 29 | #define MIPS_CPU_IPI_IRQ 1 |
40 | 30 | ||
41 | #define LOCK_MT_PRA() \ | 31 | #define LOCK_MT_PRA() \ |
@@ -77,15 +67,15 @@ unsigned int ipi_timer_latch[NR_CPUS]; | |||
77 | 67 | ||
78 | #define IPIBUF_PER_CPU 4 | 68 | #define IPIBUF_PER_CPU 4 |
79 | 69 | ||
80 | struct smtc_ipi_q IPIQ[NR_CPUS]; | 70 | static struct smtc_ipi_q IPIQ[NR_CPUS]; |
81 | struct smtc_ipi_q freeIPIq; | 71 | static struct smtc_ipi_q freeIPIq; |
82 | 72 | ||
83 | 73 | ||
84 | /* Forward declarations */ | 74 | /* Forward declarations */ |
85 | 75 | ||
86 | void ipi_decode(struct smtc_ipi *); | 76 | void ipi_decode(struct smtc_ipi *); |
87 | void post_direct_ipi(int cpu, struct smtc_ipi *pipi); | 77 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); |
88 | void setup_cross_vpe_interrupts(void); | 78 | static void setup_cross_vpe_interrupts(void); |
89 | void init_smtc_stats(void); | 79 | void init_smtc_stats(void); |
90 | 80 | ||
91 | /* Global SMTC Status */ | 81 | /* Global SMTC Status */ |
@@ -200,7 +190,7 @@ void __init sanitize_tlb_entries(void) | |||
200 | * Configure shared TLB - VPC configuration bit must be set by caller | 190 | * Configure shared TLB - VPC configuration bit must be set by caller |
201 | */ | 191 | */ |
202 | 192 | ||
203 | void smtc_configure_tlb(void) | 193 | static void smtc_configure_tlb(void) |
204 | { | 194 | { |
205 | int i,tlbsiz,vpes; | 195 | int i,tlbsiz,vpes; |
206 | unsigned long mvpconf0; | 196 | unsigned long mvpconf0; |
@@ -648,7 +638,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new, | |||
648 | * the VPE. | 638 | * the VPE. |
649 | */ | 639 | */ |
650 | 640 | ||
651 | void smtc_ipi_qdump(void) | 641 | static void smtc_ipi_qdump(void) |
652 | { | 642 | { |
653 | int i; | 643 | int i; |
654 | 644 | ||
@@ -686,28 +676,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv) | |||
686 | return result; | 676 | return result; |
687 | } | 677 | } |
688 | 678 | ||
689 | /* No longer used in IPI dispatch, but retained for future recycling */ | ||
690 | |||
691 | static __inline__ int atomic_postclear(unsigned int *pv) | ||
692 | { | ||
693 | unsigned long result; | ||
694 | |||
695 | unsigned long temp; | ||
696 | |||
697 | __asm__ __volatile__( | ||
698 | "1: ll %0, %2 \n" | ||
699 | " or %1, $0, $0 \n" | ||
700 | " sc %1, %2 \n" | ||
701 | " beqz %1, 1b \n" | ||
702 | " sync \n" | ||
703 | : "=&r" (result), "=&r" (temp), "=m" (*pv) | ||
704 | : "m" (*pv) | ||
705 | : "memory"); | ||
706 | |||
707 | return result; | ||
708 | } | ||
709 | |||
710 | |||
711 | void smtc_send_ipi(int cpu, int type, unsigned int action) | 679 | void smtc_send_ipi(int cpu, int type, unsigned int action) |
712 | { | 680 | { |
713 | int tcstatus; | 681 | int tcstatus; |
@@ -781,7 +749,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
781 | /* | 749 | /* |
782 | * Send IPI message to Halted TC, TargTC/TargVPE already having been set | 750 | * Send IPI message to Halted TC, TargTC/TargVPE already having been set |
783 | */ | 751 | */ |
784 | void post_direct_ipi(int cpu, struct smtc_ipi *pipi) | 752 | static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) |
785 | { | 753 | { |
786 | struct pt_regs *kstack; | 754 | struct pt_regs *kstack; |
787 | unsigned long tcstatus; | 755 | unsigned long tcstatus; |
@@ -921,7 +889,7 @@ void smtc_timer_broadcast(int vpe) | |||
921 | * interrupts. | 889 | * interrupts. |
922 | */ | 890 | */ |
923 | 891 | ||
924 | static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; | 892 | static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; |
925 | 893 | ||
926 | static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | 894 | static irqreturn_t ipi_interrupt(int irq, void *dev_idm) |
927 | { | 895 | { |
@@ -1000,7 +968,7 @@ static void ipi_irq_dispatch(void) | |||
1000 | 968 | ||
1001 | static struct irqaction irq_ipi; | 969 | static struct irqaction irq_ipi; |
1002 | 970 | ||
1003 | void setup_cross_vpe_interrupts(void) | 971 | static void setup_cross_vpe_interrupts(void) |
1004 | { | 972 | { |
1005 | if (!cpu_has_vint) | 973 | if (!cpu_has_vint) |
1006 | panic("SMTC Kernel requires Vectored Interupt support"); | 974 | panic("SMTC Kernel requires Vectored Interupt support"); |
@@ -1191,7 +1159,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1191 | * It would be nice to be able to use a spinlock here, | 1159 | * It would be nice to be able to use a spinlock here, |
1192 | * but this is invoked from within TLB flush routines | 1160 | * but this is invoked from within TLB flush routines |
1193 | * that protect themselves with DVPE, so if a lock is | 1161 | * that protect themselves with DVPE, so if a lock is |
1194 | * held by another TC, it'll never be freed. | 1162 | * held by another TC, it'll never be freed. |
1195 | * | 1163 | * |
1196 | * DVPE/DMT must not be done with interrupts enabled, | 1164 | * DVPE/DMT must not be done with interrupts enabled, |
1197 | * so even so most callers will already have disabled | 1165 | * so even so most callers will already have disabled |
@@ -1296,7 +1264,7 @@ void smtc_flush_tlb_asid(unsigned long asid) | |||
1296 | * Support for single-threading cache flush operations. | 1264 | * Support for single-threading cache flush operations. |
1297 | */ | 1265 | */ |
1298 | 1266 | ||
1299 | int halt_state_save[NR_CPUS]; | 1267 | static int halt_state_save[NR_CPUS]; |
1300 | 1268 | ||
1301 | /* | 1269 | /* |
1302 | * To really, really be sure that nothing is being done | 1270 | * To really, really be sure that nothing is being done |
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c index 6c2406a93f2b..93a148486f88 100644 --- a/arch/mips/kernel/sysirix.c +++ b/arch/mips/kernel/sysirix.c | |||
@@ -669,7 +669,7 @@ asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name, | |||
669 | 669 | ||
670 | struct irix_statfs { | 670 | struct irix_statfs { |
671 | short f_type; | 671 | short f_type; |
672 | long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; | 672 | long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; |
673 | char f_fname[6], f_fpack[6]; | 673 | char f_fname[6], f_fpack[6]; |
674 | }; | 674 | }; |
675 | 675 | ||
@@ -959,7 +959,7 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin) | |||
959 | 959 | ||
960 | fn = default_llseek; | 960 | fn = default_llseek; |
961 | if (file->f_op && file->f_op->llseek) | 961 | if (file->f_op && file->f_op->llseek) |
962 | fn = file->f_op->llseek; | 962 | fn = file->f_op->llseek; |
963 | lock_kernel(); | 963 | lock_kernel(); |
964 | retval = fn(file, offset, origin); | 964 | retval = fn(file, offset, origin); |
965 | unlock_kernel(); | 965 | unlock_kernel(); |
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 8aa544f73a5e..545fcbc8cea2 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c | |||
@@ -455,8 +455,3 @@ EXPORT_SYMBOL(rtc_lock); | |||
455 | EXPORT_SYMBOL(to_tm); | 455 | EXPORT_SYMBOL(to_tm); |
456 | EXPORT_SYMBOL(rtc_mips_set_time); | 456 | EXPORT_SYMBOL(rtc_mips_set_time); |
457 | EXPORT_SYMBOL(rtc_mips_get_time); | 457 | EXPORT_SYMBOL(rtc_mips_get_time); |
458 | |||
459 | unsigned long long sched_clock(void) | ||
460 | { | ||
461 | return (unsigned long long)jiffies*(1000000000/HZ); | ||
462 | } | ||
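The time.c hunk drops the MIPS-private sched_clock(). The removed body is a plain jiffies-to-nanoseconds conversion, which the core kernel presumably already supplies as a weak default, so deleting the arch override does not change behaviour. A sketch of that fallback, stated as an assumption about the generic code rather than a quotation of it:

	/* Assumed shape of the generic weak fallback that replaces the
	 * deleted MIPS override. */
	#include <linux/jiffies.h>

	unsigned long long __attribute__((weak)) sched_clock(void)
	{
		return (unsigned long long)jiffies * (1000000000 / HZ);
	}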
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index cecff24cc972..c76b793310c2 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -113,10 +113,12 @@ SECTIONS | |||
113 | references from .rodata */ | 113 | references from .rodata */ |
114 | .exit.text : { *(.exit.text) } | 114 | .exit.text : { *(.exit.text) } |
115 | .exit.data : { *(.exit.data) } | 115 | .exit.data : { *(.exit.data) } |
116 | #if defined(CONFIG_BLK_DEV_INITRD) | ||
116 | . = ALIGN(_PAGE_SIZE); | 117 | . = ALIGN(_PAGE_SIZE); |
117 | __initramfs_start = .; | 118 | __initramfs_start = .; |
118 | .init.ramfs : { *(.init.ramfs) } | 119 | .init.ramfs : { *(.init.ramfs) } |
119 | __initramfs_end = .; | 120 | __initramfs_end = .; |
121 | #endif | ||
120 | . = ALIGN(32); | 122 | . = ALIGN(32); |
121 | __per_cpu_start = .; | 123 | __per_cpu_start = .; |
122 | .data.percpu : { *(.data.percpu) } | 124 | .data.percpu : { *(.data.percpu) } |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 458fccf87c54..9aca871a307f 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -29,6 +29,7 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
32 | #include <linux/device.h> | ||
32 | #include <linux/module.h> | 33 | #include <linux/module.h> |
33 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
34 | #include <linux/init.h> | 35 | #include <linux/init.h> |
@@ -48,6 +49,7 @@ | |||
48 | #include <asm/cacheflush.h> | 49 | #include <asm/cacheflush.h> |
49 | #include <asm/atomic.h> | 50 | #include <asm/atomic.h> |
50 | #include <asm/cpu.h> | 51 | #include <asm/cpu.h> |
52 | #include <asm/mips_mt.h> | ||
51 | #include <asm/processor.h> | 53 | #include <asm/processor.h> |
52 | #include <asm/system.h> | 54 | #include <asm/system.h> |
53 | #include <asm/vpe.h> | 55 | #include <asm/vpe.h> |
@@ -64,6 +66,7 @@ typedef void *vpe_handle; | |||
64 | 66 | ||
65 | static char module_name[] = "vpe"; | 67 | static char module_name[] = "vpe"; |
66 | static int major; | 68 | static int major; |
69 | static const int minor = 1; /* fixed for now */ | ||
67 | 70 | ||
68 | #ifdef CONFIG_MIPS_APSP_KSPD | 71 | #ifdef CONFIG_MIPS_APSP_KSPD |
69 | static struct kspd_notifications kspd_events; | 72 | static struct kspd_notifications kspd_events; |
@@ -522,7 +525,7 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | |||
522 | }; | 525 | }; |
523 | 526 | ||
524 | static char *rstrs[] = { | 527 | static char *rstrs[] = { |
525 | [R_MIPS_NONE] = "MIPS_NONE", | 528 | [R_MIPS_NONE] = "MIPS_NONE", |
526 | [R_MIPS_32] = "MIPS_32", | 529 | [R_MIPS_32] = "MIPS_32", |
527 | [R_MIPS_26] = "MIPS_26", | 530 | [R_MIPS_26] = "MIPS_26", |
528 | [R_MIPS_HI16] = "MIPS_HI16", | 531 | [R_MIPS_HI16] = "MIPS_HI16", |
@@ -695,7 +698,7 @@ static void dump_tclist(void) | |||
695 | } | 698 | } |
696 | 699 | ||
697 | /* We are prepared so configure and start the VPE... */ | 700 | /* We are prepared so configure and start the VPE... */ |
698 | int vpe_run(struct vpe * v) | 701 | static int vpe_run(struct vpe * v) |
699 | { | 702 | { |
700 | struct vpe_notifications *n; | 703 | struct vpe_notifications *n; |
701 | unsigned long val, dmt_flag; | 704 | unsigned long val, dmt_flag; |
@@ -713,16 +716,16 @@ int vpe_run(struct vpe * v) | |||
713 | dvpe(); | 716 | dvpe(); |
714 | 717 | ||
715 | if (!list_empty(&v->tc)) { | 718 | if (!list_empty(&v->tc)) { |
716 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { | 719 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { |
717 | printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", | 720 | printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", |
718 | t->index); | 721 | t->index); |
719 | return -ENOEXEC; | 722 | return -ENOEXEC; |
720 | } | 723 | } |
721 | } else { | 724 | } else { |
722 | printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", | 725 | printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", |
723 | v->minor); | 726 | v->minor); |
724 | return -ENOEXEC; | 727 | return -ENOEXEC; |
725 | } | 728 | } |
726 | 729 | ||
727 | /* Put MVPE's into 'configuration state' */ | 730 | /* Put MVPE's into 'configuration state' */ |
728 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 731 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
@@ -775,14 +778,14 @@ int vpe_run(struct vpe * v) | |||
775 | 778 | ||
776 | back_to_back_c0_hazard(); | 779 | back_to_back_c0_hazard(); |
777 | 780 | ||
778 | /* Set up the XTC bit in vpeconf0 to point at our tc */ | 781 | /* Set up the XTC bit in vpeconf0 to point at our tc */ |
779 | write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) | 782 | write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) |
780 | | (t->index << VPECONF0_XTC_SHIFT)); | 783 | | (t->index << VPECONF0_XTC_SHIFT)); |
781 | 784 | ||
782 | back_to_back_c0_hazard(); | 785 | back_to_back_c0_hazard(); |
783 | 786 | ||
784 | /* enable this VPE */ | 787 | /* enable this VPE */ |
785 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | 788 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); |
786 | 789 | ||
787 | /* clear out any left overs from a previous program */ | 790 | /* clear out any left overs from a previous program */ |
788 | write_vpe_c0_status(0); | 791 | write_vpe_c0_status(0); |
@@ -832,7 +835,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, | |||
832 | * contents of the program (p)buffer performing relocatations/etc, free's it | 835 | * contents of the program (p)buffer performing relocatations/etc, free's it |
833 | * when finished. | 836 | * when finished. |
834 | */ | 837 | */ |
835 | int vpe_elfload(struct vpe * v) | 838 | static int vpe_elfload(struct vpe * v) |
836 | { | 839 | { |
837 | Elf_Ehdr *hdr; | 840 | Elf_Ehdr *hdr; |
838 | Elf_Shdr *sechdrs; | 841 | Elf_Shdr *sechdrs; |
@@ -1205,7 +1208,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer, | |||
1205 | return ret; | 1208 | return ret; |
1206 | } | 1209 | } |
1207 | 1210 | ||
1208 | static struct file_operations vpe_fops = { | 1211 | static const struct file_operations vpe_fops = { |
1209 | .owner = THIS_MODULE, | 1212 | .owner = THIS_MODULE, |
1210 | .open = vpe_open, | 1213 | .open = vpe_open, |
1211 | .release = vpe_release, | 1214 | .release = vpe_release, |
@@ -1365,12 +1368,15 @@ static void kspd_sp_exit( int sp_id) | |||
1365 | } | 1368 | } |
1366 | #endif | 1369 | #endif |
1367 | 1370 | ||
1371 | static struct device *vpe_dev; | ||
1372 | |||
1368 | static int __init vpe_module_init(void) | 1373 | static int __init vpe_module_init(void) |
1369 | { | 1374 | { |
1370 | struct vpe *v = NULL; | 1375 | struct vpe *v = NULL; |
1376 | struct device *dev; | ||
1371 | struct tc *t; | 1377 | struct tc *t; |
1372 | unsigned long val; | 1378 | unsigned long val; |
1373 | int i; | 1379 | int i, err; |
1374 | 1380 | ||
1375 | if (!cpu_has_mipsmt) { | 1381 | if (!cpu_has_mipsmt) { |
1376 | printk("VPE loader: not a MIPS MT capable processor\n"); | 1382 | printk("VPE loader: not a MIPS MT capable processor\n"); |
@@ -1383,6 +1389,14 @@ static int __init vpe_module_init(void) | |||
1383 | return major; | 1389 | return major; |
1384 | } | 1390 | } |
1385 | 1391 | ||
1392 | dev = device_create(mt_class, NULL, MKDEV(major, minor), | ||
1393 | "tc%d", minor); | ||
1394 | if (IS_ERR(dev)) { | ||
1395 | err = PTR_ERR(dev); | ||
1396 | goto out_chrdev; | ||
1397 | } | ||
1398 | vpe_dev = dev; | ||
1399 | |||
1386 | dmt(); | 1400 | dmt(); |
1387 | dvpe(); | 1401 | dvpe(); |
1388 | 1402 | ||
@@ -1478,6 +1492,11 @@ static int __init vpe_module_init(void) | |||
1478 | kspd_events.kspd_sp_exit = kspd_sp_exit; | 1492 | kspd_events.kspd_sp_exit = kspd_sp_exit; |
1479 | #endif | 1493 | #endif |
1480 | return 0; | 1494 | return 0; |
1495 | |||
1496 | out_chrdev: | ||
1497 | unregister_chrdev(major, module_name); | ||
1498 | |||
1499 | return err; | ||
1481 | } | 1500 | } |
1482 | 1501 | ||
1483 | static void __exit vpe_module_exit(void) | 1502 | static void __exit vpe_module_exit(void) |
@@ -1490,6 +1509,7 @@ static void __exit vpe_module_exit(void) | |||
1490 | } | 1509 | } |
1491 | } | 1510 | } |
1492 | 1511 | ||
1512 | device_destroy(mt_class, MKDEV(major, minor)); | ||
1493 | unregister_chrdev(major, module_name); | 1513 | unregister_chrdev(major, module_name); |
1494 | } | 1514 | } |
1495 | 1515 | ||
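The vpe.c changes give the loader a proper device node: vpe_module_init() now creates a "tc%d" device on mt_class after the character device is registered, unwinds the chrdev registration if device_create() fails, and vpe_module_exit() destroys the device before unregistering. A condensed sketch of the resulting init/exit pairing; the register_chrdev() call and everything behind the ellipsis comments are assumed from context rather than shown in the hunks:

	/* Condensed sketch of the init/exit pairing these hunks establish;
	 * headers, MT/VPE probing and per-VPE bookkeeping are elided. */
	static int __init vpe_module_init(void)
	{
		struct device *dev;
		int err;

		major = register_chrdev(0, module_name, &vpe_fops);	/* assumed from context */
		if (major < 0)
			return major;

		dev = device_create(mt_class, NULL, MKDEV(major, minor), "tc%d", minor);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out_chrdev;
		}
		vpe_dev = dev;

		/* ... MT/VPE configuration as in the full function ... */
		return 0;

	out_chrdev:
		unregister_chrdev(major, module_name);
		return err;
	}

	static void __exit vpe_module_exit(void)
	{
		/* ... per-VPE cleanup ... */
		device_destroy(mt_class, MKDEV(major, minor));
		unregister_chrdev(major, module_name);
	}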