diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm/kernel |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/arm/kernel')
35 files changed, 12731 insertions, 0 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile new file mode 100644 index 000000000000..07a56ff61494 --- /dev/null +++ b/arch/arm/kernel/Makefile | |||
@@ -0,0 +1,38 @@ | |||
#
# Makefile for the linux kernel.
#

# head.o must know the link-time text/data addresses of the kernel image.
AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR)

# Object file lists.

obj-y		:= arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
		   process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
		   time.o traps.o

obj-$(CONFIG_APM)	+= apm.o
obj-$(CONFIG_ARCH_ACORN) += ecard.o
obj-$(CONFIG_FOOTBRIDGE) += isa.o
obj-$(CONFIG_FIQ)	+= fiq.o
obj-$(CONFIG_MODULES)	+= armksyms.o module.o
obj-$(CONFIG_ARTHUR)	+= arthur.o
obj-$(CONFIG_ISA_DMA)	+= dma-isa.o
obj-$(CONFIG_PCI)	+= bios32.o
obj-$(CONFIG_SMP)	+= smp.o

obj-$(CONFIG_IWMMXT)	+= iwmmxt.o
# iwmmxt.o contains iWMMXt coprocessor instructions; tell gas to accept them.
AFLAGS_iwmmxt.o	:= -Wa,-mcpu=iwmmxt

# EBSA110 supplies its own I/O accessors instead of the generic io.o.
ifneq ($(CONFIG_ARCH_EBSA110),y)
  obj-y		+= io.o
endif

head-y			:= head.o
obj-$(CONFIG_DEBUG_LL)	+= debug.o

# Built for the image but linked separately from obj-y.
extra-y := $(head-y) init_task.o vmlinux.lds

# Spell out some dependencies that aren't automatically figured out
$(obj)/entry-armv.o:	$(obj)/entry-header.S include/asm-arm/constants.h
$(obj)/entry-common.o:	$(obj)/entry-header.S include/asm-arm/constants.h \
			$(obj)/calls.S
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c new file mode 100644 index 000000000000..b0bbd1e62ebb --- /dev/null +++ b/arch/arm/kernel/apm.c | |||
@@ -0,0 +1,610 @@ | |||
1 | /* | ||
2 | * bios-less APM driver for ARM Linux | ||
3 | * Jamey Hicks <jamey@crl.dec.com> | ||
4 | * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com) | ||
5 | * | ||
6 | * APM 1.2 Reference: | ||
7 | * Intel Corporation, Microsoft Corporation. Advanced Power Management | ||
8 | * (APM) BIOS Interface Specification, Revision 1.2, February 1996. | ||
9 | * | ||
10 | * [This document is available from Microsoft at: | ||
11 | * http://www.microsoft.com/hwdev/busbios/amp_12.htm] | ||
12 | */ | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/poll.h> | ||
16 | #include <linux/timer.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/proc_fs.h> | ||
19 | #include <linux/miscdevice.h> | ||
20 | #include <linux/apm_bios.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/pm.h> | ||
23 | #include <linux/device.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/completion.h> | ||
28 | |||
29 | #include <asm/apm.h> /* apm_power_info */ | ||
30 | #include <asm/system.h> | ||
31 | |||
32 | /* | ||
33 | * The apm_bios device is one of the misc char devices. | ||
34 | * This is its minor number. | ||
35 | */ | ||
36 | #define APM_MINOR_DEV 134 | ||
37 | |||
38 | /* | ||
39 | * See Documentation/Config.help for the configuration options. | ||
40 | * | ||
41 | * Various options can be changed at boot time as follows: | ||
42 | * (We allow underscores for compatibility with the modules code) | ||
43 | * apm=on/off enable/disable APM | ||
44 | */ | ||
45 | |||
46 | /* | ||
47 | * Maximum number of events stored | ||
48 | */ | ||
49 | #define APM_MAX_EVENTS 16 | ||
50 | |||
/*
 * Fixed-size circular buffer of pending APM events.  event_head is the
 * slot of the most recently queued event, event_tail the slot before
 * the oldest unread one; head == tail means empty (see queue_empty()).
 */
struct apm_queue {
	unsigned int		event_head;
	unsigned int		event_tail;
	apm_event_t		events[APM_MAX_EVENTS];
};
56 | |||
57 | /* | ||
58 | * The per-file APM data | ||
59 | */ | ||
/*
 * The per-file APM data
 */
struct apm_user {
	struct list_head	list;		/* link in apm_user_list */

	unsigned int		suser: 1;	/* opener had CAP_SYS_ADMIN */
	unsigned int		writer: 1;	/* opened for write */
	unsigned int		reader: 1;	/* opened for read */

	int			suspend_result;	/* pm_suspend() result handed back via ioctl */
	unsigned int		suspend_state;	/* one of the SUSPEND_* values below */
#define SUSPEND_NONE	0		/* no suspend pending */
#define SUSPEND_PENDING	1		/* suspend pending read */
#define SUSPEND_READ	2		/* suspend read, pending ack */
#define SUSPEND_ACKED	3		/* suspend acked */
#define SUSPEND_DONE	4		/* suspend completed */

	struct apm_queue	queue;		/* events destined for this opener */
};
77 | |||
/*
 * Local variables
 */
static int suspends_pending;		/* writers that still owe a suspend ack */
static int apm_disabled;		/* set by "apm=off" on the command line */

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);		/* readers/pollers sleep here */
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);	/* suspend ackers sleep here */

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info.  kapmd provides us a process context to handle
 * "APM" events within - specifically necessary if we're going
 * to be suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DECLARE_COMPLETION(kapmd_exit);
static DEFINE_SPINLOCK(kapmd_queue_lock);	/* protects kapmd_queue */
static struct apm_queue kapmd_queue;


static const char driver_version[] = "1.13";	/* no spaces */
105 | |||
106 | |||
107 | |||
/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
/* Default no-op: leaves the caller's pre-filled "unknown" values alone. */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);
121 | |||
122 | |||
123 | /* | ||
124 | * APM event queue management. | ||
125 | */ | ||
126 | static inline int queue_empty(struct apm_queue *q) | ||
127 | { | ||
128 | return q->event_head == q->event_tail; | ||
129 | } | ||
130 | |||
131 | static inline apm_event_t queue_get_event(struct apm_queue *q) | ||
132 | { | ||
133 | q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; | ||
134 | return q->events[q->event_tail]; | ||
135 | } | ||
136 | |||
137 | static void queue_add_event(struct apm_queue *q, apm_event_t event) | ||
138 | { | ||
139 | q->event_head = (q->event_head + 1) % APM_MAX_EVENTS; | ||
140 | if (q->event_head == q->event_tail) { | ||
141 | static int notified; | ||
142 | |||
143 | if (notified++ == 0) | ||
144 | printk(KERN_ERR "apm: an event queue overflowed\n"); | ||
145 | q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; | ||
146 | } | ||
147 | q->events[q->event_head] = event; | ||
148 | } | ||
149 | |||
150 | static void queue_event_one_user(struct apm_user *as, apm_event_t event) | ||
151 | { | ||
152 | if (as->suser && as->writer) { | ||
153 | switch (event) { | ||
154 | case APM_SYS_SUSPEND: | ||
155 | case APM_USER_SUSPEND: | ||
156 | /* | ||
157 | * If this user already has a suspend pending, | ||
158 | * don't queue another one. | ||
159 | */ | ||
160 | if (as->suspend_state != SUSPEND_NONE) | ||
161 | return; | ||
162 | |||
163 | as->suspend_state = SUSPEND_PENDING; | ||
164 | suspends_pending++; | ||
165 | break; | ||
166 | } | ||
167 | } | ||
168 | queue_add_event(&as->queue, event); | ||
169 | } | ||
170 | |||
/*
 * Broadcast an event to every reader except (optionally) the sender,
 * then wake anyone blocked in apm_read()/apm_poll().  Suspend events
 * also start the per-writer ack protocol (see queue_event_one_user()).
 */
static void queue_event(apm_event_t event, struct apm_user *sender)
{
	struct apm_user *as;

	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		if (as != sender && as->reader)
			queue_event_one_user(as, event);
	}
	up_read(&user_list_lock);
	wake_up_interruptible(&apm_waitqueue);
}
183 | |||
/*
 * Enter suspend-to-RAM, then notify all listeners that the system is
 * awake again and release everyone blocked waiting for the resume.
 */
static void apm_suspend(void)
{
	struct apm_user *as;
	int err = pm_suspend(PM_SUSPEND_MEM);	/* blocks until resume */

	/*
	 * Anyone on the APM queues will think we're still suspended.
	 * Send a message so everyone knows we're now awake again.
	 */
	queue_event(APM_NORMAL_RESUME, NULL);

	/*
	 * Finally, wake up anyone who is sleeping on the suspend.
	 */
	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		as->suspend_result = err;
		as->suspend_state = SUSPEND_DONE;
	}
	up_read(&user_list_lock);

	wake_up(&apm_suspend_waitqueue);
}
207 | |||
/*
 * Copy as many queued events as fit into the user buffer.
 *
 * Returns the number of bytes copied, -EINVAL if the buffer cannot
 * hold even one event, -EAGAIN for a non-blocking read of an empty
 * queue, or -EFAULT if nothing at all could be copied out.
 *
 * NOTE(review): if wait_event_interruptible() is cut short by a signal
 * while the queue is still empty, this returns 0 (looks like EOF to
 * the reader) rather than -ERESTARTSYS -- confirm this is intended.
 */
static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as = fp->private_data;
	apm_event_t event;
	int i = count, ret = 0;

	if (count < sizeof(apm_event_t))
		return -EINVAL;

	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
		return -EAGAIN;

	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
		event = queue_get_event(&as->queue);

		ret = -EFAULT;
		if (copy_to_user(buf, &event, sizeof(event)))
			break;

		/* reading a suspend event begins the ack protocol */
		if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
			as->suspend_state = SUSPEND_READ;

		buf += sizeof(event);
		i -= sizeof(event);
	}

	/* a partial transfer beats reporting a late -EFAULT */
	if (i < count)
		ret = count - i;

	return ret;
}
241 | |||
242 | static unsigned int apm_poll(struct file *fp, poll_table * wait) | ||
243 | { | ||
244 | struct apm_user *as = fp->private_data; | ||
245 | |||
246 | poll_wait(fp, &apm_waitqueue, wait); | ||
247 | return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM; | ||
248 | } | ||
249 | |||
/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 * This IOCTL is overloaded, and performs two functions.  It is used to:
 *   - initiate a suspend
 *   - acknowledge a suspend read from /dev/apm_bios.
 * Only when everyone who has opened /dev/apm_bios with write permission
 * has acknowledge does the actual suspend happen.
 */
static int
apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
{
	struct apm_user *as = filp->private_data;
	unsigned long flags;
	int err = -EINVAL;

	/* only privileged writers may drive the suspend protocol */
	if (!as->suser || !as->writer)
		return -EPERM;

	switch (cmd) {
	case APM_IOC_SUSPEND:
		/* default result if the wait below is interrupted */
		as->suspend_result = -EINTR;

		if (as->suspend_state == SUSPEND_READ) {
			/*
			 * If we read a suspend command from /dev/apm_bios,
			 * then the corresponding APM_IOC_SUSPEND ioctl is
			 * interpreted as an acknowledge.
			 */
			as->suspend_state = SUSPEND_ACKED;
			suspends_pending--;
		} else {
			/*
			 * Otherwise it is a request to suspend the system.
			 * Queue an event for all readers, and expect an
			 * acknowledge from all writers who haven't already
			 * acknowledged.
			 */
			queue_event(APM_USER_SUSPEND, as);
		}

		/*
		 * If there are no further acknowledges required, suspend
		 * the system.
		 */
		if (suspends_pending == 0)
			apm_suspend();

		/*
		 * Wait for the suspend/resume to complete.  If there are
		 * pending acknowledges, we wait here for them.
		 *
		 * Note that we need to ensure that the PM subsystem does
		 * not kick us out of the wait when it suspends the threads.
		 */
		flags = current->flags;
		current->flags |= PF_NOFREEZE;

		/*
		 * Note: do not allow a thread which is acking the suspend
		 * to escape until the resume is complete.
		 */
		if (as->suspend_state == SUSPEND_ACKED)
			wait_event(apm_suspend_waitqueue,
				   as->suspend_state == SUSPEND_DONE);
		else
			wait_event_interruptible(apm_suspend_waitqueue,
					 as->suspend_state == SUSPEND_DONE);

		/* restores the saved PF_NOFREEZE state as a side effect */
		current->flags = flags;
		err = as->suspend_result;
		as->suspend_state = SUSPEND_NONE;
		break;
	}

	return err;
}
328 | |||
/*
 * Close of a /dev/apm_bios file: unhook the per-open state and, if
 * this opener still owed a suspend acknowledge, settle it so the
 * remaining ackers are not blocked forever.
 */
static int apm_release(struct inode * inode, struct file * filp)
{
	struct apm_user *as = filp->private_data;
	filp->private_data = NULL;

	down_write(&user_list_lock);
	list_del(&as->list);
	up_write(&user_list_lock);

	/*
	 * We are now unhooked from the chain.  As far as new
	 * events are concerned, we no longer exist.  However, we
	 * need to balance suspends_pending, which means the
	 * possibility of sleeping.
	 */
	if (as->suspend_state != SUSPEND_NONE) {
		suspends_pending -= 1;
		if (suspends_pending == 0)
			apm_suspend();
	}

	kfree(as);
	return 0;
}
353 | |||
354 | static int apm_open(struct inode * inode, struct file * filp) | ||
355 | { | ||
356 | struct apm_user *as; | ||
357 | |||
358 | as = (struct apm_user *)kmalloc(sizeof(*as), GFP_KERNEL); | ||
359 | if (as) { | ||
360 | memset(as, 0, sizeof(*as)); | ||
361 | |||
362 | /* | ||
363 | * XXX - this is a tiny bit broken, when we consider BSD | ||
364 | * process accounting. If the device is opened by root, we | ||
365 | * instantly flag that we used superuser privs. Who knows, | ||
366 | * we might close the device immediately without doing a | ||
367 | * privileged operation -- cevans | ||
368 | */ | ||
369 | as->suser = capable(CAP_SYS_ADMIN); | ||
370 | as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE; | ||
371 | as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ; | ||
372 | |||
373 | down_write(&user_list_lock); | ||
374 | list_add(&as->list, &apm_user_list); | ||
375 | up_write(&user_list_lock); | ||
376 | |||
377 | filp->private_data = as; | ||
378 | } | ||
379 | |||
380 | return as ? 0 : -ENOMEM; | ||
381 | } | ||
382 | |||
/*
 * File operations for /dev/apm_bios: a read/poll-able event stream
 * plus the APM_IOC_SUSPEND ioctl.  There is deliberately no .write -
 * writers participate only through the ioctl.
 */
static struct file_operations apm_bios_fops = {
	.owner		= THIS_MODULE,
	.read		= apm_read,
	.poll		= apm_poll,
	.ioctl		= apm_ioctl,
	.open		= apm_open,
	.release	= apm_release,
};
391 | |||
/* Misc character device node "apm_bios", minor APM_MINOR_DEV (134). */
static struct miscdevice apm_device = {
	.minor		= APM_MINOR_DEV,
	.name		= "apm_bios",
	.fops		= &apm_bios_fops
};
397 | |||
398 | |||
399 | #ifdef CONFIG_PROC_FS | ||
400 | /* | ||
401 | * Arguments, with symbols from linux/apm_bios.h. | ||
402 | * | ||
403 | * 0) Linux driver version (this will change if format changes) | ||
404 | * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2. | ||
405 | * 2) APM flags from APM Installation Check (0x00): | ||
406 | * bit 0: APM_16_BIT_SUPPORT | ||
407 | * bit 1: APM_32_BIT_SUPPORT | ||
408 | * bit 2: APM_IDLE_SLOWS_CLOCK | ||
409 | * bit 3: APM_BIOS_DISABLED | ||
410 | * bit 4: APM_BIOS_DISENGAGED | ||
411 | * 3) AC line status | ||
412 | * 0x00: Off-line | ||
413 | * 0x01: On-line | ||
414 | * 0x02: On backup power (BIOS >= 1.1 only) | ||
415 | * 0xff: Unknown | ||
416 | * 4) Battery status | ||
417 | * 0x00: High | ||
418 | * 0x01: Low | ||
419 | * 0x02: Critical | ||
420 | * 0x03: Charging | ||
421 | * 0x04: Selected battery not present (BIOS >= 1.2 only) | ||
422 | * 0xff: Unknown | ||
423 | * 5) Battery flag | ||
424 | * bit 0: High | ||
425 | * bit 1: Low | ||
426 | * bit 2: Critical | ||
427 | * bit 3: Charging | ||
428 | * bit 7: No system battery | ||
429 | * 0xff: Unknown | ||
430 | * 6) Remaining battery life (percentage of charge): | ||
431 | * 0-100: valid | ||
432 | * -1: Unknown | ||
433 | * 7) Remaining battery life (time units): | ||
434 | * Number of remaining minutes or seconds | ||
435 | * -1: Unknown | ||
436 | * 8) min = minutes; sec = seconds | ||
437 | */ | ||
438 | static int apm_get_info(char *buf, char **start, off_t fpos, int length) | ||
439 | { | ||
440 | struct apm_power_info info; | ||
441 | char *units; | ||
442 | int ret; | ||
443 | |||
444 | info.ac_line_status = 0xff; | ||
445 | info.battery_status = 0xff; | ||
446 | info.battery_flag = 0xff; | ||
447 | info.battery_life = -1; | ||
448 | info.time = -1; | ||
449 | info.units = -1; | ||
450 | |||
451 | if (apm_get_power_status) | ||
452 | apm_get_power_status(&info); | ||
453 | |||
454 | switch (info.units) { | ||
455 | default: units = "?"; break; | ||
456 | case 0: units = "min"; break; | ||
457 | case 1: units = "sec"; break; | ||
458 | } | ||
459 | |||
460 | ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", | ||
461 | driver_version, APM_32_BIT_SUPPORT, | ||
462 | info.ac_line_status, info.battery_status, | ||
463 | info.battery_flag, info.battery_life, | ||
464 | info.time, units); | ||
465 | |||
466 | return ret; | ||
467 | } | ||
468 | #endif | ||
469 | |||
/*
 * Kernel thread: drains kapmd_queue (fed by apm_queue_event()) and
 * acts on each event in process context, which is required because
 * some events lead to a system suspend.
 */
static int kapmd(void *arg)
{
	daemonize("kapmd");
	current->flags |= PF_NOFREEZE;	/* must keep running across a suspend */

	do {
		apm_event_t event;

		wait_event_interruptible(kapmd_wait,
				!queue_empty(&kapmd_queue) || !pm_active);

		/* pm_active is cleared on module unload / init failure */
		if (!pm_active)
			break;

		spin_lock_irq(&kapmd_queue_lock);
		event = 0;
		if (!queue_empty(&kapmd_queue))
			event = queue_get_event(&kapmd_queue);
		spin_unlock_irq(&kapmd_queue_lock);

		switch (event) {
		case 0:
			/* spurious wakeup - nothing was queued */
			break;

		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			/* informational: just fan out to readers */
			queue_event(event, NULL);
			break;

		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			queue_event(event, NULL);
			/* suspend now unless a writer still owes an ack */
			if (suspends_pending == 0)
				apm_suspend();
			break;

		case APM_CRITICAL_SUSPEND:
			/* no negotiation with userspace - go down immediately */
			apm_suspend();
			break;
		}
	} while (1);

	complete_and_exit(&kapmd_exit, 0);
}
514 | |||
/*
 * Module init: start the kapmd thread, register /proc/apm and the
 * misc device.  Refuses to load when "apm=off" was given or another
 * power-management implementation already claimed pm_active.
 */
static int __init apm_init(void)
{
	int ret;

	if (apm_disabled) {
		printk(KERN_NOTICE "apm: disabled on user request.\n");
		return -ENODEV;
	}

	if (PM_IS_ACTIVE()) {
		printk(KERN_NOTICE "apm: overridden by ACPI.\n");
		return -EINVAL;
	}

	pm_active = 1;

	ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
	if (ret < 0) {
		pm_active = 0;
		return ret;
	}

#ifdef CONFIG_PROC_FS
	create_proc_info_entry("apm", 0, NULL, apm_get_info);
#endif

	ret = misc_register(&apm_device);
	if (ret != 0) {
		remove_proc_entry("apm", NULL);

		/* undo: signal kapmd to stop and wait until it has exited */
		pm_active = 0;
		wake_up(&kapmd_wait);
		wait_for_completion(&kapmd_exit);
	}

	return ret;
}
552 | |||
/*
 * Module exit: tear down the device and /proc entry, then stop the
 * kapmd thread and wait for it to finish before the text goes away.
 */
static void __exit apm_exit(void)
{
	misc_deregister(&apm_device);
	remove_proc_entry("apm", NULL);

	pm_active = 0;
	wake_up(&kapmd_wait);
	wait_for_completion(&kapmd_exit);
}
562 | |||
563 | module_init(apm_init); | ||
564 | module_exit(apm_exit); | ||
565 | |||
566 | MODULE_AUTHOR("Stephen Rothwell"); | ||
567 | MODULE_DESCRIPTION("Advanced Power Management"); | ||
568 | MODULE_LICENSE("GPL"); | ||
569 | |||
570 | #ifndef MODULE | ||
571 | static int __init apm_setup(char *str) | ||
572 | { | ||
573 | while ((str != NULL) && (*str != '\0')) { | ||
574 | if (strncmp(str, "off", 3) == 0) | ||
575 | apm_disabled = 1; | ||
576 | if (strncmp(str, "on", 2) == 0) | ||
577 | apm_disabled = 0; | ||
578 | str = strchr(str, ','); | ||
579 | if (str != NULL) | ||
580 | str += strspn(str, ", \t"); | ||
581 | } | ||
582 | return 1; | ||
583 | } | ||
584 | |||
585 | __setup("apm=", apm_setup); | ||
586 | #endif | ||
587 | |||
/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action.  Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
	unsigned long flags;

	/* irqsave locking so this entry point works from any context */
	spin_lock_irqsave(&kapmd_queue_lock, flags);
	queue_add_event(&kapmd_queue, event);
	spin_unlock_irqrestore(&kapmd_queue_lock, flags);

	wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);
diff --git a/arch/arm/kernel/arch.c b/arch/arm/kernel/arch.c new file mode 100644 index 000000000000..4e02fbeb10a6 --- /dev/null +++ b/arch/arm/kernel/arch.c | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/arch.c | ||
3 | * | ||
4 | * Architecture specific fixups. | ||
5 | */ | ||
6 | #include <linux/config.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/types.h> | ||
9 | |||
10 | #include <asm/elf.h> | ||
11 | #include <asm/page.h> | ||
12 | #include <asm/setup.h> | ||
13 | #include <asm/mach/arch.h> | ||
14 | |||
15 | unsigned int vram_size; | ||
16 | |||
17 | #ifdef CONFIG_ARCH_ACORN | ||
18 | |||
19 | unsigned int memc_ctrl_reg; | ||
20 | unsigned int number_mfm_drives; | ||
21 | |||
/*
 * Record Acorn-specific boot tag values: MEMC control register, MFM
 * drive count, and VRAM size (vram_pages is either 256 or 512 pages;
 * each case adds one 256-page chunk, with 512 falling through to add
 * a second).
 */
static int __init parse_tag_acorn(const struct tag *tag)
{
	memc_ctrl_reg = tag->u.acorn.memc_control_reg;
	number_mfm_drives = tag->u.acorn.adfsdrives;

	switch (tag->u.acorn.vram_pages) {
	case 512:
		vram_size += PAGE_SIZE * 256;
		/* fall through */
	case 256:
		vram_size += PAGE_SIZE * 256;
	default:
		break;
	}
#if 0
	if (vram_size) {
		desc->video_start = 0x02000000;
		desc->video_end   = 0x02000000 + vram_size;
	}
#endif
	return 0;
}

__tagtable(ATAG_ACORN, parse_tag_acorn);
45 | |||
46 | #endif | ||
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c new file mode 100644 index 000000000000..4c38bd8bc298 --- /dev/null +++ b/arch/arm/kernel/armksyms.c | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/armksyms.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/in6.h> | ||
14 | #include <linux/syscalls.h> | ||
15 | |||
16 | #include <asm/checksum.h> | ||
17 | #include <asm/io.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <asm/uaccess.h> | ||
20 | |||
21 | /* | ||
22 | * libgcc functions - functions that are used internally by the | ||
23 | * compiler... (prototypes are not correct though, but that | ||
24 | * doesn't really matter since they're not versioned). | ||
25 | */ | ||
26 | extern void __ashldi3(void); | ||
27 | extern void __ashrdi3(void); | ||
28 | extern void __divsi3(void); | ||
29 | extern void __lshrdi3(void); | ||
30 | extern void __modsi3(void); | ||
31 | extern void __muldi3(void); | ||
32 | extern void __ucmpdi2(void); | ||
33 | extern void __udivdi3(void); | ||
34 | extern void __umoddi3(void); | ||
35 | extern void __udivmoddi4(void); | ||
36 | extern void __udivsi3(void); | ||
37 | extern void __umodsi3(void); | ||
38 | extern void __do_div64(void); | ||
39 | |||
40 | extern void fpundefinstr(void); | ||
41 | extern void fp_enter(void); | ||
42 | |||
/*
 * This has a special calling convention; it doesn't
 * modify any of the usual registers, except for LR.
 */
/*
 * Export 'orig' to modules under the alternate name 'sym' by emitting
 * by hand the __ksymtab entry that EXPORT_SYMBOL() would generate.
 */
#define EXPORT_SYMBOL_ALIAS(sym,orig)		\
 const struct kernel_symbol __ksymtab_##sym	\
  __attribute__((section("__ksymtab"))) =	\
    { (unsigned long)&orig, #sym };

/*
 * floating point math emulator support.
 * These symbols will never change their calling convention...
 */
EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter);
EXPORT_SYMBOL_ALIAS(fp_printk,printk);
EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig);
60 | EXPORT_SYMBOL(__backtrace); | ||
61 | |||
62 | /* platform dependent support */ | ||
63 | EXPORT_SYMBOL(__udelay); | ||
64 | EXPORT_SYMBOL(__const_udelay); | ||
65 | |||
66 | /* networking */ | ||
67 | EXPORT_SYMBOL(csum_partial); | ||
68 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | ||
69 | EXPORT_SYMBOL(__csum_ipv6_magic); | ||
70 | |||
71 | /* io */ | ||
72 | #ifndef __raw_readsb | ||
73 | EXPORT_SYMBOL(__raw_readsb); | ||
74 | #endif | ||
75 | #ifndef __raw_readsw | ||
76 | EXPORT_SYMBOL(__raw_readsw); | ||
77 | #endif | ||
78 | #ifndef __raw_readsl | ||
79 | EXPORT_SYMBOL(__raw_readsl); | ||
80 | #endif | ||
81 | #ifndef __raw_writesb | ||
82 | EXPORT_SYMBOL(__raw_writesb); | ||
83 | #endif | ||
84 | #ifndef __raw_writesw | ||
85 | EXPORT_SYMBOL(__raw_writesw); | ||
86 | #endif | ||
87 | #ifndef __raw_writesl | ||
88 | EXPORT_SYMBOL(__raw_writesl); | ||
89 | #endif | ||
90 | |||
91 | /* string / mem functions */ | ||
92 | EXPORT_SYMBOL(strcpy); | ||
93 | EXPORT_SYMBOL(strncpy); | ||
94 | EXPORT_SYMBOL(strcat); | ||
95 | EXPORT_SYMBOL(strncat); | ||
96 | EXPORT_SYMBOL(strcmp); | ||
97 | EXPORT_SYMBOL(strncmp); | ||
98 | EXPORT_SYMBOL(strchr); | ||
99 | EXPORT_SYMBOL(strlen); | ||
100 | EXPORT_SYMBOL(strnlen); | ||
101 | EXPORT_SYMBOL(strpbrk); | ||
102 | EXPORT_SYMBOL(strrchr); | ||
103 | EXPORT_SYMBOL(strstr); | ||
104 | EXPORT_SYMBOL(memset); | ||
105 | EXPORT_SYMBOL(memcpy); | ||
106 | EXPORT_SYMBOL(memmove); | ||
107 | EXPORT_SYMBOL(memcmp); | ||
108 | EXPORT_SYMBOL(memscan); | ||
109 | EXPORT_SYMBOL(memchr); | ||
110 | EXPORT_SYMBOL(__memzero); | ||
111 | |||
112 | /* user mem (segment) */ | ||
113 | EXPORT_SYMBOL(__arch_copy_from_user); | ||
114 | EXPORT_SYMBOL(__arch_copy_to_user); | ||
115 | EXPORT_SYMBOL(__arch_clear_user); | ||
116 | EXPORT_SYMBOL(__arch_strnlen_user); | ||
117 | EXPORT_SYMBOL(__arch_strncpy_from_user); | ||
118 | |||
119 | EXPORT_SYMBOL(__get_user_1); | ||
120 | EXPORT_SYMBOL(__get_user_2); | ||
121 | EXPORT_SYMBOL(__get_user_4); | ||
122 | EXPORT_SYMBOL(__get_user_8); | ||
123 | |||
124 | EXPORT_SYMBOL(__put_user_1); | ||
125 | EXPORT_SYMBOL(__put_user_2); | ||
126 | EXPORT_SYMBOL(__put_user_4); | ||
127 | EXPORT_SYMBOL(__put_user_8); | ||
128 | |||
129 | /* gcc lib functions */ | ||
130 | EXPORT_SYMBOL(__ashldi3); | ||
131 | EXPORT_SYMBOL(__ashrdi3); | ||
132 | EXPORT_SYMBOL(__divsi3); | ||
133 | EXPORT_SYMBOL(__lshrdi3); | ||
134 | EXPORT_SYMBOL(__modsi3); | ||
135 | EXPORT_SYMBOL(__muldi3); | ||
136 | EXPORT_SYMBOL(__ucmpdi2); | ||
137 | EXPORT_SYMBOL(__udivdi3); | ||
138 | EXPORT_SYMBOL(__umoddi3); | ||
139 | EXPORT_SYMBOL(__udivmoddi4); | ||
140 | EXPORT_SYMBOL(__udivsi3); | ||
141 | EXPORT_SYMBOL(__umodsi3); | ||
142 | EXPORT_SYMBOL(__do_div64); | ||
143 | |||
144 | /* bitops */ | ||
145 | EXPORT_SYMBOL(_set_bit_le); | ||
146 | EXPORT_SYMBOL(_test_and_set_bit_le); | ||
147 | EXPORT_SYMBOL(_clear_bit_le); | ||
148 | EXPORT_SYMBOL(_test_and_clear_bit_le); | ||
149 | EXPORT_SYMBOL(_change_bit_le); | ||
150 | EXPORT_SYMBOL(_test_and_change_bit_le); | ||
151 | EXPORT_SYMBOL(_find_first_zero_bit_le); | ||
152 | EXPORT_SYMBOL(_find_next_zero_bit_le); | ||
153 | EXPORT_SYMBOL(_find_first_bit_le); | ||
154 | EXPORT_SYMBOL(_find_next_bit_le); | ||
155 | |||
156 | #ifdef __ARMEB__ | ||
157 | EXPORT_SYMBOL(_set_bit_be); | ||
158 | EXPORT_SYMBOL(_test_and_set_bit_be); | ||
159 | EXPORT_SYMBOL(_clear_bit_be); | ||
160 | EXPORT_SYMBOL(_test_and_clear_bit_be); | ||
161 | EXPORT_SYMBOL(_change_bit_be); | ||
162 | EXPORT_SYMBOL(_test_and_change_bit_be); | ||
163 | EXPORT_SYMBOL(_find_first_zero_bit_be); | ||
164 | EXPORT_SYMBOL(_find_next_zero_bit_be); | ||
165 | EXPORT_SYMBOL(_find_first_bit_be); | ||
166 | EXPORT_SYMBOL(_find_next_bit_be); | ||
167 | #endif | ||
168 | |||
169 | /* syscalls */ | ||
170 | EXPORT_SYMBOL(sys_write); | ||
171 | EXPORT_SYMBOL(sys_read); | ||
172 | EXPORT_SYMBOL(sys_lseek); | ||
173 | EXPORT_SYMBOL(sys_open); | ||
174 | EXPORT_SYMBOL(sys_exit); | ||
175 | EXPORT_SYMBOL(sys_wait4); | ||
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c new file mode 100644 index 000000000000..a418dad6692c --- /dev/null +++ b/arch/arm/kernel/arthur.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/arthur.c | ||
3 | * | ||
4 | * Copyright (C) 1998, 1999, 2000, 2001 Philip Blundell | ||
5 | * | ||
6 | * Arthur personality | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/personality.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/signal.h> | ||
20 | #include <linux/init.h> | ||
21 | |||
22 | #include <asm/ptrace.h> | ||
23 | |||
24 | /* Arthur doesn't have many signals, and a lot of those that it does | ||
25 | have don't map easily to any Linux equivalent. Never mind. */ | ||
26 | |||
27 | #define ARTHUR_SIGABRT 1 | ||
28 | #define ARTHUR_SIGFPE 2 | ||
29 | #define ARTHUR_SIGILL 3 | ||
30 | #define ARTHUR_SIGINT 4 | ||
31 | #define ARTHUR_SIGSEGV 5 | ||
32 | #define ARTHUR_SIGTERM 6 | ||
33 | #define ARTHUR_SIGSTAK 7 | ||
34 | #define ARTHUR_SIGUSR1 8 | ||
35 | #define ARTHUR_SIGUSR2 9 | ||
36 | #define ARTHUR_SIGOSERROR 10 | ||
37 | |||
/* Arthur -> Linux: every signal number passes through unchanged. */
static unsigned long arthur_to_linux_signals[32] = {
	0,	1,	2,	3,	4,	5,	6,	7,
	8,	9,	10,	11,	12,	13,	14,	15,
	16,	17,	18,	19,	20,	21,	22,	23,
	24,	25,	26,	27,	28,	29,	30,	31
};

/*
 * Linux -> Arthur: named entries map onto the ARTHUR_SIG* numbers
 * above; -1 marks Linux signals with no Arthur counterpart, and the
 * rest keep their own number.
 */
static unsigned long linux_to_arthur_signals[32] = {
	0,		-1,		ARTHUR_SIGINT,	-1,
	ARTHUR_SIGILL,	5,		ARTHUR_SIGABRT,	7,
	ARTHUR_SIGFPE,	9,		ARTHUR_SIGUSR1,	ARTHUR_SIGSEGV,
	ARTHUR_SIGUSR2,	13,		14,		ARTHUR_SIGTERM,
	16,		17,		18,		19,
	20,		21,		22,		23,
	24,		25,		26,		27,
	28,		29,		30,		31
};
55 | |||
56 | static void arthur_lcall7(int nr, struct pt_regs *regs) | ||
57 | { | ||
58 | struct siginfo info; | ||
59 | info.si_signo = SIGSWI; | ||
60 | info.si_errno = nr; | ||
61 | /* Bounce it to the emulator */ | ||
62 | send_sig_info(SIGSWI, &info, current); | ||
63 | } | ||
64 | |||
/*
 * Personality descriptor for RISC OS (Arthur) binaries: lcall7-style
 * traps are bounced to the user-space emulator by arthur_lcall7(),
 * and signal numbers are translated via the tables above.
 */
static struct exec_domain arthur_exec_domain = {
	.name		= "Arthur",
	.handler	= arthur_lcall7,
	.pers_low	= PER_RISCOS,
	.pers_high	= PER_RISCOS,
	.signal_map	= arthur_to_linux_signals,
	.signal_invmap	= linux_to_arthur_signals,
	.module		= THIS_MODULE,
};
74 | |||
/*
 * We could do with some locking to stop Arthur being removed while
 * processes are using it.
 */

/* Register the RISC OS personality on module load. */
static int __init arthur_init(void)
{
	return register_exec_domain(&arthur_exec_domain);
}

/* Unregister on unload; note the locking caveat above. */
static void __exit arthur_exit(void)
{
	unregister_exec_domain(&arthur_exec_domain);
}
89 | |||
90 | module_init(arthur_init); | ||
91 | module_exit(arthur_exit); | ||
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c new file mode 100644 index 000000000000..99d43259ff89 --- /dev/null +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -0,0 +1,83 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1995-2003 Russell King | ||
3 | * 2001-2002 Keith Owens | ||
4 | * | ||
5 | * Generate definitions needed by assembly language modules. | ||
6 | * This code generates raw asm output which is post-processed to extract | ||
7 | * and format the required data. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <asm/mach/arch.h> | ||
16 | #include <asm/thread_info.h> | ||
17 | #include <asm/memory.h> | ||
18 | |||
19 | /* | ||
20 | * Make sure that the compiler and target are compatible. | ||
21 | */ | ||
22 | #if defined(__APCS_26__) | ||
23 | #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 | ||
24 | #endif | ||
25 | /* | ||
26 | * GCC 2.95.1, 2.95.2: ignores register clobber list in asm(). | ||
27 | * GCC 3.0, 3.1: general bad code generation. | ||
28 | * GCC 3.2.0: incorrect function argument offset calculation. | ||
29 | * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c | ||
30 | * (http://gcc.gnu.org/PR8896) and incorrect structure | ||
31 | * initialisation in fs/jffs2/erase.c | ||
32 | */ | ||
33 | #if __GNUC__ < 2 || \ | ||
34 | (__GNUC__ == 2 && __GNUC_MINOR__ < 95) || \ | ||
35 | (__GNUC__ == 2 && __GNUC_MINOR__ == 95 && __GNUC_PATCHLEVEL__ != 0 && \ | ||
36 | __GNUC_PATCHLEVEL__ < 3) || \ | ||
37 | (__GNUC__ == 3 && __GNUC_MINOR__ < 3) | ||
38 | #error Your compiler is too buggy; it is known to miscompile kernels. | ||
39 | #error Known good compilers: 2.95.3, 2.95.4, 2.96, 3.3 | ||
40 | #endif | ||
41 | |||
/* Use marker if you need to separate the values later */

/*
 * DEFINE() emits a "->sym value symbol-name" marker line into the
 * compiler's assembly output; a build-time script extracts these
 * markers to generate asm-offsets.h.  The "i" constraint forces val
 * to be a compile-time constant.
 */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* BLANK() emits a bare "->" marker, used as a visual group separator */
#define BLANK() asm volatile("\n->" : : )
48 | |||
/*
 * Never actually run: compiled only so the DEFINE()/BLANK() asm
 * markers appear in the generated assembly for extraction.
 */
int main(void)
{
  /* task_struct offset used by the switch/entry assembly */
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
  BLANK();
  /* thread_info offsets used throughout entry-*.S */
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
  /* iWMMXt state lives inside fpstate, rounded to an 8-byte boundary */
  DEFINE(TI_IWMMXT_STATE,	(offsetof(struct thread_info, fpstate)+4)&~7);
  BLANK();
#if __LINUX_ARM_ARCH__ >= 6
  /* ARMv6+ keeps an ASID/context id in the mm */
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
  BLANK();
#endif
  /* vm_area_struct offsets used by the cache/TLB assembly helpers */
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  DEFINE(VIRT_OFFSET,		PAGE_OFFSET);
  BLANK();
  /* magic constant tested by the entry code for address exceptions */
  DEFINE(SYS_ERROR0,		0x9f0000);
  BLANK();
  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
  return 0; 
}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c new file mode 100644 index 000000000000..ad26e98f1e62 --- /dev/null +++ b/arch/arm/kernel/bios32.c | |||
@@ -0,0 +1,699 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/bios32.c | ||
3 | * | ||
4 | * PCI bios-type initialisation for PCI machines | ||
5 | * | ||
6 | * Bits taken from various places. | ||
7 | */ | ||
8 | #include <linux/config.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/io.h> | ||
16 | #include <asm/mach-types.h> | ||
17 | #include <asm/mach/pci.h> | ||
18 | |||
19 | static int debug_pci; | ||
20 | static int use_firmware; | ||
21 | |||
22 | /* | ||
23 | * We can't use pci_find_device() here since we are | ||
24 | * called from interrupt context. | ||
25 | */ | ||
26 | static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn) | ||
27 | { | ||
28 | struct pci_dev *dev; | ||
29 | |||
30 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
31 | u16 status; | ||
32 | |||
33 | /* | ||
34 | * ignore host bridge - we handle | ||
35 | * that separately | ||
36 | */ | ||
37 | if (dev->bus->number == 0 && dev->devfn == 0) | ||
38 | continue; | ||
39 | |||
40 | pci_read_config_word(dev, PCI_STATUS, &status); | ||
41 | if (status == 0xffff) | ||
42 | continue; | ||
43 | |||
44 | if ((status & status_mask) == 0) | ||
45 | continue; | ||
46 | |||
47 | /* clear the status errors */ | ||
48 | pci_write_config_word(dev, PCI_STATUS, status & status_mask); | ||
49 | |||
50 | if (warn) | ||
51 | printk("(%s: %04X) ", pci_name(dev), status); | ||
52 | } | ||
53 | |||
54 | list_for_each_entry(dev, &bus->devices, bus_list) | ||
55 | if (dev->subordinate) | ||
56 | pcibios_bus_report_status(dev->subordinate, status_mask, warn); | ||
57 | } | ||
58 | |||
59 | void pcibios_report_status(u_int status_mask, int warn) | ||
60 | { | ||
61 | struct list_head *l; | ||
62 | |||
63 | list_for_each(l, &pci_root_buses) { | ||
64 | struct pci_bus *bus = pci_bus_b(l); | ||
65 | |||
66 | pcibios_bus_report_status(bus, status_mask, warn); | ||
67 | } | ||
68 | } | ||
69 | |||
/*
 * We don't use this to fix the device, but initialisation of it.
 * It's not the correct use for this, but it works.
 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
 * the following area:
 * 1. park on CPU
 * 2. ISA bridge ping-pong
 * 3. ISA bridge master handling of target RETRY
 *
 * Bug 3 is responsible for the sound DMA grinding to a halt.  We now
 * live with bug 2.
 */
static void __devinit pci_fixup_83c553(struct pci_dev *dev)
{
	/*
	 * Set memory region to start at address 0, and enable IO
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);

	/* keep the kernel's view of BAR0 in sync: same size, base 0 */
	dev->resource[0].end -= dev->resource[0].start;
	dev->resource[0].start = 0;

	/*
	 * All memory requests from ISA to be channelled to PCI
	 */
	pci_write_config_byte(dev, 0x48, 0xff);

	/*
	 * Enable ping-pong on bus master to ISA bridge transactions.
	 * This improves the sound DMA substantially.  The fixed
	 * priority arbiter also helps (see below).
	 */
	pci_write_config_byte(dev, 0x42, 0x01);

	/*
	 * Enable PCI retry
	 */
	pci_write_config_byte(dev, 0x40, 0x22);

	/*
	 * We used to set the arbiter to "park on last master" (bit
	 * 1 set), but unfortunately the CyberPro does not park the
	 * bus.  We must therefore park on CPU.  Unfortunately, this
	 * may trigger yet another bug in the 553.
	 */
	pci_write_config_byte(dev, 0x83, 0x02);

	/*
	 * Make the ISA DMA request lowest priority, and disable
	 * rotating priorities completely.
	 */
	pci_write_config_byte(dev, 0x80, 0x11);
	pci_write_config_byte(dev, 0x81, 0x00);

	/*
	 * Route INTA input to IRQ 11, and set IRQ11 to be level
	 * sensitive.  (0x4d1 is presumably the second ELCR port,
	 * covering IRQs 8-15 -- NOTE(review): confirm against the
	 * 83C553 datasheet.)
	 */
	pci_write_config_word(dev, 0x44, 0xb000);
	outb(0x08, 0x4d1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);
133 | |||
/*
 * Rebase BAR0's resource to zero (keeping its size) so the PCI layer
 * treats it as unassigned and allocates it afresh.
 */
static void __devinit pci_fixup_unassign(struct pci_dev *dev)
{
	dev->resource[0].end -= dev->resource[0].start;
	dev->resource[0].start = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);
140 | |||
141 | /* | ||
142 | * Prevent the PCI layer from seeing the resources allocated to this device | ||
143 | * if it is the host bridge by marking it as such. These resources are of | ||
144 | * no consequence to the PCI layer (they are handled elsewhere). | ||
145 | */ | ||
146 | static void __devinit pci_fixup_dec21285(struct pci_dev *dev) | ||
147 | { | ||
148 | int i; | ||
149 | |||
150 | if (dev->devfn == 0) { | ||
151 | dev->class &= 0xff; | ||
152 | dev->class |= PCI_CLASS_BRIDGE_HOST << 8; | ||
153 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
154 | dev->resource[i].start = 0; | ||
155 | dev->resource[i].end = 0; | ||
156 | dev->resource[i].flags = 0; | ||
157 | } | ||
158 | } | ||
159 | } | ||
160 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285); | ||
161 | |||
162 | /* | ||
163 | * Same as above. The PrPMC800 carrier board for the PrPMC1100 | ||
164 | * card maps the host-bridge @ 00:01:00 for some reason and it | ||
165 | * ends up getting scanned. Note that we only want to do this | ||
166 | * fixup when we find the IXP4xx on a PrPMC system, which is why | ||
167 | * we check the machine type. We could be running on a board | ||
168 | * with an IXP4xx target device and we don't want to kill the | ||
169 | * resources in that case. | ||
170 | */ | ||
171 | static void __devinit pci_fixup_prpmc1100(struct pci_dev *dev) | ||
172 | { | ||
173 | int i; | ||
174 | |||
175 | if (machine_is_prpmc1100()) { | ||
176 | dev->class &= 0xff; | ||
177 | dev->class |= PCI_CLASS_BRIDGE_HOST << 8; | ||
178 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
179 | dev->resource[i].start = 0; | ||
180 | dev->resource[i].end = 0; | ||
181 | dev->resource[i].flags = 0; | ||
182 | } | ||
183 | } | ||
184 | } | ||
185 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP4XX, pci_fixup_prpmc1100); | ||
186 | |||
/*
 * PCI IDE controllers use non-standard I/O port decoding, respect it.
 */
static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
{
	struct resource *r;
	int i;

	/* only storage-class IDE devices are affected */
	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
		return;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		r = dev->resource + i;
		/*
		 * Matches a control-block BAR at 0x374 (secondary) or
		 * 0x3f4 == 0x374 | 0x80 (primary).  Only the port at
		 * base + 2 is actually decoded, so shrink the resource
		 * to exactly that one port.
		 */
		if ((r->start & ~0x80) == 0x374) {
			r->start |= 2;
			r->end = r->start;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
207 | |||
/*
 * Put the DEC21142 to sleep
 */
static void __devinit pci_fixup_dec21142(struct pci_dev *dev)
{
	/*
	 * 0x40 is presumably the 21142's power-management (CFDD)
	 * config register and bit 31 the sleep request -- NOTE(review):
	 * confirm against the 21142 datasheet.
	 */
	pci_write_config_dword(dev, 0x40, 0x80000000);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);
216 | |||
/*
 * The CY82C693 needs some rather major fixups to ensure that it does
 * the right thing.  Idea from the Alpha people, with a few additions.
 *
 * We ensure that the IDE base registers are set to 1f0/3f4 for the
 * primary bus, and 170/374 for the secondary bus.  Also, hide them
 * from the PCI subsystem view as well so we won't try to perform
 * our own auto-configuration on them.
 *
 * In addition, we ensure that the PCI IDE interrupts are routed to
 * IRQ 14 and IRQ 15 respectively.
 *
 * The above gets us to a point where the IDE on this device is
 * functional.  However, The CY82C693U _does not work_ in bus
 * master mode without locking the PCI bus solid.
 */
static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u32 base0, base1;

		if (dev->class & 0x80) {	/* primary */
			base0 = 0x1f0;
			base1 = 0x3f4;
		} else {			/* secondary */
			base0 = 0x170;
			base1 = 0x374;
		}

		/* force the legacy IDE ports into the BARs... */
		pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
				       base0 | PCI_BASE_ADDRESS_SPACE_IO);
		pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
				       base1 | PCI_BASE_ADDRESS_SPACE_IO);

		/* ...and hide both resources from the PCI core so it
		 * never tries to reassign them */
		dev->resource[0].start = 0;
		dev->resource[0].end   = 0;
		dev->resource[0].flags = 0;

		dev->resource[1].start = 0;
		dev->resource[1].end   = 0;
		dev->resource[1].flags = 0;
	} else if (PCI_FUNC(dev->devfn) == 0) {
		/*
		 * Setup IDE IRQ routing.
		 */
		pci_write_config_byte(dev, 0x4b, 14);
		pci_write_config_byte(dev, 0x4c, 15);

		/*
		 * Disable FREQACK handshake, enable USB.
		 */
		pci_write_config_byte(dev, 0x4d, 0x41);

		/*
		 * Enable PCI retry, and PCI post-write buffer.
		 */
		pci_write_config_byte(dev, 0x44, 0x17);

		/*
		 * Enable ISA master and DMA post write buffering.
		 */
		pci_write_config_byte(dev, 0x45, 0x03);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
282 | |||
/*
 * Record the IRQ the arch code assigned to @dev in its
 * PCI_INTERRUPT_LINE config register, so that drivers reading the
 * config space see the routing decision.
 */
void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
{
	if (debug_pci)
		printk("PCI: Assigning IRQ %02d to %s\n", irq, pci_name(dev));
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
289 | |||
290 | /* | ||
291 | * If the bus contains any of these devices, then we must not turn on | ||
292 | * parity checking of any kind. Currently this is CyberPro 20x0 only. | ||
293 | */ | ||
294 | static inline int pdev_bad_for_parity(struct pci_dev *dev) | ||
295 | { | ||
296 | return (dev->vendor == PCI_VENDOR_ID_INTERG && | ||
297 | (dev->device == PCI_DEVICE_ID_INTERG_2000 || | ||
298 | dev->device == PCI_DEVICE_ID_INTERG_2010)); | ||
299 | } | ||
300 | |||
301 | /* | ||
302 | * Adjust the device resources from bus-centric to Linux-centric. | ||
303 | */ | ||
304 | static void __devinit | ||
305 | pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev) | ||
306 | { | ||
307 | unsigned long offset; | ||
308 | int i; | ||
309 | |||
310 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
311 | if (dev->resource[i].start == 0) | ||
312 | continue; | ||
313 | if (dev->resource[i].flags & IORESOURCE_MEM) | ||
314 | offset = root->mem_offset; | ||
315 | else | ||
316 | offset = root->io_offset; | ||
317 | |||
318 | dev->resource[i].start += offset; | ||
319 | dev->resource[i].end += offset; | ||
320 | } | ||
321 | } | ||
322 | |||
323 | static void __devinit | ||
324 | pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root) | ||
325 | { | ||
326 | struct pci_dev *dev = bus->self; | ||
327 | int i; | ||
328 | |||
329 | if (!dev) { | ||
330 | /* | ||
331 | * Assign root bus resources. | ||
332 | */ | ||
333 | for (i = 0; i < 3; i++) | ||
334 | bus->resource[i] = root->resource[i]; | ||
335 | } | ||
336 | } | ||
337 | |||
/*
 * pcibios_fixup_bus - Called after each bus is probed,
 * but before its children are examined.
 *
 * First pass over the devices works out which PCI_COMMAND features
 * (SERR, parity, fast back-to-back) the whole bus can support;
 * second pass enables the surviving feature set on every device.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_sys_data *root = bus->sysdata;
	struct pci_dev *dev;
	/* start optimistic; bits are knocked out below as devices that
	 * can't support them are found */
	u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;

	pbus_assign_bus_resources(bus, root);

	/*
	 * Walk the devices on this bus, working out what we can
	 * and can't support.
	 */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 status;

		pdev_fixup_device_resources(root, dev);

		pci_read_config_word(dev, PCI_STATUS, &status);

		/*
		 * If any device on this bus does not support fast back
		 * to back transfers, then the bus as a whole is not able
		 * to support them.  Having fast back to back transfers
		 * on saves us one PCI cycle per transaction.
		 */
		if (!(status & PCI_STATUS_FAST_BACK))
			features &= ~PCI_COMMAND_FAST_BACK;

		if (pdev_bad_for_parity(dev))
			features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

		switch (dev->class >> 8) {
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
		case PCI_CLASS_BRIDGE_ISA:
		case PCI_CLASS_BRIDGE_EISA:
			/*
			 * If this device is an ISA bridge, set isa_bridge
			 * to point at this device.  We will then go looking
			 * for things like keyboard, etc.
			 */
			isa_bridge = dev;
			break;
#endif
		case PCI_CLASS_BRIDGE_PCI:
			/* enable parity/master-abort reporting on the
			 * bridge and make sure it isn't holding the
			 * secondary bus in reset; fast back-to-back is
			 * decided per-bus above */
			pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
			status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
			status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
			pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
			status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
			pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
			break;
		}
	}

	/*
	 * Now walk the devices again, this time setting them up.
	 */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 cmd;

		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		cmd |= features;
		pci_write_config_word(dev, PCI_COMMAND, cmd);

		/* PCI_CACHE_LINE_SIZE is in units of 32-bit words */
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
				      L1_CACHE_BYTES >> 2);
	}

	/*
	 * Propagate the flags to the PCI bridge.
	 */
	if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		if (features & PCI_COMMAND_FAST_BACK)
			bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
		if (features & PCI_COMMAND_PARITY)
			bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
	}

	/*
	 * Report what we did for this bus
	 */
	printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
		bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
430 | |||
431 | /* | ||
432 | * Convert from Linux-centric to bus-centric addresses for bridge devices. | ||
433 | */ | ||
434 | void __devinit | ||
435 | pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, | ||
436 | struct resource *res) | ||
437 | { | ||
438 | struct pci_sys_data *root = dev->sysdata; | ||
439 | unsigned long offset = 0; | ||
440 | |||
441 | if (res->flags & IORESOURCE_IO) | ||
442 | offset = root->io_offset; | ||
443 | if (res->flags & IORESOURCE_MEM) | ||
444 | offset = root->mem_offset; | ||
445 | |||
446 | region->start = res->start - offset; | ||
447 | region->end = res->end - offset; | ||
448 | } | ||
449 | |||
450 | #ifdef CONFIG_HOTPLUG | ||
451 | EXPORT_SYMBOL(pcibios_fixup_bus); | ||
452 | EXPORT_SYMBOL(pcibios_resource_to_bus); | ||
453 | #endif | ||
454 | |||
455 | /* | ||
456 | * This is the standard PCI-PCI bridge swizzling algorithm: | ||
457 | * | ||
458 | * Dev: 0 1 2 3 | ||
459 | * A A B C D | ||
460 | * B B C D A | ||
461 | * C C D A B | ||
462 | * D D A B C | ||
463 | * ^^^^^^^^^^ irq pin on bridge | ||
464 | */ | ||
465 | u8 __devinit pci_std_swizzle(struct pci_dev *dev, u8 *pinp) | ||
466 | { | ||
467 | int pin = *pinp - 1; | ||
468 | |||
469 | while (dev->bus->self) { | ||
470 | pin = (pin + PCI_SLOT(dev->devfn)) & 3; | ||
471 | /* | ||
472 | * move up the chain of bridges, | ||
473 | * swizzling as we go. | ||
474 | */ | ||
475 | dev = dev->bus->self; | ||
476 | } | ||
477 | *pinp = pin + 1; | ||
478 | |||
479 | return PCI_SLOT(dev->devfn); | ||
480 | } | ||
481 | |||
482 | /* | ||
483 | * Swizzle the device pin each time we cross a bridge. | ||
484 | * This might update pin and returns the slot number. | ||
485 | */ | ||
486 | static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin) | ||
487 | { | ||
488 | struct pci_sys_data *sys = dev->sysdata; | ||
489 | int slot = 0, oldpin = *pin; | ||
490 | |||
491 | if (sys->swizzle) | ||
492 | slot = sys->swizzle(dev, pin); | ||
493 | |||
494 | if (debug_pci) | ||
495 | printk("PCI: %s swizzling pin %d => pin %d slot %d\n", | ||
496 | pci_name(dev), oldpin, *pin, slot); | ||
497 | |||
498 | return slot; | ||
499 | } | ||
500 | |||
501 | /* | ||
502 | * Map a slot/pin to an IRQ. | ||
503 | */ | ||
504 | static int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | ||
505 | { | ||
506 | struct pci_sys_data *sys = dev->sysdata; | ||
507 | int irq = -1; | ||
508 | |||
509 | if (sys->map_irq) | ||
510 | irq = sys->map_irq(dev, slot, pin); | ||
511 | |||
512 | if (debug_pci) | ||
513 | printk("PCI: %s mapping slot %d pin %d => irq %d\n", | ||
514 | pci_name(dev), slot, pin, irq); | ||
515 | |||
516 | return irq; | ||
517 | } | ||
518 | |||
/*
 * Allocate a pci_sys_data for each controller described by @hw, call
 * its setup hook, and scan the resulting bus.  hw->setup() returns
 * > 0 to proceed with the scan, 0 to skip this controller, or < 0 to
 * abort the remaining controllers.  Bus numbers are allocated
 * consecutively across controllers via busnr.
 */
static void __init pcibios_init_hw(struct hw_pci *hw)
{
	struct pci_sys_data *sys = NULL;
	int ret;
	int nr, busnr;

	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
		sys = kmalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
		if (!sys)
			panic("PCI: unable to allocate sys data!");

		memset(sys, 0, sizeof(struct pci_sys_data));

		sys->hw      = hw;
		sys->busnr   = busnr;
		sys->swizzle = hw->swizzle;
		sys->map_irq = hw->map_irq;
		/* whole-system defaults; hw->setup() may narrow these */
		sys->resource[0] = &ioport_resource;
		sys->resource[1] = &iomem_resource;

		ret = hw->setup(nr, sys);

		if (ret > 0) {
			sys->bus = hw->scan(nr, sys);

			if (!sys->bus)
				panic("PCI: unable to scan bus!");

			/* next controller starts past this one's buses */
			busnr = sys->bus->subordinate + 1;

			list_add(&sys->node, &hw->buses);
		} else {
			kfree(sys);
			if (ret < 0)
				break;
		}
	}
}
557 | |||
/*
 * Common ARM PCI bring-up: run the platform pre/post hooks around
 * controller setup and scanning, fix up IRQ routing, assign resources
 * (unless "pci=firmware" told us to trust the boot firmware's
 * assignments), and finally announce the devices to the driver core.
 */
void __init pci_common_init(struct hw_pci *hw)
{
	struct pci_sys_data *sys;

	INIT_LIST_HEAD(&hw->buses);

	if (hw->preinit)
		hw->preinit();
	pcibios_init_hw(hw);
	if (hw->postinit)
		hw->postinit();

	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

	list_for_each_entry(sys, &hw->buses, node) {
		struct pci_bus *bus = sys->bus;

		if (!use_firmware) {
			/*
			 * Size the bridge windows.
			 */
			pci_bus_size_bridges(bus);

			/*
			 * Assign resources.
			 */
			pci_bus_assign_resources(bus);
		}

		/*
		 * Tell drivers about devices found.
		 */
		pci_bus_add_devices(bus);
	}
}
593 | |||
594 | char * __init pcibios_setup(char *str) | ||
595 | { | ||
596 | if (!strcmp(str, "debug")) { | ||
597 | debug_pci = 1; | ||
598 | return NULL; | ||
599 | } else if (!strcmp(str, "firmware")) { | ||
600 | use_firmware = 1; | ||
601 | return NULL; | ||
602 | } | ||
603 | return str; | ||
604 | } | ||
605 | |||
606 | /* | ||
607 | * From arch/i386/kernel/pci-i386.c: | ||
608 | * | ||
609 | * We need to avoid collisions with `mirrored' VGA ports | ||
610 | * and other strange ISA hardware, so we always want the | ||
611 | * addresses to be allocated in the 0x000-0x0ff region | ||
612 | * modulo 0x400. | ||
613 | * | ||
614 | * Why? Because some silly external IO cards only decode | ||
615 | * the low 10 bits of the IO address. The 0x00-0xff region | ||
616 | * is reserved for motherboard devices that decode all 16 | ||
617 | * bits, so it's ok to allocate at, say, 0x2800-0x28ff, | ||
618 | * but we want to try to avoid allocating at 0x2900-0x2bff | ||
619 | * which might be mirrored at 0x0100-0x03ff.. | ||
620 | */ | ||
621 | void pcibios_align_resource(void *data, struct resource *res, | ||
622 | unsigned long size, unsigned long align) | ||
623 | { | ||
624 | unsigned long start = res->start; | ||
625 | |||
626 | if (res->flags & IORESOURCE_IO && start & 0x300) | ||
627 | start = (start + 0x3ff) & ~0x3ff; | ||
628 | |||
629 | res->start = (start + align - 1) & ~(align - 1); | ||
630 | } | ||
631 | |||
632 | /** | ||
633 | * pcibios_enable_device - Enable I/O and memory. | ||
634 | * @dev: PCI device to be enabled | ||
635 | */ | ||
636 | int pcibios_enable_device(struct pci_dev *dev, int mask) | ||
637 | { | ||
638 | u16 cmd, old_cmd; | ||
639 | int idx; | ||
640 | struct resource *r; | ||
641 | |||
642 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
643 | old_cmd = cmd; | ||
644 | for (idx = 0; idx < 6; idx++) { | ||
645 | /* Only set up the requested stuff */ | ||
646 | if (!(mask & (1 << idx))) | ||
647 | continue; | ||
648 | |||
649 | r = dev->resource + idx; | ||
650 | if (!r->start && r->end) { | ||
651 | printk(KERN_ERR "PCI: Device %s not available because" | ||
652 | " of resource collisions\n", pci_name(dev)); | ||
653 | return -EINVAL; | ||
654 | } | ||
655 | if (r->flags & IORESOURCE_IO) | ||
656 | cmd |= PCI_COMMAND_IO; | ||
657 | if (r->flags & IORESOURCE_MEM) | ||
658 | cmd |= PCI_COMMAND_MEMORY; | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * Bridges (eg, cardbus bridges) need to be fully enabled | ||
663 | */ | ||
664 | if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) | ||
665 | cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY; | ||
666 | |||
667 | if (cmd != old_cmd) { | ||
668 | printk("PCI: enabling device %s (%04x -> %04x)\n", | ||
669 | pci_name(dev), old_cmd, cmd); | ||
670 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
671 | } | ||
672 | return 0; | ||
673 | } | ||
674 | |||
/*
 * mmap() a region of PCI memory space into a user vma.  Only memory
 * space is supported on ARM; I/O space requests fail with -EINVAL.
 * write_combine is accepted but ignored here.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	struct pci_sys_data *root = dev->sysdata;
	unsigned long phys;

	if (mmap_state == pci_mmap_io) {
		return -EINVAL;
	} else {
		/* vm_pgoff is the bus address in pages; rebasing by
		 * mem_offset presumably yields the physical PFN --
		 * NOTE(review): depends on mem_offset being
		 * page-aligned; confirm for all platforms */
		phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
	}

	/*
	 * Mark this as IO
	 */
	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, phys,
			     vma->vm_end - vma->vm_start,
			     vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S new file mode 100644 index 000000000000..e5d370c235d7 --- /dev/null +++ b/arch/arm/kernel/calls.S | |||
@@ -0,0 +1,335 @@ | |||
/*
 * linux/arch/arm/kernel/calls.S
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file is included twice in entry-common.S: the first inclusion
 * (NR_syscalls not yet defined) only defines the table size; the
 * second inclusion emits the syscall table itself.
 */
#ifndef NR_syscalls
#define NR_syscalls 320
#else

__syscall_start:
/* 0 */		.long	sys_restart_syscall
		.long	sys_exit
		.long	sys_fork_wrapper
		.long	sys_read
		.long	sys_write
/* 5 */		.long	sys_open
		.long	sys_close
		.long	sys_ni_syscall		/* was sys_waitpid */
		.long	sys_creat
		.long	sys_link
/* 10 */	.long	sys_unlink
		.long	sys_execve_wrapper
		.long	sys_chdir
		.long	sys_time		/* used by libc4 */
		.long	sys_mknod
/* 15 */	.long	sys_chmod
		.long	sys_lchown16
		.long	sys_ni_syscall		/* was sys_break */
		.long	sys_ni_syscall		/* was sys_stat */
		.long	sys_lseek
/* 20 */	.long	sys_getpid
		.long	sys_mount
		.long	sys_oldumount		/* used by libc4 */
		.long	sys_setuid16
		.long	sys_getuid16
/* 25 */	.long	sys_stime
		.long	sys_ptrace
		.long	sys_alarm		/* used by libc4 */
		.long	sys_ni_syscall		/* was sys_fstat */
		.long	sys_pause
/* 30 */	.long	sys_utime		/* used by libc4 */
		.long	sys_ni_syscall		/* was sys_stty */
		.long	sys_ni_syscall		/* was sys_getty */
		.long	sys_access
		.long	sys_nice
/* 35 */	.long	sys_ni_syscall		/* was sys_ftime */
		.long	sys_sync
		.long	sys_kill
		.long	sys_rename
		.long	sys_mkdir
/* 40 */	.long	sys_rmdir
		.long	sys_dup
		.long	sys_pipe
		.long	sys_times
		.long	sys_ni_syscall		/* was sys_prof */
/* 45 */	.long	sys_brk
		.long	sys_setgid16
		.long	sys_getgid16
		.long	sys_ni_syscall		/* was sys_signal */
		.long	sys_geteuid16
/* 50 */	.long	sys_getegid16
		.long	sys_acct
		.long	sys_umount
		.long	sys_ni_syscall		/* was sys_lock */
		.long	sys_ioctl
/* 55 */	.long	sys_fcntl
		.long	sys_ni_syscall		/* was sys_mpx */
		.long	sys_setpgid
		.long	sys_ni_syscall		/* was sys_ulimit */
		.long	sys_ni_syscall		/* was sys_olduname */
/* 60 */	.long	sys_umask
		.long	sys_chroot
		.long	sys_ustat
		.long	sys_dup2
		.long	sys_getppid
/* 65 */	.long	sys_getpgrp
		.long	sys_setsid
		.long	sys_sigaction
		.long	sys_ni_syscall		/* was sys_sgetmask */
		.long	sys_ni_syscall		/* was sys_ssetmask */
/* 70 */	.long	sys_setreuid16
		.long	sys_setregid16
		.long	sys_sigsuspend_wrapper
		.long	sys_sigpending
		.long	sys_sethostname
/* 75 */	.long	sys_setrlimit
		.long	sys_old_getrlimit	/* used by libc4 */
		.long	sys_getrusage
		.long	sys_gettimeofday
		.long	sys_settimeofday
/* 80 */	.long	sys_getgroups16
		.long	sys_setgroups16
		.long	old_select		/* used by libc4 */
		.long	sys_symlink
		.long	sys_ni_syscall		/* was sys_lstat */
/* 85 */	.long	sys_readlink
		.long	sys_uselib
		.long	sys_swapon
		.long	sys_reboot
		.long	old_readdir		/* used by libc4 */
/* 90 */	.long	old_mmap		/* used by libc4 */
		.long	sys_munmap
		.long	sys_truncate
		.long	sys_ftruncate
		.long	sys_fchmod
/* 95 */	.long	sys_fchown16
		.long	sys_getpriority
		.long	sys_setpriority
		.long	sys_ni_syscall		/* was sys_profil */
		.long	sys_statfs
/* 100 */	.long	sys_fstatfs
		.long	sys_ni_syscall
		.long	sys_socketcall
		.long	sys_syslog
		.long	sys_setitimer
/* 105 */	.long	sys_getitimer
		.long	sys_newstat
		.long	sys_newlstat
		.long	sys_newfstat
		.long	sys_ni_syscall		/* was sys_uname */
/* 110 */	.long	sys_ni_syscall		/* was sys_iopl */
		.long	sys_vhangup
		.long	sys_ni_syscall
		.long	sys_syscall		/* call a syscall */
		.long	sys_wait4
/* 115 */	.long	sys_swapoff
		.long	sys_sysinfo
		.long	sys_ipc
		.long	sys_fsync
		.long	sys_sigreturn_wrapper
/* 120 */	.long	sys_clone_wrapper
		.long	sys_setdomainname
		.long	sys_newuname
		.long	sys_ni_syscall
		.long	sys_adjtimex
/* 125 */	.long	sys_mprotect
		.long	sys_sigprocmask
		.long	sys_ni_syscall		/* was sys_create_module */
		.long	sys_init_module
		.long	sys_delete_module
/* 130 */	.long	sys_ni_syscall		/* was sys_get_kernel_syms */
		.long	sys_quotactl
		.long	sys_getpgid
		.long	sys_fchdir
		.long	sys_bdflush
/* 135 */	.long	sys_sysfs
		.long	sys_personality
		.long	sys_ni_syscall		/* .long	_sys_afs_syscall */
		.long	sys_setfsuid16
		.long	sys_setfsgid16
/* 140 */	.long	sys_llseek
		.long	sys_getdents
		.long	sys_select
		.long	sys_flock
		.long	sys_msync
/* 145 */	.long	sys_readv
		.long	sys_writev
		.long	sys_getsid
		.long	sys_fdatasync
		.long	sys_sysctl
/* 150 */	.long	sys_mlock
		.long	sys_munlock
		.long	sys_mlockall
		.long	sys_munlockall
		.long	sys_sched_setparam
/* 155 */	.long	sys_sched_getparam
		.long	sys_sched_setscheduler
		.long	sys_sched_getscheduler
		.long	sys_sched_yield
		.long	sys_sched_get_priority_max
/* 160 */	.long	sys_sched_get_priority_min
		.long	sys_sched_rr_get_interval
		.long	sys_nanosleep
		.long	sys_arm_mremap
		.long	sys_setresuid16
/* 165 */	.long	sys_getresuid16
		.long	sys_ni_syscall
		.long	sys_ni_syscall		/* was sys_query_module */
		.long	sys_poll
		.long	sys_nfsservctl
/* 170 */	.long	sys_setresgid16
		.long	sys_getresgid16
		.long	sys_prctl
		.long	sys_rt_sigreturn_wrapper
		.long	sys_rt_sigaction
/* 175 */	.long	sys_rt_sigprocmask
		.long	sys_rt_sigpending
		.long	sys_rt_sigtimedwait
		.long	sys_rt_sigqueueinfo
		.long	sys_rt_sigsuspend_wrapper
/* 180 */	.long	sys_pread64
		.long	sys_pwrite64
		.long	sys_chown16
		.long	sys_getcwd
		.long	sys_capget
/* 185 */	.long	sys_capset
		.long	sys_sigaltstack_wrapper
		.long	sys_sendfile
		.long	sys_ni_syscall
		.long	sys_ni_syscall
/* 190 */	.long	sys_vfork_wrapper
		.long	sys_getrlimit
		.long	sys_mmap2
		.long	sys_truncate64
		.long	sys_ftruncate64
/* 195 */	.long	sys_stat64
		.long	sys_lstat64
		.long	sys_fstat64
		.long	sys_lchown
		.long	sys_getuid
/* 200 */	.long	sys_getgid
		.long	sys_geteuid
		.long	sys_getegid
		.long	sys_setreuid
		.long	sys_setregid
/* 205 */	.long	sys_getgroups
		.long	sys_setgroups
		.long	sys_fchown
		.long	sys_setresuid
		.long	sys_getresuid
/* 210 */	.long	sys_setresgid
		.long	sys_getresgid
		.long	sys_chown
		.long	sys_setuid
		.long	sys_setgid
/* 215 */	.long	sys_setfsuid
		.long	sys_setfsgid
		.long	sys_getdents64
		.long	sys_pivot_root
		.long	sys_mincore
/* 220 */	.long	sys_madvise
		.long	sys_fcntl64
		.long	sys_ni_syscall		/* TUX */
		.long	sys_ni_syscall
		.long	sys_gettid
/* 225 */	.long	sys_readahead
		.long	sys_setxattr
		.long	sys_lsetxattr
		.long	sys_fsetxattr
		.long	sys_getxattr
/* 230 */	.long	sys_lgetxattr
		.long	sys_fgetxattr
		.long	sys_listxattr
		.long	sys_llistxattr
		.long	sys_flistxattr
/* 235 */	.long	sys_removexattr
		.long	sys_lremovexattr
		.long	sys_fremovexattr
		.long	sys_tkill
		.long	sys_sendfile64
/* 240 */	.long	sys_futex_wrapper
		.long	sys_sched_setaffinity
		.long	sys_sched_getaffinity
		.long	sys_io_setup
		.long	sys_io_destroy
/* 245 */	.long	sys_io_getevents
		.long	sys_io_submit
		.long	sys_io_cancel
		.long	sys_exit_group
		.long	sys_lookup_dcookie
/* 250 */	.long	sys_epoll_create
		.long	sys_epoll_ctl
		.long	sys_epoll_wait
		.long	sys_remap_file_pages
		.long	sys_ni_syscall		/* sys_set_thread_area */
/* 255 */	.long	sys_ni_syscall		/* sys_get_thread_area */
		.long	sys_set_tid_address
		.long	sys_timer_create
		.long	sys_timer_settime
		.long	sys_timer_gettime
/* 260 */	.long	sys_timer_getoverrun
		.long	sys_timer_delete
		.long	sys_clock_settime
		.long	sys_clock_gettime
		.long	sys_clock_getres
/* 265 */	.long	sys_clock_nanosleep
		.long	sys_statfs64
		.long	sys_fstatfs64
		.long	sys_tgkill
		.long	sys_utimes
/* 270 */	.long	sys_fadvise64_64
		.long	sys_pciconfig_iobase
		.long	sys_pciconfig_read
		.long	sys_pciconfig_write
		.long	sys_mq_open
/* 275 */	.long	sys_mq_unlink
		.long	sys_mq_timedsend
		.long	sys_mq_timedreceive
		.long	sys_mq_notify
		.long	sys_mq_getsetattr
/* 280 */	.long	sys_waitid
		.long	sys_socket
		.long	sys_bind
		.long	sys_connect
		.long	sys_listen
/* 285 */	.long	sys_accept
		.long	sys_getsockname
		.long	sys_getpeername
		.long	sys_socketpair
		.long	sys_send
/* 290 */	.long	sys_sendto
		.long	sys_recv
		.long	sys_recvfrom
		.long	sys_shutdown
		.long	sys_setsockopt
/* 295 */	.long	sys_getsockopt
		.long	sys_sendmsg
		.long	sys_recvmsg
		.long	sys_semop
		.long	sys_semget
/* 300 */	.long	sys_semctl
		.long	sys_msgsnd
		.long	sys_msgrcv
		.long	sys_msgget
		.long	sys_msgctl
/* 305 */	.long	sys_shmat
		.long	sys_shmdt
		.long	sys_shmget
		.long	sys_shmctl
		.long	sys_add_key
/* 310 */	.long	sys_request_key
		.long	sys_keyctl
		.long	sys_semtimedop
__syscall_end:

		/* Pad the table out to NR_syscalls entries with
		   sys_ni_syscall so unimplemented numbers fail cleanly. */
		.rept	NR_syscalls - (__syscall_end - __syscall_start) / 4
			.long	sys_ni_syscall
		.endr
#endif
diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/compat.c new file mode 100644 index 000000000000..7195add42e74 --- /dev/null +++ b/arch/arm/kernel/compat.c | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/compat.c | ||
3 | * | ||
4 | * Copyright (C) 2001 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * We keep the old params compatibility cruft in one place (here) | ||
11 | * so we don't end up with lots of mess around other places. | ||
12 | * | ||
13 | * NOTE: | ||
14 | * The old struct param_struct is deprecated, but it will be kept in | ||
15 | * the kernel for 5 years from now (2001). This will allow boot loaders | ||
16 | * to convert to the new struct tag way. | ||
17 | */ | ||
18 | #include <linux/config.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/init.h> | ||
23 | |||
24 | #include <asm/setup.h> | ||
25 | #include <asm/mach-types.h> | ||
26 | #include <asm/page.h> | ||
27 | |||
28 | #include <asm/mach/arch.h> | ||
29 | |||
30 | /* | ||
31 | * Usage: | ||
32 | * - do not go blindly adding fields, add them at the end | ||
33 | * - when adding fields, don't rely on the address until | ||
34 | * a patch from me has been released | ||
35 | * - unused fields should be zero (for future expansion) | ||
36 | * - this structure is relatively short-lived - only | ||
37 | * guaranteed to contain useful data in setup_arch() | ||
38 | * | ||
39 | * This is the old deprecated way to pass parameters to the kernel | ||
40 | */ | ||
/*
 * NOTE: the numeric offsets in the field comments are part of the
 * boot-loader ABI — fields may only be appended, never moved.
 */
struct param_struct {
	union {
		struct {
			unsigned long page_size;		/*  0 */
			unsigned long nr_pages;		/*  4 */
			unsigned long ramdisk_size;		/*  8 */
			unsigned long flags;		/* 12 */
#define FLAG_READONLY	1
#define FLAG_RDLOAD	4
#define FLAG_RDPROMPT	8
			unsigned long rootdev;		/* 16 */
			unsigned long video_num_cols;	/* 20 */
			unsigned long video_num_rows;	/* 24 */
			unsigned long video_x;		/* 28 */
			unsigned long video_y;		/* 32 */
			unsigned long memc_control_reg;	/* 36 */
			unsigned char sounddefault;		/* 40 */
			unsigned char adfsdrives;		/* 41 */
			unsigned char bytes_per_char_h;	/* 42 */
			unsigned char bytes_per_char_v;	/* 43 */
			unsigned long pages_in_bank[4];	/* 44 */
			unsigned long pages_in_vram;	/* 60 */
			unsigned long initrd_start;		/* 64 */
			unsigned long initrd_size;		/* 68 */
			unsigned long rd_start;		/* 72 */
			unsigned long system_rev;		/* 76 */
			unsigned long system_serial_low;	/* 80 */
			unsigned long system_serial_high;	/* 84 */
			unsigned long mem_fclk_21285;	/* 88 */
		} s;
		/* pad u1 to a fixed 256 bytes regardless of the struct above */
		char unused[256];
	} u1;
	union {
		char paths[8][128];
		struct {
			unsigned long magic;
			char n[1024 - sizeof(unsigned long)];
		} s;
	} u2;
	char commandline[COMMAND_LINE_SIZE];
};
82 | |||
83 | static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size) | ||
84 | { | ||
85 | tag = tag_next(tag); | ||
86 | tag->hdr.tag = ATAG_MEM; | ||
87 | tag->hdr.size = tag_size(tag_mem32); | ||
88 | tag->u.mem.size = size; | ||
89 | tag->u.mem.start = start; | ||
90 | |||
91 | return tag; | ||
92 | } | ||
93 | |||
/*
 * Build an ATAG list from an old-style param_struct.  The list is
 * assembled at @taglist and finally copied back over @params, so the
 * caller sees the converted taglist in place of the old structure.
 */
static void __init build_tag_list(struct param_struct *params, void *taglist)
{
	struct tag *tag = taglist;

	/* Sanity check: a mismatched page size means the block is not
	 * a valid param_struct for this kernel; leave it untouched. */
	if (params->u1.s.page_size != PAGE_SIZE) {
		printk(KERN_WARNING "Warning: bad configuration page, "
		       "trying to continue\n");
		return;
	}

	printk(KERN_DEBUG "Converting old-style param struct to taglist\n");

#ifdef CONFIG_ARCH_NETWINDER
	/* NeTTrom is only expected to report one of these four memory
	 * sizes; anything else indicates corrupt parameters, so fall
	 * back to conservative defaults. */
	if (params->u1.s.nr_pages != 0x02000 &&
	    params->u1.s.nr_pages != 0x04000 &&
	    params->u1.s.nr_pages != 0x08000 &&
	    params->u1.s.nr_pages != 0x10000) {
		printk(KERN_WARNING "Warning: bad NeTTrom parameters "
		       "detected, using defaults\n");

		params->u1.s.nr_pages = 0x1000;	/* 16MB */
		params->u1.s.ramdisk_size = 0;
		params->u1.s.flags = FLAG_READONLY;
		params->u1.s.initrd_start = 0;
		params->u1.s.initrd_size = 0;
		params->u1.s.rd_start = 0;
	}
#endif

	/* The list opens with an ATAG_CORE tag. */
	tag->hdr.tag = ATAG_CORE;
	tag->hdr.size = tag_size(tag_core);
	tag->u.core.flags = params->u1.s.flags & FLAG_READONLY;
	tag->u.core.pagesize = params->u1.s.page_size;
	tag->u.core.rootdev = params->u1.s.rootdev;

	tag = tag_next(tag);
	tag->hdr.tag = ATAG_RAMDISK;
	tag->hdr.size = tag_size(tag_ramdisk);
	/* bit 0 = load ramdisk, bit 1 = prompt for ramdisk */
	tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) |
			       (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0);
	tag->u.ramdisk.size = params->u1.s.ramdisk_size;
	tag->u.ramdisk.start = params->u1.s.rd_start;

	tag = tag_next(tag);
	tag->hdr.tag = ATAG_INITRD;
	tag->hdr.size = tag_size(tag_initrd);
	tag->u.initrd.start = params->u1.s.initrd_start;
	tag->u.initrd.size = params->u1.s.initrd_size;

	tag = tag_next(tag);
	tag->hdr.tag = ATAG_SERIAL;
	tag->hdr.size = tag_size(tag_serialnr);
	tag->u.serialnr.low = params->u1.s.system_serial_low;
	tag->u.serialnr.high = params->u1.s.system_serial_high;

	tag = tag_next(tag);
	tag->hdr.tag = ATAG_REVISION;
	tag->hdr.size = tag_size(tag_revision);
	tag->u.revision.rev = params->u1.s.system_rev;

#ifdef CONFIG_ARCH_ACORN
	/* RiscPC reports memory as four separate banks, 64MB apart. */
	if (machine_is_riscpc()) {
		int i;
		for (i = 0; i < 4; i++)
			tag = memtag(tag, PHYS_OFFSET + (i << 26),
				 params->u1.s.pages_in_bank[i] * PAGE_SIZE);
	} else
#endif
	tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);

#ifdef CONFIG_FOOTBRIDGE
	if (params->u1.s.mem_fclk_21285) {
		tag = tag_next(tag);
		tag->hdr.tag = ATAG_MEMCLK;
		tag->hdr.size = tag_size(tag_memclk);
		tag->u.memclk.fmemclk = params->u1.s.mem_fclk_21285;
	}
#endif

#ifdef CONFIG_ARCH_EBSA285
	if (machine_is_ebsa285()) {
		tag = tag_next(tag);
		tag->hdr.tag = ATAG_VIDEOTEXT;
		tag->hdr.size = tag_size(tag_videotext);
		tag->u.videotext.x = params->u1.s.video_x;
		tag->u.videotext.y = params->u1.s.video_y;
		tag->u.videotext.video_page = 0;
		tag->u.videotext.video_mode = 0;
		tag->u.videotext.video_cols = params->u1.s.video_num_cols;
		tag->u.videotext.video_ega_bx = 0;
		tag->u.videotext.video_lines = params->u1.s.video_num_rows;
		tag->u.videotext.video_isvga = 1;
		tag->u.videotext.video_points = 8;
	}
#endif

#ifdef CONFIG_ARCH_ACORN
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_ACORN;
	tag->hdr.size = tag_size(tag_acorn);
	tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg;
	tag->u.acorn.vram_pages       = params->u1.s.pages_in_vram;
	tag->u.acorn.sounddefault     = params->u1.s.sounddefault;
	tag->u.acorn.adfsdrives       = params->u1.s.adfsdrives;
#endif

	tag = tag_next(tag);
	tag->hdr.tag = ATAG_CMDLINE;
	/* size in 32-bit words: NUL included, rounded up (hence + 3) */
	tag->hdr.size = (strlen(params->commandline) + 3 +
			 sizeof(struct tag_header)) >> 2;
	strcpy(tag->u.cmdline.cmdline, params->commandline);

	/* Terminate the list. */
	tag = tag_next(tag);
	tag->hdr.tag = ATAG_NONE;
	tag->hdr.size = 0;

	/* Copy the finished list back over the param_struct.  The two
	 * regions can overlap (the caller builds the list inside
	 * params->u2), hence memmove rather than memcpy. */
	memmove(params, taglist, ((int)tag) - ((int)taglist) +
				 sizeof(struct tag_header));
}
213 | |||
214 | void __init convert_to_tag_list(struct tag *tags) | ||
215 | { | ||
216 | struct param_struct *params = (struct param_struct *)tags; | ||
217 | build_tag_list(params, ¶ms->u2); | ||
218 | } | ||
219 | |||
220 | void __init squash_mem_tags(struct tag *tag) | ||
221 | { | ||
222 | for (; tag->hdr.size; tag = tag_next(tag)) | ||
223 | if (tag->hdr.tag == ATAG_MEM) | ||
224 | tag->hdr.tag = ATAG_NONE; | ||
225 | } | ||
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S new file mode 100644 index 000000000000..caaa919ab47a --- /dev/null +++ b/arch/arm/kernel/debug.S | |||
@@ -0,0 +1,106 @@ | |||
/*
 * linux/arch/arm/kernel/debug.S
 *
 * Copyright (C) 1994-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 32-bit debugging code
 */
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/hardware.h>

		.text

/*
 * Some debugging routines (useful if you've got MM problems and
 * printk isn't working).  For DEBUGGING ONLY!!!  Do not leave
 * references to these in a production kernel!
 *
 * Each platform provides addruart/senduart/busyuart/waituart macros;
 * the ICEDCC variants below talk to the EmbeddedICE debug comms
 * channel via cp14 instead of a real UART.
 */

#if defined(CONFIG_DEBUG_ICEDCC)
		@@ debug using ARM EmbeddedICE DCC channel
		.macro	addruart, rx
		.endm

		.macro	senduart, rd, rx
		mcr	p14, 0, \rd, c1, c0, 0
		.endm

		@ spin until the DCC write buffer drains (bit 1 clear)
		.macro	busyuart, rd, rx
1001:
		mrc	p14, 0, \rx, c0, c0, 0
		tst	\rx, #2
		beq	1001b

		.endm

		@ wait (bounded) for the channel to become free
		.macro	waituart, rd, rx
		mov	\rd, #0x2000000
1001:
		subs	\rd, \rd, #1
		bmi	1002f
		mrc	p14, 0, \rx, c0, c0, 0
		tst	\rx, #2
		bne	1001b
1002:
		.endm
#else
#include <asm/arch/debug-macro.S>
#endif

/*
 * Useful debugging routines
 */

/* Print r0 as an 8/4/2-digit hex number; r1 holds the digit count. */
ENTRY(printhex8)
		mov	r1, #8
		b	printhex

ENTRY(printhex4)
		mov	r1, #4
		b	printhex

ENTRY(printhex2)
		mov	r1, #2
/* Convert the low r1 nibbles of r0 to ASCII in hexbuf (written from
 * the least significant digit backwards), then tail-call printascii. */
printhex:	adr	r2, hexbuf
		add	r3, r2, r1
		mov	r1, #0
		strb	r1, [r3]
1:		and	r1, r0, #15
		mov	r0, r0, lsr #4
		cmp	r1, #10
		addlt	r1, r1, #'0'
		addge	r1, r1, #'a' - 10
		strb	r1, [r3, #-1]!
		teq	r3, r2
		bne	1b
		mov	r0, r2
		b	printascii

		.ltorg

/* Print the NUL-terminated string at r0; each LF sent is followed by
 * a CR (the '\n' -> '\r' substitution loops back through label 1). */
ENTRY(printascii)
		addruart r3
		b	2f
1:		waituart r2, r3
		senduart r1, r3
		busyuart r2, r3
		teq	r1, #'\n'
		moveq	r1, #'\r'
		beq	1b
2:		teq	r0, #0
		ldrneb	r1, [r0], #1
		teqne	r1, #0
		bne	1b
		mov	pc, lr

/* Print the single character in r0; reuses printascii's send loop
 * (label 1 above) with r0 = 0 so the loop terminates after one char. */
ENTRY(printch)
		addruart r3
		mov	r1, r0
		mov	r0, #0
		b	1b

hexbuf:		.space 16
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c new file mode 100644 index 000000000000..e9a36304ec3e --- /dev/null +++ b/arch/arm/kernel/dma-isa.c | |||
@@ -0,0 +1,207 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/dma-isa.c | ||
3 | * | ||
4 | * Copyright (C) 1999-2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * ISA DMA primitives | ||
11 | * Taken from various sources, including: | ||
12 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
13 | * Written by Hennus Bergman, 1992. | ||
14 | * High DMA channel support & info by Hannu Savolainen and John Boyd, | ||
15 | * Nov. 1992. | ||
16 | * arch/arm/kernel/dma-ebsa285.c | ||
17 | * Copyright (C) 1998 Phil Blundell | ||
18 | */ | ||
19 | #include <linux/ioport.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/pci.h> | ||
22 | |||
23 | #include <asm/dma.h> | ||
24 | #include <asm/io.h> | ||
25 | |||
26 | #include <asm/mach/dma.h> | ||
27 | |||
28 | #define ISA_DMA_MODE_READ 0x44 | ||
29 | #define ISA_DMA_MODE_WRITE 0x48 | ||
30 | #define ISA_DMA_MODE_CASCADE 0xc0 | ||
31 | #define ISA_DMA_AUTOINIT 0x10 | ||
32 | |||
33 | #define ISA_DMA_MASK 0 | ||
34 | #define ISA_DMA_MODE 1 | ||
35 | #define ISA_DMA_CLRFF 2 | ||
36 | #define ISA_DMA_PGHI 3 | ||
37 | #define ISA_DMA_PGLO 4 | ||
38 | #define ISA_DMA_ADDR 5 | ||
39 | #define ISA_DMA_COUNT 6 | ||
40 | |||
/*
 * Per-channel I/O port map, indexed by the ISA_DMA_* register indices
 * defined above.  Channels 0-3 belong to the first controller, 4-7 to
 * the second (note the distinct mask/mode/clear-flip-flop ports).
 */
static unsigned int isa_dma_port[8][7] = {
	/* MASK   MODE   CLRFF  PAGE_HI PAGE_LO ADDR COUNT */
	{  0x0a,  0x0b,  0x0c,  0x487,  0x087,  0x00, 0x01 },
	{  0x0a,  0x0b,  0x0c,  0x483,  0x083,  0x02, 0x03 },
	{  0x0a,  0x0b,  0x0c,  0x481,  0x081,  0x04, 0x05 },
	{  0x0a,  0x0b,  0x0c,  0x482,  0x082,  0x06, 0x07 },
	{  0xd4,  0xd6,  0xd8,  0x000,  0x000,  0xc0, 0xc2 },
	{  0xd4,  0xd6,  0xd8,  0x48b,  0x08b,  0xc4, 0xc6 },
	{  0xd4,  0xd6,  0xd8,  0x489,  0x089,  0xc8, 0xca },
	{  0xd4,  0xd6,  0xd8,  0x48a,  0x08a,  0xcc, 0xce }
};
52 | |||
53 | static int isa_get_dma_residue(dmach_t channel, dma_t *dma) | ||
54 | { | ||
55 | unsigned int io_port = isa_dma_port[channel][ISA_DMA_COUNT]; | ||
56 | int count; | ||
57 | |||
58 | count = 1 + inb(io_port); | ||
59 | count |= inb(io_port) << 8; | ||
60 | |||
61 | return channel < 4 ? count : (count << 1); | ||
62 | } | ||
63 | |||
/*
 * Program and unmask an ISA DMA channel.  The controller registers are
 * rewritten only when the channel's settings have changed since the
 * last transfer (dma->invalid); otherwise the channel is just unmasked.
 */
static void isa_enable_dma(dmach_t channel, dma_t *dma)
{
	if (dma->invalid) {
		unsigned long address, length;
		unsigned int mode, direction;

		mode = channel & 3;
		switch (dma->dma_mode & DMA_MODE_MASK) {
		case DMA_MODE_READ:
			mode |= ISA_DMA_MODE_READ;
			direction = PCI_DMA_FROMDEVICE;
			break;

		case DMA_MODE_WRITE:
			mode |= ISA_DMA_MODE_WRITE;
			direction = PCI_DMA_TODEVICE;
			break;

		case DMA_MODE_CASCADE:
			mode |= ISA_DMA_MODE_CASCADE;
			direction = PCI_DMA_BIDIRECTIONAL;
			break;

		default:
			direction = PCI_DMA_NONE;
			break;
		}

		if (!dma->using_sg) {
			/*
			 * Cope with ISA-style drivers which expect cache
			 * coherence.
			 */
			dma->buf.dma_address = pci_map_single(NULL,
				dma->buf.__address, dma->buf.length,
				direction);
		}

		address = dma->buf.dma_address;
		length = dma->buf.length - 1;

		/* Page registers take address bits 16-23 and 24-31. */
		outb(address >> 16, isa_dma_port[channel][ISA_DMA_PGLO]);
		outb(address >> 24, isa_dma_port[channel][ISA_DMA_PGHI]);

		/* Channels 4-7 count in 16-bit units, so halve both. */
		if (channel >= 4) {
			address >>= 1;
			length >>= 1;
		}

		/* Reset the byte flip-flop before the paired writes below. */
		outb(0, isa_dma_port[channel][ISA_DMA_CLRFF]);

		outb(address, isa_dma_port[channel][ISA_DMA_ADDR]);
		outb(address >> 8, isa_dma_port[channel][ISA_DMA_ADDR]);

		outb(length, isa_dma_port[channel][ISA_DMA_COUNT]);
		outb(length >> 8, isa_dma_port[channel][ISA_DMA_COUNT]);

		if (dma->dma_mode & DMA_AUTOINIT)
			mode |= ISA_DMA_AUTOINIT;

		outb(mode, isa_dma_port[channel][ISA_DMA_MODE]);
		dma->invalid = 0;
	}
	/* Clear the mask bit to let the channel run. */
	outb(channel & 3, isa_dma_port[channel][ISA_DMA_MASK]);
}
129 | |||
130 | static void isa_disable_dma(dmach_t channel, dma_t *dma) | ||
131 | { | ||
132 | outb(channel | 4, isa_dma_port[channel][ISA_DMA_MASK]); | ||
133 | } | ||
134 | |||
/*
 * Operations hooked into the generic ARM DMA front-end (via d_ops)
 * for channels driven by the ISA DMA controller.
 */
static struct dma_ops isa_dma_ops = {
	.type		= "ISA",
	.enable		= isa_enable_dma,
	.disable	= isa_disable_dma,
	.residue	= isa_get_dma_residue,
};
141 | |||
142 | static struct resource dma_resources[] = { | ||
143 | { "dma1", 0x0000, 0x000f }, | ||
144 | { "dma low page", 0x0080, 0x008f }, | ||
145 | { "dma2", 0x00c0, 0x00df }, | ||
146 | { "dma high page", 0x0480, 0x048f } | ||
147 | }; | ||
148 | |||
/*
 * Probe for an ISA DMA controller and, if found, attach isa_dma_ops
 * to all eight channels, program default modes, reserve the cascade
 * channel and claim the controller's I/O port regions.
 */
void __init isa_init_dma(dma_t *dma)
{
	/*
	 * Try to autodetect presence of an ISA DMA controller.
	 * We do some minimal initialisation, and check that
	 * channel 0's DMA address registers are writeable.
	 */
	outb(0xff, 0x0d);
	outb(0xff, 0xda);

	/*
	 * Write high and low address, and then read them back
	 * in the same order.
	 */
	outb(0x55, 0x00);
	outb(0xaa, 0x00);

	if (inb(0) == 0x55 && inb(0) == 0xaa) {
		int channel, i;

		/* Hook up our ops and mask every channel for now. */
		for (channel = 0; channel < 8; channel++) {
			dma[channel].d_ops = &isa_dma_ops;
			isa_disable_dma(channel, NULL);
		}

		/* Program per-channel modes on the first controller
		 * (0x0b is its mode port — see isa_dma_port). */
		outb(0x40, 0x0b);
		outb(0x41, 0x0b);
		outb(0x42, 0x0b);
		outb(0x43, 0x0b);

		/* ...and on the second (0xd6), channel 4 as cascade. */
		outb(0xc0, 0xd6);
		outb(0x41, 0xd6);
		outb(0x42, 0xd6);
		outb(0x43, 0xd6);

		outb(0, 0xd4);

		outb(0x10, 0x08);
		outb(0x10, 0xd0);

		/*
		 * Is this correct? According to my documentation, it
		 * doesn't appear to be. It should be:
		 * outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
		 */
		outb(0x30, 0x40b);
		outb(0x31, 0x40b);
		outb(0x32, 0x40b);
		outb(0x33, 0x40b);
		outb(0x31, 0x4d6);
		outb(0x32, 0x4d6);
		outb(0x33, 0x4d6);

		/* Reserve the cascade channel so drivers can't grab it. */
		request_dma(DMA_ISA_CASCADE, "cascade");

		for (i = 0; i < sizeof(dma_resources) / sizeof(dma_resources[0]); i++)
			request_resource(&ioport_resource, dma_resources + i);
	}
}
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c new file mode 100644 index 000000000000..2b7883884234 --- /dev/null +++ b/arch/arm/kernel/dma.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/dma.c | ||
3 | * | ||
4 | * Copyright (C) 1995-2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Front-end to the DMA handling. This handles the allocation/freeing | ||
11 | * of DMA channels, and provides a unified interface to the machines | ||
12 | * DMA facilities. | ||
13 | */ | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/mman.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/errno.h> | ||
20 | |||
21 | #include <asm/dma.h> | ||
22 | |||
23 | #include <asm/mach/dma.h> | ||
24 | |||
25 | DEFINE_SPINLOCK(dma_spin_lock); | ||
26 | |||
27 | #if MAX_DMA_CHANNELS > 0 | ||
28 | |||
29 | static dma_t dma_chan[MAX_DMA_CHANNELS]; | ||
30 | |||
31 | /* | ||
32 | * Get dma list for /proc/dma | ||
33 | */ | ||
34 | int get_dma_list(char *buf) | ||
35 | { | ||
36 | dma_t *dma; | ||
37 | char *p = buf; | ||
38 | int i; | ||
39 | |||
40 | for (i = 0, dma = dma_chan; i < MAX_DMA_CHANNELS; i++, dma++) | ||
41 | if (dma->lock) | ||
42 | p += sprintf(p, "%2d: %14s %s\n", i, | ||
43 | dma->d_ops->type, dma->device_id); | ||
44 | |||
45 | return p - buf; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Request DMA channel | ||
50 | * | ||
51 | * On certain platforms, we have to allocate an interrupt as well... | ||
52 | */ | ||
/*
 * Request DMA channel
 *
 * On certain platforms, we have to allocate an interrupt as well...
 *
 * Returns 0 on success, -EINVAL for an invalid or unprovisioned
 * channel, -EBUSY if already claimed, or the error from the
 * channel's d_ops->request hook.
 */
int request_dma(dmach_t channel, const char *device_id)
{
	dma_t *dma = dma_chan + channel;
	int ret;

	/* dma is only dereferenced after the range check succeeds */
	if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
		goto bad_dma;

	/* Atomically claim the channel; non-zero old value = already owned. */
	if (xchg(&dma->lock, 1) != 0)
		goto busy;

	dma->device_id = device_id;
	dma->active = 0;
	dma->invalid = 1;	/* force reprogramming before next enable */

	ret = 0;
	if (dma->d_ops->request)
		ret = dma->d_ops->request(channel, dma);

	/* Low-level request failed: release the claim again. */
	if (ret)
		xchg(&dma->lock, 0);

	return ret;

bad_dma:
	printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
	return -EINVAL;

busy:
	return -EBUSY;
}
84 | |||
85 | /* | ||
86 | * Free DMA channel | ||
87 | * | ||
88 | * On certain platforms, we have to free interrupt as well... | ||
89 | */ | ||
/*
 * Free DMA channel
 *
 * On certain platforms, we have to free interrupt as well...
 */
void free_dma(dmach_t channel)
{
	dma_t *dma = dma_chan + channel;

	if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
		goto bad_dma;

	/* Freeing a channel that is still transferring: stop it first. */
	if (dma->active) {
		printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
		dma->d_ops->disable(channel, dma);
		dma->active = 0;
	}

	/* Atomically drop the claim; old value 0 means it was never taken. */
	if (xchg(&dma->lock, 0) != 0) {
		if (dma->d_ops->free)
			dma->d_ops->free(channel, dma);
		return;
	}

	printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
	return;

bad_dma:
	printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
}
115 | |||
116 | /* Set DMA Scatter-Gather list | ||
117 | */ | ||
118 | void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg) | ||
119 | { | ||
120 | dma_t *dma = dma_chan + channel; | ||
121 | |||
122 | if (dma->active) | ||
123 | printk(KERN_ERR "dma%d: altering DMA SG while " | ||
124 | "DMA active\n", channel); | ||
125 | |||
126 | dma->sg = sg; | ||
127 | dma->sgcount = nr_sg; | ||
128 | dma->using_sg = 1; | ||
129 | dma->invalid = 1; | ||
130 | } | ||
131 | |||
132 | /* Set DMA address | ||
133 | * | ||
134 | * Copy address to the structure, and set the invalid bit | ||
135 | */ | ||
136 | void set_dma_addr (dmach_t channel, unsigned long physaddr) | ||
137 | { | ||
138 | dma_t *dma = dma_chan + channel; | ||
139 | |||
140 | if (dma->active) | ||
141 | printk(KERN_ERR "dma%d: altering DMA address while " | ||
142 | "DMA active\n", channel); | ||
143 | |||
144 | dma->sg = &dma->buf; | ||
145 | dma->sgcount = 1; | ||
146 | dma->buf.__address = bus_to_virt(physaddr); | ||
147 | dma->using_sg = 0; | ||
148 | dma->invalid = 1; | ||
149 | } | ||
150 | |||
151 | /* Set DMA byte count | ||
152 | * | ||
153 | * Copy address to the structure, and set the invalid bit | ||
154 | */ | ||
155 | void set_dma_count (dmach_t channel, unsigned long count) | ||
156 | { | ||
157 | dma_t *dma = dma_chan + channel; | ||
158 | |||
159 | if (dma->active) | ||
160 | printk(KERN_ERR "dma%d: altering DMA count while " | ||
161 | "DMA active\n", channel); | ||
162 | |||
163 | dma->sg = &dma->buf; | ||
164 | dma->sgcount = 1; | ||
165 | dma->buf.length = count; | ||
166 | dma->using_sg = 0; | ||
167 | dma->invalid = 1; | ||
168 | } | ||
169 | |||
170 | /* Set DMA direction mode | ||
171 | */ | ||
172 | void set_dma_mode (dmach_t channel, dmamode_t mode) | ||
173 | { | ||
174 | dma_t *dma = dma_chan + channel; | ||
175 | |||
176 | if (dma->active) | ||
177 | printk(KERN_ERR "dma%d: altering DMA mode while " | ||
178 | "DMA active\n", channel); | ||
179 | |||
180 | dma->dma_mode = mode; | ||
181 | dma->invalid = 1; | ||
182 | } | ||
183 | |||
184 | /* Enable DMA channel | ||
185 | */ | ||
186 | void enable_dma (dmach_t channel) | ||
187 | { | ||
188 | dma_t *dma = dma_chan + channel; | ||
189 | |||
190 | if (!dma->lock) | ||
191 | goto free_dma; | ||
192 | |||
193 | if (dma->active == 0) { | ||
194 | dma->active = 1; | ||
195 | dma->d_ops->enable(channel, dma); | ||
196 | } | ||
197 | return; | ||
198 | |||
199 | free_dma: | ||
200 | printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel); | ||
201 | BUG(); | ||
202 | } | ||
203 | |||
204 | /* Disable DMA channel | ||
205 | */ | ||
206 | void disable_dma (dmach_t channel) | ||
207 | { | ||
208 | dma_t *dma = dma_chan + channel; | ||
209 | |||
210 | if (!dma->lock) | ||
211 | goto free_dma; | ||
212 | |||
213 | if (dma->active == 1) { | ||
214 | dma->active = 0; | ||
215 | dma->d_ops->disable(channel, dma); | ||
216 | } | ||
217 | return; | ||
218 | |||
219 | free_dma: | ||
220 | printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel); | ||
221 | BUG(); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Is the specified DMA channel active? | ||
226 | */ | ||
227 | int dma_channel_active(dmach_t channel) | ||
228 | { | ||
229 | return dma_chan[channel].active; | ||
230 | } | ||
231 | |||
/* ISA-style page registers are not supported by this layer; this
 * stub only logs the attempt. */
void set_dma_page(dmach_t channel, char pagenr)
{
	printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
}
236 | |||
237 | void set_dma_speed(dmach_t channel, int cycle_ns) | ||
238 | { | ||
239 | dma_t *dma = dma_chan + channel; | ||
240 | int ret = 0; | ||
241 | |||
242 | if (dma->d_ops->setspeed) | ||
243 | ret = dma->d_ops->setspeed(channel, dma, cycle_ns); | ||
244 | dma->speed = ret; | ||
245 | } | ||
246 | |||
247 | int get_dma_residue(dmach_t channel) | ||
248 | { | ||
249 | dma_t *dma = dma_chan + channel; | ||
250 | int ret = 0; | ||
251 | |||
252 | if (dma->d_ops->residue) | ||
253 | ret = dma->d_ops->residue(channel, dma); | ||
254 | |||
255 | return ret; | ||
256 | } | ||
257 | |||
/* Boot-time hook: let the machine class populate dma_chan[]. */
void __init init_dma(void)
{
	arch_dma_init(dma_chan);
}
262 | |||
263 | #else | ||
264 | |||
/* No DMA channels configured: every request fails. */
int request_dma(dmach_t channel, const char *device_id)
{
	return -EINVAL;
}
269 | |||
/* No DMA channels configured: nothing outstanding. */
int get_dma_residue(dmach_t channel)
{
	return 0;
}
274 | |||
/*
 * With no DMA channels configured, alias the remaining entry points
 * to get_dma_residue (a harmless no-op returning 0) at the assembler
 * symbol level instead of emitting separate stub bodies.
 */
#define GLOBAL_ALIAS(_a,_b) asm (".set " #_a "," #_b "; .globl " #_a)
GLOBAL_ALIAS(disable_dma, get_dma_residue);
GLOBAL_ALIAS(enable_dma, get_dma_residue);
GLOBAL_ALIAS(free_dma, get_dma_residue);
GLOBAL_ALIAS(get_dma_list, get_dma_residue);
GLOBAL_ALIAS(set_dma_mode, get_dma_residue);
GLOBAL_ALIAS(set_dma_page, get_dma_residue);
GLOBAL_ALIAS(set_dma_count, get_dma_residue);
GLOBAL_ALIAS(set_dma_addr, get_dma_residue);
GLOBAL_ALIAS(set_dma_sg, get_dma_residue);
GLOBAL_ALIAS(set_dma_speed, get_dma_residue);
GLOBAL_ALIAS(init_dma, get_dma_residue);
287 | |||
288 | #endif | ||
289 | |||
/* Export the generic DMA interface (and its lock) to modules. */
EXPORT_SYMBOL(request_dma);
EXPORT_SYMBOL(free_dma);
EXPORT_SYMBOL(enable_dma);
EXPORT_SYMBOL(disable_dma);
EXPORT_SYMBOL(set_dma_addr);
EXPORT_SYMBOL(set_dma_count);
EXPORT_SYMBOL(set_dma_mode);
EXPORT_SYMBOL(set_dma_page);
EXPORT_SYMBOL(get_dma_residue);
EXPORT_SYMBOL(set_dma_sg);
EXPORT_SYMBOL(set_dma_speed);

EXPORT_SYMBOL(dma_spin_lock);
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c new file mode 100644 index 000000000000..3dc15b131f53 --- /dev/null +++ b/arch/arm/kernel/ecard.c | |||
@@ -0,0 +1,1210 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/ecard.c | ||
3 | * | ||
4 | * Copyright 1995-2001 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Find all installed expansion cards, and handle interrupts from them. | ||
11 | * | ||
12 | * Created from information from Acorns RiscOS3 PRMs | ||
13 | * | ||
14 | * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether | ||
15 | * podule slot. | ||
16 | * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work. | ||
17 | * 12-Sep-1997 RMK Created new handling of interrupt enables/disables | ||
18 | * - cards can now register their own routine to control | ||
19 | * interrupts (recommended). | ||
20 | * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled | ||
21 | * on reset from Linux. (Caused cards not to respond | ||
22 | * under RiscOS without hard reset). | ||
23 | * 15-Feb-1998 RMK Added DMA support | ||
24 | * 12-Sep-1998 RMK Added EASI support | ||
25 | * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment. | ||
26 | * 17-Apr-1999 RMK Support for EASI Type C cycles. | ||
27 | */ | ||
28 | #define ECARD_C | ||
29 | |||
30 | #include <linux/config.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/completion.h> | ||
37 | #include <linux/reboot.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/proc_fs.h> | ||
41 | #include <linux/device.h> | ||
42 | #include <linux/init.h> | ||
43 | |||
44 | #include <asm/dma.h> | ||
45 | #include <asm/ecard.h> | ||
46 | #include <asm/hardware.h> | ||
47 | #include <asm/io.h> | ||
48 | #include <asm/irq.h> | ||
49 | #include <asm/mmu_context.h> | ||
50 | #include <asm/mach/irq.h> | ||
51 | #include <asm/tlbflush.h> | ||
52 | |||
53 | #ifndef CONFIG_ARCH_RPC | ||
54 | #define HAVE_EXPMASK | ||
55 | #endif | ||
56 | |||
/*
 * A unit of work handed to the kecardd daemon (see ecard_call()).
 * 'fn' runs in the daemon's context; 'complete' is signalled when done.
 */
struct ecard_request {
	void (*fn)(struct ecard_request *);	/* work function, run by kecardd */
	ecard_t *ec;				/* card the request applies to */
	unsigned int address;			/* byte offset into card ROM */
	unsigned int length;			/* number of bytes to transfer */
	unsigned int use_loader;		/* read via card loader if set */
	void *buffer;				/* destination for read data */
	struct completion *complete;		/* signalled when fn finishes */
};
66 | |||
/* Manufacturer/product pair plus replacement description string for
 * cards whose on-board identification is unusable (see blacklist[]). */
struct expcard_blacklist {
	unsigned short manufacturer;
	unsigned short product;
	const char *type;	/* human-readable card description */
};
72 | |||
static ecard_t *cards;				/* linked list of detected cards */
static ecard_t *slot_to_expcard[MAX_ECARDS];	/* slot number -> card lookup */
static unsigned int ectcr;			/* shadow of the EASI timing control reg */
#ifdef HAS_EXPMASK
/* NOTE(review): this guard tests HAS_EXPMASK, but the top of the file
 * defines HAVE_EXPMASK -- confirm which spelling is intended, as all
 * HAS_EXPMASK sections appear to compile out. */
static unsigned int have_expmask;
#endif

/* List of descriptions of cards which don't have an extended
 * identification, or chunk directories containing a description.
 */
static struct expcard_blacklist __initdata blacklist[] = {
	{ MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
};
86 | |||
87 | asmlinkage extern int | ||
88 | ecard_loader_reset(unsigned long base, loader_t loader); | ||
89 | asmlinkage extern int | ||
90 | ecard_loader_read(int off, unsigned long base, loader_t loader); | ||
91 | |||
/* Assemble a little-endian 16-bit value from two bytes. */
static inline unsigned short ecard_getu16(unsigned char *v)
{
	unsigned int lo = v[0];
	unsigned int hi = v[1];

	return lo | (hi << 8);
}
96 | |||
/* Assemble a sign-extended little-endian 24-bit value from three bytes. */
static inline signed long ecard_gets24(unsigned char *v)
{
	unsigned int val = v[0] | (v[1] << 8) | (v[2] << 16);

	if (v[2] & 0x80)
		val |= 0xff000000;	/* sign-extend bit 23 */

	return val;
}
101 | |||
102 | static inline ecard_t *slot_to_ecard(unsigned int slot) | ||
103 | { | ||
104 | return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL; | ||
105 | } | ||
106 | |||
107 | /* ===================== Expansion card daemon ======================== */ | ||
108 | /* | ||
109 | * Since the loader programs on the expansion cards need to be run | ||
110 | * in a specific environment, create a separate task with this | ||
111 | * environment up, and pass requests to this task as and when we | ||
112 | * need to. | ||
113 | * | ||
114 | * This should allow 99% of loaders to be called from Linux. | ||
115 | * | ||
116 | * From a security standpoint, we trust the card vendors. This | ||
117 | * may be a misplaced trust. | ||
118 | */ | ||
119 | static void ecard_task_reset(struct ecard_request *req) | ||
120 | { | ||
121 | struct expansion_card *ec = req->ec; | ||
122 | struct resource *res; | ||
123 | |||
124 | res = ec->slot_no == 8 | ||
125 | ? &ec->resource[ECARD_RES_MEMC] | ||
126 | : ec->type == ECARD_EASI | ||
127 | ? &ec->resource[ECARD_RES_EASI] | ||
128 | : &ec->resource[ECARD_RES_IOCSYNC]; | ||
129 | |||
130 | ecard_loader_reset(res->start, ec->loader); | ||
131 | } | ||
132 | |||
/*
 * Read req->length bytes of card ROM into req->buffer starting at
 * byte offset req->address.  Runs in the kecardd task so that card
 * loader code executes in the daemon's specially built environment.
 */
static void ecard_task_readbytes(struct ecard_request *req)
{
	struct expansion_card *ec = req->ec;
	unsigned char *buf = req->buffer;
	unsigned int len = req->length;
	unsigned int off = req->address;

	if (ec->slot_no == 8) {
		void __iomem *base = (void __iomem *)
				ec->resource[ECARD_RES_MEMC].start;

		/*
		 * The card maintains an index which increments the address
		 * into a 4096-byte page on each access.  We need to keep
		 * track of the counter.
		 */
		static unsigned int index;
		unsigned int page;

		/* Each 4096-byte page is selected via a word offset. */
		page = (off >> 12) * 4;
		if (page > 256 * 4)
			return;

		off &= 4095;

		/*
		 * If we are reading offset 0, or our current index is
		 * greater than the offset, reset the hardware index counter.
		 */
		if (off == 0 || index > off) {
			writeb(0, base);
			index = 0;
		}

		/*
		 * Increment the hardware index counter until we get to the
		 * required offset.  The read bytes are discarded.
		 */
		while (index < off) {
			readb(base + page);
			index += 1;
		}

		while (len--) {
			*buf++ = readb(base + page);
			index += 1;
		}
	} else {
		unsigned long base = (ec->type == ECARD_EASI
			 ? &ec->resource[ECARD_RES_EASI]
			 : &ec->resource[ECARD_RES_IOCSYNC])->start;
		void __iomem *pbase = (void __iomem *)base;

		if (!req->use_loader || !ec->loader) {
			/* ROM bytes appear one per word: read every 4th byte. */
			off *= 4;
			while (len--) {
				*buf++ = readb(pbase + off);
				off += 4;
			}
		} else {
			while(len--) {
				/*
				 * The following is required by some
				 * expansion card loader programs.
				 */
				*(unsigned long *)0x108 = 0;
				*buf++ = ecard_loader_read(off++, base,
							   ec->loader);
			}
		}
	}

}
206 | |||
/* Daemon hand-off state: a single pending request pointer, a wait
 * queue to wake kecardd, and a semaphore serialising callers. */
static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
static struct ecard_request *ecard_req;
static DECLARE_MUTEX(ecard_sem);
210 | |||
/*
 * Set up the expansion card daemon's page tables.
 */
static void ecard_init_pgtables(struct mm_struct *mm)
{
	struct vm_area_struct vma;

	/* We want to set up the page tables for the following mapping:
	 *  Virtual	Physical
	 *  0x03000000	0x03000000
	 *  0x03010000	unmapped
	 *  0x03210000	0x03210000
	 *  0x03400000	unmapped
	 *  0x08000000	0x08000000
	 *  0x10000000	unmapped
	 *
	 * FIXME: we don't follow this 100% yet.
	 */
	pgd_t *src_pgd, *dst_pgd;

	/* Copy the kernel's IO mapping down to the IO_START user range. */
	src_pgd = pgd_offset(mm, (unsigned long)IO_BASE);
	dst_pgd = pgd_offset(mm, IO_START);

	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));

	/* Likewise for the EASI space. */
	src_pgd = pgd_offset(mm, EASI_BASE);
	dst_pgd = pgd_offset(mm, EASI_SIZE ? EASI_BASE : EASI_BASE);

	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));

	/* Minimal vma so flush_tlb_range() knows which mm to flush. */
	vma.vm_mm = mm;

	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
}
246 | |||
/*
 * Give the daemon its own mm so card loaders can run with page
 * tables mapping the hardware (set up by ecard_init_pgtables).
 * Returns 0 on success, -ENOMEM if no mm could be allocated.
 */
static int ecard_init_mm(void)
{
	struct mm_struct * mm = mm_alloc();
	struct mm_struct *active_mm = current->active_mm;

	if (!mm)
		return -ENOMEM;

	/* Switch this task from its borrowed mm to the new one. */
	current->mm = mm;
	current->active_mm = mm;
	activate_mm(active_mm, mm);
	mmdrop(active_mm);	/* drop the reference on the old mm */
	ecard_init_pgtables(mm);
	return 0;
}
262 | |||
/*
 * Main loop of the kecardd daemon: wait for a request to be posted
 * in ecard_req, run it, and signal its completion.  Never returns.
 */
static int
ecard_task(void * unused)
{
	daemonize("kecardd");

	/*
	 * Allocate a mm.  We're not a lazy-TLB kernel task since we need
	 * to set page table entries where the user space would be.  Note
	 * that this also creates the page tables.  Failure is not an
	 * option here.
	 */
	if (ecard_init_mm())
		panic("kecardd: unable to alloc mm\n");

	while (1) {
		struct ecard_request *req;

		wait_event_interruptible(ecard_wait, ecard_req != NULL);

		/* Atomically take ownership of the posted request. */
		req = xchg(&ecard_req, NULL);
		if (req != NULL) {
			req->fn(req);
			complete(req->complete);
		}
	}
}
289 | |||
/*
 * Wake the expansion card daemon to action our request.
 *
 * FIXME: The test here is not sufficient to detect if the
 * kcardd is running.
 */
static void ecard_call(struct ecard_request *req)
{
	DECLARE_COMPLETION(completion);

	req->complete = &completion;

	/* ecard_sem serialises callers: only one request in flight. */
	down(&ecard_sem);
	ecard_req = req;
	wake_up(&ecard_wait);

	/*
	 * Now wait for kecardd to run.
	 */
	wait_for_completion(&completion);
	up(&ecard_sem);
}
312 | |||
313 | /* ======================= Mid-level card control ===================== */ | ||
314 | |||
315 | static void | ||
316 | ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld) | ||
317 | { | ||
318 | struct ecard_request req; | ||
319 | |||
320 | req.fn = ecard_task_readbytes; | ||
321 | req.ec = ec; | ||
322 | req.address = off; | ||
323 | req.length = len; | ||
324 | req.use_loader = useld; | ||
325 | req.buffer = addr; | ||
326 | |||
327 | ecard_call(&req); | ||
328 | } | ||
329 | |||
/*
 * Walk the card's chunk directory looking for the 'num'-th chunk of
 * type 'id', filling in *cd on success.  Handles link chunks (0xf0),
 * loads the card's loader on demand (0x80), and falls back to
 * re-walking via the loader if the plain directory is empty.
 * Returns 1 on success, 0 on failure.
 */
int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
{
	struct ex_chunk_dir excd;
	int index = 16;		/* chunk directory starts at byte 16 */
	int useld = 0;		/* whether to read via the card loader */

	if (!ec->cid.cd)
		return 0;

	while(1) {
		ecard_readbytes(&excd, ec, index, 8, useld);
		index += 8;
		if (c_id(&excd) == 0) {
			/* End of directory: retry once via the loader. */
			if (!useld && ec->loader) {
				useld = 1;
				index = 0;
				continue;
			}
			return 0;
		}
		if (c_id(&excd) == 0xf0) { /* link */
			index = c_start(&excd);
			continue;
		}
		if (c_id(&excd) == 0x80) { /* loader */
			if (!ec->loader) {
				ec->loader = (loader_t)kmalloc(c_len(&excd),
							       GFP_KERNEL);
				if (ec->loader)
					ecard_readbytes(ec->loader, ec,
							(int)c_start(&excd),
							c_len(&excd), useld);
				else
					return 0;
			}
			continue;
		}
		if (c_id(&excd) == id && num-- == 0)
			break;
	}

	if (c_id(&excd) & 0x80) {
		switch (c_id(&excd) & 0x70) {
		case 0x70:
			/* String-type chunk: fetch its body too. */
			ecard_readbytes((unsigned char *)excd.d.string, ec,
					(int)c_start(&excd), c_len(&excd),
					useld);
			break;
		case 0x00:
			break;
		}
	}
	cd->start_offset = c_start(&excd);
	memcpy(cd->d.string, excd.d.string, 256);
	return 1;
}
386 | |||
387 | /* ======================= Interrupt control ============================ */ | ||
388 | |||
/* Default IRQ enable: set the slot's bit in the backplane expansion
 * mask hardware, when present.  NOTE(review): guarded by HAS_EXPMASK
 * while the file defines HAVE_EXPMASK -- confirm intended spelling. */
static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
{
#ifdef HAS_EXPMASK
	if (irqnr < 4 && have_expmask) {
		have_expmask |= 1 << irqnr;
		__raw_writeb(have_expmask, EXPMASK_ENABLE);
	}
#endif
}
398 | |||
/* Default IRQ disable: clear the slot's bit in the backplane
 * expansion mask hardware, when present. */
static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
{
#ifdef HAS_EXPMASK
	if (irqnr < 4 && have_expmask) {
		have_expmask &= ~(1 << irqnr);
		__raw_writeb(have_expmask, EXPMASK_ENABLE);
	}
#endif
}
408 | |||
409 | static int ecard_def_irq_pending(ecard_t *ec) | ||
410 | { | ||
411 | return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask; | ||
412 | } | ||
413 | |||
/* FIQs cannot be routed by the default ops; a driver claiming FIQs
 * must install its own fiqenable hook. */
static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_enable called - impossible");
}
418 | |||
/* See ecard_def_fiq_enable: never valid with the default ops. */
static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
{
	panic("ecard_def_fiq_disable called - impossible");
}
423 | |||
424 | static int ecard_def_fiq_pending(ecard_t *ec) | ||
425 | { | ||
426 | return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask; | ||
427 | } | ||
428 | |||
/* Fallback operations installed when a card driver supplies none.
 * Positional initialiser -- the order must match expansioncard_ops_t
 * in asm/ecard.h (presumably irq enable/disable/pending then fiq
 * enable/disable/pending, per the function names; verify there). */
static expansioncard_ops_t ecard_default_ops = {
	ecard_def_irq_enable,
	ecard_def_irq_disable,
	ecard_def_irq_pending,
	ecard_def_fiq_enable,
	ecard_def_fiq_disable,
	ecard_def_fiq_pending
};
437 | |||
438 | /* | ||
439 | * Enable and disable interrupts from expansion cards. | ||
440 | * (interrupts are disabled for these functions). | ||
441 | * | ||
442 | * They are not meant to be called directly, but via enable/disable_irq. | ||
443 | */ | ||
444 | static void ecard_irq_unmask(unsigned int irqnr) | ||
445 | { | ||
446 | ecard_t *ec = slot_to_ecard(irqnr - 32); | ||
447 | |||
448 | if (ec) { | ||
449 | if (!ec->ops) | ||
450 | ec->ops = &ecard_default_ops; | ||
451 | |||
452 | if (ec->claimed && ec->ops->irqenable) | ||
453 | ec->ops->irqenable(ec, irqnr); | ||
454 | else | ||
455 | printk(KERN_ERR "ecard: rejecting request to " | ||
456 | "enable IRQs for %d\n", irqnr); | ||
457 | } | ||
458 | } | ||
459 | |||
460 | static void ecard_irq_mask(unsigned int irqnr) | ||
461 | { | ||
462 | ecard_t *ec = slot_to_ecard(irqnr - 32); | ||
463 | |||
464 | if (ec) { | ||
465 | if (!ec->ops) | ||
466 | ec->ops = &ecard_default_ops; | ||
467 | |||
468 | if (ec->ops && ec->ops->irqdisable) | ||
469 | ec->ops->irqdisable(ec, irqnr); | ||
470 | } | ||
471 | } | ||
472 | |||
/* irqchip for expansion card interrupts: ack by masking, since the
 * cards have no separate acknowledge mechanism here. */
static struct irqchip ecard_chip = {
	.ack	= ecard_irq_mask,
	.mask	= ecard_irq_mask,
	.unmask = ecard_irq_unmask,
};
478 | |||
479 | void ecard_enablefiq(unsigned int fiqnr) | ||
480 | { | ||
481 | ecard_t *ec = slot_to_ecard(fiqnr); | ||
482 | |||
483 | if (ec) { | ||
484 | if (!ec->ops) | ||
485 | ec->ops = &ecard_default_ops; | ||
486 | |||
487 | if (ec->claimed && ec->ops->fiqenable) | ||
488 | ec->ops->fiqenable(ec, fiqnr); | ||
489 | else | ||
490 | printk(KERN_ERR "ecard: rejecting request to " | ||
491 | "enable FIQs for %d\n", fiqnr); | ||
492 | } | ||
493 | } | ||
494 | |||
495 | void ecard_disablefiq(unsigned int fiqnr) | ||
496 | { | ||
497 | ecard_t *ec = slot_to_ecard(fiqnr); | ||
498 | |||
499 | if (ec) { | ||
500 | if (!ec->ops) | ||
501 | ec->ops = &ecard_default_ops; | ||
502 | |||
503 | if (ec->ops->fiqdisable) | ||
504 | ec->ops->fiqdisable(ec, fiqnr); | ||
505 | } | ||
506 | } | ||
507 | |||
/*
 * Diagnostic dump of per-card interrupt state, used when the wild-
 * interrupt / lockup detector fires.  Slot 8 is skipped (it has no
 * standard IRQ status handling here).
 */
static void ecard_dump_irq_state(void)
{
	ecard_t *ec;

	printk("Expansion card IRQ state:\n");

	for (ec = cards; ec; ec = ec->next) {
		if (ec->slot_no == 8)
			continue;

		printk("  %d: %sclaimed, ",
		       ec->slot_no, ec->claimed ? "" : "not ");

		/* Prefer the driver's own pending hook; otherwise show
		 * the raw status byte the default ops would test. */
		if (ec->ops && ec->ops->irqpending &&
		    ec->ops != &ecard_default_ops)
			printk("irq %spending\n",
			       ec->ops->irqpending(ec) ? "" : "not ");
		else
			printk("irqaddr %p, mask = %02X, status = %02X\n",
			       ec->irqaddr, ec->irqmask, readb(ec->irqaddr));
	}
}
530 | |||
/*
 * Called when an expansion card interrupt arrived but no card
 * claimed it.  Detects interrupt storms (many unrecognised IRQs
 * within one jiffy) and rate-limits the wild-interrupt warning.
 */
static void ecard_check_lockup(struct irqdesc *desc)
{
	static unsigned long last;	/* jiffies at last unrecognised IRQ */
	static int lockup;		/* unrecognised IRQs within this jiffy */

	/*
	 * If the timer interrupt has not run since the last million
	 * unrecognised expansion card interrupts, then there is
	 * something seriously wrong.  Disable the expansion card
	 * interrupts so at least we can continue.
	 *
	 * Maybe we ought to start a timer to re-enable them some time
	 * later?
	 */
	if (last == jiffies) {
		lockup += 1;
		if (lockup > 1000000) {
			printk(KERN_ERR "\nInterrupt lockup detected - "
			       "disabling all expansion card interrupts\n");

			desc->chip->mask(IRQ_EXPANSIONCARD);
			ecard_dump_irq_state();
		}
	} else
		lockup = 0;

	/*
	 * If we did not recognise the source of this interrupt,
	 * warn the user, but don't flood the user with these messages.
	 */
	if (!last || time_after(jiffies, last + 5*HZ)) {
		last = jiffies;
		printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
		ecard_dump_irq_state();
	}
}
567 | |||
/*
 * Demultiplex the shared expansion card interrupt: poll every
 * claimed card's pending hook and invoke the per-card IRQ handler
 * for each card reporting an interrupt.  If nobody claims it, run
 * the lockup/wild-interrupt check.
 */
static void
ecard_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	ecard_t *ec;
	int called = 0;		/* number of cards that had an IRQ pending */

	/* Mask the shared IRQ while we poll the cards. */
	desc->chip->mask(irq);
	for (ec = cards; ec; ec = ec->next) {
		int pending;

		if (!ec->claimed || ec->irq == NO_IRQ || ec->slot_no == 8)
			continue;

		if (ec->ops && ec->ops->irqpending)
			pending = ec->ops->irqpending(ec);
		else
			pending = ecard_default_ops.irqpending(ec);

		if (pending) {
			struct irqdesc *d = irq_desc + ec->irq;
			d->handle(ec->irq, d, regs);
			called ++;
		}
	}
	desc->chip->unmask(irq);

	if (called == 0)
		ecard_check_lockup(desc);
}
597 | |||
598 | #ifdef HAS_EXPMASK | ||
/* Per-slot enable masks for prioritised servicing.
 * NOTE(review): not referenced anywhere in this file -- presumably
 * intended for the priority scheme described in ecard_irqexp_handler. */
static unsigned char priority_masks[] =
{
	0xf0, 0xf1, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff
};

/* first_set[x] = index of the lowest set bit of the 4-bit value x,
 * i.e. the highest-priority slot with an interrupt pending.
 * first_set[0] is never used (callers check status != 0 first). */
static unsigned char first_set[] =
{
	0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
	0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00
};
609 | |||
610 | static void | ||
611 | ecard_irqexp_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) | ||
612 | { | ||
613 | const unsigned int statusmask = 15; | ||
614 | unsigned int status; | ||
615 | |||
616 | status = __raw_readb(EXPMASK_STATUS) & statusmask; | ||
617 | if (status) { | ||
618 | unsigned int slot = first_set[status]; | ||
619 | ecard_t *ec = slot_to_ecard(slot); | ||
620 | |||
621 | if (ec->claimed) { | ||
622 | struct irqdesc *d = irqdesc + ec->irq; | ||
623 | /* | ||
624 | * this ugly code is so that we can operate a | ||
625 | * prioritorising system: | ||
626 | * | ||
627 | * Card 0 highest priority | ||
628 | * Card 1 | ||
629 | * Card 2 | ||
630 | * Card 3 lowest priority | ||
631 | * | ||
632 | * Serial cards should go in 0/1, ethernet/scsi in 2/3 | ||
633 | * otherwise you will lose serial data at high speeds! | ||
634 | */ | ||
635 | d->handle(ec->irq, d, regs); | ||
636 | } else { | ||
637 | printk(KERN_WARNING "card%d: interrupt from unclaimed " | ||
638 | "card???\n", slot); | ||
639 | have_expmask &= ~(1 << slot); | ||
640 | __raw_writeb(have_expmask, EXPMASK_ENABLE); | ||
641 | } | ||
642 | } else | ||
643 | printk(KERN_WARNING "Wild interrupt from backplane (masks)\n"); | ||
644 | } | ||
645 | |||
/*
 * Probe for backplane expansion-mask interrupt hardware: with all
 * sources disabled, a functional mask register must report no
 * pending status.  On success, enable the mask bits for every
 * detected card.  Returns non-zero if the hardware was found.
 */
static int __init ecard_probeirqhw(void)
{
	ecard_t *ec;
	int found;

	__raw_writeb(0x00, EXPMASK_ENABLE);	/* mask everything */
	__raw_writeb(0xff, EXPMASK_STATUS);
	found = (__raw_readb(EXPMASK_STATUS) & 15) == 0;
	__raw_writeb(0xff, EXPMASK_ENABLE);	/* restore: all enabled */

	if (found) {
		printk(KERN_DEBUG "Expansion card interrupt "
		       "management hardware found\n");

		/* for each card present, set a bit to '1' */
		have_expmask = 0x80000000;

		for (ec = cards; ec; ec = ec->next)
			have_expmask |= 1 << ec->slot_no;

		__raw_writeb(have_expmask, EXPMASK_ENABLE);
	}

	return found;
}
671 | #else | ||
672 | #define ecard_irqexp_handler NULL | ||
673 | #define ecard_probeirqhw() (0) | ||
674 | #endif | ||
675 | |||
676 | #ifndef IO_EC_MEMC8_BASE | ||
677 | #define IO_EC_MEMC8_BASE 0 | ||
678 | #endif | ||
679 | |||
/*
 * Compute the CPU address of a card's region for the given bus cycle
 * type and speed, updating the EASI timing control shadow (ectcr) as
 * a side effect.  Returns 0 if the type/slot combination has no
 * mapping on this machine.
 */
unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
{
	unsigned long address = 0;
	int slot = ec->slot_no;

	/* Slot 8 (network podule) has a fixed MEMC-space base. */
	if (ec->slot_no == 8)
		return IO_EC_MEMC8_BASE;

	/* Default to normal (slow) EASI timing for this slot. */
	ectcr &= ~(1 << slot);

	switch (type) {
	case ECARD_MEMC:
		if (slot < 4)
			address = IO_EC_MEMC_BASE + (slot << 12);
		break;

	case ECARD_IOC:
		if (slot < 4)
			address = IO_EC_IOC_BASE + (slot << 12);
#ifdef IO_EC_IOC4_BASE
		else
			address = IO_EC_IOC4_BASE + ((slot - 4) << 12);
#endif
		/* IOC speed is selected by address bits 17-18. */
		if (address)
			address += speed << 17;
		break;

#ifdef IO_EC_EASI_BASE
	case ECARD_EASI:
		address = IO_EC_EASI_BASE + (slot << 22);
		if (speed == ECARD_FAST)
			ectcr |= 1 << slot;
		break;
#endif
	default:
		break;
	}

#ifdef IOMD_ECTCR
	iomd_writeb(ectcr, IOMD_ECTCR);
#endif
	return address;
}
723 | |||
724 | static int ecard_prints(char *buffer, ecard_t *ec) | ||
725 | { | ||
726 | char *start = buffer; | ||
727 | |||
728 | buffer += sprintf(buffer, " %d: %s ", ec->slot_no, | ||
729 | ec->type == ECARD_EASI ? "EASI" : " "); | ||
730 | |||
731 | if (ec->cid.id == 0) { | ||
732 | struct in_chunk_dir incd; | ||
733 | |||
734 | buffer += sprintf(buffer, "[%04X:%04X] ", | ||
735 | ec->cid.manufacturer, ec->cid.product); | ||
736 | |||
737 | if (!ec->card_desc && ec->cid.cd && | ||
738 | ecard_readchunk(&incd, ec, 0xf5, 0)) { | ||
739 | ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL); | ||
740 | |||
741 | if (ec->card_desc) | ||
742 | strcpy((char *)ec->card_desc, incd.d.string); | ||
743 | } | ||
744 | |||
745 | buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*"); | ||
746 | } else | ||
747 | buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id); | ||
748 | |||
749 | return buffer - start; | ||
750 | } | ||
751 | |||
/*
 * /proc/bus/ecard/devices read handler (old-style get_info interface).
 *
 * Walks the global card list, formatting one line per card.  Output
 * produced before offset @pos is skipped; *start is pointed at where
 * the requested window begins inside @buf, and at most @count bytes
 * are accounted.  Returns the number of bytes available this call.
 */
static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count)
{
	ecard_t *ec = cards;
	off_t at = 0;		/* total bytes formatted so far */
	int len, cnt;

	cnt = 0;
	while (ec && count > cnt) {
		len = ecard_prints(buf, ec);
		at += len;
		if (at >= pos) {
			if (!*start) {
				/* first line that overlaps the window */
				*start = buf + (pos - (at - len));
				cnt = at - pos;
			} else
				cnt += len;
			buf += len;
		}
		ec = ec->next;
	}
	return (count > cnt) ? cnt : count;
}
774 | |||
775 | static struct proc_dir_entry *proc_bus_ecard_dir = NULL; | ||
776 | |||
777 | static void ecard_proc_init(void) | ||
778 | { | ||
779 | proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus); | ||
780 | create_proc_info_entry("devices", 0, proc_bus_ecard_dir, | ||
781 | get_ecard_dev_info); | ||
782 | } | ||
783 | |||
/*
 * Fill in expansion-card resource @nr: @sz bytes of IORESOURCE_MEM
 * starting at @st, named after the card's bus id.  Multi-statement
 * macro wrapped in do { } while (0) so it is safe in if/else bodies.
 */
#define ec_set_resource(ec,nr,st,sz)				\
	do {							\
		(ec)->resource[nr].name = ec->dev.bus_id;	\
		(ec)->resource[nr].start = st;			\
		(ec)->resource[nr].end = (st) + (sz) - 1;	\
		(ec)->resource[nr].flags = IORESOURCE_MEM;	\
	} while (0)
791 | |||
792 | static void __init ecard_free_card(struct expansion_card *ec) | ||
793 | { | ||
794 | int i; | ||
795 | |||
796 | for (i = 0; i < ECARD_NUM_RESOURCES; i++) | ||
797 | if (ec->resource[i].flags) | ||
798 | release_resource(&ec->resource[i]); | ||
799 | |||
800 | kfree(ec); | ||
801 | } | ||
802 | |||
/*
 * Allocate and initialise an expansion card structure for @slot.
 *
 * Fills in default device-model fields, lays out the slot's memory
 * resources and claims them from iomem_resource.  A resource that
 * cannot be claimed is zeroed (flags = 0) rather than failing the
 * whole allocation.  Returns the card or ERR_PTR(-ENOMEM).
 */
static struct expansion_card *__init ecard_alloc_card(int type, int slot)
{
	struct expansion_card *ec;
	unsigned long base;
	int i;

	ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
	if (!ec) {
		ec = ERR_PTR(-ENOMEM);
		goto nomem;
	}

	memset(ec, 0, sizeof(ecard_t));

	ec->slot_no = slot;
	ec->type = type;
	ec->irq = NO_IRQ;
	ec->fiq = NO_IRQ;
	ec->dma = NO_DMA;
	ec->ops = &ecard_default_ops;

	snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot);
	ec->dev.parent = NULL;
	ec->dev.bus = &ecard_bus_type;
	ec->dev.dma_mask = &ec->dma_mask;
	ec->dma_mask = (u64)0xffffffff;

	if (slot < 4) {
		ec_set_resource(ec, ECARD_RES_MEMC,
				PODSLOT_MEMC_BASE + (slot << 14),
				PODSLOT_MEMC_SIZE);
		base = PODSLOT_IOC0_BASE + (slot << 14);
	} else
		base = PODSLOT_IOC4_BASE + ((slot - 4) << 14);

#ifdef CONFIG_ARCH_RPC
	if (slot < 8) {
		ec_set_resource(ec, ECARD_RES_EASI,
				PODSLOT_EASI_BASE + (slot << 24),
				PODSLOT_EASI_SIZE);
	}

	/*
	 * NOTE: the "else" below deliberately pairs with this "if"
	 * across the #endif, so the IOC loop runs for every slot
	 * except slot 8 on RiscPC, and unconditionally otherwise.
	 */
	if (slot == 8) {
		ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE);
	} else
#endif

	for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++)
		ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
				base + (i << 19), PODSLOT_IOC_SIZE);

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ec->resource[i].flags &&
		    request_resource(&iomem_resource, &ec->resource[i])) {
			printk(KERN_ERR "%s: resource(s) not available\n",
				ec->dev.bus_id);
			/* mark the resource unusable but keep the card */
			ec->resource[i].end -= ec->resource[i].start;
			ec->resource[i].start = 0;
			ec->resource[i].flags = 0;
		}
	}

 nomem:
	return ec;
}
868 | |||
869 | static ssize_t ecard_show_irq(struct device *dev, char *buf) | ||
870 | { | ||
871 | struct expansion_card *ec = ECARD_DEV(dev); | ||
872 | return sprintf(buf, "%u\n", ec->irq); | ||
873 | } | ||
874 | |||
875 | static ssize_t ecard_show_dma(struct device *dev, char *buf) | ||
876 | { | ||
877 | struct expansion_card *ec = ECARD_DEV(dev); | ||
878 | return sprintf(buf, "%u\n", ec->dma); | ||
879 | } | ||
880 | |||
881 | static ssize_t ecard_show_resources(struct device *dev, char *buf) | ||
882 | { | ||
883 | struct expansion_card *ec = ECARD_DEV(dev); | ||
884 | char *str = buf; | ||
885 | int i; | ||
886 | |||
887 | for (i = 0; i < ECARD_NUM_RESOURCES; i++) | ||
888 | str += sprintf(str, "%08lx %08lx %08lx\n", | ||
889 | ec->resource[i].start, | ||
890 | ec->resource[i].end, | ||
891 | ec->resource[i].flags); | ||
892 | |||
893 | return str - buf; | ||
894 | } | ||
895 | |||
896 | static ssize_t ecard_show_vendor(struct device *dev, char *buf) | ||
897 | { | ||
898 | struct expansion_card *ec = ECARD_DEV(dev); | ||
899 | return sprintf(buf, "%u\n", ec->cid.manufacturer); | ||
900 | } | ||
901 | |||
902 | static ssize_t ecard_show_device(struct device *dev, char *buf) | ||
903 | { | ||
904 | struct expansion_card *ec = ECARD_DEV(dev); | ||
905 | return sprintf(buf, "%u\n", ec->cid.product); | ||
906 | } | ||
907 | |||
908 | static ssize_t ecard_show_type(struct device *dev, char *buf) | ||
909 | { | ||
910 | struct expansion_card *ec = ECARD_DEV(dev); | ||
911 | return sprintf(buf, "%s\n", ec->type == ECARD_EASI ? "EASI" : "IOC"); | ||
912 | } | ||
913 | |||
/* Default read-only sysfs attributes attached to every ecard device. */
static struct device_attribute ecard_dev_attrs[] = {
	__ATTR(device,   S_IRUGO, ecard_show_device,    NULL),
	__ATTR(dma,      S_IRUGO, ecard_show_dma,       NULL),
	__ATTR(irq,      S_IRUGO, ecard_show_irq,       NULL),
	__ATTR(resource, S_IRUGO, ecard_show_resources, NULL),
	__ATTR(type,     S_IRUGO, ecard_show_type,      NULL),
	__ATTR(vendor,   S_IRUGO, ecard_show_vendor,    NULL),
	__ATTR_NULL,
};
923 | |||
924 | |||
/*
 * Claim the memory regions of all populated card resources on behalf
 * of the card's bound driver (the region name is taken from the
 * driver, so the driver must be bound when this is called).
 *
 * Returns 0 on success or -EBUSY, in which case all regions claimed
 * so far have been released again.
 */
int ecard_request_resources(struct expansion_card *ec)
{
	int i, err = 0;

	for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
		if (ecard_resource_end(ec, i) &&
		    !request_mem_region(ecard_resource_start(ec, i),
					ecard_resource_len(ec, i),
					ec->dev.driver->name)) {
			err = -EBUSY;
			break;
		}
	}

	if (err) {
		/* roll back: i is the first index that failed, so
		 * release only indices 0 .. i-1 */
		while (i--)
			if (ecard_resource_end(ec, i))
				release_mem_region(ecard_resource_start(ec, i),
						   ecard_resource_len(ec, i));
	}
	return err;
}
EXPORT_SYMBOL(ecard_request_resources);
948 | |||
949 | void ecard_release_resources(struct expansion_card *ec) | ||
950 | { | ||
951 | int i; | ||
952 | |||
953 | for (i = 0; i < ECARD_NUM_RESOURCES; i++) | ||
954 | if (ecard_resource_end(ec, i)) | ||
955 | release_mem_region(ecard_resource_start(ec, i), | ||
956 | ecard_resource_len(ec, i)); | ||
957 | } | ||
958 | EXPORT_SYMBOL(ecard_release_resources); | ||
959 | |||
/*
 * Probe for an expansion card.
 *
 * If bit 1 of the first byte of the card is set, then the
 * card does not exist.
 *
 * Allocates the card structure, reads the expansion card identity
 * (ECId) from the podule, fills in IRQ/FIQ details, appends the card
 * to the global list and registers it with the driver model.
 * Returns 0 on success, -ENODEV if no card answers in this slot, or
 * -ENOMEM from the allocation.
 */
static int __init
ecard_probe(int slot, card_type_t type)
{
	ecard_t **ecp;
	ecard_t *ec;
	struct ex_ecid cid;
	int i, rc;

	ec = ecard_alloc_card(type, slot);
	if (IS_ERR(ec)) {
		rc = PTR_ERR(ec);
		goto nomem;
	}

	rc = -ENODEV;
	if ((ec->podaddr = ecard_address(ec, type, ECARD_SYNC)) == 0)
		goto nodev;

	cid.r_zero = 1;
	ecard_readbytes(&cid, ec, 0, 16, 0);
	if (cid.r_zero)		/* must-be-zero bit still set: no card */
		goto nodev;

	ec->cid.id	= cid.r_id;
	ec->cid.cd	= cid.r_cd;
	ec->cid.is	= cid.r_is;
	ec->cid.w	= cid.r_w;
	ec->cid.manufacturer = ecard_getu16(cid.r_manu);
	ec->cid.product = ecard_getu16(cid.r_prod);
	ec->cid.country = cid.r_country;
	ec->cid.irqmask = cid.r_irqmask;
	ec->cid.irqoff  = ecard_gets24(cid.r_irqoff);
	ec->cid.fiqmask = cid.r_fiqmask;
	ec->cid.fiqoff  = ecard_gets24(cid.r_fiqoff);
	ec->fiqaddr	=
	ec->irqaddr	= (void __iomem *)ioaddr(ec->podaddr);

	if (ec->cid.is) {
		/* card supplies its own IRQ/FIQ masks and register offsets */
		ec->irqmask = ec->cid.irqmask;
		ec->irqaddr += ec->cid.irqoff;
		ec->fiqmask = ec->cid.fiqmask;
		ec->fiqaddr += ec->cid.fiqoff;
	} else {
		/* simple card: fixed bits in the podule status byte */
		ec->irqmask = 1;
		ec->fiqmask = 4;
	}

	/* look up a human-readable description for known-odd cards */
	for (i = 0; i < sizeof(blacklist) / sizeof(*blacklist); i++)
		if (blacklist[i].manufacturer == ec->cid.manufacturer &&
		    blacklist[i].product == ec->cid.product) {
			ec->card_desc = blacklist[i].type;
			break;
		}

	/*
	 * hook the interrupt handlers
	 */
	if (slot < 8) {
		ec->irq = 32 + slot;
		set_irq_chip(ec->irq, &ecard_chip);
		set_irq_handler(ec->irq, do_level_IRQ);
		set_irq_flags(ec->irq, IRQF_VALID);
	}

#ifdef IO_EC_MEMC8_BASE
	if (slot == 8)
		ec->irq = 11;
#endif
#ifdef CONFIG_ARCH_RPC
	/* On RiscPC, only first two slots have DMA capability */
	if (slot < 2)
		ec->dma = 2 + slot;
#endif

	/* append to the tail of the global card list */
	for (ecp = &cards; *ecp; ecp = &(*ecp)->next);

	*ecp = ec;
	slot_to_expcard[slot] = ec;

	/* NOTE(review): device_register() failure is not handled here */
	device_register(&ec->dev);

	return 0;

 nodev:
	ecard_free_card(ec);
 nomem:
	return rc;
}
1054 | |||
/*
 * Initialise the expansion card system.
 * Locate all hardware - interrupt management and
 * actual cards.
 */
static int __init ecard_init(void)
{
	int slot, irqhw, ret;

	/* kernel thread running ecard_task (not in view here) --
	 * presumably services ecard_call() requests; confirm against
	 * the definition earlier in this file */
	ret = kernel_thread(ecard_task, NULL, CLONE_KERNEL);
	if (ret < 0) {
		printk(KERN_ERR "Ecard: unable to create kernel thread: %d\n",
		       ret);
		return ret;
	}

	printk("Probing expansion cards\n");

	for (slot = 0; slot < 8; slot ++) {
		/* try EASI space first; fall back to IOC space */
		if (ecard_probe(slot, ECARD_EASI) == -ENODEV)
			ecard_probe(slot, ECARD_IOC);
	}

#ifdef IO_EC_MEMC8_BASE
	/* slot 8: the network slot */
	ecard_probe(8, ECARD_IOC);
#endif

	irqhw = ecard_probeirqhw();

	/* route the expansion card interrupt through the EXPMASK-aware
	 * handler when the mask hardware is present */
	set_irq_chained_handler(IRQ_EXPANSIONCARD,
				irqhw ? ecard_irqexp_handler : ecard_irq_handler);

	ecard_proc_init();

	return 0;
}

subsys_initcall(ecard_init);
1093 | |||
1094 | /* | ||
1095 | * ECARD "bus" | ||
1096 | */ | ||
1097 | static const struct ecard_id * | ||
1098 | ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec) | ||
1099 | { | ||
1100 | int i; | ||
1101 | |||
1102 | for (i = 0; ids[i].manufacturer != 65535; i++) | ||
1103 | if (ec->cid.manufacturer == ids[i].manufacturer && | ||
1104 | ec->cid.product == ids[i].product) | ||
1105 | return ids + i; | ||
1106 | |||
1107 | return NULL; | ||
1108 | } | ||
1109 | |||
1110 | static int ecard_drv_probe(struct device *dev) | ||
1111 | { | ||
1112 | struct expansion_card *ec = ECARD_DEV(dev); | ||
1113 | struct ecard_driver *drv = ECARD_DRV(dev->driver); | ||
1114 | const struct ecard_id *id; | ||
1115 | int ret; | ||
1116 | |||
1117 | id = ecard_match_device(drv->id_table, ec); | ||
1118 | |||
1119 | ecard_claim(ec); | ||
1120 | ret = drv->probe(ec, id); | ||
1121 | if (ret) | ||
1122 | ecard_release(ec); | ||
1123 | return ret; | ||
1124 | } | ||
1125 | |||
1126 | static int ecard_drv_remove(struct device *dev) | ||
1127 | { | ||
1128 | struct expansion_card *ec = ECARD_DEV(dev); | ||
1129 | struct ecard_driver *drv = ECARD_DRV(dev->driver); | ||
1130 | |||
1131 | drv->remove(ec); | ||
1132 | ecard_release(ec); | ||
1133 | |||
1134 | return 0; | ||
1135 | } | ||
1136 | |||
/*
 * Before rebooting, we must make sure that the expansion card is in a
 * sensible state, so it can be re-detected.  This means that the first
 * page of the ROM must be visible.  We call the expansion cards reset
 * handler, if any.
 */
static void ecard_drv_shutdown(struct device *dev)
{
	struct expansion_card *ec = ECARD_DEV(dev);
	struct ecard_driver *drv = ECARD_DRV(dev->driver);
	struct ecard_request req;

	if (drv->shutdown)
		drv->shutdown(ec);
	ecard_release(ec);

	/*
	 * If this card has a loader, call the reset handler.
	 */
	if (ec->loader) {
		req.fn = ecard_task_reset;
		req.ec = ec;
		ecard_call(&req);	/* executed by the ecard kernel thread */
	}
}
1162 | |||
1163 | int ecard_register_driver(struct ecard_driver *drv) | ||
1164 | { | ||
1165 | drv->drv.bus = &ecard_bus_type; | ||
1166 | drv->drv.probe = ecard_drv_probe; | ||
1167 | drv->drv.remove = ecard_drv_remove; | ||
1168 | drv->drv.shutdown = ecard_drv_shutdown; | ||
1169 | |||
1170 | return driver_register(&drv->drv); | ||
1171 | } | ||
1172 | |||
/*
 * Unregister a driver previously registered with
 * ecard_register_driver().
 */
void ecard_remove_driver(struct ecard_driver *drv)
{
	driver_unregister(&drv->drv);
}
1177 | |||
1178 | static int ecard_match(struct device *_dev, struct device_driver *_drv) | ||
1179 | { | ||
1180 | struct expansion_card *ec = ECARD_DEV(_dev); | ||
1181 | struct ecard_driver *drv = ECARD_DRV(_drv); | ||
1182 | int ret; | ||
1183 | |||
1184 | if (drv->id_table) { | ||
1185 | ret = ecard_match_device(drv->id_table, ec) != NULL; | ||
1186 | } else { | ||
1187 | ret = ec->cid.id == drv->id; | ||
1188 | } | ||
1189 | |||
1190 | return ret; | ||
1191 | } | ||
1192 | |||
/* The expansion card bus: matching policy plus default sysfs attrs. */
struct bus_type ecard_bus_type = {
	.name		= "ecard",
	.dev_attrs	= ecard_dev_attrs,
	.match		= ecard_match,
};
1198 | |||
/*
 * Register the ecard bus type.  Runs at postcore_initcall so the bus
 * exists before ecard_init() (subsys_initcall) registers card devices.
 */
static int ecard_bus_init(void)
{
	return bus_register(&ecard_bus_type);
}

postcore_initcall(ecard_bus_init);
1205 | |||
/* Interface exported to expansion card drivers built as modules. */
EXPORT_SYMBOL(ecard_readchunk);
EXPORT_SYMBOL(__ecard_address);
EXPORT_SYMBOL(ecard_register_driver);
EXPORT_SYMBOL(ecard_remove_driver);
EXPORT_SYMBOL(ecard_bus_type);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S new file mode 100644 index 000000000000..bb27c317d94b --- /dev/null +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -0,0 +1,745 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/entry-armv.S | ||
3 | * | ||
4 | * Copyright (C) 1996,1997,1998 Russell King. | ||
5 | * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Low-level vector interface routines | ||
12 | * | ||
13 | * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes | ||
14 | * it to save wrong values... Be aware! | ||
15 | */ | ||
16 | #include <linux/config.h> | ||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/glue.h> | ||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/vfpmacros.h> | ||
23 | |||
24 | #include "entry-header.S" | ||
25 | |||
/*
 * Invalid mode handlers
 *
 * Entered when an exception arrives from a processor mode the vector
 * tables do not expect.  Each stub records the exception kind and
 * falls into the common tail at 1:, which ends up in bad_mode().
 */
	.macro	inv_entry, sym, reason
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
	ldr	r4, .LC\sym			@ r4 -> saved pc/cpsr/old_r0 area
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry abt, BAD_PREFETCH
	b	1f

__dabt_invalid:
	inv_entry abt, BAD_DATA
	b	1f

__irq_invalid:
	inv_entry irq, BAD_IRQ
	b	1f

__und_invalid:
	inv_entry und, BAD_UNDEFINSTR

	@ Common tail: complete the frame and report the bad mode.
1:	zero_fp
	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
	add	r4, sp, #S_PC
	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
	mov	r0, sp
	and	r2, r6, #31			@ int mode
	b	bad_mode
58 | |||
/*
 * SVC mode handlers
 */

/*
 * svc_entry - build a pt_regs-style frame on the SVC stack for an
 * exception taken in SVC mode.  \sym selects the .LC\sym area where
 * the vector stub stashed lr_<exception> and spsr_<exception>.
 */
	.macro	svc_entry, sym
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r2, .LC\sym
	add	r0, sp, #S_FRAME_SIZE		@ sp value before the exception
	ldmia	r2, {r2 - r4}			@ get pc, cpsr
	add	r5, sp, #S_SP
	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm
82 | |||
.align	5
/* SVC-mode data abort handler. */
__dabt_svc:
	svc_entry abt

	@
	@ get ready to re-enable interrupts if appropriate
	@ (only if they were enabled in the aborted context)
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]			@ indirect call via processor vector
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
129 | |||
.align	5
/* SVC-mode interrupt handler: loop handling IRQs until none pend. */
__irq_svc:
	svc_entry irq
#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@ (lr is set to 1b so we re-poll after each handled IRQ)
	@
	adrne	lr, 1b
	bne	asm_do_IRQ
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
	teq	r0, r7				@ should be unchanged
	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
/* Kernel preemption helper for __irq_svc; r8/r9 set up by caller. */
svc_preempt:
	teq	r9, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr				@ in interrupt context: don't preempt
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [r8, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	b	1b
#endif
179 | |||
.align	5
/* SVC-mode undefined instruction handler. */
__und_svc:
	svc_entry und

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]			@ r2 = pc after the faulting insn
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
209 | |||
.align	5
/* SVC-mode prefetch abort handler. */
__pabt_svc:
	svc_entry abt

	@
	@ re-enable interrupts if appropriate
	@ (only if they were enabled in the aborted context)
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
243 | |||
.align	5
@ Literal pool: addresses of the per-exception temporary save areas
@ (__temp_*) and of helpers referenced by the handlers above.
.LCirq:
	.word	__temp_irq
.LCund:
	.word	__temp_und
.LCabt:
	.word	__temp_abt
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif
261 | |||
/*
 * User mode handlers
 */

/*
 * usr_entry - build a pt_regs frame on the SVC stack for an exception
 * taken from user mode, saving the user sp/lr alongside it.
 */
	.macro	usr_entry, sym
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r7, .LC\sym
	add	r5, sp, #S_PC
	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r5, {r2 - r4}
	stmdb	r5, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r7, r0, __temp_\sym

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm
294 | |||
.align	5
/* User-mode data abort handler. */
__dabt_usr:
	usr_entry abt

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]			@ indirect call via processor vector
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq r2
	mov	r2, sp
	adr	lr, ret_from_exception		@ return path back to user mode
	b	do_DataAbort
323 | |||
.align	5
/* User-mode interrupt handler: loop handling IRQs, then ret_to_user. */
__irq_usr:
	usr_entry irq

#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	adrne	lr, 1b				@ re-poll after each handled IRQ
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	bne	asm_do_IRQ
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_PREEMPT]
	teq	r0, r7				@ count should be unchanged
	str	r9, [r8, #TI_PREEMPT]
	strne	r0, [r0, -r0]			@ bug()
	mov	tsk, r8
#else
	get_thread_info tsk
#endif
	mov	why, #0
	b	ret_to_user

	.ltorg
354 | |||
.align	5
/* User-mode undefined instruction handler. */
__und_usr:
	usr_entry und

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP
	sub	r4, r2, #4			@ r4 = address of faulting insn

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]			@ user-mode load; may fault
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 2b
	.previous
386 | |||
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this threads thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr				@ not a coprocessor insn: give up
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	enable_irq r7
	@ Computed branch: jump into the per-coprocessor table below,
	@ indexed by CP number (one entry per CP).
	add	pc, pc, r8, lsr #6
	mov	r0, r0				@ filler for pipeline offset

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1  (FPE)
	b	do_fpe				@ CP#2  (FPE)
	mov	pc, lr				@ CP#3
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.data
ENTRY(fp_enter)
	.word	fpundefinstr			@ patched when an FP module loads
	.text

/* No emulator claimed the instruction: report it as undefined. */
fpundefinstr:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr
470 | |||
.align	5
/* User-mode prefetch abort handler. */
__pabt_usr:
	usr_entry abt

	enable_irq r0				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
487 | |||
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0			@ save XScale accumulator
	stmia	ip, {r4, r5}
#endif
	mov	r4, #0xffff0fff
	str	r3, [r4, #-3]			@ Set TLS ptr
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5			@ restore next's accumulator
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
520 | |||
	__INIT
/*
 * Vector stubs.
 *
 * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * \name       stub label suffix (vector_\name)
 * \sym        selects the 3-word save area via .LCs\sym (-> __temp_\sym)
 * \correction amount subtracted from lr to get the true faulting PC
 *
 * The stub stashes the exception lr/spsr in the save area, switches to
 * SVC mode, then indexes the 16-entry table that follows the macro
 * invocation by the interrupted mode's low 4 CPSR bits.
 */
	.macro	vector_stub, name, sym, correction=0
	.align	5

vector_\name:
	ldr	r13, .LCs\sym			@ r13 -> __temp_\sym save area
	.if \correction
	sub	lr, lr, #\correction
	.endif
	str	lr, [r13]			@ save exception lr
	mrs	lr, spsr
	str	lr, [r13, #4]			@ save exception spsr
	@
	@ now branch to the relevant MODE handling routine
	@
	mrs	r13, cpsr
	bic	r13, r13, #MODE_MASK
	orr	r13, r13, #MODE_SVC
	msr	spsr_cxsf, r13			@ switch to SVC_32 mode

	and	lr, lr, #15			@ index = interrupted mode's low 4 bits
	ldr	lr, [pc, lr, lsl #2]		@ fetch handler from the following table
	movs	pc, lr				@ Changes mode and branches
	.endm
554 | |||
__stubs_start:
/*
 * Interrupt dispatcher
 * Each table below has 16 entries, indexed by the interrupted mode's
 * low 4 CPSR bits (see vector_stub): only USR and SVC entries point at
 * real handlers; every other mode is treated as invalid.
 */
	vector_stub	irq, irq, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, abt, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, abt, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, und

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
646 | |||
	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4			@ return, restoring cpsr from spsr

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn		@ deliberately hang: should never occur
672 | |||
/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

/* Pointers to the 3-word __temp_* save areas used by the vector stubs. */
.LCsirq:
	.word	__temp_irq
.LCsund:
	.word	__temp_und
.LCsabt:
	.word	__temp_abt

__stubs_end:

/* The stubs are copied 0x200 past wherever the vector page ends up. */
	.equ	__real_stubs_start, .LCvectors + 0x200

/*
 * The 8-slot hardware vector table itself.  Each slot branches to the
 * corresponding stub at its final, relocated address; the SWI slot
 * loads pc from the relocated .LCvswi literal instead.
 */
.LCvectors:
	swi	SYS_ERROR0			@ reset vector
	b	__real_stubs_start + (vector_und - __stubs_start)
	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
	b	__real_stubs_start + (vector_pabt - __stubs_start)
	b	__real_stubs_start + (vector_dabt - __stubs_start)
	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
	b	__real_stubs_start + (vector_irq - __stubs_start)
	b	__real_stubs_start + (vector_fiq - __stubs_start)
702 | |||
/*
 * __trap_init: copy the 8-word vector table to the high vectors page
 * (0xffff0000) and the exception stubs to 0xffff0200, matching the
 * __real_stubs_start fixup applied to the branches above.
 */
ENTRY(__trap_init)
	stmfd	sp!, {r4 - r6, lr}

	mov	r0, #0xff000000
	orr	r0, r0, #0x00ff0000		@ high vectors position
	adr	r1, .LCvectors			@ set up the vectors
	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}

	add	r2, r0, #0x200
	adr	r0, __stubs_start		@ copy stubs to 0x200
	adr	r1, __stubs_end
1:	ldr	r3, [r0], #4			@ word-at-a-time copy loop
	str	r3, [r2], #4
	cmp	r0, r1
	blt	1b
	LOADREGS(fd, sp!, {r4 - r6, pc})
720 | |||
	.data

/*
 * Do not reorder these, and do not insert extra data between...
 * Each __temp_* area is 3 words — saved lr, saved spsr, old_r0 —
 * filled in by the corresponding vector stub before the switch to
 * SVC mode.
 */

__temp_irq:
	.word	0				@ saved lr_irq
	.word	0				@ saved spsr_irq
	.word	-1				@ old_r0
__temp_und:
	.word	0				@ Saved lr_und
	.word	0				@ Saved spsr_und
	.word	-1				@ old_r0
__temp_abt:
	.word	0				@ Saved lr_abt
	.word	0				@ Saved spsr_abt
	.word	-1				@ old_r0

	.globl	cr_alignment
	.globl	cr_no_alignment
/*
 * Cached CP15 control register values; cr_alignment is reloaded into
 * c1,c0 on SWI entry (see vector_swi).  Presumably cr_no_alignment is
 * the same value without the alignment-fault bit — TODO confirm
 * against the setup code that fills these in.
 */
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S new file mode 100644 index 000000000000..53a7e0dea44d --- /dev/null +++ b/arch/arm/kernel/entry-common.S | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/entry-common.S | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | |||
12 | #include <asm/thread_info.h> | ||
13 | #include <asm/ptrace.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | #include "entry-header.S" | ||
17 | |||
18 | /* | ||
19 | * We rely on the fact that R0 is at the bottom of the stack (due to | ||
20 | * slow/fast restore user regs). | ||
21 | */ | ||
22 | #if S_R0 != 0 | ||
23 | #error "Please fix" | ||
24 | #endif | ||
25 | |||
	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irq r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK		@ any pending work flags?
	bne	fast_work_pending		@ yes: take the slow path
	fast_restore_user_regs			@ no: straight back to user space
38 | |||
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0 (fast path kept it live)
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume		@ deliver signals / resume notifications
	disable_irq r1				@ disable interrupts
	b	no_work_pending

work_resched:
	bl	schedule
	@ falls through to ret_to_user below to re-check the work flags
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK		@ re-check for pending work
	bne	work_pending
no_work_pending:
	slow_restore_user_regs			@ full register restore to user space
68 | |||
/*
 * This is how we return from a fork.  schedule_tail finishes the
 * context switch; if the child is being traced, report the syscall
 * exit before taking the normal slow return path.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1				@ this counts as a syscall return
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
83 | |||
84 | |||
85 | #include "calls.S" | ||
86 | |||
87 | /*============================================================================= | ||
88 | * SWI handler | ||
89 | *----------------------------------------------------------------------------- | ||
90 | */ | ||
91 | |||
/* If we're optimising for StrongARM the resulting code won't
   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
/*
 * ARM710 bug workaround: verify the trapping instruction really is a
 * SWI; if not, undo the exception entry and retry the instruction.
 * NOTE(review): relies on r8 holding the saved-lr slot pointer set up
 * by the vector stub — confirm against save_user_regs/vector_swi.
 */
	.macro	arm710_bug_check, instr, temp
	and	\temp, \instr, #0x0f000000	@ check for SWI
	teq	\temp, #0x0f000000
	bne	.Larm700bug
	.endm

.Larm700bug:
	ldr	r0, [sp, #S_PSR]		@ Get calling cpsr
	sub	lr, lr, #4			@ back up to the faulting instruction
	str	lr, [r8]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	ldr	lr, [sp, #S_PC]			@ Get PC
	add	sp, sp, #S_FRAME_SIZE
	movs	pc, lr				@ re-execute from user mode
#else
/* No workaround needed: expands to nothing. */
	.macro	arm710_bug_check, instr, temp
	.endm
#endif
116 | |||
	.align	5
/*
 * SWI (syscall) entry point.  Saves the user registers, extracts the
 * syscall number (ARM: from the SWI instruction; Thumb: from r7),
 * optionally diverts through the ptrace path, then indexes
 * sys_call_table.  Out-of-range numbers fall through to arm_syscall /
 * sys_ni_syscall.
 */
ENTRY(vector_swi)
	save_user_regs
	zero_fp
	get_scno				@ scno = syscall number
	arm710_bug_check scno, ip

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq ip

	str	r4, [sp, #-S_OFF]!		@ push fifth arg

	get_thread_info tsk
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
	adr	tbl, sys_call_table		@ load syscall table pointer
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	adr	lr, ret_fast_syscall		@ return address
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	@ number out of range: maybe an ARM-private syscall
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #ARMSWI_OFFSET
	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
151 | |||
/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 * Reports syscall entry to the tracer, redispatches the (possibly
 * modified) syscall, then reports the exit on the way out.
 */
__sys_trace:
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b				@ out of range: shared fallback above

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
174 | |||
	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
/* Literal holding the address of the cr_alignment variable (see entry-armv.S). */
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif

/* The syscall table: one word per entry, generated from calls.S. */
	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
185 | |||
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r5 = syscall table
/*
 * sys_syscall: the indirect syscall, syscall(number, args...).
 * Shifts each argument down one register and redispatches; rejects
 * recursion into sys_syscall itself and out-of-range numbers.
 */
	.type	sys_syscall, #function
sys_syscall:
	eor	scno, r0, #OS_NUMBER << 20
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE	@ disallow calling ourselves
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
203 | |||
/*
 * Wrappers for syscalls that need a struct pt_regs pointer (or an
 * extra stacked argument) in addition to their ordinary register
 * arguments.  sp + S_OFF is the base of the saved user register frame.
 */
sys_fork_wrapper:
	add	r0, sp, #S_OFF			@ r0 = pt_regs
	b	sys_fork

sys_vfork_wrapper:
	add	r0, sp, #S_OFF			@ r0 = pt_regs
	b	sys_vfork

sys_execve_wrapper:
	add	r3, sp, #S_OFF			@ 4th arg = pt_regs
	b	sys_execve

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]			@ stack 5th arg = pt_regs
	b	sys_clone

sys_sigsuspend_wrapper:
	add	r3, sp, #S_OFF			@ 4th arg = pt_regs
	b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
	add	r2, sp, #S_OFF			@ 3rd arg = pt_regs
	b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF			@ r0 = pt_regs
	b	sys_sigreturn

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF			@ r0 = pt_regs
	b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]		@ 3rd arg = user sp
	b	do_sigaltstack

sys_futex_wrapper:
	str	r5, [sp, #4]			@ push sixth arg
	b	sys_futex
244 | |||
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK			@ offset exactly representable in pages?
	moveq	r5, r5, lsr #PAGE_SHIFT - 12	@ convert 4K units to page units
	streq	r5, [sp, #4]			@ stack the sixth arg
	beq	do_mmap2
	mov	r0, #-EINVAL
	RETINSTR(mov,pc, lr)
#else
	@ 4K pages: the offset is already in page units
	str	r5, [sp, #4]
	b	do_mmap2
#endif
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S new file mode 100644 index 000000000000..4039d8c120b5 --- /dev/null +++ b/arch/arm/kernel/entry-header.S | |||
@@ -0,0 +1,182 @@ | |||
1 | #include <linux/config.h> /* for CONFIG_ARCH_xxxx */ | ||
2 | #include <linux/linkage.h> | ||
3 | |||
4 | #include <asm/assembler.h> | ||
5 | #include <asm/constants.h> | ||
6 | #include <asm/errno.h> | ||
7 | #include <asm/hardware.h> | ||
8 | #include <asm/arch/irqs.h> | ||
9 | #include <asm/arch/entry-macro.S> | ||
10 | |||
#ifndef MODE_SVC
#define MODE_SVC 0x13	/* fallback: ARM supervisor mode bits */
#endif

/* Clear the frame pointer on kernel entry when frame pointers are enabled. */
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm
20 | |||
	.text

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

#define PT_TRACESYS	0x00000002

@ OS version number used in SWIs
@  RISC OS is 0
@  RISC iX is 8
@
#define OS_NUMBER	9
#define ARMSWI_OFFSET	0x000f0000

@
@ Stack format (ensured by USER_* and SVC_*)
@ Byte offsets of each saved register within the exception frame.
@
#define S_FRAME_SIZE	72
#define S_OLD_R0	68
#define S_PSR		64

#define S_PC		60
#define S_LR		56
#define S_SP		52
#define S_IP		48
#define S_FP		44
#define S_R10		40
#define S_R9		36
#define S_R8		32
#define S_R7		28
#define S_R6		24
#define S_R5		20
#define S_R4		16
#define S_R3		12
#define S_R2		8
#define S_R1		4
#define S_R0		0
#define S_OFF		8	/* extra space pushed below the frame for stacked syscall args */
65 | |||
/* Write \mode into the control field of cpsr; \reg is unused here. */
	.macro	set_cpsr_c, reg, mode
	msr	cpsr_c, \mode
	.endm

#if __LINUX_ARM_ARCH__ >= 6
/* ARMv6+: dedicated interrupt mask instructions; \temp is not needed. */
	.macro	disable_irq, temp
	cpsid	i
	.endm

	.macro	enable_irq, temp
	cpsie	i
	.endm
#else
/* Pre-v6: rewrite cpsr_c wholesale — assumes the caller is in SVC mode. */
	.macro	disable_irq, temp
	set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
	.endm

	.macro	enable_irq, temp
	set_cpsr_c \temp, #MODE_SVC
	.endm
#endif
87 | |||
/*
 * Build the exception frame (see S_* offsets above) on the SVC stack
 * on entry from user mode.  Clobbers r8; leaves sp at the saved r0.
 */
	.macro	save_user_regs
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
	add	r8, sp, #S_PC
	stmdb	r8, {sp, lr}^			@ Calling sp, lr (user bank)
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	.endm
98 | |||
/* Restore the full user register set and return from the exception. */
	.macro	restore_user_regs
	ldr	r1, [sp, #S_PSR]		@ Get calling cpsr
	disable_irq ip				@ disable IRQs
	ldr	lr, [sp, #S_PC]!		@ Get PC
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ Get calling r0 - lr (user bank)
	mov	r0, r0				@ nop: no banked-reg access right after ldm^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

/*
 * Must be called with IRQs already disabled.
 * Fast path: r0 already holds the syscall return value, so only
 * r1 - lr are reloaded from the frame.
 */
	.macro	fast_restore_user_regs
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

/*
 * Must be called with IRQs already disabled.
 */
	.macro	slow_restore_user_regs
	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
	ldr	lr, [sp, #S_PC]!		@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr (comment was wrongly "r1 - lr")
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
135 | |||
/* No-op here; presumably masks the PC on 26-bit configs — TODO confirm. */
	.macro	mask_pc, rd, rm
	.endm

/* thread_info lives at the bottom of the 8K-aligned kernel stack. */
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13		@ round sp down to an 8K boundary
	mov	\rd, \rd, lsl #13
	.endm

/* Reload the cached control register value relative to a nearby symbol. */
	.macro	alignment_trap, rbase, rtemp, sym
#ifdef CONFIG_ALIGNMENT_TRAP
#define OFF_CR_ALIGNMENT(x)	cr_alignment - x

	ldr	\rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
	mcr	p15, 0, \rtemp, c1, c0		@ write CP15 control register
#endif
	.endm
152 | |||
153 | |||
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info

/*
 * Get the system call number.
 * ARM mode: read it out of the SWI instruction (lr points just past it).
 * Thumb mode: it arrives in r7; the OS number is added to match the
 * ARM-mode encoding.
 */
	.macro	get_scno
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #OS_NUMBER << 20	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	mask_pc	lr, lr
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif
	.endm
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c new file mode 100644 index 000000000000..9299dfc25698 --- /dev/null +++ b/arch/arm/kernel/fiq.c | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/fiq.c | ||
3 | * | ||
4 | * Copyright (C) 1998 Russell King | ||
5 | * Copyright (C) 1998, 1999 Phil Blundell | ||
6 | * | ||
7 | * FIQ support written by Philip Blundell <philb@gnu.org>, 1998. | ||
8 | * | ||
9 | * FIQ support re-written by Russell King to be more generic | ||
10 | * | ||
11 | * We now properly support a method by which the FIQ handlers can | ||
12 | * be stacked onto the vector. We still do not support sharing | ||
13 | * the FIQ vector itself. | ||
14 | * | ||
15 | * Operation is as follows: | ||
16 | * 1. Owner A claims FIQ: | ||
17 | * - default_fiq relinquishes control. | ||
18 | * 2. Owner A: | ||
19 | * - inserts code. | ||
20 | * - sets any registers, | ||
21 | * - enables FIQ. | ||
22 | * 3. Owner B claims FIQ: | ||
23 | * - if owner A has a relinquish function. | ||
24 | * - disable FIQs. | ||
25 | * - saves any registers. | ||
26 | * - returns zero. | ||
27 | * 4. Owner B: | ||
28 | * - inserts code. | ||
29 | * - sets any registers, | ||
30 | * - enables FIQ. | ||
31 | * 5. Owner B releases FIQ: | ||
32 | * - Owner A is asked to reacquire FIQ: | ||
33 | * - inserts code. | ||
34 | * - restores saved registers. | ||
35 | * - enables FIQ. | ||
36 | * 6. Goto 3 | ||
37 | */ | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/kernel.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/seq_file.h> | ||
42 | |||
43 | #include <asm/cacheflush.h> | ||
44 | #include <asm/fiq.h> | ||
45 | #include <asm/irq.h> | ||
46 | #include <asm/system.h> | ||
47 | #include <asm/uaccess.h> | ||
48 | |||
/* Boot-time instruction word at the FIQ vector, captured by init_FIQ()
 * and reinstalled by fiq_def_op() when no other owner holds the FIQ. */
static unsigned long no_fiq_insn;
50 | |||
51 | /* Default reacquire function | ||
52 | * - we always relinquish FIQ control | ||
53 | * - we always reacquire FIQ control | ||
54 | */ | ||
55 | static int fiq_def_op(void *ref, int relinquish) | ||
56 | { | ||
57 | if (!relinquish) | ||
58 | set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn)); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
/* Fallback owner installed at boot; sits at the bottom of the owner stack. */
static struct fiq_handler default_owner = {
	.name	= "default",
	.fiq_op	= fiq_def_op,
};

/* Current owner of the FIQ vector; starts out as the default owner. */
static struct fiq_handler *current_fiq = &default_owner;
69 | |||
70 | int show_fiq_list(struct seq_file *p, void *v) | ||
71 | { | ||
72 | if (current_fiq != &default_owner) | ||
73 | seq_printf(p, "FIQ: %s\n", current_fiq->name); | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
/*
 * Copy handler code directly over the FIQ vector slot (offset 0x1c)
 * in the high vector page, then flush the icache for the high mapping
 * and, if the low vectors are in use, the low mapping too.
 */
void set_fiq_handler(void *start, unsigned int length)
{
	memcpy((void *)0xffff001c, start, length);
	flush_icache_range(0xffff001c, 0xffff001c + length);
	if (!vectors_high())
		flush_icache_range(0x1c, 0x1c + length);
}
85 | |||
/*
 * Taking an interrupt in FIQ mode is death, so both these functions
 * disable irqs for the duration.  Note - these functions are almost
 * entirely coded in assembly.
 */
/*
 * Load regs->ARM_r8..ARM_r14 into the FIQ-banked r8-r14: build a frame,
 * switch cpsr to FIQ mode with IRQ+FIQ masked, ldmia the values, and
 * switch back.  Naked: the asm provides its own prologue/epilogue.
 */
void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
{
	register unsigned long tmp;
	asm volatile (
	"mov	ip, sp\n\
	stmfd	sp!, {fp, ip, lr, pc}\n\
	sub	fp, ip, #4\n\
	mrs	%0, cpsr\n\
	msr	cpsr_c, %2	@ select FIQ mode\n\
	mov	r0, r0\n\
	ldmia	%1, {r8 - r14}\n\
	msr	cpsr_c, %0	@ return to SVC mode\n\
	mov	r0, r0\n\
	ldmea	fp, {fp, sp, pc}"
	: "=&r" (tmp)
	: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
108 | |||
/*
 * Mirror of set_fiq_regs: store the FIQ-banked r8-r14 into
 * regs->ARM_r8..ARM_r14 (stmia instead of ldmia), with IRQs and FIQs
 * masked while in FIQ mode.  Naked: the asm is the whole function.
 */
void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
{
	register unsigned long tmp;
	asm volatile (
	"mov	ip, sp\n\
	stmfd	sp!, {fp, ip, lr, pc}\n\
	sub	fp, ip, #4\n\
	mrs	%0, cpsr\n\
	msr	cpsr_c, %2	@ select FIQ mode\n\
	mov	r0, r0\n\
	stmia	%1, {r8 - r14}\n\
	msr	cpsr_c, %0	@ return to SVC mode\n\
	mov	r0, r0\n\
	ldmea	fp, {fp, sp, pc}"
	: "=&r" (tmp)
	: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
126 | |||
127 | int claim_fiq(struct fiq_handler *f) | ||
128 | { | ||
129 | int ret = 0; | ||
130 | |||
131 | if (current_fiq) { | ||
132 | ret = -EBUSY; | ||
133 | |||
134 | if (current_fiq->fiq_op != NULL) | ||
135 | ret = current_fiq->fiq_op(current_fiq->dev_id, 1); | ||
136 | } | ||
137 | |||
138 | if (!ret) { | ||
139 | f->next = current_fiq; | ||
140 | current_fiq = f; | ||
141 | } | ||
142 | |||
143 | return ret; | ||
144 | } | ||
145 | |||
146 | void release_fiq(struct fiq_handler *f) | ||
147 | { | ||
148 | if (current_fiq != f) { | ||
149 | printk(KERN_ERR "%s FIQ trying to release %s FIQ\n", | ||
150 | f->name, current_fiq->name); | ||
151 | dump_stack(); | ||
152 | return; | ||
153 | } | ||
154 | |||
155 | do | ||
156 | current_fiq = current_fiq->next; | ||
157 | while (current_fiq->fiq_op(current_fiq->dev_id, 0)); | ||
158 | } | ||
159 | |||
/* FIQ lines are numbered after the IRQ space; reuse the IRQ mask API. */
void enable_fiq(int fiq)
{
	enable_irq(fiq + FIQ_START);
}

void disable_fiq(int fiq)
{
	disable_irq(fiq + FIQ_START);
}
169 | |||
170 | EXPORT_SYMBOL(set_fiq_handler); | ||
171 | EXPORT_SYMBOL(set_fiq_regs); | ||
172 | EXPORT_SYMBOL(get_fiq_regs); | ||
173 | EXPORT_SYMBOL(claim_fiq); | ||
174 | EXPORT_SYMBOL(release_fiq); | ||
175 | EXPORT_SYMBOL(enable_fiq); | ||
176 | EXPORT_SYMBOL(disable_fiq); | ||
177 | |||
/*
 * Capture the boot-time instruction at the FIQ vector so the default
 * owner can reinstall it whenever the FIQ is unclaimed.
 */
void __init init_FIQ(void)
{
	no_fiq_insn = *(unsigned long *)0xffff001c;
}
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S new file mode 100644 index 000000000000..171b3e811c71 --- /dev/null +++ b/arch/arm/kernel/head.S | |||
@@ -0,0 +1,516 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/head.S | ||
3 | * | ||
4 | * Copyright (C) 1994-2002 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Kernel startup code for all 32-bit CPUs | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/linkage.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/assembler.h> | ||
17 | #include <asm/domain.h> | ||
18 | #include <asm/mach-types.h> | ||
19 | #include <asm/procinfo.h> | ||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/constants.h> | ||
22 | #include <asm/system.h> | ||
23 | |||
24 | #define PROCINFO_MMUFLAGS 8 | ||
25 | #define PROCINFO_INITFUNC 12 | ||
26 | |||
27 | #define MACHINFO_TYPE 0 | ||
28 | #define MACHINFO_PHYSRAM 4 | ||
29 | #define MACHINFO_PHYSIO 8 | ||
30 | #define MACHINFO_PGOFFIO 12 | ||
31 | #define MACHINFO_NAME 16 | ||
32 | |||
33 | #ifndef CONFIG_XIP_KERNEL | ||
34 | /* | ||
35 | * We place the page tables 16K below TEXTADDR. Therefore, we must make sure | ||
36 | * that TEXTADDR is correctly set. Currently, we expect the least significant | ||
37 | * 16 bits to be 0x8000, but we could probably relax this restriction to | ||
38 | * TEXTADDR >= PAGE_OFFSET + 0x4000 | ||
39 | * | ||
40 | * Note that swapper_pg_dir is the virtual address of the page tables, and | ||
41 | * pgtbl gives us a position-independent reference to these tables. We can | ||
42 | * do this because stext == TEXTADDR | ||
43 | */ | ||
44 | #if (TEXTADDR & 0xffff) != 0x8000 | ||
45 | #error TEXTADDR must start at 0xXXXX8000 | ||
46 | #endif | ||
47 | |||
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, TEXTADDR - 0x4000

	@ pgtbl: return in \rd the physical address of the page tables,
	@ located 16K below stext (PC-relative, so usable before the MMU
	@ is enabled; valid because stext == TEXTADDR).
	.macro	pgtbl, rd, phys
	adr	\rd, stext
	sub	\rd, \rd, #0x4000
	.endm
55 | #else | ||
56 | /* | ||
57 | * XIP Kernel: | ||
58 | * | ||
59 | * We place the page tables 16K below DATAADDR. Therefore, we must make sure | ||
60 | * that DATAADDR is correctly set. Currently, we expect the least significant | ||
61 | * 16 bits to be 0x8000, but we could probably relax this restriction to | ||
62 | * DATAADDR >= PAGE_OFFSET + 0x4000 | ||
63 | * | ||
64 | * Note that pgtbl is meant to return the physical address of swapper_pg_dir. | ||
65 | * We can't make it relative to the kernel position in this case since | ||
66 | * the kernel can physically be anywhere. | ||
67 | */ | ||
68 | #if (DATAADDR & 0xffff) != 0x8000 | ||
69 | #error DATAADDR must start at 0xXXXX8000 | ||
70 | #endif | ||
71 | |||
	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, DATAADDR - 0x4000

	@ XIP pgtbl: the tables live in RAM 16K below DATAADDR; compute
	@ their physical address from the virtual one minus VIRT_OFFSET,
	@ rebased onto the physical RAM start passed in \phys.
	.macro	pgtbl, rd, phys
	ldr	\rd, =((DATAADDR - 0x4000) - VIRT_OFFSET)
	add	\rd, \rd, \phys
	.endm
79 | #endif | ||
80 | |||
81 | /* | ||
82 | * Kernel startup entry point. | ||
83 | * --------------------------- | ||
84 | * | ||
85 | * This is normally called from the decompressor code. The requirements | ||
86 | * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0, | ||
87 | * r1 = machine nr. | ||
88 | * | ||
89 | * This code is mostly position independent, so if you link the kernel at | ||
90 | * 0xc0008000, you call this at __pa(0xc0008000). | ||
91 | * | ||
92 | * See linux/arch/arm/tools/mach-types for the complete list of machine | ||
93 | * numbers for r1. | ||
94 | * | ||
95 | * We're trying to keep crap to a minimum; DO NOT add any machine specific | ||
96 | * crap here - that's what the boot loader (or in extreme, well justified | ||
97 | * circumstances, zImage) is for. | ||
98 | */ | ||
	__INIT
	.type	stext, %function
ENTRY(stext)
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
						@ and irqs disabled
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__create_page_tables		@ returns r4 = phys pgtable base

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, __switch_data		@ address to jump to after
						@ mmu has been enabled
	adr	lr, __enable_mmu		@ return (PIC) address
	add	pc, r10, #PROCINFO_INITFUNC

	/*
	 * Parameter block consumed by __mmap_switched; the trailing
	 * comments name the register each word is loaded into.
	 */
	.type	__switch_data, %object
__switch_data:
	.long	__mmap_switched
	.long	__data_loc			@ r4
	.long	__data_start			@ r5
	.long	__bss_start			@ r6
	.long	_end				@ r7
	.long	processor_id			@ r4
	.long	__machine_arch_type		@ r5
	.long	cr_alignment			@ r6
	.long	init_thread_union+8192		@ sp (top of 8K init stack)
135 | |||
/*
 * The following fragment of code is executed with the MMU on, and uses
 * absolute addresses; this is not position independent.
 *
 * Copies .data to its run address if needed, clears .bss, then stores
 * the CPU/machine identification and control register values before
 * entering start_kernel on the init task's stack.
 *
 * r0  = cp#15 control register
 * r1  = machine ID
 * r9  = processor ID
 */
	.type	__mmap_switched, %function
__mmap_switched:
	adr	r3, __switch_data + 4		@ skip the __mmap_switched word

	ldmia	r3!, {r4, r5, r6, r7}		@ r4=__data_loc r5=__data_start
						@ r6=__bss_start r7=_end
	cmp	r4, r5				@ Copy data segment if needed
1:	cmpne	r5, r6
	ldrne	fp, [r4], #4
	strne	fp, [r5], #4
	bne	1b

	mov	fp, #0				@ Clear BSS (and zero fp)
1:	cmp	r6, r7
	strcc	fp, [r6],#4
	bcc	1b

	ldmia	r3, {r4, r5, r6, sp}		@ variable pointers + init stack
	str	r9, [r4]			@ Save processor ID
	str	r1, [r5]			@ Save machine type
	bic	r4, r0, #CR_A			@ Clear 'A' bit
	stmia	r6, {r0, r4}			@ Save control register values
	b	start_kernel
166 | |||
167 | |||
168 | |||
/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 * On entry: r0 = cp#15 control register value from the processor's
 * init function, r4 = physical page table address from
 * __create_page_tables.
 */
	.type	__enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A			@ trap unaligned accesses
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on
197 | |||
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 * r0  = cp#15 control register
 * r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5				@ NOTE(review): presumably keeps
						@ this within one cache line
	.type	__turn_mmu_on, %function
__turn_mmu_on:
	mov	r0, r0				@ padding nop around the switch
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg (MMU on)
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
	mov	r3, r3				@ two more nops while the
	mov	r3, r3				@ translation change settles
	mov	pc, r13				@ jump to virtual address
218 | |||
219 | |||
220 | |||
/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8  = machinfo
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5, r6, r7 corrupted
 *  r4 = physical page table address
 */
	.type	__create_page_tables, %function
__create_page_tables:
	ldr	r5, [r8, #MACHINFO_PHYSRAM]	@ physram
	pgtbl	r4, r5				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 * (four stores per iteration).
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MMUFLAGS]	@ mmuflags

	/*
	 * Create identity mapping for first MB of kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine corresponding section base address.
	 */
	mov	r6, pc, lsr #20			@ start of kernel section
	orr	r3, r7, r6, lsl #20		@ flags + kernel base
	str	r3, [r4, r6, lsl #2]		@ identity mapping

	/*
	 * Now setup the pagetables for our kernel direct
	 * mapped region.  We round TEXTADDR down to the
	 * nearest megabyte boundary.  It is assumed that
	 * the kernel fits within 4 contiguous 1MB sections.
	 */
	add	r0, r4,  #(TEXTADDR & 0xff000000) >> 18 @ start of kernel
	str	r3, [r0, #(TEXTADDR & 0x00f00000) >> 18]!
	add	r3, r3, #1 << 20
	str	r3, [r0, #4]!			@ KERNEL + 1MB
	add	r3, r3, #1 << 20
	str	r3, [r0, #4]!			@ KERNEL + 2MB
	add	r3, r3, #1 << 20
	str	r3, [r0, #4]			@ KERNEL + 3MB

	/*
	 * Then map first 1MB of ram in case it contains our boot params.
	 */
	add	r0, r4, #VIRT_OFFSET >> 18
	orr	r6, r5, r7
	str	r6, [r0]

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some ram to cover our .data and .bss areas.
	 * Mapping 3MB should be plenty.
	 */
	sub	r3, r4, r5
	mov	r3, r3, lsr #20
	add	r0, r0, r3, lsl #2
	add	r6, r6, r3, lsl #20
	str	r6, [r0], #4
	add	r6, r6, #(1 << 20)
	str	r6, [r0], #4
	add	r6, r6, #(1 << 20)
	str	r6, [r0]
#endif

	bic	r7, r7, #0x0c			@ turn off cacheable
						@ and bufferable bits
#ifdef CONFIG_DEBUG_LL
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	ldr	r3, [r8, #MACHINFO_PGOFFIO]
	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3
	ldr	r3, [r8, #MACHINFO_PHYSIO]
	orr	r3, r3, r7
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder, we need to map in
	 * the 16550-type serial port for the debug messages
	 */
	teq	r1, #MACH_TYPE_NETWINDER
	teqne	r1, #MACH_TYPE_CATS
	bne	1f
	add	r0, r4, #0x3fc0			@ ff000000
	mov	r3, #0x7c000000
	orr	r3, r3, r7
	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	str	r3, [r0], #4
1:
#endif
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x80			@ 02000000
	mov	r3, #0x02000000
	orr	r3, r3, r7
	str	r3, [r0]
	add	r0, r4, #0x3600			@ d8000000
	str	r3, [r0]
#endif
	mov	pc, lr
	.ltorg
354 | |||
355 | |||
356 | |||
/*
 * Exception handling.  Something went wrong and we can't proceed.  We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set we try to print out something about the error
 * and hope for the best (useful if bootloader fails to pass a proper
 * machine ID for example).
 */

	@ Unknown processor: print a message (DEBUG_LL only) and hang.
	@ Without DEBUG_LL this label simply falls through towards __error.
	.type	__error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_p1
	bl	printascii
	b	__error
str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
	.align
#endif
376 | |||
	@ Unknown machine ID: print the offending ID plus the full list of
	@ machine types this kernel supports (DEBUG_LL only), then hang.
	.type	__error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
	mov	r4, r1				@ preserve machine ID
	adr	r0, str_a1
	bl	printascii
	mov	r0, r4
	bl	printhex8
	adr	r0, str_a2
	bl	printascii
	adr	r3, 3f				@ 3f: ".long ." anchor below
	ldmia	r3, {r4, r5, r6}		@ get machine desc list
	sub	r4, r3, r4			@ get offset between virt&phys
	add	r5, r5, r4			@ convert virt addresses to
	add	r6, r6, r4			@ physical address space
1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
	bl	printhex8
	mov	r0, #'\t'
	bl	printch
	ldr     r0, [r5, #MACHINFO_NAME]	@ get machine name
	add	r0, r0, r4
	bl	printascii
	mov	r0, #'\n'
	bl	printch
	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
	cmp	r5, r6
	blo	1b
	adr	r0, str_a3
	bl	printascii
	b	__error
str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
	.align
#endif
412 | |||
	@ Terminal failure: optionally paint the screen red (RiscPC), then
	@ spin forever.
	.type	__error, %function
__error:
#ifdef CONFIG_ARCH_RPC
	/*
	 * Turn the screen red on a error - RiscPC only.
	 */
	mov	r0, #0x02000000
	mov	r3, #0x11
	orr	r3, r3, r3, lsl #8
	orr	r3, r3, r3, lsl #16
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
#endif
1:	mov	r0, r0				@ dead end - loop forever
	b	1b
430 | |||
431 | |||
/*
 * Read processor ID register (CP#15, CR0), and look up in the linker-built
 * supported processor list.  Note that we can't use the absolute addresses
 * for the __proc_info lists since we aren't running with the MMU on
 * (and therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 * Returns:
 *  r3, r4, r6 corrupted
 *  r5 = proc_info pointer in physical address space
 *  r9 = cpuid
 */
	.type	__lookup_processor_type, %function
__lookup_processor_type:
	adr	r3, 3f
	ldmda	r3, {r5, r6, r9}		@ descending load from 3f:
						@ r5=__proc_info_begin,
						@ r6=__proc_info_end,
						@ r9=link-time address of 3f
	sub	r3, r3, r9			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
	mrc	p15, 0, r9, c0, c0		@ get processor id
1:	ldmia	r5, {r3, r4}			@ value, mask
	and	r4, r4, r9			@ mask wanted bits
	teq	r3, r4
	beq	2f
	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
	cmp	r5, r6
	blo	1b
	mov	r5, #0				@ unknown processor
2:	mov	pc, lr
461 | |||
/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_processor_type)
	stmfd	sp!, {r4 - r6, r9, lr}
	bl	__lookup_processor_type
	mov	r0, r5				@ return proc_info pointer (or 0)
	ldmfd	sp!, {r4 - r6, r9, pc}

/*
 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
 * more information about the __proc_info and __arch_info structures.
 *
 * The "3: .long ." word records this table's link-time address; the
 * lookup routines subtract it from the PC-relative address to obtain
 * the virtual-to-physical offset before the MMU is enabled.
 */
	.long	__proc_info_begin
	.long	__proc_info_end
3:	.long	.
	.long	__arch_info_begin
	.long	__arch_info_end
480 | |||
/*
 * Lookup machine architecture in the linker-build list of architectures.
 * Note that we can't use the absolute addresses for the __arch_info
 * lists since we aren't running with the MMU on (and therefore, we are
 * not in the correct address space).  We have to calculate the offset.
 *
 * r1 = machine architecture number
 * Returns:
 *  r3, r4, r6 corrupted
 *  r5 = mach_info pointer in physical address space
 */
	.type	__lookup_machine_type, %function
__lookup_machine_type:
	adr	r3, 3b				@ 3b: ".long ." anchor above
	ldmia	r3, {r4, r5, r6}		@ r4=link addr of anchor,
						@ r5/r6=__arch_info bounds
	sub	r3, r3, r4			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
	teq	r3, r1				@ matches loader number?
	beq	2f				@ found
	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
	cmp	r5, r6
	blo	1b
	mov	r5, #0				@ unknown machine
2:	mov	pc, lr
507 | |||
/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_machine_type)
	stmfd	sp!, {r4 - r6, lr}
	mov	r1, r0				@ helper expects the ID in r1
	bl	__lookup_machine_type
	mov	r0, r5				@ return mach_info pointer (or 0)
	ldmfd	sp!, {r4 - r6, pc}
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c new file mode 100644 index 000000000000..a00cca0000bd --- /dev/null +++ b/arch/arm/kernel/init_task.c | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/init_task.c | ||
3 | */ | ||
4 | #include <linux/mm.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/init_task.h> | ||
10 | #include <linux/mqueue.h> | ||
11 | |||
12 | #include <asm/uaccess.h> | ||
13 | #include <asm/pgtable.h> | ||
14 | |||
/*
 * Boot-time instances of the per-process bookkeeping structures,
 * referenced by the INIT_TASK()/INIT_THREAD_INFO() initializers in
 * this file.
 */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);
22 | |||
23 | /* | ||
24 | * Initial thread structure. | ||
25 | * | ||
26 | * We need to make sure that this is 8192-byte aligned due to the | ||
27 | * way process stacks are handled. This is done by making sure | ||
28 | * the linker maps this in the .text segment right after head.S, | ||
29 | * and making head.S ensure the proper alignment. | ||
30 | * | ||
31 | * The things we do for performance.. | ||
32 | */ | ||
union thread_union init_thread_union
	/* placed in .init.task so the linker script can guarantee the
	 * 8192-byte alignment required by process-stack handling */
	__attribute__((__section__(".init.task"))) =
		{ INIT_THREAD_INFO(init_task) };
36 | |||
37 | /* | ||
38 | * Initial task structure. | ||
39 | * | ||
40 | * All other task structs will be allocated on slabs in fork.c | ||
41 | */ | ||
/* The initial task structure; all other task structs come from the slab. */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c new file mode 100644 index 000000000000..6c20c1188b60 --- /dev/null +++ b/arch/arm/kernel/io.c | |||
@@ -0,0 +1,51 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/types.h> | ||
3 | |||
4 | #include <asm/io.h> | ||
5 | |||
6 | /* | ||
7 | * Copy data from IO memory space to "real" memory space. | ||
8 | * This needs to be optimized. | ||
9 | */ | ||
10 | void _memcpy_fromio(void *to, void __iomem *from, size_t count) | ||
11 | { | ||
12 | unsigned char *t = to; | ||
13 | while (count) { | ||
14 | count--; | ||
15 | *t = readb(from); | ||
16 | t++; | ||
17 | from++; | ||
18 | } | ||
19 | } | ||
20 | |||
21 | /* | ||
22 | * Copy data from "real" memory space to IO memory space. | ||
23 | * This needs to be optimized. | ||
24 | */ | ||
25 | void _memcpy_toio(void __iomem *to, const void *from, size_t count) | ||
26 | { | ||
27 | const unsigned char *f = from; | ||
28 | while (count) { | ||
29 | count--; | ||
30 | writeb(*f, to); | ||
31 | f++; | ||
32 | to++; | ||
33 | } | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * "memset" on IO memory space. | ||
38 | * This needs to be optimized. | ||
39 | */ | ||
40 | void _memset_io(void __iomem *dst, int c, size_t count) | ||
41 | { | ||
42 | while (count) { | ||
43 | count--; | ||
44 | writeb(c, dst); | ||
45 | dst++; | ||
46 | } | ||
47 | } | ||
48 | |||
49 | EXPORT_SYMBOL(_memcpy_fromio); | ||
50 | EXPORT_SYMBOL(_memcpy_toio); | ||
51 | EXPORT_SYMBOL(_memset_io); | ||
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c new file mode 100644 index 000000000000..ff187f4308f0 --- /dev/null +++ b/arch/arm/kernel/irq.c | |||
@@ -0,0 +1,1038 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/irq.c | ||
3 | * | ||
4 | * Copyright (C) 1992 Linus Torvalds | ||
5 | * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This file contains the code used by various IRQ handling routines: | ||
12 | * asking for different IRQ's should be done through these routines | ||
13 | * instead of just grabbing them. Thus setups with different IRQ numbers | ||
14 | * shouldn't result in any weird surprises, and installing new handlers | ||
15 | * should be easier. | ||
16 | * | ||
17 | * IRQ's are in fact implemented a bit like signal handlers for the kernel. | ||
18 | * Naturally it's not a 1:1 relation, but there are similarities. | ||
19 | */ | ||
20 | #include <linux/config.h> | ||
21 | #include <linux/kernel_stat.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/random.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/seq_file.h> | ||
32 | #include <linux/errno.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <linux/kallsyms.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | |||
37 | #include <asm/irq.h> | ||
38 | #include <asm/system.h> | ||
39 | #include <asm/mach/irq.h> | ||
40 | |||
41 | /* | ||
 * Maximum IRQ count. Currently, this is arbitrary. However, it should
43 | * not be set too low to prevent false triggering. Conversely, if it | ||
44 | * is set too high, then you could miss a stuck IRQ. | ||
45 | * | ||
46 | * Maybe we ought to set a timer and re-enable the IRQ at a later time? | ||
47 | */ | ||
48 | #define MAX_IRQ_CNT 100000 | ||
49 | |||
static int noirqdebug;				/* suppresses report_bad_irq() output */
static volatile unsigned long irq_err_count;	/* count of spurious/bad interrupts */
static DEFINE_SPINLOCK(irq_controller_lock);	/* guards irq_desc state and irq_pending */
static LIST_HEAD(irq_pending);			/* IRQs queued for (re-)execution */

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;	/* machine-specific IRQ init hook */
57 | |||
58 | /* | ||
59 | * No architecture-specific irq_finish function defined in arm/arch/irqs.h. | ||
60 | */ | ||
61 | #ifndef irq_finish | ||
62 | #define irq_finish(irq) do { } while (0) | ||
63 | #endif | ||
64 | |||
/*
 * Dummy mask/unmask handler - fills irqchip slots where the hardware
 * needs no action; deliberately empty.
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}
71 | |||
/* Placeholder interrupt handler that never claims the interrupt. */
irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
76 | |||
/* Flow handler for spurious/unclaimed interrupts: count and log them. */
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
82 | |||
/* Chip used by bad_irq_desc: every operation is a no-op. */
static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

/*
 * Descriptor for unclaimed interrupts; its handler just counts and logs
 * the event, and disable_depth = 1 keeps it permanently disabled.
 */
static struct irqdesc bad_irq_desc = {
	.chip		= &bad_chip,
	.handle		= do_bad_IRQ,
	.pend		= LIST_HEAD_INIT(bad_irq_desc.pend),
	.disable_depth	= 1,
};
95 | |||
#ifdef CONFIG_SMP
/*
 * Busy-wait until any handler currently executing for @irq on another
 * CPU has finished (desc->running is cleared by smp_clear_running()).
 */
void synchronize_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	while (desc->running)
		barrier();
}
EXPORT_SYMBOL(synchronize_irq);

/* Mark/unmark a descriptor's handler as in progress (SMP only). */
#define smp_set_running(desc)	do { desc->running = 1; } while (0)
#define smp_clear_running(desc)	do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)	do { } while (0)
#define smp_clear_running(desc)	do { } while (0)
#endif
112 | |||
113 | /** | ||
114 | * disable_irq_nosync - disable an irq without waiting | ||
115 | * @irq: Interrupt to disable | ||
116 | * | ||
117 | * Disable the selected interrupt line. Enables and disables | ||
118 | * are nested. We do this lazily. | ||
119 | * | ||
120 | * This function may be called from IRQ context. | ||
121 | */ | ||
void disable_irq_nosync(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	/* also drop any queued pending entry so the IRQ cannot be
	 * re-run from irq_pending while it is disabled */
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
133 | |||
134 | /** | ||
135 | * disable_irq - disable an irq and wait for completion | ||
136 | * @irq: Interrupt to disable | ||
137 | * | ||
138 | * Disable the selected interrupt line. Enables and disables | ||
139 | * are nested. This functions waits for any pending IRQ | ||
140 | * handlers for this interrupt to complete before returning. | ||
141 | * If you use this function while holding a resource the IRQ | ||
142 | * handler may need you will deadlock. | ||
143 | * | ||
144 | * This function may be called - with care - from IRQ context. | ||
145 | */ | ||
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	/* only wait if a handler is installed; synchronize_irq() spins
	 * until the descriptor is no longer marked running (SMP) */
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
155 | |||
156 | /** | ||
157 | * enable_irq - enable interrupt handling on an irq | ||
158 | * @irq: Interrupt to enable | ||
159 | * | ||
160 | * Re-enables the processing of interrupts on this IRQ line. | ||
161 | * Note that this may call the interrupt handler, so you may | ||
162 | * get unexpected results if you hold IRQs disabled. | ||
163 | * | ||
164 | * This function may be called from IRQ context. | ||
165 | */ | ||
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		/* more enables than disables - complain, don't underflow */
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		/* last disable released - actually unmask the hardware */
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
195 | |||
/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	/* only chips providing a wake method support wake-up control */
	if (desc->chip->wake)
		desc->chip->wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);
210 | |||
/* Disable wake on the selected irq (counterpart of enable_irq_wake). */
void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	/* only chips providing a wake method support wake-up control */
	if (desc->chip->wake)
		desc->chip->wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
222 | |||
/*
 * seq_file show routine (/proc/interrupts style): a header row of CPU
 * names, one row per IRQ with per-CPU counts and the names of all
 * chained actions, then FIQ/IPI lists (where configured) and the
 * error count.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, " ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;	/* no handlers - print nothing */

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
		show_ipi_list(p);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
267 | |||
268 | /* | ||
269 | * IRQ lock detection. | ||
270 | * | ||
271 | * Hopefully, this should get us out of a few locked situations. | ||
272 | * However, it may take a while for this to happen, since we need | ||
 * a large number of IRQs to appear in the same jiffy with the
274 | * same instruction pointer (or within 2 instructions). | ||
275 | */ | ||
276 | static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs) | ||
277 | { | ||
278 | unsigned long instr_ptr = instruction_pointer(regs); | ||
279 | |||
280 | if (desc->lck_jif == jiffies && | ||
281 | desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) { | ||
282 | desc->lck_cnt += 1; | ||
283 | |||
284 | if (desc->lck_cnt > MAX_IRQ_CNT) { | ||
285 | printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq); | ||
286 | return 1; | ||
287 | } | ||
288 | } else { | ||
289 | desc->lck_cnt = 0; | ||
290 | desc->lck_pc = instruction_pointer(regs); | ||
291 | desc->lck_jif = jiffies; | ||
292 | } | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static void | ||
297 | report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret) | ||
298 | { | ||
299 | static int count = 100; | ||
300 | struct irqaction *action; | ||
301 | |||
302 | if (!count || noirqdebug) | ||
303 | return; | ||
304 | |||
305 | count--; | ||
306 | |||
307 | if (ret != IRQ_HANDLED && ret != IRQ_NONE) { | ||
308 | printk("irq%u: bogus retval mask %x\n", irq, ret); | ||
309 | } else { | ||
310 | printk("irq%u: nobody cared\n", irq); | ||
311 | } | ||
312 | show_regs(regs); | ||
313 | dump_stack(); | ||
314 | printk(KERN_ERR "handlers:"); | ||
315 | action = desc->action; | ||
316 | do { | ||
317 | printk("\n" KERN_ERR "[<%p>]", action->handler); | ||
318 | print_symbol(" (%s)", (unsigned long)action->handler); | ||
319 | action = action->next; | ||
320 | } while (action); | ||
321 | printk("\n"); | ||
322 | } | ||
323 | |||
/*
 * Run the action chain for one IRQ.
 *
 * Entered with irq_controller_lock held (it is dropped here) and, by
 * implication of the local_irq_enable() below, with local IRQs off.
 * Unless the first action was registered SA_INTERRUPT, IRQs are
 * re-enabled while the handlers run.  The lock is re-taken with IRQs
 * disabled before returning.
 *
 * Returns the OR of all handler return values (IRQ_NONE/IRQ_HANDLED).
 */
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

	/* SA_INTERRUPT ("fast") handlers run with IRQs still disabled */
	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		/* collect flags only from actions that handled the IRQ */
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	/* feed the entropy pool if a successful handler requested it */
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);

	return retval;
}
351 | |||
352 | /* | ||
353 | * This is for software-decoded IRQs. The caller is expected to | ||
354 | * handle the ack, clear, mask and unmask issues. | ||
355 | */ | ||
356 | void | ||
357 | do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) | ||
358 | { | ||
359 | struct irqaction *action; | ||
360 | const unsigned int cpu = smp_processor_id(); | ||
361 | |||
362 | desc->triggered = 1; | ||
363 | |||
364 | kstat_cpu(cpu).irqs[irq]++; | ||
365 | |||
366 | smp_set_running(desc); | ||
367 | |||
368 | action = desc->action; | ||
369 | if (action) { | ||
370 | int ret = __do_irq(irq, action, regs); | ||
371 | if (ret != IRQ_HANDLED) | ||
372 | report_bad_irq(irq, regs, desc, ret); | ||
373 | } | ||
374 | |||
375 | smp_clear_running(desc); | ||
376 | } | ||
377 | |||
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this. Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		/*
		 * A new edge was latched (and masked) while handlers ran:
		 * clear the pending flag and unmask before replaying.
		 */
		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running. Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
441 | |||
442 | /* | ||
443 | * Level-based IRQ handler. Nice and simple. | ||
444 | */ | ||
445 | void | ||
446 | do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) | ||
447 | { | ||
448 | struct irqaction *action; | ||
449 | const unsigned int cpu = smp_processor_id(); | ||
450 | |||
451 | desc->triggered = 1; | ||
452 | |||
453 | /* | ||
454 | * Acknowledge, clear _AND_ disable the interrupt. | ||
455 | */ | ||
456 | desc->chip->ack(irq); | ||
457 | |||
458 | if (likely(!desc->disable_depth)) { | ||
459 | kstat_cpu(cpu).irqs[irq]++; | ||
460 | |||
461 | smp_set_running(desc); | ||
462 | |||
463 | /* | ||
464 | * Return with this interrupt masked if no action | ||
465 | */ | ||
466 | action = desc->action; | ||
467 | if (action) { | ||
468 | int ret = __do_irq(irq, desc->action, regs); | ||
469 | |||
470 | if (ret != IRQ_HANDLED) | ||
471 | report_bad_irq(irq, regs, desc, ret); | ||
472 | |||
473 | if (likely(!desc->disable_depth && | ||
474 | !check_irq_lock(desc, irq, regs))) | ||
475 | desc->chip->unmask(irq); | ||
476 | } | ||
477 | |||
478 | smp_clear_running(desc); | ||
479 | } | ||
480 | } | ||
481 | |||
/*
 * Replay interrupts that were deferred onto the irq_pending list
 * (e.g. edge IRQs that arrived while running or disabled).
 * Called from asm_do_IRQ with irq_controller_lock held.
 */
static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		/* the struct copy left the neighbours pointing at
		 * irq_pending; repoint them at our local head */
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry. We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			/* desc - irq_desc is the irq number (array index) */
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
515 | |||
/*
 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
 * come via this function. Instead, they should provide their
 * own 'handler'.
 *
 * Entered from the assembly IRQ vector; runs the per-IRQ flow
 * handler under irq_controller_lock, then replays any interrupts
 * that were deferred onto irq_pending while it ran.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	irq_finish(irq);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}
547 | |||
/*
 * Install the flow handler for an IRQ.
 *
 * A NULL handle installs do_bad_IRQ, which masks, acks and disables
 * the interrupt.  Installing a real handler with is_chained set marks
 * the IRQ as not directly claimable/probeable (it is demuxed by the
 * chained handler) and enables it immediately.
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	/* a chip must be set before a chained handler makes sense */
	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		/* shut the source down before routing it to do_bad_IRQ */
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
581 | |||
582 | void set_irq_chip(unsigned int irq, struct irqchip *chip) | ||
583 | { | ||
584 | struct irqdesc *desc; | ||
585 | unsigned long flags; | ||
586 | |||
587 | if (irq >= NR_IRQS) { | ||
588 | printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | if (chip == NULL) | ||
593 | chip = &bad_chip; | ||
594 | |||
595 | desc = irq_desc + irq; | ||
596 | spin_lock_irqsave(&irq_controller_lock, flags); | ||
597 | desc->chip = chip; | ||
598 | spin_unlock_irqrestore(&irq_controller_lock, flags); | ||
599 | } | ||
600 | |||
601 | int set_irq_type(unsigned int irq, unsigned int type) | ||
602 | { | ||
603 | struct irqdesc *desc; | ||
604 | unsigned long flags; | ||
605 | int ret = -ENXIO; | ||
606 | |||
607 | if (irq >= NR_IRQS) { | ||
608 | printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); | ||
609 | return -ENODEV; | ||
610 | } | ||
611 | |||
612 | desc = irq_desc + irq; | ||
613 | if (desc->chip->type) { | ||
614 | spin_lock_irqsave(&irq_controller_lock, flags); | ||
615 | ret = desc->chip->type(irq, type); | ||
616 | spin_unlock_irqrestore(&irq_controller_lock, flags); | ||
617 | } | ||
618 | |||
619 | return ret; | ||
620 | } | ||
621 | EXPORT_SYMBOL(set_irq_type); | ||
622 | |||
623 | void set_irq_flags(unsigned int irq, unsigned int iflags) | ||
624 | { | ||
625 | struct irqdesc *desc; | ||
626 | unsigned long flags; | ||
627 | |||
628 | if (irq >= NR_IRQS) { | ||
629 | printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); | ||
630 | return; | ||
631 | } | ||
632 | |||
633 | desc = irq_desc + irq; | ||
634 | spin_lock_irqsave(&irq_controller_lock, flags); | ||
635 | desc->valid = (iflags & IRQF_VALID) != 0; | ||
636 | desc->probe_ok = (iflags & IRQF_PROBE) != 0; | ||
637 | desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0; | ||
638 | spin_unlock_irqrestore(&irq_controller_lock, flags); | ||
639 | } | ||
640 | |||
/*
 * Link a pre-built irqaction onto an IRQ's action chain.
 *
 * Returns 0 on success, -EBUSY if the IRQ is already claimed and
 * either side did not set SA_SHIRQ.  The first (non-shared) setup
 * resets the descriptor state and, unless noautoenable is set,
 * enables and unmasks the interrupt.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* first action on this IRQ: reset descriptor state */
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
702 | |||
703 | /** | ||
704 | * request_irq - allocate an interrupt line | ||
705 | * @irq: Interrupt line to allocate | ||
706 | * @handler: Function to be called when the IRQ occurs | ||
707 | * @irqflags: Interrupt type flags | ||
708 | * @devname: An ascii name for the claiming device | ||
709 | * @dev_id: A cookie passed back to the handler function | ||
710 | * | ||
711 | * This call allocates interrupt resources and enables the | ||
712 | * interrupt line and IRQ handling. From the point this | ||
713 | * call is made your handler function may be invoked. Since | ||
714 | * your handler function must clear any interrupt the board | ||
715 | * raises, you must take care both to initialise your hardware | ||
716 | * and to set up the interrupt handler in the right order. | ||
717 | * | ||
718 | * Dev_id must be globally unique. Normally the address of the | ||
719 | * device data structure is used as the cookie. Since the handler | ||
720 | * receives this value it makes sense to use it. | ||
721 | * | ||
722 | * If your interrupt is shared you must pass a non NULL dev_id | ||
723 | * as this is required when freeing the interrupt. | ||
724 | * | ||
725 | * Flags: | ||
726 | * | ||
727 | * SA_SHIRQ Interrupt is shared | ||
728 | * | ||
729 | * SA_INTERRUPT Disable local interrupts while processing | ||
730 | * | ||
731 | * SA_SAMPLE_RANDOM The interrupt can be used for entropy | ||
732 | * | ||
733 | */ | ||
734 | int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), | ||
735 | unsigned long irq_flags, const char * devname, void *dev_id) | ||
736 | { | ||
737 | unsigned long retval; | ||
738 | struct irqaction *action; | ||
739 | |||
740 | if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler || | ||
741 | (irq_flags & SA_SHIRQ && !dev_id)) | ||
742 | return -EINVAL; | ||
743 | |||
744 | action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL); | ||
745 | if (!action) | ||
746 | return -ENOMEM; | ||
747 | |||
748 | action->handler = handler; | ||
749 | action->flags = irq_flags; | ||
750 | cpus_clear(action->mask); | ||
751 | action->name = devname; | ||
752 | action->next = NULL; | ||
753 | action->dev_id = dev_id; | ||
754 | |||
755 | retval = setup_irq(irq, action); | ||
756 | |||
757 | if (retval) | ||
758 | kfree(action); | ||
759 | return retval; | ||
760 | } | ||
761 | |||
762 | EXPORT_SYMBOL(request_irq); | ||
763 | |||
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n",irq);
		dump_stack();
		return;
	}

	/* walk the action chain looking for the matching dev_id cookie */
	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
		dump_stack();
	} else {
		/* wait for any in-flight handler before freeing the action */
		synchronize_irq(irq);
		kfree(action);
	}
}

EXPORT_SYMBOL(free_irq);
808 | |||
/* serializes a probe_irq_on() ... probe_irq_off()/probe_irq_mask() cycle */
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing. Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing is
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 * (busy-wait: this is early/probe-time code)
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 * (those that fired with nothing connected to drive them)
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
864 | |||
865 | unsigned int probe_irq_mask(unsigned long irqs) | ||
866 | { | ||
867 | unsigned int mask = 0, i; | ||
868 | |||
869 | spin_lock_irq(&irq_controller_lock); | ||
870 | for (i = 0; i < 16 && i < NR_IRQS; i++) | ||
871 | if (irq_desc[i].probing && irq_desc[i].triggered) | ||
872 | mask |= 1 << i; | ||
873 | spin_unlock_irq(&irq_controller_lock); | ||
874 | |||
875 | up(&probe_sem); | ||
876 | |||
877 | return mask; | ||
878 | } | ||
879 | EXPORT_SYMBOL(probe_irq_mask); | ||
880 | |||
881 | /* | ||
882 | * Possible return values: | ||
883 | * >= 0 - interrupt number | ||
884 | * -1 - no interrupt/many interrupts | ||
885 | */ | ||
886 | int probe_irq_off(unsigned long irqs) | ||
887 | { | ||
888 | unsigned int i; | ||
889 | int irq_found = NO_IRQ; | ||
890 | |||
891 | /* | ||
892 | * look at the interrupts, and find exactly one | ||
893 | * that we were probing has been triggered | ||
894 | */ | ||
895 | spin_lock_irq(&irq_controller_lock); | ||
896 | for (i = 0; i < NR_IRQS; i++) { | ||
897 | if (irq_desc[i].probing && | ||
898 | irq_desc[i].triggered) { | ||
899 | if (irq_found != NO_IRQ) { | ||
900 | irq_found = NO_IRQ; | ||
901 | goto out; | ||
902 | } | ||
903 | irq_found = i; | ||
904 | } | ||
905 | } | ||
906 | |||
907 | if (irq_found == -1) | ||
908 | irq_found = NO_IRQ; | ||
909 | out: | ||
910 | spin_unlock_irq(&irq_controller_lock); | ||
911 | |||
912 | up(&probe_sem); | ||
913 | |||
914 | return irq_found; | ||
915 | } | ||
916 | |||
917 | EXPORT_SYMBOL(probe_irq_off); | ||
918 | |||
919 | #ifdef CONFIG_SMP | ||
/*
 * Retarget an IRQ to a different CPU: record the new owner in the
 * descriptor and ask the chip to reprogram its routing.
 */
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&irq_controller_lock);
	desc->cpu = cpu;
	desc->chip->set_cpu(desc, irq, cpu);
	spin_unlock_irq(&irq_controller_lock);
}
929 | |||
930 | #ifdef CONFIG_PROC_FS | ||
/*
 * /proc/irq/<n>/smp_affinity read handler: print the IRQ's affinity
 * cpumask followed by a newline.  'data' is the irq number stashed as
 * a pointer when the proc entry was created.
 */
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
			  int *eof, void *data)
{
	struct irqdesc *desc = irq_desc + ((int)data);
	int len = cpumask_scnprintf(page, count, desc->affinity);

	/* need room for the trailing "\n\0" */
	if (count - len < 2)
		return -EINVAL;
	page[len++] = '\n';
	page[len] = '\0';

	return len;
}
945 | |||
/*
 * /proc/irq/<n>/smp_affinity write handler: parse a cpumask from
 * userspace, reject it if no online CPU is included or the chip
 * cannot retarget, otherwise record it and route the IRQ to the
 * first eligible online CPU.  Returns bytes consumed or -errno.
 */
static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			   unsigned long count, void *data)
{
	unsigned int irq = (unsigned int)data;
	struct irqdesc *desc = irq_desc + irq;
	cpumask_t affinity, tmp;
	int ret = -EIO;

	/* chip without set_cpu cannot retarget interrupts */
	if (!desc->chip->set_cpu)
		goto out;

	ret = cpumask_parse(buffer, count, affinity);
	if (ret)
		goto out;

	/* requested mask must intersect the online CPUs */
	cpus_and(tmp, affinity, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	desc->affinity = affinity;
	route_irq(desc, irq, first_cpu(tmp));
	ret = count;

 out:
	return ret;
}
975 | #endif | ||
976 | #endif | ||
977 | |||
/*
 * Create /proc/irq/<n>/smp_affinity entries for every IRQ (SMP with
 * procfs only; otherwise this compiles to an empty function).
 */
void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
	struct proc_dir_entry *dir;
	int irq;

	dir = proc_mkdir("irq", 0);
	if (!dir)
		return;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct proc_dir_entry *entry;
		struct irqdesc *desc;
		char name[16];

		desc = irq_desc + irq;
		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name) - 1, "%u", irq);

		desc->procdir = proc_mkdir(name, dir);
		if (!desc->procdir)
			continue;

		entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
		if (entry) {
			entry->nlink = 1;
			/* stash the irq number in the entry's data pointer;
			 * the read/write handlers cast it back */
			entry->data = (void *)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
1011 | |||
1012 | void __init init_IRQ(void) | ||
1013 | { | ||
1014 | struct irqdesc *desc; | ||
1015 | extern void init_dma(void); | ||
1016 | int irq; | ||
1017 | |||
1018 | #ifdef CONFIG_SMP | ||
1019 | bad_irq_desc.affinity = CPU_MASK_ALL; | ||
1020 | bad_irq_desc.cpu = smp_processor_id(); | ||
1021 | #endif | ||
1022 | |||
1023 | for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) { | ||
1024 | *desc = bad_irq_desc; | ||
1025 | INIT_LIST_HEAD(&desc->pend); | ||
1026 | } | ||
1027 | |||
1028 | init_arch_irq(); | ||
1029 | init_dma(); | ||
1030 | } | ||
1031 | |||
/*
 * "noirqdebug" kernel command-line option: suppress the diagnostics
 * printed by report_bad_irq().  Returning 1 tells the boot code the
 * option was consumed.
 */
static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c new file mode 100644 index 000000000000..685c3e591a7e --- /dev/null +++ b/arch/arm/kernel/isa.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/isa.c | ||
3 | * | ||
4 | * Copyright (C) 1999 Phil Blundell | ||
5 | * | ||
6 | * ISA shared memory and I/O port support | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * Nothing about this is actually ARM specific. One day we could move | ||
18 | * it into kernel/resource.c or some place like that. | ||
19 | */ | ||
20 | |||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/fs.h> | ||
24 | #include <linux/sysctl.h> | ||
25 | #include <linux/init.h> | ||
26 | |||
/* ISA window parameters published read-only via sysctl (bus/isa/...) */
static unsigned int isa_membase, isa_portbase, isa_portshift;

/* leaf entries: membase/portbase/portshift, mode 0444 (read-only) */
static ctl_table ctl_isa_vars[4] = {
	{BUS_ISA_MEM_BASE, "membase", &isa_membase,
	 sizeof(isa_membase), 0444, NULL, &proc_dointvec},
	{BUS_ISA_PORT_BASE, "portbase", &isa_portbase,
	 sizeof(isa_portbase), 0444, NULL, &proc_dointvec},
	{BUS_ISA_PORT_SHIFT, "portshift", &isa_portshift,
	 sizeof(isa_portshift), 0444, NULL, &proc_dointvec},
	{0}
};

static struct ctl_table_header *isa_sysctl_header;

/* directory nodes: "bus" -> "isa" -> the three leaves above */
static ctl_table ctl_isa[2] = {{CTL_BUS_ISA, "isa", NULL, 0, 0555, ctl_isa_vars},
			       {0}};
static ctl_table ctl_bus[2] = {{CTL_BUS, "bus", NULL, 0, 0555, ctl_isa},
			       {0}};
45 | |||
/*
 * Record the platform's ISA memory/port window parameters and publish
 * them under the bus/isa sysctl tree.  Called once at init time by
 * machine setup code.
 */
void __init
register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift)
{
	isa_membase = membase;
	isa_portbase = portbase;
	isa_portshift = portshift;
	isa_sysctl_header = register_sysctl_table(ctl_bus, 0);
}
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S new file mode 100644 index 000000000000..8f74e24536ba --- /dev/null +++ b/arch/arm/kernel/iwmmxt.S | |||
@@ -0,0 +1,320 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/iwmmxt.S | ||
3 | * | ||
4 | * XScale iWMMXt (Concan) context switching and handling | ||
5 | * | ||
6 | * Initial code: | ||
7 | * Copyright (c) 2003, Intel Corporation | ||
8 | * | ||
9 | * Full lazy switching support, optimizations and more, by Nicolas Pitre | ||
10 | * Copyright (c) 2003-2004, MontaVista Software, Inc. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <asm/ptrace.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/constants.h> | ||
21 | |||
/*
 * Byte offsets of each iWMMXt register within the per-thread Concan
 * save area (TI_IWMMXT_STATE): 16 64-bit data registers wR0-wR15,
 * followed by the 32-bit control/status registers.
 */
#define MMX_WR0			(0x00)
#define MMX_WR1			(0x08)
#define MMX_WR2			(0x10)
#define MMX_WR3			(0x18)
#define MMX_WR4			(0x20)
#define MMX_WR5			(0x28)
#define MMX_WR6			(0x30)
#define MMX_WR7			(0x38)
#define MMX_WR8			(0x40)
#define MMX_WR9			(0x48)
#define MMX_WR10		(0x50)
#define MMX_WR11		(0x58)
#define MMX_WR12		(0x60)
#define MMX_WR13		(0x68)
#define MMX_WR14		(0x70)
#define MMX_WR15		(0x78)
#define MMX_WCSSF		(0x80)
#define MMX_WCASF		(0x84)
#define MMX_WCGR0		(0x88)
#define MMX_WCGR1		(0x8C)
#define MMX_WCGR2		(0x90)
#define MMX_WCGR3		(0x94)

/* total size of the save area */
#define MMX_SIZE		(0x98)
47 | .text | ||
48 | |||
49 | /* | ||
50 | * Lazy switching of Concan coprocessor context | ||
51 | * | ||
52 | * r10 = struct thread_info pointer | ||
53 | * r9 = ret_from_exception | ||
54 | * lr = undefined instr exit | ||
55 | * | ||
56 | * called from prefetch exception handler with interrupts disabled | ||
57 | */ | ||
58 | |||
ENTRY(iwmmxt_task_enable)

	mrc	p15, 0, r2, c15, c1, 0
	tst	r2, #0x3			@ CP0 and CP1 accessible?
	movne	pc, lr				@ if so no business here
	orr	r2, r2, #0x3			@ enable access to CP0 and CP1
	mcr	p15, 0, r2, c15, c1, 0

	ldr	r3, =concan_owner
	add	r0, r10, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r2, [sp, #60]			@ current task pc value
	ldr	r1, [r3]			@ get current Concan owner
	str	r0, [r3]			@ this task now owns Concan regs
	sub	r2, r2, #4			@ adjust pc back
	str	r2, [sp, #60]			@ re-execute the faulted instruction

	mrc	p15, 0, r2, c2, c0, 0
	mov	r2, r2				@ cpwait

	teq	r1, #0				@ test for last ownership
	mov	lr, r9				@ normal exit from exception
	beq	concan_load			@ no owner, skip save

@ Save the previous owner's Concan state (r1 = its save area).
@ Also entered via bl from iwmmxt_task_disable.
concan_save:

	tmrc	r2, wCon

	@ CUP? wCx
	tst	r2, #0x1			@ control regs dirty?
	beq	1f

concan_dump:

	wstrw	wCSSF, [r1, #MMX_WCSSF]
	wstrw	wCASF, [r1, #MMX_WCASF]
	wstrw	wCGR0, [r1, #MMX_WCGR0]
	wstrw	wCGR1, [r1, #MMX_WCGR1]
	wstrw	wCGR2, [r1, #MMX_WCGR2]
	wstrw	wCGR3, [r1, #MMX_WCGR3]

1:	@ MUP? wRn
	tst	r2, #0x2			@ data regs dirty?
	beq	2f

	wstrd	wR0,  [r1, #MMX_WR0]
	wstrd	wR1,  [r1, #MMX_WR1]
	wstrd	wR2,  [r1, #MMX_WR2]
	wstrd	wR3,  [r1, #MMX_WR3]
	wstrd	wR4,  [r1, #MMX_WR4]
	wstrd	wR5,  [r1, #MMX_WR5]
	wstrd	wR6,  [r1, #MMX_WR6]
	wstrd	wR7,  [r1, #MMX_WR7]
	wstrd	wR8,  [r1, #MMX_WR8]
	wstrd	wR9,  [r1, #MMX_WR9]
	wstrd	wR10, [r1, #MMX_WR10]
	wstrd	wR11, [r1, #MMX_WR11]
	wstrd	wR12, [r1, #MMX_WR12]
	wstrd	wR13, [r1, #MMX_WR13]
	wstrd	wR14, [r1, #MMX_WR14]
	wstrd	wR15, [r1, #MMX_WR15]

2:	teq	r0, #0				@ anything to load?
	moveq	pc, lr				@ r0 == 0: save-only call, done

@ Restore the new owner's Concan state from its save area (r0).
concan_load:

	@ Load wRn
	wldrd	wR0,  [r0, #MMX_WR0]
	wldrd	wR1,  [r0, #MMX_WR1]
	wldrd	wR2,  [r0, #MMX_WR2]
	wldrd	wR3,  [r0, #MMX_WR3]
	wldrd	wR4,  [r0, #MMX_WR4]
	wldrd	wR5,  [r0, #MMX_WR5]
	wldrd	wR6,  [r0, #MMX_WR6]
	wldrd	wR7,  [r0, #MMX_WR7]
	wldrd	wR8,  [r0, #MMX_WR8]
	wldrd	wR9,  [r0, #MMX_WR9]
	wldrd	wR10, [r0, #MMX_WR10]
	wldrd	wR11, [r0, #MMX_WR11]
	wldrd	wR12, [r0, #MMX_WR12]
	wldrd	wR13, [r0, #MMX_WR13]
	wldrd	wR14, [r0, #MMX_WR14]
	wldrd	wR15, [r0, #MMX_WR15]

	@ Load wCx
	wldrw	wCSSF, [r0, #MMX_WCSSF]
	wldrw	wCASF, [r0, #MMX_WCASF]
	wldrw	wCGR0, [r0, #MMX_WCGR0]
	wldrw	wCGR1, [r0, #MMX_WCGR1]
	wldrw	wCGR2, [r0, #MMX_WCGR2]
	wldrw	wCGR3, [r0, #MMX_WCGR3]

	@ clear CUP/MUP (only if r1 != 0)
	teq	r1, #0
	mov	r2, #0
	moveq	pc, lr
	tmcr	wCon, r2
	mov	pc, lr
157 | |||
158 | /* | ||
159 | * Back up Concan regs to save area and disable access to them | ||
160 | * (mainly for gdb or sleep mode usage) | ||
161 | * | ||
162 | * r0 = struct thread_info pointer of target task or NULL for any | ||
163 | */ | ||
164 | |||
ENTRY(iwmmxt_task_disable)

	stmfd	sp!, {r4, lr}

	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =concan_owner
	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r1, [r3]			@ get current Concan owner
	teq	r1, #0				@ any current owner?
	beq	1f				@ no: quit
	teq	r0, #0				@ any owner?
	teqne	r1, r2				@ or specified one?
	bne	1f				@ no: quit

	@ temporarily re-enable CP0/CP1 so the state can be read out
	mrc	p15, 0, r4, c15, c1, 0
	orr	r4, r4, #0x3			@ enable access to CP0 and CP1
	mcr	p15, 0, r4, c15, c1, 0
	mov	r0, #0				@ nothing to load
	str	r0, [r3]			@ no more current owner
	mrc	p15, 0, r2, c2, c0, 0
	mov	r2, r2				@ cpwait
	bl	concan_save			@ dump owner's state (r1) to memory

	bic	r4, r4, #0x3			@ disable access to CP0 and CP1
	mcr	p15, 0, r4, c15, c1, 0
	mrc	p15, 0, r2, c2, c0, 0
	mov	r2, r2				@ cpwait

1:	msr	cpsr_c, ip			@ restore interrupt mode
	ldmfd	sp!, {r4, pc}
198 | |||
/*
 * Copy Concan state to given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to store Concan state
 *
 * this is called mainly in the creation of signal stack frames
 */

ENTRY(iwmmxt_task_copy)

	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =concan_owner
	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r3, [r3]			@ get current Concan owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ current Concan values are in the task save area: copy them out
	@ with a plain memcpy (tail call; lr is unchanged)
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r1				@ memcpy dest = given address
	mov	r1, r2				@ memcpy src  = task save area
	mov	r2, #MMX_SIZE
	b	memcpy

1:	@ this task owns Concan regs -- grab a copy from there
	mov	r0, #0				@ nothing to load
	mov	r2, #3				@ save all regs
	mov	r3, lr				@ preserve return address
	bl	concan_dump			@ dumps the live regs to [r1]
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	pc, r3
234 | |||
/*
 * Restore Concan state from given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to get Concan state from
 *
 * this is used to restore Concan state when unwinding a signal stack frame
 */

ENTRY(iwmmxt_task_restore)

	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =concan_owner
	add	r2, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r3, [r3]			@ get current Concan owner
	bic	r2, r2, #0x7			@ 64-bit alignment
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ this task doesn't own Concan regs -- use its save area
	@ (tail call: memcpy(save area, given address, MMX_SIZE))
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r2
	mov	r2, #MMX_SIZE
	b	memcpy

1:	@ this task owns Concan regs -- load them directly
	mov	r0, r1				@ load source = given address
	mov	r1, #0				@ don't clear CUP/MUP
	mov	r3, lr				@ preserve return address
	bl	concan_load
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	pc, r3
270 | |||
/*
 * Concan handling on task switch
 *
 * r0 = previous task_struct pointer (must be preserved)
 * r1 = previous thread_info pointer
 * r2 = next thread_info.cpu_domain pointer (must be preserved)
 *
 * Called only from __switch_to with task preemption disabled.
 * No need to care about preserving r4 and above.
 */
ENTRY(iwmmxt_task_switch)

	mrc	p15, 0, r4, c15, c1, 0
	tst	r4, #0x3			@ CP0 and CP1 accessible?
	bne	1f				@ yes: block them for next task

	@ Access is currently off: only turn it back on if the incoming
	@ task already owns the hardware Concan state.
	ldr	r5, =concan_owner
	add	r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area
	ldr	r5, [r5]			@ get current Concan owner
	teq	r5, r6				@ next task owns it?
	movne	pc, lr				@ no: leave Concan disabled

1:	eor	r4, r4, #3			@ flip Concan access
	mcr	p15, 0, r4, c15, c1, 0

	@ LSR #32 yields 0, but the use of r4 stalls until the mrc
	@ completes, giving the required cpwait before returning.
	mrc	p15, 0, r4, c2, c0, 0
	sub	pc, lr, r4, lsr #32		@ cpwait and return
298 | |||
/*
 * Remove Concan ownership of given task
 *
 * r0 = struct thread_info pointer
 */
ENTRY(iwmmxt_task_release)

	mrs	r2, cpsr
	orr	ip, r2, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, ip
	ldr	r3, =concan_owner
	add	r0, r0, #TI_IWMMXT_STATE	@ get task Concan save area
	ldr	r1, [r3]			@ get current Concan owner
	eors	r0, r0, r1			@ if equal...
	streq	r0, [r3]			@ then clear ownership (r0 is 0 here)
	msr	cpsr_c, r2			@ restore interrupts
	mov	pc, lr
316 | |||
	.data
@ Pointer to the thread_info Concan save area of the task that currently
@ owns the hardware iWMMXt register file, or 0 when nobody owns it.
concan_owner:
	.word	0
320 | |||
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c new file mode 100644 index 000000000000..1a85cfdad5ac --- /dev/null +++ b/arch/arm/kernel/module.c | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/module.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Russell King. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Module allocation method suggested by Andi Kleen. | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/elf.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/string.h> | ||
20 | |||
21 | #include <asm/pgtable.h> | ||
22 | |||
23 | #ifdef CONFIG_XIP_KERNEL | ||
24 | /* | ||
25 | * The XIP kernel text is mapped in the module area for modules and | ||
26 | * some other stuff to work without any indirect relocations. | ||
27 | * MODULE_START is redefined here and not in asm/memory.h to avoid | ||
28 | * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off. | ||
29 | */ | ||
30 | extern void _etext; | ||
31 | #undef MODULE_START | ||
32 | #define MODULE_START (((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK) | ||
33 | #endif | ||
34 | |||
35 | void *module_alloc(unsigned long size) | ||
36 | { | ||
37 | struct vm_struct *area; | ||
38 | |||
39 | size = PAGE_ALIGN(size); | ||
40 | if (!size) | ||
41 | return NULL; | ||
42 | |||
43 | area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END); | ||
44 | if (!area) | ||
45 | return NULL; | ||
46 | |||
47 | return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); | ||
48 | } | ||
49 | |||
/* Release a region previously obtained from module_alloc(). */
void module_free(struct module *module, void *region)
{
	vfree(region);
}
54 | |||
55 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
56 | Elf_Shdr *sechdrs, | ||
57 | char *secstrings, | ||
58 | struct module *mod) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | int | ||
64 | apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, | ||
65 | unsigned int relindex, struct module *module) | ||
66 | { | ||
67 | Elf32_Shdr *symsec = sechdrs + symindex; | ||
68 | Elf32_Shdr *relsec = sechdrs + relindex; | ||
69 | Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; | ||
70 | Elf32_Rel *rel = (void *)relsec->sh_addr; | ||
71 | unsigned int i; | ||
72 | |||
73 | for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { | ||
74 | unsigned long loc; | ||
75 | Elf32_Sym *sym; | ||
76 | s32 offset; | ||
77 | |||
78 | offset = ELF32_R_SYM(rel->r_info); | ||
79 | if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { | ||
80 | printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n", | ||
81 | module->name, relindex, i); | ||
82 | return -ENOEXEC; | ||
83 | } | ||
84 | |||
85 | sym = ((Elf32_Sym *)symsec->sh_addr) + offset; | ||
86 | |||
87 | if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) { | ||
88 | printk(KERN_ERR "%s: out of bounds relocation, " | ||
89 | "section %d reloc %d offset %d size %d\n", | ||
90 | module->name, relindex, i, rel->r_offset, | ||
91 | dstsec->sh_size); | ||
92 | return -ENOEXEC; | ||
93 | } | ||
94 | |||
95 | loc = dstsec->sh_addr + rel->r_offset; | ||
96 | |||
97 | switch (ELF32_R_TYPE(rel->r_info)) { | ||
98 | case R_ARM_ABS32: | ||
99 | *(u32 *)loc += sym->st_value; | ||
100 | break; | ||
101 | |||
102 | case R_ARM_PC24: | ||
103 | offset = (*(u32 *)loc & 0x00ffffff) << 2; | ||
104 | if (offset & 0x02000000) | ||
105 | offset -= 0x04000000; | ||
106 | |||
107 | offset += sym->st_value - loc; | ||
108 | if (offset & 3 || | ||
109 | offset <= (s32)0xfc000000 || | ||
110 | offset >= (s32)0x04000000) { | ||
111 | printk(KERN_ERR | ||
112 | "%s: relocation out of range, section " | ||
113 | "%d reloc %d sym '%s'\n", module->name, | ||
114 | relindex, i, strtab + sym->st_name); | ||
115 | return -ENOEXEC; | ||
116 | } | ||
117 | |||
118 | offset >>= 2; | ||
119 | |||
120 | *(u32 *)loc &= 0xff000000; | ||
121 | *(u32 *)loc |= offset & 0x00ffffff; | ||
122 | break; | ||
123 | |||
124 | default: | ||
125 | printk(KERN_ERR "%s: unknown relocation: %u\n", | ||
126 | module->name, ELF32_R_TYPE(rel->r_info)); | ||
127 | return -ENOEXEC; | ||
128 | } | ||
129 | } | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | int | ||
134 | apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, | ||
135 | unsigned int symindex, unsigned int relsec, struct module *module) | ||
136 | { | ||
137 | printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", | ||
138 | module->name); | ||
139 | return -ENOEXEC; | ||
140 | } | ||
141 | |||
142 | int | ||
143 | module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | ||
144 | struct module *module) | ||
145 | { | ||
146 | return 0; | ||
147 | } | ||
148 | |||
/* Nothing architecture-specific to tear down when a module is unloaded. */
void
module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c new file mode 100644 index 000000000000..dbd8ca89b385 --- /dev/null +++ b/arch/arm/kernel/process.c | |||
@@ -0,0 +1,460 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/process.c | ||
3 | * | ||
4 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. | ||
5 | * Original Copyright (C) 1995 Linus Torvalds | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <stdarg.h> | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/unistd.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/user.h> | ||
23 | #include <linux/a.out.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/reboot.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/kallsyms.h> | ||
28 | #include <linux/init.h> | ||
29 | |||
30 | #include <asm/system.h> | ||
31 | #include <asm/io.h> | ||
32 | #include <asm/leds.h> | ||
33 | #include <asm/processor.h> | ||
34 | #include <asm/uaccess.h> | ||
35 | |||
36 | extern const char *processor_modes[]; | ||
37 | extern void setup_mm_for_reboot(char mode); | ||
38 | |||
39 | static volatile int hlt_counter; | ||
40 | |||
41 | #include <asm/arch/system.h> | ||
42 | |||
43 | void disable_hlt(void) | ||
44 | { | ||
45 | hlt_counter++; | ||
46 | } | ||
47 | |||
48 | EXPORT_SYMBOL(disable_hlt); | ||
49 | |||
50 | void enable_hlt(void) | ||
51 | { | ||
52 | hlt_counter--; | ||
53 | } | ||
54 | |||
55 | EXPORT_SYMBOL(enable_hlt); | ||
56 | |||
/* Handle the "nohlt" boot argument: boot with idle sleeping disabled. */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
62 | |||
/* Handle the "hlt" boot argument: (re-)enable idle sleeping. */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
71 | |||
72 | /* | ||
73 | * The following aren't currently used. | ||
74 | */ | ||
75 | void (*pm_idle)(void); | ||
76 | EXPORT_SYMBOL(pm_idle); | ||
77 | |||
78 | void (*pm_power_off)(void); | ||
79 | EXPORT_SYMBOL(pm_power_off); | ||
80 | |||
/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
void default_idle(void)
{
	/* test need_resched() with IRQs masked so a wakeup cannot slip in
	 * between the check and the sleep */
	local_irq_disable();
	if (!need_resched() && !hlt_counter)
		arch_idle();	/* sleep until the next interrupt; assumed to
				 * wake even with IRQs masked (WFI-style) --
				 * NOTE(review): confirm per machine class */
	local_irq_enable();
}
92 | |||
/*
 * The idle thread. We try to conserve power, while trying to keep
 * overall latency low. The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	local_fiq_enable();	/* FIQs stay on in the idle task */

	/* endless idle loop with no priority at all */
	while (1) {
		/* re-read pm_idle every pass so a newly installed hook
		 * takes effect without rebooting */
		void (*idle)(void) = pm_idle;
		if (!idle)
			idle = default_idle;
		preempt_disable();
		leds_event(led_idle_start);
		while (!need_resched())
			idle();
		leds_event(led_idle_end);
		preempt_enable();
		schedule();
	}
}
116 | |||
117 | static char reboot_mode = 'h'; | ||
118 | |||
119 | int __init reboot_setup(char *str) | ||
120 | { | ||
121 | reboot_mode = str[0]; | ||
122 | return 1; | ||
123 | } | ||
124 | |||
125 | __setup("reboot=", reboot_setup); | ||
126 | |||
/* Halting is a no-op on ARM; the caller ends up spinning. */
void machine_halt(void)
{
}

EXPORT_SYMBOL(machine_halt);
132 | |||
133 | void machine_power_off(void) | ||
134 | { | ||
135 | if (pm_power_off) | ||
136 | pm_power_off(); | ||
137 | } | ||
138 | |||
139 | EXPORT_SYMBOL(machine_power_off); | ||
140 | |||
141 | void machine_restart(char * __unused) | ||
142 | { | ||
143 | /* | ||
144 | * Clean and disable cache, and turn off interrupts | ||
145 | */ | ||
146 | cpu_proc_fin(); | ||
147 | |||
148 | /* | ||
149 | * Tell the mm system that we are going to reboot - | ||
150 | * we may need it to insert some 1:1 mappings so that | ||
151 | * soft boot works. | ||
152 | */ | ||
153 | setup_mm_for_reboot(reboot_mode); | ||
154 | |||
155 | /* | ||
156 | * Now call the architecture specific reboot code. | ||
157 | */ | ||
158 | arch_reset(reboot_mode); | ||
159 | |||
160 | /* | ||
161 | * Whoops - the architecture was unable to reboot. | ||
162 | * Tell the user! | ||
163 | */ | ||
164 | mdelay(1000); | ||
165 | printk("Reboot failed -- System halted\n"); | ||
166 | while (1); | ||
167 | } | ||
168 | |||
169 | EXPORT_SYMBOL(machine_restart); | ||
170 | |||
/*
 * Dump the register file, PSR flags and MMU control registers from a
 * saved pt_regs -- used for oopses and debugging.
 */
void show_regs(struct pt_regs * regs)
{
	unsigned long flags;

	flags = condition_codes(regs);	/* NZCV bits of the saved PSR */

	/* symbolic PC/LR first, then the raw register values */
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	printk("Flags: %c%c%c%c",
		flags & PSR_N_BIT ? 'N' : 'n',
		flags & PSR_Z_BIT ? 'Z' : 'z',
		flags & PSR_C_BIT ? 'C' : 'c',
		flags & PSR_V_BIT ? 'V' : 'v');
	printk(" IRQs o%s FIQs o%s Mode %s%s Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		thumb_mode(regs) ? " (T)" : "",
		get_fs() == get_ds() ? "kernel" : "user");
	{
		/* read the MMU state: control register, translation table
		 * base and domain access control */
		unsigned int ctrl, transbase, dac;
		__asm__ (
		" mrc p15, 0, %0, c1, c0\n"
		" mrc p15, 0, %1, c2, c0\n"
		" mrc p15, 0, %2, c3, c0\n"
		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
		printk("Control: %04X Table: %08X DAC: %08X\n",
			ctrl, transbase, dac);
	}
}
215 | |||
216 | void show_fpregs(struct user_fp *regs) | ||
217 | { | ||
218 | int i; | ||
219 | |||
220 | for (i = 0; i < 8; i++) { | ||
221 | unsigned long *p; | ||
222 | char type; | ||
223 | |||
224 | p = (unsigned long *)(regs->fpregs + i); | ||
225 | |||
226 | switch (regs->ftype[i]) { | ||
227 | case 1: type = 'f'; break; | ||
228 | case 2: type = 'd'; break; | ||
229 | case 3: type = 'e'; break; | ||
230 | default: type = '?'; break; | ||
231 | } | ||
232 | if (regs->init_flag) | ||
233 | type = '?'; | ||
234 | |||
235 | printk(" f%d(%c): %08lx %08lx %08lx%c", | ||
236 | i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' '); | ||
237 | } | ||
238 | |||
239 | |||
240 | printk("FPSR: %08lx FPCR: %08lx\n", | ||
241 | (unsigned long)regs->fpsr, | ||
242 | (unsigned long)regs->fpcr); | ||
243 | } | ||
244 | |||
/*
 * Task structure and kernel stack allocation.
 */
/* Singly-linked free list of recycled thread_info blocks; the first
 * word of a free block holds the pointer to the next free block.
 * NOTE(review): updated without any locking -- confirm callers
 * serialize this on SMP. */
static unsigned long *thread_info_head;
static unsigned int nr_thread_info;

/* Cache at most this many freed thread_info blocks for reuse. */
#define EXTRA_TASK_STRUCT	4
/* Each task gets an order-1 allocation (two pages) holding the
 * thread_info plus the kernel stack. */
#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)

/*
 * Allocate a thread_info block (plus kernel stack) for a new task,
 * preferring the local free list over the page allocator.
 * Returns NULL if no memory is available.
 */
struct thread_info *alloc_thread_info(struct task_struct *task)
{
	struct thread_info *thread = NULL;

	if (EXTRA_TASK_STRUCT) {
		unsigned long *p = thread_info_head;

		if (p) {
			/* pop the head of the free list */
			thread_info_head = (unsigned long *)p[0];
			nr_thread_info -= 1;
		}
		thread = (struct thread_info *)p;
	}

	if (!thread)
		thread = ll_alloc_task_struct();

#ifdef CONFIG_MAGIC_SYSRQ
	/*
	 * The stack must be cleared if you want SYSRQ-T to
	 * give sensible stack usage information
	 */
	if (thread) {
		char *p = (char *)thread;
		/* zero the stack half only; assumes KERNEL_STACK_SIZE is
		 * half of the order-1 allocation -- TODO confirm */
		memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
	}
#endif
	return thread;
}
284 | |||
/*
 * Return a thread_info block: keep up to EXTRA_TASK_STRUCT of them on
 * the local free list for quick reuse, release the rest to the page
 * allocator.  NOTE(review): the list is updated without locking --
 * confirm callers serialize this on SMP.
 */
void free_thread_info(struct thread_info *thread)
{
	if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
		unsigned long *p = (unsigned long *)thread;
		p[0] = (unsigned long)thread_info_head;	/* link into list */
		thread_info_head = p;
		nr_thread_info += 1;
	} else
		ll_free_task_struct(thread);
}
295 | |||
/*
 * Free current thread data structures etc..
 */
/* Nothing to do on ARM: per-thread state is released elsewhere. */
void exit_thread(void)
{
}
302 | |||
303 | static void default_fp_init(union fp_state *fp) | ||
304 | { | ||
305 | memset(fp, 0, sizeof(union fp_state)); | ||
306 | } | ||
307 | |||
308 | void (*fp_init)(union fp_state *) = default_fp_init; | ||
309 | EXPORT_SYMBOL(fp_init); | ||
310 | |||
/*
 * Reset the coprocessor/FP state of the current task, e.g. across exec.
 */
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	/* forget which coprocessors were used, and any debug breakpoints */
	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
#if defined(CONFIG_IWMMXT)
	iwmmxt_task_release(thread);	/* drop Concan ownership, if any */
#endif
	fp_init(&thread->fpstate);
#if defined(CONFIG_VFP)
	vfp_flush_thread(&thread->vfpstate);
#endif
}
326 | |||
/*
 * Drop per-thread coprocessor state when a task is being released.
 */
void release_thread(struct task_struct *dead_task)
{
#if defined(CONFIG_VFP)
	vfp_release_thread(&dead_task->thread_info->vfpstate);
#endif
#if defined(CONFIG_IWMMXT)
	iwmmxt_task_release(dead_task->thread_info);
#endif
}
336 | |||
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Set up the saved registers and cpu_context of a newly forked task so
 * that it resumes in ret_from_fork with the child's pt_regs on its
 * kernel stack.  Returns 0 on success.
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *thread = p->thread_info;
	struct pt_regs *childregs;

	/* place the child's pt_regs just below the top of its kernel
	 * stack (8 bytes of padding at the very top) */
	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1;
	*childregs = *regs;
	childregs->ARM_r0 = 0;		/* child sees a return value of 0 */
	childregs->ARM_sp = stack_start;

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
	thread->cpu_context.sp = (unsigned long)childregs;
	thread->cpu_context.pc = (unsigned long)ret_from_fork;

	/* CLONE_SETTLS passes the new TLS value in r3 */
	if (clone_flags & CLONE_SETTLS)
		thread->tp_value = regs->ARM_r3;

	return 0;
}
360 | |||
361 | /* | ||
362 | * fill in the fpe structure for a core dump... | ||
363 | */ | ||
364 | int dump_fpu (struct pt_regs *regs, struct user_fp *fp) | ||
365 | { | ||
366 | struct thread_info *thread = current_thread_info(); | ||
367 | int used_math = thread->used_cp[1] | thread->used_cp[2]; | ||
368 | |||
369 | if (used_math) | ||
370 | memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); | ||
371 | |||
372 | return used_math != 0; | ||
373 | } | ||
374 | EXPORT_SYMBOL(dump_fpu); | ||
375 | |||
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	struct task_struct *tsk = current;

	dump->magic = CMAGIC;
	dump->start_code = tsk->mm->start_code;
	/* page-align the current user stack pointer downwards */
	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);

	/* text and data segment sizes, in pages */
	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
	dump->u_ssize = 0;

	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
	dump->u_debugreg[4] = tsk->thread.debug.nsaved;

	/* stack size measured up to 0x04000000 -- presumably the a.out
	 * stack top; NOTE(review): confirm this constant */
	if (dump->start_stack < 0x04000000)
		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

	dump->regs = *regs;
	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
}
EXPORT_SYMBOL(dump_thread);
404 | |||
/*
 * Shuffle the argument into the correct register before calling the
 * thread function.  r1 is the thread argument, r2 is the pointer to
 * the thread function, and r3 points to the exit function.
 * Entered via the pt_regs set up by kernel_thread() below.
 */
extern void kernel_thread_helper(void);
asm(	".section .text\n"
"	.align\n"
"	.type	kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
"	mov	r0, r1\n"
"	mov	lr, r3\n"
"	mov	pc, r2\n"
"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
"	.previous");
420 | |||
/*
 * Create a kernel thread.
 * Returns the new thread's pid, or a negative errno from do_fork().
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	/* arrange for kernel_thread_helper to call fn(arg) and fall
	 * into do_exit when fn returns */
	regs.ARM_r1 = (unsigned long)arg;
	regs.ARM_r2 = (unsigned long)fn;
	regs.ARM_r3 = (unsigned long)do_exit;
	regs.ARM_pc = (unsigned long)kernel_thread_helper;
	regs.ARM_cpsr = SVC_MODE;	/* thread runs in supervisor mode */

	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
439 | |||
/*
 * Walk the saved frame-pointer chain of a sleeping task and return the
 * first return address outside the scheduler -- i.e. where the task is
 * blocked.  Returns 0 for the current/running task or if no such
 * address can be found within 16 frames.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/* NOTE(review): hard-codes a 4K thread_info region with the
	 * stack above it; frame pointers must lie inside that page */
	stack_page = 4096 + (unsigned long)p->thread_info;
	fp = thread_saved_fp(p);
	do {
		if (fp < stack_page || fp > 4092+stack_page)
			return 0;
		lr = pc_pointer (((unsigned long *)fp)[-1]);	/* saved lr */
		if (!in_sched_functions(lr))
			return lr;
		fp = *(unsigned long *) (fp - 12);	/* follow saved fp */
	} while (count ++ < 16);	/* bound the walk */
	return 0;
}
EXPORT_SYMBOL(get_wchan);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c new file mode 100644 index 000000000000..efd7a341614b --- /dev/null +++ b/arch/arm/kernel/ptrace.c | |||
@@ -0,0 +1,861 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/ptrace.c | ||
3 | * | ||
4 | * By Ross Biro 1/23/92 | ||
5 | * edited by Linus Torvalds | ||
6 | * ARM modifications Copyright (C) 2000 Russell King | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/smp_lock.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/user.h> | ||
20 | #include <linux/security.h> | ||
21 | #include <linux/init.h> | ||
22 | |||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | #include <asm/system.h> | ||
26 | #include <asm/traps.h> | ||
27 | |||
28 | #include "ptrace.h" | ||
29 | |||
30 | #define REG_PC 15 | ||
31 | #define REG_PSR 16 | ||
32 | /* | ||
33 | * does not yet catch signals sent when the child dies. | ||
34 | * in exit.c or in signal.c. | ||
35 | */ | ||
36 | |||
37 | #if 0 | ||
38 | /* | ||
39 | * Breakpoint SWI instruction: SWI &9F0001 | ||
40 | */ | ||
41 | #define BREAKINST_ARM 0xef9f0001 | ||
42 | #define BREAKINST_THUMB 0xdf00 /* fill this in later */ | ||
43 | #else | ||
44 | /* | ||
45 | * New breakpoints - use an undefined instruction. The ARM architecture | ||
46 | * reference manual guarantees that the following instruction space | ||
47 | * will produce an undefined instruction exception on all CPUs: | ||
48 | * | ||
49 | * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx | ||
50 | * Thumb: 1101 1110 xxxx xxxx | ||
51 | */ | ||
52 | #define BREAKINST_ARM 0xe7f001f0 | ||
53 | #define BREAKINST_THUMB 0xde01 | ||
54 | #endif | ||
55 | |||
56 | /* | ||
57 | * Get the address of the live pt_regs for the specified task. | ||
58 | * These are saved onto the top kernel stack when the process | ||
59 | * is not running. | ||
60 | * | ||
61 | * Note: if a user thread is execve'd from kernel space, the | ||
62 | * kernel stack will not be empty on entry to the kernel, so | ||
63 | * ptracing these tasks will fail. | ||
64 | */ | ||
65 | static inline struct pt_regs * | ||
66 | get_user_regs(struct task_struct *task) | ||
67 | { | ||
68 | return (struct pt_regs *) | ||
69 | ((unsigned long)task->thread_info + THREAD_SIZE - | ||
70 | 8 - sizeof(struct pt_regs)); | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * this routine will get a word off of the processes privileged stack. | ||
75 | * the offset is how far from the base addr as stored in the THREAD. | ||
76 | * this routine assumes that all the privileged stacks are in our | ||
77 | * data space. | ||
78 | */ | ||
79 | static inline long get_user_reg(struct task_struct *task, int offset) | ||
80 | { | ||
81 | return get_user_regs(task)->uregs[offset]; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * this routine will put a word on the processes privileged stack. | ||
86 | * the offset is how far from the base addr as stored in the THREAD. | ||
87 | * this routine assumes that all the privileged stacks are in our | ||
88 | * data space. | ||
89 | */ | ||
90 | static inline int | ||
91 | put_user_reg(struct task_struct *task, int offset, long data) | ||
92 | { | ||
93 | struct pt_regs newregs, *regs = get_user_regs(task); | ||
94 | int ret = -EINVAL; | ||
95 | |||
96 | newregs = *regs; | ||
97 | newregs.uregs[offset] = data; | ||
98 | |||
99 | if (valid_user_regs(&newregs)) { | ||
100 | regs->uregs[offset] = data; | ||
101 | ret = 0; | ||
102 | } | ||
103 | |||
104 | return ret; | ||
105 | } | ||
106 | |||
107 | static inline int | ||
108 | read_u32(struct task_struct *task, unsigned long addr, u32 *res) | ||
109 | { | ||
110 | int ret; | ||
111 | |||
112 | ret = access_process_vm(task, addr, res, sizeof(*res), 0); | ||
113 | |||
114 | return ret == sizeof(*res) ? 0 : -EIO; | ||
115 | } | ||
116 | |||
117 | static inline int | ||
118 | read_instr(struct task_struct *task, unsigned long addr, u32 *res) | ||
119 | { | ||
120 | int ret; | ||
121 | |||
122 | if (addr & 1) { | ||
123 | u16 val; | ||
124 | ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0); | ||
125 | ret = ret == sizeof(val) ? 0 : -EIO; | ||
126 | *res = val; | ||
127 | } else { | ||
128 | u32 val; | ||
129 | ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0); | ||
130 | ret = ret == sizeof(val) ? 0 : -EIO; | ||
131 | *res = val; | ||
132 | } | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Get value of register `rn' (in the instruction) | ||
138 | */ | ||
139 | static unsigned long | ||
140 | ptrace_getrn(struct task_struct *child, unsigned long insn) | ||
141 | { | ||
142 | unsigned int reg = (insn >> 16) & 15; | ||
143 | unsigned long val; | ||
144 | |||
145 | val = get_user_reg(child, reg); | ||
146 | if (reg == 15) | ||
147 | val = pc_pointer(val + 8); | ||
148 | |||
149 | return val; | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * Get value of operand 2 (in an ALU instruction) | ||
154 | */ | ||
155 | static unsigned long | ||
156 | ptrace_getaluop2(struct task_struct *child, unsigned long insn) | ||
157 | { | ||
158 | unsigned long val; | ||
159 | int shift; | ||
160 | int type; | ||
161 | |||
162 | if (insn & 1 << 25) { | ||
163 | val = insn & 255; | ||
164 | shift = (insn >> 8) & 15; | ||
165 | type = 3; | ||
166 | } else { | ||
167 | val = get_user_reg (child, insn & 15); | ||
168 | |||
169 | if (insn & (1 << 4)) | ||
170 | shift = (int)get_user_reg (child, (insn >> 8) & 15); | ||
171 | else | ||
172 | shift = (insn >> 7) & 31; | ||
173 | |||
174 | type = (insn >> 5) & 3; | ||
175 | } | ||
176 | |||
177 | switch (type) { | ||
178 | case 0: val <<= shift; break; | ||
179 | case 1: val >>= shift; break; | ||
180 | case 2: | ||
181 | val = (((signed long)val) >> shift); | ||
182 | break; | ||
183 | case 3: | ||
184 | val = (val >> shift) | (val << (32 - shift)); | ||
185 | break; | ||
186 | } | ||
187 | return val; | ||
188 | } | ||
189 | |||
190 | /* | ||
191 | * Get value of operand 2 (in a LDR instruction) | ||
192 | */ | ||
193 | static unsigned long | ||
194 | ptrace_getldrop2(struct task_struct *child, unsigned long insn) | ||
195 | { | ||
196 | unsigned long val; | ||
197 | int shift; | ||
198 | int type; | ||
199 | |||
200 | val = get_user_reg(child, insn & 15); | ||
201 | shift = (insn >> 7) & 31; | ||
202 | type = (insn >> 5) & 3; | ||
203 | |||
204 | switch (type) { | ||
205 | case 0: val <<= shift; break; | ||
206 | case 1: val >>= shift; break; | ||
207 | case 2: | ||
208 | val = (((signed long)val) >> shift); | ||
209 | break; | ||
210 | case 3: | ||
211 | val = (val >> shift) | (val << (32 - shift)); | ||
212 | break; | ||
213 | } | ||
214 | return val; | ||
215 | } | ||
216 | |||
217 | #define OP_MASK 0x01e00000 | ||
218 | #define OP_AND 0x00000000 | ||
219 | #define OP_EOR 0x00200000 | ||
220 | #define OP_SUB 0x00400000 | ||
221 | #define OP_RSB 0x00600000 | ||
222 | #define OP_ADD 0x00800000 | ||
223 | #define OP_ADC 0x00a00000 | ||
224 | #define OP_SBC 0x00c00000 | ||
225 | #define OP_RSC 0x00e00000 | ||
226 | #define OP_ORR 0x01800000 | ||
227 | #define OP_MOV 0x01a00000 | ||
228 | #define OP_BIC 0x01c00000 | ||
229 | #define OP_MVN 0x01e00000 | ||
230 | |||
/*
 * Work out where the instruction at @pc could transfer control to,
 * other than falling through to pc + 4.  Returns the alternative
 * target address, or 0 if the instruction cannot change the PC (or
 * the target could not be determined).  Used by ptrace_set_bpt() to
 * place single-step breakpoints.
 */
static unsigned long
get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
{
	u32 alt = 0;

	switch (insn & 0x0e000000) {
	case 0x00000000:
	case 0x02000000: {
		/*
		 * data processing
		 */
		long aluop1, aluop2, ccbit;

		/* only interesting if the destination register is the PC */
		if ((insn & 0xf000) != 0xf000)
			break;

		aluop1 = ptrace_getrn(child, insn);
		aluop2 = ptrace_getaluop2(child, insn);
		/* carry flag feeds the ADC/SBC/RSC variants below */
		ccbit  = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;

		switch (insn & OP_MASK) {
		case OP_AND: alt = aluop1 & aluop2;		break;
		case OP_EOR: alt = aluop1 ^ aluop2;		break;
		case OP_SUB: alt = aluop1 - aluop2;		break;
		case OP_RSB: alt = aluop2 - aluop1;		break;
		case OP_ADD: alt = aluop1 + aluop2;		break;
		case OP_ADC: alt = aluop1 + aluop2 + ccbit;	break;
		case OP_SBC: alt = aluop1 - aluop2 + ccbit;	break;
		case OP_RSC: alt = aluop2 - aluop1 + ccbit;	break;
		case OP_ORR: alt = aluop1 | aluop2;		break;
		case OP_MOV: alt = aluop2;			break;
		case OP_BIC: alt = aluop1 & ~aluop2;		break;
		case OP_MVN: alt = ~aluop2;			break;
		}
		break;
	}

	case 0x04000000:
	case 0x06000000:
		/*
		 * ldr
		 */
		/* only an LDR whose destination is the PC matters */
		if ((insn & 0x0010f000) == 0x0010f000) {
			unsigned long base;

			base = ptrace_getrn(child, insn);
			/* pre-indexed: apply the offset before the load */
			if (insn & 1 << 24) {
				long aluop2;

				if (insn & 0x02000000)
					aluop2 = ptrace_getldrop2(child, insn);
				else
					aluop2 = insn & 0xfff;

				if (insn & 1 << 23)
					base += aluop2;
				else
					base -= aluop2;
			}
			if (read_u32(child, base, &alt) == 0)
				alt = pc_pointer(alt);
		}
		break;

	case 0x08000000:
		/*
		 * ldm
		 */
		/* only an LDM that loads the PC (bit 15 of the list) */
		if ((insn & 0x00108000) == 0x00108000) {
			unsigned long base;
			unsigned int nr_regs;

			/*
			 * Compute the offset of the PC slot within the
			 * transfer: the PC is always the highest register,
			 * so it is loaded last (ascending) or first
			 * (descending), adjusted for pre/post indexing.
			 */
			if (insn & (1 << 23)) {
				nr_regs = hweight16(insn & 65535) << 2;

				if (!(insn & (1 << 24)))
					nr_regs -= 4;
			} else {
				if (insn & (1 << 24))
					nr_regs = -4;
				else
					nr_regs = 0;
			}

			base = ptrace_getrn(child, insn);

			if (read_u32(child, base + nr_regs, &alt) == 0)
				alt = pc_pointer(alt);
			break;
		}
		break;

	case 0x0a000000: {
		/*
		 * bl or b
		 */
		signed long displ;
		/* It's a branch/branch link: instead of trying to
		 * figure out whether the branch will be taken or not,
		 * we'll put a breakpoint at both locations. This is
		 * simpler, more reliable, and probably not a whole lot
		 * slower than the alternative approach of emulating the
		 * branch.
		 */
		/* sign-extend the 24-bit offset, scale by 4, add the
		 * 8-byte pipeline prefetch */
		displ = (insn & 0x00ffffff) << 8;
		displ = (displ >> 6) + 8;
		/* a branch back to itself or to pc + 4 needs no second
		 * breakpoint */
		if (displ != 0 && displ != 4)
			alt = pc + displ;
		}
		break;
	}

	return alt;
}
345 | |||
/*
 * Exchange an instruction in @task's text: the current bytes are read
 * into @old_insn, then @new_insn is written over them.  Returns the
 * byte count of the last access performed (== @size on full success).
 */
static int
swap_insn(struct task_struct *task, unsigned long addr,
	  void *old_insn, void *new_insn, int size)
{
	int done;

	done = access_process_vm(task, addr, old_insn, size, 0);
	if (done != size)
		return done;

	return access_process_vm(task, addr, new_insn, size, 1);
}
357 | |||
358 | static void | ||
359 | add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr) | ||
360 | { | ||
361 | int nr = dbg->nsaved; | ||
362 | |||
363 | if (nr < 2) { | ||
364 | u32 new_insn = BREAKINST_ARM; | ||
365 | int res; | ||
366 | |||
367 | res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4); | ||
368 | |||
369 | if (res == 4) { | ||
370 | dbg->bp[nr].address = addr; | ||
371 | dbg->nsaved += 1; | ||
372 | } | ||
373 | } else | ||
374 | printk(KERN_ERR "ptrace: too many breakpoints\n"); | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * Clear one breakpoint in the user program. We copy what the hardware | ||
379 | * does and use bit 0 of the address to indicate whether this is a Thumb | ||
380 | * breakpoint or an ARM breakpoint. | ||
381 | */ | ||
382 | static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp) | ||
383 | { | ||
384 | unsigned long addr = bp->address; | ||
385 | union debug_insn old_insn; | ||
386 | int ret; | ||
387 | |||
388 | if (addr & 1) { | ||
389 | ret = swap_insn(task, addr & ~1, &old_insn.thumb, | ||
390 | &bp->insn.thumb, 2); | ||
391 | |||
392 | if (ret != 2 || old_insn.thumb != BREAKINST_THUMB) | ||
393 | printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at " | ||
394 | "0x%08lx (0x%04x)\n", task->comm, task->pid, | ||
395 | addr, old_insn.thumb); | ||
396 | } else { | ||
397 | ret = swap_insn(task, addr & ~3, &old_insn.arm, | ||
398 | &bp->insn.arm, 4); | ||
399 | |||
400 | if (ret != 4 || old_insn.arm != BREAKINST_ARM) | ||
401 | printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at " | ||
402 | "0x%08lx (0x%08x)\n", task->comm, task->pid, | ||
403 | addr, old_insn.arm); | ||
404 | } | ||
405 | } | ||
406 | |||
/*
 * Plant the breakpoints needed to single-step @child: one at the
 * alternative target of the instruction at the current PC (if it can
 * branch) and/or one at the next sequential instruction.  Thumb mode
 * is not supported.
 */
void ptrace_set_bpt(struct task_struct *child)
{
	struct pt_regs *regs;
	unsigned long pc;
	u32 insn;
	int res;

	regs = get_user_regs(child);
	pc = instruction_pointer(regs);

	if (thumb_mode(regs)) {
		printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
		return;
	}

	res = read_instr(child, pc, &insn);
	if (!res) {
		struct debug_info *dbg = &child->thread.debug;
		unsigned long alt;

		dbg->nsaved = 0;

		alt = get_branch_address(child, pc, insn);
		if (alt)
			add_breakpoint(child, dbg, alt);

		/*
		 * Note that we ignore the result of setting the above
		 * breakpoint since it may fail.  When it does, this is
		 * not so much an error, but a forewarning that we may
		 * be receiving a prefetch abort shortly.
		 *
		 * If we don't set this breakpoint here, then we can
		 * lose control of the thread during single stepping.
		 */
		if (!alt || predicate(insn) != PREDICATE_ALWAYS)
			add_breakpoint(child, dbg, pc + 4);
	}
}
446 | |||
447 | /* | ||
448 | * Ensure no single-step breakpoint is pending. Returns non-zero | ||
449 | * value if child was being single-stepped. | ||
450 | */ | ||
451 | void ptrace_cancel_bpt(struct task_struct *child) | ||
452 | { | ||
453 | int i, nsaved = child->thread.debug.nsaved; | ||
454 | |||
455 | child->thread.debug.nsaved = 0; | ||
456 | |||
457 | if (nsaved > 2) { | ||
458 | printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); | ||
459 | nsaved = 2; | ||
460 | } | ||
461 | |||
462 | for (i = 0; i < nsaved; i++) | ||
463 | clear_breakpoint(child, &child->thread.debug.bp[i]); | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * Called by kernel/ptrace.c when detaching.. | ||
468 | * | ||
469 | * Make sure the single step bit is not set. | ||
470 | */ | ||
471 | void ptrace_disable(struct task_struct *child) | ||
472 | { | ||
473 | child->ptrace &= ~PT_SINGLESTEP; | ||
474 | ptrace_cancel_bpt(child); | ||
475 | } | ||
476 | |||
477 | /* | ||
478 | * Handle hitting a breakpoint. | ||
479 | */ | ||
480 | void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) | ||
481 | { | ||
482 | siginfo_t info; | ||
483 | |||
484 | ptrace_cancel_bpt(tsk); | ||
485 | |||
486 | info.si_signo = SIGTRAP; | ||
487 | info.si_errno = 0; | ||
488 | info.si_code = TRAP_BRKPT; | ||
489 | info.si_addr = (void __user *)instruction_pointer(regs); | ||
490 | |||
491 | force_sig_info(SIGTRAP, &info, tsk); | ||
492 | } | ||
493 | |||
/*
 * Undefined-instruction hook handler: any ARM or Thumb breakpoint
 * instruction lands here.  Returning 0 tells the undef machinery the
 * instruction was handled.
 */
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}
499 | |||
/*
 * Match the ARM breakpoint instruction in any condition-code form
 * (the top nibble is ignored by the mask), but only when the CPU is
 * not executing Thumb code.
 */
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

/*
 * Match the 16-bit Thumb breakpoint instruction, only in Thumb mode.
 */
static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};
515 | |||
/* Register both breakpoint undef hooks early in boot. */
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
524 | |||
525 | /* | ||
526 | * Read the word at offset "off" into the "struct user". We | ||
527 | * actually access the pt_regs stored on the kernel stack. | ||
528 | */ | ||
529 | static int ptrace_read_user(struct task_struct *tsk, unsigned long off, | ||
530 | unsigned long __user *ret) | ||
531 | { | ||
532 | unsigned long tmp; | ||
533 | |||
534 | if (off & 3 || off >= sizeof(struct user)) | ||
535 | return -EIO; | ||
536 | |||
537 | tmp = 0; | ||
538 | if (off < sizeof(struct pt_regs)) | ||
539 | tmp = get_user_reg(tsk, off >> 2); | ||
540 | |||
541 | return put_user(tmp, ret); | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * Write the word at offset "off" into "struct user". We | ||
546 | * actually access the pt_regs stored on the kernel stack. | ||
547 | */ | ||
548 | static int ptrace_write_user(struct task_struct *tsk, unsigned long off, | ||
549 | unsigned long val) | ||
550 | { | ||
551 | if (off & 3 || off >= sizeof(struct user)) | ||
552 | return -EIO; | ||
553 | |||
554 | if (off >= sizeof(struct pt_regs)) | ||
555 | return 0; | ||
556 | |||
557 | return put_user_reg(tsk, off >> 2, val); | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * Get all user integer registers. | ||
562 | */ | ||
563 | static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) | ||
564 | { | ||
565 | struct pt_regs *regs = get_user_regs(tsk); | ||
566 | |||
567 | return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * Set all user integer registers. | ||
572 | */ | ||
573 | static int ptrace_setregs(struct task_struct *tsk, void __user *uregs) | ||
574 | { | ||
575 | struct pt_regs newregs; | ||
576 | int ret; | ||
577 | |||
578 | ret = -EFAULT; | ||
579 | if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { | ||
580 | struct pt_regs *regs = get_user_regs(tsk); | ||
581 | |||
582 | ret = -EINVAL; | ||
583 | if (valid_user_regs(&newregs)) { | ||
584 | *regs = newregs; | ||
585 | ret = 0; | ||
586 | } | ||
587 | } | ||
588 | |||
589 | return ret; | ||
590 | } | ||
591 | |||
592 | /* | ||
593 | * Get the child FPU state. | ||
594 | */ | ||
595 | static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp) | ||
596 | { | ||
597 | return copy_to_user(ufp, &tsk->thread_info->fpstate, | ||
598 | sizeof(struct user_fp)) ? -EFAULT : 0; | ||
599 | } | ||
600 | |||
601 | /* | ||
602 | * Set the child FPU state. | ||
603 | */ | ||
604 | static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp) | ||
605 | { | ||
606 | struct thread_info *thread = tsk->thread_info; | ||
607 | thread->used_cp[1] = thread->used_cp[2] = 1; | ||
608 | return copy_from_user(&thread->fpstate, ufp, | ||
609 | sizeof(struct user_fp)) ? -EFAULT : 0; | ||
610 | } | ||
611 | |||
612 | #ifdef CONFIG_IWMMXT | ||
613 | |||
614 | /* | ||
615 | * Get the child iWMMXt state. | ||
616 | */ | ||
617 | static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) | ||
618 | { | ||
619 | struct thread_info *thread = tsk->thread_info; | ||
620 | void *ptr = &thread->fpstate; | ||
621 | |||
622 | if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) | ||
623 | return -ENODATA; | ||
624 | iwmmxt_task_disable(thread); /* force it to ram */ | ||
625 | /* The iWMMXt state is stored doubleword-aligned. */ | ||
626 | if (((long) ptr) & 4) | ||
627 | ptr += 4; | ||
628 | return copy_to_user(ufp, ptr, 0x98) ? -EFAULT : 0; | ||
629 | } | ||
630 | |||
631 | /* | ||
632 | * Set the child iWMMXt state. | ||
633 | */ | ||
634 | static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) | ||
635 | { | ||
636 | struct thread_info *thread = tsk->thread_info; | ||
637 | void *ptr = &thread->fpstate; | ||
638 | |||
639 | if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) | ||
640 | return -EACCES; | ||
641 | iwmmxt_task_release(thread); /* force a reload */ | ||
642 | /* The iWMMXt state is stored doubleword-aligned. */ | ||
643 | if (((long) ptr) & 4) | ||
644 | ptr += 4; | ||
645 | return copy_from_user(ptr, ufp, 0x98) ? -EFAULT : 0; | ||
646 | } | ||
647 | |||
648 | #endif | ||
649 | |||
/*
 * Dispatch a single ptrace request against @child.  Called from
 * sys_ptrace() after the child has been found and attach-checked.
 * Returns 0 on success or a negative errno.
 */
static int do_ptrace(int request, struct task_struct *child, long addr, long data)
{
	unsigned long tmp;
	int ret;

	switch (request) {
		/*
		 * read word at location "addr" in the child process.
		 */
		case PTRACE_PEEKTEXT:
		case PTRACE_PEEKDATA:
			ret = access_process_vm(child, addr, &tmp,
						sizeof(unsigned long), 0);
			if (ret == sizeof(unsigned long))
				ret = put_user(tmp, (unsigned long __user *) data);
			else
				ret = -EIO;
			break;

		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
			break;

		/*
		 * write the word at location addr.
		 */
		case PTRACE_POKETEXT:
		case PTRACE_POKEDATA:
			ret = access_process_vm(child, addr, &data,
						sizeof(unsigned long), 1);
			if (ret == sizeof(unsigned long))
				ret = 0;
			else
				ret = -EIO;
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		/*
		 * continue/restart and stop at next (return from) syscall
		 */
		case PTRACE_SYSCALL:
		case PTRACE_CONT:
			ret = -EIO;
			/* data is the signal to deliver on resume */
			if ((unsigned long) data > _NSIG)
				break;
			if (request == PTRACE_SYSCALL)
				set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			else
				clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			child->exit_code = data;
			/* make sure single-step breakpoint is gone. */
			child->ptrace &= ~PT_SINGLESTEP;
			ptrace_cancel_bpt(child);
			wake_up_process(child);
			ret = 0;
			break;

		/*
		 * make the child exit.  Best I can do is send it a sigkill.
		 * perhaps it should be put in the status that it wants to
		 * exit.
		 */
		case PTRACE_KILL:
			/* make sure single-step breakpoint is gone. */
			child->ptrace &= ~PT_SINGLESTEP;
			ptrace_cancel_bpt(child);
			if (child->exit_state != EXIT_ZOMBIE) {
				child->exit_code = SIGKILL;
				wake_up_process(child);
			}
			ret = 0;
			break;

		/*
		 * execute single instruction.
		 */
		case PTRACE_SINGLESTEP:
			ret = -EIO;
			if ((unsigned long) data > _NSIG)
				break;
			/* the breakpoints are planted lazily, on the way
			 * back out to userspace */
			child->ptrace |= PT_SINGLESTEP;
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			child->exit_code = data;
			/* give it a chance to run. */
			wake_up_process(child);
			ret = 0;
			break;

		case PTRACE_DETACH:
			ret = ptrace_detach(child, data);
			break;

		case PTRACE_GETREGS:
			ret = ptrace_getregs(child, (void __user *)data);
			break;

		case PTRACE_SETREGS:
			ret = ptrace_setregs(child, (void __user *)data);
			break;

		case PTRACE_GETFPREGS:
			ret = ptrace_getfpregs(child, (void __user *)data);
			break;
		
		case PTRACE_SETFPREGS:
			ret = ptrace_setfpregs(child, (void __user *)data);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, (void __user *)data);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, (void __user *)data);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			/* report the TLS value for this thread */
			ret = put_user(child->thread_info->tp_value,
				       (unsigned long __user *) data);
			break;

		default:
			/* generic requests (GETEVENTMSG etc.) */
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
783 | |||
/*
 * ptrace system call entry point: handle PTRACE_TRACEME and
 * PTRACE_ATTACH locally, look up and pin the target task, verify the
 * attach state, and hand everything else to do_ptrace().
 */
asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		/* pin the task so it can't vanish once we drop the lock */
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}
	/* PTRACE_KILL is allowed even if the child isn't stopped */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret == 0)
		ret = do_ptrace(request, child, addr, data);

out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
830 | |||
/*
 * Notify a ptrace parent of syscall entry (why == 0) or exit
 * (why == 1).  Called from the syscall glue when TIF_SYSCALL_TRACE
 * is set.
 */
asmlinkage void syscall_trace(int why, struct pt_regs *regs)
{
	unsigned long ip;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * Save IP.  IP is used to denote syscall entry/exit:
	 *  IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	/* restore the real IP before returning to the syscall path */
	regs->ARM_ip = ip;
}
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h new file mode 100644 index 000000000000..f7cad13a22e9 --- /dev/null +++ b/arch/arm/kernel/ptrace.h | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/ptrace.h | ||
3 | * | ||
4 | * Copyright (C) 2000-2003 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | extern void ptrace_cancel_bpt(struct task_struct *); | ||
11 | extern void ptrace_set_bpt(struct task_struct *); | ||
12 | extern void ptrace_break(struct task_struct *, struct pt_regs *); | ||
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c new file mode 100644 index 000000000000..ac423e3e224b --- /dev/null +++ b/arch/arm/kernel/semaphore.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * ARM semaphore implementation, taken from | ||
3 | * | ||
4 | * i386 semaphore implementation. | ||
5 | * | ||
6 | * (C) Copyright 1999 Linus Torvalds | ||
7 | * | ||
8 | * Modified for ARM by Russell King | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/init.h> | ||
18 | |||
19 | #include <asm/semaphore.h> | ||
20 | |||
21 | /* | ||
22 | * Semaphores are implemented using a two-way counter: | ||
23 | * The "count" variable is decremented for each process | ||
24 | * that tries to acquire the semaphore, while the "sleeping" | ||
25 | * variable is a count of such acquires. | ||
26 | * | ||
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
31 | * | ||
32 | * "sleeping" and the contention routine ordering is | ||
33 | * protected by the semaphore spinlock. | ||
34 | * | ||
35 | * Note that these functions are only called when there is | ||
36 | * contention on the lock, and as such all this is the | ||
37 | * "non-critical" part of the whole semaphore business. The | ||
38 | * critical part is the inline stuff in <asm/semaphore.h> | ||
39 | * where we want to avoid any extra jumps and calls. | ||
40 | */ | ||
41 | |||
42 | /* | ||
43 | * Logic: | ||
44 | * - only on a boundary condition do we need to care. When we go | ||
45 | * from a negative count to a non-negative, we wake people up. | ||
46 | * - when we go from a non-negative count to a negative do we | ||
47 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
48 | * that we're on the wakeup list before we synchronize so that | ||
49 | * we cannot lose wakeup events. | ||
50 | */ | ||
51 | |||
/*
 * Slow path of up(): the count went non-negative again, so wake a
 * waiter (the wait queue is populated exclusively, see __down()).
 */
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
56 | |||
/* Protects "sleepers" and orders the contention handling below. */
static DEFINE_SPINLOCK(semaphore_lock);

/*
 * Slow path of down(): sleep uninterruptibly until the semaphore is
 * granted.  The fast path has already decremented sem->count.
 */
void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		/* Re-arm the sleep state before re-checking under the lock. */
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	/* Another sleeper may be able to run now - pass the wakeup on. */
	wake_up(&sem->wait);
}
91 | |||
/*
 * Interruptible counterpart of __down(): identical sleeper accounting,
 * but a pending signal aborts the wait and returns -EINTR (0 on
 * successful acquisition).
 */
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		/* Re-arm the sleep state before re-checking under the lock. */
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	/* Another sleeper may be able to run now - pass the wakeup on. */
	wake_up(&sem->wait);
	return retval;
}
142 | |||
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 *
 * Always returns 1: by the time we get here the trylock has already
 * failed; this routine only undoes the speculative decrement.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	/* Give back our decrement plus everyone else's (see __down()). */
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
170 | |||
/*
 * The semaphore operations have a special calling sequence that
 * allow us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry. Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases..
 *
 * Each stub below saves the caller-visible registers, moves the
 * semaphore pointer from ip into r0 (the C argument register), calls
 * the C slow path, and for the routines that return a value copies
 * that result back into ip before restoring and returning.
 */
asm("	.section .sched.text,\"ax\"		\n\
	.align	5				\n\
	.globl	__down_failed			\n\
__down_failed:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
						\n\
	.align	5				\n\
	.globl	__down_interruptible_failed	\n\
__down_interruptible_failed:			\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_interruptible		\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
						\n\
	.align	5				\n\
	.globl	__down_trylock_failed		\n\
__down_trylock_failed:				\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_trylock			\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
						\n\
	.align	5				\n\
	.globl	__up_wakeup			\n\
__up_wakeup:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__up				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
	");

/* Exported so the inline fast paths in modules can reach the stubs. */
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_interruptible_failed);
EXPORT_SYMBOL(__down_trylock_failed);
EXPORT_SYMBOL(__up_wakeup);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c new file mode 100644 index 000000000000..c2a7da3ac0f1 --- /dev/null +++ b/arch/arm/kernel/setup.c | |||
@@ -0,0 +1,875 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/setup.c | ||
3 | * | ||
4 | * Copyright (C) 1995-2001 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/utsname.h> | ||
17 | #include <linux/initrd.h> | ||
18 | #include <linux/console.h> | ||
19 | #include <linux/bootmem.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | #include <linux/tty.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/root_dev.h> | ||
24 | #include <linux/cpu.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | |||
27 | #include <asm/cpu.h> | ||
28 | #include <asm/elf.h> | ||
29 | #include <asm/hardware.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/procinfo.h> | ||
32 | #include <asm/setup.h> | ||
33 | #include <asm/mach-types.h> | ||
34 | #include <asm/cacheflush.h> | ||
35 | #include <asm/tlbflush.h> | ||
36 | |||
37 | #include <asm/mach/arch.h> | ||
38 | #include <asm/mach/irq.h> | ||
39 | #include <asm/mach/time.h> | ||
40 | |||
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)	/* default memory size: 16MB */
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Value of the "fpe=" boot argument; presumably consumed by the FP
   emulator - it is not read anywhere in this file. */
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	/* Fixed 8-byte copy: fpe_type is not guaranteed NUL-terminated. */
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
56 | |||
/* Helpers provided elsewhere in arch/arm (mm and compat code). */
extern unsigned int mem_fclk_21285;
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void convert_to_tag_list(struct tag *tags);
extern void squash_mem_tags(struct tag *tag);
extern void reboot_setup(char *str);
extern int root_mountflags;
/* Linker-script symbols marking the kernel image layout. */
extern void _stext, _text, _etext, __data_start, _edata, _end;

/* CPU main ID register value - NOTE(review): set outside this file,
   presumably by head.S before C code runs; confirm there. */
unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

/* Board revision and serial number, filled in from ATAGs below. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hwcap bits advertised to userspace (see hwcap_str below). */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


/* Per-build copies of the processor/TLB/user/cache operation vectors,
   filled in from the proc_info entry in setup_processor(). */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

/* Physical location/size of the initrd, from "initrd=" or ATAGs. */
unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Run-time endianness probe: reads 'l' on little-endian, 'b' on big. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
111 | |||
/*
 * Standard memory resources, registered against iomem_resource in
 * request_standard_resources().  start/end are filled in at boot.
 */
static struct resource mem_res[] = {
	{ "Video RAM",   0,     0,     IORESOURCE_MEM			},
	{ "Kernel text", 0,     0,     IORESOURCE_MEM			},
	{ "Kernel data", 0,     0,     IORESOURCE_MEM			}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy parallel-port I/O ranges, reserved only when the machine
   record asks for them (see request_standard_resources()). */
static struct resource io_res[] = {
	{ "reserved",    0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
	{ "reserved",    0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
134 | |||
/* Human-readable decode of the cache type register, indexed by the
   field extracted with CACHE_TYPE() below. */
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

/* Cache clean method, same index as cache_types[]. */
static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

/* Cache lockdown format, same index as cache_types[]. */
static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

/* Architecture name suffix, indexed by cpu_architecture()'s result. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"?(10)",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/* Field extractors for the whole cache type register value x ... */
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

/* ... and for one 12-bit I/D size field y extracted above. */
#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
221 | |||
/*
 * Print one cache's geometry from a 12-bit size field (see the
 * CACHE_* macros): the M bit selects a 3x multiplier instead of 2x,
 * and sizes/associativity are log2-encoded.
 */
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}
234 | |||
/*
 * Describe this CPU's caches at boot.  On CPUs without a cache type
 * register, reading it returns the main ID value instead - in that
 * case there is nothing meaningful to print.
 */
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			/* Separate (Harvard) instruction and data caches. */
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			/* Unified cache: only the I-size field is valid. */
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}
}
250 | |||
251 | int cpu_architecture(void) | ||
252 | { | ||
253 | int cpu_arch; | ||
254 | |||
255 | if ((processor_id & 0x0000f000) == 0) { | ||
256 | cpu_arch = CPU_ARCH_UNKNOWN; | ||
257 | } else if ((processor_id & 0x0000f000) == 0x00007000) { | ||
258 | cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3; | ||
259 | } else { | ||
260 | cpu_arch = (processor_id >> 16) & 7; | ||
261 | if (cpu_arch) | ||
262 | cpu_arch += CPU_ARCH_ARMv3; | ||
263 | } | ||
264 | |||
265 | return cpu_arch; | ||
266 | } | ||
267 | |||
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(void);
extern struct machine_desc *lookup_machine_type(unsigned int);

/*
 * Identify the CPU from processor_id, install its operation vectors
 * and set up the ELF platform/hwcap strings.  Hangs if the CPU is
 * not in the compiled-in processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type();
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);	/* nothing sensible left to do - hang */
	}

	cpu_name = list->cpu_name;

	/* Copy the per-CPU operation vectors for multi-CPU builds. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	dump_cpu_info(smp_processor_id());

	/* ENDIANNESS appends 'l' or 'b' to the arch/ELF names. */
	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpu_proc_init();
}
318 | |||
/*
 * Look up the machine descriptor for machine number "nr".  Hangs if
 * the machine is not in the compiled-in table - we cannot boot a
 * board we know nothing about.
 */
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);	/* nothing sensible left to do - hang */
	}

	printk("Machine: %s\n", list->name);

	return list;
}
337 | |||
/*
 * Parse "initrd=start,size" from the command line.  Both values are
 * recorded only when the comma and size are present; a lone start
 * address is ignored.
 */
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
351 | |||
352 | /* | ||
353 | * Pick out the memory size. We look for mem=size@start, | ||
354 | * where start and size are "size[KkMm]" | ||
355 | */ | ||
356 | static void __init early_mem(char **p) | ||
357 | { | ||
358 | static int usermem __initdata = 0; | ||
359 | unsigned long size, start; | ||
360 | |||
361 | /* | ||
362 | * If the user specifies memory size, we | ||
363 | * blow away any automatically generated | ||
364 | * size. | ||
365 | */ | ||
366 | if (usermem == 0) { | ||
367 | usermem = 1; | ||
368 | meminfo.nr_banks = 0; | ||
369 | } | ||
370 | |||
371 | start = PHYS_OFFSET; | ||
372 | size = memparse(*p, p); | ||
373 | if (**p == '@') | ||
374 | start = memparse(*p + 1, p); | ||
375 | |||
376 | meminfo.bank[meminfo.nr_banks].start = start; | ||
377 | meminfo.bank[meminfo.nr_banks].size = size; | ||
378 | meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start); | ||
379 | meminfo.nr_banks += 1; | ||
380 | } | ||
381 | __early_param("mem=", early_mem); | ||
382 | |||
383 | /* | ||
384 | * Initial parsing of the command line. | ||
385 | */ | ||
386 | static void __init parse_cmdline(char **cmdline_p, char *from) | ||
387 | { | ||
388 | char c = ' ', *to = command_line; | ||
389 | int len = 0; | ||
390 | |||
391 | for (;;) { | ||
392 | if (c == ' ') { | ||
393 | extern struct early_params __early_begin, __early_end; | ||
394 | struct early_params *p; | ||
395 | |||
396 | for (p = &__early_begin; p < &__early_end; p++) { | ||
397 | int len = strlen(p->arg); | ||
398 | |||
399 | if (memcmp(from, p->arg, len) == 0) { | ||
400 | if (to != command_line) | ||
401 | to -= 1; | ||
402 | from += len; | ||
403 | p->fn(&from); | ||
404 | |||
405 | while (*from != ' ' && *from != '\0') | ||
406 | from++; | ||
407 | break; | ||
408 | } | ||
409 | } | ||
410 | } | ||
411 | c = *from++; | ||
412 | if (!c) | ||
413 | break; | ||
414 | if (COMMAND_LINE_SIZE <= ++len) | ||
415 | break; | ||
416 | *to++ = c; | ||
417 | } | ||
418 | *to = '\0'; | ||
419 | *cmdline_p = command_line; | ||
420 | } | ||
421 | |||
/*
 * Forward RAM-disk parameters (from ATAG_RAMDISK) to the block
 * RAM-disk driver's module-level variables.  No-op when the driver
 * is not configured in.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	/* Only override the driver's default size if one was given. */
	if (rd_sz)
		rd_size = rd_sz;
#endif
}
436 | |||
/*
 * Register the standard /proc/iomem and /proc/ioports entries: one
 * "System RAM" resource per memory bank, the kernel text/data regions
 * nested inside the bank that contains them, plus optional video RAM
 * and legacy parallel-port ranges from the machine record.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start  = virt_to_phys(&_text);
	kernel_code.end    = virt_to_phys(&_etext - 1);
	kernel_data.start  = virt_to_phys(&__data_start);
	kernel_data.end    = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		/* Boot-time allocation; these resources live forever. */
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image inside its containing bank. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
490 | |||
/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	/* An ATAG_CORE may be header-only; only a full-sized one
	   carries flags and the root device. */
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
512 | |||
/*
 * Record one memory bank described by an ATAG_MEM tag, up to the
 * NR_BANKS limit of the meminfo array.
 */
static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
	meminfo.bank[meminfo.nr_banks].size  = tag->u.mem.size;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(tag->u.mem.start);
	meminfo.nr_banks += 1;

	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
530 | |||
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Console text-mode defaults; overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* Copy the bootloader's console geometry into screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
557 | |||
/* Decode ATAG_RAMDISK: flag bit 0 = don't load, bit 1 = don't prompt
   (hence the inverted "& N) == 0" tests). */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
567 | |||
/* Old-style initrd tag: start is a virtual address, so convert it.
   Superseded by ATAG_INITRD2 below. */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
578 | |||
/* New-style initrd tag: start is already a physical address. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
587 | |||
/* Record the 64-bit board serial number (shown in /proc/cpuinfo). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
596 | |||
/* Record the board revision number. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
604 | |||
/* Replace the compiled-in default command line with the
   bootloader-supplied one (bounded, always NUL-terminated). */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
612 | |||
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 *
 * Returns non-zero if a handler for the tag was found.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* t stopped short of the end iff the loop matched a handler. */
	return t < &__tagtable_end;
}
631 | |||
632 | /* | ||
633 | * Parse all tags in the list, checking both the global and architecture | ||
634 | * specific tag tables. | ||
635 | */ | ||
636 | static void __init parse_tags(const struct tag *t) | ||
637 | { | ||
638 | for (; t->hdr.size; t = tag_next(t)) | ||
639 | if (!parse_tag(t)) | ||
640 | printk(KERN_WARNING | ||
641 | "Ignoring unrecognised tag 0x%08x\n", | ||
642 | t->hdr.tag); | ||
643 | } | ||
644 | |||
/*
 * This holds our defaults: a minimal tag list (core + one memory
 * bank + terminator) used when the bootloader supplies nothing
 * usable.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }		/* zero-size terminator */
};
661 | |||
/* Machine-specific init hook, recorded by setup_arch() and run later
   as an arch_initcall. */
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
672 | |||
/*
 * Main architecture-specific boot setup: identify CPU and machine,
 * find and parse the boot tag list and command line, then initialise
 * the memory map and record the machine's IRQ/timer/init hooks.
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/* The machine record may point at a bootloader parameter area. */
	if (mdesc->param_offset)
		tags = phys_to_virt(mdesc->param_offset);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	/* Still not a tag list: fall back to the built-in defaults. */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* If the fixup supplied memory banks directly, drop any
		   ATAG_MEM entries so they don't get added twice. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
733 | |||
734 | |||
/*
 * topology_init - register every present CPU with the sysfs CPU subsystem.
 * Runs once at boot as a subsys initcall; always succeeds.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);
746 | |||
/*
 * Feature names for the /proc/cpuinfo "Features" line; index i names
 * bit (1 << i) of elf_hwcap.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	NULL
};
759 | |||
/*
 * Decode one CP15-style cache-size field and print its geometry
 * (size, associativity, line length, number of sets) to the seq file.
 * "type" is the label prefix, e.g. "I", "D" or "Cache".
 */
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	/* size multiplier: 3 when the M bit is set, otherwise 2 */
	unsigned int multiplier = CACHE_M(cache) ? 3 : 2;

	seq_printf(m, "%s size\t\t: %d\n",
		   type, multiplier << (8 + CACHE_SIZE(cache)));
	seq_printf(m, "%s assoc\t\t: %d\n",
		   type, (multiplier << CACHE_ASSOC(cache)) >> 1);
	seq_printf(m, "%s line length\t: %d\n",
		   type, 8 << CACHE_LINE(cache));
	seq_printf(m, "%s sets\t\t: %d\n",
		   type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			       CACHE_LINE(cache)));
}
775 | |||
/*
 * c_show - emit the body of /proc/cpuinfo.
 *
 * Prints processor identification, BogoMIPS (per-CPU on SMP), the
 * hwcap feature list, fields decoded from the CP15 main ID register
 * (processor_id), the cache geometry, and the machine/revision/serial
 * data.  Always returns 0.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		seq_printf(m, "Processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* part/variant field layout depends on the CPU generation */
	if ((processor_id & 0x0000f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0000f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		/* cores without a cache-type register read back processor_id */
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				   "Cache clean\t: %s\n"
				   "Cache lockdown\t: %s\n"
				   "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				/* separate (Harvard) I and D caches */
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
854 | |||
855 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
856 | { | ||
857 | return *pos < 1 ? (void *)1 : NULL; | ||
858 | } | ||
859 | |||
860 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
861 | { | ||
862 | ++*pos; | ||
863 | return NULL; | ||
864 | } | ||
865 | |||
/* seq_file stop: nothing to release for /proc/cpuinfo */
static void c_stop(struct seq_file *m, void *v)
{
}
869 | |||
/* iterator ops backing /proc/cpuinfo (referenced from fs/proc) */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c new file mode 100644 index 000000000000..931919fd5121 --- /dev/null +++ b/arch/arm/kernel/signal.c | |||
@@ -0,0 +1,748 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/signal.c | ||
3 | * | ||
4 | * Copyright (C) 1995-2002 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/signal.h> | ||
13 | #include <linux/ptrace.h> | ||
14 | #include <linux/personality.h> | ||
15 | |||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/ucontext.h> | ||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/unistd.h> | ||
20 | |||
21 | #include "ptrace.h" | ||
22 | |||
23 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
24 | |||
25 | /* | ||
26 | * For ARM syscalls, we encode the syscall number into the instruction. | ||
27 | */ | ||
28 | #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)) | ||
29 | #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)) | ||
30 | |||
31 | /* | ||
32 | * For Thumb syscalls, we pass the syscall number via r7. We therefore | ||
33 | * need two 16-bit instructions. | ||
34 | */ | ||
35 | #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) | ||
36 | #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) | ||
37 | |||
/*
 * Signal-return trampoline opcodes, indexed as (thumb ? 1 : 0) + (rt ? 2 : 0);
 * one of these is copied onto the user stack when no SA_RESTORER is given.
 */
static const unsigned long retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};

static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
44 | |||
45 | /* | ||
46 | * atomically swap in the new signal mask, and wait for a signal. | ||
47 | */ | ||
/*
 * atomically swap in the new signal mask, and wait for a signal.
 *
 * The old mask is saved locally and restored by do_signal() when a
 * handled signal arrives; until then the task sleeps interruptibly.
 * r0 is preloaded with -EINTR so the handler's sigframe records the
 * correct syscall return value.
 */
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
{
	sigset_t saveset;

	/* never allow SIGKILL/SIGSTOP to be blocked */
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->ARM_r0 = -EINTR;

	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/* only return once a signal was actually delivered */
		if (do_signal(&saveset, regs, 0))
			return regs->ARM_r0;
	}
}
67 | |||
/*
 * rt variant of sigsuspend: the new mask is a full sigset_t copied
 * from user space.  Same swap-mask-then-sleep protocol as above.
 */
asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
{
	sigset_t saveset, newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	/* never allow SIGKILL/SIGSTOP to be blocked */
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	regs->ARM_r0 = -EINTR;

	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		/* only return once a signal was actually delivered */
		if (do_signal(&saveset, regs, 0))
			return regs->ARM_r0;
	}
}
95 | |||
/*
 * Old-style sigaction syscall: translate the user's old_sigaction
 * (single-word mask) to/from a kernel k_sigaction and hand off to
 * the generic do_sigaction().  Returns 0 or -EFAULT/-EINVAL.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* one access_ok() check covers the subsequent __get_user()s */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		/* old ABI only reports the first word of the mask */
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
127 | |||
128 | #ifdef CONFIG_IWMMXT | ||
129 | |||
130 | /* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */ | ||
131 | #define IWMMXT_STORAGE_SIZE (0x98 + 8) | ||
132 | #define IWMMXT_MAGIC0 0x12ef842a | ||
133 | #define IWMMXT_MAGIC1 0x1c07ca71 | ||
134 | |||
/* iWMMXt state as saved on the user signal stack: two magic words
 * for validation followed by the 0x98-byte coprocessor dump. */
struct iwmmxt_sigframe {
	unsigned long	magic0;
	unsigned long	magic1;
	unsigned long	storage[0x98/4];
};
140 | |||
/*
 * Walk the page tables by hand and report whether the page backing
 * the user address is resident (and writable, when wr is set).
 * Caller must hold mm->page_table_lock so the answer stays valid.
 *
 * NOTE(review): pte_offset_map() is not paired with pte_unmap() here —
 * confirm this is safe with CONFIG_HIGHPTE on this kernel.
 */
static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
{
	unsigned long addr = (unsigned long)uptr;
	pgd_t *pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pmd_t *pmd = pmd_offset(pgd, addr);
		if (pmd_present(*pmd)) {
			pte_t *pte = pte_offset_map(pmd, addr);
			return (pte_present(*pte) && (!wr || pte_write(*pte)));
		}
	}
	return 0;
}
154 | |||
/*
 * Run a raw copy routine (copyfn, which performs no access checking of
 * its own) against user memory safely: first touch the first and last
 * byte of the range with checked accessors to fault the pages in and
 * verify permissions, then re-check residency under page_table_lock and
 * do the copy while the pages cannot go away.  Returns 0 on success,
 * non-zero on failure.
 *
 * NOTE(review): on the retry path (page stolen between fault-in and the
 * locked check) err is set to 1 and never cleared, so the second pass
 * breaks out at the "if (err)" test and reports failure — verify this
 * against the __put_user_error()/__get_user_error() error semantics.
 */
static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
		       void (*copyfn)(void *, void __user *))
{
	unsigned char v, __user *userptr = uptr;
	int err = 0;

	do {
		struct mm_struct *mm;

		/* touch both ends of the range to fault pages in / check perms */
		if (write) {
			__put_user_error(0, userptr, err);
			__put_user_error(0, userptr + size - 1, err);
		} else {
			__get_user_error(v, userptr, err);
			__get_user_error(v, userptr + size - 1, err);
		}

		if (err)
			break;

		mm = current->mm;
		spin_lock(&mm->page_table_lock);
		if (page_present(mm, userptr, write) &&
		    page_present(mm, userptr + size - 1, write)) {
			copyfn(kptr, uptr);
		} else
			err = 1;
		spin_unlock(&mm->page_table_lock);
	} while (err);

	return err;
}
187 | |||
/*
 * Save the current task's iWMMXt coprocessor state into the user
 * signal frame, tagging it with the magic signature so restore can
 * validate it.  Returns 0 on success, non-zero on fault.
 */
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	int err = 0;

	/* the iWMMXt context must be 64 bit aligned */
	WARN_ON((unsigned long)frame & 7);

	__put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
	__put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);

	/*
	 * iwmmxt_task_copy() doesn't check user permissions.
	 * Let's do a dummy write on the upper boundary to ensure
	 * access to user mem is OK all way up.
	 */
	err |= copy_locked(&frame->storage, current_thread_info(),
			   sizeof(frame->storage), 1, iwmmxt_task_copy);
	return err;
}
207 | |||
/*
 * Reload the task's iWMMXt state from a signal frame, but only after
 * the magic signature checks out.  Returns 0 on success, non-zero on
 * fault or bad signature.
 */
static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	unsigned long magic0, magic1;
	int err = 0;

	/* the iWMMXt context is 64 bit aligned */
	WARN_ON((unsigned long)frame & 7);

	/*
	 * Validate iWMMXt context signature.
	 * Also, iwmmxt_task_restore() doesn't check user permissions.
	 * Let's do a dummy write on the upper boundary to ensure
	 * access to user mem is OK all way up.
	 */
	__get_user_error(magic0, &frame->magic0, err);
	__get_user_error(magic1, &frame->magic1, err);
	if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
		err = copy_locked(&frame->storage, current_thread_info(),
				  sizeof(frame->storage), 0, iwmmxt_task_restore);
	return err;
}
229 | |||
230 | #endif | ||
231 | |||
232 | /* | ||
233 | * Auxiliary signal frame. This saves stuff like FP state. | ||
234 | * The layout of this structure is not part of the user ABI. | ||
235 | */ | ||
236 | struct aux_sigframe { | ||
237 | #ifdef CONFIG_IWMMXT | ||
238 | struct iwmmxt_sigframe iwmmxt; | ||
239 | #endif | ||
240 | #ifdef CONFIG_VFP | ||
241 | union vfp_state vfp; | ||
242 | #endif | ||
243 | }; | ||
244 | |||
245 | /* | ||
246 | * Do a signal return; undo the signal stack. These are aligned to 64-bit. | ||
247 | */ | ||
/* Classic (non-RT) signal frame laid out on the user stack. */
struct sigframe {
	struct sigcontext sc;		/* saved integer registers + oldmask */
	unsigned long extramask[_NSIG_WORDS-1];	/* remaining mask words */
	unsigned long retcode;		/* trampoline slot (swi sigreturn) */
	struct aux_sigframe aux __attribute__((aligned(8)));
};
254 | |||
/* RT signal frame: siginfo + ucontext as mandated by POSIX RT signals. */
struct rt_sigframe {
	struct siginfo __user *pinfo;	/* points at info below (handler ABI) */
	void __user *puc;		/* points at uc below (handler ABI) */
	struct siginfo info;
	struct ucontext uc;
	unsigned long retcode;		/* trampoline slot (swi rt_sigreturn) */
	struct aux_sigframe aux __attribute__((aligned(8)));
};
263 | |||
/*
 * Reload all integer registers (and coprocessor state) from a user
 * sigcontext into *regs, validating that the result is something a
 * user task is allowed to run with.  Returns 0 on success.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
		   struct aux_sigframe __user *aux)
{
	int err = 0;

	__get_user_error(regs->ARM_r0, &sc->arm_r0, err);
	__get_user_error(regs->ARM_r1, &sc->arm_r1, err);
	__get_user_error(regs->ARM_r2, &sc->arm_r2, err);
	__get_user_error(regs->ARM_r3, &sc->arm_r3, err);
	__get_user_error(regs->ARM_r4, &sc->arm_r4, err);
	__get_user_error(regs->ARM_r5, &sc->arm_r5, err);
	__get_user_error(regs->ARM_r6, &sc->arm_r6, err);
	__get_user_error(regs->ARM_r7, &sc->arm_r7, err);
	__get_user_error(regs->ARM_r8, &sc->arm_r8, err);
	__get_user_error(regs->ARM_r9, &sc->arm_r9, err);
	__get_user_error(regs->ARM_r10, &sc->arm_r10, err);
	__get_user_error(regs->ARM_fp, &sc->arm_fp, err);
	__get_user_error(regs->ARM_ip, &sc->arm_ip, err);
	__get_user_error(regs->ARM_sp, &sc->arm_sp, err);
	__get_user_error(regs->ARM_lr, &sc->arm_lr, err);
	__get_user_error(regs->ARM_pc, &sc->arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);

	/* refuse register values (e.g. CPSR) the user may not set */
	err |= !valid_user_regs(regs);

#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_restore_state(&aux->vfp);
#endif

	return err;
}
301 | |||
/*
 * Return from a classic signal handler: validate and pop the sigframe
 * that setup_frame() placed on the user stack, restore the blocked
 * mask and registers, and resume the interrupted context.  On any
 * inconsistency the task gets SIGSEGV instead.
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;
	/* reassemble the full mask from oldmask + extramask words */
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
	        && __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc, &frame->aux))
		goto badframe;

	/* Send SIGTRAP if we're single-stepping */
	if (current->ptrace & PT_SINGLESTEP) {
		ptrace_cancel_bpt(current);
		send_sig(SIGTRAP, current, 1);
	}

	/* syscall return value must be the restored r0, not a new one */
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
349 | |||
/*
 * Return from an RT signal handler: like sys_sigreturn() but the mask
 * and registers come from the ucontext inside the rt_sigframe, and the
 * altstack settings are restored as well.
 */
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
		goto badframe;

	/* restore the alternate signal stack settings */
	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
		goto badframe;

	/* Send SIGTRAP if we're single-stepping */
	if (current->ptrace & PT_SINGLESTEP) {
		ptrace_cancel_bpt(current);
		send_sig(SIGTRAP, current, 1);
	}

	/* syscall return value must be the restored r0, not a new one */
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
397 | |||
/*
 * Save the interrupted register state, fault information, the first
 * word of the blocked mask, and (when in use) coprocessor state into
 * a user sigcontext.  Returns 0 on success, non-zero on fault.
 */
static int
setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
		 struct pt_regs *regs, unsigned long mask)
{
	int err = 0;

	__put_user_error(regs->ARM_r0, &sc->arm_r0, err);
	__put_user_error(regs->ARM_r1, &sc->arm_r1, err);
	__put_user_error(regs->ARM_r2, &sc->arm_r2, err);
	__put_user_error(regs->ARM_r3, &sc->arm_r3, err);
	__put_user_error(regs->ARM_r4, &sc->arm_r4, err);
	__put_user_error(regs->ARM_r5, &sc->arm_r5, err);
	__put_user_error(regs->ARM_r6, &sc->arm_r6, err);
	__put_user_error(regs->ARM_r7, &sc->arm_r7, err);
	__put_user_error(regs->ARM_r8, &sc->arm_r8, err);
	__put_user_error(regs->ARM_r9, &sc->arm_r9, err);
	__put_user_error(regs->ARM_r10, &sc->arm_r10, err);
	__put_user_error(regs->ARM_fp, &sc->arm_fp, err);
	__put_user_error(regs->ARM_ip, &sc->arm_ip, err);
	__put_user_error(regs->ARM_sp, &sc->arm_sp, err);
	__put_user_error(regs->ARM_lr, &sc->arm_lr, err);
	__put_user_error(regs->ARM_pc, &sc->arm_pc, err);
	__put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);

	/* fault details from the last exception, for the handler's benefit */
	__put_user_error(current->thread.trap_no, &sc->trap_no, err);
	__put_user_error(current->thread.error_code, &sc->error_code, err);
	__put_user_error(current->thread.address, &sc->fault_address, err);
	__put_user_error(mask, &sc->oldmask, err);

#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_save_state(&aux->vfp);
#endif

	return err;
}
438 | |||
439 | static inline void __user * | ||
440 | get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) | ||
441 | { | ||
442 | unsigned long sp = regs->ARM_sp; | ||
443 | void __user *frame; | ||
444 | |||
445 | /* | ||
446 | * This is the X/Open sanctioned signal stack switching. | ||
447 | */ | ||
448 | if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) | ||
449 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
450 | |||
451 | /* | ||
452 | * ATPCS B01 mandates 8-byte alignment | ||
453 | */ | ||
454 | frame = (void __user *)((sp - framesize) & ~7); | ||
455 | |||
456 | /* | ||
457 | * Check that we can actually write to the signal frame. | ||
458 | */ | ||
459 | if (!access_ok(VERIFY_WRITE, frame, framesize)) | ||
460 | frame = NULL; | ||
461 | |||
462 | return frame; | ||
463 | } | ||
464 | |||
/*
 * Point the saved registers at the signal handler: arrange for the
 * handler to be entered with the signal number in r0, sp at the new
 * frame, and lr set to a return trampoline (either the caller-supplied
 * SA_RESTORER or a retcode written into the frame's *rc slot).
 * Returns 0 on success, 1 if the trampoline could not be written.
 */
static int
setup_return(struct pt_regs *regs, struct k_sigaction *ka,
	     unsigned long __user *rc, void __user *frame, int usig)
{
	unsigned long handler = (unsigned long)ka->sa.sa_handler;
	unsigned long retcode;
	int thumb = 0;
	/* clear the flag bits of CPSR for handler entry */
	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		if (thumb)
			cpsr |= PSR_T_BIT;
		else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		/* retcodes[] index: bit 0 = thumb, bit 1 = rt signal */
		unsigned int idx = thumb;

		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 2;

		if (__put_user(retcodes[idx], rc))
			return 1;

		/*
		 * Ensure that the instruction cache sees
		 * the return code written onto the stack.
		 */
		flush_icache_range((unsigned long)rc,
				   (unsigned long)(rc + 1));

		/* set bit 0 so the return lands back in Thumb state */
		retcode = ((unsigned long)rc) + thumb;
	}

	regs->ARM_r0 = usig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	regs->ARM_cpsr = cpsr;

	return 0;
}
524 | |||
/*
 * Build a classic (non-RT) signal frame on the user stack and redirect
 * execution to the handler.  Returns 0 on success, non-zero on failure
 * (caller then forces SIGSEGV).
 */
static int
setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
{
	struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	int err = 0;

	if (!frame)
		return 1;

	/* first mask word travels in the sigcontext itself */
	err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);

	if (_NSIG_WORDS > 1) {
		err |= __copy_to_user(frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
	}

	if (err == 0)
		err = setup_return(regs, ka, &frame->retcode, frame, usig);

	return err;
}
546 | |||
/*
 * Build an RT signal frame (siginfo + ucontext) on the user stack and
 * redirect execution to the handler.  Returns 0 on success, non-zero
 * on failure (caller then forces SIGSEGV).
 */
static int
setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
	stack_t stack;
	int err = 0;

	if (!frame)
		return 1;

	/* the handler ABI passes pointers to the info/uc copies in the frame */
	__put_user_error(&frame->info, &frame->pinfo, err);
	__put_user_error(&frame->uc, &frame->puc, err);
	err |= copy_siginfo_to_user(&frame->info, info);

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	/* record the current altstack configuration in the ucontext */
	memset(&stack, 0, sizeof(stack));
	stack.ss_sp = (void __user *)current->sas_ss_sp;
	stack.ss_flags = sas_ss_flags(regs->ARM_sp);
	stack.ss_size = current->sas_ss_size;
	err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));

	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0)
		err = setup_return(regs, ka, &frame->retcode, frame, usig);

	if (err == 0) {
		/*
		 * For realtime signals we must also set the second and third
		 * arguments for the signal handler.
		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
		 */
		regs->ARM_r1 = (unsigned long)&frame->info;
		regs->ARM_r2 = (unsigned long)&frame->uc;
	}

	return err;
}
590 | |||
591 | static inline void restart_syscall(struct pt_regs *regs) | ||
592 | { | ||
593 | regs->ARM_r0 = regs->ARM_ORIG_r0; | ||
594 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * OK, we're invoking a handler | ||
599 | */ | ||
/*
 * OK, we're invoking a handler
 *
 * Applies syscall-restart policy to the saved r0, translates the signal
 * number through the exec domain, builds the (rt_)sigframe, and updates
 * the blocked mask.  On any setup failure the task is killed with a
 * forced SIGSEGV.
 */
static void
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset,
	      struct pt_regs * regs, int syscall)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;
	int usig = sig;
	int ret;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		switch (regs->ARM_r0) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ARM_r0 = -EINTR;
			break;
		case -ERESTARTSYS:
			/* SA_RESTART handlers re-issue the syscall, others see -EINTR */
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ARM_r0 = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			restart_syscall(regs);
		}
	}

	/*
	 * translate the signal
	 */
	if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
		usig = thread->exec_domain->signal_invmap[usig];

	/*
	 * Set up the stack frame
	 */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(usig, ka, info, oldset, regs);
	else
		ret = setup_frame(usig, ka, oldset, regs);

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(regs);

	/*
	 * Block the signal if we were unsuccessful.
	 */
	if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&tsk->sighand->siglock);
		sigorsets(&tsk->blocked, &tsk->blocked,
			  &ka->sa.sa_mask);
		sigaddset(&tsk->blocked, sig);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);
	}

	if (ret == 0)
		return;

	force_sigsegv(sig, tsk);
}
666 | |||
667 | /* | ||
668 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
669 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
670 | * mistake. | ||
671 | * | ||
672 | * Note that we go through the signals twice: once to check the signals that | ||
673 | * the kernel can handle, and then we build all the user-level signal handling | ||
674 | * stack-frames in one go after that. | ||
675 | */ | ||
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 *
 * Returns 1 when a signal was delivered to a handler, 0 otherwise.
 */
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 0;

	/* a frozen task delivers no signals, but may still need a restart */
	if (try_to_freeze(0))
		goto no_signal;

	/* remove any ptrace breakpoint before touching user state */
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_cancel_bpt(current);

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &ka, &info, oldset, regs, syscall);
		if (current->ptrace & PT_SINGLESTEP)
			ptrace_set_bpt(current);
		return 1;
	}

 no_signal:
	/*
	 * No signal to deliver to the process - restart the syscall.
	 */
	if (syscall) {
		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
			if (thumb_mode(regs)) {
				/* Thumb: re-issue via r7 = __NR_restart_syscall */
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 2;
			} else {
				/*
				 * ARM mode: push a tiny trampoline
				 * (saved pc, "swi restart_syscall",
				 * "ldr pc, [sp], #12") onto the user
				 * stack and point the PC at the swi.
				 */
				u32 __user *usp;

				regs->ARM_sp -= 12;
				usp = (u32 __user *)regs->ARM_sp;

				put_user(regs->ARM_pc, &usp[0]);
				/* swi __NR_restart_syscall */
				put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
				/* ldr	pc, [sp], #12 */
				put_user(0xe49df00c, &usp[2]);

				/* the CPU must not fetch stale instructions */
				flush_icache_range((unsigned long)usp,
						   (unsigned long)(usp + 3));

				regs->ARM_pc = regs->ARM_sp + 4;
			}
		}
		if (regs->ARM_r0 == -ERESTARTNOHAND ||
		    regs->ARM_r0 == -ERESTARTSYS ||
		    regs->ARM_r0 == -ERESTARTNOINTR) {
			restart_syscall(regs);
		}
	}
	/* re-arm the ptrace breakpoint we removed above */
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_set_bpt(current);
	return 0;
}
742 | |||
743 | asmlinkage void | ||
744 | do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) | ||
745 | { | ||
746 | if (thread_flags & _TIF_SIGPENDING) | ||
747 | do_signal(¤t->blocked, regs, syscall); | ||
748 | } | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c new file mode 100644 index 000000000000..ecc8c3332408 --- /dev/null +++ b/arch/arm/kernel/smp.c | |||
@@ -0,0 +1,396 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/smp.c | ||
3 | * | ||
4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/cache.h> | ||
17 | #include <linux/profile.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/smp.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | |||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | #include <asm/cpu.h> | ||
27 | #include <asm/processor.h> | ||
28 | #include <asm/tlbflush.h> | ||
29 | #include <asm/ptrace.h> | ||
30 | |||
/*
 * bitmask of present and online CPUs.
 * The present bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;
38 | |||
/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 *
 * Each CPU has its own ipi_data; ->bits is a pending-message bitmask
 * (one bit per enum ipi_msg_type) protected by ->lock, and ->ipi_count
 * counts received IPIs for /proc reporting via show_ipi_list().
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};
52 | |||
/* Message types carried in ipi_data.bits (bit index == enum value). */
enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
59 | |||
/*
 * Descriptor for one cross-call: the function to run, its argument,
 * and the masks of CPUs that have not yet picked up (pending) or
 * finished (unfinished) the call.
 */
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t unfinished;
};

/* Current cross-call in flight; access serialised by the lock below. */
static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);
70 | |||
/*
 * Bring a secondary CPU online: fork an idle thread for it, then ask
 * the platform (boot_secondary) to start the processor.
 * Returns 0 on success or a negative errno.
 */
int __init __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	int ret;

	/*
	 * Spawn a new process manually.  Grab a pointer to
	 * its task struct so we can mess with it
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret) {
		printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
99 | |||
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __init smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	/* snapshot the calibrated delay-loop value for this CPU */
	cpu_info->loops_per_jiffy = loops_per_jiffy;
}
110 | |||
/*
 * Final SMP bringup report: sum each online CPU's loops_per_jiffy and
 * print the aggregate BogoMIPS figure.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
125 | |||
/* Mark the boot processor present and online before others start. */
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_set(cpu, cpu_present_mask);
	cpu_set(cpu, cpu_online_map);
}
133 | |||
/*
 * Post @msg into each target CPU's pending-bits word, then trigger the
 * platform's cross-CPU interrupt.  Runs with local IRQs disabled so the
 * message posting and the hardware kick are not interleaved with a
 * local IPI.
 */
static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu_mask(cpu, callmap) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(callmap);

	local_irq_restore(flags);
}
156 | |||
157 | /* | ||
158 | * You must not call this function with disabled interrupts, from a | ||
159 | * hardware interrupt handler, nor from a bottom half handler. | ||
160 | */ | ||
161 | int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry, | ||
162 | int wait, cpumask_t callmap) | ||
163 | { | ||
164 | struct smp_call_struct data; | ||
165 | unsigned long timeout; | ||
166 | int ret = 0; | ||
167 | |||
168 | data.func = func; | ||
169 | data.info = info; | ||
170 | data.wait = wait; | ||
171 | |||
172 | cpu_clear(smp_processor_id(), callmap); | ||
173 | if (cpus_empty(callmap)) | ||
174 | goto out; | ||
175 | |||
176 | data.pending = callmap; | ||
177 | if (wait) | ||
178 | data.unfinished = callmap; | ||
179 | |||
180 | /* | ||
181 | * try to get the mutex on smp_call_function_data | ||
182 | */ | ||
183 | spin_lock(&smp_call_function_lock); | ||
184 | smp_call_function_data = &data; | ||
185 | |||
186 | send_ipi_message(callmap, IPI_CALL_FUNC); | ||
187 | |||
188 | timeout = jiffies + HZ; | ||
189 | while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) | ||
190 | barrier(); | ||
191 | |||
192 | /* | ||
193 | * did we time out? | ||
194 | */ | ||
195 | if (!cpus_empty(data.pending)) { | ||
196 | /* | ||
197 | * this may be causing our panic - report it | ||
198 | */ | ||
199 | printk(KERN_CRIT | ||
200 | "CPU%u: smp_call_function timeout for %p(%p)\n" | ||
201 | " callmap %lx pending %lx, %swait\n", | ||
202 | smp_processor_id(), func, info, callmap, data.pending, | ||
203 | wait ? "" : "no "); | ||
204 | |||
205 | /* | ||
206 | * TRACE | ||
207 | */ | ||
208 | timeout = jiffies + (5 * HZ); | ||
209 | while (!cpus_empty(data.pending) && time_before(jiffies, timeout)) | ||
210 | barrier(); | ||
211 | |||
212 | if (cpus_empty(data.pending)) | ||
213 | printk(KERN_CRIT " RESOLVED\n"); | ||
214 | else | ||
215 | printk(KERN_CRIT " STILL STUCK\n"); | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * whatever happened, we're done with the data, so release it | ||
220 | */ | ||
221 | smp_call_function_data = NULL; | ||
222 | spin_unlock(&smp_call_function_lock); | ||
223 | |||
224 | if (!cpus_empty(data.pending)) { | ||
225 | ret = -ETIMEDOUT; | ||
226 | goto out; | ||
227 | } | ||
228 | |||
229 | if (wait) | ||
230 | while (!cpus_empty(data.unfinished)) | ||
231 | barrier(); | ||
232 | out: | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
/* Convenience wrapper: cross-call every currently-online CPU. */
int smp_call_function(void (*func)(void *info), void *info, int retry,
		      int wait)
{
	return smp_call_function_on_cpu(func, info, retry, wait,
					cpu_online_map);
}
243 | |||
/* /proc/interrupts helper: one IPI-count column per online CPU. */
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_online_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}
255 | |||
/*
 * IPI_TIMER handler: account a timer tick on this (secondary) CPU.
 * Wrapped in irq_enter()/irq_exit() so it is treated as interrupt
 * context for accounting and softirq purposes.
 */
static void ipi_timer(struct pt_regs *regs)
{
	int user = user_mode(regs);

	irq_enter();
	profile_tick(CPU_PROFILING, regs);
	update_process_times(user);
	irq_exit();
}
265 | |||
/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data
 * (by clearing our bit in ->pending) before running func; ->unfinished
 * is cleared only after func returns, for wait-mode callers.
 */
static void ipi_call_function(unsigned int cpu)
{
	struct smp_call_struct *data = smp_call_function_data;
	void (*func)(void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	cpu_clear(cpu, data->pending);

	func(info);

	if (wait)
		cpu_clear(cpu, data->unfinished);
}
286 | |||
/* Serialises the stop announcement so backtraces don't interleave. */
static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 *
 * Logs, removes this CPU from the online map, masks interrupts and
 * spins forever.  Never returns.
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
307 | |||
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 *
 * Drains this CPU's pending-message bits under the lock, then
 * dispatches each set bit (lowest first) until none remain; loops in
 * case new messages arrive while handling the previous batch.
 */
void do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/* isolate lowest set bit, then convert to bit index */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer(regs);
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				ipi_call_function(cpu);
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}
}
370 | |||
371 | void smp_send_reschedule(int cpu) | ||
372 | { | ||
373 | send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); | ||
374 | } | ||
375 | |||
376 | void smp_send_timer(void) | ||
377 | { | ||
378 | cpumask_t mask = cpu_online_map; | ||
379 | cpu_clear(smp_processor_id(), mask); | ||
380 | send_ipi_message(mask, IPI_TIMER); | ||
381 | } | ||
382 | |||
383 | void smp_send_stop(void) | ||
384 | { | ||
385 | cpumask_t mask = cpu_online_map; | ||
386 | cpu_clear(smp_processor_id(), mask); | ||
387 | send_ipi_message(mask, IPI_CPU_STOP); | ||
388 | } | ||
389 | |||
/*
 * not supported here - profiling multiplier changes are rejected.
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c new file mode 100644 index 000000000000..c41dc605f121 --- /dev/null +++ b/arch/arm/kernel/sys_arm.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/sys_arm.c | ||
3 | * | ||
4 | * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c | ||
5 | * Copyright (C) 1995, 1996 Russell King. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This file contains various random system calls that | ||
12 | * have a non-standard calling sequence on the Linux/arm | ||
13 | * platform. | ||
14 | */ | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/sem.h> | ||
21 | #include <linux/msg.h> | ||
22 | #include <linux/shm.h> | ||
23 | #include <linux/stat.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/file.h> | ||
28 | #include <linux/utsname.h> | ||
29 | |||
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/ipc.h> | ||
32 | |||
33 | extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, | ||
34 | unsigned long new_len, unsigned long flags, | ||
35 | unsigned long new_addr); | ||
36 | |||
37 | /* | ||
38 | * sys_pipe() is the normal C calling standard for creating | ||
39 | * a pipe. It's not the way unix traditionally does this, though. | ||
40 | */ | ||
41 | asmlinkage int sys_pipe(unsigned long __user *fildes) | ||
42 | { | ||
43 | int fd[2]; | ||
44 | int error; | ||
45 | |||
46 | error = do_pipe(fd); | ||
47 | if (!error) { | ||
48 | if (copy_to_user(fildes, fd, 2*sizeof(int))) | ||
49 | error = -EFAULT; | ||
50 | } | ||
51 | return error; | ||
52 | } | ||
53 | |||
/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define MIN_MAP_ADDR	(PAGE_SIZE)

/*
 * common code for old and new mmaps
 *
 * Rejects MAP_FIXED requests below MIN_MAP_ADDR, resolves the fd for
 * file-backed mappings, and calls do_mmap_pgoff() under mmap_sem.
 * Returns the mapped address or a negative errno.
 */
inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EINVAL;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
		goto out;

	error = -EBADF;
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
91 | |||
/* Argument block for the legacy single-pointer mmap() syscall. */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

/*
 * Legacy mmap(): arguments arrive in a user-space struct rather than
 * registers.  The byte offset must be page-aligned; it is converted to
 * a page offset for do_mmap2().
 */
asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	int error = -EFAULT;
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}
117 | |||
/*
 * ARM mremap(): identical to the generic call except that fixed
 * destinations below MIN_MAP_ADDR are refused (vector-page safety).
 */
asmlinkage unsigned long
sys_arm_mremap(unsigned long addr, unsigned long old_len,
	       unsigned long new_len, unsigned long flags,
	       unsigned long new_addr)
{
	unsigned long ret = -EINVAL;

	if (flags & MREMAP_FIXED && new_addr < MIN_MAP_ADDR)
		goto out;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);

out:
	return ret;
}
135 | |||
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls.
 */

/* Argument block for the legacy single-pointer select() syscall. */
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};
146 | |||
147 | asmlinkage int old_select(struct sel_arg_struct __user *arg) | ||
148 | { | ||
149 | struct sel_arg_struct a; | ||
150 | |||
151 | if (copy_from_user(&a, arg, sizeof(a))) | ||
152 | return -EFAULT; | ||
153 | /* sys_select() does the appropriate kernel locking */ | ||
154 | return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); | ||
155 | } | ||
156 | |||
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * The low 16 bits of @call select the operation; the high 16 bits are
 * a version number used by MSGRCV and SHMAT for old-ABI compatibility.
 */
asmlinkage int sys_ipc(uint call, int first, int second, int third,
		       void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMOP:
		/* plain semop is semtimedop with no timeout */
		return sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
				      (const struct timespec __user *)fifth);

	case SEMGET:
		return sys_semget (first, second, third);
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			return -EINVAL;
		/* ptr points at a userspace pointer; fetch it into the union */
		if (get_user(fourth.__pad, (void __user * __user *) ptr))
			return -EFAULT;
		return sys_semctl (first, second, third, fourth);
	}

	case MSGSND:
		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
				  second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			/* old ABI: msgp and msgtyp packed in an ipc_kludge */
			struct ipc_kludge tmp;
			if (!ptr)
				return -EINVAL;
			if (copy_from_user(&tmp,(struct ipc_kludge __user *)ptr,
					   sizeof (tmp)))
				return -EFAULT;
			return sys_msgrcv (first, tmp.msgp, second,
					   tmp.msgtyp, third);
		}
		default:
			return sys_msgrcv (first,
					   (struct msgbuf __user *) ptr,
					   second, fifth, third);
		}
	case MSGGET:
		return sys_msgget ((key_t) first, second);
	case MSGCTL:
		return sys_msgctl(first, second, (struct msqid_ds __user *)ptr);

	case SHMAT:
		switch (version) {
		default: {
			ulong raddr;
			ret = do_shmat(first, (char __user *)ptr, second, &raddr);
			if (ret)
				return ret;
			/* third doubles as the user address to store raddr */
			return put_user(raddr, (ulong __user *)third);
		}
		case 1: /* Of course, we don't support iBCS2! */
			return -EINVAL;
		}
	case SHMDT:
		return sys_shmdt ((char __user *)ptr);
	case SHMGET:
		return sys_shmget (first, second, third);
	case SHMCTL:
		return sys_shmctl (first, second,
				   (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;
	}
}
236 | |||
237 | asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg, | ||
238 | unsigned long __user *addr) | ||
239 | { | ||
240 | unsigned long ret; | ||
241 | long err; | ||
242 | |||
243 | err = do_shmat(shmid, shmaddr, shmflg, &ret); | ||
244 | if (err == 0) | ||
245 | err = put_user(ret, addr); | ||
246 | return err; | ||
247 | } | ||
248 | |||
/* Fork a new task - this creates a new program thread.
 * This is called indirectly via a small wrapper
 */
asmlinkage int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
}
256 | |||
/* Clone a task - this clones the calling program thread.
 * This is called indirectly via a small wrapper.
 * A zero newsp means "share the parent's current stack pointer".
 * tls_val is accepted in the register slot but not used here.
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 int __user *parent_tidptr, int tls_val,
			 int __user *child_tidptr, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->ARM_sp;

	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
}
269 | |||
/* vfork(): fork sharing the address space, parent blocked until exec/exit. */
asmlinkage int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
}
274 | |||
/* sys_execve() executes a new program.
 * This is called indirectly via a small wrapper
 *
 * Copies the filename in from userspace, then delegates to do_execve().
 */
asmlinkage int sys_execve(char __user *filenamei, char __user * __user *argv,
			  char __user * __user *envp, struct pt_regs *regs)
{
	int error;
	char * filename;

	filename = getname(filenamei);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}
293 | |||
/*
 * In-kernel execve() for kernel threads: builds a pt_regs on the local
 * stack, performs the exec, then hand-crafts a switch to userspace by
 * copying the regs to the top of the thread's kernel stack and jumping
 * to ret_to_user.  On success this never returns to the caller.
 */
long execve(const char *filename, char **argv, char **envp)
{
	struct pt_regs regs;
	int ret;

	memset(&regs, 0, sizeof(struct pt_regs));
	ret = do_execve((char *)filename, (char __user * __user *)argv,
			(char __user * __user *)envp, &regs);
	if (ret < 0)
		goto out;

	/*
	 * Save argc to the register structure for userspace.
	 */
	regs.ARM_r0 = ret;

	/*
	 * We were successful.  We won't be returning to our caller, but
	 * instead to user space by manipulating the kernel stack.
	 */
	asm(	"add	r0, %0, %1\n\t"
		"mov	r1, %2\n\t"
		"mov	r2, %3\n\t"
		"bl	memmove\n\t"	/* copy regs to top of stack */
		"mov	r8, #0\n\t"	/* not a syscall */
		"mov	r9, %0\n\t"	/* thread structure */
		"mov	sp, r0\n\t"	/* reposition stack pointer */
		"b	ret_to_user"
		:
		: "r" (current_thread_info()),
		  "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
		  "r" (&regs),
		  "Ir" (sizeof(regs))
		: "r0", "r1", "r2", "r3", "ip", "memory");

 out:
	return ret;
}
EXPORT_SYMBOL(execve);
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c new file mode 100644 index 000000000000..c232f24f4a60 --- /dev/null +++ b/arch/arm/kernel/time.c | |||
@@ -0,0 +1,402 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/time.c | ||
3 | * | ||
4 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | ||
5 | * Modifications for ARM (C) 1994-2001 Russell King | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This file contains the ARM-specific time handling details: | ||
12 | * reading the RTC at bootup, etc... | ||
13 | * | ||
14 | * 1994-07-02 Alan Modra | ||
15 | * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime | ||
16 | * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 | ||
17 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | ||
18 | */ | ||
19 | #include <linux/config.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/time.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/timex.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/profile.h> | ||
29 | #include <linux/sysdev.h> | ||
30 | #include <linux/timer.h> | ||
31 | |||
32 | #include <asm/hardware.h> | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/leds.h> | ||
36 | #include <asm/thread_info.h> | ||
37 | #include <asm/mach/time.h> | ||
38 | |||
/* 64-bit tick counter; exported for modules. */
u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * Our system timer.
 */
struct sys_timer *system_timer;

extern unsigned long wall_jiffies;

/* this needs a better home */
DEFINE_SPINLOCK(rtc_lock);

#ifdef CONFIG_SA1100_RTC_MODULE
EXPORT_SYMBOL(rtc_lock);
#endif

/* change this if you have some constant time drift */
#define USECS_PER_JIFFY	(1000000/HZ)
59 | |||
#ifdef CONFIG_SMP
/*
 * Profiling PC: if the interrupted PC is inside the spinlock code,
 * walk one frame up (via the saved fp) so lock time is charged to
 * the caller rather than the lock primitive.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long fp, pc = instruction_pointer(regs);

	if (in_lock_functions(pc)) {
		fp = regs->ARM_fp;
		pc = pc_pointer(((unsigned long *)fp)[-1]);
	}

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
74 | |||
/*
 * hook for setting the RTC's idea of the current time.
 * Platforms install their own routine; NULL means "no RTC".
 */
int (*set_rtc)(void);

/* Fallback for platforms without a sub-jiffy timer offset. */
static unsigned long dummy_gettimeoffset(void)
{
	return 0;
}
84 | |||
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.  Sub-architecture
 * implementations can override this.
 * Jiffy-resolution only: nanoseconds per tick times the tick count.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
94 | |||
95 | static unsigned long next_rtc_update; | ||
96 | |||
97 | /* | ||
98 | * If we have an externally synchronized linux clock, then update | ||
99 | * CMOS clock accordingly every ~11 minutes. set_rtc() has to be | ||
100 | * called as close as possible to 500 ms before the new second | ||
101 | * starts. | ||
102 | */ | ||
103 | static inline void do_set_rtc(void) | ||
104 | { | ||
105 | if (time_status & STA_UNSYNC || set_rtc == NULL) | ||
106 | return; | ||
107 | |||
108 | if (next_rtc_update && | ||
109 | time_before((unsigned long)xtime.tv_sec, next_rtc_update)) | ||
110 | return; | ||
111 | |||
112 | if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && | ||
113 | xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) | ||
114 | return; | ||
115 | |||
116 | if (set_rtc()) | ||
117 | /* | ||
118 | * rtc update failed. Try again in 60s | ||
119 | */ | ||
120 | next_rtc_update = xtime.tv_sec + 60; | ||
121 | else | ||
122 | next_rtc_update = xtime.tv_sec + 660; | ||
123 | } | ||
124 | |||
#ifdef CONFIG_LEDS

/* Default no-op handler, used until a platform claims the LEDs. */
static void dummy_leds_event(led_event_t evt)
{
}

void (*leds_event)(led_event_t) = dummy_leds_event;

/* Maps a sysfs name to its on/off led_event_t pair. */
struct leds_evt_name {
	const char	name[8];
	int		on;
	int		off;
};

static const struct leds_evt_name evt_names[] = {
	{ "amber", led_amber_on, led_amber_off },
	{ "blue",  led_blue_on,  led_blue_off  },
	{ "green", led_green_on, led_green_off },
	{ "red",   led_red_on,   led_red_off   },
};
145 | |||
/*
 * sysfs "event" store: accepts "claim", "release", or "<color> on" /
 * "<color> off" for the colors in evt_names[].  Returns the consumed
 * size on success, -EINVAL on unrecognised input.
 */
static ssize_t leds_store(struct sys_device *dev, const char *buf, size_t size)
{
	int ret = -EINVAL, len = strcspn(buf, " ");

	/* presumably trims a trailing newline when no space follows
	 * the keyword - NOTE(review): confirm against sysfs callers */
	if (len > 0 && buf[len] == '\0')
		len--;

	if (strncmp(buf, "claim", len) == 0) {
		leds_event(led_claim);
		ret = size;
	} else if (strncmp(buf, "release", len) == 0) {
		leds_event(led_release);
		ret = size;
	} else {
		int i;

		for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
			if (strlen(evt_names[i].name) != len ||
			    strncmp(buf, evt_names[i].name, len) != 0)
				continue;
			if (strncmp(buf+len, " on", 3) == 0) {
				leds_event(evt_names[i].on);
				ret = size;
			} else if (strncmp(buf+len, " off", 4) == 0) {
				leds_event(evt_names[i].off);
				ret = size;
			}
			break;
		}
	}
	return ret;
}

static SYSDEV_ATTR(event, 0200, NULL, leds_store);
180 | |||
/* Power-management hooks: pause, resume or halt the LED activity. */
static int leds_suspend(struct sys_device *dev, pm_message_t state)
{
	leds_event(led_stop);
	return 0;
}

static int leds_resume(struct sys_device *dev)
{
	leds_event(led_start);
	return 0;
}

static int leds_shutdown(struct sys_device *dev)
{
	leds_event(led_halted);
	return 0;
}

static struct sysdev_class leds_sysclass = {
	set_kset_name("leds"),
	.shutdown	= leds_shutdown,
	.suspend	= leds_suspend,
	.resume		= leds_resume,
};

static struct sys_device leds_device = {
	.id		= 0,
	.cls		= &leds_sysclass,
};
210 | |||
/* Register the LED sysdev class, device and "event" attribute. */
static int __init leds_init(void)
{
	int ret;
	ret = sysdev_class_register(&leds_sysclass);
	if (ret == 0)
		ret = sysdev_register(&leds_device);
	if (ret == 0)
		ret = sysdev_create_file(&leds_device, &attr_event);
	return ret;
}

device_initcall(leds_init);

EXPORT_SYMBOL(leds_event);
#endif
226 | |||
#ifdef CONFIG_LEDS_TIMER
/* Fire a led_timer event every 50 ticks (heartbeat-style blink). */
static inline void do_leds(void)
{
	static unsigned int count = 50;

	if (--count == 0) {
		count = 50;
		leds_event(led_timer);
	}
}
#else
#define	do_leds()
#endif
240 | |||
/*
 * Read the wall-clock time: xtime plus the sub-jiffy hardware offset
 * plus any ticks lost since the last wall-time update.  The seqlock
 * retry loop guarantees a consistent snapshot against timer updates.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec, lost;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = system_timer->offset();

		lost = jiffies - wall_jiffies;
		if (lost)
			usec += lost * USECS_PER_JIFFY;

		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* usec may have gone up a lot: be safe */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
270 | |||
/*
 * Set the wall-clock time, compensating for the interval already
 * elapsed in the current tick, and adjust wall_to_monotonic so the
 * monotonic clock is unaffected.  Returns 0 or -EINVAL.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * done, and then undo it!
	 */
	nsec -= system_timer->offset() * NSEC_PER_USEC;
	nsec -= (jiffies - wall_jiffies) * TICK_NSEC;

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
305 | |||
/**
 * save_time_delta - Save the offset between system time and RTC time
 * @delta: pointer to timespec to store delta
 * @rtc: pointer to timespec for current RTC time
 *
 * Return a delta between the system time and the RTC time, such
 * that system time can be restored later with restore_time_delta()
 */
void save_time_delta(struct timespec *delta, struct timespec *rtc)
{
	set_normalized_timespec(delta,
				xtime.tv_sec - rtc->tv_sec,
				xtime.tv_nsec - rtc->tv_nsec);
}
EXPORT_SYMBOL(save_time_delta);
321 | |||
/**
 * restore_time_delta - Restore the current system time
 * @delta: delta returned by save_time_delta()
 * @rtc: pointer to timespec for current RTC time
 *
 * Adds the saved delta back onto the (re-read) RTC time and installs
 * the result via do_settimeofday().
 */
void restore_time_delta(struct timespec *delta, struct timespec *rtc)
{
	struct timespec ts;

	set_normalized_timespec(&ts,
				delta->tv_sec + rtc->tv_sec,
				delta->tv_nsec + rtc->tv_nsec);

	do_settimeofday(&ts);
}
EXPORT_SYMBOL(restore_time_delta);
338 | |||
/*
 * Kernel system timer support.
 * Called from the platform timer interrupt: profiling, LED heartbeat,
 * RTC sync and the global tick.  On SMP, per-CPU process-time
 * accounting happens via the IPI_TIMER path instead.
 */
void timer_tick(struct pt_regs *regs)
{
	profile_tick(CPU_PROFILING, regs);
	do_leds();
	do_set_rtc();
	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
}
352 | |||
#ifdef CONFIG_PM
/* Forward suspend/resume to the platform timer's optional hooks. */
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	struct sys_timer *timer = container_of(dev, struct sys_timer, dev);

	if (timer->suspend != NULL)
		timer->suspend();

	return 0;
}

static int timer_resume(struct sys_device *dev)
{
	struct sys_timer *timer = container_of(dev, struct sys_timer, dev);

	if (timer->resume != NULL)
		timer->resume();

	return 0;
}
#else
#define timer_suspend NULL
#define timer_resume NULL
#endif
377 | |||
static struct sysdev_class timer_sysclass = {
	set_kset_name("timer"),
	.suspend	= timer_suspend,
	.resume		= timer_resume,
};

/* Expose the system timer as a sysdev so it takes part in PM. */
static int __init timer_init_sysfs(void)
{
	int ret = sysdev_class_register(&timer_sysclass);
	if (ret == 0) {
		system_timer->dev.cls = &timer_sysclass;
		ret = sysdev_register(&system_timer->dev);
	}
	return ret;
}

device_initcall(timer_init_sysfs);
395 | |||
/* Boot-time timer setup: default the offset hook, then start the timer. */
void __init time_init(void)
{
	if (system_timer->offset == NULL)
		system_timer->offset = dummy_gettimeoffset;
	system_timer->init();
}
402 | |||
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c new file mode 100644 index 000000000000..93dc4646cd7f --- /dev/null +++ b/arch/arm/kernel/traps.c | |||
@@ -0,0 +1,590 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/traps.c | ||
3 | * | ||
4 | * Copyright (C) 1995-2002 Russell King | ||
5 | * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * 'traps.c' handles hardware exceptions after we have saved some state in | ||
12 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably | ||
13 | * kill the offending process. | ||
14 | */ | ||
15 | #include <linux/config.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/signal.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/personality.h> | ||
20 | #include <linux/ptrace.h> | ||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/init.h> | ||
23 | |||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <asm/uaccess.h> | ||
29 | #include <asm/unistd.h> | ||
30 | #include <asm/traps.h> | ||
31 | |||
32 | #include "ptrace.h" | ||
33 | |||
/* Backtrace and page-table dump helpers implemented elsewhere in arch/arm. */
extern void c_backtrace (unsigned long fp, int pmode);
extern void show_pte(struct mm_struct *mm, unsigned long addr);

/*
 * Printable names for the 32 CPSR mode-field encodings, indexed by the
 * mode number: 26-bit modes first, then 32-bit.  "UKn" = reserved.
 */
const char *processor_modes[]=
{ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

/* Names for the exception classes reported by bad_mode(), indexed by reason. */
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
45 | |||
#ifdef CONFIG_DEBUG_USER
/* Bitmask of UDBG_* flags selecting which user-space faults get dumped. */
unsigned int user_debug;

/* Parse the "user_debug=<mask>" kernel command-line option. */
static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
56 | |||
/*
 * Print one backtrace frame: the called address and the caller's
 * address, with symbol names when CONFIG_KALLSYMS is available.
 */
void dump_backtrace_entry(unsigned long where, unsigned long from)
{
#ifdef CONFIG_KALLSYMS
	printk("[<%08lx>] ", where);
	print_symbol("(%s) ", where);
	printk("from [<%08lx>] ", from);
	print_symbol("(%s)\n", from);
#else
	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
}
68 | |||
69 | /* | ||
70 | * Stack pointers should always be within the kernels view of | ||
71 | * physical memory. If it is not there, then we can't dump | ||
72 | * out any information relating to the stack. | ||
73 | */ | ||
74 | static int verify_stack(unsigned long sp) | ||
75 | { | ||
76 | if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0)) | ||
77 | return -EFAULT; | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Dump out the contents of some memory nicely... | ||
84 | */ | ||
85 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) | ||
86 | { | ||
87 | unsigned long p = bottom & ~31; | ||
88 | mm_segment_t fs; | ||
89 | int i; | ||
90 | |||
91 | /* | ||
92 | * We need to switch to kernel mode so that we can use __get_user | ||
93 | * to safely read from kernel space. Note that we now dump the | ||
94 | * code first, just in case the backtrace kills us. | ||
95 | */ | ||
96 | fs = get_fs(); | ||
97 | set_fs(KERNEL_DS); | ||
98 | |||
99 | printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); | ||
100 | |||
101 | for (p = bottom & ~31; p < top;) { | ||
102 | printk("%04lx: ", p & 0xffff); | ||
103 | |||
104 | for (i = 0; i < 8; i++, p += 4) { | ||
105 | unsigned int val; | ||
106 | |||
107 | if (p < bottom || p >= top) | ||
108 | printk(" "); | ||
109 | else { | ||
110 | __get_user(val, (unsigned long *)p); | ||
111 | printk("%08x ", val); | ||
112 | } | ||
113 | } | ||
114 | printk ("\n"); | ||
115 | } | ||
116 | |||
117 | set_fs(fs); | ||
118 | } | ||
119 | |||
/*
 * Dump the instructions around the faulting PC: four instructions
 * before it and the faulting one itself (shown in parentheses).
 * Handles both ARM (32-bit) and Thumb (16-bit) instruction widths.
 */
static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;	/* hex digits per instruction */
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("Code: ");
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		if (thumb)
			bad = __get_user(val, &((u16 *)addr)[i]);
		else
			bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
		else {
			/* Stop at the first unreadable word. */
			printk("bad PC value.");
			break;
		}
	}
	printk("\n");

	set_fs(fs);
}
156 | |||
/*
 * Walk and print the frame-pointer chain for 'tsk' starting from the
 * registers in 'regs'.  Bails out (with a diagnostic) if there is no
 * frame pointer or it fails the stack sanity check.
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	unsigned int fp;
	int ok = 1;

	printk("Backtrace: ");
	fp = regs->ARM_fp;
	if (!fp) {
		printk("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		printk("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)(tsk->thread_info + 1))
		/* fp points below the task's stack area: warn but still try */
		printk("frame pointer underflow");
	printk("\n");

	if (ok)
		c_backtrace(fp, processor_mode(regs));
}
177 | |||
/*
 * Generic kernel API: dump the current call stack.  Only does anything
 * when CONFIG_DEBUG_ERRORS provides the assembly __backtrace helper.
 */
void dump_stack(void)
{
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
}

EXPORT_SYMBOL(dump_stack);
186 | |||
/*
 * Print a backtrace for 'tsk' (or the current task if NULL), using the
 * saved frame pointer for sleeping tasks or the live fp register for
 * the running one.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long fp;

	if (!tsk)
		tsk = current;

	if (tsk != current)
		fp = thread_saved_fp(tsk);
	else
		asm("mov%? %0, fp" : "=r" (fp));

	c_backtrace(fp, 0x10);
	/* prevent the tail-call optimisation invalidating fp above */
	barrier();
}
202 | |||
/* Serialises oops output from concurrent/reentrant faults. */
DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 *
 * Print a full oops report (registers, stack, backtrace, code) and
 * kill the current task with SIGSEGV.  Does not return.
 */
NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* distinguishes successive oopses in the log */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	print_modules();
	printk("CPU: %d\n", smp_processor_id());
	show_regs(regs);
	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
		tsk->comm, tsk->pid, tsk->thread_info + 1);

	/* Kernel-context oops: dump as much state as possible. */
	if (!user_mode(regs) || in_interrupt()) {
		/* 8192 is presumably THREAD_SIZE (top of kernel stack) — TODO confirm */
		dump_mem("Stack: ", regs->ARM_sp, 8192+(unsigned long)tsk->thread_info);
		dump_backtrace(regs, tsk);
		dump_instr(regs);
	}

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}
234 | |||
/*
 * Oops only if the fault happened in kernel mode; user-mode faults
 * are left for the caller to handle.
 */
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
242 | |||
/*
 * Deliver a fault: send the prepared signal to a user-mode task,
 * or oops via die() if the fault happened in kernel mode.
 */
static void notify_die(const char *str, struct pt_regs *regs, siginfo_t *info,
		unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		/* Record fault details for ptrace/core-dump consumers. */
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_info(info->si_signo, info, current);
	} else {
		die(str, regs, err);
	}
}
255 | |||
/* Chain of handlers consulted for undefined instructions (e.g. FP emulation). */
static LIST_HEAD(undef_hook);
static DEFINE_SPINLOCK(undef_lock);

/* Add an undefined-instruction hook to the chain. */
void register_undef_hook(struct undef_hook *hook)
{
	spin_lock_irq(&undef_lock);
	list_add(&hook->node, &undef_hook);
	spin_unlock_irq(&undef_lock);
}

/* Remove a previously registered undefined-instruction hook. */
void unregister_undef_hook(struct undef_hook *hook)
{
	spin_lock_irq(&undef_lock);
	list_del(&hook->node);
	spin_unlock_irq(&undef_lock);
}
272 | |||
/*
 * Undefined-instruction exception entry.  Gives registered hooks a
 * chance to handle (emulate) the instruction; otherwise raises SIGILL
 * on a user task or oopses in kernel mode.
 */
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
	unsigned int correction = thumb_mode(regs) ? 2 : 4;
	unsigned int instr;
	struct undef_hook *hook;
	siginfo_t info;
	void __user *pc;

	/*
	 * According to the ARM ARM, PC is 2 or 4 bytes ahead,
	 * depending whether we're in Thumb mode or not.
	 * Correct this offset.
	 */
	regs->ARM_pc -= correction;

	/*
	 * NOTE(review): the get_user() result is ignored; if the PC is
	 * unreadable, 'instr' is used uninitialised — confirm intended.
	 */
	pc = (void __user *)instruction_pointer(regs);
	if (thumb_mode(regs)) {
		get_user(instr, (u16 __user *)pc);
	} else {
		get_user(instr, (u32 __user *)pc);
	}

	/* Let any matching hook try to handle the instruction. */
	spin_lock_irq(&undef_lock);
	list_for_each_entry(hook, &undef_hook, node) {
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
			if (hook->fn(regs, instr) == 0) {
				spin_unlock_irq(&undef_lock);
				return;
			}
		}
	}
	spin_unlock_irq(&undef_lock);

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
			current->comm, current->pid, pc);
		dump_instr(regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = pc;

	notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
322 | |||
/*
 * FIQ taken without a registered handler: warn (unless the platform
 * asked to ignore FIQs) and carry on.
 */
asmlinkage void do_unexp_fiq (struct pt_regs *regs)
{
#ifndef CONFIG_IGNORE_FIQ
	printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
	printk("You may have a hardware problem...\n");
#endif
}
330 | |||
331 | /* | ||
332 | * bad_mode handles the impossible case in the vectors. If you see one of | ||
333 | * these, then it's extremely serious, and could mean you have buggy hardware. | ||
334 | * It never returns, and never tries to sync. We hope that we can at least | ||
335 | * dump out some state information... | ||
336 | */ | ||
337 | asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode) | ||
338 | { | ||
339 | console_verbose(); | ||
340 | |||
341 | printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n", | ||
342 | handler[reason], processor_modes[proc_mode]); | ||
343 | |||
344 | die("Oops - bad mode", regs, 0); | ||
345 | local_irq_disable(); | ||
346 | panic("bad mode"); | ||
347 | } | ||
348 | |||
/*
 * Handle a syscall number outside the ARM-private 0x9fxxxx range:
 * hand it to the task's exec domain if one is installed, otherwise
 * raise SIGILL (or oops in kernel mode) via notify_die().
 */
static int bad_syscall(int n, struct pt_regs *regs)
{
	struct thread_info *thread = current_thread_info();
	siginfo_t info;

	/* Non-Linux personalities may translate foreign syscall numbers. */
	if (current->personality != PER_LINUX && thread->exec_domain->handler) {
		thread->exec_domain->handler(n,regs);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
			current->pid, current->comm, n);
		dump_instr(regs);
	}
#endif

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLTRP;
	/* Point at the SWI instruction itself, not the return address. */
	info.si_addr  = (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4);

	notify_die("Oops - bad syscall", regs, &info, n, 0);

	return regs->ARM_r0;
}
377 | |||
/*
 * Flush the user cache range [start, end) for the cacheflush syscall,
 * clipped to the VMA containing 'start'.  'flags' must be zero; any
 * other value (or an inverted range) silently does nothing.
 *
 * NOTE(review): find_vma() is called without holding mmap_sem here —
 * confirm callers guarantee the mm cannot change concurrently.
 */
static inline void
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	struct vm_area_struct *vma;

	if (end < start || flags)
		return;

	vma = find_vma(current->active_mm, start);
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			start = vma->vm_start;
		if (end > vma->vm_end)
			end = vma->vm_end;

		flush_cache_user_range(vma, start, end);
	}
}
396 | |||
397 | /* | ||
398 | * Handle all unrecognised system calls. | ||
399 | * 0x9f0000 - 0x9fffff are some more esoteric system calls | ||
400 | */ | ||
401 | #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) | ||
402 | asmlinkage int arm_syscall(int no, struct pt_regs *regs) | ||
403 | { | ||
404 | struct thread_info *thread = current_thread_info(); | ||
405 | siginfo_t info; | ||
406 | |||
407 | if ((no >> 16) != 0x9f) | ||
408 | return bad_syscall(no, regs); | ||
409 | |||
410 | switch (no & 0xffff) { | ||
411 | case 0: /* branch through 0 */ | ||
412 | info.si_signo = SIGSEGV; | ||
413 | info.si_errno = 0; | ||
414 | info.si_code = SEGV_MAPERR; | ||
415 | info.si_addr = NULL; | ||
416 | |||
417 | notify_die("branch through zero", regs, &info, 0, 0); | ||
418 | return 0; | ||
419 | |||
420 | case NR(breakpoint): /* SWI BREAK_POINT */ | ||
421 | regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; | ||
422 | ptrace_break(current, regs); | ||
423 | return regs->ARM_r0; | ||
424 | |||
425 | /* | ||
426 | * Flush a region from virtual address 'r0' to virtual address 'r1' | ||
427 | * _exclusive_. There is no alignment requirement on either address; | ||
428 | * user space does not need to know the hardware cache layout. | ||
429 | * | ||
430 | * r2 contains flags. It should ALWAYS be passed as ZERO until it | ||
431 | * is defined to be something else. For now we ignore it, but may | ||
432 | * the fires of hell burn in your belly if you break this rule. ;) | ||
433 | * | ||
434 | * (at a later date, we may want to allow this call to not flush | ||
435 | * various aspects of the cache. Passing '0' will guarantee that | ||
436 | * everything necessary gets flushed to maintain consistency in | ||
437 | * the specified region). | ||
438 | */ | ||
439 | case NR(cacheflush): | ||
440 | do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2); | ||
441 | return 0; | ||
442 | |||
443 | case NR(usr26): | ||
444 | if (!(elf_hwcap & HWCAP_26BIT)) | ||
445 | break; | ||
446 | regs->ARM_cpsr &= ~MODE32_BIT; | ||
447 | return regs->ARM_r0; | ||
448 | |||
449 | case NR(usr32): | ||
450 | if (!(elf_hwcap & HWCAP_26BIT)) | ||
451 | break; | ||
452 | regs->ARM_cpsr |= MODE32_BIT; | ||
453 | return regs->ARM_r0; | ||
454 | |||
455 | case NR(set_tls): | ||
456 | thread->tp_value = regs->ARM_r0; | ||
457 | /* | ||
458 | * Our user accessible TLS ptr is located at 0xffff0ffc. | ||
459 | * On SMP read access to this address must raise a fault | ||
460 | * and be emulated from the data abort handler. | ||
461 | * m | ||
462 | */ | ||
463 | *((unsigned long *)0xffff0ffc) = thread->tp_value; | ||
464 | return 0; | ||
465 | |||
466 | default: | ||
467 | /* Calls 9f00xx..9f07ff are defined to return -ENOSYS | ||
468 | if not implemented, rather than raising SIGILL. This | ||
469 | way the calling program can gracefully determine whether | ||
470 | a feature is supported. */ | ||
471 | if (no <= 0x7ff) | ||
472 | return -ENOSYS; | ||
473 | break; | ||
474 | } | ||
475 | #ifdef CONFIG_DEBUG_USER | ||
476 | /* | ||
477 | * experience shows that these seem to indicate that | ||
478 | * something catastrophic has happened | ||
479 | */ | ||
480 | if (user_debug & UDBG_SYSCALL) { | ||
481 | printk("[%d] %s: arm syscall %d\n", | ||
482 | current->pid, current->comm, no); | ||
483 | dump_instr(regs); | ||
484 | if (user_mode(regs)) { | ||
485 | show_regs(regs); | ||
486 | c_backtrace(regs->ARM_fp, processor_mode(regs)); | ||
487 | } | ||
488 | } | ||
489 | #endif | ||
490 | info.si_signo = SIGILL; | ||
491 | info.si_errno = 0; | ||
492 | info.si_code = ILL_ILLTRP; | ||
493 | info.si_addr = (void __user *)instruction_pointer(regs) - | ||
494 | (thumb_mode(regs) ? 2 : 4); | ||
495 | |||
496 | notify_die("Oops - bad syscall(2)", regs, &info, no, 0); | ||
497 | return 0; | ||
498 | } | ||
499 | |||
/*
 * Link-time/run-time trap for xchg() called with an unsupported
 * operand size: report the caller and BUG out.
 */
void __bad_xchg(volatile void *ptr, int size)
{
	printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
		__builtin_return_address(0), ptr, size);
	BUG();
}
EXPORT_SYMBOL(__bad_xchg);
507 | |||
508 | /* | ||
509 | * A data abort trap was taken, but we did not handle the instruction. | ||
510 | * Try to abort the user program, or panic if it was the kernel. | ||
511 | */ | ||
512 | asmlinkage void | ||
513 | baddataabort(int code, unsigned long instr, struct pt_regs *regs) | ||
514 | { | ||
515 | unsigned long addr = instruction_pointer(regs); | ||
516 | siginfo_t info; | ||
517 | |||
518 | #ifdef CONFIG_DEBUG_USER | ||
519 | if (user_debug & UDBG_BADABORT) { | ||
520 | printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", | ||
521 | current->pid, current->comm, code, instr); | ||
522 | dump_instr(regs); | ||
523 | show_pte(current->mm, addr); | ||
524 | } | ||
525 | #endif | ||
526 | |||
527 | info.si_signo = SIGILL; | ||
528 | info.si_errno = 0; | ||
529 | info.si_code = ILL_ILLOPC; | ||
530 | info.si_addr = (void __user *)addr; | ||
531 | |||
532 | notify_die("unknown data abort code", regs, &info, instr, 0); | ||
533 | } | ||
534 | |||
/*
 * Backend for the BUG() macro: print the location then force a fault
 * by writing through a NULL pointer.  ('volatile void' is the old GCC
 * spelling of a non-returning function.)
 */
volatile void __bug(const char *file, int line, void *data)
{
	printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
	if (data)
		printk(" - extra data = %p", data);
	printk("\n");
	/* deliberate NULL dereference to kill the current context */
	*(int *)0 = 0;
}
EXPORT_SYMBOL(__bug);
544 | |||
/* Trap for readl/writel-style accessors on platforms lacking them. */
void __readwrite_bug(const char *fn)
{
	printk("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
551 | |||
/* Report a corrupt page-table entry at the given source location. */
void __pte_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pte %08lx.\n", file, line, val);
}

/* Report a corrupt page-middle-directory entry. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
}

/* Report a corrupt page-global-directory entry. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
}
566 | |||
/* Called from the libgcc division helpers on divide-by-zero in the kernel. */
asmlinkage void __div0(void)
{
	printk("Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);
573 | |||
/* Kernel-internal abort(): BUG out, and panic if BUG somehow returns. */
void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
582 | |||
/*
 * Boot-time trap setup: install the exception vectors (copied to the
 * high-vectors page at 0xffff0000 by __trap_init), make them visible
 * to the I-cache, and restrict the user domain to client access.
 */
void __init trap_init(void)
{
	extern void __trap_init(void);

	__trap_init();
	flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..a39c6a42d68a --- /dev/null +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,166 @@ | |||
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <linux/config.h>

OUTPUT_ARCH(arm)
ENTRY(stext)
/* 'jiffies' aliases the low 32 bits of jiffies_64; offset depends on endianness. */
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	. = TEXTADDR;
	.init : {			/* Init code and data		*/
		_stext = .;
		_sinittext = .;
			*(.init.text)
		_einittext = .;
		__proc_info_begin = .;	/* CPU support records (head.S scans these) */
			*(.proc.info)
		__proc_info_end = .;
		__arch_info_begin = .;	/* machine (board) records */
			*(.arch.info)
		__arch_info_end = .;
		__tagtable_begin = .;	/* ATAG parser table */
			*(.taglist)
		__tagtable_end = .;
		. = ALIGN(16);
		__setup_start = .;	/* __setup() command-line handlers */
			*(.init.setup)
		__setup_end = .;
		__early_begin = .;	/* __early_param() handlers */
			*(__early_param)
		__early_end = .;
		__initcall_start = .;	/* initcalls, run in level order 1..7 */
			*(.initcall1.init)
			*(.initcall2.init)
			*(.initcall3.init)
			*(.initcall4.init)
			*(.initcall5.init)
			*(.initcall6.init)
			*(.initcall7.init)
		__initcall_end = .;
		__con_initcall_start = .;
			*(.con_initcall.init)
		__con_initcall_end = .;
		__security_initcall_start = .;
			*(.security_initcall.init)
		__security_initcall_end = .;
		. = ALIGN(32);
		__initramfs_start = .;	/* built-in initramfs image */
			usr/built-in.o(.init.ramfs)
		__initramfs_end = .;
		. = ALIGN(64);
		__per_cpu_start = .;
			*(.data.percpu)
		__per_cpu_end = .;
#ifndef CONFIG_XIP_KERNEL
		/* Non-XIP: init memory is freed after boot, so keep it here. */
		__init_begin = _stext;
			*(.init.data)
		. = ALIGN(4096);
		__init_end = .;
#endif
	}

	/DISCARD/ : {			/* Exit code and data		*/
		*(.exit.text)
		*(.exit.data)
		*(.exitcall.exit)
	}

	.text : {			/* Real text segment		*/
		_text = .;		/* Text and read-only data	*/
			*(.text)
			SCHED_TEXT
			LOCK_TEXT
			*(.fixup)
			*(.gnu.warning)
			*(.rodata)
			*(.rodata.*)
			*(.glue_7)
			*(.glue_7t)
		*(.got)			/* Global offset table		*/
	}

	. = ALIGN(16);
	__ex_table : {			/* Exception table		*/
		__start___ex_table = .;
			*(__ex_table)
		__stop___ex_table = .;
	}

	RODATA

	_etext = .;			/* End of text and rodata section */

#ifdef CONFIG_XIP_KERNEL
	/* XIP: text stays in ROM; data is copied to RAM at DATAADDR. */
	__data_loc = ALIGN(4);		/* location in binary */
	. = DATAADDR;
#else
	. = ALIGN(8192);
	__data_loc = .;
#endif

	.data : AT(__data_loc) {
		__data_start = .;	/* address in memory */

		/*
		 * first, the init task union, aligned
		 * to an 8192 byte boundary.
		 */
		*(.init.task)

#ifdef CONFIG_XIP_KERNEL
		. = ALIGN(4096);
		__init_begin = .;
			*(.init.data)
		. = ALIGN(4096);
		__init_end = .;
#endif

		. = ALIGN(4096);
		__nosave_begin = .;	/* not saved across suspend */
			*(.data.nosave)
		. = ALIGN(4096);
		__nosave_end = .;

		/*
		 * then the cacheline aligned data
		 */
		. = ALIGN(32);
			*(.data.cacheline_aligned)

		/*
		 * and the usual data section
		 */
			*(.data)
		CONSTRUCTORS

		_edata = .;
	}

	.bss : {
		__bss_start = .;	/* BSS				*/
			*(.bss)
			*(COMMON)
		_end = .;
	}
					/* Stabs debugging sections.	*/
	.stab 0 : { *(.stab) }
	.stabstr 0 : { *(.stabstr) }
	.stab.excl 0 : { *(.stab.excl) }
	.stab.exclstr 0 : { *(.stab.exclstr) }
	.stab.index 0 : { *(.stab.index) }
	.stab.indexstr 0 : { *(.stab.indexstr) }
	.comment 0 : { *(.comment) }
}

/* those must never be empty */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")