diff options
author | Paul Mackerras <paulus@samba.org> | 2005-09-26 02:04:21 -0400 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2005-09-26 02:04:21 -0400 |
commit | 14cf11af6cf608eb8c23e989ddb17a715ddce109 (patch) | |
tree | 271a97ce73e265f39c569cb159c195c5b4bb3f8c | |
parent | e5baa396af7560382d2cf3f0871d616b61fc284c (diff) |
powerpc: Merge enough to start building in arch/powerpc.
This creates the directory structure under arch/powerpc and a bunch
of Kconfig files. It does a first-cut merge of arch/powerpc/mm,
arch/powerpc/lib and arch/powerpc/platforms/powermac. This is enough
to build a 32-bit powermac kernel with ARCH=powerpc.
For now we are getting some unmerged files from arch/ppc/kernel and
arch/ppc/syslib, or arch/ppc64/kernel. This makes some minor changes
to files in those directories and files outside arch/powerpc.
The boot directory is still not merged. That's going to be interesting.
Signed-off-by: Paul Mackerras <paulus@samba.org>
89 files changed, 32423 insertions, 25 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig new file mode 100644 index 000000000000..edfac467b9e0 --- /dev/null +++ b/arch/powerpc/Kconfig | |||
@@ -0,0 +1,861 @@ | |||
1 | # For a description of the syntax of this configuration file, | ||
2 | # see Documentation/kbuild/kconfig-language.txt. | ||
3 | # | ||
4 | |||
5 | mainmenu "Linux/PowerPC Kernel Configuration" | ||
6 | |||
7 | config PPC64 | ||
8 | bool "64-bit kernel" | ||
9 | default n | ||
10 | help | ||
11 | This option selects whether a 32-bit or a 64-bit kernel | ||
12 | will be built. | ||
13 | |||
14 | config PPC32 | ||
15 | bool | ||
16 | default y if !PPC64 | ||
17 | |||
18 | config 64BIT | ||
19 | bool | ||
20 | default y if PPC64 | ||
21 | |||
22 | config PPC_MERGE | ||
23 | def_bool y | ||
24 | |||
25 | config MMU | ||
26 | bool | ||
27 | default y | ||
28 | |||
29 | config UID16 | ||
30 | bool | ||
31 | |||
32 | config GENERIC_HARDIRQS | ||
33 | bool | ||
34 | default y | ||
35 | |||
36 | config RWSEM_GENERIC_SPINLOCK | ||
37 | bool | ||
38 | |||
39 | config RWSEM_XCHGADD_ALGORITHM | ||
40 | bool | ||
41 | default y | ||
42 | |||
43 | config GENERIC_CALIBRATE_DELAY | ||
44 | bool | ||
45 | default y | ||
46 | |||
47 | config PPC | ||
48 | bool | ||
49 | default y | ||
50 | |||
51 | config EARLY_PRINTK | ||
52 | bool | ||
53 | default y if PPC64 | ||
54 | |||
55 | config COMPAT | ||
56 | bool | ||
57 | default y if PPC64 | ||
58 | |||
59 | config SYSVIPC_COMPAT | ||
60 | bool | ||
61 | depends on COMPAT && SYSVIPC | ||
62 | default y | ||
63 | |||
64 | # All PPC32s use generic nvram driver through ppc_md | ||
65 | config GENERIC_NVRAM | ||
66 | bool | ||
67 | default y if PPC32 | ||
68 | |||
69 | config SCHED_NO_NO_OMIT_FRAME_POINTER | ||
70 | bool | ||
71 | default y | ||
72 | |||
73 | config ARCH_MAY_HAVE_PC_FDC | ||
74 | bool | ||
75 | default y | ||
76 | |||
77 | menu "Processor support" | ||
78 | choice | ||
79 | prompt "Processor Type" | ||
80 | depends on PPC32 | ||
81 | default 6xx | ||
82 | |||
83 | config 6xx | ||
84 | bool "6xx/7xx/74xx" | ||
85 | select PPC_FPU | ||
86 | help | ||
87 | There are four families of PowerPC chips supported. The more common | ||
88 | types (601, 603, 604, 740, 750, 7400), the Motorola embedded | ||
89 | versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC | ||
90 | embedded versions (403 and 405) and the high end 64 bit Power | ||
91 | processors (POWER 3, POWER4, and IBM PPC970 also known as G5). | ||
92 | |||
93 | Unless you are building a kernel for one of the embedded processor | ||
94 | systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx. | ||
95 | Note that the kernel runs in 32-bit mode even on 64-bit chips. | ||
96 | |||
97 | config PPC_52xx | ||
98 | bool "Freescale 52xx" | ||
99 | |||
100 | config PPC_82xx | ||
101 | bool "Freescale 82xx" | ||
102 | |||
103 | config PPC_83xx | ||
104 | bool "Freescale 83xx" | ||
105 | |||
106 | config 40x | ||
107 | bool "AMCC 40x" | ||
108 | |||
109 | config 44x | ||
110 | bool "AMCC 44x" | ||
111 | |||
112 | config PPC64BRIDGE | ||
113 | select PPC_FPU | ||
114 | bool "POWER3, POWER4 and PPC970 (G5)" | ||
115 | |||
116 | config 8xx | ||
117 | bool "Freescale 8xx" | ||
118 | |||
119 | config E200 | ||
120 | bool "Freescale e200" | ||
121 | |||
122 | config E500 | ||
123 | bool "Freescale e500" | ||
124 | endchoice | ||
125 | |||
126 | config POWER4_ONLY | ||
127 | bool "Optimize for POWER4" | ||
128 | depends on PPC64 || PPC64BRIDGE | ||
129 | default n | ||
130 | ---help--- | ||
131 | Cause the compiler to optimize for POWER4/POWER5/PPC970 processors. | ||
132 | The resulting binary will not work on POWER3 or RS64 processors | ||
133 | when compiled with binutils 2.15 or later. | ||
134 | |||
135 | config POWER3 | ||
136 | bool | ||
137 | depends on PPC64 || PPC64BRIDGE | ||
138 | default y if !POWER4_ONLY | ||
139 | |||
140 | config POWER4 | ||
141 | depends on PPC64 || PPC64BRIDGE | ||
142 | def_bool y | ||
143 | |||
144 | config PPC_FPU | ||
145 | bool | ||
146 | default y if PPC64 | ||
147 | |||
148 | config BOOKE | ||
149 | bool | ||
150 | depends on E200 || E500 | ||
151 | default y | ||
152 | |||
153 | config FSL_BOOKE | ||
154 | bool | ||
155 | depends on E200 || E500 | ||
156 | default y | ||
157 | |||
158 | config PTE_64BIT | ||
159 | bool | ||
160 | depends on 44x || E500 | ||
161 | default y if 44x | ||
162 | default y if E500 && PHYS_64BIT | ||
163 | |||
164 | config PHYS_64BIT | ||
165 | bool 'Large physical address support' if E500 | ||
166 | depends on 44x || E500 | ||
167 | default y if 44x | ||
168 | ---help--- | ||
169 | This option enables kernel support for larger than 32-bit physical | ||
170 | addresses. This feature is not available on all e500 cores. | ||
171 | |||
172 | If in doubt, say N here. | ||
173 | |||
174 | config ALTIVEC | ||
175 | bool "AltiVec Support" | ||
176 | depends on 6xx || POWER4 | ||
177 | ---help--- | ||
178 | This option enables kernel support for the Altivec extensions to the | ||
179 | PowerPC processor. The kernel currently supports saving and restoring | ||
180 | altivec registers, and turning on the 'altivec enable' bit so user | ||
181 | processes can execute altivec instructions. | ||
182 | |||
183 | This option is only useful if you have a processor that supports | ||
184 | altivec (G4, otherwise known as 74xx series), but does not have | ||
185 | any effect on a non-altivec cpu (it does, however add code to the | ||
186 | kernel). | ||
187 | |||
188 | If in doubt, say Y here. | ||
189 | |||
190 | config SPE | ||
191 | bool "SPE Support" | ||
192 | depends on E200 || E500 | ||
193 | ---help--- | ||
194 | This option enables kernel support for the Signal Processing | ||
195 | Extensions (SPE) to the PowerPC processor. The kernel currently | ||
196 | supports saving and restoring SPE registers, and turning on the | ||
197 | 'spe enable' bit so user processes can execute SPE instructions. | ||
198 | |||
199 | This option is only useful if you have a processor that supports | ||
200 | SPE (e500, otherwise known as 85xx series), but does not have any | ||
201 | effect on a non-spe cpu (it does, however add code to the kernel). | ||
202 | |||
203 | If in doubt, say Y here. | ||
204 | |||
205 | config PPC_STD_MMU | ||
206 | bool | ||
207 | depends on 6xx || POWER3 || POWER4 || PPC64 | ||
208 | default y | ||
209 | |||
210 | config PPC_STD_MMU_32 | ||
211 | def_bool y | ||
212 | depends on PPC_STD_MMU && PPC32 | ||
213 | |||
214 | config SMP | ||
215 | depends on PPC_STD_MMU | ||
216 | bool "Symmetric multi-processing support" | ||
217 | ---help--- | ||
218 | This enables support for systems with more than one CPU. If you have | ||
219 | a system with only one CPU, say N. If you have a system with more | ||
220 | than one CPU, say Y. Note that the kernel does not currently | ||
221 | support SMP machines with 603/603e/603ev or PPC750 ("G3") processors | ||
222 | since they have inadequate hardware support for multiprocessor | ||
223 | operation. | ||
224 | |||
225 | If you say N here, the kernel will run on single and multiprocessor | ||
226 | machines, but will use only one CPU of a multiprocessor machine. If | ||
227 | you say Y here, the kernel will run on single-processor machines. | ||
228 | On a single-processor machine, the kernel will run faster if you say | ||
229 | N here. | ||
230 | |||
231 | If you don't know what to do here, say N. | ||
232 | |||
233 | config NR_CPUS | ||
234 | int "Maximum number of CPUs (2-128)" | ||
235 | range 2 128 | ||
236 | depends on SMP | ||
237 | default "32" if PPC64 | ||
238 | default "4" | ||
239 | |||
240 | config NOT_COHERENT_CACHE | ||
241 | bool | ||
242 | depends on 4xx || 8xx || E200 | ||
243 | default y | ||
244 | endmenu | ||
245 | |||
246 | source "init/Kconfig" | ||
247 | |||
248 | menu "Platform support" | ||
249 | depends on PPC64 || 6xx | ||
250 | |||
251 | choice | ||
252 | prompt "Machine type" | ||
253 | default PPC_MULTIPLATFORM | ||
254 | |||
255 | config PPC_MULTIPLATFORM | ||
256 | bool "Generic desktop/server/laptop" | ||
257 | help | ||
258 | Select this option if configuring for an IBM pSeries or | ||
259 | RS/6000 machine, an Apple machine, or a PReP, CHRP, | ||
260 | Maple or Cell-based machine. | ||
261 | |||
262 | config PPC_ISERIES | ||
263 | bool "IBM Legacy iSeries" | ||
264 | depends on PPC64 | ||
265 | |||
266 | config EMBEDDED6xx | ||
267 | bool "Embedded 6xx/7xx/7xxx-based board" | ||
268 | depends on PPC32 | ||
269 | |||
270 | config APUS | ||
271 | bool "Amiga-APUS" | ||
272 | depends on PPC32 && BROKEN | ||
273 | help | ||
274 | Select APUS if configuring for a PowerUP Amiga. | ||
275 | More information is available at: | ||
276 | <http://linux-apus.sourceforge.net/>. | ||
277 | endchoice | ||
278 | |||
279 | config PPC_PSERIES | ||
280 | depends on PPC_MULTIPLATFORM && PPC64 | ||
281 | bool " IBM pSeries & new (POWER5-based) iSeries" | ||
282 | default y | ||
283 | |||
284 | config PPC_CHRP | ||
285 | bool " Common Hardware Reference Platform (CHRP) based machines" | ||
286 | depends on PPC_MULTIPLATFORM && PPC32 | ||
287 | default y | ||
288 | |||
289 | config PPC_PMAC | ||
290 | bool " Apple PowerMac based machines" | ||
291 | depends on PPC_MULTIPLATFORM | ||
292 | default y | ||
293 | |||
294 | config PPC_PMAC64 | ||
295 | bool | ||
296 | depends on PPC_PMAC && POWER4 | ||
297 | default y | ||
298 | |||
299 | config PPC_PREP | ||
300 | bool " PowerPC Reference Platform (PReP) based machines" | ||
301 | depends on PPC_MULTIPLATFORM && PPC32 | ||
302 | default y | ||
303 | |||
304 | config PPC_MAPLE | ||
305 | depends on PPC_MULTIPLATFORM && PPC64 | ||
306 | bool " Maple 970FX Evaluation Board" | ||
307 | select U3_DART | ||
308 | select MPIC_BROKEN_U3 | ||
309 | default n | ||
310 | help | ||
311 | This option enables support for the Maple 970FX Evaluation Board. | ||
312 | For more information, refer to <http://www.970eval.com> | ||
313 | |||
314 | config PPC_BPA | ||
315 | bool " Broadband Processor Architecture" | ||
316 | depends on PPC_MULTIPLATFORM && PPC64 | ||
317 | |||
318 | config PPC_OF | ||
319 | bool | ||
320 | depends on PPC_MULTIPLATFORM # for now | ||
321 | default y | ||
322 | |||
323 | config XICS | ||
324 | depends on PPC_PSERIES | ||
325 | bool | ||
326 | default y | ||
327 | |||
328 | config U3_DART | ||
329 | bool | ||
330 | depends on PPC_MULTIPLATFORM && PPC64 | ||
331 | default n | ||
332 | |||
333 | config MPIC | ||
334 | depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE | ||
335 | bool | ||
336 | default y | ||
337 | |||
338 | config MPIC_BROKEN_U3 | ||
339 | bool | ||
340 | depends on PPC_MAPLE | ||
341 | default y | ||
342 | |||
343 | config BPA_IIC | ||
344 | depends on PPC_BPA | ||
345 | bool | ||
346 | default y | ||
347 | |||
348 | config IBMVIO | ||
349 | depends on PPC_PSERIES || PPC_ISERIES | ||
350 | bool | ||
351 | default y | ||
352 | |||
353 | source "drivers/cpufreq/Kconfig" | ||
354 | |||
355 | config CPU_FREQ_PMAC | ||
356 | bool "Support for Apple PowerBooks" | ||
357 | depends on CPU_FREQ && ADB_PMU && PPC32 | ||
358 | select CPU_FREQ_TABLE | ||
359 | help | ||
360 | This adds support for frequency switching on Apple PowerBooks, | ||
361 | this currently includes some models of iBook & Titanium | ||
362 | PowerBook. | ||
363 | |||
364 | config PPC601_SYNC_FIX | ||
365 | bool "Workarounds for PPC601 bugs" | ||
366 | depends on 6xx && (PPC_PREP || PPC_PMAC) | ||
367 | help | ||
368 | Some versions of the PPC601 (the first PowerPC chip) have bugs which | ||
369 | mean that extra synchronization instructions are required near | ||
370 | certain instructions, typically those that make major changes to the | ||
371 | CPU state. These extra instructions reduce performance slightly. | ||
372 | If you say N here, these extra instructions will not be included, | ||
373 | resulting in a kernel which will run faster but may not run at all | ||
374 | on some systems with the PPC601 chip. | ||
375 | |||
376 | If in doubt, say Y here. | ||
377 | |||
378 | config TAU | ||
379 | bool "Thermal Management Support" | ||
380 | depends on 6xx | ||
381 | help | ||
382 | G3 and G4 processors have an on-chip temperature sensor called the | ||
383 | 'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die | ||
384 | temperature within 2-4 degrees Celsius. This option shows the current | ||
385 | on-die temperature in /proc/cpuinfo if the cpu supports it. | ||
386 | |||
387 | Unfortunately, on some chip revisions, this sensor is very inaccurate | ||
388 | and in some cases, does not work at all, so don't assume the cpu | ||
389 | temp is actually what /proc/cpuinfo says it is. | ||
390 | |||
391 | config TAU_INT | ||
392 | bool "Interrupt driven TAU driver (DANGEROUS)" | ||
393 | depends on TAU | ||
394 | ---help--- | ||
395 | The TAU supports an interrupt driven mode which causes an interrupt | ||
396 | whenever the temperature goes out of range. This is the fastest way | ||
397 | to get notified the temp has exceeded a range. With this option off, | ||
398 | a timer is used to re-check the temperature periodically. | ||
399 | |||
400 | However, on some cpus it appears that the TAU interrupt hardware | ||
401 | is buggy and can cause a situation which would lead unexplained hard | ||
402 | lockups. | ||
403 | |||
404 | Unless you are extending the TAU driver, or enjoy kernel/hardware | ||
405 | debugging, leave this option off. | ||
406 | |||
407 | config TAU_AVERAGE | ||
408 | bool "Average high and low temp" | ||
409 | depends on TAU | ||
410 | ---help--- | ||
411 | The TAU hardware can compare the temperature to an upper and lower | ||
412 | bound. The default behavior is to show both the upper and lower | ||
413 | bound in /proc/cpuinfo. If the range is large, the temperature is | ||
414 | either changing a lot, or the TAU hardware is broken (likely on some | ||
415 | G4's). If the range is small (around 4 degrees), the temperature is | ||
416 | relatively stable. If you say Y here, a single temperature value, | ||
417 | halfway between the upper and lower bounds, will be reported in | ||
418 | /proc/cpuinfo. | ||
419 | |||
420 | If in doubt, say N here. | ||
421 | endmenu | ||
422 | |||
423 | source arch/powerpc/platforms/embedded6xx/Kconfig | ||
424 | source arch/powerpc/platforms/4xx/Kconfig | ||
425 | source arch/powerpc/platforms/85xx/Kconfig | ||
426 | source arch/powerpc/platforms/8xx/Kconfig | ||
427 | |||
428 | menu "Kernel options" | ||
429 | |||
430 | config HIGHMEM | ||
431 | bool "High memory support" | ||
432 | depends on PPC32 | ||
433 | |||
434 | source kernel/Kconfig.hz | ||
435 | source kernel/Kconfig.preempt | ||
436 | source "fs/Kconfig.binfmt" | ||
437 | |||
438 | # We optimistically allocate largepages from the VM, so make the limit | ||
439 | # large enough (16MB). This badly named config option is actually | ||
440 | # max order + 1 | ||
441 | config FORCE_MAX_ZONEORDER | ||
442 | int | ||
443 | depends on PPC64 | ||
444 | default "13" | ||
445 | |||
446 | config MATH_EMULATION | ||
447 | bool "Math emulation" | ||
448 | depends on 4xx || 8xx || E200 || E500 | ||
449 | ---help--- | ||
450 | Some PowerPC chips designed for embedded applications do not have | ||
451 | a floating-point unit and therefore do not implement the | ||
452 | floating-point instructions in the PowerPC instruction set. If you | ||
453 | say Y here, the kernel will include code to emulate a floating-point | ||
454 | unit, which will allow programs that use floating-point | ||
455 | instructions to run. | ||
456 | |||
457 | config IOMMU_VMERGE | ||
458 | bool "Enable IOMMU virtual merging (EXPERIMENTAL)" | ||
459 | depends on EXPERIMENTAL && PPC64 | ||
460 | default n | ||
461 | help | ||
462 | Cause IO segments sent to a device for DMA to be merged virtually | ||
463 | by the IOMMU when they happen to have been allocated contiguously. | ||
464 | This doesn't add pressure to the IOMMU allocator. However, some | ||
465 | drivers don't support getting large merged segments coming back | ||
466 | from *_map_sg(). Say Y if you know the drivers you are using are | ||
467 | properly handling this case. | ||
468 | |||
469 | config HOTPLUG_CPU | ||
470 | bool "Support for enabling/disabling CPUs" | ||
471 | depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC) | ||
472 | ---help--- | ||
473 | Say Y here to be able to disable and re-enable individual | ||
474 | CPUs at runtime on SMP machines. | ||
475 | |||
476 | Say N if you are unsure. | ||
477 | |||
478 | config KEXEC | ||
479 | bool "kexec system call (EXPERIMENTAL)" | ||
480 | depends on PPC_MULTIPLATFORM && EXPERIMENTAL | ||
481 | help | ||
482 | kexec is a system call that implements the ability to shutdown your | ||
483 | current kernel, and to start another kernel. It is like a reboot | ||
484 | but it is independent of the system firmware. And like a reboot | ||
485 | you can start any kernel with it, not just Linux. | ||
486 | |||
487 | The name comes from the similarity to the exec system call. | ||
488 | |||
489 | It is an ongoing process to be certain the hardware in a machine | ||
490 | is properly shutdown, so do not be surprised if this code does not | ||
491 | initially work for you. It may help to enable device hotplugging | ||
492 | support. As of this writing the exact hardware interface is | ||
493 | strongly in flux, so no good recommendation can be made. | ||
494 | |||
495 | config EMBEDDEDBOOT | ||
496 | bool | ||
497 | depends on 8xx || 8260 | ||
498 | default y | ||
499 | |||
500 | config PC_KEYBOARD | ||
501 | bool "PC PS/2 style Keyboard" | ||
502 | depends on 4xx || CPM2 | ||
503 | |||
504 | config PPCBUG_NVRAM | ||
505 | bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC | ||
506 | default y if PPC_PREP | ||
507 | |||
508 | config IRQ_ALL_CPUS | ||
509 | bool "Distribute interrupts on all CPUs by default" | ||
510 | depends on SMP && !MV64360 | ||
511 | help | ||
512 | This option gives the kernel permission to distribute IRQs across | ||
513 | multiple CPUs. Saying N here will route all IRQs to the first | ||
514 | CPU. Generally saying Y is safe, although some problems have been | ||
515 | reported with SMP Power Macintoshes with this option enabled. | ||
516 | |||
517 | source "arch/powerpc/platforms/pseries/Kconfig" | ||
518 | |||
519 | config ARCH_SELECT_MEMORY_MODEL | ||
520 | def_bool y | ||
521 | depends on PPC64 | ||
522 | |||
523 | config ARCH_FLATMEM_ENABLE | ||
524 | def_bool y | ||
525 | depends on PPC64 && !NUMA | ||
526 | |||
527 | config ARCH_DISCONTIGMEM_ENABLE | ||
528 | def_bool y | ||
529 | depends on SMP && PPC_PSERIES | ||
530 | |||
531 | config ARCH_DISCONTIGMEM_DEFAULT | ||
532 | def_bool y | ||
533 | depends on ARCH_DISCONTIGMEM_ENABLE | ||
534 | |||
535 | config ARCH_FLATMEM_ENABLE | ||
536 | def_bool y | ||
537 | depends on PPC64 | ||
538 | |||
539 | config ARCH_SPARSEMEM_ENABLE | ||
540 | def_bool y | ||
541 | depends on ARCH_DISCONTIGMEM_ENABLE | ||
542 | |||
543 | source "mm/Kconfig" | ||
544 | |||
545 | config HAVE_ARCH_EARLY_PFN_TO_NID | ||
546 | def_bool y | ||
547 | depends on NEED_MULTIPLE_NODES | ||
548 | |||
549 | # Some NUMA nodes have memory ranges that span | ||
550 | # other nodes. Even though a pfn is valid and | ||
551 | # between a node's start and end pfns, it may not | ||
552 | # reside on that node. | ||
553 | # | ||
554 | # This is a relatively temporary hack that should | ||
555 | # be able to go away when sparsemem is fully in | ||
556 | # place | ||
557 | |||
558 | config NODES_SPAN_OTHER_NODES | ||
559 | def_bool y | ||
560 | depends on NEED_MULTIPLE_NODES | ||
561 | |||
562 | config NUMA | ||
563 | bool "NUMA support" | ||
564 | default y if DISCONTIGMEM || SPARSEMEM | ||
565 | |||
566 | config SCHED_SMT | ||
567 | bool "SMT (Hyperthreading) scheduler support" | ||
568 | depends on PPC64 && SMP | ||
569 | default n | ||
570 | help | ||
571 | SMT scheduler support improves the CPU scheduler's decision making | ||
572 | when dealing with POWER5 cpus at a cost of slightly increased | ||
573 | overhead in some places. If unsure say N here. | ||
574 | |||
575 | config PROC_DEVICETREE | ||
576 | bool "Support for Open Firmware device tree in /proc" | ||
577 | depends on PPC_OF && PROC_FS | ||
578 | help | ||
579 | This option adds a device-tree directory under /proc which contains | ||
580 | an image of the device tree that the kernel copies from Open | ||
581 | Firmware. If unsure, say Y here. | ||
582 | |||
583 | source "arch/powerpc/platforms/prep/Kconfig" | ||
584 | |||
585 | config CMDLINE_BOOL | ||
586 | bool "Default bootloader kernel arguments" | ||
587 | depends on !PPC_ISERIES | ||
588 | |||
589 | config CMDLINE | ||
590 | string "Initial kernel command string" | ||
591 | depends on CMDLINE_BOOL | ||
592 | default "console=ttyS0,9600 console=tty0 root=/dev/sda2" | ||
593 | help | ||
594 | On some platforms, there is currently no way for the boot loader to | ||
595 | pass arguments to the kernel. For these platforms, you can supply | ||
596 | some command-line options at build time by entering them here. In | ||
597 | most cases you will need to specify the root device here. | ||
598 | |||
599 | if !44x || BROKEN | ||
600 | source kernel/power/Kconfig | ||
601 | endif | ||
602 | |||
603 | config SECCOMP | ||
604 | bool "Enable seccomp to safely compute untrusted bytecode" | ||
605 | depends on PROC_FS | ||
606 | default y | ||
607 | help | ||
608 | This kernel feature is useful for number crunching applications | ||
609 | that may need to compute untrusted bytecode during their | ||
610 | execution. By using pipes or other transports made available to | ||
611 | the process as file descriptors supporting the read/write | ||
612 | syscalls, it's possible to isolate those applications in | ||
613 | their own address space using seccomp. Once seccomp is | ||
614 | enabled via /proc/<pid>/seccomp, it cannot be disabled | ||
615 | and the task is only allowed to execute a few safe syscalls | ||
616 | defined by each seccomp mode. | ||
617 | |||
618 | If unsure, say Y. Only embedded should say N here. | ||
619 | |||
620 | endmenu | ||
621 | |||
622 | config ISA_DMA_API | ||
623 | bool | ||
624 | default y | ||
625 | |||
626 | menu "Bus options" | ||
627 | |||
628 | config ISA | ||
629 | bool "Support for ISA-bus hardware" | ||
630 | depends on PPC_PREP || PPC_CHRP | ||
631 | help | ||
632 | Find out whether you have ISA slots on your motherboard. ISA is the | ||
633 | name of a bus system, i.e. the way the CPU talks to the other stuff | ||
634 | inside your box. If you have an Apple machine, say N here; if you | ||
635 | have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If | ||
636 | you have an embedded board, consult your board documentation. | ||
637 | |||
638 | config GENERIC_ISA_DMA | ||
639 | bool | ||
640 | depends on PPC64 || POWER4 || 6xx && !CPM2 | ||
641 | default y | ||
642 | |||
643 | config EISA | ||
644 | bool | ||
645 | |||
646 | config SBUS | ||
647 | bool | ||
648 | |||
649 | # Yes MCA RS/6000s exist but Linux-PPC does not currently support any | ||
650 | config MCA | ||
651 | bool | ||
652 | |||
653 | config PCI | ||
654 | bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) | ||
655 | default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx | ||
656 | default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS | ||
657 | default PCI_QSPAN if !4xx && !CPM2 && 8xx | ||
658 | help | ||
659 | Find out whether your system includes a PCI bus. PCI is the name of | ||
660 | a bus system, i.e. the way the CPU talks to the other stuff inside | ||
661 | your box. If you say Y here, the kernel will include drivers and | ||
662 | infrastructure code to support PCI bus devices. | ||
663 | |||
664 | config PCI_DOMAINS | ||
665 | bool | ||
666 | default PCI | ||
667 | |||
668 | config MPC83xx_PCI2 | ||
669 | bool " Support for 2nd PCI host controller" | ||
670 | depends on PCI && MPC834x | ||
671 | default y if MPC834x_SYS | ||
672 | |||
673 | config PCI_QSPAN | ||
674 | bool "QSpan PCI" | ||
675 | depends on !4xx && !CPM2 && 8xx | ||
676 | help | ||
677 | Say Y here if you have a system based on a Motorola 8xx-series | ||
678 | embedded processor with a QSPAN PCI interface, otherwise say N. | ||
679 | |||
680 | config PCI_8260 | ||
681 | bool | ||
682 | depends on PCI && 8260 | ||
683 | default y | ||
684 | |||
685 | config 8260_PCI9 | ||
686 | bool " Enable workaround for MPC826x erratum PCI 9" | ||
687 | depends on PCI_8260 && !ADS8272 | ||
688 | default y | ||
689 | |||
690 | choice | ||
691 | prompt " IDMA channel for PCI 9 workaround" | ||
692 | depends on 8260_PCI9 | ||
693 | |||
694 | config 8260_PCI9_IDMA1 | ||
695 | bool "IDMA1" | ||
696 | |||
697 | config 8260_PCI9_IDMA2 | ||
698 | bool "IDMA2" | ||
699 | |||
700 | config 8260_PCI9_IDMA3 | ||
701 | bool "IDMA3" | ||
702 | |||
703 | config 8260_PCI9_IDMA4 | ||
704 | bool "IDMA4" | ||
705 | |||
706 | endchoice | ||
707 | |||
708 | source "drivers/pci/Kconfig" | ||
709 | |||
710 | source "drivers/pcmcia/Kconfig" | ||
711 | |||
712 | source "drivers/pci/hotplug/Kconfig" | ||
713 | |||
714 | endmenu | ||
715 | |||
716 | menu "Advanced setup" | ||
717 | depends on PPC32 | ||
718 | |||
719 | config ADVANCED_OPTIONS | ||
720 | bool "Prompt for advanced kernel configuration options" | ||
721 | help | ||
722 | This option will enable prompting for a variety of advanced kernel | ||
723 | configuration options. These options can cause the kernel to not | ||
724 | work if they are set incorrectly, but can be used to optimize certain | ||
725 | aspects of kernel memory management. | ||
726 | |||
727 | Unless you know what you are doing, say N here. | ||
728 | |||
729 | comment "Default settings for advanced configuration options are used" | ||
730 | depends on !ADVANCED_OPTIONS | ||
731 | |||
732 | config HIGHMEM_START_BOOL | ||
733 | bool "Set high memory pool address" | ||
734 | depends on ADVANCED_OPTIONS && HIGHMEM | ||
735 | help | ||
736 | This option allows you to set the base address of the kernel virtual | ||
737 | area used to map high memory pages. This can be useful in | ||
738 | optimizing the layout of kernel virtual memory. | ||
739 | |||
740 | Say N here unless you know what you are doing. | ||
741 | |||
742 | config HIGHMEM_START | ||
743 | hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL | ||
744 | default "0xfe000000" | ||
745 | |||
746 | config LOWMEM_SIZE_BOOL | ||
747 | bool "Set maximum low memory" | ||
748 | depends on ADVANCED_OPTIONS | ||
749 | help | ||
750 | This option allows you to set the maximum amount of memory which | ||
751 | will be used as "low memory", that is, memory which the kernel can | ||
752 | access directly, without having to set up a kernel virtual mapping. | ||
753 | This can be useful in optimizing the layout of kernel virtual | ||
754 | memory. | ||
755 | |||
756 | Say N here unless you know what you are doing. | ||
757 | |||
758 | config LOWMEM_SIZE | ||
759 | hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL | ||
760 | default "0x30000000" | ||
761 | |||
762 | config KERNEL_START_BOOL | ||
763 | bool "Set custom kernel base address" | ||
764 | depends on ADVANCED_OPTIONS | ||
765 | help | ||
766 | This option allows you to set the kernel virtual address at which | ||
767 | the kernel will map low memory (the kernel image will be linked at | ||
768 | this address). This can be useful in optimizing the virtual memory | ||
769 | layout of the system. | ||
770 | |||
771 | Say N here unless you know what you are doing. | ||
772 | |||
773 | config KERNEL_START | ||
774 | hex "Virtual address of kernel base" if KERNEL_START_BOOL | ||
775 | default "0xc0000000" | ||
776 | |||
777 | config TASK_SIZE_BOOL | ||
778 | bool "Set custom user task size" | ||
779 | depends on ADVANCED_OPTIONS | ||
780 | help | ||
781 | This option allows you to set the amount of virtual address space | ||
782 | allocated to user tasks. This can be useful in optimizing the | ||
783 | virtual memory layout of the system. | ||
784 | |||
785 | Say N here unless you know what you are doing. | ||
786 | |||
787 | config TASK_SIZE | ||
788 | hex "Size of user task space" if TASK_SIZE_BOOL | ||
789 | default "0x80000000" | ||
790 | |||
791 | config CONSISTENT_START_BOOL | ||
792 | bool "Set custom consistent memory pool address" | ||
793 | depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE | ||
794 | help | ||
795 | This option allows you to set the base virtual address | ||
796 | of the consistent memory pool. This pool of virtual | ||
797 | memory is used to make consistent memory allocations. | ||
798 | |||
799 | config CONSISTENT_START | ||
800 | hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL | ||
801 | default "0xff100000" if NOT_COHERENT_CACHE | ||
802 | |||
803 | config CONSISTENT_SIZE_BOOL | ||
804 | bool "Set custom consistent memory pool size" | ||
805 | depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE | ||
806 | help | ||
807 | This option allows you to set the size of the | ||
808 | consistent memory pool. This pool of virtual memory | ||
809 | is used to make consistent memory allocations. | ||
810 | |||
811 | config CONSISTENT_SIZE | ||
812 | hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL | ||
813 | default "0x00200000" if NOT_COHERENT_CACHE | ||
814 | |||
815 | config BOOT_LOAD_BOOL | ||
816 | bool "Set the boot link/load address" | ||
817 | depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM | ||
818 | help | ||
819 | This option allows you to set the initial load address of the zImage | ||
820 | or zImage.initrd file. This can be useful if you are on a board | ||
821 | which has a small amount of memory. | ||
822 | |||
823 | Say N here unless you know what you are doing. | ||
824 | |||
825 | config BOOT_LOAD | ||
826 | hex "Link/load address for booting" if BOOT_LOAD_BOOL | ||
827 | default "0x00400000" if 40x || 8xx || 8260 | ||
828 | default "0x01000000" if 44x | ||
829 | default "0x00800000" | ||
830 | |||
831 | config PIN_TLB | ||
832 | bool "Pinned Kernel TLBs (860 ONLY)" | ||
833 | depends on ADVANCED_OPTIONS && 8xx | ||
834 | endmenu | ||
835 | |||
836 | source "net/Kconfig" | ||
837 | |||
838 | source "drivers/Kconfig" | ||
839 | |||
840 | source "fs/Kconfig" | ||
841 | |||
842 | # XXX source "arch/ppc/8xx_io/Kconfig" | ||
843 | |||
844 | # XXX source "arch/ppc/8260_io/Kconfig" | ||
845 | |||
846 | source "arch/powerpc/platforms/iseries/Kconfig" | ||
847 | |||
848 | source "lib/Kconfig" | ||
849 | |||
850 | source "arch/powerpc/oprofile/Kconfig" | ||
851 | |||
852 | source "arch/powerpc/Kconfig.debug" | ||
853 | |||
854 | source "security/Kconfig" | ||
855 | |||
856 | config KEYS_COMPAT | ||
857 | bool | ||
858 | depends on COMPAT && KEYS | ||
859 | default y | ||
860 | |||
861 | source "crypto/Kconfig" | ||
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug new file mode 100644 index 000000000000..61653cb60c4e --- /dev/null +++ b/arch/powerpc/Kconfig.debug | |||
@@ -0,0 +1,73 @@ | |||
1 | menu "Kernel hacking" | ||
2 | |||
3 | source "lib/Kconfig.debug" | ||
4 | |||
5 | config KGDB | ||
6 | bool "Include kgdb kernel debugger" | ||
7 | depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx) | ||
8 | select DEBUG_INFO | ||
9 | help | ||
10 | Include in-kernel hooks for kgdb, the Linux kernel source level | ||
11 | debugger. See <http://kgdb.sourceforge.net/> for more information. | ||
12 | Unless you are intending to debug the kernel, say N here. | ||
13 | |||
14 | choice | ||
15 | prompt "Serial Port" | ||
16 | depends on KGDB | ||
17 | default KGDB_TTYS1 | ||
18 | |||
19 | config KGDB_TTYS0 | ||
20 | bool "ttyS0" | ||
21 | |||
22 | config KGDB_TTYS1 | ||
23 | bool "ttyS1" | ||
24 | |||
25 | config KGDB_TTYS2 | ||
26 | bool "ttyS2" | ||
27 | |||
28 | config KGDB_TTYS3 | ||
29 | bool "ttyS3" | ||
30 | |||
31 | endchoice | ||
32 | |||
33 | config KGDB_CONSOLE | ||
34 | bool "Enable serial console thru kgdb port" | ||
35 | depends on KGDB && 8xx || CPM2 | ||
36 | help | ||
37 | If you enable this, all serial console messages will be sent | ||
38 | over the gdb stub. | ||
39 | If unsure, say N. | ||
40 | |||
41 | config XMON | ||
42 | bool "Include xmon kernel debugger" | ||
43 | depends on DEBUG_KERNEL | ||
44 | help | ||
45 | Include in-kernel hooks for the xmon kernel monitor/debugger. | ||
46 | Unless you are intending to debug the kernel, say N here. | ||
47 | |||
48 | config BDI_SWITCH | ||
49 | bool "Include BDI-2000 user context switcher" | ||
50 | depends on DEBUG_KERNEL | ||
51 | help | ||
52 | Include in-kernel support for the Abatron BDI2000 debugger. | ||
53 | Unless you are intending to debug the kernel with one of these | ||
54 | machines, say N here. | ||
55 | |||
56 | config BOOTX_TEXT | ||
57 | bool "Support for early boot text console (BootX or OpenFirmware only)" | ||
58 | depends PPC_OF | ||
59 | help | ||
60 | Say Y here to see progress messages from the boot firmware in text | ||
61 | mode. Requires either BootX or Open Firmware. | ||
62 | |||
63 | config SERIAL_TEXT_DEBUG | ||
64 | bool "Support for early boot texts over serial port" | ||
65 | depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \ | ||
66 | PPC_GEN550 || PPC_MPC52xx | ||
67 | |||
68 | config PPC_OCP | ||
69 | bool | ||
70 | depends on IBM_OCP || XILINX_OCP | ||
71 | default y | ||
72 | |||
73 | endmenu | ||
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile new file mode 100644 index 000000000000..8a65e112211b --- /dev/null +++ b/arch/powerpc/Makefile | |||
@@ -0,0 +1,222 @@ | |||
1 | # This file is included by the global makefile so that you can add your own | ||
2 | # architecture-specific flags and dependencies. Remember to do have actions | ||
3 | # for "archclean" and "archdep" for cleaning up and making dependencies for | ||
4 | # this architecture. | ||
5 | # | ||
6 | # This file is subject to the terms and conditions of the GNU General Public | ||
7 | # License. See the file "COPYING" in the main directory of this archive | ||
8 | # for more details. | ||
9 | # | ||
10 | # Copyright (C) 1994 by Linus Torvalds | ||
11 | # Changes for PPC by Gary Thomas | ||
12 | # Rewritten by Cort Dougan and Paul Mackerras | ||
13 | # | ||
14 | |||
15 | # This must match PAGE_OFFSET in include/asm-powerpc/page.h. | ||
16 | KERNELLOAD := $(CONFIG_KERNEL_START) | ||
17 | |||
18 | HAS_BIARCH := $(call cc-option-yn, -m32) | ||
19 | |||
20 | ifeq ($(CONFIG_PPC64),y) | ||
21 | SZ := 64 | ||
22 | |||
23 | # Set default 32 bits cross compilers for vdso and boot wrapper | ||
24 | CROSS32_COMPILE ?= | ||
25 | |||
26 | CROSS32CC := $(CROSS32_COMPILE)gcc | ||
27 | CROSS32AS := $(CROSS32_COMPILE)as | ||
28 | CROSS32LD := $(CROSS32_COMPILE)ld | ||
29 | CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy | ||
30 | |||
31 | ifeq ($(HAS_BIARCH),y) | ||
32 | ifeq ($(CROSS32_COMPILE),) | ||
33 | CROSS32CC := $(CC) -m32 | ||
34 | CROSS32AS := $(AS) -a32 | ||
35 | CROSS32LD := $(LD) -m elf32ppc | ||
36 | CROSS32OBJCOPY := $(OBJCOPY) | ||
37 | endif | ||
38 | endif | ||
39 | |||
40 | export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY | ||
41 | |||
42 | new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) | ||
43 | |||
44 | ifeq ($(new_nm),y) | ||
45 | NM := $(NM) --synthetic | ||
46 | endif | ||
47 | |||
48 | else | ||
49 | SZ := 32 | ||
50 | endif | ||
51 | |||
52 | ifeq ($(HAS_BIARCH),y) | ||
53 | override AS += -a$(SZ) | ||
54 | override LD += -m elf$(SZ)ppc | ||
55 | override CC += -m$(SZ) | ||
56 | endif | ||
57 | |||
58 | LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD) | ||
59 | |||
60 | # The -Iarch/$(ARCH)/include is temporary while we are merging | ||
61 | CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include | ||
62 | AFLAGS += -Iarch/$(ARCH) | ||
63 | CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe | ||
64 | ifeq ($(CONFIG_PPC64),y) | ||
65 | CFLAGS += -mminimal-toc -mtraceback=none -mcall-aixdesc | ||
66 | else | ||
67 | CFLAGS += -ffixed-r2 -mmultiple | ||
68 | endif | ||
69 | CPP = $(CC) -E $(CFLAGS) | ||
70 | # Temporary hack until we have migrated to asm-powerpc | ||
71 | LINUXINCLUDE += -Iarch/$(ARCH)/include | ||
72 | |||
73 | CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__ | ||
74 | |||
75 | ifeq ($(CONFIG_PPC64),y) | ||
76 | GCC_VERSION := $(call cc-version) | ||
77 | GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi) | ||
78 | |||
79 | ifeq ($(CONFIG_POWER4_ONLY),y) | ||
80 | ifeq ($(CONFIG_ALTIVEC),y) | ||
81 | ifeq ($(GCC_BROKEN_VEC),y) | ||
82 | CFLAGS += $(call cc-option,-mcpu=970) | ||
83 | else | ||
84 | CFLAGS += $(call cc-option,-mcpu=power4) | ||
85 | endif | ||
86 | else | ||
87 | CFLAGS += $(call cc-option,-mcpu=power4) | ||
88 | endif | ||
89 | else | ||
90 | CFLAGS += $(call cc-option,-mtune=power4) | ||
91 | endif | ||
92 | endif | ||
93 | |||
94 | # Enable unit-at-a-time mode when possible. It shrinks the | ||
95 | # kernel considerably. | ||
96 | CFLAGS += $(call cc-option,-funit-at-a-time) | ||
97 | |||
98 | ifndef CONFIG_FSL_BOOKE | ||
99 | CFLAGS += -mstring | ||
100 | endif | ||
101 | |||
102 | cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge | ||
103 | cpu-as-$(CONFIG_4xx) += -Wa,-m405 | ||
104 | cpu-as-$(CONFIG_6xx) += -Wa,-maltivec | ||
105 | cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec | ||
106 | cpu-as-$(CONFIG_E500) += -Wa,-me500 | ||
107 | cpu-as-$(CONFIG_E200) += -Wa,-me200 | ||
108 | |||
109 | AFLAGS += $(cpu-as-y) | ||
110 | CFLAGS += $(cpu-as-y) | ||
111 | |||
112 | # Default to the common case. | ||
113 | KBUILD_DEFCONFIG := common_defconfig | ||
114 | |||
115 | head-y := arch/powerpc/kernel/head.o | ||
116 | head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o | ||
117 | head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o | ||
118 | head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o | ||
119 | head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o | ||
120 | head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o | ||
121 | |||
122 | ifeq ($(CONFIG_PPC32),y) | ||
123 | head-$(CONFIG_6xx) += arch/powerpc/kernel/idle_6xx.o | ||
124 | head-$(CONFIG_POWER4) += arch/powerpc/kernel/idle_power4.o | ||
125 | head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o | ||
126 | endif | ||
127 | |||
128 | core-y += arch/powerpc/kernel/ \ | ||
129 | arch/powerpc/mm/ \ | ||
130 | arch/powerpc/lib/ \ | ||
131 | arch/powerpc/sysdev/ | ||
132 | core-$(CONFIG_PPC32) += arch/ppc/kernel/ \ | ||
133 | arch/ppc/syslib/ | ||
134 | core-$(CONFIG_PPC64) += arch/ppc64/kernel/ | ||
135 | core-$(CONFIG_PPC_PMAC) += arch/powerpc/platforms/powermac/ | ||
136 | core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ | ||
137 | core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ | ||
138 | core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/ | ||
139 | core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/ | ||
140 | core-$(CONFIG_XMON) += arch/powerpc/xmon/ | ||
141 | core-$(CONFIG_APUS) += arch/ppc/amiga/ | ||
142 | drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/ | ||
143 | drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/ | ||
144 | drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/ | ||
145 | |||
146 | drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ | ||
147 | |||
148 | BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm | ||
149 | |||
150 | .PHONY: $(BOOT_TARGETS) | ||
151 | |||
152 | all: uImage zImage | ||
153 | |||
154 | CPPFLAGS_vmlinux.lds := -Upowerpc | ||
155 | |||
156 | # All the instructions talk about "make bzImage". | ||
157 | bzImage: zImage | ||
158 | |||
159 | boot := arch/$(ARCH)/boot | ||
160 | |||
161 | $(BOOT_TARGETS): vmlinux | ||
162 | $(Q)$(MAKE) $(build)=$(boot) $@ | ||
163 | |||
164 | uImage: vmlinux | ||
165 | $(Q)$(MAKE) $(build)=$(boot)/images $(boot)/images/$@ | ||
166 | |||
167 | define archhelp | ||
168 | @echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)' | ||
169 | @echo ' uImage - Create a bootable image for U-Boot / PPCBoot' | ||
170 | @echo ' install - Install kernel using' | ||
171 | @echo ' (your) ~/bin/installkernel or' | ||
172 | @echo ' (distribution) /sbin/installkernel or' | ||
173 | @echo ' install to $$(INSTALL_PATH) and run lilo' | ||
174 | @echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs' | ||
175 | endef | ||
176 | |||
177 | archclean: | ||
178 | $(Q)$(MAKE) $(clean)=arch/ppc/boot | ||
179 | # Temporary hack until we have migrated to asm-powerpc | ||
180 | $(Q)rm -rf arch/$(ARCH)/include | ||
181 | |||
182 | archprepare: checkbin | ||
183 | |||
184 | # Temporary hack until we have migrated to asm-powerpc | ||
185 | ifeq ($(CONFIG_PPC64),y) | ||
186 | include/asm: arch/$(ARCH)/include/asm | ||
187 | arch/$(ARCH)/include/asm: | ||
188 | $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi | ||
189 | $(Q)ln -fsn $(srctree)/include/asm-ppc64 arch/$(ARCH)/include/asm | ||
190 | else | ||
191 | include/asm: arch/$(ARCH)/include/asm | ||
192 | arch/$(ARCH)/include/asm: | ||
193 | $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi | ||
194 | $(Q)ln -fsn $(srctree)/include/asm-ppc arch/$(ARCH)/include/asm | ||
195 | endif | ||
196 | |||
197 | # Use the file '.tmp_gas_check' for binutils tests, as gas won't output | ||
198 | # to stdout and these checks are run even on install targets. | ||
199 | TOUT := .tmp_gas_check | ||
200 | # Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec | ||
201 | # instructions. | ||
202 | # gcc-3.4 and binutils-2.14 are a fatal combination. | ||
203 | GCC_VERSION := $(call cc-version) | ||
204 | |||
205 | checkbin: | ||
206 | @if test "$(GCC_VERSION)" = "0304" ; then \ | ||
207 | if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ | ||
208 | echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ | ||
209 | echo 'correctly with gcc-3.4 and your version of binutils.'; \ | ||
210 | echo '*** Please upgrade your binutils or downgrade your gcc'; \ | ||
211 | false; \ | ||
212 | fi ; \ | ||
213 | fi | ||
214 | @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \ | ||
215 | echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \ | ||
216 | echo 'correctly with old versions of binutils.' ; \ | ||
217 | echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \ | ||
218 | false ; \ | ||
219 | fi | ||
220 | |||
221 | CLEAN_FILES += $(TOUT) | ||
222 | |||
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile new file mode 100644 index 000000000000..62c4a51a23d7 --- /dev/null +++ b/arch/powerpc/kernel/Makefile | |||
@@ -0,0 +1,18 @@ | |||
1 | # | ||
2 | # Makefile for the linux kernel. | ||
3 | # | ||
4 | |||
5 | extra-$(CONFIG_PPC_STD_MMU) := head.o | ||
6 | extra_$(CONFIG_PPC64) := head_64.o | ||
7 | extra-$(CONFIG_40x) := head_4xx.o | ||
8 | extra-$(CONFIG_44x) := head_44x.o | ||
9 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o | ||
10 | extra-$(CONFIG_8xx) := head_8xx.o | ||
11 | extra-$(CONFIG_6xx) += idle_6xx.o | ||
12 | extra-$(CONFIG_POWER4) += idle_power4.o | ||
13 | extra-$(CONFIG_PPC_FPU) += fpu.o | ||
14 | extra-y += vmlinux.lds | ||
15 | |||
16 | obj-y := semaphore.o traps.o process.o | ||
17 | |||
18 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o | ||
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c new file mode 100644 index 000000000000..16cf0b7ee2b7 --- /dev/null +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -0,0 +1,262 @@ | |||
1 | /* | ||
2 | * This program is used to generate definitions needed by | ||
3 | * assembly language modules. | ||
4 | * | ||
5 | * We use the technique used in the OSF Mach kernel code: | ||
6 | * generate asm statements containing #defines, | ||
7 | * compile this file to assembler, and then extract the | ||
8 | * #defines from the assembly-language output. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/signal.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/suspend.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/time.h> | ||
28 | #include <linux/hardirq.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/page.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/processor.h> | ||
33 | |||
34 | #include <asm/cputable.h> | ||
35 | #include <asm/thread_info.h> | ||
36 | #ifdef CONFIG_PPC64 | ||
37 | #include <asm/paca.h> | ||
38 | #include <asm/lppaca.h> | ||
39 | #include <asm/iSeries/HvLpEvent.h> | ||
40 | #include <asm/rtas.h> | ||
41 | #include <asm/cache.h> | ||
42 | #include <asm/systemcfg.h> | ||
43 | #include <asm/compat.h> | ||
44 | #endif | ||
45 | |||
46 | #define DEFINE(sym, val) \ | ||
47 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
48 | |||
49 | #define BLANK() asm volatile("\n->" : : ) | ||
50 | |||
51 | int main(void) | ||
52 | { | ||
53 | /* thread struct on stack */ | ||
54 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | ||
55 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | ||
56 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | ||
57 | #ifdef CONFIG_PPC32 | ||
58 | DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); | ||
59 | #endif | ||
60 | #ifdef CONFIG_PPC64 | ||
61 | DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); | ||
62 | DEFINE(THREAD_SHIFT, THREAD_SHIFT); | ||
63 | #endif | ||
64 | DEFINE(THREAD_SIZE, THREAD_SIZE); | ||
65 | |||
66 | /* task_struct->thread */ | ||
67 | DEFINE(THREAD, offsetof(struct task_struct, thread)); | ||
68 | DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info)); | ||
69 | DEFINE(MM, offsetof(struct task_struct, mm)); | ||
70 | DEFINE(PTRACE, offsetof(struct task_struct, ptrace)); | ||
71 | DEFINE(KSP, offsetof(struct thread_struct, ksp)); | ||
72 | DEFINE(PGDIR, offsetof(struct thread_struct, pgdir)); | ||
73 | DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall)); | ||
74 | DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); | ||
75 | DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); | ||
76 | DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); | ||
77 | DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); | ||
78 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | ||
79 | DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); | ||
80 | DEFINE(PT_PTRACED, PT_PTRACED); | ||
81 | #endif | ||
82 | #ifdef CONFIG_PPC64 | ||
83 | DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid)); | ||
84 | #endif | ||
85 | |||
86 | #ifdef CONFIG_ALTIVEC | ||
87 | DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); | ||
88 | DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); | ||
89 | DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); | ||
90 | DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); | ||
91 | #endif /* CONFIG_ALTIVEC */ | ||
92 | #ifdef CONFIG_SPE | ||
93 | DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0])); | ||
94 | DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc)); | ||
95 | DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr)); | ||
96 | DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); | ||
97 | #endif /* CONFIG_SPE */ | ||
98 | /* Interrupt register frame */ | ||
99 | DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); | ||
100 | #ifndef CONFIG_PPC64 | ||
101 | DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); | ||
102 | #else | ||
103 | DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); | ||
104 | |||
105 | /* 288 = # of volatile regs, int & fp, for leaf routines */ | ||
106 | /* which do not stack a frame. See the PPC64 ABI. */ | ||
107 | DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288); | ||
108 | #endif | ||
109 | /* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */ | ||
110 | DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); | ||
111 | DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); | ||
112 | DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2])); | ||
113 | DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3])); | ||
114 | DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4])); | ||
115 | DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5])); | ||
116 | DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6])); | ||
117 | DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7])); | ||
118 | DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8])); | ||
119 | DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9])); | ||
120 | DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10])); | ||
121 | DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11])); | ||
122 | DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12])); | ||
123 | DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13])); | ||
124 | DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14])); | ||
125 | DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15])); | ||
126 | DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16])); | ||
127 | DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17])); | ||
128 | DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18])); | ||
129 | DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19])); | ||
130 | DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20])); | ||
131 | DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21])); | ||
132 | DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22])); | ||
133 | DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23])); | ||
134 | DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24])); | ||
135 | DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25])); | ||
136 | DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26])); | ||
137 | DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27])); | ||
138 | DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28])); | ||
139 | DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29])); | ||
140 | DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30])); | ||
141 | DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31])); | ||
142 | /* | ||
143 | * Note: these symbols include _ because they overlap with special | ||
144 | * register names | ||
145 | */ | ||
146 | DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip)); | ||
147 | DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr)); | ||
148 | DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr)); | ||
149 | DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link)); | ||
150 | DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr)); | ||
151 | DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq)); | ||
152 | DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer)); | ||
153 | DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); | ||
154 | DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); | ||
155 | /* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR | ||
156 | * SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs | ||
157 | * for such processors. For critical interrupts we use them to | ||
158 | * hold SRR0 and SRR1. | ||
159 | */ | ||
160 | DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar)); | ||
161 | DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr)); | ||
162 | DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3)); | ||
163 | DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result)); | ||
164 | DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); | ||
165 | DEFINE(CLONE_VM, CLONE_VM); | ||
166 | DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); | ||
167 | DEFINE(MM_PGD, offsetof(struct mm_struct, pgd)); | ||
168 | |||
169 | /* About the CPU features table */ | ||
170 | DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec)); | ||
171 | DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask)); | ||
172 | DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value)); | ||
173 | DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); | ||
174 | DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); | ||
175 | |||
176 | #ifdef CONFIG_PPC64 | ||
177 | DEFINE(MM, offsetof(struct task_struct, mm)); | ||
178 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); | ||
179 | |||
180 | DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size)); | ||
181 | DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size)); | ||
182 | DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page)); | ||
183 | DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size)); | ||
184 | DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); | ||
185 | DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); | ||
186 | DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); | ||
187 | |||
188 | /* paca */ | ||
189 | DEFINE(PACA_SIZE, sizeof(struct paca_struct)); | ||
190 | DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); | ||
191 | DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); | ||
192 | DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); | ||
193 | DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); | ||
194 | DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); | ||
195 | DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); | ||
196 | DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); | ||
197 | DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); | ||
198 | DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); | ||
199 | DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); | ||
200 | DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled)); | ||
201 | DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); | ||
202 | DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); | ||
203 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); | ||
204 | #ifdef CONFIG_HUGETLB_PAGE | ||
205 | DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); | ||
206 | DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); | ||
207 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
208 | DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr)); | ||
209 | DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); | ||
210 | DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); | ||
211 | DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); | ||
212 | DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi)); | ||
213 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); | ||
214 | DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca)); | ||
215 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | ||
216 | DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); | ||
217 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); | ||
218 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); | ||
219 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); | ||
220 | |||
221 | /* RTAS */ | ||
222 | DEFINE(RTASBASE, offsetof(struct rtas_t, base)); | ||
223 | DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); | ||
224 | |||
225 | DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap)); | ||
226 | DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe)); | ||
227 | |||
228 | /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */ | ||
229 | DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); | ||
230 | DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); | ||
231 | |||
232 | /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ | ||
233 | DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); | ||
234 | DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); | ||
235 | |||
236 | /* systemcfg offsets for use by vdso */ | ||
237 | DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp)); | ||
238 | DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec)); | ||
239 | DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs)); | ||
240 | DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec)); | ||
241 | DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count)); | ||
242 | DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest)); | ||
243 | DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime)); | ||
244 | DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32)); | ||
245 | DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64)); | ||
246 | |||
247 | /* timeval/timezone offsets for use by vdso */ | ||
248 | DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec)); | ||
249 | DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec)); | ||
250 | DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec)); | ||
251 | DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec)); | ||
252 | DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); | ||
253 | DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); | ||
254 | #endif | ||
255 | |||
256 | DEFINE(pbe_address, offsetof(struct pbe, address)); | ||
257 | DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); | ||
258 | DEFINE(pbe_next, offsetof(struct pbe, next)); | ||
259 | |||
260 | DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); | ||
261 | return 0; | ||
262 | } | ||
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S new file mode 100644 index 000000000000..665d7d34304c --- /dev/null +++ b/arch/powerpc/kernel/fpu.S | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * FPU support code, moved here from head.S so that it can be used | ||
3 | * by chips which use other head-whatever.S files. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/mmu.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/cputable.h> | ||
18 | #include <asm/cache.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/asm-offsets.h> | ||
22 | |||
23 | /* | ||
24 | * This task wants to use the FPU now. | ||
25 | * On UP, disable FP for the task which had the FPU previously, | ||
26 | * and save its floating-point registers in its thread_struct. | ||
27 | * Load up this task's FP registers from its thread_struct, | ||
28 | * enable the FPU for the current task and return to the task. | ||
29 | */ | ||
30 | .globl load_up_fpu | ||
31 | load_up_fpu: | ||
32 | mfmsr r5 | ||
33 | ori r5,r5,MSR_FP | ||
34 | #ifdef CONFIG_PPC64BRIDGE | ||
35 | clrldi r5,r5,1 /* turn off 64-bit mode */ | ||
36 | #endif /* CONFIG_PPC64BRIDGE */ | ||
37 | SYNC | ||
38 | MTMSRD(r5) /* enable use of fpu now */ | ||
39 | isync | ||
40 | /* | ||
41 | * For SMP, we don't do lazy FPU switching because it just gets too | ||
42 | * horrendously complex, especially when a task switches from one CPU | ||
43 | * to another. Instead we call giveup_fpu in switch_to. | ||
44 | */ | ||
45 | #ifndef CONFIG_SMP | ||
46 | tophys(r6,0) /* get __pa constant */ | ||
47 | addis r3,r6,last_task_used_math@ha | ||
48 | lwz r4,last_task_used_math@l(r3) | ||
49 | cmpwi 0,r4,0 | ||
50 | beq 1f | ||
51 | add r4,r4,r6 | ||
52 | addi r4,r4,THREAD /* want last_task_used_math->thread */ | ||
53 | SAVE_32FPRS(0, r4) | ||
54 | mffs fr0 | ||
55 | stfd fr0,THREAD_FPSCR-4(r4) | ||
56 | lwz r5,PT_REGS(r4) | ||
57 | add r5,r5,r6 | ||
58 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
59 | li r10,MSR_FP|MSR_FE0|MSR_FE1 | ||
60 | andc r4,r4,r10 /* disable FP for previous task */ | ||
61 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
62 | 1: | ||
63 | #endif /* CONFIG_SMP */ | ||
64 | /* enable use of FP after return */ | ||
65 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
66 | lwz r4,THREAD_FPEXC_MODE(r5) | ||
67 | ori r9,r9,MSR_FP /* enable FP for current */ | ||
68 | or r9,r9,r4 | ||
69 | lfd fr0,THREAD_FPSCR-4(r5) | ||
70 | mtfsf 0xff,fr0 | ||
71 | REST_32FPRS(0, r5) | ||
72 | #ifndef CONFIG_SMP | ||
73 | subi r4,r5,THREAD | ||
74 | sub r4,r4,r6 | ||
75 | stw r4,last_task_used_math@l(r3) | ||
76 | #endif /* CONFIG_SMP */ | ||
77 | /* restore registers and return */ | ||
78 | /* we haven't used ctr or xer or lr */ | ||
79 | b fast_exception_return | ||
80 | |||
81 | /* | ||
82 | * FP unavailable trap from kernel - print a message, but let | ||
83 | * the task use FP in the kernel until it returns to user mode. | ||
84 | */ | ||
85 | .globl KernelFP | ||
86 | KernelFP: | ||
87 | lwz r3,_MSR(r1) | ||
88 | ori r3,r3,MSR_FP | ||
89 | stw r3,_MSR(r1) /* enable use of FP after return */ | ||
90 | lis r3,86f@h | ||
91 | ori r3,r3,86f@l | ||
92 | mr r4,r2 /* current */ | ||
93 | lwz r5,_NIP(r1) | ||
94 | bl printk | ||
95 | b ret_from_except | ||
96 | 86: .string "floating point used in kernel (task=%p, pc=%x)\n" | ||
97 | .align 4,0 | ||
98 | |||
99 | /* | ||
100 | * giveup_fpu(tsk) | ||
101 | * Disable FP for the task given as the argument, | ||
102 | * and save the floating-point registers in its thread_struct. | ||
103 | * Enables the FPU for use in the kernel on return. | ||
104 | */ | ||
105 | .globl giveup_fpu | ||
106 | giveup_fpu: | ||
107 | mfmsr r5 | ||
108 | ori r5,r5,MSR_FP | ||
109 | SYNC_601 | ||
110 | ISYNC_601 | ||
111 | MTMSRD(r5) /* enable use of fpu now */ | ||
112 | SYNC_601 | ||
113 | isync | ||
114 | cmpwi 0,r3,0 | ||
115 | beqlr- /* if no previous owner, done */ | ||
116 | addi r3,r3,THREAD /* want THREAD of task */ | ||
117 | lwz r5,PT_REGS(r3) | ||
118 | cmpwi 0,r5,0 | ||
119 | SAVE_32FPRS(0, r3) | ||
120 | mffs fr0 | ||
121 | stfd fr0,THREAD_FPSCR-4(r3) | ||
122 | beq 1f | ||
123 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
124 | li r3,MSR_FP|MSR_FE0|MSR_FE1 | ||
125 | andc r4,r4,r3 /* disable FP for previous task */ | ||
126 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
127 | 1: | ||
128 | #ifndef CONFIG_SMP | ||
129 | li r5,0 | ||
130 | lis r4,last_task_used_math@ha | ||
131 | stw r5,last_task_used_math@l(r4) | ||
132 | #endif /* CONFIG_SMP */ | ||
133 | blr | ||
diff --git a/arch/powerpc/kernel/head.S b/arch/powerpc/kernel/head.S new file mode 100644 index 000000000000..d05509f197d0 --- /dev/null +++ b/arch/powerpc/kernel/head.S | |||
@@ -0,0 +1,1545 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
7 | * Adapted for Power Macintosh by Paul Mackerras. | ||
8 | * Low-level exception handlers and MMU support | ||
9 | * rewritten by Paul Mackerras. | ||
10 | * Copyright (C) 1996 Paul Mackerras. | ||
11 | * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
13 | * | ||
14 | * This file contains the low-level support and setup for the | ||
15 | * PowerPC platform, including trap and interrupt dispatch. | ||
16 | * (The PPC 8xx embedded CPUs use head_8xx.S instead.) | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/mmu.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/cache.h> | ||
32 | #include <asm/thread_info.h> | ||
33 | #include <asm/ppc_asm.h> | ||
34 | #include <asm/asm-offsets.h> | ||
35 | |||
36 | #ifdef CONFIG_APUS | ||
37 | #include <asm/amigappc.h> | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_PPC64BRIDGE | ||
41 | #define LOAD_BAT(n, reg, RA, RB) \ | ||
42 | ld RA,(n*32)+0(reg); \ | ||
43 | ld RB,(n*32)+8(reg); \ | ||
44 | mtspr SPRN_IBAT##n##U,RA; \ | ||
45 | mtspr SPRN_IBAT##n##L,RB; \ | ||
46 | ld RA,(n*32)+16(reg); \ | ||
47 | ld RB,(n*32)+24(reg); \ | ||
48 | mtspr SPRN_DBAT##n##U,RA; \ | ||
49 | mtspr SPRN_DBAT##n##L,RB; \ | ||
50 | |||
51 | #else /* CONFIG_PPC64BRIDGE */ | ||
52 | |||
53 | /* 601 only has IBATs; cr0.eq is set on 601 when using this macro */ \ | ||
54 | #define LOAD_BAT(n, reg, RA, RB) \ | ||
55 | /* see the comment for clear_bats() -- Cort */ \ | ||
56 | li RA,0; \ | ||
57 | mtspr SPRN_IBAT##n##U,RA; \ | ||
58 | mtspr SPRN_DBAT##n##U,RA; \ | ||
59 | lwz RA,(n*16)+0(reg); \ | ||
60 | lwz RB,(n*16)+4(reg); \ | ||
61 | mtspr SPRN_IBAT##n##U,RA; \ | ||
62 | mtspr SPRN_IBAT##n##L,RB; \ | ||
63 | beq 1f; \ | ||
64 | lwz RA,(n*16)+8(reg); \ | ||
65 | lwz RB,(n*16)+12(reg); \ | ||
66 | mtspr SPRN_DBAT##n##U,RA; \ | ||
67 | mtspr SPRN_DBAT##n##L,RB; \ | ||
68 | 1: | ||
69 | #endif /* CONFIG_PPC64BRIDGE */ | ||
70 | |||
71 | .text | ||
72 | .stabs "arch/ppc/kernel/",N_SO,0,0,0f | ||
73 | .stabs "head.S",N_SO,0,0,0f | ||
74 | 0: | ||
75 | .globl _stext | ||
76 | _stext: | ||
77 | |||
78 | /* | ||
79 | * _start is defined this way because the XCOFF loader in the OpenFirmware | ||
80 | * on the powermac expects the entry point to be a procedure descriptor. | ||
81 | */ | ||
82 | .text | ||
83 | .globl _start | ||
84 | _start: | ||
85 | /* | ||
86 | * These are here for legacy reasons, the kernel used to | ||
87 | * need to look like a coff function entry for the pmac | ||
88 | * but we're always started by some kind of bootloader now. | ||
89 | * -- Cort | ||
90 | */ | ||
91 | nop /* used by __secondary_hold on prep (mtx) and chrp smp */ | ||
92 | nop /* used by __secondary_hold on prep (mtx) and chrp smp */ | ||
93 | nop | ||
94 | |||
95 | /* PMAC | ||
96 | * Enter here with the kernel text, data and bss loaded starting at | ||
97 | * 0, running with virtual == physical mapping. | ||
98 | * r5 points to the prom entry point (the client interface handler | ||
99 | * address). Address translation is turned on, with the prom | ||
100 | * managing the hash table. Interrupts are disabled. The stack | ||
101 | * pointer (r1) points to just below the end of the half-meg region | ||
102 | * from 0x380000 - 0x400000, which is mapped in already. | ||
103 | * | ||
104 | * If we are booted from MacOS via BootX, we enter with the kernel | ||
105 | * image loaded somewhere, and the following values in registers: | ||
106 | * r3: 'BooX' (0x426f6f58) | ||
107 | * r4: virtual address of boot_infos_t | ||
108 | * r5: 0 | ||
109 | * | ||
110 | * APUS | ||
111 | * r3: 'APUS' | ||
112 | * r4: physical address of memory base | ||
113 | * Linux/m68k style BootInfo structure at &_end. | ||
114 | * | ||
115 | * PREP | ||
116 | * This is jumped to on prep systems right after the kernel is relocated | ||
117 | * to its proper place in memory by the boot loader. The expected layout | ||
118 | * of the regs is: | ||
119 | * r3: ptr to residual data | ||
120 | * r4: initrd_start or if no initrd then 0 | ||
121 | * r5: initrd_end - unused if r4 is 0 | ||
122 | * r6: Start of command line string | ||
123 | * r7: End of command line string | ||
124 | * | ||
125 | * This just gets a minimal mmu environment setup so we can call | ||
126 | * start_here() to do the real work. | ||
127 | * -- Cort | ||
128 | */ | ||
129 | |||
130 | .globl __start | ||
131 | __start: | ||
132 | /* | ||
133 | * We have to do any OF calls before we map ourselves to KERNELBASE, | ||
134 | * because OF may have I/O devices mapped into that area | ||
135 | * (particularly on CHRP). | ||
136 | */ | ||
137 | mr r31,r3 /* save parameters */ | ||
138 | mr r30,r4 | ||
139 | mr r29,r5 | ||
140 | mr r28,r6 | ||
141 | mr r27,r7 | ||
142 | li r24,0 /* cpu # */ | ||
143 | |||
144 | /* | ||
145 | * early_init() does the early machine identification and does | ||
146 | * the necessary low-level setup and clears the BSS | ||
147 | * -- Cort <cort@fsmlabs.com> | ||
148 | */ | ||
149 | bl early_init | ||
150 | |||
151 | /* | ||
152 | * On POWER4, we first need to tweak some CPU configuration registers | ||
153 | * like real mode cache inhibit or exception base | ||
154 | */ | ||
155 | #ifdef CONFIG_POWER4 | ||
156 | bl __970_cpu_preinit | ||
157 | #endif /* CONFIG_POWER4 */ | ||
158 | |||
159 | #ifdef CONFIG_APUS | ||
160 | /* On APUS the __va/__pa constants need to be set to the correct | ||
161 | * values before continuing. | ||
162 | */ | ||
163 | mr r4,r30 | ||
164 | bl fix_mem_constants | ||
165 | #endif /* CONFIG_APUS */ | ||
166 | |||
167 | /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains | ||
168 | * the physical address we are running at, returned by early_init() | ||
169 | */ | ||
170 | bl mmu_off | ||
171 | __after_mmu_off: | ||
172 | #ifndef CONFIG_POWER4 | ||
173 | bl clear_bats | ||
174 | bl flush_tlbs | ||
175 | |||
176 | bl initial_bats | ||
177 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
178 | bl setup_disp_bat | ||
179 | #endif | ||
180 | #else /* CONFIG_POWER4 */ | ||
181 | bl reloc_offset | ||
182 | bl initial_mm_power4 | ||
183 | #endif /* CONFIG_POWER4 */ | ||
184 | |||
185 | /* | ||
186 | * Call setup_cpu for CPU 0 and initialize 6xx Idle | ||
187 | */ | ||
188 | bl reloc_offset | ||
189 | li r24,0 /* cpu# */ | ||
190 | bl call_setup_cpu /* Call setup_cpu for this CPU */ | ||
191 | #ifdef CONFIG_6xx | ||
192 | bl reloc_offset | ||
193 | bl init_idle_6xx | ||
194 | #endif /* CONFIG_6xx */ | ||
195 | #ifdef CONFIG_POWER4 | ||
196 | bl reloc_offset | ||
197 | bl init_idle_power4 | ||
198 | #endif /* CONFIG_POWER4 */ | ||
199 | |||
200 | |||
201 | #ifndef CONFIG_APUS | ||
202 | /* | ||
203 | * We need to run with _start at physical address 0. | ||
204 | * On CHRP, we are loaded at 0x10000 since OF on CHRP uses | ||
205 | * the exception vectors at 0 (and therefore this copy | ||
206 | * overwrites OF's exception vectors with our own). | ||
207 | * If the MMU is already turned on, we copy stuff to KERNELBASE, | ||
208 | * otherwise we copy it to 0. | ||
209 | */ | ||
210 | bl reloc_offset | ||
211 | mr r26,r3 | ||
212 | addis r4,r3,KERNELBASE@h /* current address of _start */ | ||
213 | cmpwi 0,r4,0 /* are we already running at 0? */ | ||
214 | bne relocate_kernel | ||
215 | #endif /* CONFIG_APUS */ | ||
216 | /* | ||
217 | * we now have the 1st 16M of ram mapped with the bats. | ||
218 | * prep needs the mmu to be turned on here, but pmac already has it on. | ||
219 | * this shouldn't bother the pmac since it just gets turned on again | ||
220 | * as we jump to our code at KERNELBASE. -- Cort | ||
221 | * Actually no, pmac doesn't have it on any more. BootX enters with MMU | ||
222 | * off, and in other cases, we now turn it off before changing BATs above. | ||
223 | */ | ||
224 | turn_on_mmu: | ||
225 | mfmsr r0 | ||
226 | ori r0,r0,MSR_DR|MSR_IR | ||
227 | mtspr SPRN_SRR1,r0 | ||
228 | lis r0,start_here@h | ||
229 | ori r0,r0,start_here@l | ||
230 | mtspr SPRN_SRR0,r0 | ||
231 | SYNC | ||
232 | RFI /* enables MMU */ | ||
233 | |||
234 | /* | ||
235 | * We need __secondary_hold as a place to hold the other cpus on | ||
236 | * an SMP machine, even when we are running a UP kernel. | ||
237 | */ | ||
238 | . = 0xc0 /* for prep bootloader */ | ||
239 | li r3,1 /* MTX only has 1 cpu */ | ||
240 | .globl __secondary_hold | ||
241 | __secondary_hold: | ||
242 | /* tell the master we're here */ | ||
243 | stw r3,4(0) | ||
244 | #ifdef CONFIG_SMP | ||
245 | 100: lwz r4,0(0) | ||
246 | /* wait until we're told to start */ | ||
247 | cmpw 0,r4,r3 | ||
248 | bne 100b | ||
249 | /* our cpu # was at addr 0 - go */ | ||
250 | mr r24,r3 /* cpu # */ | ||
251 | b __secondary_start | ||
252 | #else | ||
253 | b . | ||
254 | #endif /* CONFIG_SMP */ | ||
255 | |||
256 | /* | ||
257 | * Exception entry code. This code runs with address translation | ||
258 | * turned off, i.e. using physical addresses. | ||
259 | * We assume sprg3 has the physical address of the current | ||
260 | * task's thread_struct. | ||
261 | */ | ||
262 | #define EXCEPTION_PROLOG \ | ||
263 | mtspr SPRN_SPRG0,r10; \ | ||
264 | mtspr SPRN_SPRG1,r11; \ | ||
265 | mfcr r10; \ | ||
266 | EXCEPTION_PROLOG_1; \ | ||
267 | EXCEPTION_PROLOG_2 | ||
268 | |||
269 | #define EXCEPTION_PROLOG_1 \ | ||
270 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | ||
271 | andi. r11,r11,MSR_PR; \ | ||
272 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | ||
273 | beq 1f; \ | ||
274 | mfspr r11,SPRN_SPRG3; \ | ||
275 | lwz r11,THREAD_INFO-THREAD(r11); \ | ||
276 | addi r11,r11,THREAD_SIZE; \ | ||
277 | tophys(r11,r11); \ | ||
278 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | ||
279 | |||
280 | |||
281 | #define EXCEPTION_PROLOG_2 \ | ||
282 | CLR_TOP32(r11); \ | ||
283 | stw r10,_CCR(r11); /* save registers */ \ | ||
284 | stw r12,GPR12(r11); \ | ||
285 | stw r9,GPR9(r11); \ | ||
286 | mfspr r10,SPRN_SPRG0; \ | ||
287 | stw r10,GPR10(r11); \ | ||
288 | mfspr r12,SPRN_SPRG1; \ | ||
289 | stw r12,GPR11(r11); \ | ||
290 | mflr r10; \ | ||
291 | stw r10,_LINK(r11); \ | ||
292 | mfspr r12,SPRN_SRR0; \ | ||
293 | mfspr r9,SPRN_SRR1; \ | ||
294 | stw r1,GPR1(r11); \ | ||
295 | stw r1,0(r11); \ | ||
296 | tovirt(r1,r11); /* set new kernel sp */ \ | ||
297 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | ||
298 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | ||
299 | stw r0,GPR0(r11); \ | ||
300 | SAVE_4GPRS(3, r11); \ | ||
301 | SAVE_2GPRS(7, r11) | ||
302 | |||
303 | /* | ||
304 | * Note: code which follows this uses cr0.eq (set if from kernel), | ||
305 | * r11, r12 (SRR0), and r9 (SRR1). | ||
306 | * | ||
307 | * Note2: once we have set r1 we are in a position to take exceptions | ||
308 | * again, and we could thus set MSR:RI at that point. | ||
309 | */ | ||
310 | |||
311 | /* | ||
312 | * Exception vectors. | ||
313 | */ | ||
314 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
315 | . = n; \ | ||
316 | label: \ | ||
317 | EXCEPTION_PROLOG; \ | ||
318 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
319 | xfer(n, hdlr) | ||
320 | |||
321 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | ||
322 | li r10,trap; \ | ||
323 | stw r10,TRAP(r11); \ | ||
324 | li r10,MSR_KERNEL; \ | ||
325 | copyee(r10, r9); \ | ||
326 | bl tfer; \ | ||
327 | i##n: \ | ||
328 | .long hdlr; \ | ||
329 | .long ret | ||
330 | |||
331 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
332 | #define NOCOPY(d, s) | ||
333 | |||
334 | #define EXC_XFER_STD(n, hdlr) \ | ||
335 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | ||
336 | ret_from_except_full) | ||
337 | |||
338 | #define EXC_XFER_LITE(n, hdlr) \ | ||
339 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | ||
340 | ret_from_except) | ||
341 | |||
342 | #define EXC_XFER_EE(n, hdlr) \ | ||
343 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | ||
344 | ret_from_except_full) | ||
345 | |||
346 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
347 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | ||
348 | ret_from_except) | ||
349 | |||
350 | /* System reset */ | ||
351 | /* core99 pmac starts the secondary here by changing the vector, and | ||
352 | putting it back to what it was (UnknownException) when done. */ | ||
353 | #if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) | ||
354 | . = 0x100 | ||
355 | b __secondary_start_gemini | ||
356 | #else | ||
357 | EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) | ||
358 | #endif | ||
359 | |||
360 | /* Machine check */ | ||
361 | /* | ||
362 | * On CHRP, this is complicated by the fact that we could get a | ||
363 | * machine check inside RTAS, and we have no guarantee that certain | ||
364 | * critical registers will have the values we expect. The set of | ||
365 | * registers that might have bad values includes all the GPRs | ||
366 | * and all the BATs. We indicate that we are in RTAS by putting | ||
367 | * a non-zero value, the address of the exception frame to use, | ||
368 | * in SPRG2. The machine check handler checks SPRG2 and uses its | ||
369 | * value if it is non-zero. If we ever needed to free up SPRG2, | ||
370 | * we could use a field in the thread_info or thread_struct instead. | ||
371 | * (Other exception handlers assume that r1 is a valid kernel stack | ||
372 | * pointer when we take an exception from supervisor mode.) | ||
373 | * -- paulus. | ||
374 | */ | ||
375 | . = 0x200 | ||
376 | mtspr SPRN_SPRG0,r10 | ||
377 | mtspr SPRN_SPRG1,r11 | ||
378 | mfcr r10 | ||
379 | #ifdef CONFIG_PPC_CHRP | ||
380 | mfspr r11,SPRN_SPRG2 | ||
381 | cmpwi 0,r11,0 | ||
382 | bne 7f | ||
383 | #endif /* CONFIG_PPC_CHRP */ | ||
384 | EXCEPTION_PROLOG_1 | ||
385 | 7: EXCEPTION_PROLOG_2 | ||
386 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
387 | #ifdef CONFIG_PPC_CHRP | ||
388 | mfspr r4,SPRN_SPRG2 | ||
389 | cmpwi cr1,r4,0 | ||
390 | bne cr1,1f | ||
391 | #endif | ||
392 | EXC_XFER_STD(0x200, MachineCheckException) | ||
393 | #ifdef CONFIG_PPC_CHRP | ||
394 | 1: b machine_check_in_rtas | ||
395 | #endif | ||
396 | |||
397 | /* Data access exception. */ | ||
398 | . = 0x300 | ||
399 | #ifdef CONFIG_PPC64BRIDGE | ||
400 | b DataAccess | ||
401 | DataAccessCont: | ||
402 | #else | ||
403 | DataAccess: | ||
404 | EXCEPTION_PROLOG | ||
405 | #endif /* CONFIG_PPC64BRIDGE */ | ||
406 | mfspr r10,SPRN_DSISR | ||
407 | andis. r0,r10,0xa470 /* weird error? */ | ||
408 | bne 1f /* if not, try to put a PTE */ | ||
409 | mfspr r4,SPRN_DAR /* into the hash table */ | ||
410 | rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ | ||
411 | bl hash_page | ||
412 | 1: stw r10,_DSISR(r11) | ||
413 | mr r5,r10 | ||
414 | mfspr r4,SPRN_DAR | ||
415 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
416 | |||
417 | #ifdef CONFIG_PPC64BRIDGE | ||
418 | /* SLB fault on data access. */ | ||
419 | . = 0x380 | ||
420 | b DataSegment | ||
421 | #endif /* CONFIG_PPC64BRIDGE */ | ||
422 | |||
423 | /* Instruction access exception. */ | ||
424 | . = 0x400 | ||
425 | #ifdef CONFIG_PPC64BRIDGE | ||
426 | b InstructionAccess | ||
427 | InstructionAccessCont: | ||
428 | #else | ||
429 | InstructionAccess: | ||
430 | EXCEPTION_PROLOG | ||
431 | #endif /* CONFIG_PPC64BRIDGE */ | ||
432 | andis. r0,r9,0x4000 /* no pte found? */ | ||
433 | beq 1f /* if so, try to put a PTE */ | ||
434 | li r3,0 /* into the hash table */ | ||
435 | mr r4,r12 /* SRR0 is fault address */ | ||
436 | bl hash_page | ||
437 | 1: mr r4,r12 | ||
438 | mr r5,r9 | ||
439 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
440 | |||
441 | #ifdef CONFIG_PPC64BRIDGE | ||
442 | /* SLB fault on instruction access. */ | ||
443 | . = 0x480 | ||
444 | b InstructionSegment | ||
445 | #endif /* CONFIG_PPC64BRIDGE */ | ||
446 | |||
447 | /* External interrupt */ | ||
448 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
449 | |||
450 | /* Alignment exception */ | ||
451 | . = 0x600 | ||
452 | Alignment: | ||
453 | EXCEPTION_PROLOG | ||
454 | mfspr r4,SPRN_DAR | ||
455 | stw r4,_DAR(r11) | ||
456 | mfspr r5,SPRN_DSISR | ||
457 | stw r5,_DSISR(r11) | ||
458 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
459 | EXC_XFER_EE(0x600, AlignmentException) | ||
460 | |||
461 | /* Program check exception */ | ||
462 | EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) | ||
463 | |||
464 | /* Floating-point unavailable */ | ||
465 | . = 0x800 | ||
466 | FPUnavailable: | ||
467 | EXCEPTION_PROLOG | ||
468 | bne load_up_fpu /* if from user, just load it up */ | ||
469 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
470 | EXC_XFER_EE_LITE(0x800, KernelFP) | ||
471 | |||
472 | /* Decrementer */ | ||
473 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | ||
474 | |||
475 | EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) | ||
476 | EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) | ||
477 | |||
478 | /* System call */ | ||
479 | . = 0xc00 | ||
480 | SystemCall: | ||
481 | EXCEPTION_PROLOG | ||
482 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
483 | |||
484 | /* Single step - not used on 601 */ | ||
485 | EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) | ||
486 | EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) | ||
487 | |||
488 | /* | ||
489 | * The Altivec unavailable trap is at 0x0f20. Foo. | ||
490 | * We effectively remap it to 0x3000. | ||
491 | * We include an altivec unavailable exception vector even if | ||
492 | * not configured for Altivec, so that you can't panic a | ||
493 | * non-altivec kernel running on a machine with altivec just | ||
494 | * by executing an altivec instruction. | ||
495 | */ | ||
496 | . = 0xf00 | ||
497 | b Trap_0f | ||
498 | |||
499 | . = 0xf20 | ||
500 | b AltiVecUnavailable | ||
501 | |||
502 | Trap_0f: | ||
503 | EXCEPTION_PROLOG | ||
504 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
505 | EXC_XFER_EE(0xf00, UnknownException) | ||
506 | |||
507 | /* | ||
508 | * Handle TLB miss for instruction on 603/603e. | ||
509 | * Note: we get an alternate set of r0 - r3 to use automatically. | ||
510 | */ | ||
511 | . = 0x1000 | ||
512 | InstructionTLBMiss: | ||
513 | /* | ||
514 | * r0: stored ctr | ||
515 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
516 | * r2: ptr to linux-style pte | ||
517 | * r3: scratch | ||
518 | */ | ||
519 | mfctr r0 | ||
520 | /* Get PTE (linux-style) and check access */ | ||
521 | mfspr r3,SPRN_IMISS | ||
522 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
523 | cmplw 0,r3,r1 | ||
524 | mfspr r2,SPRN_SPRG3 | ||
525 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
526 | lwz r2,PGDIR(r2) | ||
527 | blt+ 112f | ||
528 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
529 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
530 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
531 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
532 | 112: tophys(r2,r2) | ||
533 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
534 | lwz r2,0(r2) /* get pmd entry */ | ||
535 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
536 | beq- InstructionAddressInvalid /* return if no mapping */ | ||
537 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
538 | lwz r3,0(r2) /* get linux-style pte */ | ||
539 | andc. r1,r1,r3 /* check access & ~permission */ | ||
540 | bne- InstructionAddressInvalid /* return if access not permitted */ | ||
541 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
542 | /* | ||
543 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
544 | * we would need to update the pte atomically with lwarx/stwcx. | ||
545 | */ | ||
546 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
547 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
548 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
549 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
550 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
551 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
552 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
553 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
554 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
555 | mtspr SPRN_RPA,r1 | ||
556 | mfspr r3,SPRN_IMISS | ||
557 | tlbli r3 | ||
558 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
559 | mtcrf 0x80,r3 | ||
560 | rfi | ||
561 | InstructionAddressInvalid: | ||
562 | mfspr r3,SPRN_SRR1 | ||
563 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
564 | |||
565 | addis r1,r1,0x2000 | ||
566 | mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */ | ||
567 | mtctr r0 /* Restore CTR */ | ||
568 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
569 | or r2,r2,r1 | ||
570 | mtspr SPRN_SRR1,r2 | ||
571 | mfspr r1,SPRN_IMISS /* Get failing address */ | ||
572 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
573 | rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ | ||
574 | xor r1,r1,r2 | ||
575 | mtspr SPRN_DAR,r1 /* Set fault address */ | ||
576 | mfmsr r0 /* Restore "normal" registers */ | ||
577 | xoris r0,r0,MSR_TGPR>>16 | ||
578 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
579 | mtmsr r0 | ||
580 | b InstructionAccess | ||
581 | |||
582 | /* | ||
583 | * Handle TLB miss for DATA Load operation on 603/603e | ||
584 | */ | ||
585 | . = 0x1100 | ||
586 | DataLoadTLBMiss: | ||
587 | /* | ||
588 | * r0: stored ctr | ||
589 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
590 | * r2: ptr to linux-style pte | ||
591 | * r3: scratch | ||
592 | */ | ||
593 | mfctr r0 | ||
594 | /* Get PTE (linux-style) and check access */ | ||
595 | mfspr r3,SPRN_DMISS | ||
596 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
597 | cmplw 0,r3,r1 | ||
598 | mfspr r2,SPRN_SPRG3 | ||
599 | li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ | ||
600 | lwz r2,PGDIR(r2) | ||
601 | blt+ 112f | ||
602 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
603 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
604 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
605 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
606 | 112: tophys(r2,r2) | ||
607 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
608 | lwz r2,0(r2) /* get pmd entry */ | ||
609 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
610 | beq- DataAddressInvalid /* return if no mapping */ | ||
611 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
612 | lwz r3,0(r2) /* get linux-style pte */ | ||
613 | andc. r1,r1,r3 /* check access & ~permission */ | ||
614 | bne- DataAddressInvalid /* return if access not permitted */ | ||
615 | ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ | ||
616 | /* | ||
617 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
618 | * we would need to update the pte atomically with lwarx/stwcx. | ||
619 | */ | ||
620 | stw r3,0(r2) /* update PTE (accessed bit) */ | ||
621 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
622 | rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ | ||
623 | rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ | ||
624 | and r1,r1,r2 /* writable if _RW and _DIRTY */ | ||
625 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
626 | rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ | ||
627 | ori r1,r1,0xe14 /* clear out reserved bits and M */ | ||
628 | andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ | ||
629 | mtspr SPRN_RPA,r1 | ||
630 | mfspr r3,SPRN_DMISS | ||
631 | tlbld r3 | ||
632 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
633 | mtcrf 0x80,r3 | ||
634 | rfi | ||
635 | DataAddressInvalid: | ||
636 | mfspr r3,SPRN_SRR1 | ||
637 | rlwinm r1,r3,9,6,6 /* Get load/store bit */ | ||
638 | addis r1,r1,0x2000 | ||
639 | mtspr SPRN_DSISR,r1 | ||
640 | mtctr r0 /* Restore CTR */ | ||
641 | andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ | ||
642 | mtspr SPRN_SRR1,r2 | ||
643 | mfspr r1,SPRN_DMISS /* Get failing address */ | ||
644 | rlwinm. r2,r2,0,31,31 /* Check for little endian access */ | ||
645 | beq 20f /* Jump if big endian */ | ||
646 | xori r1,r1,3 | ||
647 | 20: mtspr SPRN_DAR,r1 /* Set fault address */ | ||
648 | mfmsr r0 /* Restore "normal" registers */ | ||
649 | xoris r0,r0,MSR_TGPR>>16 | ||
650 | mtcrf 0x80,r3 /* Restore CR0 */ | ||
651 | mtmsr r0 | ||
652 | b DataAccess | ||
653 | |||
654 | /* | ||
655 | * Handle TLB miss for DATA Store on 603/603e | ||
656 | */ | ||
657 | . = 0x1200 | ||
658 | DataStoreTLBMiss: | ||
659 | /* | ||
660 | * r0: stored ctr | ||
661 | * r1: linux style pte ( later becomes ppc hardware pte ) | ||
662 | * r2: ptr to linux-style pte | ||
663 | * r3: scratch | ||
664 | */ | ||
665 | mfctr r0 | ||
666 | /* Get PTE (linux-style) and check access */ | ||
667 | mfspr r3,SPRN_DMISS | ||
668 | lis r1,KERNELBASE@h /* check if kernel address */ | ||
669 | cmplw 0,r3,r1 | ||
670 | mfspr r2,SPRN_SPRG3 | ||
671 | li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ | ||
672 | lwz r2,PGDIR(r2) | ||
673 | blt+ 112f | ||
674 | lis r2,swapper_pg_dir@ha /* if kernel address, use */ | ||
675 | addi r2,r2,swapper_pg_dir@l /* kernel page table */ | ||
676 | mfspr r1,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ | ||
677 | rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ | ||
678 | 112: tophys(r2,r2) | ||
679 | rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ | ||
680 | lwz r2,0(r2) /* get pmd entry */ | ||
681 | rlwinm. r2,r2,0,0,19 /* extract address of pte page */ | ||
682 | beq- DataAddressInvalid /* return if no mapping */ | ||
683 | rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ | ||
684 | lwz r3,0(r2) /* get linux-style pte */ | ||
685 | andc. r1,r1,r3 /* check access & ~permission */ | ||
686 | bne- DataAddressInvalid /* return if access not permitted */ | ||
687 | ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY | ||
688 | /* | ||
689 | * NOTE! We are assuming this is not an SMP system, otherwise | ||
690 | * we would need to update the pte atomically with lwarx/stwcx. | ||
691 | */ | ||
692 | stw r3,0(r2) /* update PTE (accessed/dirty bits) */ | ||
693 | /* Convert linux-style PTE to low word of PPC-style PTE */ | ||
694 | rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ | ||
695 | li r1,0xe15 /* clear out reserved bits and M */ | ||
696 | andc r1,r3,r1 /* PP = user? 2: 0 */ | ||
697 | mtspr SPRN_RPA,r1 | ||
698 | mfspr r3,SPRN_DMISS | ||
699 | tlbld r3 | ||
700 | mfspr r3,SPRN_SRR1 /* Need to restore CR0 */ | ||
701 | mtcrf 0x80,r3 | ||
702 | rfi | ||
703 | |||
704 | #ifndef CONFIG_ALTIVEC | ||
705 | #define AltivecAssistException UnknownException | ||
706 | #endif | ||
707 | |||
708 | EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) | ||
709 | EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) | ||
710 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
711 | #ifdef CONFIG_POWER4 | ||
712 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
713 | EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) | ||
714 | EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) | ||
715 | #else /* !CONFIG_POWER4 */ | ||
716 | EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) | ||
717 | EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) | ||
718 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
719 | #endif /* CONFIG_POWER4 */ | ||
720 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
721 | EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) | ||
722 | EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) | ||
723 | EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) | ||
724 | EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) | ||
725 | EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) | ||
726 | EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) | ||
727 | EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) | ||
728 | EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) | ||
729 | EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) | ||
730 | EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) | ||
731 | EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) | ||
732 | EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) | ||
733 | EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) | ||
734 | EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) | ||
735 | EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) | ||
736 | EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) | ||
737 | EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) | ||
738 | EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) | ||
739 | EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) | ||
740 | EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) | ||
741 | EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) | ||
742 | EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) | ||
743 | |||
744 | .globl mol_trampoline | ||
745 | .set mol_trampoline, i0x2f00 | ||
746 | |||
747 | . = 0x3000 | ||
748 | |||
749 | AltiVecUnavailable: | ||
750 | EXCEPTION_PROLOG | ||
751 | #ifdef CONFIG_ALTIVEC | ||
752 | bne load_up_altivec /* if from user, just load it up */ | ||
753 | #endif /* CONFIG_ALTIVEC */ | ||
754 | EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) | ||
755 | |||
756 | #ifdef CONFIG_PPC64BRIDGE | ||
757 | DataAccess: | ||
758 | EXCEPTION_PROLOG | ||
759 | b DataAccessCont | ||
760 | |||
761 | InstructionAccess: | ||
762 | EXCEPTION_PROLOG | ||
763 | b InstructionAccessCont | ||
764 | |||
765 | DataSegment: | ||
766 | EXCEPTION_PROLOG | ||
767 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
768 | mfspr r4,SPRN_DAR | ||
769 | stw r4,_DAR(r11) | ||
770 | EXC_XFER_STD(0x380, UnknownException) | ||
771 | |||
772 | InstructionSegment: | ||
773 | EXCEPTION_PROLOG | ||
774 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
775 | EXC_XFER_STD(0x480, UnknownException) | ||
776 | #endif /* CONFIG_PPC64BRIDGE */ | ||
777 | |||
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other! */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 *
 * Reached from the 0xf20 AltiVec-unavailable exception with the MMU
 * off; all task pointers are converted to physical addresses below.
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* r6 = virt-to-phys offset */
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpwi	0,r4,0
	beq	1f			/* no lazy owner: nothing to save */
	add	r4,r4,r6		/* task pointer -> physical */
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VRS(0,r10,r4)		/* save vr0-vr31 to thread_struct */
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4		/* save VSCR as well */
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r9,r9,MSR_VEC@h		/* r9: saved MSR from exception entry
					 * — presumably set by the prolog;
					 * verify against EXCEPTION_PROLOG */
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)	/* mark current as an AltiVec user */
	lvx	vr0,r10,r5
	mtvscr	vr0			/* restore current task's VSCR */
	REST_32VRS(0,r10,r5)		/* restore vr0-vr31 */
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6		/* back to a virtual task pointer */
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return
836 | |||
/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l	/* r3 = printk format string below */
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)	/* faulting PC, for the message */
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel (task=%p, pc=%x) \n"
	.align	4,0
853 | |||
/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 *
 * Called with the MMU on (r3 = virtual task pointer); r3 == 0 means
 * "no previous owner" and is a no-op.
 */

	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0			/* task may have no user regs */
	SAVE_32VRS(0, r4, r3)		/* save vr0-vr31 */
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3		/* save VSCR */
	beq	1f			/* skip MSR fixup if PT_REGS was 0 */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)	/* clear lazy-switch owner */
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */
890 | |||
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 *
 * r26 is presumably the load offset (used to locate klimit) — set up
 * by earlier startup code outside this view; confirm against _start.
 * The first 0x4000 bytes are copied before we jump into the copied
 * text (via ctr) so that the copy loop itself runs from the
 * destination while the remainder is moved.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h	/* r25 = image size (phys klimit) */
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25			/* new limit: copy up to klimit */
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu
909 | |||
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Copies one cache line per outer iteration, then dcbst+icbi so the
 * copied text is coherent in the instruction cache before it is
 * executed.  Clobbers r0 and ctr.
 */
copy_and_flush:
	addi	r5,r5,-4	/* pre-bias limit/offset: loop body uses */
	addi	r6,r6,-4	/* pre-increment (addi then lwzx/stwx)   */
4:	li	r0,L1_CACHE_LINE_SIZE/4		/* words per cache line */
	mtctr	r0
3:	addi	r6,r6,4				/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3				/* write it to memory */
	sync
	icbi	r6,r3				/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync					/* additional sync needed on g4 */
	isync
	addi	r5,r5,4		/* undo the pre-bias before returning */
	addi	r6,r6,4
	blr
935 | |||
#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 *
 * Each table entry is the virtual address of an instruction whose low
 * halfword holds the upper 16 bits of a __pa/__va constant; we patch
 * that halfword in place and flush the icache line.  Clobbers
 * r10-r15 and r8.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h    /* virt_to_phys constant */
	neg	r11,r10                  /* phys_to_virt constant */

	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10              /* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10              /* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)               /* virt address of instruction */
	add     r14,r14,r10              /* phys address of instruction */
	lwz     r15,0(r14)               /* instruction, now insert top */
	rlwimi  r15,r10,16,16,31         /* half of vp const in low half */
	stw	r15,0(r14)               /* of instruction and restore. */
	dcbst	r0,r14			 /* write it to memory */
	sync
	icbi	r0,r14			 /* flush the icache line */
	cmpw	r12,r13
	bne     1b
	sync				/* additional sync needed on g4 */
	isync

/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0
#endif
	ori	r8,r8,0x2		/* 128KB, supervisor */
	mtspr	SPRN_DBAT3U,r8		/* NOTE(review): same value written to
					 * both U and L halves — looks odd but
					 * matches the original; confirm the
					 * intended BAT encoding for APUS. */
	mtspr	SPRN_DBAT3L,r8

	/* Second pass: patch the phys_to_virt (__va) users. */
	lis	r12,__ptov_table_begin@h
	ori	r12,r12,__ptov_table_begin@l
	add	r12,r12,r10              /* table begin phys address */
	lis	r13,__ptov_table_end@h
	ori	r13,r13,__ptov_table_end@l
	add	r13,r13,r10              /* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)               /* virt address of instruction */
	add     r14,r14,r10              /* phys address of instruction */
	lwz     r15,0(r14)               /* instruction, now insert top */
	rlwimi  r15,r11,16,16,31         /* half of pv const in low half*/
	stw	r15,0(r14)               /* of instruction and restore. */
	dcbst	r0,r14			 /* write it to memory */
	sync
	icbi	r0,r14			 /* flush the icache line */
	cmpw	r12,r13
	bne     1b

	sync			/* additional sync needed on g4 */
	isync			/* No speculative loading until now */
	blr

/***********************************************************************
 *  Please note that on APUS the exception handlers are located at the
 *  physical address 0xfff0000. For this reason, the exception handlers
 *  cannot use relative branches to access the code below.
 ***********************************************************************/
#endif /* CONFIG_APUS */
1014 | |||
#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
/* Gemini board secondary-CPU entry: flush and disable the icache
 * (HID0_ICFI set, HID0_ICE cleared) before joining the common
 * secondary startup path. */
	.globl	__secondary_start_gemini
__secondary_start_gemini:
	mfspr	r4,SPRN_HID0
	ori	r4,r4,HID0_ICFI		/* icache flash invalidate */
	li	r3,0
	ori	r3,r3,HID0_ICE
	andc	r4,r4,r3		/* ... and disable icache */
	mtspr	SPRN_HID0,r4
	sync
	b	__secondary_start
#endif /* CONFIG_GEMINI */
1028 | |||
/* PowerMac secondary-CPU entry stub: the caller starts CPU k at
 * __secondary_start_pmac_0 + 8*k, which loads the CPU number into
 * r24 and falls into the common path (cpu 3 needs no branch). */
	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync
1047 | |||
/* Common secondary-CPU startup: expects the CPU number in r24 and
 * runs with the MMU off.  Copies CPU setup from CPU 0, builds this
 * CPU's stack/thread pointers, loads the MMU, and rfi's into
 * start_secondary with the MMU on. */
	.globl	__secondary_start
__secondary_start:
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
	SYNC
	MTMSRD(r0)
	isync
#endif
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h	/* reloc offset for phys-mode calls */
	mr	r4,r24
	bl	identify_cpu
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */
#ifdef CONFIG_POWER4
	lis	r3,-KERNELBASE@h
	bl	init_idle_power4
#endif /* CONFIG_POWER4 */

	/* get current_thread_info and current (set up by the boot CPU
	 * in secondary_ti before releasing us) */
	lis	r1,secondary_ti@ha
	tophys(r1,r1)
	lwz	r1,secondary_ti@l(r1)
	tophys(r2,r1)
	lwz	r2,TI_TASK(r2)

	/* stack: top of this thread's stack, with a zero back-chain */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */
1107 | |||
/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx — they satisfy the cputable
 * function pointers with no-op implementations.
 */
_GLOBAL(__setup_cpu_power3)
	blr
_GLOBAL(__setup_cpu_generic)
	blr

/* 6xx and POWER4 builds provide real save/restore implementations
 * elsewhere; everything else gets these no-ops. */
#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
_GLOBAL(__save_cpu_setup)
	blr
_GLOBAL(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */
1123 | |||
1124 | |||
/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 *
 * Flushes the TLB, programs SDR1 (hash table), initializes the 16
 * segment registers for context 0, and (except on POWER4) loads the
 * BAT registers prepared by MMU_init.  Clobbers r0, r3-r6 and ctr.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
#ifdef CONFIG_PPC64BRIDGE
	/* clear the ASR so we only use the pseudo-segment registers. */
	li	r6,0
	mtasr	r6
#endif /* CONFIG_PPC64BRIDGE */
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
#ifndef CONFIG_POWER4
/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1		/* cr0 set for 601; consumed inside
				 * LOAD_BAT — see its definition */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
#endif /* CONFIG_POWER4 */
	blr
1168 | |||
/*
 * This is where the main kernel code starts.
 *
 * Sets up 'current' (init_task), the initial stack, calls
 * machine_init/MMU_init (passing through r27-r31, presumably the
 * boot parameters saved at entry — confirm against the early entry
 * code), then drops to real mode to load the MMU and finally rfi's
 * into start_kernel with translation enabled.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* stack: top of init_thread_union, zeroed back-chain */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early bootinfo parsing, platform-specific initialization,
 * and set up the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

#ifdef CONFIG_APUS
	/* Copy exception code to exception vector base on APUS. */
	lis	r4,KERNELBASE@h
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r3,0xfff0		/* Copy to 0xfff00000 */
#else
	lis	r3,0			/* Copy to 0x00000000 */
#endif
	li	r5,0x4000		/* # bytes of memory to copy */
	li	r6,0
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
#endif /* CONFIG_APUS */

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This much match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
1255 | |||
/*
 * Set up the segment registers for a new context.
 *
 * r3 = context number, r4 = PGDIR pointer (used only for the Abatron
 * debugger hook).  The context is multiplied by a skew factor (897)
 * to spread VSIDs across the hash table, then each of the
 * NUM_USER_SEGMENTS user segment registers gets a consecutive VSID
 * (stride 0x111, same stride as load_up_mmu).
 */
_GLOBAL(set_context)
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)	/* abatron_pteptrs, stashed at KERNELBASE+0xf0 */
	stw	r4, 0x4(r5)
#endif
	li	r4,0
	isync
3:
#ifdef CONFIG_PPC64BRIDGE
	slbie	r4		/* also invalidate the SLB entry */
#endif /* CONFIG_PPC64BRIDGE */
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
1288 | |||
/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 *
 * Zeroes all BAT registers.  The 601 (PVR upper half == 1) has no
 * separate DBATs, so the DBAT writes are skipped on it.  The high
 * BATs (4-7) are only cleared on CPUs with CPU_FTR_HAS_HIGH_BATS.
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f			/* 601: no DBATs to clear */

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup have
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	blr
1346 | |||
/* Invalidate TLB entries covering effective addresses 0 .. 0x3ff000,
 * one 4KB page at a time (counts r10 down from 0x400000).  Clobbers
 * r10 and cr0. */
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000	/* next lower page; sets cr0 */
	tlbie	r10
	blt	1b			/* loop until r10 goes negative */
	sync
	blr
1354 | |||
/* Turn address translation off and resume at __after_mmu_off.
 * r3 is presumably the physical address of _start (set by the
 * caller — confirm against the early entry code).  If the MMU is
 * already off, returns via blr instead of rfi. */
mmu_off:
 	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr					/* already off: just return */
	andc	r3,r3,r0			/* clear IR and DR */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI
1365 | |||
#ifndef CONFIG_POWER4
/*
 * Use the first pair of BAT registers to map the 1st 16MB
 * of RAM to KERNELBASE.  From this point on we can't safely
 * call OF any more.
 *
 * The 601 path uses two 8MB IBAT pairs (the 601 keeps the valid bit
 * in the lower BAT register); other 6xx CPUs get a single 256MB
 * (8MB on APUS) I+D BAT pair.
 */
initial_bats:
	lis	r11,KERNELBASE@h
#ifndef CONFIG_PPC64BRIDGE
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f			/* not a 601: common path below */
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	mtspr	SPRN_IBAT1U,r9
	mtspr	SPRN_IBAT1L,r10
	isync
	blr
#endif /* CONFIG_PPC64BRIDGE */

4:	tophys(r8,r11)			/* BAT lower word: phys base ... */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
#ifdef CONFIG_APUS
	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
#else
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
#endif /* CONFIG_APUS */

#ifdef CONFIG_PPC64BRIDGE
	/* clear out the high 32 bits in the BAT */
	clrldi	r11,r11,32
	clrldi	r8,r8,32
#endif /* CONFIG_PPC64BRIDGE */
	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
1414 | |||
#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 *
	 * disp_BAT holds the {upper, lower} BAT words mapping the
	 * framebuffer.  The 601 (PVR high half == 1) takes them in
	 * IBAT3; everything else in DBAT3.
	 */
	mflr	r8			/* reloc_offset clobbers lr */
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	lwz	r11,0(r8)		/* upper BAT word */
	lwz	r8,4(r8)		/* lower BAT word */
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	beq	1f
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
1:	mtspr	SPRN_IBAT3L,r8
	mtspr	SPRN_IBAT3U,r11
	blr

#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
1439 | |||
#else /* CONFIG_POWER4 */
/*
 * Load up the SDR1 and segment register values now
 * since we don't have the BATs.
 * Also make sure we are running in 32-bit mode.
 *
 * r3 is presumably the relocation offset (used to read _SDR1 before
 * translation is on — confirm against the caller).
 */

initial_mm_power4:
	addis	r14,r3,_SDR1@ha		/* get the value from _SDR1 */
	lwz	r14,_SDR1@l(r14)	/* assume hash table below 4GB */
	mtspr	SPRN_SDR1,r14
	slbia
	lis	r4,0x2000		/* set pseudo-segment reg 12 */
	ori	r5,r4,0x0ccc
	mtsr	12,r5
#if 0
	ori	r5,r4,0x0888		/* set pseudo-segment reg 8 */
	mtsr	8,r5			/* (for access to serial port) */
#endif
#ifdef CONFIG_BOOTX_TEXT
	ori	r5,r4,0x0999		/* set pseudo-segment reg 9 */
	mtsr	9,r5			/* (for access to screen) */
#endif
	mfmsr	r0
	clrldi	r0,r0,1			/* clear MSR[SF]: 32-bit mode */
	sync
	mtmsr	r0
	isync
	blr

#endif /* CONFIG_POWER4 */
1471 | |||
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	/* Disable both caches via HID0 before handing off to the ROM. */
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	/* rfi to label 2 (physical address) with the MMU off. */
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4		/* jump to the requested start address */
	blr
#endif
1506 | |||

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	/* A page of zeroes, exported as empty_zero_page. */
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	/* The kernel's initial page directory (one page). */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

	/* Table of per-vector intercept handlers, indexed by vector
	 * number / 0x100; zero entries mean "no intercept".  Consumers
	 * are outside this view. */
	.globl	intercept_table
intercept_table:
	.long	0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
	.long	i0x800, 0, 0, 0, 0, i0xd00, 0, 0
	.long	0, 0, 0, i0x1300, 0, 0, 0, 0
	.long	0, 0, 0, 0, 0, 0, 0, 0
	.long	0, 0, 0, 0, 0, 0, 0, 0
	.long	0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 * Read by the Abatron BDI2000 debugger; see the CONFIG_BDI_SWITCH
 * code in start_here/set_context.
 */
abatron_pteptrs:
	.space	8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S new file mode 100644 index 000000000000..599245b0407e --- /dev/null +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -0,0 +1,778 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_44x.S | ||
3 | * | ||
4 | * Kernel execution entry point code. | ||
5 | * | ||
6 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
7 | * Initial PowerPC version. | ||
8 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Rewritten for PReP | ||
10 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
11 | * Low-level exception handlers, MMU support, and rewrite. | ||
12 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
13 | * PowerPC 8xx modifications. | ||
14 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
15 | * PowerPC 403GCX modifications. | ||
16 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
17 | * PowerPC 403GCX/405GP modifications. | ||
18 | * Copyright 2000 MontaVista Software Inc. | ||
19 | * PPC405 modifications | ||
20 | * PowerPC 403GCX/405GP modifications. | ||
21 | * Author: MontaVista Software, Inc. | ||
22 | * frank_rowand@mvista.com or source@mvista.com | ||
23 | * debbie_chu@mvista.com | ||
24 | * Copyright 2002-2005 MontaVista Software, Inc. | ||
25 | * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or modify it | ||
28 | * under the terms of the GNU General Public License as published by the | ||
29 | * Free Software Foundation; either version 2 of the License, or (at your | ||
30 | * option) any later version. | ||
31 | */ | ||
32 | |||
33 | #include <linux/config.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/page.h> | ||
36 | #include <asm/mmu.h> | ||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/ibm4xx.h> | ||
39 | #include <asm/ibm44x.h> | ||
40 | #include <asm/cputable.h> | ||
41 | #include <asm/thread_info.h> | ||
42 | #include <asm/ppc_asm.h> | ||
43 | #include <asm/asm-offsets.h> | ||
44 | #include "head_booke.h" | ||
45 | |||
46 | |||
47 | /* As with the other PowerPC ports, it is expected that when code | ||
48 | * execution begins here, the following registers contain valid, yet | ||
49 | * optional, information: | ||
50 | * | ||
51 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
52 | * r4 - Starting address of the init RAM disk | ||
53 | * r5 - Ending address of the init RAM disk | ||
54 | * r6 - Start of kernel command line string (e.g. "mem=128") | ||
55 | * r7 - End of kernel command line string | ||
56 | * | ||
57 | */ | ||
58 | .text | ||
59 | _GLOBAL(_stext) | ||
60 | _GLOBAL(_start) | ||
61 | /* | ||
62 | * Reserve a word at a fixed location to store the address | ||
63 | * of abatron_pteptrs | ||
64 | */ | ||
65 | nop | ||
66 | /* | ||
67 | * Save parameters we are passed (see register contract above); | ||
67 | * stash them in callee-saved registers so they survive the | ||
67 | * early_init/machine_init calls below. | ||
68 | */ | ||
69 | mr r31,r3 /* board info structure pointer */ | ||
70 | mr r30,r4 /* start of init RAM disk */ | ||
71 | mr r29,r5 /* end of init RAM disk */ | ||
72 | mr r28,r6 /* start of kernel command line */ | ||
73 | mr r27,r7 /* end of kernel command line */ | ||
74 | li r24,0 /* CPU number */ | ||
75 | |||
76 | /* | ||
77 | * Set up the initial MMU state | ||
78 | * | ||
79 | * We are still executing code at the virtual address | ||
80 | * mappings set by the firmware for the base of RAM. | ||
81 | * | ||
82 | * We first invalidate all TLB entries but the one | ||
83 | * we are running from. We then load the KERNELBASE | ||
84 | * mappings so we can begin to use kernel addresses | ||
85 | * natively and so the interrupt vector locations are | ||
86 | * permanently pinned (necessary since Book E | ||
87 | * implementations always have translation enabled). | ||
88 | * | ||
89 | * TODO: Use the known TLB entry we are running from to | ||
90 | * determine which physical region we are located | ||
91 | * in. This can be used to determine where in RAM | ||
92 | * (on a shared CPU system) or PCI memory space | ||
93 | * (on a DRAMless system) we are located. | ||
94 | * For now, we assume a perfect world which means | ||
95 | * we are located at the base of DRAM (physical 0). | ||
96 | */ | ||
97 | |||
98 | /* | ||
99 | * Search TLB for entry that we are currently using. | ||
100 | * Invalidate all entries but the one we are using. | ||
101 | */ | ||
102 | /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */ | ||
103 | mfspr r3,SPRN_PID /* Get PID */ | ||
104 | mfmsr r4 /* Get MSR */ | ||
105 | andi. r4,r4,MSR_IS@l /* TS=1? */ | ||
106 | beq wmmucr /* If not, leave STS=0 */ | ||
107 | oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */ | ||
108 | wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */ | ||
109 | sync | ||
110 | |||
111 | bl invstr /* Find our address */ | ||
112 | invstr: mflr r5 /* Make it accessible */ | ||
113 | tlbsx r23,0,r5 /* Find entry we are in; r23 = index of entry mapping our PC */ | ||
114 | li r4,0 /* Start at TLB entry 0 */ | ||
115 | li r3,0 /* Set PAGEID inval value */ | ||
116 | 1: cmpw r23,r4 /* Is this our entry? */ | ||
117 | beq skpinv /* If so, skip the inval */ | ||
118 | tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ | ||
119 | skpinv: addi r4,r4,1 /* Increment */ | ||
120 | cmpwi r4,64 /* Are we done? (44x has 64 TLB entries) */ | ||
121 | bne 1b /* If not, repeat */ | ||
122 | isync /* If so, context change */ | ||
123 | |||
124 | /* | ||
125 | * Configure and load pinned entry into TLB slot 63. | ||
126 | */ | ||
127 | |||
128 | lis r3,KERNELBASE@h /* Load the kernel virtual address */ | ||
129 | ori r3,r3,KERNELBASE@l | ||
130 | |||
131 | /* Kernel is at the base of RAM */ | ||
132 | li r4, 0 /* Load the kernel physical address */ | ||
133 | |||
134 | /* Load the kernel PID = 0 */ | ||
135 | li r0,0 | ||
136 | mtspr SPRN_PID,r0 | ||
137 | sync | ||
138 | |||
139 | /* Initialize MMUCR */ | ||
140 | li r5,0 | ||
141 | mtspr SPRN_MMUCR,r5 | ||
142 | sync | ||
143 | |||
144 | /* pageid fields */ | ||
145 | clrrwi r3,r3,10 /* Mask off the effective page number */ | ||
146 | ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M | ||
147 | |||
148 | /* xlat fields */ | ||
149 | clrrwi r4,r4,10 /* Mask off the real page number */ | ||
150 | /* ERPN is 0 for first 4GB page */ | ||
151 | |||
152 | /* attrib fields */ | ||
153 | /* Added guarded bit to protect against speculative loads/stores */ | ||
154 | li r5,0 | ||
155 | ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) | ||
156 | |||
157 | li r0,63 /* TLB slot 63 */ | ||
158 | |||
159 | tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | ||
160 | tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | ||
161 | tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | ||
162 | |||
163 | /* Force context change */ | ||
164 | mfmsr r0 /* keep the current MSR across the rfi */ | ||
165 | mtspr SPRN_SRR1, r0 | ||
166 | lis r0,3f@h | ||
167 | ori r0,r0,3f@l | ||
168 | mtspr SPRN_SRR0,r0 | ||
169 | sync | ||
170 | rfi /* resume at 3: through the new pinned mapping */ | ||
171 | |||
172 | /* If necessary, invalidate original entry we used */ | ||
173 | 3: cmpwi r23,63 | ||
174 | beq 4f | ||
175 | li r6,0 | ||
176 | tlbwe r6,r23,PPC44x_TLB_PAGEID | ||
177 | isync | ||
178 | |||
179 | 4: | ||
180 | #ifdef CONFIG_SERIAL_TEXT_DEBUG | ||
181 | /* | ||
182 | * Add temporary UART mapping for early debug. | ||
183 | * We can map UART registers wherever we want as long as they don't | ||
184 | * interfere with other system mappings (e.g. with pinned entries). | ||
185 | * For an example of how we handle this - see ocotea.h. --ebs | ||
186 | */ | ||
187 | /* pageid fields */ | ||
188 | lis r3,UART0_IO_BASE@h | ||
189 | ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K | ||
190 | |||
191 | /* xlat fields */ | ||
192 | lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */ | ||
193 | #ifndef CONFIG_440EP | ||
194 | ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */ | ||
195 | #endif | ||
196 | |||
197 | /* attrib fields */ | ||
198 | li r5,0 | ||
199 | ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G) /* writable I/O page; I/G presumably cache-inhibited+guarded — confirm against mmu.h */ | ||
200 | |||
201 | li r0,0 /* TLB slot 0 */ | ||
202 | |||
203 | tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */ | ||
204 | tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */ | ||
205 | tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */ | ||
206 | |||
207 | /* Force context change */ | ||
208 | isync | ||
209 | #endif /* CONFIG_SERIAL_TEXT_DEBUG */ | ||
210 | |||
211 | /* Establish the interrupt vector offsets (IVORn = offset of each handler below) */ | ||
212 | SET_IVOR(0, CriticalInput); | ||
213 | SET_IVOR(1, MachineCheck); | ||
214 | SET_IVOR(2, DataStorage); | ||
215 | SET_IVOR(3, InstructionStorage); | ||
216 | SET_IVOR(4, ExternalInput); | ||
217 | SET_IVOR(5, Alignment); | ||
218 | SET_IVOR(6, Program); | ||
219 | SET_IVOR(7, FloatingPointUnavailable); | ||
220 | SET_IVOR(8, SystemCall); | ||
221 | SET_IVOR(9, AuxillaryProcessorUnavailable); | ||
222 | SET_IVOR(10, Decrementer); | ||
223 | SET_IVOR(11, FixedIntervalTimer); | ||
224 | SET_IVOR(12, WatchdogTimer); | ||
225 | SET_IVOR(13, DataTLBError); | ||
226 | SET_IVOR(14, InstructionTLBError); | ||
227 | SET_IVOR(15, Debug); | ||
228 | |||
229 | /* Establish the interrupt vector base */ | ||
230 | lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ | ||
231 | mtspr SPRN_IVPR,r4 | ||
232 | |||
233 | #ifdef CONFIG_440EP | ||
234 | /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */ | ||
235 | mfspr r2,SPRN_CCR0 | ||
236 | lis r3,0xffef | ||
237 | ori r3,r3,0xffff /* mask = 0xffefffff: clears only the 0x00100000 (DAPUIB) bit */ | ||
238 | and r2,r2,r3 | ||
239 | mtspr SPRN_CCR0,r2 | ||
240 | isync | ||
241 | #endif | ||
242 | |||
243 | /* | ||
244 | * This is where the main kernel code starts. | ||
245 | */ | ||
246 | |||
247 | /* ptr to current */ | ||
248 | lis r2,init_task@h | ||
249 | ori r2,r2,init_task@l | ||
250 | |||
251 | /* ptr to current thread */ | ||
252 | addi r4,r2,THREAD /* init task's THREAD */ | ||
253 | mtspr SPRN_SPRG3,r4 | ||
254 | |||
255 | /* stack */ | ||
256 | lis r1,init_thread_union@h | ||
257 | ori r1,r1,init_thread_union@l | ||
258 | li r0,0 | ||
259 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) /* zero back-chain, sp near top of init stack */ | ||
260 | |||
261 | bl early_init | ||
262 | |||
263 | /* | ||
264 | * Decide what sort of machine this is and initialize the MMU. | ||
265 | */ | ||
266 | mr r3,r31 /* restore the boot parameters saved at entry */ | ||
267 | mr r4,r30 | ||
268 | mr r5,r29 | ||
269 | mr r6,r28 | ||
270 | mr r7,r27 | ||
271 | bl machine_init | ||
272 | bl MMU_init | ||
273 | |||
274 | /* Setup PTE pointers for the Abatron bdiGDB */ | ||
275 | lis r6, swapper_pg_dir@h | ||
276 | ori r6, r6, swapper_pg_dir@l | ||
277 | lis r5, abatron_pteptrs@h | ||
278 | ori r5, r5, abatron_pteptrs@l | ||
279 | lis r4, KERNELBASE@h | ||
280 | ori r4, r4, KERNELBASE@l | ||
281 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ | ||
282 | stw r6, 0(r5) /* first slot: kernel pgdir (swapper_pg_dir) */ | ||
283 | |||
284 | /* Let's move on */ | ||
285 | lis r4,start_kernel@h | ||
286 | ori r4,r4,start_kernel@l | ||
287 | lis r3,MSR_KERNEL@h | ||
288 | ori r3,r3,MSR_KERNEL@l | ||
289 | mtspr SPRN_SRR0,r4 | ||
290 | mtspr SPRN_SRR1,r3 | ||
291 | rfi /* change context and jump to start_kernel */ | ||
292 | |||
293 | /* | ||
294 | * Interrupt vector entry code | ||
295 | * | ||
296 | * The Book E MMUs are always on so we don't need to handle | ||
297 | * interrupts in real mode as with previous PPC processors. In | ||
298 | * this case we handle interrupts in the kernel virtual address | ||
299 | * space. | ||
300 | * | ||
301 | * Interrupt vectors are dynamically placed relative to the | ||
302 | * interrupt prefix as determined by the address of interrupt_base. | ||
303 | * The interrupt vectors offsets are programmed using the labels | ||
304 | * for each interrupt vector entry. | ||
305 | * | ||
306 | * Interrupt vectors must be aligned on a 16 byte boundary. | ||
307 | * We align on a 32 byte cache line boundary for good measure. | ||
308 | */ | ||
309 | |||
310 | interrupt_base: /* base of dynamically-placed vectors; loaded into IVPR above */ | ||
311 | /* Critical Input Interrupt */ | ||
312 | CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) | ||
313 | |||
314 | /* Machine Check Interrupt */ | ||
315 | #ifdef CONFIG_440A | ||
316 | MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
317 | #else | ||
318 | CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) | ||
319 | #endif | ||
320 | |||
321 | /* Data Storage Interrupt */ | ||
322 | START_EXCEPTION(DataStorage) /* write-fault fast path: mark PTE dirty/accessed and rewrite the TLB attribs, else punt to data_access */ | ||
323 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
324 | mtspr SPRN_SPRG1, r11 | ||
325 | mtspr SPRN_SPRG4W, r12 | ||
326 | mtspr SPRN_SPRG5W, r13 | ||
327 | mfcr r11 | ||
328 | mtspr SPRN_SPRG7W, r11 | ||
329 | |||
330 | /* | ||
331 | * Check if it was a store fault, if not then bail | ||
332 | * because a user tried to access a kernel or | ||
333 | * read-protected page. Otherwise, get the | ||
334 | * offending address and handle it. | ||
335 | */ | ||
336 | mfspr r10, SPRN_ESR | ||
337 | andis. r10, r10, ESR_ST@h | ||
338 | beq 2f | ||
339 | |||
340 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
341 | |||
342 | /* If we are faulting a kernel address, we have to use the | ||
343 | * kernel page tables. | ||
344 | */ | ||
345 | lis r11, TASK_SIZE@h | ||
346 | cmplw r10, r11 | ||
347 | blt+ 3f | ||
348 | lis r11, swapper_pg_dir@h | ||
349 | ori r11, r11, swapper_pg_dir@l | ||
350 | |||
351 | mfspr r12,SPRN_MMUCR | ||
352 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
353 | |||
354 | b 4f | ||
355 | |||
356 | /* Get the PGD for the current thread */ | ||
357 | 3: | ||
358 | mfspr r11,SPRN_SPRG3 | ||
359 | lwz r11,PGDIR(r11) | ||
360 | |||
361 | /* Load PID into MMUCR TID */ | ||
362 | mfspr r12,SPRN_MMUCR /* Get MMUCR */ | ||
363 | mfspr r13,SPRN_PID /* Get PID */ | ||
364 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
365 | |||
366 | 4: | ||
367 | mtspr SPRN_MMUCR,r12 | ||
368 | |||
369 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
370 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
371 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
372 | beq 2f /* Bail if no table */ | ||
373 | |||
374 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
375 | lwz r11, 4(r12) /* Get pte entry (low word of 64-bit PTE) */ | ||
376 | |||
377 | andi. r13, r11, _PAGE_RW /* Is it writeable? */ | ||
378 | beq 2f /* Bail if not */ | ||
379 | |||
380 | /* Update 'changed'. | ||
381 | */ | ||
382 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
383 | stw r11, 4(r12) /* Update Linux page table */ | ||
384 | |||
385 | li r13, PPC44x_TLB_SR@l /* Set SR */ | ||
386 | rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
387 | rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */ | ||
388 | rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */ | ||
389 | rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */ | ||
390 | rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */ | ||
391 | and r12, r12, r11 /* HWEXEC/RW & USER */ | ||
392 | rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
393 | rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */ | ||
394 | |||
395 | rlwimi r11,r13,0,26,31 /* Insert static perms */ | ||
396 | |||
397 | rlwinm r11,r11,0,20,15 /* Clear U0-U3 */ | ||
398 | |||
399 | /* find the TLB index that caused the fault. It has to be here. */ | ||
400 | tlbsx r10, 0, r10 | ||
401 | |||
402 | tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
403 | |||
404 | /* Done...restore registers and get out of here. | ||
405 | */ | ||
406 | mfspr r11, SPRN_SPRG7R | ||
407 | mtcr r11 | ||
408 | mfspr r13, SPRN_SPRG5R | ||
409 | mfspr r12, SPRN_SPRG4R | ||
410 | |||
411 | mfspr r11, SPRN_SPRG1 | ||
412 | mfspr r10, SPRN_SPRG0 | ||
413 | rfi /* Force context change */ | ||
414 | |||
415 | 2: | ||
416 | /* | ||
417 | * The bailout. Restore registers to pre-exception conditions | ||
418 | * and call the heavyweights to help us out. | ||
419 | */ | ||
420 | mfspr r11, SPRN_SPRG7R | ||
421 | mtcr r11 | ||
422 | mfspr r13, SPRN_SPRG5R | ||
423 | mfspr r12, SPRN_SPRG4R | ||
424 | |||
425 | mfspr r11, SPRN_SPRG1 | ||
426 | mfspr r10, SPRN_SPRG0 | ||
427 | b data_access | ||
428 | |||
429 | /* Instruction Storage Interrupt */ | ||
430 | INSTRUCTION_STORAGE_EXCEPTION | ||
431 | |||
432 | /* External Input Interrupt */ | ||
433 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | ||
434 | |||
435 | /* Alignment Interrupt */ | ||
436 | ALIGNMENT_EXCEPTION | ||
437 | |||
438 | /* Program Interrupt */ | ||
439 | PROGRAM_EXCEPTION | ||
440 | |||
441 | /* Floating Point Unavailable Interrupt */ | ||
442 | #ifdef CONFIG_PPC_FPU | ||
443 | FP_UNAVAILABLE_EXCEPTION | ||
444 | #else | ||
445 | EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | ||
446 | #endif | ||
447 | |||
448 | /* System Call Interrupt */ | ||
449 | START_EXCEPTION(SystemCall) | ||
450 | NORMAL_EXCEPTION_PROLOG | ||
451 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | ||
452 | |||
453 | /* Auxiliary Processor Unavailable Interrupt (label keeps historical spelling) */ | ||
454 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) | ||
455 | |||
456 | /* Decrementer Interrupt */ | ||
457 | DECREMENTER_EXCEPTION | ||
458 | |||
459 | /* Fixed Interval Timer Interrupt */ | ||
460 | /* TODO: Add FIT support */ | ||
461 | EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) | ||
462 | |||
463 | /* Watchdog Timer Interrupt */ | ||
464 | /* TODO: Add watchdog support */ | ||
465 | #ifdef CONFIG_BOOKE_WDT | ||
466 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) | ||
467 | #else | ||
468 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) | ||
469 | #endif | ||
470 | |||
471 | /* Data TLB Error Interrupt */ | ||
472 | START_EXCEPTION(DataTLBError) /* data TLB miss: walk the Linux page table, then fall into finish_tlb_load */ | ||
473 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
474 | mtspr SPRN_SPRG1, r11 | ||
475 | mtspr SPRN_SPRG4W, r12 | ||
476 | mtspr SPRN_SPRG5W, r13 | ||
477 | mfcr r11 | ||
478 | mtspr SPRN_SPRG7W, r11 | ||
479 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
480 | |||
481 | /* If we are faulting a kernel address, we have to use the | ||
482 | * kernel page tables. | ||
483 | */ | ||
484 | lis r11, TASK_SIZE@h | ||
485 | cmplw r10, r11 | ||
486 | blt+ 3f | ||
487 | lis r11, swapper_pg_dir@h | ||
488 | ori r11, r11, swapper_pg_dir@l | ||
489 | |||
490 | mfspr r12,SPRN_MMUCR | ||
491 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
492 | |||
493 | b 4f | ||
494 | |||
495 | /* Get the PGD for the current thread */ | ||
496 | 3: | ||
497 | mfspr r11,SPRN_SPRG3 | ||
498 | lwz r11,PGDIR(r11) | ||
499 | |||
500 | /* Load PID into MMUCR TID */ | ||
501 | mfspr r12,SPRN_MMUCR | ||
502 | mfspr r13,SPRN_PID /* Get PID */ | ||
503 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
504 | |||
505 | 4: | ||
506 | mtspr SPRN_MMUCR,r12 | ||
507 | |||
508 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
509 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
510 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
511 | beq 2f /* Bail if no table */ | ||
512 | |||
513 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
514 | lwz r11, 4(r12) /* Get pte entry (low word) */ | ||
515 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
516 | beq 2f /* Bail if not present */ | ||
517 | |||
518 | ori r11, r11, _PAGE_ACCESSED /* mark referenced */ | ||
519 | stw r11, 4(r12) | ||
520 | |||
521 | /* Jump to common tlb load */ | ||
522 | b finish_tlb_load | ||
523 | |||
524 | 2: | ||
525 | /* The bailout. Restore registers to pre-exception conditions | ||
526 | * and call the heavyweights to help us out. | ||
527 | */ | ||
528 | mfspr r11, SPRN_SPRG7R | ||
529 | mtcr r11 | ||
530 | mfspr r13, SPRN_SPRG5R | ||
531 | mfspr r12, SPRN_SPRG4R | ||
532 | mfspr r11, SPRN_SPRG1 | ||
533 | mfspr r10, SPRN_SPRG0 | ||
534 | b data_access | ||
535 | |||
536 | /* Instruction TLB Error Interrupt */ | ||
537 | /* | ||
538 | * Nearly the same as above, except we get our | ||
539 | * information from different registers and bailout | ||
540 | * to a different point. | ||
541 | */ | ||
542 | START_EXCEPTION(InstructionTLBError) /* instruction TLB miss: same walk as DataTLBError, faulting EA comes from SRR0 */ | ||
543 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
544 | mtspr SPRN_SPRG1, r11 | ||
545 | mtspr SPRN_SPRG4W, r12 | ||
546 | mtspr SPRN_SPRG5W, r13 | ||
547 | mfcr r11 | ||
548 | mtspr SPRN_SPRG7W, r11 | ||
549 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
550 | |||
551 | /* If we are faulting a kernel address, we have to use the | ||
552 | * kernel page tables. | ||
553 | */ | ||
554 | lis r11, TASK_SIZE@h | ||
555 | cmplw r10, r11 | ||
556 | blt+ 3f | ||
557 | lis r11, swapper_pg_dir@h | ||
558 | ori r11, r11, swapper_pg_dir@l | ||
559 | |||
560 | mfspr r12,SPRN_MMUCR | ||
561 | rlwinm r12,r12,0,0,23 /* Clear TID */ | ||
562 | |||
563 | b 4f | ||
564 | |||
565 | /* Get the PGD for the current thread */ | ||
566 | 3: | ||
567 | mfspr r11,SPRN_SPRG3 | ||
568 | lwz r11,PGDIR(r11) | ||
569 | |||
570 | /* Load PID into MMUCR TID */ | ||
571 | mfspr r12,SPRN_MMUCR | ||
572 | mfspr r13,SPRN_PID /* Get PID */ | ||
573 | rlwimi r12,r13,0,24,31 /* Set TID */ | ||
574 | |||
575 | 4: | ||
576 | mtspr SPRN_MMUCR,r12 | ||
577 | |||
578 | rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */ | ||
579 | lwzx r11, r12, r11 /* Get pgd/pmd entry */ | ||
580 | rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */ | ||
581 | beq 2f /* Bail if no table */ | ||
582 | |||
583 | rlwimi r12, r10, 23, 20, 28 /* Compute pte address */ | ||
584 | lwz r11, 4(r12) /* Get pte entry (low word) */ | ||
585 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
586 | beq 2f /* Bail if not present */ | ||
587 | |||
588 | ori r11, r11, _PAGE_ACCESSED /* mark referenced */ | ||
589 | stw r11, 4(r12) | ||
590 | |||
591 | /* Jump to common TLB load point */ | ||
592 | b finish_tlb_load | ||
593 | |||
594 | 2: | ||
595 | /* The bailout. Restore registers to pre-exception conditions | ||
596 | * and call the heavyweights to help us out. | ||
597 | */ | ||
598 | mfspr r11, SPRN_SPRG7R | ||
599 | mtcr r11 | ||
600 | mfspr r13, SPRN_SPRG5R | ||
601 | mfspr r12, SPRN_SPRG4R | ||
602 | mfspr r11, SPRN_SPRG1 | ||
603 | mfspr r10, SPRN_SPRG0 | ||
604 | b InstructionStorage | ||
605 | |||
606 | /* Debug Interrupt */ | ||
607 | DEBUG_EXCEPTION /* macro presumably from head_booke.h (included above) — confirm */ | ||
608 | |||
609 | /* | ||
610 | * Local functions | ||
611 | */ | ||
612 | /* | ||
613 | * Data TLB exceptions will bail out to this point | ||
614 | * if they can't resolve the lightweight TLB fault. | ||
615 | */ | ||
616 | data_access: /* slow path: hand the fault to the generic C page-fault handler */ | ||
617 | NORMAL_EXCEPTION_PROLOG | ||
618 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
619 | stw r5,_ESR(r11) | ||
620 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
621 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
622 | |||
623 | /* | ||
624 | |||
625 | * Both the instruction and data TLB miss get to this | ||
626 | * point to load the TLB. | ||
627 | * r10 - EA of fault | ||
628 | * r11 - available to use | ||
629 | * r12 - Pointer to the 64-bit PTE | ||
630 | * r13 - available to use | ||
631 | * MMUCR - loaded with proper value when we get here | ||
632 | * Upon exit, we reload everything and RFI. | ||
633 | */ | ||
634 | finish_tlb_load: | ||
635 | /* | ||
636 | * We set execute, because we don't have the granularity to | ||
637 | * properly set this at the page level (Linux problem). | ||
638 | * If shared is set, we cause a zero PID->TID load. | ||
639 | * Many of these bits are software only. Bits we don't set | ||
640 | * here we (properly should) assume have the appropriate value. | ||
641 | */ | ||
642 | |||
643 | /* Load the next available TLB index */ | ||
644 | lis r13, tlb_44x_index@ha | ||
645 | lwz r13, tlb_44x_index@l(r13) | ||
646 | /* Load the TLB high watermark */ | ||
647 | lis r11, tlb_44x_hwater@ha | ||
648 | lwz r11, tlb_44x_hwater@l(r11) | ||
649 | |||
650 | /* Increment, rollover, and store TLB index (round-robin replacement below the watermark) */ | ||
651 | addi r13, r13, 1 | ||
652 | cmpw 0, r13, r11 /* reserve entries */ | ||
653 | ble 7f | ||
654 | li r13, 0 | ||
655 | 7: | ||
656 | /* Store the next available TLB index */ | ||
657 | lis r11, tlb_44x_index@ha | ||
658 | stw r13, tlb_44x_index@l(r11) | ||
659 | |||
660 | lwz r11, 0(r12) /* Get MS word of PTE */ | ||
661 | lwz r12, 4(r12) /* Get LS word of PTE */ | ||
662 | rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */ | ||
663 | tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */ | ||
664 | |||
665 | /* | ||
666 | * Create PAGEID. This is the faulting address, | ||
667 | * page size, and valid flag. | ||
668 | */ | ||
669 | li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K | ||
670 | rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */ | ||
671 | tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */ | ||
672 | |||
673 | li r10, PPC44x_TLB_SR@l /* Set SR */ | ||
674 | rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */ | ||
675 | rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */ | ||
676 | rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */ | ||
677 | rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1) -> r11 bit 26 */ | ||
678 | and r11, r12, r11 /* HWEXEC & USER */ | ||
679 | rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ | ||
680 | |||
681 | rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ | ||
682 | rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ | ||
683 | tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ | ||
684 | |||
685 | /* Done...restore registers and get out of here. | ||
686 | */ | ||
687 | mfspr r11, SPRN_SPRG7R | ||
688 | mtcr r11 | ||
689 | mfspr r13, SPRN_SPRG5R | ||
690 | mfspr r12, SPRN_SPRG4R | ||
691 | mfspr r11, SPRN_SPRG1 | ||
692 | mfspr r10, SPRN_SPRG0 | ||
693 | rfi /* Force context change */ | ||
694 | |||
695 | /* | ||
696 | * Global functions | ||
697 | */ | ||
698 | |||
699 | /* | ||
700 | * extern void giveup_altivec(struct task_struct *prev) | ||
701 | * | ||
702 | * The 44x core does not have an AltiVec unit. | ||
703 | */ | ||
704 | _GLOBAL(giveup_altivec) | ||
705 | blr /* no-op stub: nothing to give up */ | ||
706 | |||
707 | /* | ||
708 | * extern void giveup_fpu(struct task_struct *prev) | ||
709 | * | ||
710 | * The 44x core does not have an FPU. | ||
711 | */ | ||
712 | #ifndef CONFIG_PPC_FPU | ||
713 | _GLOBAL(giveup_fpu) | ||
714 | blr /* no-op stub when no FPU is configured */ | ||
715 | #endif | ||
716 | |||
717 | /* | ||
718 | * extern void abort(void) | ||
719 | * | ||
720 | * At present, this routine just applies a system reset. | ||
721 | */ | ||
722 | _GLOBAL(abort) | ||
723 | mfspr r13,SPRN_DBCR0 | ||
724 | oris r13,r13,DBCR0_RST_SYSTEM@h | ||
725 | mtspr SPRN_DBCR0,r13 /* NOTE(review): no blr — presumably the system reset takes effect before control could return; confirm */ | ||
726 | |||
727 | _GLOBAL(set_context) /* set_context(pid, pgdir): switch MMU to a new address-space PID */ | ||
728 | |||
729 | #ifdef CONFIG_BDI_SWITCH | ||
730 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
731 | * The PGDIR is the second parameter. | ||
732 | */ | ||
733 | lis r5, abatron_pteptrs@h | ||
734 | ori r5, r5, abatron_pteptrs@l | ||
735 | stw r4, 0x4(r5) /* second slot: current user pgdir */ | ||
736 | #endif | ||
737 | mtspr SPRN_PID,r3 | ||
738 | isync /* Force context change */ | ||
739 | blr | ||
740 | |||
741 | /* | ||
742 | * We put a few things here that have to be page-aligned. This stuff | ||
743 | * goes at the beginning of the data segment, which is page-aligned. | ||
744 | */ | ||
745 | .data | ||
746 | _GLOBAL(sdata) | ||
747 | _GLOBAL(empty_zero_page) /* one page of zeros */ | ||
748 | .space 4096 | ||
749 | |||
750 | /* | ||
751 | * To support >32-bit physical addresses, we use an 8KB pgdir. | ||
752 | */ | ||
753 | _GLOBAL(swapper_pg_dir) | ||
754 | .space 8192 | ||
755 | |||
756 | /* Reserved 4k for the critical exception stack & 4k for the machine | ||
757 | * check stack per CPU for kernel mode exceptions */ | ||
758 | .section .bss | ||
759 | .align 12 | ||
760 | exception_stack_bottom: | ||
761 | .space BOOKE_EXCEPTION_STACK_SIZE | ||
762 | _GLOBAL(exception_stack_top) /* label only: stacks grow down from here */ | ||
763 | |||
764 | /* | ||
765 | * This space gets a copy of optional info passed to us by the bootstrap | ||
766 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
767 | */ | ||
768 | _GLOBAL(cmd_line) | ||
769 | .space 512 | ||
770 | |||
771 | /* | ||
772 | * Room for two PTE pointers, usually the kernel and current user pointers | ||
773 | * to their respective root page table. | ||
774 | */ | ||
775 | abatron_pteptrs: | ||
776 | .space 8 | ||
777 | |||
778 | |||
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S new file mode 100644 index 000000000000..8562b807b37c --- /dev/null +++ b/arch/powerpc/kernel/head_4xx.S | |||
@@ -0,0 +1,1016 @@ | |||
1 | /* | ||
2 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
3 | * Initial PowerPC version. | ||
4 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * Rewritten for PReP | ||
6 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
7 | * Low-level exception handers, MMU support, and rewrite. | ||
8 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
9 | * PowerPC 8xx modifications. | ||
10 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
11 | * PowerPC 403GCX modifications. | ||
12 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
13 | * PowerPC 403GCX/405GP modifications. | ||
14 | * Copyright 2000 MontaVista Software Inc. | ||
15 | * PPC405 modifications | ||
16 | * PowerPC 403GCX/405GP modifications. | ||
17 | * Author: MontaVista Software, Inc. | ||
18 | * frank_rowand@mvista.com or source@mvista.com | ||
19 | * debbie_chu@mvista.com | ||
20 | * | ||
21 | * | ||
22 | * Module name: head_4xx.S | ||
23 | * | ||
24 | * Description: | ||
25 | * Kernel execution entry point code. | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or | ||
28 | * modify it under the terms of the GNU General Public License | ||
29 | * as published by the Free Software Foundation; either version | ||
30 | * 2 of the License, or (at your option) any later version. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/config.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/page.h> | ||
37 | #include <asm/mmu.h> | ||
38 | #include <asm/pgtable.h> | ||
39 | #include <asm/ibm4xx.h> | ||
40 | #include <asm/cputable.h> | ||
41 | #include <asm/thread_info.h> | ||
42 | #include <asm/ppc_asm.h> | ||
43 | #include <asm/asm-offsets.h> | ||
44 | |||
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=96m")
 *   r7 - End of kernel command line string
 *
 * This is all going to change RSN when we add bi_recs....... -- Dan
 */
	.text
_GLOBAL(_stext)
_GLOBAL(_start)

	/* Stash the boot parameters in callee-saved registers so they
	 * survive until machine_init() is called from start_here.
	 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 16 Meg mapped into TLB entries, and the caches
 * ready to work.
 */
turn_on_mmu:
	lis	r0,MSR_KERNEL@h
	ori	r0,r0,MSR_KERNEL@l
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi			/* enables MMU */
	b	.		/* prevent prefetch past rfi */

/*
 * This area is used for temporarily saving registers during the
 * critical exception prolog (see CRITICAL_EXCEPTION_PROLOG below).
 */
	. = 0xc0
crit_save:
_GLOBAL(crit_r10)
	.space	4
_GLOBAL(crit_r11)
	.space	4
98 | |||
/*
 * Exception vector entry code.  This code runs with address translation
 * turned off (i.e. using physical addresses).  We assume SPRG3 has the
 * physical address of the current task thread_struct.
 * Note that we have to have decremented r1 before we write to any fields
 * of the exception frame, since a critical interrupt could occur at any
 * time, and it will write to the area immediately below the current r1.
 */
#define NORMAL_EXCEPTION_PROLOG						     \
	mtspr	SPRN_SPRG0,r10;	/* save two registers to work with */	     \
	mtspr	SPRN_SPRG1,r11;						     \
	mtspr	SPRN_SPRG2,r1;						     \
	mfcr	r10;		/* save CR in r10 for now	   */	     \
	mfspr	r11,SPRN_SRR1;	/* check whether user or kernel    */	     \
	andi.	r11,r11,MSR_PR;						     \
	beq	1f;							     \
	mfspr	r1,SPRN_SPRG3;	/* if from user, start at top of   */	     \
	lwz	r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */  \
	addi	r1,r1,THREAD_SIZE;					     \
1:	subi	r1,r1,INT_FRAME_SIZE;	/* Allocate an exception frame */    \
	tophys(r11,r1);							     \
	stw	r10,_CCR(r11);	/* save various registers	   */	     \
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mfspr	r10,SPRN_SPRG0;						     \
	stw	r10,GPR10(r11);						     \
	mfspr	r12,SPRN_SPRG1;						     \
	stw	r12,GPR11(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r10,SPRN_SPRG2;						     \
	mfspr	r12,SPRN_SRR0;						     \
	stw	r10,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR1;						     \
	stw	r10,0(r11);	/* backchain: same saved r1 value */	     \
	rlwinm	r9,r9,0,14,12;	/* clear MSR_WE (necessary?)	   */	     \
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

/*
 * Exception prolog for critical exceptions.  This is a little different
 * from the normal exception prolog above since a critical exception
 * can potentially occur at any point during normal exception processing.
 * Thus we cannot use the same SPRG registers as the normal prolog above.
 * Instead we use a couple of words of memory at low physical addresses
 * (crit_r10/crit_r11).  This is OK since we don't support SMP on these
 * processors.
 */
#define CRITICAL_EXCEPTION_PROLOG					     \
	stw	r10,crit_r10@l(0); /* save two registers to work with */    \
	stw	r11,crit_r11@l(0);					     \
	mfcr	r10;		/* save CR in r10 for now	   */	     \
	mfspr	r11,SPRN_SRR3;	/* check whether user or kernel    */	     \
	andi.	r11,r11,MSR_PR;						     \
	lis	r11,critical_stack_top@h;				     \
	ori	r11,r11,critical_stack_top@l;				     \
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG3;	/* if from user, start at top of   */	     \
	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
	addi	r11,r11,THREAD_SIZE;					     \
1:	subi	r11,r11,INT_FRAME_SIZE;	/* Allocate an exception frame */    \
	tophys(r11,r11);						     \
	stw	r10,_CCR(r11);	/* save various registers	   */	     \
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;	/* save DEAR and ESR in the frame  */	     \
	stw	r12,_DEAR(r11);	/* since they may have had stuff   */	     \
	mfspr	r9,SPRN_ESR;	/* in them at the point where the  */	     \
	stw	r9,_ESR(r11);	/* exception was taken		   */	     \
	mfspr	r12,SPRN_SRR2;						     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR3;						     \
	stw	r1,0(r11);						     \
	tovirt(r1,r11);							     \
	rlwinm	r9,r9,0,14,12;	/* clear MSR_WE (necessary?)	   */	     \
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

/*
 * State at this point:
 *	r9	saved in stack frame, now saved SRR3 & ~MSR_WE
 *	r10	saved in crit_r10 and in stack frame, trashed
 *	r11	saved in crit_r11 and in stack frame,
 *		now phys stack/exception frame pointer
 *	r12	saved in stack frame, now saved SRR2
 *	CR	saved in stack frame, CR0.EQ = !SRR3.PR
 *	LR, DEAR, ESR in stack frame
 *	r1	saved in stack frame, now virt stack/excframe pointer
 *	r0, r3-r8 saved in stack frame
 */

/*
 * Exception vectors.
 */
#define START_EXCEPTION(n, label)					     \
	. = n;								     \
label:

#define EXCEPTION(n, label, hdlr, xfer)					     \
	START_EXCEPTION(n, label);					     \
	NORMAL_EXCEPTION_PROLOG;					     \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				     \
	xfer(n, hdlr)

#define CRITICAL_EXCEPTION(n, label, hdlr)				     \
	START_EXCEPTION(n, label);					     \
	CRITICAL_EXCEPTION_PROLOG;					     \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				     \
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  NOCOPY, crit_transfer_to_handler,		     \
			  ret_from_crit_exc)

/* Common tail: record the trap number, build the MSR to resume with,
 * and branch to the transfer routine with handler/return addresses
 * inlined after the call (picked up by the transfer code).
 */
#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)		     \
	li	r10,trap;						     \
	stw	r10,TRAP(r11);						     \
	lis	r10,msr@h;						     \
	ori	r10,r10,msr@l;						     \
	copyee(r10, r9);						     \
	bl	tfer;							     \
	.long	hdlr;							     \
	.long	ret

/* COPY_EE propagates the interrupted context's MSR_EE into the new MSR. */
#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)						     \
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)						     \
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)						     \
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)					     \
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
			  ret_from_except)
243 | |||
244 | |||
/*
 * 0x0100 - Critical Interrupt Exception
 */
	CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException)

/*
 * 0x0200 - Machine Check Exception
 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)

/*
 * 0x0300 - Data Storage Exception
 * This happens for just a few reasons.  U0 set (but we don't do that),
 * or zone protection fault (user violation, write to protected page).
 * If this is just an update of modified status, we do that quickly
 * and exit.  Otherwise, we call heavyweight functions to do the work.
 */
	START_EXCEPTION(0x0300, DataStorage)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	/* 403GCX has fewer SPRGs; spill to low memory instead */
	stw	r12, 0(r0)
	stw	r9, 4(r0)
	mfcr	r11
	mfspr	r12, SPRN_PID
	stw	r11, 8(r0)
	stw	r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif

	/* First, check if it was a zone fault (which means a user
	 * tried to access a kernel or read-protected page - always
	 * a SEGV).  All other faults here must be stores, so no
	 * need to check ESR_DST as well. */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_DIZ@h
	bne	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r11, 0(r11)		/* Get L1 entry */
	rlwinm.	r12, r11, 0, 0, 19	/* Extract L2 (pte) base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */

	andi.	r9, r11, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail if not */

	/* Update 'changed'.
	 */
	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, 0(r12)		/* Update Linux page table */

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
	li	r12, 0x0ce2
	andc	r11, r11, r12		/* Make sure 20, 21 are zero */

	/* find the TLB index that caused the fault.  It has to be here. */
	tlbsx	r9, 0, r10

	tlbwe	r11, r9, TLB_DATA	/* Load TLB LO */

	/* Done...restore registers and get out of here. */
#ifdef CONFIG_403GCX
	lwz	r12, 12(r0)
	lwz	r11, 8(r0)
	mtspr	SPRN_PID, r12
	mtcr	r11
	lwz	r9, 4(r0)
	lwz	r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz	r12, 12(r0)
	lwz	r11, 8(r0)
	mtspr	SPRN_PID, r12
	mtcr	r11
	lwz	r9, 4(r0)
	lwz	r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataAccess

/*
 * 0x0400 - Instruction Storage Exception
 * This is caused by a fetch from non-execute or guarded pages.
 */
	START_EXCEPTION(0x0400, InstructionAccess)
	NORMAL_EXCEPTION_PROLOG
	mr	r4,r12			/* Pass SRR0 as arg2 */
	li	r5,0			/* Pass zero as arg3 */
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* 0x0500 - External Interrupt Exception */
	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* 0x0600 - Alignment Exception */
	START_EXCEPTION(0x0600, Alignment)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR and save it */
	stw	r4,_DEAR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, AlignmentException)

/* 0x0700 - Program Exception */
	START_EXCEPTION(0x0700, ProgramCheck)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_ESR		/* Grab the ESR and save it */
	stw	r4,_ESR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x700, ProgramCheckException)

	EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE)

/* 0x0C00 - System Call Exception */
	START_EXCEPTION(0x0C00, SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

	EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE)

/* 0x1000 - Programmable Interval Timer (PIT) Exception */
	START_EXCEPTION(0x1000, Decrementer)
	NORMAL_EXCEPTION_PROLOG
	lis	r0,TSR_PIS@h
	mtspr	SPRN_TSR,r0		/* Clear the PIT exception */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x1000, timer_interrupt)

#if 0
/* NOTE:
 * FIT and WDT handlers are not implemented yet.
 */

/* 0x1010 - Fixed Interval Timer (FIT) Exception */
	STND_EXCEPTION(0x1010, FITException, UnknownException)

/* 0x1020 - Watchdog Timer (WDT) Exception */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException)
#endif
#endif
457 | |||
/* 0x1100 - Data TLB Miss Exception
 * As the name implies, translation is not in the MMU, so search the
 * page tables and fix it.  The only purpose of this function is to
 * load TLB entries from the page table if they exist.
 */
	START_EXCEPTION(0x1100, DTLBMiss)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	/* 403GCX has fewer SPRGs; spill to low memory instead */
	stw	r12, 0(r0)
	stw	r9, 4(r0)
	mfcr	r11
	mfspr	r12, SPRN_PID
	stw	r11, 8(r0)
	stw	r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	 */
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz	r12, 12(r0)
	lwz	r11, 8(r0)
	mtspr	SPRN_PID, r12
	mtcr	r11
	lwz	r9, 4(r0)
	lwz	r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataAccess
558 | |||
/* 0x1200 - Instruction TLB Miss Exception
 * Nearly the same as above, except we get our information from different
 * registers (SRR0 instead of DEAR) and bail out to a different point
 * (InstructionAccess instead of DataAccess).
 */
	START_EXCEPTION(0x1200, ITLBMiss)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	/* 403GCX has fewer SPRGs; spill to low memory instead */
	stw	r12, 0(r0)
	stw	r9, 4(r0)
	mfcr	r11
	mfspr	r12, SPRN_PID
	stw	r11, 8(r0)
	stw	r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	 */
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz	r12, 12(r0)
	lwz	r11, 8(r0)
	mtspr	SPRN_PID, r12
	mtcr	r11
	lwz	r9, 4(r0)
	lwz	r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionAccess
658 | |||
659 | EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) | ||
660 | EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) | ||
661 | EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) | ||
662 | EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) | ||
663 | #ifdef CONFIG_IBM405_ERR51 | ||
664 | /* 405GP errata 51 */ | ||
665 | START_EXCEPTION(0x1700, Trap_17) | ||
666 | b DTLBMiss | ||
667 | #else | ||
668 | EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) | ||
669 | #endif | ||
670 | EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) | ||
671 | EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) | ||
672 | EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) | ||
673 | EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) | ||
674 | EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) | ||
675 | EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) | ||
676 | EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) | ||
677 | EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) | ||
678 | |||
/* Check for a single step debug exception while in an exception
 * handler before state has been saved.  This is to catch the case
 * where an instruction that we are trying to single step causes
 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
 * the exception handler generates a single step debug exception.
 *
 * If we get a debug trap on the first instruction of an exception handler,
 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
 * The exception handler was handling a non-critical interrupt, so it will
 * save (and later restore) the MSR via SPRN_SRR1, which will still have
 * the MSR_DE bit set.
 */
/* 0x2000 - Debug Exception */
	START_EXCEPTION(0x2000, DebugTrap)
	CRITICAL_EXCEPTION_PROLOG

	/*
	 * If this is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the SRR3 value and clearing the debug status.
	 */
	mfspr	r10,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r10,r10,DBSR_IC@h
	beq+	2f

	andi.	r10,r9,MSR_IR|MSR_PR	/* check supervisor + MMU off */
	beq	1f			/* branch and fix it up */

	mfspr	r10,SPRN_SRR2		/* Faulting instruction address */
	cmplwi	r10,0x2100
	bgt+	2f			/* address above exception vectors */

	/* here it looks like we got an inappropriate debug exception. */
1:	rlwinm	r9,r9,0,~MSR_DE		/* clear DE in the SRR3 value */
	lis	r10,DBSR_IC@h		/* clear the IC event */
	mtspr	SPRN_DBSR,r10
	/* restore state and get out */
	lwz	r10,_CCR(r11)
	lwz	r0,GPR0(r11)
	lwz	r1,GPR1(r11)
	mtcrf	0x80,r10
	mtspr	SPRN_SRR2,r12
	mtspr	SPRN_SRR3,r9
	lwz	r9,GPR9(r11)
	lwz	r12,GPR12(r11)
	lwz	r10,crit_r10@l(0)
	lwz	r11,crit_r11@l(0)
	PPC405_ERR77_SYNC
	rfci
	b	.

	/* continue normal handling for a critical exception... */
2:	mfspr	r4,SPRN_DBSR
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
		NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
740 | |||
/*
 * The other Data TLB exceptions bail out to this point
 * if they can't resolve the lightweight TLB fault.
 */
DataAccess:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR	/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR	/* Grab the DEAR, save it, pass arg2 */
	EXC_XFER_EE_LITE(0x300, handle_page_fault)
751 | |||
/* Other PowerPC processors, namely those derived from the 6xx-series
 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
 * However, for the 4xx-series processors these are neither defined nor
 * reserved.
 */

/* Damn, I came up one instruction too many to fit into the
 * exception space :-).  Both the instruction and data TLB
 * miss get to this point to load the TLB.
 *	r10 - TLB_TAG value
 *	r11 - Linux PTE
 *	r12, r9 - available to use
 *	PID - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 * Actually, it will fit now, but oh well.....a common place
 * to load the TLB.
 */
tlb_4xx_index:
	.long	0
finish_tlb_load:
	/* load the next available TLB index (simple round-robin). */
	lwz	r9, tlb_4xx_index@l(0)
	addi	r9, r9, 1
	andi.	r9, r9, (PPC4XX_TLB_SIZE-1)
	stw	r9, tlb_4xx_index@l(0)

6:
	/*
	 * Clear out the software-only bits in the PTE to generate the
	 * TLB_DATA value.  These are the bottom 2 bits of the RPM, the
	 * top 3 bits of the zone field, and M.
	 */
	li	r12, 0x0ce2
	andc	r11, r11, r12

	tlbwe	r11, r9, TLB_DATA	/* Load TLB LO */
	tlbwe	r10, r9, TLB_TAG	/* Load TLB HI */

	/* Done...restore registers and get out of here. */
#ifdef CONFIG_403GCX
	lwz	r12, 12(r0)
	lwz	r11, 8(r0)
	mtspr	SPRN_PID, r12
	mtcr	r11
	lwz	r9, 4(r0)
	lwz	r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

/* extern void giveup_fpu(struct task_struct *prev)
 *
 * The PowerPC 4xx family of processors do not have an FPU, so this just
 * returns.
 */
_GLOBAL(giveup_fpu)
	blr
821 | |||
/* This is where the main kernel code starts, running translated
 * (turn_on_mmu rfi's here with the 1:1 boot mapping in place).
 */
start_here:

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

	/*
	 * Decide what sort of machine this is and initialize the MMU,
	 * passing along the boot parameters saved at entry (r27-r31).
	 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Go back to running unmapped so we can load up new values
	 * and change to using our exception vectors.
	 * On the 4xx, all we have to do is invalidate the TLB to clear
	 * the old 16M byte TLB mappings.
	 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	lis	r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
	ori	r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
	b	.		/* prevent prefetch past rfi */

/* Load up the kernel context */
2:
	sync			/* Flush to memory before changing TLB */
	tlbia
	isync			/* Flush shadow TLBs */

	/* set up the PTE pointers for the Abatron bdiGDB. */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

	/* Now turn on the MMU for real! */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
	b	.		/* prevent prefetch past rfi */
894 | |||
895 | /* Set up the initial MMU state so we can do the first level of | ||
896 | * kernel initialization. This maps the first 16 MBytes of memory 1:1 | ||
897 | * virtual to physical and more importantly sets the cache mode. | ||
898 | */ | ||
899 | initial_mmu: | ||
900 | tlbia /* Invalidate all TLB entries */ | ||
901 | isync | ||
902 | |||
903 | /* We should still be executing code at physical address 0x0000xxxx | ||
904 | * at this point. However, start_here is at virtual address | ||
905 | * 0xC000xxxx. So, set up a TLB mapping to cover this once | ||
906 | * translation is enabled. | ||
907 | */ | ||
908 | |||
909 | lis r3,KERNELBASE@h /* Load the kernel virtual address */ | ||
910 | ori r3,r3,KERNELBASE@l | ||
911 | tophys(r4,r3) /* Load the kernel physical address */ | ||
912 | |||
913 | iccci r0,r3 /* Invalidate the i-cache before use */ | ||
914 | |||
915 | /* Load the kernel PID. | ||
916 | */ | ||
917 | li r0,0 | ||
918 | mtspr SPRN_PID,r0 | ||
919 | sync | ||
920 | |||
921 | /* Configure and load two entries into TLB slots 62 and 63. | ||
922 | * In case we are pinning TLBs, these are reserved in by the | ||
923 | * other TLB functions. If not reserving, then it doesn't | ||
924 | * matter where they are loaded. | ||
925 | */ | ||
926 | clrrwi r4,r4,10 /* Mask off the real page number */ | ||
927 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | ||
928 | |||
929 | clrrwi r3,r3,10 /* Mask off the effective page number */ | ||
930 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M)) | ||
931 | |||
932 | li r0,63 /* TLB slot 63 */ | ||
933 | |||
934 | tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ | ||
935 | tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ | ||
936 | |||
937 | #if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) | ||
938 | |||
939 | /* Load a TLB entry for the UART, so that ppc4xx_progress() can use | ||
940 | * the UARTs nice and early. We use a 4k real==virtual mapping. */ | ||
941 | |||
942 | lis r3,SERIAL_DEBUG_IO_BASE@h | ||
943 | ori r3,r3,SERIAL_DEBUG_IO_BASE@l | ||
944 | mr r4,r3 | ||
945 | clrrwi r4,r4,12 | ||
946 | ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) | ||
947 | |||
948 | clrrwi r3,r3,12 | ||
949 | ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) | ||
950 | |||
951 | li r0,0 /* TLB slot 0 */ | ||
952 | tlbwe r4,r0,TLB_DATA | ||
953 | tlbwe r3,r0,TLB_TAG | ||
954 | #endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ | ||
955 | |||
956 | isync | ||
957 | |||
958 | /* Establish the exception vector base | ||
959 | */ | ||
960 | lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */ | ||
961 | tophys(r0,r4) /* Use the physical address */ | ||
962 | mtspr SPRN_EVPR,r0 | ||
963 | |||
964 | blr | ||
965 | |||
966 | _GLOBAL(abort) | ||
967 | mfspr r13,SPRN_DBCR0 | ||
968 | oris r13,r13,DBCR0_RST_SYSTEM@h | ||
969 | mtspr SPRN_DBCR0,r13 | ||
970 | |||
971 | _GLOBAL(set_context) | ||
972 | |||
973 | #ifdef CONFIG_BDI_SWITCH | ||
974 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
975 | * The PGDIR is the second parameter. | ||
976 | */ | ||
977 | lis r5, KERNELBASE@h | ||
978 | lwz r5, 0xf0(r5) | ||
979 | stw r4, 0x4(r5) | ||
980 | #endif | ||
981 | sync | ||
982 | mtspr SPRN_PID,r3 | ||
983 | isync /* Need an isync to flush shadow */ | ||
984 | /* TLBs after changing PID */ | ||
985 | blr | ||
986 | |||
987 | /* We put a few things here that have to be page-aligned. This stuff | ||
988 | * goes at the beginning of the data segment, which is page-aligned. | ||
989 | */ | ||
990 | .data | ||
991 | _GLOBAL(sdata) | ||
992 | _GLOBAL(empty_zero_page) | ||
993 | .space 4096 | ||
994 | _GLOBAL(swapper_pg_dir) | ||
995 | .space 4096 | ||
996 | |||
997 | |||
998 | /* Stack for handling critical exceptions from kernel mode */ | ||
999 | .section .bss | ||
1000 | .align 12 | ||
1001 | exception_stack_bottom: | ||
1002 | .space 4096 | ||
1003 | critical_stack_top: | ||
1004 | _GLOBAL(exception_stack_top) | ||
1005 | |||
1006 | /* This space gets a copy of optional info passed to us by the bootstrap | ||
1007 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1008 | */ | ||
1009 | _GLOBAL(cmd_line) | ||
1010 | .space 512 | ||
1011 | |||
1012 | /* Room for two PTE pointers, usually the kernel and current user pointers | ||
1013 | * to their respective root page table. | ||
1014 | */ | ||
1015 | abatron_pteptrs: | ||
1016 | .space 8 | ||
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S new file mode 100644 index 000000000000..22a5ee07e1ea --- /dev/null +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -0,0 +1,2011 @@ | |||
1 | /* | ||
2 | * arch/ppc64/kernel/head.S | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
8 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Adapted for Power Macintosh by Paul Mackerras. | ||
10 | * Low-level exception handlers and MMU support | ||
11 | * rewritten by Paul Mackerras. | ||
12 | * Copyright (C) 1996 Paul Mackerras. | ||
13 | * | ||
14 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | ||
15 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | ||
16 | * | ||
17 | * This file contains the low-level support and setup for the | ||
18 | * PowerPC-64 platform, including trap and interrupt dispatch. | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or | ||
21 | * modify it under the terms of the GNU General Public License | ||
22 | * as published by the Free Software Foundation; either version | ||
23 | * 2 of the License, or (at your option) any later version. | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/threads.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/page.h> | ||
30 | #include <asm/mmu.h> | ||
31 | #include <asm/systemcfg.h> | ||
32 | #include <asm/ppc_asm.h> | ||
33 | #include <asm/asm-offsets.h> | ||
34 | #include <asm/bug.h> | ||
35 | #include <asm/cputable.h> | ||
36 | #include <asm/setup.h> | ||
37 | #include <asm/hvcall.h> | ||
38 | #include <asm/iSeries/LparMap.h> | ||
39 | |||
40 | #ifdef CONFIG_PPC_ISERIES | ||
41 | #define DO_SOFT_DISABLE | ||
42 | #endif | ||
43 | |||
44 | /* | ||
45 | * We layout physical memory as follows: | ||
46 | * 0x0000 - 0x00ff : Secondary processor spin code | ||
47 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | ||
48 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | ||
49 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | ||
50 | * 0x7000 - 0x7fff : FWNMI data area | ||
51 | * 0x8000 - : Early init and support code | ||
52 | */ | ||
53 | |||
54 | /* | ||
55 | * SPRG Usage | ||
56 | * | ||
57 | * Register Definition | ||
58 | * | ||
59 | * SPRG0 reserved for hypervisor | ||
60 | * SPRG1 temp - used to save gpr | ||
61 | * SPRG2 temp - used to save gpr | ||
62 | * SPRG3 virt addr of paca | ||
63 | */ | ||
64 | |||
65 | /* | ||
66 | * Entering into this code we make the following assumptions: | ||
67 | * For pSeries: | ||
68 | * 1. The MMU is off & open firmware is running in real mode. | ||
69 | * 2. The kernel is entered at __start | ||
70 | * | ||
71 | * For iSeries: | ||
72 | * 1. The MMU is on (as it always is for iSeries) | ||
73 | * 2. The kernel is entered at system_reset_iSeries | ||
74 | */ | ||
75 | |||
76 | .text | ||
77 | .globl _stext | ||
78 | _stext: | ||
79 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
80 | _GLOBAL(__start) | ||
81 | /* NOP this out unconditionally */ | ||
82 | BEGIN_FTR_SECTION | ||
83 | b .__start_initialization_multiplatform | ||
84 | END_FTR_SECTION(0, 1) | ||
85 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
86 | |||
87 | /* Catch branch to 0 in real mode */ | ||
88 | trap | ||
89 | |||
90 | #ifdef CONFIG_PPC_ISERIES | ||
91 | /* | ||
92 | * At offset 0x20, there is a pointer to iSeries LPAR data. | ||
93 | * This is required by the hypervisor | ||
94 | */ | ||
95 | . = 0x20 | ||
96 | .llong hvReleaseData-KERNELBASE | ||
97 | |||
98 | /* | ||
99 | * At offset 0x28 and 0x30 are offsets to the mschunks_map | ||
100 | * array (used by the iSeries LPAR debugger to do translation | ||
101 | * between physical addresses and absolute addresses) and | ||
102 | * to the pidhash table (also used by the debugger) | ||
103 | */ | ||
104 | .llong mschunks_map-KERNELBASE | ||
105 | .llong 0 /* pidhash-KERNELBASE SFRXXX */ | ||
106 | |||
107 | /* Offset 0x38 - Pointer to start of embedded System.map */ | ||
108 | .globl embedded_sysmap_start | ||
109 | embedded_sysmap_start: | ||
110 | .llong 0 | ||
111 | /* Offset 0x40 - Pointer to end of embedded System.map */ | ||
112 | .globl embedded_sysmap_end | ||
113 | embedded_sysmap_end: | ||
114 | .llong 0 | ||
115 | |||
116 | #endif /* CONFIG_PPC_ISERIES */ | ||
117 | |||
118 | /* Secondary processors spin on this value until it goes to 1. */ | ||
119 | .globl __secondary_hold_spinloop | ||
120 | __secondary_hold_spinloop: | ||
121 | .llong 0x0 | ||
122 | |||
123 | /* Secondary processors write this value with their cpu # */ | ||
124 | /* after they enter the spin loop immediately below. */ | ||
125 | .globl __secondary_hold_acknowledge | ||
126 | __secondary_hold_acknowledge: | ||
127 | .llong 0x0 | ||
128 | |||
129 | . = 0x60 | ||
130 | /* | ||
131 | * The following code is used on pSeries to hold secondary processors | ||
132 | * in a spin loop after they have been freed from OpenFirmware, but | ||
133 | * before the bulk of the kernel has been relocated. This code | ||
134 | * is relocated to physical address 0x60 before prom_init is run. | ||
135 | * All of it must fit below the first exception vector at 0x100. | ||
136 | */ | ||
137 | _GLOBAL(__secondary_hold) | ||
138 | mfmsr r24 | ||
139 | ori r24,r24,MSR_RI | ||
140 | mtmsrd r24 /* RI on */ | ||
141 | |||
142 | /* Grab our linux cpu number */ | ||
143 | mr r24,r3 | ||
144 | |||
145 | /* Tell the master cpu we're here */ | ||
146 | /* Relocation is off & we are located at an address less */ | ||
147 | /* than 0x100, so only need to grab low order offset. */ | ||
148 | std r24,__secondary_hold_acknowledge@l(0) | ||
149 | sync | ||
150 | |||
151 | /* All secondary cpus wait here until told to start. */ | ||
152 | 100: ld r4,__secondary_hold_spinloop@l(0) | ||
153 | cmpdi 0,r4,1 | ||
154 | bne 100b | ||
155 | |||
156 | #ifdef CONFIG_HMT | ||
157 | b .hmt_init | ||
158 | #else | ||
159 | #ifdef CONFIG_SMP | ||
160 | mr r3,r24 | ||
161 | b .pSeries_secondary_smp_init | ||
162 | #else | ||
163 | BUG_OPCODE | ||
164 | #endif | ||
165 | #endif | ||
166 | |||
167 | /* This value is used to mark exception frames on the stack. */ | ||
168 | .section ".toc","aw" | ||
169 | exception_marker: | ||
170 | .tc ID_72656773_68657265[TC],0x7265677368657265 | ||
171 | .text | ||
172 | |||
173 | /* | ||
174 | * The following macros define the code that appears as | ||
175 | * the prologue to each of the exception handlers. They | ||
176 | * are split into two parts to allow a single kernel binary | ||
177 | * to be used for pSeries and iSeries. | ||
178 | * LOL. One day... - paulus | ||
179 | */ | ||
180 | |||
181 | /* | ||
182 | * We make as much of the exception code common between native | ||
183 | * exception handlers (including pSeries LPAR) and iSeries LPAR | ||
184 | * implementations as possible. | ||
185 | */ | ||
186 | |||
187 | /* | ||
188 | * This is the start of the interrupt handlers for pSeries | ||
189 | * This code runs with relocation off. | ||
190 | */ | ||
191 | #define EX_R9 0 | ||
192 | #define EX_R10 8 | ||
193 | #define EX_R11 16 | ||
194 | #define EX_R12 24 | ||
195 | #define EX_R13 32 | ||
196 | #define EX_SRR0 40 | ||
197 | #define EX_R3 40 /* SLB miss saves R3, but not SRR0 */ | ||
198 | #define EX_DAR 48 | ||
199 | #define EX_LR 48 /* SLB miss saves LR, but not DAR */ | ||
200 | #define EX_DSISR 56 | ||
201 | #define EX_CCR 60 | ||
202 | |||
203 | #define EXCEPTION_PROLOG_PSERIES(area, label) \ | ||
204 | mfspr r13,SPRG3; /* get paca address into r13 */ \ | ||
205 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | ||
206 | std r10,area+EX_R10(r13); \ | ||
207 | std r11,area+EX_R11(r13); \ | ||
208 | std r12,area+EX_R12(r13); \ | ||
209 | mfspr r9,SPRG1; \ | ||
210 | std r9,area+EX_R13(r13); \ | ||
211 | mfcr r9; \ | ||
212 | clrrdi r12,r13,32; /* get high part of &label */ \ | ||
213 | mfmsr r10; \ | ||
214 | mfspr r11,SRR0; /* save SRR0 */ \ | ||
215 | ori r12,r12,(label)@l; /* virt addr of handler */ \ | ||
216 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ | ||
217 | mtspr SRR0,r12; \ | ||
218 | mfspr r12,SRR1; /* and SRR1 */ \ | ||
219 | mtspr SRR1,r10; \ | ||
220 | rfid; \ | ||
221 | b . /* prevent speculative execution */ | ||
222 | |||
223 | /* | ||
224 | * This is the start of the interrupt handlers for iSeries | ||
225 | * This code runs with relocation on. | ||
226 | */ | ||
227 | #define EXCEPTION_PROLOG_ISERIES_1(area) \ | ||
228 | mfspr r13,SPRG3; /* get paca address into r13 */ \ | ||
229 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | ||
230 | std r10,area+EX_R10(r13); \ | ||
231 | std r11,area+EX_R11(r13); \ | ||
232 | std r12,area+EX_R12(r13); \ | ||
233 | mfspr r9,SPRG1; \ | ||
234 | std r9,area+EX_R13(r13); \ | ||
235 | mfcr r9 | ||
236 | |||
237 | #define EXCEPTION_PROLOG_ISERIES_2 \ | ||
238 | mfmsr r10; \ | ||
239 | ld r11,PACALPPACA+LPPACASRR0(r13); \ | ||
240 | ld r12,PACALPPACA+LPPACASRR1(r13); \ | ||
241 | ori r10,r10,MSR_RI; \ | ||
242 | mtmsrd r10,1 | ||
243 | |||
244 | /* | ||
245 | * The common exception prolog is used for all except a few exceptions | ||
246 | * such as a segment miss on a kernel address. We have to be prepared | ||
247 | * to take another exception from the point where we first touch the | ||
248 | * kernel stack onwards. | ||
249 | * | ||
250 | * On entry r13 points to the paca, r9-r13 are saved in the paca, | ||
251 | * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and | ||
252 | * SRR1, and relocation is on. | ||
253 | */ | ||
254 | #define EXCEPTION_PROLOG_COMMON(n, area) \ | ||
255 | andi. r10,r12,MSR_PR; /* See if coming from user */ \ | ||
256 | mr r10,r1; /* Save r1 */ \ | ||
257 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ | ||
258 | beq- 1f; \ | ||
259 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | ||
260 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | ||
261 | bge- cr1,bad_stack; /* abort if it is */ \ | ||
262 | std r9,_CCR(r1); /* save CR in stackframe */ \ | ||
263 | std r11,_NIP(r1); /* save SRR0 in stackframe */ \ | ||
264 | std r12,_MSR(r1); /* save SRR1 in stackframe */ \ | ||
265 | std r10,0(r1); /* make stack chain pointer */ \ | ||
266 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | ||
267 | std r10,GPR1(r1); /* save r1 in stackframe */ \ | ||
268 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | ||
269 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | ||
270 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | ||
271 | ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ | ||
272 | ld r10,area+EX_R10(r13); \ | ||
273 | std r9,GPR9(r1); \ | ||
274 | std r10,GPR10(r1); \ | ||
275 | ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ | ||
276 | ld r10,area+EX_R12(r13); \ | ||
277 | ld r11,area+EX_R13(r13); \ | ||
278 | std r9,GPR11(r1); \ | ||
279 | std r10,GPR12(r1); \ | ||
280 | std r11,GPR13(r1); \ | ||
281 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ | ||
282 | mflr r9; /* save LR in stackframe */ \ | ||
283 | std r9,_LINK(r1); \ | ||
284 | mfctr r10; /* save CTR in stackframe */ \ | ||
285 | std r10,_CTR(r1); \ | ||
286 | mfspr r11,XER; /* save XER in stackframe */ \ | ||
287 | std r11,_XER(r1); \ | ||
288 | li r9,(n)+1; \ | ||
289 | std r9,_TRAP(r1); /* set trap number */ \ | ||
290 | li r10,0; \ | ||
291 | ld r11,exception_marker@toc(r2); \ | ||
292 | std r10,RESULT(r1); /* clear regs->result */ \ | ||
293 | std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ | ||
294 | |||
295 | /* | ||
296 | * Exception vectors. | ||
297 | */ | ||
298 | #define STD_EXCEPTION_PSERIES(n, label) \ | ||
299 | . = n; \ | ||
300 | .globl label##_pSeries; \ | ||
301 | label##_pSeries: \ | ||
302 | HMT_MEDIUM; \ | ||
303 | mtspr SPRG1,r13; /* save r13 */ \ | ||
304 | RUNLATCH_ON(r13); \ | ||
305 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) | ||
306 | |||
307 | #define STD_EXCEPTION_ISERIES(n, label, area) \ | ||
308 | .globl label##_iSeries; \ | ||
309 | label##_iSeries: \ | ||
310 | HMT_MEDIUM; \ | ||
311 | mtspr SPRG1,r13; /* save r13 */ \ | ||
312 | RUNLATCH_ON(r13); \ | ||
313 | EXCEPTION_PROLOG_ISERIES_1(area); \ | ||
314 | EXCEPTION_PROLOG_ISERIES_2; \ | ||
315 | b label##_common | ||
316 | |||
317 | #define MASKABLE_EXCEPTION_ISERIES(n, label) \ | ||
318 | .globl label##_iSeries; \ | ||
319 | label##_iSeries: \ | ||
320 | HMT_MEDIUM; \ | ||
321 | mtspr SPRG1,r13; /* save r13 */ \ | ||
322 | RUNLATCH_ON(r13); \ | ||
323 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ | ||
324 | lbz r10,PACAPROCENABLED(r13); \ | ||
325 | cmpwi 0,r10,0; \ | ||
326 | beq- label##_iSeries_masked; \ | ||
327 | EXCEPTION_PROLOG_ISERIES_2; \ | ||
328 | b label##_common; \ | ||
329 | |||
330 | #ifdef DO_SOFT_DISABLE | ||
331 | #define DISABLE_INTS \ | ||
332 | lbz r10,PACAPROCENABLED(r13); \ | ||
333 | li r11,0; \ | ||
334 | std r10,SOFTE(r1); \ | ||
335 | mfmsr r10; \ | ||
336 | stb r11,PACAPROCENABLED(r13); \ | ||
337 | ori r10,r10,MSR_EE; \ | ||
338 | mtmsrd r10,1 | ||
339 | |||
340 | #define ENABLE_INTS \ | ||
341 | lbz r10,PACAPROCENABLED(r13); \ | ||
342 | mfmsr r11; \ | ||
343 | std r10,SOFTE(r1); \ | ||
344 | ori r11,r11,MSR_EE; \ | ||
345 | mtmsrd r11,1 | ||
346 | |||
347 | #else /* hard enable/disable interrupts */ | ||
348 | #define DISABLE_INTS | ||
349 | |||
350 | #define ENABLE_INTS \ | ||
351 | ld r12,_MSR(r1); \ | ||
352 | mfmsr r11; \ | ||
353 | rlwimi r11,r12,0,MSR_EE; \ | ||
354 | mtmsrd r11,1 | ||
355 | |||
356 | #endif | ||
357 | |||
358 | #define STD_EXCEPTION_COMMON(trap, label, hdlr) \ | ||
359 | .align 7; \ | ||
360 | .globl label##_common; \ | ||
361 | label##_common: \ | ||
362 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | ||
363 | DISABLE_INTS; \ | ||
364 | bl .save_nvgprs; \ | ||
365 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
366 | bl hdlr; \ | ||
367 | b .ret_from_except | ||
368 | |||
369 | #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ | ||
370 | .align 7; \ | ||
371 | .globl label##_common; \ | ||
372 | label##_common: \ | ||
373 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | ||
374 | DISABLE_INTS; \ | ||
375 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
376 | bl hdlr; \ | ||
377 | b .ret_from_except_lite | ||
378 | |||
379 | /* | ||
380 | * Start of pSeries system interrupt routines | ||
381 | */ | ||
382 | . = 0x100 | ||
383 | .globl __start_interrupts | ||
384 | __start_interrupts: | ||
385 | |||
386 | STD_EXCEPTION_PSERIES(0x100, system_reset) | ||
387 | |||
388 | . = 0x200 | ||
389 | _machine_check_pSeries: | ||
390 | HMT_MEDIUM | ||
391 | mtspr SPRG1,r13 /* save r13 */ | ||
392 | RUNLATCH_ON(r13) | ||
393 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
394 | |||
395 | . = 0x300 | ||
396 | .globl data_access_pSeries | ||
397 | data_access_pSeries: | ||
398 | HMT_MEDIUM | ||
399 | mtspr SPRG1,r13 | ||
400 | BEGIN_FTR_SECTION | ||
401 | mtspr SPRG2,r12 | ||
402 | mfspr r13,DAR | ||
403 | mfspr r12,DSISR | ||
404 | srdi r13,r13,60 | ||
405 | rlwimi r13,r12,16,0x20 | ||
406 | mfcr r12 | ||
407 | cmpwi r13,0x2c | ||
408 | beq .do_stab_bolted_pSeries | ||
409 | mtcrf 0x80,r12 | ||
410 | mfspr r12,SPRG2 | ||
411 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
412 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | ||
413 | |||
414 | . = 0x380 | ||
415 | .globl data_access_slb_pSeries | ||
416 | data_access_slb_pSeries: | ||
417 | HMT_MEDIUM | ||
418 | mtspr SPRG1,r13 | ||
419 | RUNLATCH_ON(r13) | ||
420 | mfspr r13,SPRG3 /* get paca address into r13 */ | ||
421 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
422 | std r10,PACA_EXSLB+EX_R10(r13) | ||
423 | std r11,PACA_EXSLB+EX_R11(r13) | ||
424 | std r12,PACA_EXSLB+EX_R12(r13) | ||
425 | std r3,PACA_EXSLB+EX_R3(r13) | ||
426 | mfspr r9,SPRG1 | ||
427 | std r9,PACA_EXSLB+EX_R13(r13) | ||
428 | mfcr r9 | ||
429 | mfspr r12,SRR1 /* and SRR1 */ | ||
430 | mfspr r3,DAR | ||
431 | b .do_slb_miss /* Rel. branch works in real mode */ | ||
432 | |||
433 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | ||
434 | |||
435 | . = 0x480 | ||
436 | .globl instruction_access_slb_pSeries | ||
437 | instruction_access_slb_pSeries: | ||
438 | HMT_MEDIUM | ||
439 | mtspr SPRG1,r13 | ||
440 | RUNLATCH_ON(r13) | ||
441 | mfspr r13,SPRG3 /* get paca address into r13 */ | ||
442 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
443 | std r10,PACA_EXSLB+EX_R10(r13) | ||
444 | std r11,PACA_EXSLB+EX_R11(r13) | ||
445 | std r12,PACA_EXSLB+EX_R12(r13) | ||
446 | std r3,PACA_EXSLB+EX_R3(r13) | ||
447 | mfspr r9,SPRG1 | ||
448 | std r9,PACA_EXSLB+EX_R13(r13) | ||
449 | mfcr r9 | ||
450 | mfspr r12,SRR1 /* and SRR1 */ | ||
451 | mfspr r3,SRR0 /* SRR0 is faulting address */ | ||
452 | b .do_slb_miss /* Rel. branch works in real mode */ | ||
453 | |||
454 | STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) | ||
455 | STD_EXCEPTION_PSERIES(0x600, alignment) | ||
456 | STD_EXCEPTION_PSERIES(0x700, program_check) | ||
457 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | ||
458 | STD_EXCEPTION_PSERIES(0x900, decrementer) | ||
459 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | ||
460 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | ||
461 | |||
462 | . = 0xc00 | ||
463 | .globl system_call_pSeries | ||
464 | system_call_pSeries: | ||
465 | HMT_MEDIUM | ||
466 | RUNLATCH_ON(r9) | ||
467 | mr r9,r13 | ||
468 | mfmsr r10 | ||
469 | mfspr r13,SPRG3 | ||
470 | mfspr r11,SRR0 | ||
471 | clrrdi r12,r13,32 | ||
472 | oris r12,r12,system_call_common@h | ||
473 | ori r12,r12,system_call_common@l | ||
474 | mtspr SRR0,r12 | ||
475 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | ||
476 | mfspr r12,SRR1 | ||
477 | mtspr SRR1,r10 | ||
478 | rfid | ||
479 | b . /* prevent speculative execution */ | ||
480 | |||
481 | STD_EXCEPTION_PSERIES(0xd00, single_step) | ||
482 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | ||
483 | |||
484 | /* We need to deal with the Altivec unavailable exception | ||
485 | * here which is at 0xf20, thus in the middle of the | ||
486 | * prolog code of the PerformanceMonitor one. A little | ||
487 | * trickery is thus necessary | ||
488 | */ | ||
489 | . = 0xf00 | ||
490 | b performance_monitor_pSeries | ||
491 | |||
492 | STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) | ||
493 | |||
494 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | ||
495 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | ||
496 | |||
497 | . = 0x3000 | ||
498 | |||
499 | /*** pSeries interrupt support ***/ | ||
500 | |||
501 | /* moved from 0xf00 */ | ||
502 | STD_EXCEPTION_PSERIES(., performance_monitor) | ||
503 | |||
504 | .align 7 | ||
505 | _GLOBAL(do_stab_bolted_pSeries) | ||
506 | mtcrf 0x80,r12 | ||
507 | mfspr r12,SPRG2 | ||
508 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) | ||
509 | |||
510 | /* | ||
511 | * Vectors for the FWNMI option. Share common code. | ||
512 | */ | ||
513 | .globl system_reset_fwnmi | ||
514 | system_reset_fwnmi: | ||
515 | HMT_MEDIUM | ||
516 | mtspr SPRG1,r13 /* save r13 */ | ||
517 | RUNLATCH_ON(r13) | ||
518 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | ||
519 | |||
520 | .globl machine_check_fwnmi | ||
521 | machine_check_fwnmi: | ||
522 | HMT_MEDIUM | ||
523 | mtspr SPRG1,r13 /* save r13 */ | ||
524 | RUNLATCH_ON(r13) | ||
525 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
526 | |||
527 | #ifdef CONFIG_PPC_ISERIES | ||
528 | /*** ISeries-LPAR interrupt handlers ***/ | ||
529 | |||
530 | STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) | ||
531 | |||
532 | .globl data_access_iSeries | ||
533 | data_access_iSeries: | ||
534 | mtspr SPRG1,r13 | ||
535 | BEGIN_FTR_SECTION | ||
536 | mtspr SPRG2,r12 | ||
537 | mfspr r13,DAR | ||
538 | mfspr r12,DSISR | ||
539 | srdi r13,r13,60 | ||
540 | rlwimi r13,r12,16,0x20 | ||
541 | mfcr r12 | ||
542 | cmpwi r13,0x2c | ||
543 | beq .do_stab_bolted_iSeries | ||
544 | mtcrf 0x80,r12 | ||
545 | mfspr r12,SPRG2 | ||
546 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
547 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) | ||
548 | EXCEPTION_PROLOG_ISERIES_2 | ||
549 | b data_access_common | ||
550 | |||
551 | .do_stab_bolted_iSeries: | ||
552 | mtcrf 0x80,r12 | ||
553 | mfspr r12,SPRG2 | ||
554 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | ||
555 | EXCEPTION_PROLOG_ISERIES_2 | ||
556 | b .do_stab_bolted | ||
557 | |||
558 | .globl data_access_slb_iSeries | ||
559 | data_access_slb_iSeries: | ||
560 | mtspr SPRG1,r13 /* save r13 */ | ||
561 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | ||
562 | std r3,PACA_EXSLB+EX_R3(r13) | ||
563 | ld r12,PACALPPACA+LPPACASRR1(r13) | ||
564 | mfspr r3,DAR | ||
565 | b .do_slb_miss | ||
566 | |||
567 | STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) | ||
568 | |||
569 | .globl instruction_access_slb_iSeries | ||
570 | instruction_access_slb_iSeries: | ||
571 | mtspr SPRG1,r13 /* save r13 */ | ||
572 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | ||
573 | std r3,PACA_EXSLB+EX_R3(r13) | ||
574 | ld r12,PACALPPACA+LPPACASRR1(r13) | ||
575 | ld r3,PACALPPACA+LPPACASRR0(r13) | ||
576 | b .do_slb_miss | ||
577 | |||
578 | MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) | ||
579 | STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) | ||
580 | STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN) | ||
581 | STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN) | ||
582 | MASKABLE_EXCEPTION_ISERIES(0x900, decrementer) | ||
583 | STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN) | ||
584 | STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN) | ||
585 | |||
586 | .globl system_call_iSeries | ||
587 | system_call_iSeries: | ||
588 | mr r9,r13 | ||
589 | mfspr r13,SPRG3 | ||
590 | EXCEPTION_PROLOG_ISERIES_2 | ||
591 | b system_call_common | ||
592 | |||
593 | STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN) | ||
594 | STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) | ||
595 | STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) | ||
596 | |||
597 | .globl system_reset_iSeries | ||
598 | system_reset_iSeries: | ||
599 | mfspr r13,SPRG3 /* Get paca address */ | ||
600 | mfmsr r24 | ||
601 | ori r24,r24,MSR_RI | ||
602 | mtmsrd r24 /* RI on */ | ||
603 | lhz r24,PACAPACAINDEX(r13) /* Get processor # */ | ||
604 | cmpwi 0,r24,0 /* Are we processor 0? */ | ||
605 | beq .__start_initialization_iSeries /* Start up the first processor */ | ||
606 | mfspr r4,SPRN_CTRLF | ||
607 | li r5,CTRL_RUNLATCH /* Turn off the run light */ | ||
608 | andc r4,r4,r5 | ||
609 | mtspr SPRN_CTRLT,r4 | ||
610 | |||
611 | 1: | ||
612 | HMT_LOW | ||
613 | #ifdef CONFIG_SMP | ||
614 | lbz r23,PACAPROCSTART(r13) /* Test if this processor | ||
615 | * should start */ | ||
616 | sync | ||
617 | LOADADDR(r3,current_set) | ||
618 | sldi r28,r24,3 /* get current_set[cpu#] */ | ||
619 | ldx r3,r3,r28 | ||
620 | addi r1,r3,THREAD_SIZE | ||
621 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
622 | |||
623 | cmpwi 0,r23,0 | ||
624 | beq iSeries_secondary_smp_loop /* Loop until told to go */ | ||
625 | bne .__secondary_start /* Loop until told to go */ | ||
626 | iSeries_secondary_smp_loop: | ||
627 | /* Let the Hypervisor know we are alive */ | ||
628 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | ||
629 | lis r3,0x8002 | ||
630 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | ||
631 | #else /* CONFIG_SMP */ | ||
632 | /* Yield the processor. This is required for non-SMP kernels | ||
633 | which are running on multi-threaded machines. */ | ||
634 | lis r3,0x8000 | ||
635 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ | ||
636 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ | ||
637 | li r4,0 /* "yield timed" */ | ||
638 | li r5,-1 /* "yield forever" */ | ||
639 | #endif /* CONFIG_SMP */ | ||
640 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | ||
641 | sc /* Invoke the hypervisor via a system call */ | ||
642 | mfspr r13,SPRG3 /* Put r13 back ???? */ | ||
643 | b 1b /* If SMP not configured, secondaries | ||
644 | * loop forever */ | ||
645 | |||
646 | .globl decrementer_iSeries_masked | ||
647 | decrementer_iSeries_masked: | ||
648 | li r11,1 | ||
649 | stb r11,PACALPPACA+LPPACADECRINT(r13) | ||
650 | lwz r12,PACADEFAULTDECR(r13) | ||
651 | mtspr SPRN_DEC,r12 | ||
652 | /* fall through */ | ||
653 | |||
654 | .globl hardware_interrupt_iSeries_masked | ||
655 | hardware_interrupt_iSeries_masked: | ||
656 | mtcrf 0x80,r9 /* Restore regs */ | ||
657 | ld r11,PACALPPACA+LPPACASRR0(r13) | ||
658 | ld r12,PACALPPACA+LPPACASRR1(r13) | ||
659 | mtspr SRR0,r11 | ||
660 | mtspr SRR1,r12 | ||
661 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
662 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
663 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
664 | ld r12,PACA_EXGEN+EX_R12(r13) | ||
665 | ld r13,PACA_EXGEN+EX_R13(r13) | ||
666 | rfid | ||
667 | b . /* prevent speculative execution */ | ||
668 | #endif /* CONFIG_PPC_ISERIES */ | ||
669 | |||
670 | /*** Common interrupt handlers ***/ | ||
671 | |||
672 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | ||
673 | |||
674 | /* | ||
675 | * Machine check is different because we use a different | ||
676 | * save area: PACA_EXMC instead of PACA_EXGEN. | ||
677 | */ | ||
678 | .align 7 | ||
679 | .globl machine_check_common | ||
680 | machine_check_common: | ||
681 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | ||
682 | DISABLE_INTS | ||
683 | bl .save_nvgprs | ||
684 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
685 | bl .machine_check_exception | ||
686 | b .ret_from_except | ||
687 | |||
688 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | ||
689 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | ||
690 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | ||
691 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | ||
692 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | ||
693 | STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) | ||
694 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | ||
695 | #ifdef CONFIG_ALTIVEC | ||
696 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | ||
697 | #else | ||
698 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | ||
699 | #endif | ||
700 | |||
701 | /* | ||
702 | * Here we have detected that the kernel stack pointer is bad. | ||
703 | * R9 contains the saved CR, r13 points to the paca, | ||
704 | * r10 contains the (bad) kernel stack pointer, | ||
705 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
706 | * We switch to using an emergency stack, save the registers there, | ||
707 | * and call kernel_bad_stack(), which panics. | ||
708 | */ | ||
709 | bad_stack: | ||
710 | ld r1,PACAEMERGSP(r13) | ||
711 | subi r1,r1,64+INT_FRAME_SIZE | ||
712 | std r9,_CCR(r1) | ||
713 | std r10,GPR1(r1) | ||
714 | std r11,_NIP(r1) | ||
715 | std r12,_MSR(r1) | ||
716 | mfspr r11,DAR | ||
717 | mfspr r12,DSISR | ||
718 | std r11,_DAR(r1) | ||
719 | std r12,_DSISR(r1) | ||
720 | mflr r10 | ||
721 | mfctr r11 | ||
722 | mfxer r12 | ||
723 | std r10,_LINK(r1) | ||
724 | std r11,_CTR(r1) | ||
725 | std r12,_XER(r1) | ||
/* Save all remaining GPRs into the emergency-stack pt_regs frame. */
726 | SAVE_GPR(0,r1) | ||
727 | SAVE_GPR(2,r1) | ||
728 | SAVE_4GPRS(3,r1) | ||
729 | SAVE_2GPRS(7,r1) | ||
730 | SAVE_10GPRS(12,r1) | ||
731 | SAVE_10GPRS(22,r1) | ||
/* Terminate the stack-frame back-chain so unwinders stop here. */
732 | addi r11,r1,INT_FRAME_SIZE | ||
733 | std r11,0(r1) | ||
734 | li r12,0 | ||
735 | std r12,0(r11) | ||
736 | ld r2,PACATOC(r13) | ||
/* kernel_bad_stack() panics; loop in case it somehow returns. */
737 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
738 | bl .kernel_bad_stack | ||
739 | b 1b | ||
740 | |||
741 | /* | ||
742 | * Return from an exception with minimal checks. | ||
743 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | ||
744 | * If interrupts have been enabled, or anything has been | ||
745 | * done that might have changed the scheduling status of | ||
746 | * any task or sent any task a signal, you should use | ||
747 | * ret_from_except or ret_from_except_lite instead of this. | ||
748 | */ | ||
749 | fast_exception_return: | ||
750 | ld r12,_MSR(r1) | ||
751 | ld r11,_NIP(r1) | ||
752 | andi. r3,r12,MSR_RI /* check if RI is set */ | ||
753 | beq- unrecov_fer | ||
754 | ld r3,_CCR(r1) | ||
755 | ld r4,_LINK(r1) | ||
756 | ld r5,_CTR(r1) | ||
757 | ld r6,_XER(r1) | ||
758 | mtcr r3 | ||
759 | mtlr r4 | ||
760 | mtctr r5 | ||
761 | mtxer r6 | ||
762 | REST_GPR(0, r1) | ||
763 | REST_8GPRS(2, r1) | ||
764 | |||
/*
 * Clear MSR_RI before loading SRR0/SRR1: an exception in this window
 * would clobber them, so RI=0 marks the state as unrecoverable.
 */
765 | mfmsr r10 | ||
766 | clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ | ||
767 | mtmsrd r10,1 | ||
768 | |||
769 | mtspr SRR1,r12 | ||
770 | mtspr SRR0,r11 | ||
771 | REST_4GPRS(10, r1) | ||
772 | ld r1,GPR1(r1) | ||
773 | rfid | ||
774 | b . /* prevent speculative execution */ | ||
775 | |||
/* Entered with MSR_RI clear in the saved MSR: report and loop forever. */
776 | unrecov_fer: | ||
777 | bl .save_nvgprs | ||
778 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
779 | bl .unrecoverable_exception | ||
780 | b 1b | ||
781 | |||
782 | /* | ||
783 | * Here r13 points to the paca, r9 contains the saved CR, | ||
784 | * SRR0 and SRR1 are saved in r11 and r12, | ||
785 | * r9 - r13 are saved in paca->exgen. | ||
786 | */ | ||
787 | .align 7 | ||
788 | .globl data_access_common | ||
789 | data_access_common: | ||
790 | RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ | ||
/* Stash DAR/DSISR before the prolog, which may clobber r10. */
791 | mfspr r10,DAR | ||
792 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
793 | mfspr r10,DSISR | ||
794 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
795 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | ||
796 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
797 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
798 | li r5,0x300 | ||
799 | b .do_hash_page /* Try to handle as hpte fault */ | ||
800 | |||
801 | .align 7 | ||
802 | .globl instruction_access_common | ||
803 | instruction_access_common: | ||
804 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | ||
/* For ISI, the faulting address is the NIP and the "DSISR" bits come
 * from SRR1 (r12); 0x5820 selects the ISI error bits — see do_hash_page. */
805 | ld r3,_NIP(r1) | ||
806 | andis. r4,r12,0x5820 | ||
807 | li r5,0x400 | ||
808 | b .do_hash_page /* Try to handle as hpte fault */ | ||
809 | |||
810 | .align 7 | ||
811 | .globl hardware_interrupt_common | ||
812 | .globl hardware_interrupt_entry | ||
813 | hardware_interrupt_common: | ||
814 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | ||
815 | hardware_interrupt_entry: | ||
816 | DISABLE_INTS | ||
817 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
818 | bl .do_IRQ | ||
819 | b .ret_from_except_lite | ||
820 | |||
821 | .align 7 | ||
822 | .globl alignment_common | ||
823 | alignment_common: | ||
/* Capture DAR/DSISR for the C handler, as in data_access_common. */
824 | mfspr r10,DAR | ||
825 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
826 | mfspr r10,DSISR | ||
827 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
828 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | ||
829 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
830 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
831 | std r3,_DAR(r1) | ||
832 | std r4,_DSISR(r1) | ||
833 | bl .save_nvgprs | ||
834 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
835 | ENABLE_INTS | ||
836 | bl .alignment_exception | ||
837 | b .ret_from_except | ||
838 | |||
839 | .align 7 | ||
840 | .globl program_check_common | ||
841 | program_check_common: | ||
842 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
843 | bl .save_nvgprs | ||
844 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
845 | ENABLE_INTS | ||
846 | bl .program_check_exception | ||
847 | b .ret_from_except | ||
848 | |||
849 | .align 7 | ||
850 | .globl fp_unavailable_common | ||
851 | fp_unavailable_common: | ||
852 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | ||
/* CR still holds the user/kernel test from the prolog (presumably cr0 —
 * confirm against EXCEPTION_PROLOG_COMMON): from user, lazily load FP. */
853 | bne .load_up_fpu /* if from user, just load it up */ | ||
854 | bl .save_nvgprs | ||
855 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
856 | ENABLE_INTS | ||
857 | bl .kernel_fp_unavailable_exception | ||
/* Kernel FP use without enabling it first is a bug: trap here. */
858 | BUG_OPCODE | ||
859 | |||
860 | /* | ||
861 | * load_up_fpu(unused, unused, tsk) | ||
862 | * Disable FP for the task which had the FPU previously, | ||
863 | * and save its floating-point registers in its thread_struct. | ||
864 | * Enables the FPU for use in the kernel on return. | ||
865 | * On SMP we know the fpu is free, since we give it up every | ||
866 | * switch (ie, no lazy save of the FP registers). | ||
867 | * On entry: r13 == 'current' && last_task_used_math != 'current' | ||
868 | */ | ||
869 | _STATIC(load_up_fpu) | ||
870 | mfmsr r5 /* grab the current MSR */ | ||
871 | ori r5,r5,MSR_FP | ||
872 | mtmsrd r5 /* enable use of fpu now */ | ||
873 | isync | ||
874 | /* | ||
875 | * For SMP, we don't do lazy FPU switching because it just gets too | ||
876 | * horrendously complex, especially when a task switches from one CPU | ||
877 | * to another. Instead we call giveup_fpu in switch_to. | ||
878 | * | ||
879 | */ | ||
880 | #ifndef CONFIG_SMP | ||
881 | ld r3,last_task_used_math@got(r2) | ||
882 | ld r4,0(r3) | ||
883 | cmpdi 0,r4,0 | ||
884 | beq 1f | ||
885 | /* Save FP state to last_task_used_math's THREAD struct */ | ||
886 | addi r4,r4,THREAD | ||
887 | SAVE_32FPRS(0, r4) | ||
888 | mffs fr0 | ||
889 | stfd fr0,THREAD_FPSCR(r4) | ||
890 | /* Disable FP for last_task_used_math */ | ||
891 | ld r5,PT_REGS(r4) | ||
892 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
893 | li r6,MSR_FP|MSR_FE0|MSR_FE1 | ||
894 | andc r4,r4,r6 | ||
895 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
896 | 1: | ||
897 | #endif /* CONFIG_SMP */ | ||
898 | /* enable use of FP after return */ | ||
/* r12 is the saved MSR from the exception frame; OR in MSR_FP plus the
 * task's FP exception mode bits so rfid returns with FP enabled. */
899 | ld r4,PACACURRENT(r13) | ||
900 | addi r5,r4,THREAD /* Get THREAD */ | ||
901 | ld r4,THREAD_FPEXC_MODE(r5) | ||
902 | ori r12,r12,MSR_FP | ||
903 | or r12,r12,r4 | ||
904 | std r12,_MSR(r1) | ||
905 | lfd fr0,THREAD_FPSCR(r5) | ||
906 | mtfsf 0xff,fr0 | ||
907 | REST_32FPRS(0, r5) | ||
908 | #ifndef CONFIG_SMP | ||
909 | /* Update last_task_used_math to 'current' */ | ||
910 | subi r4,r5,THREAD /* Back to 'current' */ | ||
911 | std r4,0(r3) | ||
912 | #endif /* CONFIG_SMP */ | ||
913 | /* restore registers and return */ | ||
914 | b fast_exception_return | ||
915 | |||
/*
 * 0xf20 vector: a task touched a VMX register with MSR_VEC off.
 * From user space (and with the CPU actually having AltiVec) we lazily
 * load the vector state; otherwise report via the C handler.
 */
916 | .align 7 | ||
917 | .globl altivec_unavailable_common | ||
918 | altivec_unavailable_common: | ||
919 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | ||
920 | #ifdef CONFIG_ALTIVEC | ||
921 | BEGIN_FTR_SECTION | ||
922 | bne .load_up_altivec /* if from user, just load it up */ | ||
923 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
924 | #endif | ||
925 | bl .save_nvgprs | ||
926 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
927 | ENABLE_INTS | ||
928 | bl .altivec_unavailable_exception | ||
929 | b .ret_from_except | ||
930 | |||
931 | #ifdef CONFIG_ALTIVEC | ||
932 | /* | ||
933 | * load_up_altivec(unused, unused, tsk) | ||
934 | * Disable VMX for the task which had it previously, | ||
935 | * and save its vector registers in its thread_struct. | ||
936 | * Enables the VMX for use in the kernel on return. | ||
937 | * On SMP we know the VMX is free, since we give it up every | ||
938 | * switch (ie, no lazy save of the vector registers). | ||
939 | * On entry: r13 == 'current' && last_task_used_altivec != 'current' | ||
940 | */ | ||
941 | _STATIC(load_up_altivec) | ||
942 | mfmsr r5 /* grab the current MSR */ | ||
943 | oris r5,r5,MSR_VEC@h | ||
944 | mtmsrd r5 /* enable use of VMX now */ | ||
945 | isync | ||
946 | |||
947 | /* | ||
948 | * For SMP, we don't do lazy VMX switching because it just gets too | ||
949 | * horrendously complex, especially when a task switches from one CPU | ||
950 | * to another. Instead we call giveup_altvec in switch_to. | ||
951 | * VRSAVE isn't dealt with here, that is done in the normal context | ||
952 | * switch code. Note that we could rely on vrsave value to eventually | ||
953 | * avoid saving all of the VREGs here... | ||
954 | */ | ||
955 | #ifndef CONFIG_SMP | ||
956 | ld r3,last_task_used_altivec@got(r2) | ||
957 | ld r4,0(r3) | ||
958 | cmpdi 0,r4,0 | ||
959 | beq 1f | ||
960 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | ||
961 | addi r4,r4,THREAD | ||
962 | SAVE_32VRS(0,r5,r4) | ||
963 | mfvscr vr0 | ||
964 | li r10,THREAD_VSCR | ||
965 | stvx vr0,r10,r4 | ||
966 | /* Disable VMX for last_task_used_altivec */ | ||
967 | ld r5,PT_REGS(r4) | ||
968 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
969 | lis r6,MSR_VEC@h | ||
970 | andc r4,r4,r6 | ||
971 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
972 | 1: | ||
973 | #endif /* CONFIG_SMP */ | ||
974 | /* Hack: if we get an altivec unavailable trap with VRSAVE | ||
975 | * set to all zeros, we assume this is a broken application | ||
976 | * that fails to set it properly, and thus we switch it to | ||
977 | * all 1's | ||
978 | */ | ||
979 | mfspr r4,SPRN_VRSAVE | ||
980 | cmpdi 0,r4,0 | ||
981 | bne+ 1f | ||
982 | li r4,-1 | ||
983 | mtspr SPRN_VRSAVE,r4 | ||
984 | 1: | ||
985 | /* enable use of VMX after return */ | ||
/* r12 is the saved MSR from the exception frame; set MSR_VEC so rfid
 * returns with VMX enabled, and mark the thread as having used VR. */
986 | ld r4,PACACURRENT(r13) | ||
987 | addi r5,r4,THREAD /* Get THREAD */ | ||
988 | oris r12,r12,MSR_VEC@h | ||
989 | std r12,_MSR(r1) | ||
990 | li r4,1 | ||
991 | li r10,THREAD_VSCR | ||
992 | stw r4,THREAD_USED_VR(r5) | ||
993 | lvx vr0,r10,r5 | ||
994 | mtvscr vr0 | ||
995 | REST_32VRS(0,r4,r5) | ||
996 | #ifndef CONFIG_SMP | ||
997 | /* Update last_task_used_altivec to 'current' */ | ||
998 | subi r4,r5,THREAD /* Back to 'current' */ | ||
999 | std r4,0(r3) | ||
1000 | #endif /* CONFIG_SMP */ | ||
1001 | /* restore registers and return */ | ||
1002 | b fast_exception_return | ||
1003 | #endif /* CONFIG_ALTIVEC */ | ||
1004 | |||
1005 | /* | ||
1006 | * Hash table stuff | ||
1007 | */ | ||
/*
 * do_hash_page: common DSI/ISI fast path. On entry r3 = faulting address,
 * r4 = DSISR-style error bits, r5 = trap number (0x300/0x400), r12 = saved
 * MSR. Tries to resolve the fault by inserting a HPTE; falls through to the
 * full page-fault path on failure.
 */
1008 | .align 7 | ||
1009 | _GLOBAL(do_hash_page) | ||
1010 | std r3,_DAR(r1) | ||
1011 | std r4,_DSISR(r1) | ||
1012 | |||
/* 0xa450 covers the error conditions hash_page cannot fix (protection
 * fault, DABR match, etc. — presumably; confirm against DSISR bit defs). */
1013 | andis. r0,r4,0xa450 /* weird error? */ | ||
1014 | bne- .handle_page_fault /* if not, try to insert a HPTE */ | ||
1015 | BEGIN_FTR_SECTION | ||
1016 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | ||
1017 | bne- .do_ste_alloc /* If so handle it */ | ||
1018 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
1019 | |||
1020 | /* | ||
1021 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | ||
1022 | * accessing a userspace segment (even from the kernel). We assume | ||
1023 | * kernel addresses always have the high bit set. | ||
1024 | */ | ||
1025 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | ||
1026 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | ||
1027 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | ||
1028 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | ||
1029 | ori r4,r4,1 /* add _PAGE_PRESENT */ | ||
1030 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | ||
1031 | |||
1032 | /* | ||
1033 | * On iSeries, we soft-disable interrupts here, then | ||
1034 | * hard-enable interrupts so that the hash_page code can spin on | ||
1035 | * the hash_table_lock without problems on a shared processor. | ||
1036 | */ | ||
1037 | DISABLE_INTS | ||
1038 | |||
1039 | /* | ||
1040 | * r3 contains the faulting address | ||
1041 | * r4 contains the required access permissions | ||
1042 | * r5 contains the trap number | ||
1043 | * | ||
1044 | * at return r3 = 0 for success | ||
1045 | */ | ||
1046 | bl .hash_page /* build HPTE if possible */ | ||
1047 | cmpdi r3,0 /* see if hash_page succeeded */ | ||
1048 | |||
1049 | #ifdef DO_SOFT_DISABLE | ||
1050 | /* | ||
1051 | * If we had interrupts soft-enabled at the point where the | ||
1052 | * DSI/ISI occurred, and an interrupt came in during hash_page, | ||
1053 | * handle it now. | ||
1054 | * We jump to ret_from_except_lite rather than fast_exception_return | ||
1055 | * because ret_from_except_lite will check for and handle pending | ||
1056 | * interrupts if necessary. | ||
1057 | */ | ||
1058 | beq .ret_from_except_lite | ||
1059 | /* For a hash failure, we don't bother re-enabling interrupts */ | ||
1060 | ble- 12f | ||
1061 | |||
1062 | /* | ||
1063 | * hash_page couldn't handle it, set soft interrupt enable back | ||
1064 | * to what it was before the trap. Note that .local_irq_restore | ||
1065 | * handles any interrupts pending at this point. | ||
1066 | */ | ||
1067 | ld r3,SOFTE(r1) | ||
1068 | bl .local_irq_restore | ||
1069 | b 11f | ||
1070 | #else | ||
1071 | beq fast_exception_return /* Return from exception on success */ | ||
1072 | ble- 12f /* Failure return from hash_page */ | ||
1073 | |||
1074 | /* fall through */ | ||
1075 | #endif | ||
1076 | |||
1077 | /* Here we have a page fault that hash_page can't handle. */ | ||
1078 | _GLOBAL(handle_page_fault) | ||
1079 | ENABLE_INTS | ||
1080 | 11: ld r4,_DAR(r1) | ||
1081 | ld r5,_DSISR(r1) | ||
1082 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1083 | bl .do_page_fault | ||
/* do_page_fault returns 0 on success; nonzero is a SIGSEGV/SIGBUS-style
 * error code that bad_page_fault reports (presumably — confirm in C). */
1084 | cmpdi r3,0 | ||
1085 | beq+ .ret_from_except_lite | ||
1086 | bl .save_nvgprs | ||
1087 | mr r5,r3 | ||
1088 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1089 | lwz r4,_DAR(r1) | ||
1090 | bl .bad_page_fault | ||
1091 | b .ret_from_except | ||
1092 | |||
1093 | /* We have a page fault that hash_page could handle but HV refused | ||
1094 | * the PTE insertion | ||
1095 | */ | ||
1096 | 12: bl .save_nvgprs | ||
1097 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1098 | lwz r4,_DAR(r1) | ||
1099 | bl .low_hash_fault | ||
1100 | b .ret_from_except | ||
1101 | |||
1102 | /* here we have a segment miss */ | ||
1103 | _GLOBAL(do_ste_alloc) | ||
1104 | bl .ste_allocate /* try to insert stab entry */ | ||
1105 | cmpdi r3,0 | ||
1106 | beq+ fast_exception_return | ||
1107 | b .handle_page_fault | ||
1108 | |||
1109 | /* | ||
1110 | * r13 points to the PACA, r9 contains the saved CR, | ||
1111 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
1112 | * r9 - r13 are saved in paca->exslb. | ||
1113 | * We assume we aren't going to take any exceptions during this procedure. | ||
1114 | * We assume (DAR >> 60) == 0xc. | ||
1115 | */ | ||
1116 | .align 7 | ||
1117 | _GLOBAL(do_stab_bolted) | ||
1118 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1119 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | ||
1120 | |||
1121 | /* Hash to the primary group */ | ||
1122 | ld r10,PACASTABVIRT(r13) | ||
1123 | mfspr r11,DAR | ||
1124 | srdi r11,r11,28 | ||
1125 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | ||
1126 | |||
1127 | /* Calculate VSID */ | ||
1128 | /* This is a kernel address, so protovsid = ESID */ | ||
1129 | ASM_VSID_SCRAMBLE(r11, r9) | ||
1130 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | ||
1131 | |||
/* 8 STEs per group, 16 bytes each; stop when the low bits wrap. */
1132 | /* Search the primary group for a free entry */ | ||
1133 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | ||
1134 | andi. r11,r11,0x80 | ||
1135 | beq 2f | ||
1136 | addi r10,r10,16 | ||
1137 | andi. r11,r10,0x70 | ||
1138 | bne 1b | ||
1139 | |||
1140 | /* Stick for only searching the primary group for now. */ | ||
1141 | /* At least for now, we use a very simple random castout scheme */ | ||
1142 | /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ | ||
1143 | mftb r11 | ||
1144 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | ||
1145 | ori r11,r11,0x10 | ||
1146 | |||
1147 | /* r10 currently points to an ste one past the group of interest */ | ||
1148 | /* make it point to the randomly selected entry */ | ||
1149 | subi r10,r10,128 | ||
1150 | or r10,r10,r11 /* r10 is the entry to invalidate */ | ||
1151 | |||
1152 | isync /* mark the entry invalid */ | ||
1153 | ld r11,0(r10) | ||
1154 | rldicl r11,r11,56,1 /* clear the valid bit */ | ||
1155 | rotldi r11,r11,8 | ||
1156 | std r11,0(r10) | ||
1157 | sync | ||
1158 | |||
/* Flush any cached translation for the evicted segment. */
1159 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | ||
1160 | slbie r11 | ||
1161 | |||
1162 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | ||
1163 | eieio | ||
1164 | |||
1165 | mfspr r11,DAR /* Get the new esid */ | ||
1166 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | ||
1167 | ori r11,r11,0x90 /* Turn on valid and kp */ | ||
1168 | std r11,0(r10) /* Put new entry back into the stab */ | ||
1169 | |||
1170 | sync | ||
1171 | |||
1172 | /* All done -- return from exception. */ | ||
1173 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1174 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | ||
1175 | |||
1176 | andi. r10,r12,MSR_RI | ||
1177 | beq- unrecov_slb | ||
1178 | |||
1179 | mtcrf 0x80,r9 /* restore CR */ | ||
1180 | |||
/* Clear MSR_RI while SRR0/SRR1 hold the return state (see
 * fast_exception_return for the same pattern). */
1181 | mfmsr r10 | ||
1182 | clrrdi r10,r10,2 | ||
1183 | mtmsrd r10,1 | ||
1184 | |||
1185 | mtspr SRR0,r11 | ||
1186 | mtspr SRR1,r12 | ||
1187 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1188 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1189 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1190 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1191 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1192 | rfid | ||
1193 | b . /* prevent speculative execution */ | ||
1194 | |||
1195 | /* | ||
1196 | * r13 points to the PACA, r9 contains the saved CR, | ||
1197 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
1198 | * r3 has the faulting address | ||
1199 | * r9 - r13 are saved in paca->exslb. | ||
1200 | * r3 is saved in paca->slb_r3 | ||
1201 | * We assume we aren't going to take any exceptions during this procedure. | ||
1202 | */ | ||
1203 | _GLOBAL(do_slb_miss) | ||
1204 | mflr r10 | ||
1205 | |||
1206 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1207 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
1208 | |||
1209 | bl .slb_allocate /* handle it */ | ||
1210 | |||
1211 | /* All done -- return from exception. */ | ||
1212 | |||
1213 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
1214 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
1215 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1216 | #ifdef CONFIG_PPC_ISERIES | ||
/* On iSeries SRR0/SRR1 live in the lppaca, not the hardware SPRs. */
1217 | ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ | ||
1218 | #endif /* CONFIG_PPC_ISERIES */ | ||
1219 | |||
1220 | mtlr r10 | ||
1221 | |||
1222 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
1223 | beq- unrecov_slb | ||
1224 | |||
/* mtcrf of two separate fields is a POWER4 form; restore only the CR
 * fields slb_allocate may have clobbered. */
1225 | .machine push | ||
1226 | .machine "power4" | ||
1227 | mtcrf 0x80,r9 | ||
1228 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
1229 | .machine pop | ||
1230 | |||
1231 | #ifdef CONFIG_PPC_ISERIES | ||
1232 | mtspr SRR0,r11 | ||
1233 | mtspr SRR1,r12 | ||
1234 | #endif /* CONFIG_PPC_ISERIES */ | ||
1235 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1236 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1237 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1238 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1239 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1240 | rfid | ||
1241 | b . /* prevent speculative execution */ | ||
1242 | |||
/* SLB/STAB fault with MSR_RI clear: build a frame and report forever. */
1243 | unrecov_slb: | ||
1244 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
1245 | DISABLE_INTS | ||
1246 | bl .save_nvgprs | ||
1247 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
1248 | bl .unrecoverable_exception | ||
1249 | b 1b | ||
1250 | |||
1251 | /* | ||
1252 | * Space for CPU0's segment table. | ||
1253 | * | ||
1254 | * On iSeries, the hypervisor must fill in at least one entry before | ||
1255 | * we get control (with relocate on). The address is give to the hv | ||
1256 | * as a page number (see xLparMap in LparData.c), so this must be at a | ||
1257 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
1258 | * PAGE_SHIFT). | ||
1259 | */ | ||
1260 | . = STAB0_PHYS_ADDR /* 0x6000 */ | ||
1261 | .globl initial_stab | ||
1262 | initial_stab: | ||
1263 | .space 4096 | ||
1264 | |||
1265 | /* | ||
1266 | * Data area reserved for FWNMI option. | ||
1267 | * This address (0x7000) is fixed by the RPA. | ||
1268 | */ | ||
1269 | .= 0x7000 | ||
1270 | .globl fwnmi_data_area | ||
1271 | fwnmi_data_area: | ||
1272 | |||
1273 | /* iSeries does not use the FWNMI stuff, so it is safe to put | ||
1274 | * this here, even if we later allow kernels that will boot on | ||
1275 | * both pSeries and iSeries */ | ||
1276 | #ifdef CONFIG_PPC_ISERIES | ||
1277 | . = LPARMAP_PHYS | ||
1278 | #include "lparmap.s" | ||
1279 | /* | ||
1280 | * This ".text" is here for old compilers that generate a trailing | ||
1281 | * .note section when compiling .c files to .s | ||
1282 | */ | ||
1283 | .text | ||
1284 | #endif /* CONFIG_PPC_ISERIES */ | ||
1285 | |||
/* Code resumes at the fixed 0x8000 offset expected by the boot protocol. */
1286 | . = 0x8000 | ||
1287 | |||
1288 | /* | ||
1289 | * On pSeries, secondary processors spin in the following code. | ||
1290 | * At entry, r3 = this processor's number (physical cpu id) | ||
1291 | */ | ||
1292 | _GLOBAL(pSeries_secondary_smp_init) | ||
1293 | mr r24,r3 | ||
1294 | |||
1295 | /* turn on 64-bit mode */ | ||
1296 | bl .enable_64b_mode | ||
1297 | isync | ||
1298 | |||
1299 | /* Copy some CPU settings from CPU 0 */ | ||
1300 | bl .__restore_cpu_setup | ||
1301 | |||
1302 | /* Set up a paca value for this processor. Since we have the | ||
1303 | * physical cpu id in r24, we need to search the pacas to find | ||
1304 | * which logical id maps to our physical one. | ||
1305 | */ | ||
1306 | LOADADDR(r13, paca) /* Get base vaddr of paca array */ | ||
1307 | li r5,0 /* logical cpu id */ | ||
1308 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | ||
1309 | cmpw r6,r24 /* Compare to our id */ | ||
1310 | beq 2f | ||
1311 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | ||
1312 | addi r5,r5,1 | ||
1313 | cmpwi r5,NR_CPUS | ||
1314 | blt 1b | ||
1315 | |||
/* No paca matched our physical id: park this CPU in kexec_wait. */
1316 | mr r3,r24 /* not found, copy phys to r3 */ | ||
1317 | b .kexec_wait /* next kernel might do better */ | ||
1318 | |||
1319 | 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | ||
1320 | /* From now on, r24 is expected to be logical cpuid */ | ||
1321 | mr r24,r5 | ||
/* Spin at low priority until the boot CPU sets paca->procstart. */
1322 | 3: HMT_LOW | ||
1323 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | ||
1324 | /* start. */ | ||
1325 | sync | ||
1326 | |||
1327 | /* Create a temp kernel stack for use before relocation is on. */ | ||
1328 | ld r1,PACAEMERGSP(r13) | ||
1329 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
1330 | |||
1331 | cmpwi 0,r23,0 | ||
1332 | #ifdef CONFIG_SMP | ||
1333 | bne .__secondary_start | ||
1334 | #endif | ||
1335 | b 3b /* Loop until told to go */ | ||
1336 | |||
1337 | #ifdef CONFIG_PPC_ISERIES | ||
/*
 * iSeries boot entry: clear BSS, set up the initial stack and TOC,
 * identify the CPU, do early platform setup, then join the common
 * startup path. Relocation is already on when the HV starts us.
 */
1338 | _STATIC(__start_initialization_iSeries) | ||
1339 | /* Clear out the BSS */ | ||
1340 | LOADADDR(r11,__bss_stop) | ||
1341 | LOADADDR(r8,__bss_start) | ||
1342 | sub r11,r11,r8 /* bss size */ | ||
1343 | addi r11,r11,7 /* round up to an even double word */ | ||
1344 | rldicl. r11,r11,61,3 /* shift right by 3 */ | ||
1345 | beq 4f | ||
1346 | addi r8,r8,-8 | ||
1347 | li r0,0 | ||
1348 | mtctr r11 /* zero this many doublewords */ | ||
1349 | 3: stdu r0,8(r8) | ||
1350 | bdnz 3b | ||
1351 | 4: | ||
1352 | LOADADDR(r1,init_thread_union) | ||
1353 | addi r1,r1,THREAD_SIZE | ||
1354 | li r0,0 | ||
1355 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | ||
1356 | |||
1357 | LOADADDR(r3,cpu_specs) | ||
1358 | LOADADDR(r4,cur_cpu_spec) | ||
1359 | li r5,0 | ||
1360 | bl .identify_cpu | ||
1361 | |||
/* TOC pointer = __toc_start + 0x8000 (split to keep addi offsets
 * within the signed 16-bit immediate range). */
1362 | LOADADDR(r2,__toc_start) | ||
1363 | addi r2,r2,0x4000 | ||
1364 | addi r2,r2,0x4000 | ||
1365 | |||
1366 | bl .iSeries_early_setup | ||
1367 | |||
1368 | /* relocation is on at this point */ | ||
1369 | |||
1370 | b .start_here_common | ||
1371 | #endif /* CONFIG_PPC_ISERIES */ | ||
1372 | |||
1373 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
1374 | |||
/*
 * __mmu_off: turn off instruction and data relocation and continue at
 * the real address in r4. Returns immediately (blr) if the MMU is
 * already off; otherwise returns to caller-supplied r4 via rfid.
 */
1375 | _STATIC(__mmu_off) | ||
1376 | mfmsr r3 | ||
1377 | andi. r0,r3,MSR_IR|MSR_DR | ||
1378 | beqlr | ||
1379 | andc r3,r3,r0 | ||
1380 | mtspr SPRN_SRR0,r4 | ||
1381 | mtspr SPRN_SRR1,r3 | ||
1382 | sync | ||
1383 | rfid | ||
1384 | b . /* prevent speculative execution */ | ||
1385 | |||
1386 | |||
1387 | /* | ||
1388 | * Here is our main kernel entry point. We support currently 2 kind of entries | ||
1389 | * depending on the value of r5. | ||
1390 | * | ||
1391 | * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content | ||
1392 | * in r3...r7 | ||
1393 | * | ||
1394 | * r5 == NULL -> kexec style entry. r3 is a physical pointer to the | ||
1395 | * DT block, r4 is a physical pointer to the kernel itself | ||
1396 | * | ||
1397 | */ | ||
1398 | _GLOBAL(__start_initialization_multiplatform) | ||
1399 | /* | ||
1400 | * Are we booted from a PROM Of-type client-interface ? | ||
1401 | */ | ||
1402 | cmpldi cr0,r5,0 | ||
1403 | bne .__boot_from_prom /* yes -> prom */ | ||
1404 | |||
/* kexec-style entry: keep the DT pointer and kernel phys addr in
 * non-volatile registers across the calls below. */
1405 | /* Save parameters */ | ||
1406 | mr r31,r3 | ||
1407 | mr r30,r4 | ||
1408 | |||
1409 | /* Make sure we are running in 64 bits mode */ | ||
1410 | bl .enable_64b_mode | ||
1411 | |||
1412 | /* Setup some critical 970 SPRs before switching MMU off */ | ||
1413 | bl .__970_cpu_preinit | ||
1414 | |||
1415 | /* cpu # */ | ||
1416 | li r24,0 | ||
1417 | |||
1418 | /* Switch off MMU if not already */ | ||
1419 | LOADADDR(r4, .__after_prom_start - KERNELBASE) | ||
1420 | add r4,r4,r30 | ||
1421 | bl .__mmu_off | ||
1422 | b .__after_prom_start | ||
1423 | |||
/* OF client-interface entry: preserve r3-r7 in r31-r27, relocate the
 * TOC to a real address, then hand over to prom_init (never returns). */
1424 | _STATIC(__boot_from_prom) | ||
1425 | /* Save parameters */ | ||
1426 | mr r31,r3 | ||
1427 | mr r30,r4 | ||
1428 | mr r29,r5 | ||
1429 | mr r28,r6 | ||
1430 | mr r27,r7 | ||
1431 | |||
1432 | /* Make sure we are running in 64 bits mode */ | ||
1433 | bl .enable_64b_mode | ||
1434 | |||
1435 | /* put a relocation offset into r3 */ | ||
1436 | bl .reloc_offset | ||
1437 | |||
1438 | LOADADDR(r2,__toc_start) | ||
1439 | addi r2,r2,0x4000 | ||
1440 | addi r2,r2,0x4000 | ||
1441 | |||
1442 | /* Relocate the TOC from a virt addr to a real addr */ | ||
1443 | sub r2,r2,r3 | ||
1444 | |||
1445 | /* Restore parameters */ | ||
1446 | mr r3,r31 | ||
1447 | mr r4,r30 | ||
1448 | mr r5,r29 | ||
1449 | mr r6,r28 | ||
1450 | mr r7,r27 | ||
1451 | |||
1452 | /* Do all of the interaction with OF client interface */ | ||
1453 | bl .prom_init | ||
1454 | /* We never return */ | ||
1455 | trap | ||
1456 | |||
1457 | /* | ||
1458 | * At this point, r3 contains the physical address we are running at, | ||
1459 | * returned by prom_init() | ||
1460 | */ | ||
1461 | _STATIC(__after_prom_start) | ||
1462 | |||
1463 | /* | ||
1464 | * We need to run with __start at physical address 0. | ||
1465 | * This will leave some code in the first 256B of | ||
1466 | * real memory, which are reserved for software use. | ||
1467 | * The remainder of the first page is loaded with the fixed | ||
1468 | * interrupt vectors. The next two pages are filled with | ||
1469 | * unknown exception placeholders. | ||
1470 | * | ||
1471 | * Note: This process overwrites the OF exception vectors. | ||
1472 | * r26 == relocation offset | ||
1473 | * r27 == KERNELBASE | ||
1474 | */ | ||
1475 | bl .reloc_offset | ||
1476 | mr r26,r3 | ||
1477 | SET_REG_TO_CONST(r27,KERNELBASE) | ||
1478 | |||
1479 | li r3,0 /* target addr */ | ||
1480 | |||
1481 | // XXX FIXME: Use phys returned by OF (r30) | ||
1482 | sub r4,r27,r26 /* source addr */ | ||
1483 | /* current address of _start */ | ||
1484 | /* i.e. where we are running */ | ||
1485 | /* the source addr */ | ||
1486 | |||
/* First pass: copy everything up to copy_to_here (including the code
 * currently executing), then jump into the relocated copy. */
1487 | LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ | ||
1488 | sub r5,r5,r27 | ||
1489 | |||
1490 | li r6,0x100 /* Start offset, the first 0x100 */ | ||
1491 | /* bytes were copied earlier. */ | ||
1492 | |||
1493 | bl .copy_and_flush /* copy the first n bytes */ | ||
1494 | /* this includes the code being */ | ||
1495 | /* executed here. */ | ||
1496 | |||
1497 | LOADADDR(r0, 4f) /* Jump to the copy of this code */ | ||
1498 | mtctr r0 /* that we just made/relocated */ | ||
1499 | bctr | ||
1500 | |||
/* Second pass (running from the copy): copy the rest up to klimit. */
1501 | 4: LOADADDR(r5,klimit) | ||
1502 | sub r5,r5,r26 | ||
1503 | ld r5,0(r5) /* get the value of klimit */ | ||
1504 | sub r5,r5,r27 | ||
1505 | bl .copy_and_flush /* copy the rest */ | ||
1506 | b .start_here_multiplatform | ||
1507 | |||
1508 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
1509 | |||
1510 | /* | ||
1511 | * Copy routine used to copy the kernel to start at physical address 0 | ||
1512 | * and flush and invalidate the caches as needed. | ||
1513 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | ||
1514 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | ||
1515 | * | ||
1516 | * Note: this routine *only* clobbers r0, r6 and lr | ||
1517 | */ | ||
1518 | _GLOBAL(copy_and_flush) | ||
/* Pre-bias r5/r6 by 8 so the stdu-style "add then access" loop below
 * covers the full range; undone at exit. */
1519 | addi r5,r5,-8 | ||
1520 | addi r6,r6,-8 | ||
1521 | 4: li r0,16 /* Use the least common */ | ||
1522 | /* denominator cache line */ | ||
1523 | /* size. This results in */ | ||
1524 | /* extra cache line flushes */ | ||
1525 | /* but operation is correct. */ | ||
1526 | /* Can't get cache line size */ | ||
1527 | /* from NACA as it is being */ | ||
1528 | /* moved too. */ | ||
1529 | |||
1530 | mtctr r0 /* put # words/line in ctr */ | ||
1531 | 3: addi r6,r6,8 /* copy a cache line */ | ||
1532 | ldx r0,r6,r4 | ||
1533 | stdx r0,r6,r3 | ||
1534 | bdnz 3b | ||
/* Push the copied line to memory and invalidate the stale icache line
 * so the relocated code can be executed. */
1535 | dcbst r6,r3 /* write it to memory */ | ||
1536 | sync | ||
1537 | icbi r6,r3 /* flush the icache line */ | ||
1538 | cmpld 0,r6,r5 | ||
1539 | blt 4b | ||
1540 | sync | ||
1541 | addi r5,r5,8 | ||
1542 | addi r6,r6,8 | ||
1543 | blr | ||
1544 | |||
/* Everything before copy_to_here is relocated in the first copy pass. */
1545 | .align 8 | ||
1546 | copy_to_here: | ||
1547 | |||
1548 | #ifdef CONFIG_SMP | ||
1549 | #ifdef CONFIG_PPC_PMAC | ||
1550 | /* | ||
1551 | * On PowerMac, secondary processors starts from the reset vector, which | ||
1552 | * is temporarily turned into a call to one of the functions below. | ||
1553 | */ | ||
1554 | .section ".text"; | ||
1555 | .align 2 ; | ||
1556 | |||
/* Per-CPU stubs: load the CPU number into r24 and join the common path. */
1557 | .globl pmac_secondary_start_1 | ||
1558 | pmac_secondary_start_1: | ||
1559 | li r24, 1 | ||
1560 | b .pmac_secondary_start | ||
1561 | |||
1562 | .globl pmac_secondary_start_2 | ||
1563 | pmac_secondary_start_2: | ||
1564 | li r24, 2 | ||
1565 | b .pmac_secondary_start | ||
1566 | |||
1567 | .globl pmac_secondary_start_3 | ||
1568 | pmac_secondary_start_3: | ||
1569 | li r24, 3 | ||
1570 | b .pmac_secondary_start | ||
1571 | |||
1572 | _GLOBAL(pmac_secondary_start) | ||
1573 | /* turn on 64-bit mode */ | ||
1574 | bl .enable_64b_mode | ||
1575 | isync | ||
1576 | |||
1577 | /* Copy some CPU settings from CPU 0 */ | ||
1578 | bl .__restore_cpu_setup | ||
1579 | |||
1580 | /* pSeries do that early though I don't think we really need it */ | ||
1581 | mfmsr r3 | ||
1582 | ori r3,r3,MSR_RI | ||
1583 | mtmsrd r3 /* RI on */ | ||
1584 | |||
/* Unlike pSeries, the pmac CPU number maps directly to the paca index. */
1585 | /* Set up a paca value for this processor. */ | ||
1586 | LOADADDR(r4, paca) /* Get base vaddr of paca array */ | ||
1587 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ | ||
1588 | add r13,r13,r4 /* for this processor. */ | ||
1589 | mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | ||
1590 | |||
1591 | /* Create a temp kernel stack for use before relocation is on. */ | ||
1592 | ld r1,PACAEMERGSP(r13) | ||
1593 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
1594 | |||
1595 | b .__secondary_start | ||
1596 | |||
1597 | #endif /* CONFIG_PPC_PMAC */ | ||
1598 | |||
1599 | /* | ||
1600 | * This function is called after the master CPU has released the | ||
1601 | * secondary processors. The execution environment is relocation off. | ||
1602 | * The paca for this processor has the following fields initialized at | ||
1603 | * this point: | ||
1604 | * 1. Processor number | ||
1605 | * 2. Segment table pointer (virtual address) | ||
1606 | * On entry the following are set: | ||
1607 | * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries | ||
1608 | * r24 = cpu# (in Linux terms) | ||
1609 | * r13 = paca virtual address | ||
1610 | * SPRG3 = paca virtual address | ||
1611 | */ | ||
1612 | _GLOBAL(__secondary_start) | ||
1613 | |||
1614 | HMT_MEDIUM /* Set thread priority to MEDIUM */ | ||
1615 | |||
1616 | ld r2,PACATOC(r13) | ||
1617 | li r6,0 | ||
1618 | stb r6,PACAPROCENABLED(r13) | ||
1619 | |||
1620 | #ifndef CONFIG_PPC_ISERIES | ||
1621 | /* Initialize the page table pointer register. */ | ||
1622 | LOADADDR(r6,_SDR1) | ||
1623 | ld r6,0(r6) /* get the value of _SDR1 */ | ||
1624 | mtspr SDR1,r6 /* set the htab location */ | ||
1625 | #endif | ||
1626 | /* Initialize the first segment table (or SLB) entry */ | ||
1627 | ld r3,PACASTABVIRT(r13) /* get addr of segment table */ | ||
1628 | bl .stab_initialize | ||
1629 | |||
1630 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | ||
1631 | LOADADDR(r3,current_set) | ||
1632 | sldi r28,r24,3 /* get current_set[cpu#] */ | ||
1633 | ldx r1,r3,r28 | ||
1634 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
1635 | std r1,PACAKSAVE(r13) | ||
1636 | |||
1637 | ld r3,PACASTABREAL(r13) /* get raddr of segment table */ | ||
1638 | ori r4,r3,1 /* turn on valid bit */ | ||
1639 | |||
1640 | #ifdef CONFIG_PPC_ISERIES | ||
1641 | li r0,-1 /* hypervisor call */ | ||
1642 | li r3,1 | ||
1643 | sldi r3,r3,63 /* 0x8000000000000000 */ | ||
1644 | ori r3,r3,4 /* 0x8000000000000004 */ | ||
1645 | sc /* HvCall_setASR */ | ||
1646 | #else | ||
1647 | /* set the ASR */ | ||
1648 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | ||
1649 | ld r3,0(r3) | ||
1650 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | ||
1651 | andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ | ||
1652 | beq 98f /* branch if result is 0 */ | ||
1653 | mfspr r3,PVR | ||
1654 | srwi r3,r3,16 | ||
1655 | cmpwi r3,0x37 /* SStar */ | ||
1656 | beq 97f | ||
1657 | cmpwi r3,0x36 /* IStar */ | ||
1658 | beq 97f | ||
1659 | cmpwi r3,0x34 /* Pulsar */ | ||
1660 | bne 98f | ||
1661 | 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ | ||
1662 | HVSC /* Invoking hcall */ | ||
1663 | b 99f | ||
1664 | 98: /* !(rpa hypervisor) || !(star) */ | ||
1665 | mtasr r4 /* set the stab location */ | ||
1666 | 99: | ||
1667 | #endif | ||
1668 | li r7,0 | ||
1669 | mtlr r7 | ||
1670 | |||
1671 | /* enable MMU and jump to start_secondary */ | ||
1672 | LOADADDR(r3,.start_secondary_prolog) | ||
1673 | SET_REG_TO_CONST(r4, MSR_KERNEL) | ||
1674 | #ifdef DO_SOFT_DISABLE | ||
1675 | ori r4,r4,MSR_EE | ||
1676 | #endif | ||
1677 | mtspr SRR0,r3 | ||
1678 | mtspr SRR1,r4 | ||
1679 | rfid | ||
1680 | b . /* prevent speculative execution */ | ||
1681 | |||
1682 | /* | ||
1683 | * Running with relocation on at this point. All we want to do is | ||
1684 | * zero the stack back-chain pointer before going into C code. | ||
1685 | */ | ||
1686 | _GLOBAL(start_secondary_prolog) | ||
1687 | li r3,0 | ||
1688 | std r3,0(r1) /* Zero the stack frame pointer */ | ||
1689 | bl .start_secondary | ||
1690 | #endif | ||
1691 | |||
1692 | /* | ||
1693 | * This subroutine clobbers r11 and r12 | ||
1694 | */ | ||
1695 | _GLOBAL(enable_64b_mode) | ||
1696 | mfmsr r11 /* grab the current MSR */ | ||
1697 | li r12,1 | ||
1698 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | ||
1699 | or r11,r11,r12 | ||
1700 | li r12,1 | ||
1701 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | ||
1702 | or r11,r11,r12 | ||
1703 | mtmsrd r11 | ||
1704 | isync | ||
1705 | blr | ||
1706 | |||
1707 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
1708 | /* | ||
1709 | * This is where the main kernel code starts. | ||
1710 | */ | ||
1711 | _STATIC(start_here_multiplatform) | ||
1712 | /* get a new offset, now that the kernel has moved. */ | ||
1713 | bl .reloc_offset | ||
1714 | mr r26,r3 | ||
1715 | |||
1716 | /* Clear out the BSS. It may have been done in prom_init, | ||
1717 | * already but that's irrelevant since prom_init will soon | ||
1718 | * be detached from the kernel completely. Besides, we need | ||
1719 | * to clear it now for kexec-style entry. | ||
1720 | */ | ||
1721 | LOADADDR(r11,__bss_stop) | ||
1722 | LOADADDR(r8,__bss_start) | ||
1723 | sub r11,r11,r8 /* bss size */ | ||
1724 | addi r11,r11,7 /* round up to an even double word */ | ||
1725 | rldicl. r11,r11,61,3 /* shift right by 3 */ | ||
1726 | beq 4f | ||
1727 | addi r8,r8,-8 | ||
1728 | li r0,0 | ||
1729 | mtctr r11 /* zero this many doublewords */ | ||
1730 | 3: stdu r0,8(r8) | ||
1731 | bdnz 3b | ||
1732 | 4: | ||
1733 | |||
1734 | mfmsr r6 | ||
1735 | ori r6,r6,MSR_RI | ||
1736 | mtmsrd r6 /* RI on */ | ||
1737 | |||
1738 | #ifdef CONFIG_HMT | ||
1739 | /* Start up the second thread on cpu 0 */ | ||
1740 | mfspr r3,PVR | ||
1741 | srwi r3,r3,16 | ||
1742 | cmpwi r3,0x34 /* Pulsar */ | ||
1743 | beq 90f | ||
1744 | cmpwi r3,0x36 /* Icestar */ | ||
1745 | beq 90f | ||
1746 | cmpwi r3,0x37 /* SStar */ | ||
1747 | beq 90f | ||
1748 | b 91f /* HMT not supported */ | ||
1749 | 90: li r3,0 | ||
1750 | bl .hmt_start_secondary | ||
1751 | 91: | ||
1752 | #endif | ||
1753 | |||
1754 | /* The following gets the stack and TOC set up with the regs */ | ||
1755 | /* pointing to the real addr of the kernel stack. This is */ | ||
1756 | /* all done to support the C function call below which sets */ | ||
1757 | /* up the htab. This is done because we have relocated the */ | ||
1758 | /* kernel but are still running in real mode. */ | ||
1759 | |||
1760 | LOADADDR(r3,init_thread_union) | ||
1761 | sub r3,r3,r26 | ||
1762 | |||
1763 | /* set up a stack pointer (physical address) */ | ||
1764 | addi r1,r3,THREAD_SIZE | ||
1765 | li r0,0 | ||
1766 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | ||
1767 | |||
1768 | /* set up the TOC (physical address) */ | ||
1769 | LOADADDR(r2,__toc_start) | ||
1770 | addi r2,r2,0x4000 | ||
1771 | addi r2,r2,0x4000 | ||
1772 | sub r2,r2,r26 | ||
1773 | |||
1774 | LOADADDR(r3,cpu_specs) | ||
1775 | sub r3,r3,r26 | ||
1776 | LOADADDR(r4,cur_cpu_spec) | ||
1777 | sub r4,r4,r26 | ||
1778 | mr r5,r26 | ||
1779 | bl .identify_cpu | ||
1780 | |||
1781 | /* Save some low level config HIDs of CPU0 to be copied to | ||
1782 | * other CPUs later on, or used for suspend/resume | ||
1783 | */ | ||
1784 | bl .__save_cpu_setup | ||
1785 | sync | ||
1786 | |||
1787 | /* Setup a valid physical PACA pointer in SPRG3 for early_setup | ||
1788 | * note that boot_cpuid can always be 0 nowadays since there is | ||
1789 | * nowhere it can be initialized differently before we reach this | ||
1790 | * code | ||
1791 | */ | ||
1792 | LOADADDR(r27, boot_cpuid) | ||
1793 | sub r27,r27,r26 | ||
1794 | lwz r27,0(r27) | ||
1795 | |||
1796 | LOADADDR(r24, paca) /* Get base vaddr of paca array */ | ||
1797 | mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ | ||
1798 | add r13,r13,r24 /* for this processor. */ | ||
1799 | sub r13,r13,r26 /* convert to physical addr */ | ||
1800 | mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ | ||
1801 | |||
1802 | /* Do very early kernel initializations, including initial hash table, | ||
1803 | * stab and slb setup before we turn on relocation. */ | ||
1804 | |||
1805 | /* Restore parameters passed from prom_init/kexec */ | ||
1806 | mr r3,r31 | ||
1807 | bl .early_setup | ||
1808 | |||
1809 | /* set the ASR */ | ||
1810 | ld r3,PACASTABREAL(r13) | ||
1811 | ori r4,r3,1 /* turn on valid bit */ | ||
1812 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | ||
1813 | ld r3,0(r3) | ||
1814 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | ||
1815 | andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ | ||
1816 | beq 98f /* branch if result is 0 */ | ||
1817 | mfspr r3,PVR | ||
1818 | srwi r3,r3,16 | ||
1819 | cmpwi r3,0x37 /* SStar */ | ||
1820 | beq 97f | ||
1821 | cmpwi r3,0x36 /* IStar */ | ||
1822 | beq 97f | ||
1823 | cmpwi r3,0x34 /* Pulsar */ | ||
1824 | bne 98f | ||
1825 | 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ | ||
1826 | HVSC /* Invoking hcall */ | ||
1827 | b 99f | ||
1828 | 98: /* !(rpa hypervisor) || !(star) */ | ||
1829 | mtasr r4 /* set the stab location */ | ||
1830 | 99: | ||
1831 | /* Set SDR1 (hash table pointer) */ | ||
1832 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | ||
1833 | ld r3,0(r3) | ||
1834 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | ||
1835 | /* Test if bit 0 is set (LPAR bit) */ | ||
1836 | andi. r3,r3,PLATFORM_LPAR | ||
1837 | bne 98f /* branch if result is !0 */ | ||
1838 | LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ | ||
1839 | sub r6,r6,r26 | ||
1840 | ld r6,0(r6) /* get the value of _SDR1 */ | ||
1841 | mtspr SDR1,r6 /* set the htab location */ | ||
1842 | 98: | ||
1843 | LOADADDR(r3,.start_here_common) | ||
1844 | SET_REG_TO_CONST(r4, MSR_KERNEL) | ||
1845 | mtspr SRR0,r3 | ||
1846 | mtspr SRR1,r4 | ||
1847 | rfid | ||
1848 | b . /* prevent speculative execution */ | ||
1849 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
1850 | |||
1851 | /* This is where all platforms converge execution */ | ||
1852 | _STATIC(start_here_common) | ||
1853 | /* relocation is on at this point */ | ||
1854 | |||
1855 | /* The following code sets up the SP and TOC now that we are */ | ||
1856 | /* running with translation enabled. */ | ||
1857 | |||
1858 | LOADADDR(r3,init_thread_union) | ||
1859 | |||
1860 | /* set up the stack */ | ||
1861 | addi r1,r3,THREAD_SIZE | ||
1862 | li r0,0 | ||
1863 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | ||
1864 | |||
1865 | /* Apply the CPUs-specific fixups (nop out sections not relevant | ||
1866 | * to this CPU | ||
1867 | */ | ||
1868 | li r3,0 | ||
1869 | bl .do_cpu_ftr_fixups | ||
1870 | |||
1871 | LOADADDR(r26, boot_cpuid) | ||
1872 | lwz r26,0(r26) | ||
1873 | |||
1874 | LOADADDR(r24, paca) /* Get base vaddr of paca array */ | ||
1875 | mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ | ||
1876 | add r13,r13,r24 /* for this processor. */ | ||
1877 | mtspr SPRG3,r13 | ||
1878 | |||
1879 | /* ptr to current */ | ||
1880 | LOADADDR(r4,init_task) | ||
1881 | std r4,PACACURRENT(r13) | ||
1882 | |||
1883 | /* Load the TOC */ | ||
1884 | ld r2,PACATOC(r13) | ||
1885 | std r1,PACAKSAVE(r13) | ||
1886 | |||
1887 | bl .setup_system | ||
1888 | |||
1889 | /* Load up the kernel context */ | ||
1890 | 5: | ||
1891 | #ifdef DO_SOFT_DISABLE | ||
1892 | li r5,0 | ||
1893 | stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ | ||
1894 | mfmsr r5 | ||
1895 | ori r5,r5,MSR_EE /* Hard Enabled */ | ||
1896 | mtmsrd r5 | ||
1897 | #endif | ||
1898 | |||
1899 | bl .start_kernel | ||
1900 | |||
1901 | _GLOBAL(hmt_init) | ||
1902 | #ifdef CONFIG_HMT | ||
1903 | LOADADDR(r5, hmt_thread_data) | ||
1904 | mfspr r7,PVR | ||
1905 | srwi r7,r7,16 | ||
1906 | cmpwi r7,0x34 /* Pulsar */ | ||
1907 | beq 90f | ||
1908 | cmpwi r7,0x36 /* Icestar */ | ||
1909 | beq 91f | ||
1910 | cmpwi r7,0x37 /* SStar */ | ||
1911 | beq 91f | ||
1912 | b 101f | ||
1913 | 90: mfspr r6,PIR | ||
1914 | andi. r6,r6,0x1f | ||
1915 | b 92f | ||
1916 | 91: mfspr r6,PIR | ||
1917 | andi. r6,r6,0x3ff | ||
1918 | 92: sldi r4,r24,3 | ||
1919 | stwx r6,r5,r4 | ||
1920 | bl .hmt_start_secondary | ||
1921 | b 101f | ||
1922 | |||
1923 | __hmt_secondary_hold: | ||
1924 | LOADADDR(r5, hmt_thread_data) | ||
1925 | clrldi r5,r5,4 | ||
1926 | li r7,0 | ||
1927 | mfspr r6,PIR | ||
1928 | mfspr r8,PVR | ||
1929 | srwi r8,r8,16 | ||
1930 | cmpwi r8,0x34 | ||
1931 | bne 93f | ||
1932 | andi. r6,r6,0x1f | ||
1933 | b 103f | ||
1934 | 93: andi. r6,r6,0x3f | ||
1935 | |||
1936 | 103: lwzx r8,r5,r7 | ||
1937 | cmpw r8,r6 | ||
1938 | beq 104f | ||
1939 | addi r7,r7,8 | ||
1940 | b 103b | ||
1941 | |||
1942 | 104: addi r7,r7,4 | ||
1943 | lwzx r9,r5,r7 | ||
1944 | mr r24,r9 | ||
1945 | 101: | ||
1946 | #endif | ||
1947 | mr r3,r24 | ||
1948 | b .pSeries_secondary_smp_init | ||
1949 | |||
1950 | #ifdef CONFIG_HMT | ||
1951 | _GLOBAL(hmt_start_secondary) | ||
1952 | LOADADDR(r4,__hmt_secondary_hold) | ||
1953 | clrldi r4,r4,4 | ||
1954 | mtspr NIADORM, r4 | ||
1955 | mfspr r4, MSRDORM | ||
1956 | li r5, -65 | ||
1957 | and r4, r4, r5 | ||
1958 | mtspr MSRDORM, r4 | ||
1959 | lis r4,0xffef | ||
1960 | ori r4,r4,0x7403 | ||
1961 | mtspr TSC, r4 | ||
1962 | li r4,0x1f4 | ||
1963 | mtspr TST, r4 | ||
1964 | mfspr r4, HID0 | ||
1965 | ori r4, r4, 0x1 | ||
1966 | mtspr HID0, r4 | ||
1967 | mfspr r4, SPRN_CTRLF | ||
1968 | oris r4, r4, 0x40 | ||
1969 | mtspr SPRN_CTRLT, r4 | ||
1970 | blr | ||
1971 | #endif | ||
1972 | |||
1973 | #if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)) | ||
1974 | _GLOBAL(smp_release_cpus) | ||
1975 | /* All secondary cpus are spinning on a common | ||
1976 | * spinloop, release them all now so they can start | ||
1977 | * to spin on their individual paca spinloops. | ||
1978 | * For non SMP kernels, the secondary cpus never | ||
1979 | * get out of the common spinloop. | ||
1980 | */ | ||
1981 | li r3,1 | ||
1982 | LOADADDR(r5,__secondary_hold_spinloop) | ||
1983 | std r3,0(r5) | ||
1984 | sync | ||
1985 | blr | ||
1986 | #endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */ | ||
1987 | |||
1988 | |||
1989 | /* | ||
1990 | * We put a few things here that have to be page-aligned. | ||
1991 | * This stuff goes at the beginning of the bss, which is page-aligned. | ||
1992 | */ | ||
1993 | .section ".bss" | ||
1994 | |||
1995 | .align PAGE_SHIFT | ||
1996 | |||
1997 | .globl empty_zero_page | ||
1998 | empty_zero_page: | ||
1999 | .space PAGE_SIZE | ||
2000 | |||
2001 | .globl swapper_pg_dir | ||
2002 | swapper_pg_dir: | ||
2003 | .space PAGE_SIZE | ||
2004 | |||
2005 | /* | ||
2006 | * This space gets a copy of optional info passed to us by the bootstrap | ||
2007 | * Used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
2008 | */ | ||
2009 | .globl cmd_line | ||
2010 | cmd_line: | ||
2011 | .space COMMAND_LINE_SIZE | ||
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S new file mode 100644 index 000000000000..cb1a3a54a026 --- /dev/null +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -0,0 +1,860 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/except_8xx.S | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
7 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
8 | * Low-level exception handlers and MMU support | ||
9 | * rewritten by Paul Mackerras. | ||
10 | * Copyright (C) 1996 Paul Mackerras. | ||
11 | * MPC8xx modifications by Dan Malek | ||
12 | * Copyright (C) 1997 Dan Malek (dmalek@jlc.net). | ||
13 | * | ||
14 | * This file contains low-level support and setup for PowerPC 8xx | ||
15 | * embedded processors, including trap and interrupt dispatch. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/page.h> | ||
27 | #include <asm/mmu.h> | ||
28 | #include <asm/cache.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | #include <asm/ppc_asm.h> | ||
33 | #include <asm/asm-offsets.h> | ||
34 | |||
35 | /* Macro to make the code more readable. */ | ||
36 | #ifdef CONFIG_8xx_CPU6 | ||
37 | #define DO_8xx_CPU6(val, reg) \ | ||
38 | li reg, val; \ | ||
39 | stw reg, 12(r0); \ | ||
40 | lwz reg, 12(r0); | ||
41 | #else | ||
42 | #define DO_8xx_CPU6(val, reg) | ||
43 | #endif | ||
44 | .text | ||
45 | .globl _stext | ||
46 | _stext: | ||
47 | .text | ||
48 | .globl _start | ||
49 | _start: | ||
50 | |||
51 | /* MPC8xx | ||
52 | * This port was done on an MBX board with an 860. Right now I only | ||
53 | * support an ELF compressed (zImage) boot from EPPC-Bug because the | ||
54 | * code there loads up some registers before calling us: | ||
55 | * r3: ptr to board info data | ||
56 | * r4: initrd_start or if no initrd then 0 | ||
57 | * r5: initrd_end - unused if r4 is 0 | ||
58 | * r6: Start of command line string | ||
59 | * r7: End of command line string | ||
60 | * | ||
61 | * I decided to use conditional compilation instead of checking PVR and | ||
62 | * adding more processor specific branches around code I don't need. | ||
63 | * Since this is an embedded processor, I also appreciate any memory | ||
64 | * savings I can get. | ||
65 | * | ||
66 | * The MPC8xx does not have any BATs, but it supports large page sizes. | ||
67 | * We first initialize the MMU to support 8M byte pages, then load one | ||
68 | * entry into each of the instruction and data TLBs to map the first | ||
69 | * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to | ||
70 | * the "internal" processor registers before MMU_init is called. | ||
71 | * | ||
72 | * The TLB code currently contains a major hack. Since I use the condition | ||
73 | * code register, I have to save and restore it. I am out of registers, so | ||
74 | * I just store it in memory location 0 (the TLB handlers are not reentrant). | ||
75 | * To avoid making any decisions, I need to use the "segment" valid bit | ||
76 | * in the first level table, but that would require many changes to the | ||
77 | * Linux page directory/table functions that I don't want to do right now. | ||
78 | * | ||
79 | * I used to use SPRG2 for a temporary register in the TLB handler, but it | ||
80 | * has since been put to other uses. I now use a hack to save a register | ||
81 | * and the CCR at memory location 0.....Someday I'll fix this..... | ||
82 | * -- Dan | ||
83 | */ | ||
84 | .globl __start | ||
85 | __start: | ||
86 | mr r31,r3 /* save parameters */ | ||
87 | mr r30,r4 | ||
88 | mr r29,r5 | ||
89 | mr r28,r6 | ||
90 | mr r27,r7 | ||
91 | |||
92 | /* We have to turn on the MMU right away so we get cache modes | ||
93 | * set correctly. | ||
94 | */ | ||
95 | bl initial_mmu | ||
96 | |||
97 | /* We now have the lower 8 Meg mapped into TLB entries, and the caches | ||
98 | * ready to work. | ||
99 | */ | ||
100 | |||
101 | turn_on_mmu: | ||
102 | mfmsr r0 | ||
103 | ori r0,r0,MSR_DR|MSR_IR | ||
104 | mtspr SPRN_SRR1,r0 | ||
105 | lis r0,start_here@h | ||
106 | ori r0,r0,start_here@l | ||
107 | mtspr SPRN_SRR0,r0 | ||
108 | SYNC | ||
109 | rfi /* enables MMU */ | ||
110 | |||
111 | /* | ||
112 | * Exception entry code. This code runs with address translation | ||
113 | * turned off, i.e. using physical addresses. | ||
114 | * We assume sprg3 has the physical address of the current | ||
115 | * task's thread_struct. | ||
116 | */ | ||
117 | #define EXCEPTION_PROLOG \ | ||
118 | mtspr SPRN_SPRG0,r10; \ | ||
119 | mtspr SPRN_SPRG1,r11; \ | ||
120 | mfcr r10; \ | ||
121 | EXCEPTION_PROLOG_1; \ | ||
122 | EXCEPTION_PROLOG_2 | ||
123 | |||
124 | #define EXCEPTION_PROLOG_1 \ | ||
125 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | ||
126 | andi. r11,r11,MSR_PR; \ | ||
127 | tophys(r11,r1); /* use tophys(r1) if kernel */ \ | ||
128 | beq 1f; \ | ||
129 | mfspr r11,SPRN_SPRG3; \ | ||
130 | lwz r11,THREAD_INFO-THREAD(r11); \ | ||
131 | addi r11,r11,THREAD_SIZE; \ | ||
132 | tophys(r11,r11); \ | ||
133 | 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */ | ||
134 | |||
135 | |||
136 | #define EXCEPTION_PROLOG_2 \ | ||
137 | CLR_TOP32(r11); \ | ||
138 | stw r10,_CCR(r11); /* save registers */ \ | ||
139 | stw r12,GPR12(r11); \ | ||
140 | stw r9,GPR9(r11); \ | ||
141 | mfspr r10,SPRN_SPRG0; \ | ||
142 | stw r10,GPR10(r11); \ | ||
143 | mfspr r12,SPRN_SPRG1; \ | ||
144 | stw r12,GPR11(r11); \ | ||
145 | mflr r10; \ | ||
146 | stw r10,_LINK(r11); \ | ||
147 | mfspr r12,SPRN_SRR0; \ | ||
148 | mfspr r9,SPRN_SRR1; \ | ||
149 | stw r1,GPR1(r11); \ | ||
150 | stw r1,0(r11); \ | ||
151 | tovirt(r1,r11); /* set new kernel sp */ \ | ||
152 | li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \ | ||
153 | MTMSRD(r10); /* (except for mach check in rtas) */ \ | ||
154 | stw r0,GPR0(r11); \ | ||
155 | SAVE_4GPRS(3, r11); \ | ||
156 | SAVE_2GPRS(7, r11) | ||
157 | |||
158 | /* | ||
159 | * Note: code which follows this uses cr0.eq (set if from kernel), | ||
160 | * r11, r12 (SRR0), and r9 (SRR1). | ||
161 | * | ||
162 | * Note2: once we have set r1 we are in a position to take exceptions | ||
163 | * again, and we could thus set MSR:RI at that point. | ||
164 | */ | ||
165 | |||
166 | /* | ||
167 | * Exception vectors. | ||
168 | */ | ||
169 | #define EXCEPTION(n, label, hdlr, xfer) \ | ||
170 | . = n; \ | ||
171 | label: \ | ||
172 | EXCEPTION_PROLOG; \ | ||
173 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
174 | xfer(n, hdlr) | ||
175 | |||
176 | #define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \ | ||
177 | li r10,trap; \ | ||
178 | stw r10,TRAP(r11); \ | ||
179 | li r10,MSR_KERNEL; \ | ||
180 | copyee(r10, r9); \ | ||
181 | bl tfer; \ | ||
182 | i##n: \ | ||
183 | .long hdlr; \ | ||
184 | .long ret | ||
185 | |||
186 | #define COPY_EE(d, s) rlwimi d,s,0,16,16 | ||
187 | #define NOCOPY(d, s) | ||
188 | |||
189 | #define EXC_XFER_STD(n, hdlr) \ | ||
190 | EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \ | ||
191 | ret_from_except_full) | ||
192 | |||
193 | #define EXC_XFER_LITE(n, hdlr) \ | ||
194 | EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \ | ||
195 | ret_from_except) | ||
196 | |||
197 | #define EXC_XFER_EE(n, hdlr) \ | ||
198 | EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \ | ||
199 | ret_from_except_full) | ||
200 | |||
201 | #define EXC_XFER_EE_LITE(n, hdlr) \ | ||
202 | EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \ | ||
203 | ret_from_except) | ||
204 | |||
205 | /* System reset */ | ||
206 | EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) | ||
207 | |||
208 | /* Machine check */ | ||
209 | . = 0x200 | ||
210 | MachineCheck: | ||
211 | EXCEPTION_PROLOG | ||
212 | mfspr r4,SPRN_DAR | ||
213 | stw r4,_DAR(r11) | ||
214 | mfspr r5,SPRN_DSISR | ||
215 | stw r5,_DSISR(r11) | ||
216 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
217 | EXC_XFER_STD(0x200, MachineCheckException) | ||
218 | |||
219 | /* Data access exception. | ||
220 | * This is "never generated" by the MPC8xx. We jump to it for other | ||
221 | * translation errors. | ||
222 | */ | ||
223 | . = 0x300 | ||
224 | DataAccess: | ||
225 | EXCEPTION_PROLOG | ||
226 | mfspr r10,SPRN_DSISR | ||
227 | stw r10,_DSISR(r11) | ||
228 | mr r5,r10 | ||
229 | mfspr r4,SPRN_DAR | ||
230 | EXC_XFER_EE_LITE(0x300, handle_page_fault) | ||
231 | |||
232 | /* Instruction access exception. | ||
233 | * This is "never generated" by the MPC8xx. We jump to it for other | ||
234 | * translation errors. | ||
235 | */ | ||
236 | . = 0x400 | ||
237 | InstructionAccess: | ||
238 | EXCEPTION_PROLOG | ||
239 | mr r4,r12 | ||
240 | mr r5,r9 | ||
241 | EXC_XFER_EE_LITE(0x400, handle_page_fault) | ||
242 | |||
243 | /* External interrupt */ | ||
244 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | ||
245 | |||
246 | /* Alignment exception */ | ||
247 | . = 0x600 | ||
248 | Alignment: | ||
249 | EXCEPTION_PROLOG | ||
250 | mfspr r4,SPRN_DAR | ||
251 | stw r4,_DAR(r11) | ||
252 | mfspr r5,SPRN_DSISR | ||
253 | stw r5,_DSISR(r11) | ||
254 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
255 | EXC_XFER_EE(0x600, AlignmentException) | ||
256 | |||
257 | /* Program check exception */ | ||
258 | EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) | ||
259 | |||
260 | /* No FPU on MPC8xx. This exception is not supposed to happen. | ||
261 | */ | ||
262 | EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) | ||
263 | |||
264 | /* Decrementer */ | ||
265 | EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) | ||
266 | |||
267 | EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) | ||
268 | EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) | ||
269 | |||
270 | /* System call */ | ||
271 | . = 0xc00 | ||
272 | SystemCall: | ||
273 | EXCEPTION_PROLOG | ||
274 | EXC_XFER_EE_LITE(0xc00, DoSyscall) | ||
275 | |||
276 | /* Single step - not used on 601 */ | ||
277 | EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) | ||
278 | EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) | ||
279 | EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) | ||
280 | |||
281 | /* On the MPC8xx, this is a software emulation interrupt. It occurs | ||
282 | * for all unimplemented and illegal instructions. | ||
283 | */ | ||
284 | EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD) | ||
285 | |||
286 | . = 0x1100 | ||
287 | /* | ||
288 | * For the MPC8xx, this is a software tablewalk to load the instruction | ||
289 | * TLB. It is modelled after the example in the Motorola manual. The task | ||
290 | * switch loads the M_TWB register with the pointer to the first level table. | ||
291 | * If we discover there is no second level table (value is zero) or if there | ||
292 | * is an invalid pte, we load that into the TLB, which causes another fault | ||
293 | * into the TLB Error interrupt where we can handle such problems. | ||
294 | * We have to use the MD_xxx registers for the tablewalk because the | ||
295 | * equivalent MI_xxx registers only perform the attribute functions. | ||
296 | */ | ||
297 | InstructionTLBMiss: | ||
298 | #ifdef CONFIG_8xx_CPU6 | ||
299 | stw r3, 8(r0) | ||
300 | #endif | ||
301 | DO_8xx_CPU6(0x3f80, r3) | ||
302 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
303 | mfcr r10 | ||
304 | stw r10, 0(r0) | ||
305 | stw r11, 4(r0) | ||
306 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ | ||
307 | DO_8xx_CPU6(0x3780, r3) | ||
308 | mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ | ||
309 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
310 | |||
311 | /* If we are faulting a kernel address, we have to use the | ||
312 | * kernel page tables. | ||
313 | */ | ||
314 | andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ | ||
315 | beq 3f | ||
316 | lis r11, swapper_pg_dir@h | ||
317 | ori r11, r11, swapper_pg_dir@l | ||
318 | rlwimi r10, r11, 0, 2, 19 | ||
319 | 3: | ||
320 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
321 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
322 | beq 2f /* If zero, don't try to find a pte */ | ||
323 | |||
324 | /* We have a pte table, so load the MI_TWC with the attributes | ||
325 | * for this "segment." | ||
326 | */ | ||
327 | ori r11,r11,1 /* Set valid bit */ | ||
328 | DO_8xx_CPU6(0x2b80, r3) | ||
329 | mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ | ||
330 | DO_8xx_CPU6(0x3b80, r3) | ||
331 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
332 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | ||
333 | lwz r10, 0(r11) /* Get the pte */ | ||
334 | |||
335 | ori r10, r10, _PAGE_ACCESSED | ||
336 | stw r10, 0(r11) | ||
337 | |||
338 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
339 | * Software indicator bits 21, 22 and 28 must be clear. | ||
340 | * Software indicator bits 24, 25, 26, and 27 must be | ||
341 | * set. All other Linux PTE bits control the behavior | ||
342 | * of the MMU. | ||
343 | */ | ||
344 | 2: li r11, 0x00f0 | ||
345 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
346 | DO_8xx_CPU6(0x2d80, r3) | ||
347 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ | ||
348 | |||
349 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
350 | lwz r11, 0(r0) | ||
351 | mtcr r11 | ||
352 | lwz r11, 4(r0) | ||
353 | #ifdef CONFIG_8xx_CPU6 | ||
354 | lwz r3, 8(r0) | ||
355 | #endif | ||
356 | rfi | ||
357 | |||
358 | . = 0x1200 | ||
359 | DataStoreTLBMiss: | ||
360 | #ifdef CONFIG_8xx_CPU6 | ||
361 | stw r3, 8(r0) | ||
362 | #endif | ||
363 | DO_8xx_CPU6(0x3f80, r3) | ||
364 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
365 | mfcr r10 | ||
366 | stw r10, 0(r0) | ||
367 | stw r11, 4(r0) | ||
368 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
369 | |||
370 | /* If we are faulting a kernel address, we have to use the | ||
371 | * kernel page tables. | ||
372 | */ | ||
373 | andi. r11, r10, 0x0800 | ||
374 | beq 3f | ||
375 | lis r11, swapper_pg_dir@h | ||
376 | ori r11, r11, swapper_pg_dir@l | ||
377 | rlwimi r10, r11, 0, 2, 19 | ||
378 | 3: | ||
379 | lwz r11, 0(r10) /* Get the level 1 entry */ | ||
380 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | ||
381 | beq 2f /* If zero, don't try to find a pte */ | ||
382 | |||
383 | /* We have a pte table, so load fetch the pte from the table. | ||
384 | */ | ||
385 | ori r11, r11, 1 /* Set valid bit in physical L2 page */ | ||
386 | DO_8xx_CPU6(0x3b80, r3) | ||
387 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | ||
388 | mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ | ||
389 | lwz r10, 0(r10) /* Get the pte */ | ||
390 | |||
391 | /* Insert the Guarded flag into the TWC from the Linux PTE. | ||
392 | * It is bit 27 of both the Linux PTE and the TWC (at least | ||
393 | * I got that right :-). It will be better when we can put | ||
394 | * this into the Linux pgd/pmd and load it in the operation | ||
395 | * above. | ||
396 | */ | ||
397 | rlwimi r11, r10, 0, 27, 27 | ||
398 | DO_8xx_CPU6(0x3b80, r3) | ||
399 | mtspr SPRN_MD_TWC, r11 | ||
400 | |||
401 | mfspr r11, SPRN_MD_TWC /* get the pte address again */ | ||
402 | ori r10, r10, _PAGE_ACCESSED | ||
403 | stw r10, 0(r11) | ||
404 | |||
405 | /* The Linux PTE won't go exactly into the MMU TLB. | ||
406 | * Software indicator bits 21, 22 and 28 must be clear. | ||
407 | * Software indicator bits 24, 25, 26, and 27 must be | ||
408 | * set. All other Linux PTE bits control the behavior | ||
409 | * of the MMU. | ||
410 | */ | ||
411 | 2: li r11, 0x00f0 | ||
412 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | ||
413 | DO_8xx_CPU6(0x3d80, r3) | ||
414 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | ||
415 | |||
416 | mfspr r10, SPRN_M_TW /* Restore registers */ | ||
417 | lwz r11, 0(r0) | ||
418 | mtcr r11 | ||
419 | lwz r11, 4(r0) | ||
420 | #ifdef CONFIG_8xx_CPU6 | ||
421 | lwz r3, 8(r0) | ||
422 | #endif | ||
423 | rfi | ||
424 | |||
425 | /* This is an instruction TLB error on the MPC8xx. This could be due | ||
426 | * to many reasons, such as executing guarded memory or illegal instruction | ||
427 | * addresses. There is nothing to do but handle a big time error fault. | ||
428 | */ | ||
429 | . = 0x1300 | ||
430 | InstructionTLBError: | ||
431 | b InstructionAccess | ||
432 | |||
/* This is the data TLB error on the MPC8xx.  This could be due to
 * many reasons, including a dirty update to a pte.  We can catch that
 * one here, but anything else is an error.  First, we track down the
 * Linux pte.  If it is valid, write access is allowed, but the
 * page dirty bit is not set, we will set it and reload the TLB.  For
 * any other case, we bail out to a higher level function that can
 * handle it.
 *
 * Vector 0x1400.  Scratch state is kept in low memory at 0(r0)..8(r0)
 * (a base register of r0 means a literal 0 in PowerPC addressing) and
 * in SPRN_M_TW; it is restored on every exit path below.
 */
	. = 0x1400
DataTLBError:
#ifdef CONFIG_8xx_CPU6
	stw	r3, 8(r0)		/* r3 is clobbered by DO_8xx_CPU6 */
#endif
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
	stw	r10, 0(r0)
	stw	r11, 4(r0)

	/* First, make sure this was a store operation.
	*/
	mfspr	r10, SPRN_DSISR
	andis.	r11, r10, 0x0200	/* If set, indicates store op */
	beq	2f			/* not a store: punt to DataAccess */

	/* The EA of a data TLB miss is automatically stored in the MD_EPN
	 * register.  The EA of a data TLB error is automatically stored in
	 * the DAR, but not the MD_EPN register.  We must copy the 20 most
	 * significant bits of the EA from the DAR to MD_EPN before we
	 * start walking the page tables.  We also need to copy the CASID
	 * value from the M_CASID register.
	 * Addendum:  The EA of a data TLB error is _supposed_ to be stored
	 * in DAR, but it seems that this doesn't happen in some cases, such
	 * as when the error is due to a dcbi instruction to a page with a
	 * TLB that doesn't have the changed bit set.  In such cases, there
	 * does not appear to be any way to recover the EA of the error
	 * since it is neither in DAR nor MD_EPN.  As a workaround, the
	 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
	 * are initialized in mapin_ram().  This will avoid the problem,
	 * assuming we only use the dcbi instruction on kernel addresses.
	 */
	mfspr	r10, SPRN_DAR
	rlwinm	r11, r10, 0, 0, 19	/* keep top 20 bits (page number) */
	ori	r11, r11, MD_EVALID
	mfspr	r10, SPRN_M_CASID
	rlwimi	r11, r10, 0, 28, 31	/* merge CASID into low nibble */
	DO_8xx_CPU6(0x3780, r3)
	mtspr	SPRN_MD_EPN, r11

	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andi.	r11, r10, 0x0800
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19	/* substitute kernel pgdir base */
3:
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, bail */

	/* We have a pte table, so fetch the pte from the table.
	 */
	ori	r11, r11, 1	/* Set valid bit in physical L2 page */
	DO_8xx_CPU6(0x3b80, r3)
	mtspr	SPRN_MD_TWC, r11	/* Load pte table base address */
	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r11)	/* Get the pte */

	andi.	r11, r10, _PAGE_RW	/* Is it writeable? */
	beq	2f		/* Bail out if not */

	/* Update 'changed', among others.
	*/
	ori	r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	mfspr	r11, SPRN_MD_TWC	/* Get pte address again */
	stw	r10, 0(r11)	/* and update pte in table */

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 21, 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, 0x00f0
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x3d80, r3)
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	rfi			/* TLB reloaded; retry the faulting store */
2:
	/* Bailout: restore scratch state and take the heavyweight path. */
	mfspr	r10, SPRN_M_TW	/* Restore registers */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	b	DataAccess
542 | |||
/* Vectors 0x1500-0x1b00 are unused on the 8xx; route them all to the
 * generic unknown-exception handler.
 */
	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
559 | |||
	. = 0x2000

/* The MPC8xx core has no FPU, so giveup_fpu is a no-op stub kept only
 * to satisfy callers in common code.
 */
	.globl	giveup_fpu
giveup_fpu:
	blr
565 | |||
/*
 * This is where the main kernel code starts.
 * Entered with the MMU still using the boot-time 8M mappings set up by
 * initial_mmu; r27-r31 still hold the saved boot parameters.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 * r3-r7 are the boot parameters saved at _start.
	 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/*
	 * Go back to running unmapped so we can load up new values
	 * and change to using our exception vectors.
	 * On the 8xx, all we have to do is invalidate the TLB to clear
	 * the old 8M byte TLB mappings and load the page table base register.
	 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
	/* CPU6 errata dance: write/readback the errata word before mtspr */
	lis	r4, cpu6_errata_word@h
	ori	r4, r4, cpu6_errata_word@l
	li	r3, 0x3980
	stw	r3, 12(r4)
	lwz	r3, 12(r4)
#endif
	mtspr	SPRN_M_TWB, r6		/* page table base (physical) */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi				/* jump to 2: with translation off */
	/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

	/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi	/* enable MMU and jump to start_kernel */
651 | |||
/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also, set the cache mode since that is defined
 * by TLB entries and perform any additional mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 *
 * Clobbers r8-r11.  Returns with blr; caller still runs out of the
 * pinned/identity mapping created here.
 */
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
#ifdef CONFIG_PIN_TLB
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00
#else
	li	r8, 0
#endif
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */

#ifdef CONFIG_PIN_TLB
	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
	ori	r10, r10, 0x1c00
	mr	r8, r10
#else
	lis	r10, MD_RESETVAL@h
#endif
#ifndef CONFIG_8xx_COPYBACK
	oris	r10, r10, MD_WTDEF@h	/* default to write-through cache */
#endif
	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */

	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
	 * we can load the instruction and data TLB registers with the
	 * same values (the MI_*/MD_* field layouts line up for these
	 * fields).
	 */
	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MI_EPN, r8
	mtspr	SPRN_MD_EPN, r8
	li	r8, MI_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MI_TWC, r8
	mtspr	SPRN_MD_TWC, r8
	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
	mtspr	SPRN_MD_RPN, r8
	lis	r8, MI_Kp@h		/* Set the protection mode */
	mtspr	SPRN_MI_AP, r8
	mtspr	SPRN_MD_AP, r8

	/* Map another 8 MByte at the IMMR to get the processor
	 * internal registers (among other things).
	 */
#ifdef CONFIG_PIN_TLB
	addi	r10, r10, 0x0100	/* advance pinned-entry index */
	mtspr	SPRN_MD_CTR, r10
#endif
	mfspr	r9, 638			/* Get current IMMR */
	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */

	mr	r8, r9			/* Create vaddr for TLB */
	ori	r8, r8, MD_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r8, MD_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MD_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r8
	mr	r8, r9			/* Create paddr for TLB */
	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
	mtspr	SPRN_MD_RPN, r8

#ifdef CONFIG_PIN_TLB
	/* Map two more 8M kernel data pages.
	 */
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10

	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	addis	r8, r8, 0x0080		/* Add 8M */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r9, MI_PS8MEG		/* Set 8M byte page */
	ori	r9, r9, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r9
	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11	/* fix: was r8 (the EPN/vaddr), so
					 * the pinned entry mapped the wrong
					 * physical page; use the RPN in r11 */

	addis	r8, r8, 0x0080		/* Add 8M */
	mtspr	SPRN_MD_EPN, r8
	mtspr	SPRN_MD_TWC, r9
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11	/* fix: was r8, same bug as above */
#endif

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
	mtspr	SPRN_DC_CST, r8
#else
	/* For a debug option, I left this here to easily enable
	 * the write through cache mode
	 */
	lis	r8, DC_SFWT@h
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_DC_CST, r8
#endif
	blr
766 | |||
767 | |||
/*
 * Set up to use a given MMU context.
 * r3 is context number, r4 is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 *
 * Clobbers r5-r7 (r5 only under CONFIG_BDI_SWITCH).
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 * KERNELBASE+0xf0 is where start_here stashed the abatron_pteptrs
	 * pointer; slot 4 holds the current user pgdir.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif

#ifdef CONFIG_8xx_CPU6
	/* CPU6 errata: each mtspr must be preceded by a store/load of a
	 * per-SPR magic value to cpu6_errata_word.  Do not reorder.
	 */
	lis	r6, cpu6_errata_word@h
	ori	r6, r6, cpu6_errata_word@l
	tophys	(r4, r4)
	li	r7, 0x3980
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_TWB, r4		/* Update MMU base address */
	li	r7, 0x3380
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_CASID, r3	/* Update context */
#else
	mtspr	SPRN_M_CASID,r3		/* Update context */
	tophys	(r4, r4)
	mtspr	SPRN_M_TWB, r4		/* and pgd */
#endif
	SYNC
	blr
806 | |||
#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
 * It is important we get called with interrupts disabled.  I used to
 * do that, but it appears that all code that calls this already had
 * interrupt disabled.
 *
 * r3 = new decrementer value.  SPR 22 is DEC; the store/load of the
 * 0x2c00 magic to cpu6_errata_word is the CPU6 errata workaround that
 * must precede the mtspr.  Clobbers r4, r7.
 */
	.globl	set_dec_cpu6
set_dec_cpu6:
	lis	r7, cpu6_errata_word@h
	ori	r7, r7, cpu6_errata_word@l
	li	r4, 0x2c00
	stw	r4, 8(r7)
	lwz	r4, 8(r7)
	mtspr	22, r3		/* Update Decrementer */
	SYNC
	blr
#endif
824 | |||
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	/* Boot-time kernel page directory, also used by the TLB miss
	 * handlers above for kernel-address faults.
	 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Room for two PTE table pointers, usually the kernel and current user
 * pointer to their respective root page table (pgdir).
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_8xx_CPU6
	/* Scratch words used by the CPU6 errata mtspr workaround. */
	.globl	cpu6_errata_word
cpu6_errata_word:
	.space	16
#endif
860 | |||
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S new file mode 100644 index 000000000000..eba5a5f8ff08 --- /dev/null +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -0,0 +1,1058 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/head_fsl_booke.S | ||
3 | * | ||
4 | * Kernel execution entry point code. | ||
5 | * | ||
6 | * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org> | ||
7 | * Initial PowerPC version. | ||
8 | * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
9 | * Rewritten for PReP | ||
10 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
11 | * Low-level exception handlers, MMU support, and rewrite. | ||
12 | * Copyright (c) 1997 Dan Malek <dmalek@jlc.net> | ||
13 | * PowerPC 8xx modifications. | ||
14 | * Copyright (c) 1998-1999 TiVo, Inc. | ||
15 | * PowerPC 403GCX modifications. | ||
16 | * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu> | ||
17 | * PowerPC 403GCX/405GP modifications. | ||
18 | * Copyright 2000 MontaVista Software Inc. | ||
19 | * PPC405 modifications | ||
20 | * PowerPC 403GCX/405GP modifications. | ||
21 | * Author: MontaVista Software, Inc. | ||
22 | * frank_rowand@mvista.com or source@mvista.com | ||
23 | * debbie_chu@mvista.com | ||
24 | * Copyright 2002-2004 MontaVista Software, Inc. | ||
25 | * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org> | ||
26 | * Copyright 2004 Freescale Semiconductor, Inc | ||
27 | * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com> | ||
28 | * | ||
29 | * This program is free software; you can redistribute it and/or modify it | ||
30 | * under the terms of the GNU General Public License as published by the | ||
31 | * Free Software Foundation; either version 2 of the License, or (at your | ||
32 | * option) any later version. | ||
33 | */ | ||
34 | |||
35 | #include <linux/config.h> | ||
36 | #include <linux/threads.h> | ||
37 | #include <asm/processor.h> | ||
38 | #include <asm/page.h> | ||
39 | #include <asm/mmu.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/cputable.h> | ||
42 | #include <asm/thread_info.h> | ||
43 | #include <asm/ppc_asm.h> | ||
44 | #include <asm/asm-offsets.h> | ||
45 | #include "head_booke.h" | ||
46 | |||
47 | /* As with the other PowerPC ports, it is expected that when code | ||
48 | * execution begins here, the following registers contain valid, yet | ||
49 | * optional, information: | ||
50 | * | ||
51 | * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) | ||
52 | * r4 - Starting address of the init RAM disk | ||
53 | * r5 - Ending address of the init RAM disk | ||
54 | * r6 - Start of kernel command line string (e.g. "mem=128") | ||
55 | * r7 - End of kernel command line string | ||
56 | * | ||
57 | */ | ||
	.text
_GLOBAL(_stext)
_GLOBAL(_start)
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed (board info, initrd bounds, command
 * line -- see the header comment above) in high registers so the
 * calls below cannot clobber them before machine_init consumes them.
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */
75 | |||
76 | /* We try to not make any assumptions about how the boot loader | ||
77 | * setup or used the TLBs. We invalidate all mappings from the | ||
78 | * boot loader and load a single entry in TLB1[0] to map the | ||
79 | * first 16M of kernel memory. Any boot info passed from the | ||
80 | * bootloader needs to live in this first 16M. | ||
81 | * | ||
82 | * Requirement on bootloader: | ||
83 | * - The page we're executing in needs to reside in TLB1 and | ||
84 | * have IPROT=1. If not an invalidate broadcast could | ||
85 | * evict the entry we're currently executing in. | ||
86 | * | ||
87 | * r3 = Index of TLB1 we're executing in | ||
88 | * r4 = Current MSR[IS] | ||
89 | * r5 = Index of TLB1 temp mapping | ||
90 | * | ||
91 | * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] | ||
92 | * if needed | ||
93 | */ | ||
94 | |||
/* 1. Find the index of the entry we're executing in.
 * tlbsx searches using MAS6 = {SPID, SAS}; we try PID0, then PID1,
 * then PID2 -- one of them must match since we are executing here.
 */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	mfmsr	r7
	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7, SPRN_PID0
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
#ifndef CONFIG_E200
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7,SPRN_PID1
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7, SPRN_PID2
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* Fall through, we had to match */
#endif
match_TLB:
	mfspr	r7,SPRN_MAS0
	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1			/* Ensure IPROT set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe
131 | |||
/* 2. Invalidate all entries except the entry we're executing in.
 * r3 holds our entry index; TLB1CFG[NENTRY] (low 12 bits) bounds the loop.
 */
	mfspr	r9,SPRN_TLB1CFG
	andi.	r9,r9,0xfff
	li	r6,0				/* Set Entry counter to 0 */
1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r3,r6
	beq	skpinv				/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r9				/* Are we done? */
	bne	1b				/* If not, repeat */

	/* Invalidate TLB0 */
	li	r6,0x04
	tlbivax	0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	/* Invalidate TLB1 */
	li	r6,0x0c
	tlbivax	0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync
164 | |||
/* 3. Setup a temp mapping and jump to it.
 * r5 = temp entry index (chosen != r3 and non-zero); the mapping is
 * created in the *other* address space (AS = !r4), 4K, IPROT so the
 * later tlbivax cannot remove it prematurely.
 */
	andi.	r5, r3, 0x1	/* Find an entry not used and is non-zero */
	addi	r5, r5, 0x1
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre			/* read our current entry into the MAS regs */

	/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	xori	r6,r4,1		/* Setup TMP mapping in the other Address space */
	slwi	r6,r6,12
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS2
	li	r7,0		/* temp EPN = 0 */
	rlwimi	r7,r6,0,20,31	/* keep WIMGE attribute bits */
	mtspr	SPRN_MAS2,r7
	tlbwe

	/* rfi into the temp mapping: SRR0 = our in-page offset, SRR1 has
	 * MSR[IS/DS] selecting the other address space.
	 */
	xori	r6,r4,1
	slwi	r6,r6,5		/* setup new context with other address space */
	bl	1f		/* Find our address */
1:	mflr	r9
	rlwimi	r7,r9,0,20,31	/* page offset of this code */
	addi	r7,r7,24	/* skip past the rfi target setup */
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi
197 | |||
/* 4. Clear out PIDs & Search info */
	li	r6,0
	mtspr	SPRN_PID0,r6
#ifndef CONFIG_E200
	mtspr	SPRN_PID1,r6
	mtspr	SPRN_PID2,r6
#endif
	mtspr	SPRN_MAS6,r6

/* 5. Invalidate mapping we started in: write its MAS1 to 0 (clears
 * VALID and IPROT, which is why plain tlbivax is not sufficient),
 * then flash-invalidate TLB1 for good measure.
 */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre
	li	r6,0
	mtspr	SPRN_MAS1,r6
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync
222 | |||
/* 6. Setup KERNELBASE mapping in TLB1[0]: 16M, IPROT, supervisor
 * RWX, EPN = KERNELBASE, RPN = 0.
 */
	lis	r6,0x1000	/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
	mtspr	SPRN_MAS1,r6
	li	r7,0
	lis	r6,KERNELBASE@h
	ori	r6,r6,KERNELBASE@l
	rlwimi	r6,r7,0,20,31	/* clear attribute bits */
	mtspr	SPRN_MAS2,r6
	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
	mtspr	SPRN_MAS3,r7
	tlbwe

/* 7. Jump to KERNELBASE mapping (back in address space 0, MSR_KERNEL) */
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	bl	1f			/* Find our address */
1:	mflr	r9
	rlwimi	r6,r9,0,20,31		/* our offset within the 1M page */
	addi	r6,r6,24		/* skip past the rfi target setup */
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi				/* start execution out of TLB1[0] entry */
248 | |||
/* 8. Clear out the temp mapping.  The temp entry was created with
 * IPROT set, so tlbivax cannot invalidate it; we must explicitly
 * clear its VALID and IPROT bits with a tlbwe.
 */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1	/* fix: r8 was never initialized, so garbage
				 * was written to MAS1; read the entry and
				 * clear Valid/IPROT instead */
	rlwinm	r8,r8,0,2,31	/* Clear MAS1 Valid and IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync
263 | |||
	/* Establish the interrupt vector offsets.  Each IVORn gets the
	 * offset (from IVPR) of the label set up by the macros below.
	 */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, Debug);
	SET_IVOR(32, SPEUnavailable);
	SET_IVOR(33, SPEFloatingPointData);
	SET_IVOR(34, SPEFloatingPointRound);
#ifndef CONFIG_E200
	SET_IVOR(35, PerformanceMonitor);
#endif

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif
#ifdef CONFIG_E200
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r2,SPRN_HID0
	ori	r2,r2,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r2
#endif

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif
323 | |||
	/*
	 * This is where the main kernel code starts.
	 * Book E: MMU stays on throughout, so no tophys() is needed here
	 * (unlike the 8xx start_here above).
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

	/* Record the number of TLB1/CAM entries for the MM code. */
	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff
	lis	r4,num_tlbcam_entries@ha
	stw	r3,num_tlbcam_entries@l(r4)
	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 * r3-r7 are the boot parameters saved at _start.
	 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
377 | |||
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 *
 * Note the 64-bit variant reads the *low* word at offset 4 (big-endian),
 * which is where the flag bits live -- hence PTE_FLAGS_OFFSET.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET	4
#define	FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define PTE_FLAGS_OFFSET	0
#define	FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
409 | |||
410 | /* | ||
411 | * Interrupt vector entry code | ||
412 | * | ||
413 | * The Book E MMUs are always on so we don't need to handle | ||
414 | * interrupts in real mode as with previous PPC processors. In | ||
415 | * this case we handle interrupts in the kernel virtual address | ||
416 | * space. | ||
417 | * | ||
418 | * Interrupt vectors are dynamically placed relative to the | ||
419 | * interrupt prefix as determined by the address of interrupt_base. | ||
420 | * The interrupt vectors offsets are programmed using the labels | ||
421 | * for each interrupt vector entry. | ||
422 | * | ||
423 | * Interrupt vectors must be aligned on a 16 byte boundary. | ||
424 | * We align on a 32 byte cache line boundary for good measure. | ||
425 | */ | ||
426 | |||
/* Base of the dynamically-placed interrupt vectors; IVPR was loaded
 * with the high half of this address above.
 */
interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#endif
438 | |||
439 | /* Data Storage Interrupt */ | ||
440 | START_EXCEPTION(DataStorage) | ||
441 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
442 | mtspr SPRN_SPRG1, r11 | ||
443 | mtspr SPRN_SPRG4W, r12 | ||
444 | mtspr SPRN_SPRG5W, r13 | ||
445 | mfcr r11 | ||
446 | mtspr SPRN_SPRG7W, r11 | ||
447 | |||
448 | /* | ||
449 | * Check if it was a store fault, if not then bail | ||
450 | * because a user tried to access a kernel or | ||
451 | * read-protected page. Otherwise, get the | ||
452 | * offending address and handle it. | ||
453 | */ | ||
454 | mfspr r10, SPRN_ESR | ||
455 | andis. r10, r10, ESR_ST@h | ||
456 | beq 2f | ||
457 | |||
458 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
459 | |||
460 | /* If we are faulting a kernel address, we have to use the | ||
461 | * kernel page tables. | ||
462 | */ | ||
463 | lis r11, TASK_SIZE@h | ||
464 | ori r11, r11, TASK_SIZE@l | ||
465 | cmplw 0, r10, r11 | ||
466 | bge 2f | ||
467 | |||
468 | /* Get the PGD for the current thread */ | ||
469 | 3: | ||
470 | mfspr r11,SPRN_SPRG3 | ||
471 | lwz r11,PGDIR(r11) | ||
472 | 4: | ||
473 | FIND_PTE | ||
474 | |||
475 | /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ | ||
476 | andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE | ||
477 | cmpwi 0, r13, _PAGE_RW|_PAGE_USER | ||
478 | bne 2f /* Bail if not */ | ||
479 | |||
480 | /* Update 'changed'. */ | ||
481 | ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE | ||
482 | stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */ | ||
483 | |||
484 | /* MAS2 not updated as the entry does exist in the tlb, this | ||
485 | fault taken to detect state transition (eg: COW -> DIRTY) | ||
486 | */ | ||
487 | andi. r11, r11, _PAGE_HWEXEC | ||
488 | rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ | ||
489 | ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ | ||
490 | |||
491 | /* update search PID in MAS6, AS = 0 */ | ||
492 | mfspr r12, SPRN_PID0 | ||
493 | slwi r12, r12, 16 | ||
494 | mtspr SPRN_MAS6, r12 | ||
495 | |||
496 | /* find the TLB index that caused the fault. It has to be here. */ | ||
497 | tlbsx 0, r10 | ||
498 | |||
499 | /* only update the perm bits, assume the RPN is fine */ | ||
500 | mfspr r12, SPRN_MAS3 | ||
501 | rlwimi r12, r11, 0, 20, 31 | ||
502 | mtspr SPRN_MAS3,r12 | ||
503 | tlbwe | ||
504 | |||
505 | /* Done...restore registers and get out of here. */ | ||
506 | mfspr r11, SPRN_SPRG7R | ||
507 | mtcr r11 | ||
508 | mfspr r13, SPRN_SPRG5R | ||
509 | mfspr r12, SPRN_SPRG4R | ||
510 | mfspr r11, SPRN_SPRG1 | ||
511 | mfspr r10, SPRN_SPRG0 | ||
512 | rfi /* Force context change */ | ||
513 | |||
514 | 2: | ||
515 | /* | ||
516 | * The bailout. Restore registers to pre-exception conditions | ||
517 | * and call the heavyweights to help us out. | ||
518 | */ | ||
519 | mfspr r11, SPRN_SPRG7R | ||
520 | mtcr r11 | ||
521 | mfspr r13, SPRN_SPRG5R | ||
522 | mfspr r12, SPRN_SPRG4R | ||
523 | mfspr r11, SPRN_SPRG1 | ||
524 | mfspr r10, SPRN_SPRG0 | ||
525 | b data_access | ||
526 | |||
527 | /* Instruction Storage Interrupt */ | ||
528 | INSTRUCTION_STORAGE_EXCEPTION | ||
529 | |||
530 | /* External Input Interrupt */ | ||
531 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | ||
532 | |||
533 | /* Alignment Interrupt */ | ||
534 | ALIGNMENT_EXCEPTION | ||
535 | |||
536 | /* Program Interrupt */ | ||
537 | PROGRAM_EXCEPTION | ||
538 | |||
539 | /* Floating Point Unavailable Interrupt */ | ||
540 | #ifdef CONFIG_PPC_FPU | ||
541 | FP_UNAVAILABLE_EXCEPTION | ||
542 | #else | ||
543 | #ifdef CONFIG_E200 | ||
544 | /* E200 treats 'normal' floating point instructions as FP Unavail exception */ | ||
545 | EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE) | ||
546 | #else | ||
547 | EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | ||
548 | #endif | ||
549 | #endif | ||
550 | |||
551 | /* System Call Interrupt */ | ||
552 | START_EXCEPTION(SystemCall) | ||
553 | NORMAL_EXCEPTION_PROLOG | ||
554 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | ||
555 | |||
556 | /* Auxillary Processor Unavailable Interrupt */ | ||
557 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) | ||
558 | |||
559 | /* Decrementer Interrupt */ | ||
560 | DECREMENTER_EXCEPTION | ||
561 | |||
562 | /* Fixed Internal Timer Interrupt */ | ||
563 | /* TODO: Add FIT support */ | ||
564 | EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) | ||
565 | |||
566 | /* Watchdog Timer Interrupt */ | ||
567 | #ifdef CONFIG_BOOKE_WDT | ||
568 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) | ||
569 | #else | ||
570 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) | ||
571 | #endif | ||
572 | |||
573 | /* Data TLB Error Interrupt */ | ||
574 | START_EXCEPTION(DataTLBError) | ||
575 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
576 | mtspr SPRN_SPRG1, r11 | ||
577 | mtspr SPRN_SPRG4W, r12 | ||
578 | mtspr SPRN_SPRG5W, r13 | ||
579 | mfcr r11 | ||
580 | mtspr SPRN_SPRG7W, r11 | ||
581 | mfspr r10, SPRN_DEAR /* Get faulting address */ | ||
582 | |||
583 | /* If we are faulting a kernel address, we have to use the | ||
584 | * kernel page tables. | ||
585 | */ | ||
586 | lis r11, TASK_SIZE@h | ||
587 | ori r11, r11, TASK_SIZE@l | ||
588 | cmplw 5, r10, r11 | ||
589 | blt 5, 3f | ||
590 | lis r11, swapper_pg_dir@h | ||
591 | ori r11, r11, swapper_pg_dir@l | ||
592 | |||
593 | mfspr r12,SPRN_MAS1 /* Set TID to 0 */ | ||
594 | rlwinm r12,r12,0,16,1 | ||
595 | mtspr SPRN_MAS1,r12 | ||
596 | |||
597 | b 4f | ||
598 | |||
599 | /* Get the PGD for the current thread */ | ||
600 | 3: | ||
601 | mfspr r11,SPRN_SPRG3 | ||
602 | lwz r11,PGDIR(r11) | ||
603 | |||
604 | 4: | ||
605 | FIND_PTE | ||
606 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
607 | beq 2f /* Bail if not present */ | ||
608 | |||
609 | #ifdef CONFIG_PTE_64BIT | ||
610 | lwz r13, 0(r12) | ||
611 | #endif | ||
612 | ori r11, r11, _PAGE_ACCESSED | ||
613 | stw r11, PTE_FLAGS_OFFSET(r12) | ||
614 | |||
615 | /* Jump to common tlb load */ | ||
616 | b finish_tlb_load | ||
617 | 2: | ||
618 | /* The bailout. Restore registers to pre-exception conditions | ||
619 | * and call the heavyweights to help us out. | ||
620 | */ | ||
621 | mfspr r11, SPRN_SPRG7R | ||
622 | mtcr r11 | ||
623 | mfspr r13, SPRN_SPRG5R | ||
624 | mfspr r12, SPRN_SPRG4R | ||
625 | mfspr r11, SPRN_SPRG1 | ||
626 | mfspr r10, SPRN_SPRG0 | ||
627 | b data_access | ||
628 | |||
629 | /* Instruction TLB Error Interrupt */ | ||
630 | /* | ||
631 | * Nearly the same as above, except we get our | ||
632 | * information from different registers and bailout | ||
633 | * to a different point. | ||
634 | */ | ||
635 | START_EXCEPTION(InstructionTLBError) | ||
636 | mtspr SPRN_SPRG0, r10 /* Save some working registers */ | ||
637 | mtspr SPRN_SPRG1, r11 | ||
638 | mtspr SPRN_SPRG4W, r12 | ||
639 | mtspr SPRN_SPRG5W, r13 | ||
640 | mfcr r11 | ||
641 | mtspr SPRN_SPRG7W, r11 | ||
642 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | ||
643 | |||
644 | /* If we are faulting a kernel address, we have to use the | ||
645 | * kernel page tables. | ||
646 | */ | ||
647 | lis r11, TASK_SIZE@h | ||
648 | ori r11, r11, TASK_SIZE@l | ||
649 | cmplw 5, r10, r11 | ||
650 | blt 5, 3f | ||
651 | lis r11, swapper_pg_dir@h | ||
652 | ori r11, r11, swapper_pg_dir@l | ||
653 | |||
654 | mfspr r12,SPRN_MAS1 /* Set TID to 0 */ | ||
655 | rlwinm r12,r12,0,16,1 | ||
656 | mtspr SPRN_MAS1,r12 | ||
657 | |||
658 | b 4f | ||
659 | |||
660 | /* Get the PGD for the current thread */ | ||
661 | 3: | ||
662 | mfspr r11,SPRN_SPRG3 | ||
663 | lwz r11,PGDIR(r11) | ||
664 | |||
665 | 4: | ||
666 | FIND_PTE | ||
667 | andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ | ||
668 | beq 2f /* Bail if not present */ | ||
669 | |||
670 | #ifdef CONFIG_PTE_64BIT | ||
671 | lwz r13, 0(r12) | ||
672 | #endif | ||
673 | ori r11, r11, _PAGE_ACCESSED | ||
674 | stw r11, PTE_FLAGS_OFFSET(r12) | ||
675 | |||
676 | /* Jump to common TLB load point */ | ||
677 | b finish_tlb_load | ||
678 | |||
679 | 2: | ||
680 | /* The bailout. Restore registers to pre-exception conditions | ||
681 | * and call the heavyweights to help us out. | ||
682 | */ | ||
683 | mfspr r11, SPRN_SPRG7R | ||
684 | mtcr r11 | ||
685 | mfspr r13, SPRN_SPRG5R | ||
686 | mfspr r12, SPRN_SPRG4R | ||
687 | mfspr r11, SPRN_SPRG1 | ||
688 | mfspr r10, SPRN_SPRG0 | ||
689 | b InstructionStorage | ||
690 | |||
691 | #ifdef CONFIG_SPE | ||
692 | /* SPE Unavailable */ | ||
693 | START_EXCEPTION(SPEUnavailable) | ||
694 | NORMAL_EXCEPTION_PROLOG | ||
695 | bne load_up_spe | ||
696 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
697 | EXC_XFER_EE_LITE(0x2010, KernelSPE) | ||
698 | #else | ||
699 | EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) | ||
700 | #endif /* CONFIG_SPE */ | ||
701 | |||
702 | /* SPE Floating Point Data */ | ||
703 | #ifdef CONFIG_SPE | ||
704 | EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); | ||
705 | #else | ||
706 | EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) | ||
707 | #endif /* CONFIG_SPE */ | ||
708 | |||
709 | /* SPE Floating Point Round */ | ||
710 | EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) | ||
711 | |||
712 | /* Performance Monitor */ | ||
713 | EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) | ||
714 | |||
715 | |||
716 | /* Debug Interrupt */ | ||
717 | DEBUG_EXCEPTION | ||
718 | |||
719 | /* | ||
720 | * Local functions | ||
721 | */ | ||
722 | |||
723 | /* | ||
724 | * Data TLB exceptions will bail out to this point | ||
725 | * if they can't resolve the lightweight TLB fault. | ||
726 | * Passes ESR in r5 and DEAR in r4 to the C page-fault handler; | ||
726 | * lock-set faults (ESR_ILK/ESR_DLK) go to CacheLockingException instead. | ||
726 | */ | ||
727 | data_access: | ||
728 | NORMAL_EXCEPTION_PROLOG | ||
729 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | ||
730 | stw r5,_ESR(r11) | ||
731 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | ||
732 | andis. r10,r5,(ESR_ILK|ESR_DLK)@h | ||
733 | bne 1f | ||
734 | EXC_XFER_EE_LITE(0x0300, handle_page_fault) | ||
735 | 1: | ||
736 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
737 | EXC_XFER_EE_LITE(0x0300, CacheLockingException) | ||
738 | |||
738 | |||
739 | /* | ||
740 | * | ||
741 | * Both the instruction and data TLB miss get to this | ||
742 | * point to load the TLB. | ||
743 | * r10 - EA of fault | ||
744 | * r11 - TLB (info from Linux PTE) | ||
745 | * r12, r13 - available to use | ||
746 | * CR5 - results of addr < TASK_SIZE | ||
747 | * MAS0, MAS1 - loaded with proper value when we get here | ||
748 | * MAS2, MAS3 - will need additional info from Linux PTE | ||
749 | * Upon exit, we reload everything and RFI. | ||
750 | */ | ||
751 | finish_tlb_load: | ||
752 | /* | ||
753 | * We set execute, because we don't have the granularity to | ||
754 | * properly set this at the page level (Linux problem). | ||
755 | * Many of these bits are software only. Bits we don't set | ||
756 | * here we (properly should) assume have the appropriate value. | ||
757 | */ | ||
758 | |||
759 | mfspr r12, SPRN_MAS2 | ||
760 | #ifdef CONFIG_PTE_64BIT | ||
761 | rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */ | ||
762 | #else | ||
763 | rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ | ||
764 | #endif | ||
765 | mtspr SPRN_MAS2, r12 | ||
766 | |||
767 | bge 5, 1f /* cr5 was set by the TLB-miss handler: kernel addr -> 1f */ | ||
768 | |||
769 | /* is user addr */ | ||
770 | andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) | ||
771 | andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */ | ||
772 | srwi r10, r12, 1 | ||
773 | or r12, r12, r10 /* Copy user perms into supervisor */ | ||
774 | iseleq r12, 0, r12 /* no perms at all unless _PAGE_USER was set */ | ||
775 | b 2f | ||
776 | |||
777 | /* is kernel addr */ | ||
778 | 1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ | ||
779 | ori r12, r12, (MAS3_SX | MAS3_SR) | ||
780 | |||
781 | #ifdef CONFIG_PTE_64BIT | ||
782 | 2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ | ||
783 | rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ | ||
784 | mtspr SPRN_MAS3, r12 | ||
785 | BEGIN_FTR_SECTION | ||
786 | srwi r10, r13, 8 /* grab RPN[8:31] */ | ||
787 | mtspr SPRN_MAS7, r10 | ||
788 | END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) | ||
789 | #else | ||
790 | 2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ | ||
791 | mtspr SPRN_MAS3, r11 | ||
792 | #endif | ||
793 | #ifdef CONFIG_E200 | ||
794 | /* Round robin TLB1 entries assignment */ | ||
795 | mfspr r12, SPRN_MAS0 | ||
796 | |||
797 | /* Extract TLB1CFG(NENTRY) */ | ||
798 | mfspr r11, SPRN_TLB1CFG | ||
799 | andi. r11, r11, 0xfff | ||
800 | |||
801 | /* Extract MAS0(NV) */ | ||
802 | andi. r13, r12, 0xfff | ||
803 | addi r13, r13, 1 | ||
804 | cmpw 0, r13, r11 | ||
805 | addi r12, r12, 1 | ||
806 | |||
807 | /* check if we need to wrap */ | ||
808 | blt 7f | ||
809 | |||
810 | /* wrap back to first free tlbcam entry */ | ||
811 | lis r13, tlbcam_index@ha | ||
812 | lwz r13, tlbcam_index@l(r13) | ||
813 | rlwimi r12, r13, 0, 20, 31 | ||
814 | 7: | ||
815 | mtspr SPRN_MAS0,r12 | ||
816 | #endif /* CONFIG_E200 */ | ||
817 | |||
818 | tlbwe | ||
819 | |||
820 | /* Done...restore registers and get out of here. */ | ||
821 | mfspr r11, SPRN_SPRG7R | ||
822 | mtcr r11 | ||
823 | mfspr r13, SPRN_SPRG5R | ||
824 | mfspr r12, SPRN_SPRG4R | ||
825 | mfspr r11, SPRN_SPRG1 | ||
826 | mfspr r10, SPRN_SPRG0 | ||
827 | rfi /* Force context change */ | ||
828 | |||
828 | |||
829 | #ifdef CONFIG_SPE | ||
830 | /* Note that the SPE support is closely modeled after the AltiVec | ||
831 | * support. Changes to one are likely to be applicable to the | ||
832 | * other! */ | ||
833 | load_up_spe: | ||
834 | /* | ||
835 | * Disable SPE for the task which had SPE previously, | ||
836 | * and save its SPE registers in its thread_struct. | ||
837 | * Enables SPE for use in the kernel on return. | ||
838 | * On SMP we know the SPE units are free, since we give it up every | ||
839 | * switch. -- Kumar | ||
840 | */ | ||
841 | mfmsr r5 | ||
842 | oris r5,r5,MSR_SPE@h | ||
843 | mtmsr r5 /* enable use of SPE now */ | ||
844 | isync | ||
845 | /* | ||
846 | * For SMP, we don't do lazy SPE switching because it just gets too | ||
847 | * horrendously complex, especially when a task switches from one CPU | ||
848 | * to another. Instead we call giveup_spe in switch_to. | ||
849 | */ | ||
850 | #ifndef CONFIG_SMP | ||
851 | lis r3,last_task_used_spe@ha | ||
852 | lwz r4,last_task_used_spe@l(r3) | ||
853 | cmpi 0,r4,0 | ||
854 | beq 1f | ||
855 | addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ | ||
856 | SAVE_32EVRS(0,r10,r4) | ||
857 | evxor evr10, evr10, evr10 /* clear out evr10 */ | ||
858 | evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ | ||
859 | li r5,THREAD_ACC | ||
860 | evstddx evr10, r4, r5 /* save off accumulator */ | ||
861 | lwz r5,PT_REGS(r4) | ||
862 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
863 | lis r10,MSR_SPE@h | ||
864 | andc r4,r4,r10 /* disable SPE for previous task */ | ||
865 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
866 | 1: | ||
867 | #endif /* CONFIG_SMP */ | ||
868 | /* enable use of SPE after return (r9 is the saved SRR1 image, see below) */ | ||
869 | oris r9,r9,MSR_SPE@h | ||
870 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
871 | li r4,1 | ||
872 | li r10,THREAD_ACC | ||
873 | stw r4,THREAD_USED_SPE(r5) | ||
874 | evlddx evr4,r10,r5 | ||
875 | evmra evr4,evr4 | ||
876 | REST_32EVRS(0,r10,r5) | ||
877 | #ifndef CONFIG_SMP | ||
878 | subi r4,r5,THREAD | ||
879 | stw r4,last_task_used_spe@l(r3) | ||
880 | #endif /* CONFIG_SMP */ | ||
881 | /* restore registers and return directly from the exception */ | ||
882 | 2: REST_4GPRS(3, r11) | ||
883 | lwz r10,_CCR(r11) | ||
884 | REST_GPR(1, r11) | ||
885 | mtcr r10 | ||
886 | lwz r10,_LINK(r11) | ||
887 | mtlr r10 | ||
888 | REST_GPR(10, r11) | ||
889 | mtspr SPRN_SRR1,r9 | ||
890 | mtspr SPRN_SRR0,r12 | ||
891 | REST_GPR(9, r11) | ||
892 | REST_GPR(12, r11) | ||
893 | lwz r11,GPR11(r11) | ||
894 | SYNC | ||
895 | rfi | ||
896 | |||
897 | /* | ||
898 | * SPE unavailable trap from kernel - print a message, but let | ||
899 | * the task use SPE in the kernel until it returns to user mode. | ||
900 | */ | ||
901 | KernelSPE: | ||
902 | lwz r3,_MSR(r1) | ||
903 | oris r3,r3,MSR_SPE@h | ||
904 | stw r3,_MSR(r1) /* enable use of SPE after return */ | ||
905 | lis r3,87f@h | ||
906 | ori r3,r3,87f@l | ||
907 | mr r4,r2 /* current */ | ||
908 | lwz r5,_NIP(r1) | ||
909 | bl printk | ||
910 | b ret_from_except | ||
911 | 87: .string "SPE used in kernel (task=%p, pc=%x) \n" | ||
912 | .align 4,0 | ||
913 | |||
914 | #endif /* CONFIG_SPE */ | ||
915 | |||
915 | |||
916 | /* | ||
917 | * Global functions | ||
918 | */ | ||
919 | |||
920 | /* | ||
921 | * extern void loadcam_entry(unsigned int index) | ||
922 | * | ||
923 | * Load TLBCAM[index] entry in to the L2 CAM MMU | ||
924 | */ | ||
925 | _GLOBAL(loadcam_entry) | ||
926 | lis r4,TLBCAM@ha | ||
927 | addi r4,r4,TLBCAM@l | ||
928 | mulli r5,r3,20 /* 20-byte stride per TLBCAM entry; only the 4 MAS words below are loaded — TODO confirm struct layout */ | ||
929 | add r3,r5,r4 | ||
930 | lwz r4,0(r3) | ||
931 | mtspr SPRN_MAS0,r4 | ||
932 | lwz r4,4(r3) | ||
933 | mtspr SPRN_MAS1,r4 | ||
934 | lwz r4,8(r3) | ||
935 | mtspr SPRN_MAS2,r4 | ||
936 | lwz r4,12(r3) | ||
937 | mtspr SPRN_MAS3,r4 | ||
938 | tlbwe | ||
939 | isync | ||
940 | blr | ||
941 | |||
942 | /* | ||
943 | * extern void giveup_altivec(struct task_struct *prev) | ||
944 | * | ||
945 | * The e500 core does not have an AltiVec unit, so this is an | ||
946 | * empty stub — presumably kept so common code can call it unconditionally. | ||
947 | */ | ||
947 | _GLOBAL(giveup_altivec) | ||
948 | blr | ||
949 | |||
950 | #ifdef CONFIG_SPE | ||
951 | /* | ||
952 | * extern void giveup_spe(struct task_struct *prev) | ||
953 | * | ||
954 | * Save prev's SPE state into its thread_struct and turn MSR_SPE | ||
954 | * off in its saved user MSR. On UP, also forget last_task_used_spe. | ||
954 | */ | ||
955 | _GLOBAL(giveup_spe) | ||
956 | mfmsr r5 | ||
957 | oris r5,r5,MSR_SPE@h | ||
958 | SYNC | ||
959 | mtmsr r5 /* enable use of SPE now */ | ||
960 | isync | ||
961 | cmpi 0,r3,0 | ||
962 | beqlr- /* if no previous owner, done */ | ||
963 | addi r3,r3,THREAD /* want THREAD of task */ | ||
964 | lwz r5,PT_REGS(r3) | ||
965 | cmpi 0,r5,0 /* NOTE(review): cr0 set here is consumed by the beq below; assumes the intervening saves leave cr0 intact — confirm */ | ||
966 | SAVE_32EVRS(0, r4, r3) | ||
967 | evxor evr6, evr6, evr6 /* clear out evr6 */ | ||
968 | evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ | ||
969 | li r4,THREAD_ACC | ||
970 | evstddx evr6, r4, r3 /* save off accumulator */ | ||
971 | mfspr r6,SPRN_SPEFSCR | ||
972 | stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ | ||
973 | beq 1f /* task had no pt_regs: skip the MSR fixup */ | ||
974 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
975 | lis r3,MSR_SPE@h | ||
976 | andc r4,r4,r3 /* disable SPE for previous task */ | ||
977 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
978 | 1: | ||
979 | #ifndef CONFIG_SMP | ||
980 | li r5,0 | ||
981 | lis r4,last_task_used_spe@ha | ||
982 | stw r5,last_task_used_spe@l(r4) | ||
983 | #endif /* CONFIG_SMP */ | ||
984 | blr | ||
985 | #endif /* CONFIG_SPE */ | ||
986 | |||
987 | /* | ||
988 | * extern void giveup_fpu(struct task_struct *prev) | ||
989 | * | ||
990 | * Not all FSL Book-E cores have an FPU, so provide a no-op stub | ||
991 | * when the kernel is built without FPU support. | ||
991 | */ | ||
992 | #ifndef CONFIG_PPC_FPU | ||
993 | _GLOBAL(giveup_fpu) | ||
994 | blr | ||
995 | #endif | ||
996 | |||
997 | /* | ||
998 | * extern void abort(void) | ||
999 | * | ||
1000 | * At present, this routine just applies a system reset (DBCR0[RST_CHIP]). | ||
1001 | * NOTE(review): there is no blr, so execution relies on the reset taking | ||
1001 | * effect before falling through into set_context below — confirm intended. | ||
1001 | */ | ||
1002 | _GLOBAL(abort) | ||
1003 | li r13,0 | ||
1004 | mtspr SPRN_DBCR0,r13 /* disable all debug events */ | ||
1005 | mfmsr r13 | ||
1006 | ori r13,r13,MSR_DE@l /* Enable Debug Events */ | ||
1007 | mtmsr r13 | ||
1008 | mfspr r13,SPRN_DBCR0 | ||
1009 | lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h | ||
1010 | mtspr SPRN_DBCR0,r13 | ||
1011 | |||
1011 | |||
1012 | _GLOBAL(set_context) | ||
1013 | |||
1014 | #ifdef CONFIG_BDI_SWITCH | ||
1015 | /* Context switch the PTE pointer for the Abatron BDI2000. | ||
1016 | * The PGDIR is the second parameter (r4); it is stored at offset 4 | ||
1017 | * of abatron_pteptrs. r3 carries the new context/PID value. | ||
1017 | */ | ||
1018 | lis r5, abatron_pteptrs@h | ||
1019 | ori r5, r5, abatron_pteptrs@l | ||
1020 | stw r4, 0x4(r5) | ||
1021 | #endif | ||
1022 | mtspr SPRN_PID,r3 | ||
1023 | isync /* Force context change */ | ||
1024 | blr | ||
1025 | |||
1026 | /* | ||
1027 | * We put a few things here that have to be page-aligned. This stuff | ||
1028 | * goes at the beginning of the data segment, which is page-aligned. | ||
1029 | */ | ||
1030 | .data | ||
1031 | _GLOBAL(sdata) | ||
1032 | _GLOBAL(empty_zero_page) | ||
1033 | .space 4096 | ||
1034 | _GLOBAL(swapper_pg_dir) | ||
1035 | .space 4096 | ||
1036 | |||
1037 | /* Reserved 4k for the critical exception stack & 4k for the machine | ||
1038 | * check stack per CPU for kernel mode exceptions */ | ||
1039 | .section .bss | ||
1040 | .align 12 | ||
1041 | exception_stack_bottom: | ||
1042 | .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS | ||
1043 | _GLOBAL(exception_stack_top) | ||
1044 | |||
1045 | /* | ||
1046 | * This space gets a copy of optional info passed to us by the bootstrap | ||
1047 | * which is used to pass parameters into the kernel like root=/dev/sda1, etc. | ||
1048 | */ | ||
1049 | _GLOBAL(cmd_line) | ||
1050 | .space 512 | ||
1051 | |||
1052 | /* | ||
1053 | * Room for two PTE pointers, usually the kernel and current user pointers | ||
1054 | * to their respective root page table (user pointer written at offset 4 | ||
1055 | * by set_context). | ||
1055 | */ | ||
1056 | abatron_pteptrs: | ||
1057 | .space 8 | ||
1058 | |||
1058 | |||
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S new file mode 100644 index 000000000000..1a2194cf6828 --- /dev/null +++ b/arch/powerpc/kernel/idle_6xx.S | |||
@@ -0,0 +1,233 @@ | |||
1 | /* | ||
2 | * This file contains the power_save function for 6xx & 7xxx CPUs | ||
3 | * rewritten in assembler | ||
4 | * | ||
5 | * Warning ! This code assumes that if your machine has a 750fx | ||
6 | * it will have PLL 1 set to low speed mode (used during NAP/DOZE). | ||
7 | * if this is not the case some additional changes will have to | ||
8 | * be done to check a runtime var (a bit like powersave-nap) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cputable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/ppc_asm.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | |||
25 | #undef DEBUG | ||
26 | |||
27 | .text | ||
28 | |||
29 | /* | ||
30 | * Init idle, called at early CPU setup time from head.S for each CPU | ||
31 | * Make sure no rest of NAP mode remains in HID0, save default | ||
32 | * values for some CPU specific registers. Called with r24 | ||
33 | * containing CPU number and r3 reloc offset. | ||
34 | * (If CPU_FTR_CAN_NAP is clear the feature section is patched out | ||
34 | * and the routine returns immediately via the blr below 1f.) | ||
34 | */ | ||
35 | _GLOBAL(init_idle_6xx) | ||
36 | BEGIN_FTR_SECTION | ||
37 | mfspr r4,SPRN_HID0 | ||
38 | rlwinm r4,r4,0,10,8 /* Clear NAP */ | ||
39 | mtspr SPRN_HID0, r4 | ||
40 | b 1f | ||
41 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) | ||
42 | blr | ||
43 | 1: | ||
44 | slwi r5,r24,2 /* per-CPU word offset = cpu * 4 */ | ||
45 | add r5,r5,r3 | ||
46 | BEGIN_FTR_SECTION | ||
47 | mfspr r4,SPRN_MSSCR0 | ||
48 | addis r6,r5, nap_save_msscr0@ha | ||
49 | stw r4,nap_save_msscr0@l(r6) | ||
50 | END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) | ||
51 | BEGIN_FTR_SECTION | ||
52 | mfspr r4,SPRN_HID1 | ||
53 | addis r6,r5,nap_save_hid1@ha | ||
54 | stw r4,nap_save_hid1@l(r6) | ||
55 | END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) | ||
56 | blr | ||
57 | |||
58 | /* | ||
59 | * Here is the power_save_6xx function. This could eventually be | ||
60 | * split into several functions & changing the function pointer | ||
61 | * depending on the various features. | ||
62 | */ | ||
63 | _GLOBAL(ppc6xx_idle) | ||
64 | /* Check if we can nap or doze, put HID0 mask in r3 | ||
65 | */ | ||
66 | lis r3, 0 | ||
67 | BEGIN_FTR_SECTION | ||
68 | lis r3,HID0_DOZE@h | ||
69 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) | ||
70 | BEGIN_FTR_SECTION | ||
71 | /* We must dynamically check for the NAP feature as it | ||
72 | * can be cleared by CPU init after the fixups are done | ||
73 | */ | ||
74 | lis r4,cur_cpu_spec@ha | ||
75 | lwz r4,cur_cpu_spec@l(r4) | ||
76 | lwz r4,CPU_SPEC_FEATURES(r4) | ||
77 | andi. r0,r4,CPU_FTR_CAN_NAP | ||
78 | beq 1f | ||
79 | /* Now check if user or arch enabled NAP mode */ | ||
80 | lis r4,powersave_nap@ha | ||
81 | lwz r4,powersave_nap@l(r4) | ||
82 | cmpwi 0,r4,0 | ||
83 | beq 1f | ||
84 | lis r3,HID0_NAP@h | ||
85 | 1: | ||
86 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) | ||
87 | cmpwi 0,r3,0 | ||
88 | beqlr /* nothing to do: can neither nap nor doze */ | ||
89 | |||
90 | /* Clear MSR:EE */ | ||
91 | mfmsr r7 | ||
92 | rlwinm r0,r7,0,17,15 | ||
93 | mtmsr r0 | ||
94 | |||
95 | /* Check current_thread_info()->flags */ | ||
96 | rlwinm r4,r1,0,0,18 | ||
97 | lwz r4,TI_FLAGS(r4) | ||
98 | andi. r0,r4,_TIF_NEED_RESCHED | ||
99 | beq 1f | ||
100 | mtmsr r7 /* out of line this ? */ | ||
101 | blr | ||
102 | 1: | ||
103 | /* Some pre-nap cleanups needed on some CPUs */ | ||
104 | andis. r0,r3,HID0_NAP@h | ||
105 | beq 2f | ||
106 | BEGIN_FTR_SECTION | ||
107 | /* Disable L2 prefetch on some 745x and try to ensure | ||
108 | * L2 prefetch engines are idle. As explained by errata | ||
109 | * text, we can't be sure they are, we just hope very hard | ||
110 | * that well be enough (sic !). At least I noticed Apple | ||
111 | * doesn't even bother doing the dcbf's here... | ||
112 | */ | ||
113 | mfspr r4,SPRN_MSSCR0 | ||
114 | rlwinm r4,r4,0,0,29 | ||
115 | sync | ||
116 | mtspr SPRN_MSSCR0,r4 | ||
117 | sync | ||
118 | isync | ||
119 | lis r4,KERNELBASE@h | ||
120 | dcbf 0,r4 | ||
121 | dcbf 0,r4 | ||
122 | dcbf 0,r4 | ||
123 | dcbf 0,r4 | ||
124 | END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) | ||
125 | #ifdef DEBUG | ||
126 | lis r6,nap_enter_count@ha | ||
127 | lwz r4,nap_enter_count@l(r6) | ||
128 | addi r4,r4,1 | ||
129 | stw r4,nap_enter_count@l(r6) | ||
130 | #endif | ||
131 | 2: | ||
132 | BEGIN_FTR_SECTION | ||
133 | /* Go to low speed mode on some 750FX */ | ||
134 | lis r4,powersave_lowspeed@ha | ||
135 | lwz r4,powersave_lowspeed@l(r4) | ||
136 | cmpwi 0,r4,0 | ||
137 | beq 1f | ||
138 | mfspr r4,SPRN_HID1 | ||
139 | oris r4,r4,0x0001 | ||
140 | mtspr SPRN_HID1,r4 | ||
141 | 1: | ||
142 | END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) | ||
143 | |||
144 | /* Go to NAP or DOZE now */ | ||
145 | mfspr r4,SPRN_HID0 | ||
146 | lis r5,(HID0_NAP|HID0_SLEEP)@h | ||
147 | BEGIN_FTR_SECTION | ||
148 | oris r5,r5,HID0_DOZE@h | ||
149 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) | ||
150 | andc r4,r4,r5 | ||
151 | or r4,r4,r3 | ||
152 | BEGIN_FTR_SECTION | ||
153 | oris r4,r4,HID0_DPM@h /* that should be done once for all */ | ||
154 | END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) | ||
155 | mtspr SPRN_HID0,r4 | ||
156 | BEGIN_FTR_SECTION | ||
157 | DSSALL | ||
158 | sync | ||
159 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
160 | ori r7,r7,MSR_EE /* Could be omitted (already set) */ | ||
161 | oris r7,r7,MSR_POW@h | ||
162 | sync | ||
163 | isync | ||
164 | mtmsr r7 | ||
165 | isync | ||
166 | sync | ||
167 | blr | ||
168 | |||
169 | /* | ||
170 | * Return from NAP/DOZE mode, restore some CPU specific registers, | ||
171 | * we are called with DR/IR still off and r2 containing physical | ||
172 | * address of current. | ||
173 | */ | ||
174 | _GLOBAL(power_save_6xx_restore) | ||
175 | mfspr r11,SPRN_HID0 | ||
176 | rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */ | ||
177 | cror 4*cr1+eq,4*cr0+eq,4*cr0+eq | ||
178 | BEGIN_FTR_SECTION | ||
179 | rlwinm r11,r11,0,9,7 /* Clear DOZE */ | ||
180 | END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) | ||
181 | mtspr SPRN_HID0, r11 | ||
182 | |||
183 | #ifdef DEBUG | ||
184 | beq cr1,1f | ||
185 | lis r11,(nap_return_count-KERNELBASE)@ha | ||
186 | lwz r9,nap_return_count@l(r11) | ||
187 | addi r9,r9,1 | ||
188 | stw r9,nap_return_count@l(r11) | ||
189 | 1: | ||
190 | #endif | ||
191 | |||
192 | rlwinm r9,r1,0,0,18 /* thread_info from stack pointer */ | ||
193 | tophys(r9,r9) | ||
194 | lwz r11,TI_CPU(r9) | ||
195 | slwi r11,r11,2 /* per-CPU word offset = cpu * 4 */ | ||
196 | /* Todo make sure all these are in the same page | ||
197 | * and load r22 (@ha part + CPU offset) only once | ||
198 | */ | ||
199 | BEGIN_FTR_SECTION | ||
200 | beq cr1,1f | ||
201 | addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha | ||
202 | lwz r9,nap_save_msscr0@l(r9) | ||
203 | mtspr SPRN_MSSCR0, r9 | ||
204 | sync | ||
205 | isync | ||
206 | 1: | ||
207 | END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) | ||
208 | BEGIN_FTR_SECTION | ||
209 | addis r9,r11,(nap_save_hid1-KERNELBASE)@ha | ||
210 | lwz r9,nap_save_hid1@l(r9) | ||
211 | mtspr SPRN_HID1, r9 | ||
212 | END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX) | ||
213 | b transfer_to_handler_cont | ||
214 | |||
215 | .data /* per-CPU nap save areas and idle tunables */ | ||
216 | |||
217 | _GLOBAL(nap_save_msscr0) | ||
218 | .space 4*NR_CPUS | ||
219 | |||
220 | _GLOBAL(nap_save_hid1) | ||
221 | .space 4*NR_CPUS | ||
222 | |||
223 | _GLOBAL(powersave_nap) | ||
224 | .long 0 | ||
225 | _GLOBAL(powersave_lowspeed) | ||
226 | .long 0 | ||
227 | |||
228 | #ifdef DEBUG | ||
229 | _GLOBAL(nap_enter_count) | ||
230 | .space 4 | ||
231 | _GLOBAL(nap_return_count) | ||
232 | .space 4 | ||
233 | #endif | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c new file mode 100644 index 000000000000..f5a9d2a84fa1 --- /dev/null +++ b/arch/powerpc/kernel/process.c | |||
@@ -0,0 +1,724 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/process.c | ||
3 | * | ||
4 | * Derived from "arch/i386/kernel/process.c" | ||
5 | * Copyright (C) 1995 Linus Torvalds | ||
6 | * | ||
7 | * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and | ||
8 | * Paul Mackerras (paulus@cs.anu.edu.au) | ||
9 | * | ||
10 | * PowerPC version | ||
11 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/smp.h> | ||
25 | #include <linux/smp_lock.h> | ||
26 | #include <linux/stddef.h> | ||
27 | #include <linux/unistd.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/user.h> | ||
31 | #include <linux/elf.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/prctl.h> | ||
34 | #include <linux/init_task.h> | ||
35 | #include <linux/module.h> | ||
36 | #include <linux/kallsyms.h> | ||
37 | #include <linux/mqueue.h> | ||
38 | #include <linux/hardirq.h> | ||
39 | |||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/system.h> | ||
43 | #include <asm/io.h> | ||
44 | #include <asm/processor.h> | ||
45 | #include <asm/mmu.h> | ||
46 | #include <asm/prom.h> | ||
47 | |||
48 | extern unsigned long _get_SP(void); | ||
49 | |||
50 | #ifndef CONFIG_SMP | ||
51 | struct task_struct *last_task_used_math = NULL; | ||
52 | struct task_struct *last_task_used_altivec = NULL; | ||
53 | struct task_struct *last_task_used_spe = NULL; | ||
54 | #endif | ||
55 | |||
56 | static struct fs_struct init_fs = INIT_FS; | ||
57 | static struct files_struct init_files = INIT_FILES; | ||
58 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
59 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
60 | struct mm_struct init_mm = INIT_MM(init_mm); | ||
61 | EXPORT_SYMBOL(init_mm); | ||
62 | |||
63 | /* this is 8kB-aligned so we can get to the thread_info struct | ||
64 | at the base of it from the stack pointer with 1 integer instruction. */ | ||
65 | union thread_union init_thread_union | ||
66 | __attribute__((__section__(".data.init_task"))) = | ||
67 | { INIT_THREAD_INFO(init_task) }; | ||
68 | |||
69 | /* initial task structure */ | ||
70 | struct task_struct init_task = INIT_TASK(init_task); | ||
71 | EXPORT_SYMBOL(init_task); | ||
72 | |||
73 | /* only used to get secondary processor up */ | ||
74 | struct task_struct *current_set[NR_CPUS] = {&init_task, }; | ||
75 | |||
/*
 * Make sure the floating-point register state in the thread_struct
 * is up to date for task tsk.  No-op for kernel threads
 * (tsk->thread.regs == NULL).
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			/* Saves the FP registers into the thread_struct and
			 * releases ownership of the FPU. */
			giveup_fpu(current);
		}
		preempt_enable();
	}
}
108 | |||
109 | void enable_kernel_fp(void) | ||
110 | { | ||
111 | WARN_ON(preemptible()); | ||
112 | |||
113 | #ifdef CONFIG_SMP | ||
114 | if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | ||
115 | giveup_fpu(current); | ||
116 | else | ||
117 | giveup_fpu(NULL); /* just enables FP for kernel */ | ||
118 | #else | ||
119 | giveup_fpu(last_task_used_math); | ||
120 | #endif /* CONFIG_SMP */ | ||
121 | } | ||
122 | EXPORT_SYMBOL(enable_kernel_fp); | ||
123 | |||
124 | int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) | ||
125 | { | ||
126 | if (!tsk->thread.regs) | ||
127 | return 0; | ||
128 | flush_fp_to_thread(current); | ||
129 | |||
130 | memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs)); | ||
131 | |||
132 | return 1; | ||
133 | } | ||
134 | |||
135 | #ifdef CONFIG_ALTIVEC | ||
136 | void enable_kernel_altivec(void) | ||
137 | { | ||
138 | WARN_ON(preemptible()); | ||
139 | |||
140 | #ifdef CONFIG_SMP | ||
141 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | ||
142 | giveup_altivec(current); | ||
143 | else | ||
144 | giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ | ||
145 | #else | ||
146 | giveup_altivec(last_task_used_altivec); | ||
147 | #endif /* CONFIG_SMP */ | ||
148 | } | ||
149 | EXPORT_SYMBOL(enable_kernel_altivec); | ||
150 | |||
/*
 * Make sure the VMX/Altivec register state in the thread_struct
 * is up to date for task tsk (saves live registers if tsk owns
 * the vector unit).  No-op for kernel threads.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* Same preemption reasoning as flush_fp_to_thread(). */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(current);
		}
		preempt_enable();
	}
}
168 | |||
/*
 * Dump current's AltiVec registers for a core dump.
 * NOTE(review): the regs parameter is unused and the state dumped is
 * always current's — confirm that callers only invoke this for current.
 */
int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	flush_altivec_to_thread(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}
175 | #endif /* CONFIG_ALTIVEC */ | ||
176 | |||
177 | #ifdef CONFIG_SPE | ||
178 | |||
/*
 * Claim the SPE unit for in-kernel use.  Any live user SPE state is
 * first saved to its owner's thread_struct; caller must have
 * preemption disabled.
 */
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
193 | |||
/*
 * Make sure the SPE register state in the thread_struct is up to
 * date for task tsk (saves live registers if tsk owns the unit).
 * No-op for kernel threads.
 */
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* Same preemption reasoning as flush_fp_to_thread(). */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(current);
		}
		preempt_enable();
	}
}
207 | |||
/*
 * Dump current's SPE registers for a core dump.
 * NOTE(review): regs is unused; state is always current's — confirm
 * callers expect this.
 */
int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	flush_spe_to_thread(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 words;
	 * the single memcpy relies on those fields being contiguous
	 * in thread_struct. */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}
215 | #endif /* CONFIG_SPE */ | ||
216 | |||
/* Write the Data Address Breakpoint Register directly via mtspr. */
static void set_dabr_spr(unsigned long val)
{
	mtspr(SPRN_DABR, val);
}
221 | |||
/*
 * Set the data address breakpoint.  On 64-bit firmware that provides
 * the (X)DABR hypervisor interfaces we must go through the hypervisor;
 * otherwise write the SPR directly.  Returns 0 or the hcall status.
 */
int set_dabr(unsigned long dabr)
{
	int ret = 0;

#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_XDABR)) {
		/* We want to catch accesses from kernel and userspace */
		unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
		ret = plpar_set_xdabr(dabr, flags);
	} else if (firmware_has_feature(FW_FEATURE_DABR)) {
		ret = plpar_set_dabr(dabr);
	} else
#endif
		/* Note: this is the body of the trailing "else" above on
		 * PPC64, and unconditional on 32-bit. */
		set_dabr_spr(dabr);

	return ret;
}
239 | |||
240 | static DEFINE_PER_CPU(unsigned long, current_dabr); | ||
241 | |||
/*
 * Architecture context switch: save lazy FP/AltiVec/SPE state for
 * prev where needed, update the hardware breakpoint, then switch
 * stacks/registers via _switch().  Returns the task we switched from
 * (in the new task's context).
 */
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64	/* for now */
	/* Install the incoming task's data breakpoint only if it
	 * differs from what this CPU currently has. */
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
		set_dabr(new->thread.dabr);
		__get_cpu_var(current_dabr) = new->thread.dabr;
	}
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;
	/* _switch() performs the actual stack/register switch with
	 * interrupts disabled; it returns in the new task's context. */
	local_irq_save(flags);
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}
315 | |||
/*
 * Print a register dump for oops/debug output.
 */
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
	       regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
	       print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
	trap = TRAP(regs);
	/* DAR/DSISR are only meaningful for these trap types. */
	if (trap == 0x300 || trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
	printk("TASK = %p[%d] '%s' THREAD: %p\n",
	       current, current->pid, current->comm, current->thread_info);
	printk("Last syscall: %ld ", current->thread.last_syscall);

#ifdef CONFIG_SMP
	printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		long r;
		if ((i % 8) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		/* __get_user() so a bogus regs pointer can't fault us. */
		if (__get_user(r, &regs->gpr[i]))
			break;
		printk("%08lX ", r);
		/* Stop after r12 if only a partial register set was saved. */
		if (i == 12 && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP [%08lx] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR [%08lx] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
}
362 | |||
/*
 * Called when a task exits: drop any lazy FP/AltiVec/SPE ownership so
 * a dead task is never the recorded "last user" of a unit.  Only
 * needed on UP — on SMP the state is saved eagerly at context switch.
 */
void exit_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
}
378 | |||
/*
 * Called on exec: discard lazy FP/AltiVec/SPE ownership (UP only)
 * and clear any data breakpoint the old program had installed.
 */
void flush_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64	/* for now */
	/* The new program must not inherit the old breakpoint. */
	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);
	}
#endif
}
401 | |||
/* Nothing to release per-thread on powerpc; required by generic code. */
void
release_thread(struct task_struct *t)
{
}
406 | |||
407 | /* | ||
408 | * This gets called before we allocate a new thread and copy | ||
409 | * the current task into it. | ||
410 | */ | ||
void prepare_to_copy(struct task_struct *tsk)
{
	/* Save current's live FP/vector/SPE state into its thread_struct
	 * so copy_thread() copies up-to-date values into the child.
	 * NOTE(review): flush_altivec/spe_to_thread are only defined in
	 * this file under CONFIG_ALTIVEC/CONFIG_SPE — assumes no-op
	 * stubs exist in headers otherwise; confirm. */
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
}
417 | |||
418 | /* | ||
419 | * Copy a thread.. | ||
420 | */ | ||
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,
	    struct task_struct *p, struct pt_regs *regs)
{
	/* Build the child's kernel stack: a copy of the parent's user
	 * register frame at the top, then a dummy exception frame and
	 * stack frame arranged so that _switch() starts the child in
	 * ret_from_fork.  nr and unused are ignored. */
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
	unsigned long childframe;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[2] = (unsigned long) p;
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		/* CLONE_SETTLS: the child's r2 (TLS register) comes from
		 * the clone() TLS argument passed in r6. */
		if (clone_flags & CLONE_SETTLS)
			childregs->gpr[2] = childregs->gpr[6];
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;
	childframe = sp;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	kregs->nip = (unsigned long)ret_from_fork;

	p->thread.last_syscall = -1;

	return 0;
}
469 | |||
470 | /* | ||
471 | * Set up a thread for executing a new program | ||
472 | */ | ||
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
	/* Reset the user register frame for a fresh exec: entry point in
	 * nip, stack pointer in gpr[1], a clean user MSR, and all
	 * FP/vector/SPE state zeroed and disowned. */
	set_fs(USER_DS);
	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->mq = 0;
	regs->nip = nip;
	regs->gpr[1] = sp;
	regs->msr = MSR_USER;
#ifndef CONFIG_SMP
	/* UP lazy-state bookkeeping: the exec'ing task must not remain
	 * the recorded owner of any unit. */
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
#endif /* CONFIG_SMP */
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
512 | |||
513 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | ||
514 | | PR_FP_EXC_RES | PR_FP_EXC_INV) | ||
515 | |||
/*
 * Set task tsk's FP exception mode from a PR_SET_FPEXC prctl value.
 * Returns 0 on success, -EINVAL for an unsupported mode.
 */
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (asyn, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		tsk->thread.fpexc_mode = val &
			(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
		return -EINVAL;
#endif
	} else {
		/* on a CONFIG_SPE this does not hurt us.  The bits that
		 * __pack_fe01 use do not overlap with bits used for
		 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
		 * on CONFIG_SPE implementations are reserved so writing to
		 * them does not change anything */
		if (val > PR_FP_EXC_PRECISE)
			return -EINVAL;
		tsk->thread.fpexc_mode = __pack_fe01(val);
		/* If the task's FP is currently live, apply the new
		 * FE0/FE1 bits to its MSR immediately. */
		if (regs != NULL && (regs->msr & MSR_FP) != 0)
			regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
				| tsk->thread.fpexc_mode;
	}
	return 0;
}
546 | |||
/*
 * Read back tsk's FP exception mode for PR_GET_FPEXC; the value is
 * written to user address adr.  Returns 0, -EINVAL, or put_user()'s
 * error code.
 */
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		val = tsk->thread.fpexc_mode;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
561 | |||
/*
 * clone() syscall entry; argument order follows the powerpc ABI.
 * A zero usp means the child reuses the parent's stack pointer.
 * NOTE(review): child_threadptr and p6 are accepted but unused here.
 */
int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
572 | |||
/* fork(): p1..p6 exist only to match the syscall calling convention. */
int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}
580 | |||
/* vfork(): like fork but shares the address space and blocks the
 * parent until the child execs or exits (CLONE_VFORK | CLONE_VM). */
int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
		       regs, 0, NULL, NULL);
}
589 | |||
590 | int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, | ||
591 | unsigned long a3, unsigned long a4, unsigned long a5, | ||
592 | struct pt_regs *regs) | ||
593 | { | ||
594 | int error; | ||
595 | char * filename; | ||
596 | |||
597 | filename = getname((char __user *) a0); | ||
598 | error = PTR_ERR(filename); | ||
599 | if (IS_ERR(filename)) | ||
600 | goto out; | ||
601 | flush_fp_to_thread(current); | ||
602 | flush_altivec_to_thread(current); | ||
603 | flush_spe_to_thread(current); | ||
604 | if (error == 0) { | ||
605 | task_lock(current); | ||
606 | current->ptrace &= ~PT_DTRACE; | ||
607 | task_unlock(current); | ||
608 | } | ||
609 | putname(filename); | ||
610 | out: | ||
611 | return error; | ||
612 | } | ||
613 | |||
/*
 * Check that sp looks like a valid kernel stack location for task p,
 * with at least nbytes of room below the top of the stack.  Also
 * accepts addresses on this CPU's hard/soft IRQ stacks when
 * CONFIG_IRQSTACKS is enabled.
 */
static int validate_sp(unsigned long sp, struct task_struct *p,
		       unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)p->thread_info;

	/* NOTE(review): the low bound reserves sizeof(struct
	 * thread_struct) at the stack base; sizeof(struct thread_info)
	 * looks like what was intended — confirm. */
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

#ifdef CONFIG_IRQSTACKS
	stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
#endif

	return 0;
}
637 | |||
/* Generic dump_stack() entry point: show the current task's stack. */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
644 | |||
/*
 * Walk the stack back-chain of tsk starting at stack (or at tsk's
 * current stack pointer if stack is NULL) and print the saved return
 * addresses, following through exception frames where recognized.
 */
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, stack_top, prev_sp, ret;
	int count = 0;
	unsigned long next_exc = 0;
	struct pt_regs *regs;
	extern char ret_from_except, ret_from_except_full, ret_from_syscall;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));	/* read our own r1 */
		else
			sp = tsk->thread.ksp;
	}

	/* Valid frames lie between just above thread_info and the top
	 * of the stack; each frame's first word links to the caller's
	 * frame, and the saved LR sits at sp + 4. */
	prev_sp = (unsigned long) (tsk->thread_info + 1);
	stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
	while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
		if (count == 0) {
			printk("Call trace:");
#ifdef CONFIG_KALLSYMS
			printk("\n");
#endif
		} else {
			if (next_exc) {
				ret = next_exc;
				next_exc = 0;
			} else
				ret = *(unsigned long *)(sp + 4);
			printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s", ret);
			printk("\n");
#endif
			/* A return through one of the exception-exit
			 * paths means the interrupted NIP is stored in
			 * the exception frame just above this one. */
			if (ret == (unsigned long) &ret_from_except
			    || ret == (unsigned long) &ret_from_except_full
			    || ret == (unsigned long) &ret_from_syscall) {
				/* sp + 16 points to an exception frame */
				regs = (struct pt_regs *) (sp + 16);
				if (sp + 16 + sizeof(*regs) <= stack_top)
					next_exc = regs->nip;
			}
		}
		++count;
		sp = *(unsigned long *)sp;
	}
#ifndef CONFIG_KALLSYMS
	if (count > 0)
		printk("\n");
#endif
}
699 | |||
/*
 * Return the first non-scheduler return address found on sleeping
 * task p's kernel stack (used for /proc wchan).  Returns 0 if the
 * task is running, is current, or its stack can't be walked safely.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, 16))
		return 0;

	do {
		sp = *(unsigned long *)sp;	/* follow the back-chain */
		if (!validate_sp(sp, p, 16))
			return 0;
		if (count > 0) {
			ip = *(unsigned long *)(sp + 4);	/* saved LR */
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c new file mode 100644 index 000000000000..2f8c3c951394 --- /dev/null +++ b/arch/powerpc/kernel/semaphore.c | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/semaphore.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | /* | ||
26 | * Atomically update sem->count. | ||
27 | * This does the equivalent of the following: | ||
28 | * | ||
29 | * old_count = sem->count; | ||
30 | * tmp = MAX(old_count, 0) + incr; | ||
31 | * sem->count = tmp; | ||
32 | * return old_count; | ||
33 | */ | ||
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	/* lwarx/stwcx. retry loop.  The srawi/andc pair computes
	 * MAX(old_count, 0) without a branch: srawi yields all-ones
	 * for a negative count, and andc then clears it to zero. */
	__asm__ __volatile__("\n"
"1:	lwarx	%0,0,%3\n"
"	srawi	%1,%0,31\n"
"	andc	%1,%0,%1\n"
"	add	%1,%1,%4\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n"
"	bne	1b"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}
52 | |||
/* Slow path of up(): repair the count and wake one sleeper. */
void __up(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
67 | |||
68 | /* | ||
69 | * Note that when we come in to __down or __down_interruptible, | ||
70 | * we have already decremented count, but that decrement was | ||
71 | * ineffective since the result was < 0, and any negative value | ||
72 | * of count is equivalent to 0. | ||
73 | * Thus it is only when we decrement count from some value > 0 | ||
74 | * that we have actually got the semaphore. | ||
75 | */ | ||
/* Slow path of down(): sleep uninterruptibly until we get the
 * semaphore (see the comment above for the count convention). */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		/* schedule() returned: re-mark ourselves asleep before
		 * retrying, to avoid a lost-wakeup race. */
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
105 | |||
/* Slow path of down_interruptible(): like __down() but a pending
 * signal aborts the wait.  Returns 0 on success, -EINTR if signalled. */
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/* Wake the next sleeper so the wakeup isn't lost. */
	wake_up(&sem->wait);
	return retval;
}
EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c new file mode 100644 index 000000000000..c7afbbba0f36 --- /dev/null +++ b/arch/powerpc/kernel/traps.c | |||
@@ -0,0 +1,1047 @@ | |||
1 | /* | ||
2 | * arch/powerpc/kernel/traps.c | ||
3 | * | ||
4 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
12 | * and Paul Mackerras (paulus@samba.org) | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * This file handles the architecture-dependent parts of hardware exceptions | ||
17 | */ | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/unistd.h> | ||
26 | #include <linux/ptrace.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/user.h> | ||
29 | #include <linux/a.out.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/config.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/prctl.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/kprobes.h> | ||
37 | #include <asm/kdebug.h> | ||
38 | |||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/reg.h> | ||
44 | #include <asm/xmon.h> | ||
45 | #ifdef CONFIG_PMAC_BACKLIGHT | ||
46 | #include <asm/backlight.h> | ||
47 | #endif | ||
48 | #include <asm/perfmon.h> | ||
49 | |||
#ifdef CONFIG_DEBUGGER
/*
 * Hooks that an attached kernel debugger (e.g. xmon) fills in.  Each
 * returns nonzero if it handled the event, in which case normal
 * exception processing is skipped.  NULL when no debugger is active.
 */
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
int (*__debugger_sstep)(struct pt_regs *regs);
int (*__debugger_iabr_match)(struct pt_regs *regs);
int (*__debugger_dabr_match)(struct pt_regs *regs);
int (*__debugger_fault_handler)(struct pt_regs *regs);

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Notifier chain run from notify_die(); guarded by die_notifier_lock. */
struct notifier_block *powerpc_die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);
70 | |||
71 | int register_die_notifier(struct notifier_block *nb) | ||
72 | { | ||
73 | int err = 0; | ||
74 | unsigned long flags; | ||
75 | |||
76 | spin_lock_irqsave(&die_notifier_lock, flags); | ||
77 | err = notifier_chain_register(&powerpc_die_chain, nb); | ||
78 | spin_unlock_irqrestore(&die_notifier_lock, flags); | ||
79 | return err; | ||
80 | } | ||
81 | |||
/*
 * Trap & Exception support
 */

/* Serializes oops output from die() so reports from different CPUs
 * do not interleave on the console. */
static DEFINE_SPINLOCK(die_lock);
/*
 * Report a fatal kernel exception: print an oops banner, the kernel
 * configuration/platform flags, the module list and register state,
 * then kill the current task (or panic when recovery is impossible).
 *
 * Returns 1 if an attached debugger claimed the event; otherwise the
 * function does not return normally (do_exit() is called) and the
 * final "return 0" only exists to satisfy the compiler.
 */
int die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;		/* numbers successive oopses in the log */
	int nl = 0;			/* set when the flags line needs a newline */

	/* Give a debugger first crack at the exception. */
	if (debugger(regs))
		return 1;

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_PMAC_BACKLIGHT
	/* Make sure the oops is visible on a PowerBook with a dimmed screen. */
	if (_machine == _MACH_Pmac) {
		set_backlight_enable(1);
		set_backlight_level(BACKLIGHT_MAX);
	}
#endif
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
	nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
	nl = 1;
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
	nl = 1;
#endif
#ifdef CONFIG_PPC64
	/* Identify the 64-bit platform type in the oops header. */
	switch (systemcfg->platform) {
	case PLATFORM_PSERIES:
		printk("PSERIES ");
		nl = 1;
		break;
	case PLATFORM_PSERIES_LPAR:
		printk("PSERIES LPAR ");
		nl = 1;
		break;
	case PLATFORM_ISERIES_LPAR:
		printk("ISERIES LPAR ");
		nl = 1;
		break;
	case PLATFORM_POWERMAC:
		printk("POWERMAC ");
		nl = 1;
		break;
	case PLATFORM_BPA:
		printk("BPA ");
		nl = 1;
		break;
	}
#endif
	if (nl)
		printk("\n");
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);

	/* An oops in interrupt context cannot be recovered from. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		panic("Fatal exception");
	}
	do_exit(err);

	return 0;
}
163 | |||
/*
 * Deliver signal @signr (with siginfo code @code and fault address
 * @addr) to the current task.  Exceptions taken in kernel mode go to
 * die() first; if a debugger handles them there we return without
 * raising a signal.
 */
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere.  Better to kill it and let the kernel panic.
	 */
	if (current->pid == 1) {
		__sighandler_t handler;

		/* siglock protects the signal handler table we peek at. */
		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}
201 | |||
202 | #ifdef CONFIG_PPC64 | ||
/*
 * Handle a system reset exception (64-bit only).  Platform code gets
 * a chance to handle it first; otherwise we oops.  If die() returns
 * (debugger handled it) but the interrupt state is unrecoverable
 * (MSR_RI clear), we must panic.
 */
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception)
		ppc_md.system_reset_exception(regs);

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
217 | #endif | ||
218 | |||
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 *
 * Returns 1 (and redirects NIP to the fixup handler) if the machine
 * check was caused by a faulting I/O access we can recover from,
 * 0 otherwise.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_PMAC
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			/* step back to the load/store; RB holds the port. */
			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			/* Recover: mark state recoverable, jump to fixup. */
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC_PMAC */
	return 0;
}
267 | |||
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx/Book-E, the reason for the machine check or program exception
   is in the ESR (which the exception entry code saves in regs->dsisr). */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
/* Freescale Book-E reports machine check causes in the MCSR SPR. */
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff: 4xx/Book-E single-step via DBCR0 instruction
   completion, not MSR_SE. */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
299 | |||
/*
 * This is "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info.
 * Platforms override this weak symbol to dump additional state
 * (e.g. bus error registers) from MachineCheckException().
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}
309 | void MachineCheckException(struct pt_regs *regs) | ||
310 | { | ||
311 | #ifdef CONFIG_PPC64 | ||
312 | int recover = 0; | ||
313 | |||
314 | /* See if any machine dependent calls */ | ||
315 | if (ppc_md.machine_check_exception) | ||
316 | recover = ppc_md.machine_check_exception(regs); | ||
317 | |||
318 | if (recover) | ||
319 | return; | ||
320 | #else | ||
321 | unsigned long reason = get_mc_reason(regs); | ||
322 | |||
323 | if (user_mode(regs)) { | ||
324 | regs->msr |= MSR_RI; | ||
325 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | ||
326 | return; | ||
327 | } | ||
328 | |||
329 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | ||
330 | /* the qspan pci read routines can cause machine checks -- Cort */ | ||
331 | bad_page_fault(regs, regs->dar, SIGBUS); | ||
332 | return; | ||
333 | #endif | ||
334 | |||
335 | if (debugger_fault_handler(regs)) { | ||
336 | regs->msr |= MSR_RI; | ||
337 | return; | ||
338 | } | ||
339 | |||
340 | if (check_io_access(regs)) | ||
341 | return; | ||
342 | |||
343 | #if defined(CONFIG_4xx) && !defined(CONFIG_440A) | ||
344 | if (reason & ESR_IMCP) { | ||
345 | printk("Instruction"); | ||
346 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
347 | } else | ||
348 | printk("Data"); | ||
349 | printk(" machine check in kernel mode.\n"); | ||
350 | #elif defined(CONFIG_440A) | ||
351 | printk("Machine check in kernel mode.\n"); | ||
352 | if (reason & ESR_IMCP){ | ||
353 | printk("Instruction Synchronous Machine Check exception\n"); | ||
354 | mtspr(SPRN_ESR, reason & ~ESR_IMCP); | ||
355 | } | ||
356 | else { | ||
357 | u32 mcsr = mfspr(SPRN_MCSR); | ||
358 | if (mcsr & MCSR_IB) | ||
359 | printk("Instruction Read PLB Error\n"); | ||
360 | if (mcsr & MCSR_DRB) | ||
361 | printk("Data Read PLB Error\n"); | ||
362 | if (mcsr & MCSR_DWB) | ||
363 | printk("Data Write PLB Error\n"); | ||
364 | if (mcsr & MCSR_TLBP) | ||
365 | printk("TLB Parity Error\n"); | ||
366 | if (mcsr & MCSR_ICP){ | ||
367 | flush_instruction_cache(); | ||
368 | printk("I-Cache Parity Error\n"); | ||
369 | } | ||
370 | if (mcsr & MCSR_DCSP) | ||
371 | printk("D-Cache Search Parity Error\n"); | ||
372 | if (mcsr & MCSR_DCFP) | ||
373 | printk("D-Cache Flush Parity Error\n"); | ||
374 | if (mcsr & MCSR_IMPE) | ||
375 | printk("Machine Check exception is imprecise\n"); | ||
376 | |||
377 | /* Clear MCSR */ | ||
378 | mtspr(SPRN_MCSR, mcsr); | ||
379 | } | ||
380 | #elif defined (CONFIG_E500) | ||
381 | printk("Machine check in kernel mode.\n"); | ||
382 | printk("Caused by (from MCSR=%lx): ", reason); | ||
383 | |||
384 | if (reason & MCSR_MCP) | ||
385 | printk("Machine Check Signal\n"); | ||
386 | if (reason & MCSR_ICPERR) | ||
387 | printk("Instruction Cache Parity Error\n"); | ||
388 | if (reason & MCSR_DCP_PERR) | ||
389 | printk("Data Cache Push Parity Error\n"); | ||
390 | if (reason & MCSR_DCPERR) | ||
391 | printk("Data Cache Parity Error\n"); | ||
392 | if (reason & MCSR_GL_CI) | ||
393 | printk("Guarded Load or Cache-Inhibited stwcx.\n"); | ||
394 | if (reason & MCSR_BUS_IAERR) | ||
395 | printk("Bus - Instruction Address Error\n"); | ||
396 | if (reason & MCSR_BUS_RAERR) | ||
397 | printk("Bus - Read Address Error\n"); | ||
398 | if (reason & MCSR_BUS_WAERR) | ||
399 | printk("Bus - Write Address Error\n"); | ||
400 | if (reason & MCSR_BUS_IBERR) | ||
401 | printk("Bus - Instruction Data Error\n"); | ||
402 | if (reason & MCSR_BUS_RBERR) | ||
403 | printk("Bus - Read Data Bus Error\n"); | ||
404 | if (reason & MCSR_BUS_WBERR) | ||
405 | printk("Bus - Read Data Bus Error\n"); | ||
406 | if (reason & MCSR_BUS_IPERR) | ||
407 | printk("Bus - Instruction Parity Error\n"); | ||
408 | if (reason & MCSR_BUS_RPERR) | ||
409 | printk("Bus - Read Parity Error\n"); | ||
410 | #elif defined (CONFIG_E200) | ||
411 | printk("Machine check in kernel mode.\n"); | ||
412 | printk("Caused by (from MCSR=%lx): ", reason); | ||
413 | |||
414 | if (reason & MCSR_MCP) | ||
415 | printk("Machine Check Signal\n"); | ||
416 | if (reason & MCSR_CP_PERR) | ||
417 | printk("Cache Push Parity Error\n"); | ||
418 | if (reason & MCSR_CPERR) | ||
419 | printk("Cache Parity Error\n"); | ||
420 | if (reason & MCSR_EXCP_ERR) | ||
421 | printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); | ||
422 | if (reason & MCSR_BUS_IRERR) | ||
423 | printk("Bus - Read Bus Error on instruction fetch\n"); | ||
424 | if (reason & MCSR_BUS_DRERR) | ||
425 | printk("Bus - Read Bus Error on data load\n"); | ||
426 | if (reason & MCSR_BUS_WRERR) | ||
427 | printk("Bus - Write Bus Error on buffered store or cache line push\n"); | ||
428 | #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */ | ||
429 | printk("Machine check in kernel mode.\n"); | ||
430 | printk("Caused by (from SRR1=%lx): ", reason); | ||
431 | switch (reason & 0x601F0000) { | ||
432 | case 0x80000: | ||
433 | printk("Machine check signal\n"); | ||
434 | break; | ||
435 | case 0: /* for 601 */ | ||
436 | case 0x40000: | ||
437 | case 0x140000: /* 7450 MSS error and TEA */ | ||
438 | printk("Transfer error ack signal\n"); | ||
439 | break; | ||
440 | case 0x20000: | ||
441 | printk("Data parity error signal\n"); | ||
442 | break; | ||
443 | case 0x10000: | ||
444 | printk("Address parity error signal\n"); | ||
445 | break; | ||
446 | case 0x20000000: | ||
447 | printk("L1 Data Cache error\n"); | ||
448 | break; | ||
449 | case 0x40000000: | ||
450 | printk("L1 Instruction Cache error\n"); | ||
451 | break; | ||
452 | case 0x00100000: | ||
453 | printk("L2 data cache parity error\n"); | ||
454 | break; | ||
455 | default: | ||
456 | printk("Unknown values in msr\n"); | ||
457 | } | ||
458 | #endif /* CONFIG_4xx */ | ||
459 | |||
460 | /* | ||
461 | * Optional platform-provided routine to print out | ||
462 | * additional info, e.g. bus error registers. | ||
463 | */ | ||
464 | platform_machine_check(regs); | ||
465 | #endif /* CONFIG_PPC64 */ | ||
466 | |||
467 | if (debugger_fault_handler(regs)) | ||
468 | return; | ||
469 | die("Machine check", regs, SIGBUS); | ||
470 | |||
471 | /* Must die if the interrupt is not recoverable */ | ||
472 | if (!(regs->msr & MSR_RI)) | ||
473 | panic("Unrecoverable Machine check"); | ||
474 | } | ||
475 | |||
/* System Management Interrupt: nothing sensible to do but oops. */
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
480 | |||
/*
 * Catch-all for exception vectors with no dedicated handler: log the
 * trap and deliver SIGTRAP (or die, if taken in kernel mode).
 */
void UnknownException(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}
488 | |||
/*
 * Instruction address breakpoint (IABR) match.  Offer the event to
 * the die-notifier chain and an attached debugger before signalling
 * the task with SIGTRAP/TRAP_BRKPT.
 */
void InstructionBreakpoint(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
			5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}
498 | |||
/* Run-mode/trace exception (601): report it as a plain SIGTRAP. */
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}
503 | |||
/*
 * Single-step (trace) exception.  Clears the trace bits so we don't
 * immediately re-trap, then offers the event to the notifier chain
 * and debugger before delivering SIGTRAP/TRAP_TRACE.
 */
void SingleStepException(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);  /* Turn off 'trace' bits */

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}
516 | |||
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		/* Clear the step condition, then report the fake trace trap. */
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
530 | |||
/* Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 * Each INST_* value below is the opcode pattern after masking with the
 * corresponding *_MASK (which clears the free register/operand fields).
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0x7c0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0x7c0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0x7c0007fe
#define INST_STRING_GEN_MASK	0x7c00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a
557 | |||
/*
 * Emulate the load/store-string instructions lswi/lswx/stswi/stswx,
 * one byte at a time.  Bytes are packed big-endian into successive
 * GPRs starting at rT, wrapping from r31 to r0.
 *
 * Returns 0 on success, -EFAULT if a user memory access faulted,
 * -EINVAL for an invalid instruction form.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;	/* target/source register */
	u8 rA = (instword >> 16) & 0x1f;	/* base address register */
	u8 NB_RB = (instword >> 11) & 0x1f;	/* byte count (NB) or rB */
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;				/* byte position within current GPR */

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	/* rA == 0 means a base of zero, per the architecture. */
	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
		case INST_LSWX:
		case INST_STSWX:
			/* X-form: EA = (rA|0) + rB, count from XER[25:31]. */
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case INST_LSWI:
		case INST_STSWI:
			/* Immediate form: NB of 0 means 32 bytes. */
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		/* Big-endian packing: first byte goes in the high-order
		 * byte of the register. */
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & INST_STRING_MASK)) {
			case INST_LSWX:
			case INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case INST_STSWI:
			case INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;	/* registers wrap r31 -> r0 */
		}
	}

	return 0;
}
625 | |||
/*
 * Try to emulate an instruction that trapped as illegal/privileged in
 * user mode: mfspr rD,PVR; dcba (no-op); mcrxr; and the load/store
 * string instructions.
 *
 * Returns 0 if emulated, -EFAULT on a faulting memory access,
 * -EINVAL if the instruction is not one we emulate.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	/* Only emulate on behalf of user mode, with full registers saved. */
	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	/* Fetch the trapping instruction from user memory. */
	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn: copy XER[0:3] into the chosen CR
	 * field and clear those XER bits. */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	return -EINVAL;
}
665 | |||
/*
 * Look through the list of trap instructions that are used for BUG(),
 * BUG_ON() and WARN_ON() and see if we hit one.  At this point we know
 * that the exception was caused by a trap instruction of some kind.
 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
 * otherwise.
 */
extern struct bug_entry __start___bug_table[], __stop___bug_table[];

#ifndef CONFIG_MODULES
/* Without modules there is no module bug table to search. */
#define module_find_bug(x)	NULL
#endif
678 | |||
679 | struct bug_entry *find_bug(unsigned long bugaddr) | ||
680 | { | ||
681 | struct bug_entry *bug; | ||
682 | |||
683 | for (bug = __start___bug_table; bug < __stop___bug_table; ++bug) | ||
684 | if (bugaddr == bug->bug_addr) | ||
685 | return bug; | ||
686 | return module_find_bug(bugaddr); | ||
687 | } | ||
688 | |||
/*
 * Decide whether a trap taken in kernel mode came from a BUG()/
 * WARN_ON() trap instruction.  Returns 1 if execution should continue
 * past the trap (it was a WARN_ON), 0 otherwise (not a bug trap, or a
 * fatal BUG which we report before letting the caller deliver the
 * trap normally).
 */
int check_bug_trap(struct pt_regs *regs)
{
	struct bug_entry *bug;
	unsigned long addr;

	if (regs->msr & MSR_PR)
		return 0;	/* not in kernel */
	addr = regs->nip;	/* address of trap instruction */
	if (addr < PAGE_OFFSET)
		return 0;	/* not a kernel address; can't be in the table */
	bug = find_bug(regs->nip);
	if (bug == NULL)
		return 0;
	if (bug->line & BUG_WARNING_TRAP) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
#ifdef CONFIG_XMON
		xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
			    bug->function, bug->file,
			    bug->line & ~BUG_WARNING_TRAP);
#endif /* CONFIG_XMON */
		printk(KERN_ERR "Badness in %s at %s:%d\n",
		       bug->function, bug->file,
		       bug->line & ~BUG_WARNING_TRAP);
		dump_stack();
		return 1;
	}
#ifdef CONFIG_XMON
	/* Fatal BUG: drop into xmon if it is configured. */
	xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
		    bug->function, bug->file, bug->line);
	xmon(regs);
#endif /* CONFIG_XMON */
	printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
	       bug->function, bug->file, bug->line);

	return 0;
}
725 | |||
/*
 * Handle a program check exception.  The cause (from get_reason())
 * may be an IEEE FP-enabled exception, a trap instruction (debugger
 * breakpoint or BUG/WARN_ON), or an illegal/privileged instruction,
 * some of which we emulate.
 */
void ProgramCheckException(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		int code = 0;
		u32 fpscr;

		/* We must make sure the FP state is consistent with
		 * our MSR_FP in regs
		 */
		preempt_disable();
		if (regs->msr & MSR_FP)
			giveup_fpu(current);
		preempt_enable();

		/* Map the highest-priority enabled FPSCR exception bit
		 * to a siginfo FPE code. */
		fpscr = current->thread.fpscr;
		fpscr &= fpscr << 22;	/* mask summary bits with enables */
		if (fpscr & FPSCR_VX)
			code = FPE_FLTINV;
		else if (fpscr & FPSCR_OX)
			code = FPE_FLTOVF;
		else if (fpscr & FPSCR_UX)
			code = FPE_FLTUND;
		else if (fpscr & FPSCR_ZX)
			code = FPE_FLTDIV;
		else if (fpscr & FPSCR_XX)
			code = FPE_FLTRES;
		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}

	if (reason & REASON_TRAP) {
		/* trap exception: offer it to the debugger and the
		 * BUG/WARN_ON machinery before signalling. */
		if (debugger_bpt(regs))
			return;
		if (check_bug_trap(regs)) {
			/* WARN_ON: resume after the trap instruction. */
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
		return;
	}

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			/* emulated successfully: skip it, fake the
			 * single-step trap if we were stepping. */
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
804 | |||
/*
 * Handle an alignment exception: try to emulate the unaligned access
 * via fix_alignment().  On success we skip the instruction; on a bad
 * operand address we raise SIGSEGV (or search the exception table in
 * kernel mode); anything else becomes SIGBUS/BUS_ADRALN.
 */
void AlignmentException(struct pt_regs *regs)
{
	int fixed;

	fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
		else
			/* Search exception table */
			bad_page_fault(regs, regs->dar, SIGSEGV);
		return;
	}
	/* Could not emulate: report the misaligned access itself. */
	_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
}
828 | |||
/*
 * Kernel stack overflow detected by the exception entry code.
 * Unrecoverable: log, give the debugger a look, and panic.
 */
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
837 | |||
/*
 * An exception arrived while the machine state was unrecoverable
 * (MSR_RI clear): report it and die.
 */
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}
845 | |||
/*
 * Debug helper: log one line per system call (task, PC/LR, syscall
 * number from r0, result from r3; CR0.SO flags an error return).
 */
void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}
852 | |||
853 | #ifdef CONFIG_8xx | ||
/*
 * 8xx software FPU emulation trap.  Dispatches to the full math
 * emulator or the minimal 8xx soft-FP emulator, then converts the
 * error code into the matching signal (or fakes a single-step trap on
 * success).  Kernel-mode hits are fatal.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif
	if (errcode) {
		if (errcode > 0)
			_exception(SIGFPE, regs, 0, 0);
		else if (errcode == -EFAULT)
			_exception(SIGSEGV, regs, 0, 0);
		else
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
	} else
		emulate_single_step(regs);
}
882 | #endif /* CONFIG_8xx */ | ||
883 | |||
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

/*
 * 4xx/Book-E debug exception.  Currently only handles the
 * instruction-completion (single-step) event: clear the step
 * condition and deliver SIGTRAP/TRAP_TRACE, letting an attached
 * debugger claim kernel-mode steps first.
 */
void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_40x || CONFIG_BOOKE */
904 | |||
#if !defined(CONFIG_TAU_INT)
/* Thermal Assist Unit interrupt: without the TAU driver configured,
 * just log the event. */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
912 | |||
/*
 * AltiVec-unavailable exception.  A user process on a non-AltiVec
 * kernel gets SIGILL.  In kernel mode, the kernel itself used AltiVec
 * without enabling it first: complain (rate-limited to the first ten
 * occurrences) but set MSR_VEC and let it proceed.
 */
void AltivecUnavailException(struct pt_regs *regs)
{
	static int kernel_altivec_count;

#ifndef CONFIG_ALTIVEC
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
#endif
	/* The kernel has executed an altivec instruction without
	   first enabling altivec.  Whinge but let it do it. */
	if (++kernel_altivec_count < 10)
		printk(KERN_ERR "AltiVec used in kernel  (task=%p, pc=%lx)\n",
		       current, regs->nip);
	regs->msr |= MSR_VEC;
}
932 | |||
#ifdef CONFIG_ALTIVEC
/*
 * AltiVec assist exception: the hardware needs software help with a
 * vector operation.  Flush the task's live vector state to the
 * thread_struct, then emulate the faulting instruction.
 */
void AltivecAssistException(struct pt_regs *regs)
{
	int err;

	/* Flush live vector state so emulate_altivec() sees current
	   values; disable preemption around the MSR_VEC check. */
	preempt_disable();
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	preempt_enable();
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel Altivec assist exception", regs, SIGILL);
	}

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
968 | |||
#ifdef CONFIG_E500
/* e500 performance-monitor interrupt: delegate to the registered handler. */
void PerformanceMonitorException(struct pt_regs *regs)
{
	perf_irq(regs);
}
#endif
975 | |||
#ifdef CONFIG_FSL_BOOKE
/*
 * Cache-locking exception (Freescale Book-E).
 *
 * We treat cache locking instructions from the user
 * as priv ops, in the future we could try to do
 * something smarter.  Faults without ESR_DLK/ESR_ILK set are
 * silently ignored.
 */
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
}
#endif /* CONFIG_FSL_BOOKE */
989 | |||
#ifdef CONFIG_SPE
/*
 * SPE floating-point exception: translate the SPEFSCR status bits into
 * a SIGFPE si_code, honouring the thread's prctl-selected exception
 * mask (fpexc_mode), set the corresponding sticky bit, and deliver
 * the signal.
 */
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not neccessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	} else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	} else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV)) {
		code = FPE_FLTDIV;
	} else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	} else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) &&
		   (fpexc_mode & PR_FP_EXC_RES)) {
		code = FPE_FLTRES;
	}

	/* Write back the (possibly updated) sticky bits */
	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
}
#endif
1025 | |||
1026 | #ifdef CONFIG_BOOKE_WDT | ||
1027 | /* | ||
1028 | * Default handler for a Watchdog exception, | ||
1029 | * spins until a reboot occurs | ||
1030 | */ | ||
1031 | void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs) | ||
1032 | { | ||
1033 | /* Generic WatchdogHandler, implement your own */ | ||
1034 | mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE)); | ||
1035 | return; | ||
1036 | } | ||
1037 | |||
/*
 * Book-E watchdog exception entry point: log the event and hand off
 * to WatchdogHandler (weak default, overridable by board code).
 */
void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
1044 | |||
/* Boot-time trap initialisation hook -- intentionally empty on powerpc. */
void __init trap_init(void)
{
}
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S new file mode 100644 index 000000000000..12cb90bc209c --- /dev/null +++ b/arch/powerpc/kernel/vector.S | |||
@@ -0,0 +1,197 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <asm/ppc_asm.h> | ||
3 | #include <asm/processor.h> | ||
4 | |||
/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	/* 32-bit: single-precision constants in .data, loaded lis/lfs */
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	/* 64-bit: double-precision constants kept in the TOC */
	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif
35 | |||
	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 * Callers stash their LR in r12 first (fpdisable returns through it);
 * the old MSR is left in r10 and fr0/fr1/fr31 are saved on a 64-byte
 * frame for fpdisable to restore.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31		/* save caller's FPSCR in fr31 */
	mtfsf	0xff,fr1	/* clear FPSCR */
	blr
58 | |||
/*
 * Undo fpenable: restore the caller's FPSCR and saved FP registers,
 * restore the original MSR from r10, pop the frame, and return to
 * the address the caller saved in r12.
 */
fpdisable:
	mtlr	r12
	mtfsf	0xff,fr31
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr
69 | |||
/*
 * Vector add, floating point.
 * r3 -> destination, r4/r5 -> sources; 4 single-precision elements.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0		/* byte offset into the vectors */
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
86 | |||
/*
 * Vector subtract, floating point.
 * r3 -> destination; computes (r4)[i] - (r5)[i] for 4 elements.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0		/* byte offset into the vectors */
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
103 | |||
/*
 * Vector multiply and add, floating point.
 * r3 -> destination; computes (r4)[i] * (r6)[i] + (r5)[i]
 * for 4 single-precision elements.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)	/* fpenable only preserves fr0/fr1/fr31 */
	li	r0,4
	mtctr	r0
	li	r7,0		/* byte offset into the vectors */
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
123 | |||
/*
 * Vector negative multiply and subtract, floating point.
 * r3 -> destination; computes -((r4)[i] * (r6)[i] - (r5)[i])
 * for 4 single-precision elements.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)	/* fpenable only preserves fr0/fr1/fr31 */
	li	r0,4
	mtctr	r0
	li	r7,0		/* byte offset into the vectors */
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
143 | |||
/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)	/* fr1 = 1.0, the dividend */
	mtctr	r0
	li	r6,0		/* byte offset into the vectors */
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
161 | |||
/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)	/* extra scratch FPRs beyond what */
	stfd	fr3,40(r1)	/* fpenable preserves */
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0		/* byte offset into the vectors */
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
diff --git a/arch/powerpc/kernel/vmlinux.lds b/arch/powerpc/kernel/vmlinux.lds new file mode 100644 index 000000000000..d62c288a81d0 --- /dev/null +++ b/arch/powerpc/kernel/vmlinux.lds | |||
@@ -0,0 +1,174 @@ | |||
/* Align . to a 8 byte boundary equals to maximum function alignment. */
/* sched.text is aligned to function alignment to secure we have same
 * address even at second ld pass when generating System.map */
/* spinlock.text is aligned to function alignment to secure we have same
 * address even at second ld pass when generating System.map */
/* DWARF debug sections.
   Symbols in the DWARF debugging sections are relative to
   the beginning of the section so we begin them at 0. */
/* Stabs debugging sections. */
/* NOTE(review): this file looks like the cpp-preprocessed output of
   vmlinux.lds.S in this directory -- confirm both belong in-tree. */
OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
SECTIONS
{
  /* Read-only sections, merged into text segment: */
  . = + SIZEOF_HEADERS;
  .interp : { *(.interp) }
  .hash : { *(.hash) }
  .dynsym : { *(.dynsym) }
  .dynstr : { *(.dynstr) }
  .rel.text : { *(.rel.text) }
  .rela.text : { *(.rela.text) }
  .rel.data : { *(.rel.data) }
  .rela.data : { *(.rela.data) }
  .rel.rodata : { *(.rel.rodata) }
  .rela.rodata : { *(.rela.rodata) }
  .rel.got : { *(.rel.got) }
  .rela.got : { *(.rela.got) }
  .rel.ctors : { *(.rel.ctors) }
  .rela.ctors : { *(.rela.ctors) }
  .rel.dtors : { *(.rel.dtors) }
  .rela.dtors : { *(.rela.dtors) }
  .rel.bss : { *(.rel.bss) }
  .rela.bss : { *(.rela.bss) }
  .rel.plt : { *(.rel.plt) }
  .rela.plt : { *(.rela.plt) }
  /* .init : { *(.init) } =0*/
  .plt : { *(.plt) }
  .text :
  {
    *(.text)
    . = ALIGN(8); __sched_text_start = .; *(.sched.text) __sched_text_end = .;
    . = ALIGN(8); __lock_text_start = .; *(.spinlock.text) __lock_text_end = .;
    *(.fixup)
    *(.got1)
    __got2_start = .;
    *(.got2)
    __got2_end = .;
  }
  _etext = .;
  PROVIDE (etext = .);
  /* Expanded RODATA macro: rodata, PCI fixup tables, ksymtab/kcrctab,
     and module parameter sections. */
  .rodata : AT(ADDR(.rodata) - 0) { *(.rodata) *(.rodata.*) *(__vermagic) } .rodata1 : AT(ADDR(.rodata1) - 0) { *(.rodata1) } .pci_fixup : AT(ADDR(.pci_fixup) - 0) { __start_pci_fixups_early = .; *(.pci_fixup_early) __end_pci_fixups_early = .; __start_pci_fixups_header = .; *(.pci_fixup_header) __end_pci_fixups_header = .; __start_pci_fixups_final = .; *(.pci_fixup_final) __end_pci_fixups_final = .; __start_pci_fixups_enable = .; *(.pci_fixup_enable) __end_pci_fixups_enable = .; } __ksymtab : AT(ADDR(__ksymtab) - 0) { __start___ksymtab = .; *(__ksymtab) __stop___ksymtab = .; } __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - 0) { __start___ksymtab_gpl = .; *(__ksymtab_gpl) __stop___ksymtab_gpl = .; } __kcrctab : AT(ADDR(__kcrctab) - 0) { __start___kcrctab = .; *(__kcrctab) __stop___kcrctab = .; } __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - 0) { __start___kcrctab_gpl = .; *(__kcrctab_gpl) __stop___kcrctab_gpl = .; } __ksymtab_strings : AT(ADDR(__ksymtab_strings) - 0) { *(__ksymtab_strings) } __param : AT(ADDR(__param) - 0) { __start___param = .; *(__param) __stop___param = .; }
  .fini : { *(.fini) } =0
  .ctors : { *(.ctors) }
  .dtors : { *(.dtors) }
  .fixup : { *(.fixup) }
  __ex_table : {
    __start___ex_table = .;
    *(__ex_table)
    __stop___ex_table = .;
  }
  __bug_table : {
    __start___bug_table = .;
    *(__bug_table)
    __stop___bug_table = .;
  }
  /* Read-write section, merged into data segment: */
  . = ALIGN(4096);
  .data :
  {
    *(.data)
    *(.data1)
    *(.sdata)
    *(.sdata2)
    *(.got.plt) *(.got)
    *(.dynamic)
    CONSTRUCTORS
  }

  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;

  . = ALIGN(32);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  _edata = .;
  PROVIDE (edata = .);

  . = ALIGN(8192);
  .data.init_task : { *(.data.init_task) }

  /* Init code and data, freed after boot (between __init_begin/__init_end) */
  . = ALIGN(4096);
  __init_begin = .;
  .init.text : {
    _sinittext = .;
    *(.init.text)
    _einittext = .;
  }
  /* .exit.text is discarded at runtime, not link time,
     to deal with references from __bug_table */
  .exit.text : { *(.exit.text) }
  .init.data : {
    *(.init.data);
    __vtop_table_begin = .;
    *(.vtop_fixup);
    __vtop_table_end = .;
    __ptov_table_begin = .;
    *(.ptov_fixup);
    __ptov_table_end = .;
  }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
    *(.initcall1.init)
    *(.initcall2.init)
    *(.initcall3.init)
    *(.initcall4.init)
    *(.initcall5.init)
    *(.initcall6.init)
    *(.initcall7.init)
  }
  __initcall_end = .;

  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;

  .security_initcall.init : AT(ADDR(.security_initcall.init) - 0) { __security_initcall_start = .; *(.security_initcall.init) __security_initcall_end = .; }

  __start___ftr_fixup = .;
  __ftr_fixup : { *(__ftr_fixup) }
  __stop___ftr_fixup = .;

  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
  __per_cpu_end = .;

  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;

  . = ALIGN(4096);
  __init_end = .;

  . = ALIGN(4096);
  _sextratext = .;
  _eextratext = .;

  __bss_start = .;
  .bss :
  {
    *(.sbss) *(.scommon)
    *(.dynbss)
    *(.bss)
    *(COMMON)
  }
  __bss_stop = .;

  _end = . ;
  PROVIDE (end = .);

  /* Sections to be discarded. */
  /DISCARD/ : {
    *(.exitcall.exit)
    *(.exit.data)
  }
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..09c6525cfa61 --- /dev/null +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,172 @@ | |||
/* Linker script source for the powerpc kernel image; SCHED_TEXT,
 * LOCK_TEXT, RODATA and SECURITY_INIT come from the generic header. */
#include <asm-generic/vmlinux.lds.h>

OUTPUT_ARCH(powerpc:common)
jiffies = jiffies_64 + 4;
SECTIONS
{
  /* Read-only sections, merged into text segment: */
  . = + SIZEOF_HEADERS;
  .interp : { *(.interp) }
  .hash : { *(.hash) }
  .dynsym : { *(.dynsym) }
  .dynstr : { *(.dynstr) }
  .rel.text : { *(.rel.text) }
  .rela.text : { *(.rela.text) }
  .rel.data : { *(.rel.data) }
  .rela.data : { *(.rela.data) }
  .rel.rodata : { *(.rel.rodata) }
  .rela.rodata : { *(.rela.rodata) }
  .rel.got : { *(.rel.got) }
  .rela.got : { *(.rela.got) }
  .rel.ctors : { *(.rel.ctors) }
  .rela.ctors : { *(.rela.ctors) }
  .rel.dtors : { *(.rel.dtors) }
  .rela.dtors : { *(.rela.dtors) }
  .rel.bss : { *(.rel.bss) }
  .rela.bss : { *(.rela.bss) }
  .rel.plt : { *(.rel.plt) }
  .rela.plt : { *(.rela.plt) }
  /* .init : { *(.init) } =0*/
  .plt : { *(.plt) }
  .text :
  {
    *(.text)
    SCHED_TEXT
    LOCK_TEXT
    *(.fixup)
    *(.got1)
    __got2_start = .;
    *(.got2)
    __got2_end = .;
  }
  _etext = .;
  PROVIDE (etext = .);

  RODATA
  .fini : { *(.fini) } =0
  .ctors : { *(.ctors) }
  .dtors : { *(.dtors) }

  .fixup : { *(.fixup) }

  __ex_table : {
    __start___ex_table = .;
    *(__ex_table)
    __stop___ex_table = .;
  }

  __bug_table : {
    __start___bug_table = .;
    *(__bug_table)
    __stop___bug_table = .;
  }

  /* Read-write section, merged into data segment: */
  . = ALIGN(4096);
  .data :
  {
    *(.data)
    *(.data1)
    *(.sdata)
    *(.sdata2)
    *(.got.plt) *(.got)
    *(.dynamic)
    CONSTRUCTORS
  }

  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;

  . = ALIGN(32);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  _edata = .;
  PROVIDE (edata = .);

  . = ALIGN(8192);
  .data.init_task : { *(.data.init_task) }

  /* Init code and data, freed after boot (between __init_begin/__init_end) */
  . = ALIGN(4096);
  __init_begin = .;
  .init.text : {
    _sinittext = .;
    *(.init.text)
    _einittext = .;
  }
  /* .exit.text is discarded at runtime, not link time,
     to deal with references from __bug_table */
  .exit.text : { *(.exit.text) }
  .init.data : {
    *(.init.data);
    __vtop_table_begin = .;
    *(.vtop_fixup);
    __vtop_table_end = .;
    __ptov_table_begin = .;
    *(.ptov_fixup);
    __ptov_table_end = .;
  }
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
    *(.initcall1.init)
    *(.initcall2.init)
    *(.initcall3.init)
    *(.initcall4.init)
    *(.initcall5.init)
    *(.initcall6.init)
    *(.initcall7.init)
  }
  __initcall_end = .;

  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;

  SECURITY_INIT

  __start___ftr_fixup = .;
  __ftr_fixup : { *(__ftr_fixup) }
  __stop___ftr_fixup = .;

  . = ALIGN(32);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
  __per_cpu_end = .;

  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : { *(.init.ramfs) }
  __initramfs_end = .;

  . = ALIGN(4096);
  __init_end = .;

  . = ALIGN(4096);
  _sextratext = .;
  _eextratext = .;

  __bss_start = .;
  .bss :
  {
    *(.sbss) *(.scommon)
    *(.dynbss)
    *(.bss)
    *(COMMON)
  }
  __bss_stop = .;

  _end = . ;
  PROVIDE (end = .);

  /* Sections to be discarded. */
  /DISCARD/ : {
    *(.exitcall.exit)
    *(.exit.data)
  }
}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile new file mode 100644 index 000000000000..347f9798e433 --- /dev/null +++ b/arch/powerpc/lib/Makefile | |||
@@ -0,0 +1,9 @@ | |||
#
# Makefile for ppc-specific library files..
#

# Common helpers built for both 32- and 64-bit kernels
obj-y			:= strcase.o string.o
# 32-bit only: 64-bit division helper, 32-bit copy and checksum routines
obj-$(CONFIG_PPC32)	+= div64.o copy32.o checksum.o
# 64-bit only implementations
obj-$(CONFIG_PPC64)	+= copypage.o copyuser.o memcpy.o usercopy.o \
			   sstep.o checksum64.o
# iSeries EBCDIC-to-ASCII conversion
obj-$(CONFIG_PPC_ISERIES) += e2a.o
diff --git a/arch/powerpc/lib/checksum.S b/arch/powerpc/lib/checksum.S new file mode 100644 index 000000000000..7874e8a80455 --- /dev/null +++ b/arch/powerpc/lib/checksum.S | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * This file contains assembly-language implementations | ||
3 | * of IP-style 1's complement checksum routines. | ||
4 | * | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au). | ||
13 | */ | ||
14 | |||
15 | #include <linux/sys.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/ppc_asm.h> | ||
19 | |||
20 | .text | ||
21 | |||
/*
 * ip_fast_csum(buf, len) -- Optimized for IP header
 * len is in words and is always >= 5.
 * Returns the 16-bit ones-complement checksum in r3.
 */
_GLOBAL(ip_fast_csum)
	lwz	r0,0(r3)
	lwzu	r5,4(r3)
	addic.	r4,r4,-2	/* first two words summed above */
	addc	r0,r0,r5
	mtctr	r4
	blelr-
1:	lwzu	r4,4(r3)
	adde	r0,r0,r4
	bdnz	1b
	addze	r0,r0		/* add in final carry */
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr
42 | |||
/*
 * Compute checksum of TCP or UDP pseudo-header:
 * csum_tcpudp_magic(saddr, daddr, len, proto, sum)
 * Returns the folded 16-bit checksum in r3.
 */
_GLOBAL(csum_tcpudp_magic)
	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
	addc	r0,r3,r4	/* add 4 32-bit words together */
	adde	r0,r0,r5
	adde	r0,r0,r7
	addze	r0,r0		/* add in final carry */
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr
58 | |||
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * csum_partial(buff, len, sum)
 * Returns the 32-bit partial checksum in r3.
 */
_GLOBAL(csum_partial)
	addic	r0,r5,0		/* start from "sum", clearing carry */
	subi	r3,r3,4		/* bias pointer for the 4(r3) update forms */
	srwi.	r6,r4,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r5,r3,2		/* Align buffer to longword boundary */
	beq+	1f
	lhz	r5,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r4,r4,2
	addc	r0,r0,r5
	srwi.	r6,r4,2		/* # words to do */
	beq	3f
1:	mtctr	r6
2:	lwzu	r5,4(r3)	/* the bdnz has zero overhead, so it should */
	adde	r0,r0,r5	/* be unnecessary to unroll this loop */
	bdnz	2b
	andi.	r4,r4,3
3:	cmpwi	0,r4,2		/* trailing halfword? */
	blt+	4f
	lhz	r5,4(r3)
	addi	r3,r3,2
	subi	r4,r4,2
	adde	r0,r0,r5
4:	cmpwi	0,r4,1		/* trailing byte? */
	bne+	5f
	lbz	r5,4(r3)
	slwi	r5,r5,8		/* Upper byte of word */
	adde	r0,r0,r5
5:	addze	r3,r0		/* add in final carry */
	blr
96 | |||
/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
 *
 * The numeric labels on loads/stores below are referenced from the
 * __ex_table section at the end of this file for fault recovery.
 */
_GLOBAL(csum_partial_copy_generic)
	addic	r0,r6,0		/* start from "sum", clearing carry */
	subi	r3,r3,4		/* bias src/dst for the 4(rX) update forms */
	subi	r4,r4,4
	srwi.	r6,r5,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r9,r4,2		/* Align dst to longword boundary */
	beq+	1f
81:	lhz	r6,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r5,r5,2
91:	sth	r6,4(r4)
	addi	r4,r4,2
	addc	r0,r0,r6
	srwi.	r6,r5,2		/* # words to do */
	beq	3f
1:	srwi.	r6,r5,4		/* # groups of 4 words to do */
	beq	10f
	mtctr	r6
71:	lwz	r6,4(r3)	/* unrolled 4-words-per-iteration loop */
72:	lwz	r9,8(r3)
73:	lwz	r10,12(r3)
74:	lwzu	r11,16(r3)
	adde	r0,r0,r6
75:	stw	r6,4(r4)
	adde	r0,r0,r9
76:	stw	r9,8(r4)
	adde	r0,r0,r10
77:	stw	r10,12(r4)
	adde	r0,r0,r11
78:	stwu	r11,16(r4)
	bdnz	71b
10:	rlwinm.	r6,r5,30,30,31	/* # words left to do */
	beq	13f
	mtctr	r6
82:	lwzu	r9,4(r3)
92:	stwu	r9,4(r4)
	adde	r0,r0,r9
	bdnz	82b
13:	andi.	r5,r5,3
3:	cmpwi	0,r5,2		/* trailing halfword? */
	blt+	4f
83:	lhz	r6,4(r3)
	addi	r3,r3,2
	subi	r5,r5,2
93:	sth	r6,4(r4)
	addi	r4,r4,2
	adde	r0,r0,r6
4:	cmpwi	0,r5,1		/* trailing byte? */
	bne+	5f
84:	lbz	r6,4(r3)
94:	stb	r6,4(r4)
	slwi	r6,r6,8		/* Upper byte of word */
	adde	r0,r0,r6
5:	addze	r3,r0		/* add in final carry */
	blr
162 | |||
/* These shouldn't go in the fixup section, since that would
   cause the ex_table addresses to get out of order. */

/* Fault inside the unrolled word loop: recover the remaining byte
   count from CTR before falling into the zero-fill path. */
src_error_4:
	mfctr	r6		/* update # bytes remaining from ctr */
	rlwimi	r5,r6,4,0,27
	b	79f
src_error_1:
	li	r6,0
	subi	r5,r5,2
95:	sth	r6,4(r4)
	addi	r4,r4,2
79:	srwi.	r6,r5,2
	beq	3f
	mtctr	r6
src_error_2:
	li	r6,0
96:	stwu	r6,4(r4)	/* zero-fill remaining whole words of dst */
	bdnz	96b
3:	andi.	r5,r5,3
	beq	src_error
src_error_3:
	li	r6,0
	mtctr	r5
	addi	r4,r4,3
97:	stbu	r6,1(r4)	/* zero-fill remaining bytes of dst */
	bdnz	97b
/* Source fault: report -EFAULT via *src_err (r7) if non-NULL and
   return the partial checksum accumulated so far. */
src_error:
	cmpwi	0,r7,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r7)
1:	addze	r3,r0
	blr

/* Destination fault: report -EFAULT via *dst_err (r8) if non-NULL. */
dst_error:
	cmpwi	0,r8,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r8)
1:	addze	r3,r0
	blr

/* Exception table: map each faultable load/store to its recovery code. */
	.section __ex_table,"a"
	.long	81b,src_error_1
	.long	91b,dst_error
	.long	71b,src_error_4
	.long	72b,src_error_4
	.long	73b,src_error_4
	.long	74b,src_error_4
	.long	75b,dst_error
	.long	76b,dst_error
	.long	77b,dst_error
	.long	78b,dst_error
	.long	82b,src_error_2
	.long	92b,dst_error
	.long	83b,src_error_3
	.long	93b,dst_error
	.long	84b,src_error_3
	.long	94b,dst_error
	.long	95b,dst_error
	.long	96b,dst_error
	.long	97b,dst_error
diff --git a/arch/powerpc/lib/checksum64.S b/arch/powerpc/lib/checksum64.S new file mode 100644 index 000000000000..ef96c6c58efc --- /dev/null +++ b/arch/powerpc/lib/checksum64.S | |||
@@ -0,0 +1,229 @@ | |||
1 | /* | ||
2 | * This file contains assembly-language implementations | ||
3 | * of IP-style 1's complement checksum routines. | ||
4 | * | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au). | ||
13 | */ | ||
14 | |||
15 | #include <linux/sys.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/ppc_asm.h> | ||
19 | |||
20 | /* | ||
21 | * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header | ||
22 | * len is in words and is always >= 5. | ||
23 | * | ||
24 | * In practice len == 5, but this is not guaranteed. So this code does not | ||
25 | * attempt to use doubleword instructions. | ||
26 | */ | ||
27 | _GLOBAL(ip_fast_csum) | ||
28 | lwz r0,0(r3) | ||
29 | lwzu r5,4(r3) | ||
30 | addic. r4,r4,-2 | ||
31 | addc r0,r0,r5 | ||
32 | mtctr r4 | ||
33 | blelr- | ||
34 | 1: lwzu r4,4(r3) | ||
35 | adde r0,r0,r4 | ||
36 | bdnz 1b | ||
37 | addze r0,r0 /* add in final carry */ | ||
38 | rldicl r4,r0,32,0 /* fold two 32-bit halves together */ | ||
39 | add r0,r0,r4 | ||
40 | srdi r0,r0,32 | ||
41 | rlwinm r3,r0,16,0,31 /* fold two halves together */ | ||
42 | add r3,r0,r3 | ||
43 | not r3,r3 | ||
44 | srwi r3,r3,16 | ||
45 | blr | ||
46 | |||
47 | /* | ||
48 | * Compute checksum of TCP or UDP pseudo-header: | ||
49 | * csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum) | ||
50 | * No real gain trying to do this specially for 64 bit, but | ||
51 | * the 32 bit addition may spill into the upper bits of | ||
52 | * the doubleword so we still must fold it down from 64. | ||
53 | */ | ||
54 | _GLOBAL(csum_tcpudp_magic) | ||
55 | rlwimi r5,r6,16,0,15 /* put proto in upper half of len */ | ||
56 | addc r0,r3,r4 /* add 4 32-bit words together */ | ||
57 | adde r0,r0,r5 | ||
58 | adde r0,r0,r7 | ||
59 | rldicl r4,r0,32,0 /* fold 64 bit value */ | ||
60 | add r0,r4,r0 | ||
61 | srdi r0,r0,32 | ||
62 | rlwinm r3,r0,16,0,31 /* fold two halves together */ | ||
63 | add r3,r0,r3 | ||
64 | not r3,r3 | ||
65 | srwi r3,r3,16 | ||
66 | blr | ||
67 | |||
68 | /* | ||
69 | * Computes the checksum of a memory block at buff, length len, | ||
70 | * and adds in "sum" (32-bit). | ||
71 | * | ||
72 | * This code assumes at least halfword alignment, though the length | ||
73 | * can be any number of bytes. The sum is accumulated in r5. | ||
74 | * | ||
75 | * csum_partial(r3=buff, r4=len, r5=sum) | ||
76 | */ | ||
77 | _GLOBAL(csum_partial) | ||
78 | subi r3,r3,8 /* we'll offset by 8 for the loads */ | ||
79 | srdi. r6,r4,3 /* divide by 8 for doubleword count */ | ||
80 | addic r5,r5,0 /* clear carry */ | ||
81 | beq 3f /* if we're doing < 8 bytes */ | ||
82 | andi. r0,r3,2 /* aligned on a word boundary already? */ | ||
83 | beq+ 1f | ||
84 | lhz r6,8(r3) /* do 2 bytes to get aligned */ | ||
85 | addi r3,r3,2 | ||
86 | subi r4,r4,2 | ||
87 | addc r5,r5,r6 | ||
88 | srdi. r6,r4,3 /* recompute number of doublewords */ | ||
89 | beq 3f /* any left? */ | ||
90 | 1: mtctr r6 | ||
91 | 2: ldu r6,8(r3) /* main sum loop */ | ||
92 | adde r5,r5,r6 | ||
93 | bdnz 2b | ||
94 | andi. r4,r4,7 /* compute bytes left to sum after doublewords */ | ||
95 | 3: cmpwi 0,r4,4 /* is at least a full word left? */ | ||
96 | blt 4f | ||
97 | lwz r6,8(r3) /* sum this word */ | ||
98 | addi r3,r3,4 | ||
99 | subi r4,r4,4 | ||
100 | adde r5,r5,r6 | ||
101 | 4: cmpwi 0,r4,2 /* is at least a halfword left? */ | ||
102 | blt+ 5f | ||
103 | lhz r6,8(r3) /* sum this halfword */ | ||
104 | addi r3,r3,2 | ||
105 | subi r4,r4,2 | ||
106 | adde r5,r5,r6 | ||
107 | 5: cmpwi 0,r4,1 /* is at least a byte left? */ | ||
108 | bne+ 6f | ||
109 | lbz r6,8(r3) /* sum this byte */ | ||
110 | slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */ | ||
111 | adde r5,r5,r6 | ||
112 | 6: addze r5,r5 /* add in final carry */ | ||
113 | rldicl r4,r5,32,0 /* fold two 32-bit halves together */ | ||
114 | add r3,r4,r5 | ||
115 | srdi r3,r3,32 | ||
116 | blr | ||
117 | |||
118 | /* | ||
119 | * Computes the checksum of a memory block at src, length len, | ||
120 | * and adds in "sum" (32-bit), while copying the block to dst. | ||
121 | * If an access exception occurs on src or dst, it stores -EFAULT | ||
122 | * to *src_err or *dst_err respectively, and (for an error on | ||
123 | * src) zeroes the rest of dst. | ||
124 | * | ||
125 | * This code needs to be reworked to take advantage of 64 bit sum+copy. | ||
126 | * However, due to tokenring halfword alignment problems this will be very | ||
127 | * tricky. For now we'll leave it until we instrument it somehow. | ||
128 | * | ||
129 | * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err) | ||
130 | */ | ||
131 | _GLOBAL(csum_partial_copy_generic) | ||
132 | addic r0,r6,0 | ||
133 | subi r3,r3,4 | ||
134 | subi r4,r4,4 | ||
135 | srwi. r6,r5,2 | ||
136 | beq 3f /* if we're doing < 4 bytes */ | ||
137 | andi. r9,r4,2 /* Align dst to longword boundary */ | ||
138 | beq+ 1f | ||
139 | 81: lhz r6,4(r3) /* do 2 bytes to get aligned */ | ||
140 | addi r3,r3,2 | ||
141 | subi r5,r5,2 | ||
142 | 91: sth r6,4(r4) | ||
143 | addi r4,r4,2 | ||
144 | addc r0,r0,r6 | ||
145 | srwi. r6,r5,2 /* # words to do */ | ||
146 | beq 3f | ||
147 | 1: mtctr r6 | ||
148 | 82: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */ | ||
149 | 92: stwu r6,4(r4) /* be unnecessary to unroll this loop */ | ||
150 | adde r0,r0,r6 | ||
151 | bdnz 82b | ||
152 | andi. r5,r5,3 | ||
153 | 3: cmpwi 0,r5,2 | ||
154 | blt+ 4f | ||
155 | 83: lhz r6,4(r3) | ||
156 | addi r3,r3,2 | ||
157 | subi r5,r5,2 | ||
158 | 93: sth r6,4(r4) | ||
159 | addi r4,r4,2 | ||
160 | adde r0,r0,r6 | ||
161 | 4: cmpwi 0,r5,1 | ||
162 | bne+ 5f | ||
163 | 84: lbz r6,4(r3) | ||
164 | 94: stb r6,4(r4) | ||
165 | slwi r6,r6,8 /* Upper byte of word */ | ||
166 | adde r0,r0,r6 | ||
167 | 5: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */ | ||
168 | rldicl r4,r3,32,0 /* fold 64 bit value */ | ||
169 | add r3,r4,r3 | ||
170 | srdi r3,r3,32 | ||
171 | blr | ||
172 | |||
173 | /* These shouldn't go in the fixup section, since that would | ||
174 | cause the ex_table addresses to get out of order. */ | ||
175 | |||
176 | .globl src_error_1 | ||
177 | src_error_1: | ||
178 | li r6,0 | ||
179 | subi r5,r5,2 | ||
180 | 95: sth r6,4(r4) | ||
181 | addi r4,r4,2 | ||
182 | srwi. r6,r5,2 | ||
183 | beq 3f | ||
184 | mtctr r6 | ||
185 | .globl src_error_2 | ||
186 | src_error_2: | ||
187 | li r6,0 | ||
188 | 96: stwu r6,4(r4) | ||
189 | bdnz 96b | ||
190 | 3: andi. r5,r5,3 | ||
191 | beq src_error | ||
192 | .globl src_error_3 | ||
193 | src_error_3: | ||
194 | li r6,0 | ||
195 | mtctr r5 | ||
196 | addi r4,r4,3 | ||
197 | 97: stbu r6,1(r4) | ||
198 | bdnz 97b | ||
199 | .globl src_error | ||
200 | src_error: | ||
201 | cmpdi 0,r7,0 | ||
202 | beq 1f | ||
203 | li r6,-EFAULT | ||
204 | stw r6,0(r7) | ||
205 | 1: addze r3,r0 | ||
206 | blr | ||
207 | |||
208 | .globl dst_error | ||
209 | dst_error: | ||
210 | cmpdi 0,r8,0 | ||
211 | beq 1f | ||
212 | li r6,-EFAULT | ||
213 | stw r6,0(r8) | ||
214 | 1: addze r3,r0 | ||
215 | blr | ||
216 | |||
217 | .section __ex_table,"a" | ||
218 | .align 3 | ||
219 | .llong 81b,src_error_1 | ||
220 | .llong 91b,dst_error | ||
221 | .llong 82b,src_error_2 | ||
222 | .llong 92b,dst_error | ||
223 | .llong 83b,src_error_3 | ||
224 | .llong 93b,dst_error | ||
225 | .llong 84b,src_error_3 | ||
226 | .llong 94b,dst_error | ||
227 | .llong 95b,dst_error | ||
228 | .llong 96b,dst_error | ||
229 | .llong 97b,dst_error | ||
diff --git a/arch/powerpc/lib/copy32.S b/arch/powerpc/lib/copy32.S new file mode 100644 index 000000000000..420a912198a2 --- /dev/null +++ b/arch/powerpc/lib/copy32.S | |||
@@ -0,0 +1,543 @@ | |||
1 | /* | ||
2 | * Memory copy functions for 32-bit PowerPC. | ||
3 | * | ||
4 | * Copyright (C) 1996-2005 Paul Mackerras. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/config.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/cache.h> | ||
14 | #include <asm/errno.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | |||
17 | #define COPY_16_BYTES \ | ||
18 | lwz r7,4(r4); \ | ||
19 | lwz r8,8(r4); \ | ||
20 | lwz r9,12(r4); \ | ||
21 | lwzu r10,16(r4); \ | ||
22 | stw r7,4(r6); \ | ||
23 | stw r8,8(r6); \ | ||
24 | stw r9,12(r6); \ | ||
25 | stwu r10,16(r6) | ||
26 | |||
27 | #define COPY_16_BYTES_WITHEX(n) \ | ||
28 | 8 ## n ## 0: \ | ||
29 | lwz r7,4(r4); \ | ||
30 | 8 ## n ## 1: \ | ||
31 | lwz r8,8(r4); \ | ||
32 | 8 ## n ## 2: \ | ||
33 | lwz r9,12(r4); \ | ||
34 | 8 ## n ## 3: \ | ||
35 | lwzu r10,16(r4); \ | ||
36 | 8 ## n ## 4: \ | ||
37 | stw r7,4(r6); \ | ||
38 | 8 ## n ## 5: \ | ||
39 | stw r8,8(r6); \ | ||
40 | 8 ## n ## 6: \ | ||
41 | stw r9,12(r6); \ | ||
42 | 8 ## n ## 7: \ | ||
43 | stwu r10,16(r6) | ||
44 | |||
45 | #define COPY_16_BYTES_EXCODE(n) \ | ||
46 | 9 ## n ## 0: \ | ||
47 | addi r5,r5,-(16 * n); \ | ||
48 | b 104f; \ | ||
49 | 9 ## n ## 1: \ | ||
50 | addi r5,r5,-(16 * n); \ | ||
51 | b 105f; \ | ||
52 | .section __ex_table,"a"; \ | ||
53 | .align 2; \ | ||
54 | .long 8 ## n ## 0b,9 ## n ## 0b; \ | ||
55 | .long 8 ## n ## 1b,9 ## n ## 0b; \ | ||
56 | .long 8 ## n ## 2b,9 ## n ## 0b; \ | ||
57 | .long 8 ## n ## 3b,9 ## n ## 0b; \ | ||
58 | .long 8 ## n ## 4b,9 ## n ## 1b; \ | ||
59 | .long 8 ## n ## 5b,9 ## n ## 1b; \ | ||
60 | .long 8 ## n ## 6b,9 ## n ## 1b; \ | ||
61 | .long 8 ## n ## 7b,9 ## n ## 1b; \ | ||
62 | .text | ||
63 | |||
64 | .text | ||
65 | .stabs "arch/powerpc/lib/",N_SO,0,0,0f | ||
66 | .stabs "copy32.S",N_SO,0,0,0f | ||
67 | 0: | ||
68 | |||
69 | CACHELINE_BYTES = L1_CACHE_LINE_SIZE | ||
70 | LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE | ||
71 | CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1) | ||
72 | |||
73 | /* | ||
74 | * Use dcbz on the complete cache lines in the destination | ||
75 | * to set them to zero. This requires that the destination | ||
76 | * area is cacheable. -- paulus | ||
77 | */ | ||
78 | _GLOBAL(cacheable_memzero) | ||
79 | mr r5,r4 | ||
80 | li r4,0 | ||
81 | addi r6,r3,-4 | ||
82 | cmplwi 0,r5,4 | ||
83 | blt 7f | ||
84 | stwu r4,4(r6) | ||
85 | beqlr | ||
86 | andi. r0,r6,3 | ||
87 | add r5,r0,r5 | ||
88 | subf r6,r0,r6 | ||
89 | clrlwi r7,r6,32-LG_CACHELINE_BYTES | ||
90 | add r8,r7,r5 | ||
91 | srwi r9,r8,LG_CACHELINE_BYTES | ||
92 | addic. r9,r9,-1 /* total number of complete cachelines */ | ||
93 | ble 2f | ||
94 | xori r0,r7,CACHELINE_MASK & ~3 | ||
95 | srwi. r0,r0,2 | ||
96 | beq 3f | ||
97 | mtctr r0 | ||
98 | 4: stwu r4,4(r6) | ||
99 | bdnz 4b | ||
100 | 3: mtctr r9 | ||
101 | li r7,4 | ||
102 | #if !defined(CONFIG_8xx) | ||
103 | 10: dcbz r7,r6 | ||
104 | #else | ||
105 | 10: stw r4, 4(r6) | ||
106 | stw r4, 8(r6) | ||
107 | stw r4, 12(r6) | ||
108 | stw r4, 16(r6) | ||
109 | #if CACHE_LINE_SIZE >= 32 | ||
110 | stw r4, 20(r6) | ||
111 | stw r4, 24(r6) | ||
112 | stw r4, 28(r6) | ||
113 | stw r4, 32(r6) | ||
114 | #endif /* CACHE_LINE_SIZE */ | ||
115 | #endif | ||
116 | addi r6,r6,CACHELINE_BYTES | ||
117 | bdnz 10b | ||
118 | clrlwi r5,r8,32-LG_CACHELINE_BYTES | ||
119 | addi r5,r5,4 | ||
120 | 2: srwi r0,r5,2 | ||
121 | mtctr r0 | ||
122 | bdz 6f | ||
123 | 1: stwu r4,4(r6) | ||
124 | bdnz 1b | ||
125 | 6: andi. r5,r5,3 | ||
126 | 7: cmpwi 0,r5,0 | ||
127 | beqlr | ||
128 | mtctr r5 | ||
129 | addi r6,r6,3 | ||
130 | 8: stbu r4,1(r6) | ||
131 | bdnz 8b | ||
132 | blr | ||
133 | |||
134 | _GLOBAL(memset) | ||
135 | rlwimi r4,r4,8,16,23 | ||
136 | rlwimi r4,r4,16,0,15 | ||
137 | addi r6,r3,-4 | ||
138 | cmplwi 0,r5,4 | ||
139 | blt 7f | ||
140 | stwu r4,4(r6) | ||
141 | beqlr | ||
142 | andi. r0,r6,3 | ||
143 | add r5,r0,r5 | ||
144 | subf r6,r0,r6 | ||
145 | srwi r0,r5,2 | ||
146 | mtctr r0 | ||
147 | bdz 6f | ||
148 | 1: stwu r4,4(r6) | ||
149 | bdnz 1b | ||
150 | 6: andi. r5,r5,3 | ||
151 | 7: cmpwi 0,r5,0 | ||
152 | beqlr | ||
153 | mtctr r5 | ||
154 | addi r6,r6,3 | ||
155 | 8: stbu r4,1(r6) | ||
156 | bdnz 8b | ||
157 | blr | ||
158 | |||
159 | /* | ||
160 | * This version uses dcbz on the complete cache lines in the | ||
161 | * destination area to reduce memory traffic. This requires that | ||
162 | * the destination area is cacheable. | ||
163 | * We only use this version if the source and dest don't overlap. | ||
164 | * -- paulus. | ||
165 | */ | ||
166 | _GLOBAL(cacheable_memcpy) | ||
167 | add r7,r3,r5 /* test if the src & dst overlap */ | ||
168 | add r8,r4,r5 | ||
169 | cmplw 0,r4,r7 | ||
170 | cmplw 1,r3,r8 | ||
171 | crand 0,0,4 /* cr0.lt &= cr1.lt */ | ||
172 | blt memcpy /* if regions overlap */ | ||
173 | |||
174 | addi r4,r4,-4 | ||
175 | addi r6,r3,-4 | ||
176 | neg r0,r3 | ||
177 | andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */ | ||
178 | beq 58f | ||
179 | |||
180 | cmplw 0,r5,r0 /* is this more than total to do? */ | ||
181 | blt 63f /* if not much to do */ | ||
182 | andi. r8,r0,3 /* get it word-aligned first */ | ||
183 | subf r5,r0,r5 | ||
184 | mtctr r8 | ||
185 | beq+ 61f | ||
186 | 70: lbz r9,4(r4) /* do some bytes */ | ||
187 | stb r9,4(r6) | ||
188 | addi r4,r4,1 | ||
189 | addi r6,r6,1 | ||
190 | bdnz 70b | ||
191 | 61: srwi. r0,r0,2 | ||
192 | mtctr r0 | ||
193 | beq 58f | ||
194 | 72: lwzu r9,4(r4) /* do some words */ | ||
195 | stwu r9,4(r6) | ||
196 | bdnz 72b | ||
197 | |||
198 | 58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ | ||
199 | clrlwi r5,r5,32-LG_CACHELINE_BYTES | ||
200 | li r11,4 | ||
201 | mtctr r0 | ||
202 | beq 63f | ||
203 | 53: | ||
204 | #if !defined(CONFIG_8xx) | ||
205 | dcbz r11,r6 | ||
206 | #endif | ||
207 | COPY_16_BYTES | ||
208 | #if L1_CACHE_LINE_SIZE >= 32 | ||
209 | COPY_16_BYTES | ||
210 | #if L1_CACHE_LINE_SIZE >= 64 | ||
211 | COPY_16_BYTES | ||
212 | COPY_16_BYTES | ||
213 | #if L1_CACHE_LINE_SIZE >= 128 | ||
214 | COPY_16_BYTES | ||
215 | COPY_16_BYTES | ||
216 | COPY_16_BYTES | ||
217 | COPY_16_BYTES | ||
218 | #endif | ||
219 | #endif | ||
220 | #endif | ||
221 | bdnz 53b | ||
222 | |||
223 | 63: srwi. r0,r5,2 | ||
224 | mtctr r0 | ||
225 | beq 64f | ||
226 | 30: lwzu r0,4(r4) | ||
227 | stwu r0,4(r6) | ||
228 | bdnz 30b | ||
229 | |||
230 | 64: andi. r0,r5,3 | ||
231 | mtctr r0 | ||
232 | beq+ 65f | ||
233 | 40: lbz r0,4(r4) | ||
234 | stb r0,4(r6) | ||
235 | addi r4,r4,1 | ||
236 | addi r6,r6,1 | ||
237 | bdnz 40b | ||
238 | 65: blr | ||
239 | |||
240 | _GLOBAL(memmove) | ||
241 | cmplw 0,r3,r4 | ||
242 | bgt backwards_memcpy | ||
243 | /* fall through */ | ||
244 | |||
245 | _GLOBAL(memcpy) | ||
246 | srwi. r7,r5,3 | ||
247 | addi r6,r3,-4 | ||
248 | addi r4,r4,-4 | ||
249 | beq 2f /* if less than 8 bytes to do */ | ||
250 | andi. r0,r6,3 /* get dest word aligned */ | ||
251 | mtctr r7 | ||
252 | bne 5f | ||
253 | 1: lwz r7,4(r4) | ||
254 | lwzu r8,8(r4) | ||
255 | stw r7,4(r6) | ||
256 | stwu r8,8(r6) | ||
257 | bdnz 1b | ||
258 | andi. r5,r5,7 | ||
259 | 2: cmplwi 0,r5,4 | ||
260 | blt 3f | ||
261 | lwzu r0,4(r4) | ||
262 | addi r5,r5,-4 | ||
263 | stwu r0,4(r6) | ||
264 | 3: cmpwi 0,r5,0 | ||
265 | beqlr | ||
266 | mtctr r5 | ||
267 | addi r4,r4,3 | ||
268 | addi r6,r6,3 | ||
269 | 4: lbzu r0,1(r4) | ||
270 | stbu r0,1(r6) | ||
271 | bdnz 4b | ||
272 | blr | ||
273 | 5: subfic r0,r0,4 | ||
274 | mtctr r0 | ||
275 | 6: lbz r7,4(r4) | ||
276 | addi r4,r4,1 | ||
277 | stb r7,4(r6) | ||
278 | addi r6,r6,1 | ||
279 | bdnz 6b | ||
280 | subf r5,r0,r5 | ||
281 | rlwinm. r7,r5,32-3,3,31 | ||
282 | beq 2b | ||
283 | mtctr r7 | ||
284 | b 1b | ||
285 | |||
286 | _GLOBAL(backwards_memcpy) | ||
287 | rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ | ||
288 | add r6,r3,r5 | ||
289 | add r4,r4,r5 | ||
290 | beq 2f | ||
291 | andi. r0,r6,3 | ||
292 | mtctr r7 | ||
293 | bne 5f | ||
294 | 1: lwz r7,-4(r4) | ||
295 | lwzu r8,-8(r4) | ||
296 | stw r7,-4(r6) | ||
297 | stwu r8,-8(r6) | ||
298 | bdnz 1b | ||
299 | andi. r5,r5,7 | ||
300 | 2: cmplwi 0,r5,4 | ||
301 | blt 3f | ||
302 | lwzu r0,-4(r4) | ||
303 | subi r5,r5,4 | ||
304 | stwu r0,-4(r6) | ||
305 | 3: cmpwi 0,r5,0 | ||
306 | beqlr | ||
307 | mtctr r5 | ||
308 | 4: lbzu r0,-1(r4) | ||
309 | stbu r0,-1(r6) | ||
310 | bdnz 4b | ||
311 | blr | ||
312 | 5: mtctr r0 | ||
313 | 6: lbzu r7,-1(r4) | ||
314 | stbu r7,-1(r6) | ||
315 | bdnz 6b | ||
316 | subf r5,r0,r5 | ||
317 | rlwinm. r7,r5,32-3,3,31 | ||
318 | beq 2b | ||
319 | mtctr r7 | ||
320 | b 1b | ||
321 | |||
322 | _GLOBAL(__copy_tofrom_user) | ||
323 | addi r4,r4,-4 | ||
324 | addi r6,r3,-4 | ||
325 | neg r0,r3 | ||
326 | andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */ | ||
327 | beq 58f | ||
328 | |||
329 | cmplw 0,r5,r0 /* is this more than total to do? */ | ||
330 | blt 63f /* if not much to do */ | ||
331 | andi. r8,r0,3 /* get it word-aligned first */ | ||
332 | mtctr r8 | ||
333 | beq+ 61f | ||
334 | 70: lbz r9,4(r4) /* do some bytes */ | ||
335 | 71: stb r9,4(r6) | ||
336 | addi r4,r4,1 | ||
337 | addi r6,r6,1 | ||
338 | bdnz 70b | ||
339 | 61: subf r5,r0,r5 | ||
340 | srwi. r0,r0,2 | ||
341 | mtctr r0 | ||
342 | beq 58f | ||
343 | 72: lwzu r9,4(r4) /* do some words */ | ||
344 | 73: stwu r9,4(r6) | ||
345 | bdnz 72b | ||
346 | |||
347 | .section __ex_table,"a" | ||
348 | .align 2 | ||
349 | .long 70b,100f | ||
350 | .long 71b,101f | ||
351 | .long 72b,102f | ||
352 | .long 73b,103f | ||
353 | .text | ||
354 | |||
355 | 58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ | ||
356 | clrlwi r5,r5,32-LG_CACHELINE_BYTES | ||
357 | li r11,4 | ||
358 | beq 63f | ||
359 | |||
360 | #ifdef CONFIG_8xx | ||
361 | /* Don't use prefetch on 8xx */ | ||
362 | mtctr r0 | ||
363 | li r0,0 | ||
364 | 53: COPY_16_BYTES_WITHEX(0) | ||
365 | bdnz 53b | ||
366 | |||
367 | #else /* not CONFIG_8xx */ | ||
368 | /* Here we decide how far ahead to prefetch the source */ | ||
369 | li r3,4 | ||
370 | cmpwi r0,1 | ||
371 | li r7,0 | ||
372 | ble 114f | ||
373 | li r7,1 | ||
374 | #if MAX_COPY_PREFETCH > 1 | ||
375 | /* Heuristically, for large transfers we prefetch | ||
376 | MAX_COPY_PREFETCH cachelines ahead. For small transfers | ||
377 | we prefetch 1 cacheline ahead. */ | ||
378 | cmpwi r0,MAX_COPY_PREFETCH | ||
379 | ble 112f | ||
380 | li r7,MAX_COPY_PREFETCH | ||
381 | 112: mtctr r7 | ||
382 | 111: dcbt r3,r4 | ||
383 | addi r3,r3,CACHELINE_BYTES | ||
384 | bdnz 111b | ||
385 | #else | ||
386 | dcbt r3,r4 | ||
387 | addi r3,r3,CACHELINE_BYTES | ||
388 | #endif /* MAX_COPY_PREFETCH > 1 */ | ||
389 | |||
390 | 114: subf r8,r7,r0 | ||
391 | mr r0,r7 | ||
392 | mtctr r8 | ||
393 | |||
394 | 53: dcbt r3,r4 | ||
395 | 54: dcbz r11,r6 | ||
396 | .section __ex_table,"a" | ||
397 | .align 2 | ||
398 | .long 54b,105f | ||
399 | .text | ||
400 | /* the main body of the cacheline loop */ | ||
401 | COPY_16_BYTES_WITHEX(0) | ||
402 | #if L1_CACHE_LINE_SIZE >= 32 | ||
403 | COPY_16_BYTES_WITHEX(1) | ||
404 | #if L1_CACHE_LINE_SIZE >= 64 | ||
405 | COPY_16_BYTES_WITHEX(2) | ||
406 | COPY_16_BYTES_WITHEX(3) | ||
407 | #if L1_CACHE_LINE_SIZE >= 128 | ||
408 | COPY_16_BYTES_WITHEX(4) | ||
409 | COPY_16_BYTES_WITHEX(5) | ||
410 | COPY_16_BYTES_WITHEX(6) | ||
411 | COPY_16_BYTES_WITHEX(7) | ||
412 | #endif | ||
413 | #endif | ||
414 | #endif | ||
415 | bdnz 53b | ||
416 | cmpwi r0,0 | ||
417 | li r3,4 | ||
418 | li r7,0 | ||
419 | bne 114b | ||
420 | #endif /* CONFIG_8xx */ | ||
421 | |||
422 | 63: srwi. r0,r5,2 | ||
423 | mtctr r0 | ||
424 | beq 64f | ||
425 | 30: lwzu r0,4(r4) | ||
426 | 31: stwu r0,4(r6) | ||
427 | bdnz 30b | ||
428 | |||
429 | 64: andi. r0,r5,3 | ||
430 | mtctr r0 | ||
431 | beq+ 65f | ||
432 | 40: lbz r0,4(r4) | ||
433 | 41: stb r0,4(r6) | ||
434 | addi r4,r4,1 | ||
435 | addi r6,r6,1 | ||
436 | bdnz 40b | ||
437 | 65: li r3,0 | ||
438 | blr | ||
439 | |||
440 | /* read fault, initial single-byte copy */ | ||
441 | 100: li r9,0 | ||
442 | b 90f | ||
443 | /* write fault, initial single-byte copy */ | ||
444 | 101: li r9,1 | ||
445 | 90: subf r5,r8,r5 | ||
446 | li r3,0 | ||
447 | b 99f | ||
448 | /* read fault, initial word copy */ | ||
449 | 102: li r9,0 | ||
450 | b 91f | ||
451 | /* write fault, initial word copy */ | ||
452 | 103: li r9,1 | ||
453 | 91: li r3,2 | ||
454 | b 99f | ||
455 | |||
456 | /* | ||
457 | * this stuff handles faults in the cacheline loop and branches to either | ||
458 | * 104f (if in read part) or 105f (if in write part), after updating r5 | ||
459 | */ | ||
460 | COPY_16_BYTES_EXCODE(0) | ||
461 | #if L1_CACHE_LINE_SIZE >= 32 | ||
462 | COPY_16_BYTES_EXCODE(1) | ||
463 | #if L1_CACHE_LINE_SIZE >= 64 | ||
464 | COPY_16_BYTES_EXCODE(2) | ||
465 | COPY_16_BYTES_EXCODE(3) | ||
466 | #if L1_CACHE_LINE_SIZE >= 128 | ||
467 | COPY_16_BYTES_EXCODE(4) | ||
468 | COPY_16_BYTES_EXCODE(5) | ||
469 | COPY_16_BYTES_EXCODE(6) | ||
470 | COPY_16_BYTES_EXCODE(7) | ||
471 | #endif | ||
472 | #endif | ||
473 | #endif | ||
474 | |||
475 | /* read fault in cacheline loop */ | ||
476 | 104: li r9,0 | ||
477 | b 92f | ||
478 | /* fault on dcbz (effectively a write fault) */ | ||
479 | /* or write fault in cacheline loop */ | ||
480 | 105: li r9,1 | ||
481 | 92: li r3,LG_CACHELINE_BYTES | ||
482 | mfctr r8 | ||
483 | add r0,r0,r8 | ||
484 | b 106f | ||
485 | /* read fault in final word loop */ | ||
486 | 108: li r9,0 | ||
487 | b 93f | ||
488 | /* write fault in final word loop */ | ||
489 | 109: li r9,1 | ||
490 | 93: andi. r5,r5,3 | ||
491 | li r3,2 | ||
492 | b 99f | ||
493 | /* read fault in final byte loop */ | ||
494 | 110: li r9,0 | ||
495 | b 94f | ||
496 | /* write fault in final byte loop */ | ||
497 | 111: li r9,1 | ||
498 | 94: li r5,0 | ||
499 | li r3,0 | ||
500 | /* | ||
501 | * At this stage the number of bytes not copied is | ||
502 | * r5 + (ctr << r3), and r9 is 0 for read or 1 for write. | ||
503 | */ | ||
504 | 99: mfctr r0 | ||
505 | 106: slw r3,r0,r3 | ||
506 | add. r3,r3,r5 | ||
507 | beq 120f /* shouldn't happen */ | ||
508 | cmpwi 0,r9,0 | ||
509 | bne 120f | ||
510 | /* for a read fault, first try to continue the copy one byte at a time */ | ||
511 | mtctr r3 | ||
512 | 130: lbz r0,4(r4) | ||
513 | 131: stb r0,4(r6) | ||
514 | addi r4,r4,1 | ||
515 | addi r6,r6,1 | ||
516 | bdnz 130b | ||
517 | /* then clear out the destination: r3 bytes starting at 4(r6) */ | ||
518 | 132: mfctr r3 | ||
519 | srwi. r0,r3,2 | ||
520 | li r9,0 | ||
521 | mtctr r0 | ||
522 | beq 113f | ||
523 | 112: stwu r9,4(r6) | ||
524 | bdnz 112b | ||
525 | 113: andi. r0,r3,3 | ||
526 | mtctr r0 | ||
527 | beq 120f | ||
528 | 114: stb r9,4(r6) | ||
529 | addi r6,r6,1 | ||
530 | bdnz 114b | ||
531 | 120: blr | ||
532 | |||
533 | .section __ex_table,"a" | ||
534 | .align 2 | ||
535 | .long 30b,108b | ||
536 | .long 31b,109b | ||
537 | .long 40b,110b | ||
538 | .long 41b,111b | ||
539 | .long 130b,132b | ||
540 | .long 131b,120b | ||
541 | .long 112b,120b | ||
542 | .long 114b,120b | ||
543 | .text | ||
diff --git a/arch/powerpc/lib/copypage.S b/arch/powerpc/lib/copypage.S new file mode 100644 index 000000000000..733d61618bbf --- /dev/null +++ b/arch/powerpc/lib/copypage.S | |||
@@ -0,0 +1,121 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/copypage.S | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/ppc_asm.h> | ||
13 | |||
14 | _GLOBAL(copy_page) | ||
15 | std r31,-8(1) | ||
16 | std r30,-16(1) | ||
17 | std r29,-24(1) | ||
18 | std r28,-32(1) | ||
19 | std r27,-40(1) | ||
20 | std r26,-48(1) | ||
21 | std r25,-56(1) | ||
22 | std r24,-64(1) | ||
23 | std r23,-72(1) | ||
24 | std r22,-80(1) | ||
25 | std r21,-88(1) | ||
26 | std r20,-96(1) | ||
27 | li r5,4096/32 - 1 | ||
28 | addi r3,r3,-8 | ||
29 | li r12,5 | ||
30 | 0: addi r5,r5,-24 | ||
31 | mtctr r12 | ||
32 | ld r22,640(4) | ||
33 | ld r21,512(4) | ||
34 | ld r20,384(4) | ||
35 | ld r11,256(4) | ||
36 | ld r9,128(4) | ||
37 | ld r7,0(4) | ||
38 | ld r25,648(4) | ||
39 | ld r24,520(4) | ||
40 | ld r23,392(4) | ||
41 | ld r10,264(4) | ||
42 | ld r8,136(4) | ||
43 | ldu r6,8(4) | ||
44 | cmpwi r5,24 | ||
45 | 1: std r22,648(3) | ||
46 | std r21,520(3) | ||
47 | std r20,392(3) | ||
48 | std r11,264(3) | ||
49 | std r9,136(3) | ||
50 | std r7,8(3) | ||
51 | ld r28,648(4) | ||
52 | ld r27,520(4) | ||
53 | ld r26,392(4) | ||
54 | ld r31,264(4) | ||
55 | ld r30,136(4) | ||
56 | ld r29,8(4) | ||
57 | std r25,656(3) | ||
58 | std r24,528(3) | ||
59 | std r23,400(3) | ||
60 | std r10,272(3) | ||
61 | std r8,144(3) | ||
62 | std r6,16(3) | ||
63 | ld r22,656(4) | ||
64 | ld r21,528(4) | ||
65 | ld r20,400(4) | ||
66 | ld r11,272(4) | ||
67 | ld r9,144(4) | ||
68 | ld r7,16(4) | ||
69 | std r28,664(3) | ||
70 | std r27,536(3) | ||
71 | std r26,408(3) | ||
72 | std r31,280(3) | ||
73 | std r30,152(3) | ||
74 | stdu r29,24(3) | ||
75 | ld r25,664(4) | ||
76 | ld r24,536(4) | ||
77 | ld r23,408(4) | ||
78 | ld r10,280(4) | ||
79 | ld r8,152(4) | ||
80 | ldu r6,24(4) | ||
81 | bdnz 1b | ||
82 | std r22,648(3) | ||
83 | std r21,520(3) | ||
84 | std r20,392(3) | ||
85 | std r11,264(3) | ||
86 | std r9,136(3) | ||
87 | std r7,8(3) | ||
88 | addi r4,r4,640 | ||
89 | addi r3,r3,648 | ||
90 | bge 0b | ||
91 | mtctr r5 | ||
92 | ld r7,0(4) | ||
93 | ld r8,8(4) | ||
94 | ldu r9,16(4) | ||
95 | 3: ld r10,8(4) | ||
96 | std r7,8(3) | ||
97 | ld r7,16(4) | ||
98 | std r8,16(3) | ||
99 | ld r8,24(4) | ||
100 | std r9,24(3) | ||
101 | ldu r9,32(4) | ||
102 | stdu r10,32(3) | ||
103 | bdnz 3b | ||
104 | 4: ld r10,8(4) | ||
105 | std r7,8(3) | ||
106 | std r8,16(3) | ||
107 | std r9,24(3) | ||
108 | std r10,32(3) | ||
109 | 9: ld r20,-96(1) | ||
110 | ld r21,-88(1) | ||
111 | ld r22,-80(1) | ||
112 | ld r23,-72(1) | ||
113 | ld r24,-64(1) | ||
114 | ld r25,-56(1) | ||
115 | ld r26,-48(1) | ||
116 | ld r27,-40(1) | ||
117 | ld r28,-32(1) | ||
118 | ld r29,-24(1) | ||
119 | ld r30,-16(1) | ||
120 | ld r31,-8(1) | ||
121 | blr | ||
diff --git a/arch/powerpc/lib/copyuser.S b/arch/powerpc/lib/copyuser.S new file mode 100644 index 000000000000..a0b3fbbd6fb1 --- /dev/null +++ b/arch/powerpc/lib/copyuser.S | |||
@@ -0,0 +1,576 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/copyuser.S | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/ppc_asm.h> | ||
13 | |||
14 | .align 7 | ||
15 | _GLOBAL(__copy_tofrom_user) | ||
16 | /* first check for a whole page copy on a page boundary */ | ||
17 | cmpldi cr1,r5,16 | ||
18 | cmpdi cr6,r5,4096 | ||
19 | or r0,r3,r4 | ||
20 | neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */ | ||
21 | andi. r0,r0,4095 | ||
22 | std r3,-24(r1) | ||
23 | crand cr0*4+2,cr0*4+2,cr6*4+2 | ||
24 | std r4,-16(r1) | ||
25 | std r5,-8(r1) | ||
26 | dcbt 0,r4 | ||
27 | beq .Lcopy_page | ||
28 | andi. r6,r6,7 | ||
29 | mtcrf 0x01,r5 | ||
30 | blt cr1,.Lshort_copy | ||
31 | bne .Ldst_unaligned | ||
32 | .Ldst_aligned: | ||
33 | andi. r0,r4,7 | ||
34 | addi r3,r3,-16 | ||
35 | bne .Lsrc_unaligned | ||
36 | srdi r7,r5,4 | ||
37 | 20: ld r9,0(r4) | ||
38 | addi r4,r4,-8 | ||
39 | mtctr r7 | ||
40 | andi. r5,r5,7 | ||
41 | bf cr7*4+0,22f | ||
42 | addi r3,r3,8 | ||
43 | addi r4,r4,8 | ||
44 | mr r8,r9 | ||
45 | blt cr1,72f | ||
46 | 21: ld r9,8(r4) | ||
47 | 70: std r8,8(r3) | ||
48 | 22: ldu r8,16(r4) | ||
49 | 71: stdu r9,16(r3) | ||
50 | bdnz 21b | ||
51 | 72: std r8,8(r3) | ||
52 | beq+ 3f | ||
53 | addi r3,r3,16 | ||
54 | 23: ld r9,8(r4) | ||
55 | .Ldo_tail: | ||
56 | bf cr7*4+1,1f | ||
57 | rotldi r9,r9,32 | ||
58 | 73: stw r9,0(r3) | ||
59 | addi r3,r3,4 | ||
60 | 1: bf cr7*4+2,2f | ||
61 | rotldi r9,r9,16 | ||
62 | 74: sth r9,0(r3) | ||
63 | addi r3,r3,2 | ||
64 | 2: bf cr7*4+3,3f | ||
65 | rotldi r9,r9,8 | ||
66 | 75: stb r9,0(r3) | ||
67 | 3: li r3,0 | ||
68 | blr | ||
69 | |||
70 | .Lsrc_unaligned: | ||
71 | srdi r6,r5,3 | ||
72 | addi r5,r5,-16 | ||
73 | subf r4,r0,r4 | ||
74 | srdi r7,r5,4 | ||
75 | sldi r10,r0,3 | ||
76 | cmpldi cr6,r6,3 | ||
77 | andi. r5,r5,7 | ||
78 | mtctr r7 | ||
79 | subfic r11,r10,64 | ||
80 | add r5,r5,r0 | ||
81 | bt cr7*4+0,28f | ||
82 | |||
83 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ | ||
84 | 25: ld r0,8(r4) | ||
85 | sld r6,r9,r10 | ||
86 | 26: ldu r9,16(r4) | ||
87 | srd r7,r0,r11 | ||
88 | sld r8,r0,r10 | ||
89 | or r7,r7,r6 | ||
90 | blt cr6,79f | ||
91 | 27: ld r0,8(r4) | ||
92 | b 2f | ||
93 | |||
94 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ | ||
95 | 29: ldu r9,8(r4) | ||
96 | sld r8,r0,r10 | ||
97 | addi r3,r3,-8 | ||
98 | blt cr6,5f | ||
99 | 30: ld r0,8(r4) | ||
100 | srd r12,r9,r11 | ||
101 | sld r6,r9,r10 | ||
102 | 31: ldu r9,16(r4) | ||
103 | or r12,r8,r12 | ||
104 | srd r7,r0,r11 | ||
105 | sld r8,r0,r10 | ||
106 | addi r3,r3,16 | ||
107 | beq cr6,78f | ||
108 | |||
109 | 1: or r7,r7,r6 | ||
110 | 32: ld r0,8(r4) | ||
111 | 76: std r12,8(r3) | ||
112 | 2: srd r12,r9,r11 | ||
113 | sld r6,r9,r10 | ||
114 | 33: ldu r9,16(r4) | ||
115 | or r12,r8,r12 | ||
116 | 77: stdu r7,16(r3) | ||
117 | srd r7,r0,r11 | ||
118 | sld r8,r0,r10 | ||
119 | bdnz 1b | ||
120 | |||
121 | 78: std r12,8(r3) | ||
122 | or r7,r7,r6 | ||
123 | 79: std r7,16(r3) | ||
124 | 5: srd r12,r9,r11 | ||
125 | or r12,r8,r12 | ||
126 | 80: std r12,24(r3) | ||
127 | bne 6f | ||
128 | li r3,0 | ||
129 | blr | ||
130 | 6: cmpwi cr1,r5,8 | ||
131 | addi r3,r3,32 | ||
132 | sld r9,r9,r10 | ||
133 | ble cr1,.Ldo_tail | ||
134 | 34: ld r0,8(r4) | ||
135 | srd r7,r0,r11 | ||
136 | or r9,r7,r9 | ||
137 | b .Ldo_tail | ||
138 | |||
139 | .Ldst_unaligned: | ||
140 | mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */ | ||
141 | subf r5,r6,r5 | ||
142 | li r7,0 | ||
143 | cmpldi r1,r5,16 | ||
144 | bf cr7*4+3,1f | ||
145 | 35: lbz r0,0(r4) | ||
146 | 81: stb r0,0(r3) | ||
147 | addi r7,r7,1 | ||
148 | 1: bf cr7*4+2,2f | ||
149 | 36: lhzx r0,r7,r4 | ||
150 | 82: sthx r0,r7,r3 | ||
151 | addi r7,r7,2 | ||
152 | 2: bf cr7*4+1,3f | ||
153 | 37: lwzx r0,r7,r4 | ||
154 | 83: stwx r0,r7,r3 | ||
155 | 3: mtcrf 0x01,r5 | ||
156 | add r4,r6,r4 | ||
157 | add r3,r6,r3 | ||
158 | b .Ldst_aligned | ||
159 | |||
160 | .Lshort_copy: | ||
161 | bf cr7*4+0,1f | ||
162 | 38: lwz r0,0(r4) | ||
163 | 39: lwz r9,4(r4) | ||
164 | addi r4,r4,8 | ||
165 | 84: stw r0,0(r3) | ||
166 | 85: stw r9,4(r3) | ||
167 | addi r3,r3,8 | ||
168 | 1: bf cr7*4+1,2f | ||
169 | 40: lwz r0,0(r4) | ||
170 | addi r4,r4,4 | ||
171 | 86: stw r0,0(r3) | ||
172 | addi r3,r3,4 | ||
173 | 2: bf cr7*4+2,3f | ||
174 | 41: lhz r0,0(r4) | ||
175 | addi r4,r4,2 | ||
176 | 87: sth r0,0(r3) | ||
177 | addi r3,r3,2 | ||
178 | 3: bf cr7*4+3,4f | ||
179 | 42: lbz r0,0(r4) | ||
180 | 88: stb r0,0(r3) | ||
181 | 4: li r3,0 | ||
182 | blr | ||
183 | |||
184 | /* | ||
185 | * exception handlers follow | ||
186 | * we have to return the number of bytes not copied | ||
187 | * for an exception on a load, we set the rest of the destination to 0 | ||
188 | */ | ||
189 | |||
190 | 136: | ||
191 | 137: | ||
192 | add r3,r3,r7 | ||
193 | b 1f | ||
194 | 130: | ||
195 | 131: | ||
196 | addi r3,r3,8 | ||
197 | 120: | ||
198 | 122: | ||
199 | 124: | ||
200 | 125: | ||
201 | 126: | ||
202 | 127: | ||
203 | 128: | ||
204 | 129: | ||
205 | 133: | ||
206 | addi r3,r3,8 | ||
207 | 121: | ||
208 | 132: | ||
209 | addi r3,r3,8 | ||
210 | 123: | ||
211 | 134: | ||
212 | 135: | ||
213 | 138: | ||
214 | 139: | ||
215 | 140: | ||
216 | 141: | ||
217 | 142: | ||
218 | |||
219 | /* | ||
220 | * here we have had a fault on a load and r3 points to the first | ||
221 | * unmodified byte of the destination | ||
222 | */ | ||
223 | 1: ld r6,-24(r1) | ||
224 | ld r4,-16(r1) | ||
225 | ld r5,-8(r1) | ||
226 | subf r6,r6,r3 | ||
227 | add r4,r4,r6 | ||
228 | subf r5,r6,r5 /* #bytes left to go */ | ||
229 | |||
230 | /* | ||
231 | * first see if we can copy any more bytes before hitting another exception | ||
232 | */ | ||
233 | mtctr r5 | ||
234 | 43: lbz r0,0(r4) | ||
235 | addi r4,r4,1 | ||
236 | 89: stb r0,0(r3) | ||
237 | addi r3,r3,1 | ||
238 | bdnz 43b | ||
239 | li r3,0 /* huh? all copied successfully this time? */ | ||
240 | blr | ||
241 | |||
242 | /* | ||
243 | * here we have trapped again, need to clear ctr bytes starting at r3 | ||
244 | */ | ||
245 | 143: mfctr r5 | ||
246 | li r0,0 | ||
247 | mr r4,r3 | ||
248 | mr r3,r5 /* return the number of bytes not copied */ | ||
249 | 1: andi. r9,r4,7 | ||
250 | beq 3f | ||
251 | 90: stb r0,0(r4) | ||
252 | addic. r5,r5,-1 | ||
253 | addi r4,r4,1 | ||
254 | bne 1b | ||
255 | blr | ||
256 | 3: cmpldi cr1,r5,8 | ||
257 | srdi r9,r5,3 | ||
258 | andi. r5,r5,7 | ||
259 | blt cr1,93f | ||
260 | mtctr r9 | ||
261 | 91: std r0,0(r4) | ||
262 | addi r4,r4,8 | ||
263 | bdnz 91b | ||
264 | 93: beqlr | ||
265 | mtctr r5 | ||
266 | 92: stb r0,0(r4) | ||
267 | addi r4,r4,1 | ||
268 | bdnz 92b | ||
269 | blr | ||
270 | |||
271 | /* | ||
272 | * exception handlers for stores: we just need to work | ||
273 | * out how many bytes weren't copied | ||
274 | */ | ||
275 | 182: | ||
276 | 183: | ||
277 | add r3,r3,r7 | ||
278 | b 1f | ||
279 | 180: | ||
280 | addi r3,r3,8 | ||
281 | 171: | ||
282 | 177: | ||
283 | addi r3,r3,8 | ||
284 | 170: | ||
285 | 172: | ||
286 | 176: | ||
287 | 178: | ||
288 | addi r3,r3,4 | ||
289 | 185: | ||
290 | addi r3,r3,4 | ||
291 | 173: | ||
292 | 174: | ||
293 | 175: | ||
294 | 179: | ||
295 | 181: | ||
296 | 184: | ||
297 | 186: | ||
298 | 187: | ||
299 | 188: | ||
300 | 189: | ||
301 | 1: | ||
302 | ld r6,-24(r1) | ||
303 | ld r5,-8(r1) | ||
304 | add r6,r6,r5 | ||
305 | subf r3,r3,r6 /* #bytes not copied */ | ||
306 | 190: | ||
307 | 191: | ||
308 | 192: | ||
309 | blr /* #bytes not copied in r3 */ | ||
310 | |||
311 | .section __ex_table,"a" | ||
312 | .align 3 | ||
313 | .llong 20b,120b | ||
314 | .llong 21b,121b | ||
315 | .llong 70b,170b | ||
316 | .llong 22b,122b | ||
317 | .llong 71b,171b | ||
318 | .llong 72b,172b | ||
319 | .llong 23b,123b | ||
320 | .llong 73b,173b | ||
321 | .llong 74b,174b | ||
322 | .llong 75b,175b | ||
323 | .llong 24b,124b | ||
324 | .llong 25b,125b | ||
325 | .llong 26b,126b | ||
326 | .llong 27b,127b | ||
327 | .llong 28b,128b | ||
328 | .llong 29b,129b | ||
329 | .llong 30b,130b | ||
330 | .llong 31b,131b | ||
331 | .llong 32b,132b | ||
332 | .llong 76b,176b | ||
333 | .llong 33b,133b | ||
334 | .llong 77b,177b | ||
335 | .llong 78b,178b | ||
336 | .llong 79b,179b | ||
337 | .llong 80b,180b | ||
338 | .llong 34b,134b | ||
339 | .llong 35b,135b | ||
340 | .llong 81b,181b | ||
341 | .llong 36b,136b | ||
342 | .llong 82b,182b | ||
343 | .llong 37b,137b | ||
344 | .llong 83b,183b | ||
345 | .llong 38b,138b | ||
346 | .llong 39b,139b | ||
347 | .llong 84b,184b | ||
348 | .llong 85b,185b | ||
349 | .llong 40b,140b | ||
350 | .llong 86b,186b | ||
351 | .llong 41b,141b | ||
352 | .llong 87b,187b | ||
353 | .llong 42b,142b | ||
354 | .llong 88b,188b | ||
355 | .llong 43b,143b | ||
356 | .llong 89b,189b | ||
357 | .llong 90b,190b | ||
358 | .llong 91b,191b | ||
359 | .llong 92b,192b | ||
360 | |||
361 | .text | ||
362 | |||
363 | /* | ||
364 | * Routine to copy a whole page of data, optimized for POWER4. | ||
365 | * On POWER4 it is more than 50% faster than the simple loop | ||
366 | * above (following the .Ldst_aligned label) but it runs slightly | ||
367 | * slower on POWER3. | ||
368 | */ | ||
369 | .Lcopy_page: | ||
370 | std r31,-32(1) | ||
371 | std r30,-40(1) | ||
372 | std r29,-48(1) | ||
373 | std r28,-56(1) | ||
374 | std r27,-64(1) | ||
375 | std r26,-72(1) | ||
376 | std r25,-80(1) | ||
377 | std r24,-88(1) | ||
378 | std r23,-96(1) | ||
379 | std r22,-104(1) | ||
380 | std r21,-112(1) | ||
381 | std r20,-120(1) | ||
382 | li r5,4096/32 - 1 | ||
383 | addi r3,r3,-8 | ||
384 | li r0,5 | ||
385 | 0: addi r5,r5,-24 | ||
386 | mtctr r0 | ||
387 | 20: ld r22,640(4) | ||
388 | 21: ld r21,512(4) | ||
389 | 22: ld r20,384(4) | ||
390 | 23: ld r11,256(4) | ||
391 | 24: ld r9,128(4) | ||
392 | 25: ld r7,0(4) | ||
393 | 26: ld r25,648(4) | ||
394 | 27: ld r24,520(4) | ||
395 | 28: ld r23,392(4) | ||
396 | 29: ld r10,264(4) | ||
397 | 30: ld r8,136(4) | ||
398 | 31: ldu r6,8(4) | ||
399 | cmpwi r5,24 | ||
400 | 1: | ||
401 | 32: std r22,648(3) | ||
402 | 33: std r21,520(3) | ||
403 | 34: std r20,392(3) | ||
404 | 35: std r11,264(3) | ||
405 | 36: std r9,136(3) | ||
406 | 37: std r7,8(3) | ||
407 | 38: ld r28,648(4) | ||
408 | 39: ld r27,520(4) | ||
409 | 40: ld r26,392(4) | ||
410 | 41: ld r31,264(4) | ||
411 | 42: ld r30,136(4) | ||
412 | 43: ld r29,8(4) | ||
413 | 44: std r25,656(3) | ||
414 | 45: std r24,528(3) | ||
415 | 46: std r23,400(3) | ||
416 | 47: std r10,272(3) | ||
417 | 48: std r8,144(3) | ||
418 | 49: std r6,16(3) | ||
419 | 50: ld r22,656(4) | ||
420 | 51: ld r21,528(4) | ||
421 | 52: ld r20,400(4) | ||
422 | 53: ld r11,272(4) | ||
423 | 54: ld r9,144(4) | ||
424 | 55: ld r7,16(4) | ||
425 | 56: std r28,664(3) | ||
426 | 57: std r27,536(3) | ||
427 | 58: std r26,408(3) | ||
428 | 59: std r31,280(3) | ||
429 | 60: std r30,152(3) | ||
430 | 61: stdu r29,24(3) | ||
431 | 62: ld r25,664(4) | ||
432 | 63: ld r24,536(4) | ||
433 | 64: ld r23,408(4) | ||
434 | 65: ld r10,280(4) | ||
435 | 66: ld r8,152(4) | ||
436 | 67: ldu r6,24(4) | ||
437 | bdnz 1b | ||
438 | 68: std r22,648(3) | ||
439 | 69: std r21,520(3) | ||
440 | 70: std r20,392(3) | ||
441 | 71: std r11,264(3) | ||
442 | 72: std r9,136(3) | ||
443 | 73: std r7,8(3) | ||
444 | 74: addi r4,r4,640 | ||
445 | 75: addi r3,r3,648 | ||
446 | bge 0b | ||
447 | mtctr r5 | ||
448 | 76: ld r7,0(4) | ||
449 | 77: ld r8,8(4) | ||
450 | 78: ldu r9,16(4) | ||
451 | 3: | ||
452 | 79: ld r10,8(4) | ||
453 | 80: std r7,8(3) | ||
454 | 81: ld r7,16(4) | ||
455 | 82: std r8,16(3) | ||
456 | 83: ld r8,24(4) | ||
457 | 84: std r9,24(3) | ||
458 | 85: ldu r9,32(4) | ||
459 | 86: stdu r10,32(3) | ||
460 | bdnz 3b | ||
461 | 4: | ||
462 | 87: ld r10,8(4) | ||
463 | 88: std r7,8(3) | ||
464 | 89: std r8,16(3) | ||
465 | 90: std r9,24(3) | ||
466 | 91: std r10,32(3) | ||
467 | 9: ld r20,-120(1) | ||
468 | ld r21,-112(1) | ||
469 | ld r22,-104(1) | ||
470 | ld r23,-96(1) | ||
471 | ld r24,-88(1) | ||
472 | ld r25,-80(1) | ||
473 | ld r26,-72(1) | ||
474 | ld r27,-64(1) | ||
475 | ld r28,-56(1) | ||
476 | ld r29,-48(1) | ||
477 | ld r30,-40(1) | ||
478 | ld r31,-32(1) | ||
479 | li r3,0 | ||
480 | blr | ||
481 | |||
482 | /* | ||
483 | * on an exception, reset to the beginning and jump back into the | ||
484 | * standard __copy_tofrom_user | ||
485 | */ | ||
486 | 100: ld r20,-120(1) | ||
487 | ld r21,-112(1) | ||
488 | ld r22,-104(1) | ||
489 | ld r23,-96(1) | ||
490 | ld r24,-88(1) | ||
491 | ld r25,-80(1) | ||
492 | ld r26,-72(1) | ||
493 | ld r27,-64(1) | ||
494 | ld r28,-56(1) | ||
495 | ld r29,-48(1) | ||
496 | ld r30,-40(1) | ||
497 | ld r31,-32(1) | ||
498 | ld r3,-24(r1) | ||
499 | ld r4,-16(r1) | ||
500 | li r5,4096 | ||
501 | b .Ldst_aligned | ||
502 | |||
503 | .section __ex_table,"a" | ||
504 | .align 3 | ||
505 | .llong 20b,100b | ||
506 | .llong 21b,100b | ||
507 | .llong 22b,100b | ||
508 | .llong 23b,100b | ||
509 | .llong 24b,100b | ||
510 | .llong 25b,100b | ||
511 | .llong 26b,100b | ||
512 | .llong 27b,100b | ||
513 | .llong 28b,100b | ||
514 | .llong 29b,100b | ||
515 | .llong 30b,100b | ||
516 | .llong 31b,100b | ||
517 | .llong 32b,100b | ||
518 | .llong 33b,100b | ||
519 | .llong 34b,100b | ||
520 | .llong 35b,100b | ||
521 | .llong 36b,100b | ||
522 | .llong 37b,100b | ||
523 | .llong 38b,100b | ||
524 | .llong 39b,100b | ||
525 | .llong 40b,100b | ||
526 | .llong 41b,100b | ||
527 | .llong 42b,100b | ||
528 | .llong 43b,100b | ||
529 | .llong 44b,100b | ||
530 | .llong 45b,100b | ||
531 | .llong 46b,100b | ||
532 | .llong 47b,100b | ||
533 | .llong 48b,100b | ||
534 | .llong 49b,100b | ||
535 | .llong 50b,100b | ||
536 | .llong 51b,100b | ||
537 | .llong 52b,100b | ||
538 | .llong 53b,100b | ||
539 | .llong 54b,100b | ||
540 | .llong 55b,100b | ||
541 | .llong 56b,100b | ||
542 | .llong 57b,100b | ||
543 | .llong 58b,100b | ||
544 | .llong 59b,100b | ||
545 | .llong 60b,100b | ||
546 | .llong 61b,100b | ||
547 | .llong 62b,100b | ||
548 | .llong 63b,100b | ||
549 | .llong 64b,100b | ||
550 | .llong 65b,100b | ||
551 | .llong 66b,100b | ||
552 | .llong 67b,100b | ||
553 | .llong 68b,100b | ||
554 | .llong 69b,100b | ||
555 | .llong 70b,100b | ||
556 | .llong 71b,100b | ||
557 | .llong 72b,100b | ||
558 | .llong 73b,100b | ||
559 | .llong 74b,100b | ||
560 | .llong 75b,100b | ||
561 | .llong 76b,100b | ||
562 | .llong 77b,100b | ||
563 | .llong 78b,100b | ||
564 | .llong 79b,100b | ||
565 | .llong 80b,100b | ||
566 | .llong 81b,100b | ||
567 | .llong 82b,100b | ||
568 | .llong 83b,100b | ||
569 | .llong 84b,100b | ||
570 | .llong 85b,100b | ||
571 | .llong 86b,100b | ||
572 | .llong 87b,100b | ||
573 | .llong 88b,100b | ||
574 | .llong 89b,100b | ||
575 | .llong 90b,100b | ||
576 | .llong 91b,100b | ||
diff --git a/arch/powerpc/lib/div64.S b/arch/powerpc/lib/div64.S new file mode 100644 index 000000000000..3527569e9926 --- /dev/null +++ b/arch/powerpc/lib/div64.S | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * Divide a 64-bit unsigned number by a 32-bit unsigned number. | ||
3 | * This routine assumes that the top 32 bits of the dividend are | ||
4 | * non-zero to start with. | ||
5 | * On entry, r3 points to the dividend, which get overwritten with | ||
6 | * the 64-bit quotient, and r4 contains the divisor. | ||
7 | * On exit, r3 contains the remainder. | ||
8 | * | ||
9 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | #include <asm/ppc_asm.h> | ||
17 | #include <asm/processor.h> | ||
18 | |||
/*
 * __div64_32(u64 *dividend, u32 divisor)
 * r3 = pointer to the 64-bit dividend (overwritten with the quotient),
 * r4 = 32-bit divisor.  Returns the 32-bit remainder in r3.
 * Works by repeatedly estimating a partial quotient from a shifted
 * dividend/divisor pair; the estimate can only be too small, never too
 * large, so the loop converges.
 */
_GLOBAL(__div64_32)
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)
	cmplw	r5,r4
	li	r7,0		# r7/r8 accumulate the 64-bit quotient
	li	r8,0
	blt	1f
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f		# hi word now zero: one 32-bit divide left
1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000	# top 2 bits set? estimate of 1 is enough
	bne	2f
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	add	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	or	r11,r5,r11
	rotlw	r9,r9,r0
	rotlw	r11,r11,r0
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
	subfc	r6,r10,r6	# take the product from the dividend,
	add	r8,r8,r11	# and add the estimate to the accumulated
	subfe.	r5,r9,r5	# quotient
	bne	1b		# repeat until dividend.hi becomes zero
3:	cmplw	r6,r4
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr
diff --git a/arch/powerpc/lib/e2a.c b/arch/powerpc/lib/e2a.c new file mode 100644 index 000000000000..d2b834887920 --- /dev/null +++ b/arch/powerpc/lib/e2a.c | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/e2a.c | ||
3 | * | ||
4 | * EBCDIC to ASCII conversion | ||
5 | * | ||
6 | * This function moved here from arch/ppc64/kernel/viopath.c | ||
7 | * | ||
8 | * (C) Copyright 2000-2004 IBM Corporation | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License as | ||
12 | * published by the Free Software Foundation; either version 2 of the | ||
13 | * License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software Foundation, | ||
22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | |||
/*
 * Translate one EBCDIC character to ASCII.  Only the digits '0'-'9'
 * and the uppercase letters (which EBCDIC stores in three separate
 * runs: A-I, J-R, S-Z) are mapped; anything else becomes a space.
 * Relies on ASCII's contiguous '0'-'9' and 'A'-'Z' code ranges.
 */
unsigned char e2a(unsigned char x)
{
	if (x >= 0xF0 && x <= 0xF9)	/* digits 0-9 */
		return '0' + (x - 0xF0);
	if (x >= 0xC1 && x <= 0xC9)	/* letters A-I */
		return 'A' + (x - 0xC1);
	if (x >= 0xD1 && x <= 0xD9)	/* letters J-R */
		return 'J' + (x - 0xD1);
	if (x >= 0xE2 && x <= 0xE9)	/* letters S-Z */
		return 'S' + (x - 0xE2);
	return ' ';
}
106 | EXPORT_SYMBOL(e2a); | ||
107 | |||
108 | |||
diff --git a/arch/powerpc/lib/memcpy.S b/arch/powerpc/lib/memcpy.S new file mode 100644 index 000000000000..9ccacdf5bcb9 --- /dev/null +++ b/arch/powerpc/lib/memcpy.S | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * arch/ppc64/lib/memcpy.S | ||
3 | * | ||
4 | * Copyright (C) 2002 Paul Mackerras, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/ppc_asm.h> | ||
13 | |||
/*
 * memcpy for 64-bit PowerPC: r3 = dest, r4 = src, r5 = length.
 * Copies in doubleword chunks with a software-pipelined inner loop;
 * misaligned sources are handled by a shift-and-merge loop.
 * Assumes big-endian byte order (tail bytes are emitted from the
 * most-significant end of the register) -- TODO confirm for LE use.
 * NOTE(review): r3 is advanced as the copy proceeds and is not
 * restored, so the value left in r3 at blr is not the original dest
 * as C memcpy() requires -- verify in-kernel callers ignore it.
 */
	.align	7
_GLOBAL(memcpy)
	mtcrf	0x01,r5			# low 4 bits of length -> cr7
	cmpldi	cr1,r5,16
	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
	andi.	r6,r6,7
	dcbt	0,r4			# touch source line into the cache
	blt	cr1,.Lshort_copy	# < 16 bytes: small-piece copy
	bne	.Ldst_unaligned		# align the destination first
.Ldst_aligned:
	andi.	r0,r4,7			# r0 = source misalignment (0..7)
	addi	r3,r3,-16		# bias dest for the stdu ..,16(r3) below
	bne	.Lsrc_unaligned
	srdi	r7,r5,4			# r7 = number of 16-byte chunks
	ld	r9,0(r4)
	addi	r4,r4,-8		# bias src for the ldu ..,16(r4) below
	mtctr	r7
	andi.	r5,r5,7			# r5 = residual bytes (0..7)
	bf	cr7*4+0,2f		# length bit 3 clear -> even dwords
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,3f
1:	ld	r9,8(r4)		# software-pipelined dword loop:
	std	r8,8(r3)		# each iteration loads one dword
2:	ldu	r8,16(r4)		# ahead of the dword it stores
	stdu	r9,16(r3)
	bdnz	1b
3:	std	r8,8(r3)		# drain the last pipelined dword
	beqlr				# no residual bytes -> done
	addi	r3,r3,16
	ld	r9,8(r4)
.Ldo_tail:
	# emit the final 1-7 bytes; sizes come from cr7, data sits in r9
	bf	cr7*4+1,1f
	rotldi	r9,r9,32		# bring next 4 bytes to the low word
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	blr

.Lsrc_unaligned:
	# source not dword aligned: read aligned dwords and merge each
	# destination dword from two source dwords with sld/srd
	srdi	r6,r5,3			# r6 = total number of dwords
	addi	r5,r5,-16
	subf	r4,r0,r4		# align src down to a dword boundary
	srdi	r7,r5,4			# r7 = inner-loop iterations
	sldi	r10,r0,3		# r10 = left shift = misalign * 8 bits
	cmpdi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64		# r11 = complementary right shift
	add	r5,r5,r0		# r5 = bytes to finish in .Ldo_tail

	bt	cr7*4+0,0f		# odd number of dwords?

	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
	ld	r0,8(r4)
	sld	r6,r9,r10
	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,4f
	ld	r0,8(r4)
	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
	b	2f

0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,3f

	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:	or	r7,r7,r6
	ld	r0,8(r4)
	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b

3:	std	r12,8(r3)		# drain the pipelined dwords
	or	r7,r7,r6
4:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
	std	r12,24(r3)
	beqlr				# no tail bytes -> done
	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10		# left-justify the remaining bytes
	ble	cr1,.Ldo_tail
	ld	r0,8(r4)		# need one more src dword for the tail
	srd	r7,r0,r11
	or	r9,r7,r9
	b	.Ldo_tail

.Ldst_unaligned:
	mtcrf	0x01,r6		# put #bytes to 8B bdry into cr7
	subf	r5,r6,r5	# r5 = length left after alignment bytes
	li	r7,0		# r7 = running byte offset
	cmpldi	r1,r5,16	# NOTE(review): first operand is the cr
				# field; "r1" assembles as field 1 (cr1),
				# but spelling it cr1 is clearer - verify
	bf	cr7*4+3,1f
	lbz	r0,0(r4)	# copy 1 byte ...
	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
	lhzx	r0,r7,r4	# ... then 2 bytes ...
	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
	lwzx	r0,r7,r4	# ... then 4 bytes, as alignment demands
	stwx	r0,r7,r3
3:	mtcrf	0x01,r5		# low bits of remaining length -> cr7
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned

.Lshort_copy:
	# total length < 16: emit 8/4/2/1-byte pieces per the cr7 bits
	bf	cr7*4+0,1f
	lwz	r0,0(r4)	# 8 bytes, done as two word copies
	lwz	r9,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
	lwz	r0,0(r4)	# 4 bytes
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
	lhz	r0,0(r4)	# 2 bytes
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)	# 1 byte
	stb	r0,0(r3)
4:	blr
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c new file mode 100644 index 000000000000..42c5de2c898f --- /dev/null +++ b/arch/powerpc/lib/rheap.c | |||
@@ -0,0 +1,693 @@ | |||
1 | /* | ||
2 | * arch/ppc/syslib/rheap.c | ||
3 | * | ||
4 | * A Remote Heap. Remote means that we don't touch the memory that the | ||
5 | * heap points to. Normal heap implementations use the memory they manage | ||
6 | * to place their list. We cannot do that because the memory we manage may | ||
7 | * have special properties, for example it is uncachable or of different | ||
8 | * endianness. | ||
9 | * | ||
10 | * Author: Pantelis Antoniou <panto@intracom.gr> | ||
11 | * | ||
12 | * 2004 (c) INTRACOM S.A. Greece. This file is licensed under | ||
13 | * the terms of the GNU General Public License version 2. This program | ||
14 | * is licensed "as is" without any warranty of any kind, whether express | ||
15 | * or implied. | ||
16 | */ | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/slab.h> | ||
21 | |||
22 | #include <asm/rheap.h> | ||
23 | |||
24 | /* | ||
25 | * Fixup a list_head, needed when copying lists. If the pointers fall | ||
26 | * between s and e, apply the delta. This assumes that | ||
27 | * sizeof(struct list_head *) == sizeof(unsigned long *). | ||
28 | */ | ||
29 | static inline void fixup(unsigned long s, unsigned long e, int d, | ||
30 | struct list_head *l) | ||
31 | { | ||
32 | unsigned long *pp; | ||
33 | |||
34 | pp = (unsigned long *)&l->next; | ||
35 | if (*pp >= s && *pp < e) | ||
36 | *pp += d; | ||
37 | |||
38 | pp = (unsigned long *)&l->prev; | ||
39 | if (*pp >= s && *pp < e) | ||
40 | *pp += d; | ||
41 | } | ||
42 | |||
43 | /* Grow the allocated blocks */ | ||
44 | static int grow(rh_info_t * info, int max_blocks) | ||
45 | { | ||
46 | rh_block_t *block, *blk; | ||
47 | int i, new_blocks; | ||
48 | int delta; | ||
49 | unsigned long blks, blke; | ||
50 | |||
51 | if (max_blocks <= info->max_blocks) | ||
52 | return -EINVAL; | ||
53 | |||
54 | new_blocks = max_blocks - info->max_blocks; | ||
55 | |||
56 | block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL); | ||
57 | if (block == NULL) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | if (info->max_blocks > 0) { | ||
61 | |||
62 | /* copy old block area */ | ||
63 | memcpy(block, info->block, | ||
64 | sizeof(rh_block_t) * info->max_blocks); | ||
65 | |||
66 | delta = (char *)block - (char *)info->block; | ||
67 | |||
68 | /* and fixup list pointers */ | ||
69 | blks = (unsigned long)info->block; | ||
70 | blke = (unsigned long)(info->block + info->max_blocks); | ||
71 | |||
72 | for (i = 0, blk = block; i < info->max_blocks; i++, blk++) | ||
73 | fixup(blks, blke, delta, &blk->list); | ||
74 | |||
75 | fixup(blks, blke, delta, &info->empty_list); | ||
76 | fixup(blks, blke, delta, &info->free_list); | ||
77 | fixup(blks, blke, delta, &info->taken_list); | ||
78 | |||
79 | /* free the old allocated memory */ | ||
80 | if ((info->flags & RHIF_STATIC_BLOCK) == 0) | ||
81 | kfree(info->block); | ||
82 | } | ||
83 | |||
84 | info->block = block; | ||
85 | info->empty_slots += new_blocks; | ||
86 | info->max_blocks = max_blocks; | ||
87 | info->flags &= ~RHIF_STATIC_BLOCK; | ||
88 | |||
89 | /* add all new blocks to the free list */ | ||
90 | for (i = 0, blk = block + info->max_blocks; i < new_blocks; i++, blk++) | ||
91 | list_add(&blk->list, &info->empty_list); | ||
92 | |||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Assure at least the required amount of empty slots. If this function | ||
98 | * causes a grow in the block area then all pointers kept to the block | ||
99 | * area are invalid! | ||
100 | */ | ||
101 | static int assure_empty(rh_info_t * info, int slots) | ||
102 | { | ||
103 | int max_blocks; | ||
104 | |||
105 | /* This function is not meant to be used to grow uncontrollably */ | ||
106 | if (slots >= 4) | ||
107 | return -EINVAL; | ||
108 | |||
109 | /* Enough space */ | ||
110 | if (info->empty_slots >= slots) | ||
111 | return 0; | ||
112 | |||
113 | /* Next 16 sized block */ | ||
114 | max_blocks = ((info->max_blocks + slots) + 15) & ~15; | ||
115 | |||
116 | return grow(info, max_blocks); | ||
117 | } | ||
118 | |||
119 | static rh_block_t *get_slot(rh_info_t * info) | ||
120 | { | ||
121 | rh_block_t *blk; | ||
122 | |||
123 | /* If no more free slots, and failure to extend. */ | ||
124 | /* XXX: You should have called assure_empty before */ | ||
125 | if (info->empty_slots == 0) { | ||
126 | printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); | ||
127 | return NULL; | ||
128 | } | ||
129 | |||
130 | /* Get empty slot to use */ | ||
131 | blk = list_entry(info->empty_list.next, rh_block_t, list); | ||
132 | list_del_init(&blk->list); | ||
133 | info->empty_slots--; | ||
134 | |||
135 | /* Initialize */ | ||
136 | blk->start = NULL; | ||
137 | blk->size = 0; | ||
138 | blk->owner = NULL; | ||
139 | |||
140 | return blk; | ||
141 | } | ||
142 | |||
143 | static inline void release_slot(rh_info_t * info, rh_block_t * blk) | ||
144 | { | ||
145 | list_add(&blk->list, &info->empty_list); | ||
146 | info->empty_slots++; | ||
147 | } | ||
148 | |||
/*
 * Put the block 'blkn' (start/size already filled in) onto the free
 * list, merging it with a physically adjacent free block on either
 * side when one exists.  Descriptors made redundant by a merge are
 * returned to the empty pool.
 */
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
	rh_block_t *blk;
	rh_block_t *before;	/* free block ending exactly at s */
	rh_block_t *after;	/* free block starting exactly at e */
	rh_block_t *next;	/* insertion point when no merge happens */
	int size;
	unsigned long s, e, bs, be;
	struct list_head *l;

	/* We assume that they are aligned properly */
	size = blkn->size;
	s = (unsigned long)blkn->start;
	e = s + size;

	/* Find the blocks immediately before and after the given one
	 * (if any) */
	before = NULL;
	after = NULL;
	next = NULL;

	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);

		bs = (unsigned long)blk->start;
		be = bs + blk->size;

		/* first block whose start is <= s is remembered as the
		 * insertion point (NOTE(review): relies on the list's
		 * address ordering - confirm against the insert paths) */
		if (next == NULL && s >= bs)
			next = blk;

		if (be == s)
			before = blk;

		if (e == bs)
			after = blk;

		/* If both are not null, break now */
		if (before != NULL && after != NULL)
			break;
	}

	/* Now check if they are really adjacent */
	if (before != NULL && s != (unsigned long)before->start + before->size)
		before = NULL;

	if (after != NULL && e != (unsigned long)after->start)
		after = NULL;

	/* No coalescing; list insert and return */
	if (before == NULL && after == NULL) {

		if (next != NULL)
			list_add(&blkn->list, &next->list);
		else
			list_add(&blkn->list, &info->free_list);

		return;
	}

	/* We don't need it anymore */
	release_slot(info, blkn);

	/* Grow the before block */
	if (before != NULL && after == NULL) {
		before->size += size;
		return;
	}

	/* Grow the after block backwards */
	if (before == NULL && after != NULL) {
		after->start = (int8_t *)after->start - size;
		after->size += size;
		return;
	}

	/* Both neighbours exist: grow the before block over blkn and
	 * the after block, then release the after block's descriptor */
	before->size += size + after->size;
	list_del(&after->list);
	release_slot(info, after);
}
229 | |||
230 | static void attach_taken_block(rh_info_t * info, rh_block_t * blkn) | ||
231 | { | ||
232 | rh_block_t *blk; | ||
233 | struct list_head *l; | ||
234 | |||
235 | /* Find the block immediately before the given one (if any) */ | ||
236 | list_for_each(l, &info->taken_list) { | ||
237 | blk = list_entry(l, rh_block_t, list); | ||
238 | if (blk->start > blkn->start) { | ||
239 | list_add_tail(&blkn->list, &blk->list); | ||
240 | return; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | list_add_tail(&blkn->list, &info->taken_list); | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Create a remote heap dynamically. Note that no memory for the blocks | ||
249 | * is allocated; storage is obtained upon the first allocation request. | ||
250 | */ | ||
251 | rh_info_t *rh_create(unsigned int alignment) | ||
252 | { | ||
253 | rh_info_t *info; | ||
254 | |||
255 | /* Alignment must be a power of two */ | ||
256 | if ((alignment & (alignment - 1)) != 0) | ||
257 | return ERR_PTR(-EINVAL); | ||
258 | |||
259 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
260 | if (info == NULL) | ||
261 | return ERR_PTR(-ENOMEM); | ||
262 | |||
263 | info->alignment = alignment; | ||
264 | |||
265 | /* Initially everything as empty */ | ||
266 | info->block = NULL; | ||
267 | info->max_blocks = 0; | ||
268 | info->empty_slots = 0; | ||
269 | info->flags = 0; | ||
270 | |||
271 | INIT_LIST_HEAD(&info->empty_list); | ||
272 | INIT_LIST_HEAD(&info->free_list); | ||
273 | INIT_LIST_HEAD(&info->taken_list); | ||
274 | |||
275 | return info; | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * Destroy a dynamically created remote heap. Deallocate only if the areas | ||
280 | * are not static | ||
281 | */ | ||
282 | void rh_destroy(rh_info_t * info) | ||
283 | { | ||
284 | if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) | ||
285 | kfree(info->block); | ||
286 | |||
287 | if ((info->flags & RHIF_STATIC_INFO) == 0) | ||
288 | kfree(info); | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * Initialize in place a remote heap info block. This is needed to support | ||
293 | * operation very early in the startup of the kernel, when it is not yet safe | ||
294 | * to call kmalloc. | ||
295 | */ | ||
296 | void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, | ||
297 | rh_block_t * block) | ||
298 | { | ||
299 | int i; | ||
300 | rh_block_t *blk; | ||
301 | |||
302 | /* Alignment must be a power of two */ | ||
303 | if ((alignment & (alignment - 1)) != 0) | ||
304 | return; | ||
305 | |||
306 | info->alignment = alignment; | ||
307 | |||
308 | /* Initially everything as empty */ | ||
309 | info->block = block; | ||
310 | info->max_blocks = max_blocks; | ||
311 | info->empty_slots = max_blocks; | ||
312 | info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; | ||
313 | |||
314 | INIT_LIST_HEAD(&info->empty_list); | ||
315 | INIT_LIST_HEAD(&info->free_list); | ||
316 | INIT_LIST_HEAD(&info->taken_list); | ||
317 | |||
318 | /* Add all new blocks to the free list */ | ||
319 | for (i = 0, blk = block; i < max_blocks; i++, blk++) | ||
320 | list_add(&blk->list, &info->empty_list); | ||
321 | } | ||
322 | |||
323 | /* Attach a free memory region, coalesces regions if adjuscent */ | ||
324 | int rh_attach_region(rh_info_t * info, void *start, int size) | ||
325 | { | ||
326 | rh_block_t *blk; | ||
327 | unsigned long s, e, m; | ||
328 | int r; | ||
329 | |||
330 | /* The region must be aligned */ | ||
331 | s = (unsigned long)start; | ||
332 | e = s + size; | ||
333 | m = info->alignment - 1; | ||
334 | |||
335 | /* Round start up */ | ||
336 | s = (s + m) & ~m; | ||
337 | |||
338 | /* Round end down */ | ||
339 | e = e & ~m; | ||
340 | |||
341 | /* Take final values */ | ||
342 | start = (void *)s; | ||
343 | size = (int)(e - s); | ||
344 | |||
345 | /* Grow the blocks, if needed */ | ||
346 | r = assure_empty(info, 1); | ||
347 | if (r < 0) | ||
348 | return r; | ||
349 | |||
350 | blk = get_slot(info); | ||
351 | blk->start = start; | ||
352 | blk->size = size; | ||
353 | blk->owner = NULL; | ||
354 | |||
355 | attach_free_block(info, blk); | ||
356 | |||
357 | return 0; | ||
358 | } | ||
359 | |||
/* Detach given address range, splits free block if needed.
 * Returns the (aligned) start address on success, or an ERR_PTR().
 * The requested range must lie entirely within a single free block. */
void *rh_detach_region(rh_info_t * info, void *start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs, be;

	/* Validate size */
	if (size <= 0)
		return ERR_PTR(-EINVAL);

	/* The region must be aligned */
	s = (unsigned long)start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* Reserve a spare slot in case the block must be split below. */
	if (assure_empty(info, 1) < 0)
		return ERR_PTR(-ENOMEM);

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = (unsigned long)blk->start;
		be = (unsigned long)blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;	/* not contained here; keep looking */
	}

	if (blk == NULL)
		return ERR_PTR(-ENOMEM);

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return (void *)s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		/* Range touches one end of the block: shrink in place.
		 * NOTE(review): this adjusts by the caller's raw size, not
		 * the aligned e - s; the two differ when rounding changed
		 * the range -- confirm intended. */
		if (bs == s)
			blk->start = (int8_t *)blk->start + size;
		blk->size -= size;

	} else {
		/* Range is in the middle: split into two free fragments. */
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = (void *)e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return (void *)s;
}
427 | |||
428 | void *rh_alloc(rh_info_t * info, int size, const char *owner) | ||
429 | { | ||
430 | struct list_head *l; | ||
431 | rh_block_t *blk; | ||
432 | rh_block_t *newblk; | ||
433 | void *start; | ||
434 | |||
435 | /* Validate size */ | ||
436 | if (size <= 0) | ||
437 | return ERR_PTR(-EINVAL); | ||
438 | |||
439 | /* Align to configured alignment */ | ||
440 | size = (size + (info->alignment - 1)) & ~(info->alignment - 1); | ||
441 | |||
442 | if (assure_empty(info, 1) < 0) | ||
443 | return ERR_PTR(-ENOMEM); | ||
444 | |||
445 | blk = NULL; | ||
446 | list_for_each(l, &info->free_list) { | ||
447 | blk = list_entry(l, rh_block_t, list); | ||
448 | if (size <= blk->size) | ||
449 | break; | ||
450 | blk = NULL; | ||
451 | } | ||
452 | |||
453 | if (blk == NULL) | ||
454 | return ERR_PTR(-ENOMEM); | ||
455 | |||
456 | /* Just fits */ | ||
457 | if (blk->size == size) { | ||
458 | /* Move from free list to taken list */ | ||
459 | list_del(&blk->list); | ||
460 | blk->owner = owner; | ||
461 | start = blk->start; | ||
462 | |||
463 | attach_taken_block(info, blk); | ||
464 | |||
465 | return start; | ||
466 | } | ||
467 | |||
468 | newblk = get_slot(info); | ||
469 | newblk->start = blk->start; | ||
470 | newblk->size = size; | ||
471 | newblk->owner = owner; | ||
472 | |||
473 | /* blk still in free list, with updated start, size */ | ||
474 | blk->start = (int8_t *)blk->start + size; | ||
475 | blk->size -= size; | ||
476 | |||
477 | start = newblk->start; | ||
478 | |||
479 | attach_taken_block(info, newblk); | ||
480 | |||
481 | return start; | ||
482 | } | ||
483 | |||
484 | /* allocate at precisely the given address */ | ||
485 | void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner) | ||
486 | { | ||
487 | struct list_head *l; | ||
488 | rh_block_t *blk, *newblk1, *newblk2; | ||
489 | unsigned long s, e, m, bs, be; | ||
490 | |||
491 | /* Validate size */ | ||
492 | if (size <= 0) | ||
493 | return ERR_PTR(-EINVAL); | ||
494 | |||
495 | /* The region must be aligned */ | ||
496 | s = (unsigned long)start; | ||
497 | e = s + size; | ||
498 | m = info->alignment - 1; | ||
499 | |||
500 | /* Round start up */ | ||
501 | s = (s + m) & ~m; | ||
502 | |||
503 | /* Round end down */ | ||
504 | e = e & ~m; | ||
505 | |||
506 | if (assure_empty(info, 2) < 0) | ||
507 | return ERR_PTR(-ENOMEM); | ||
508 | |||
509 | blk = NULL; | ||
510 | list_for_each(l, &info->free_list) { | ||
511 | blk = list_entry(l, rh_block_t, list); | ||
512 | /* The range must lie entirely inside one free block */ | ||
513 | bs = (unsigned long)blk->start; | ||
514 | be = (unsigned long)blk->start + blk->size; | ||
515 | if (s >= bs && e <= be) | ||
516 | break; | ||
517 | } | ||
518 | |||
519 | if (blk == NULL) | ||
520 | return ERR_PTR(-ENOMEM); | ||
521 | |||
522 | /* Perfect fit */ | ||
523 | if (bs == s && be == e) { | ||
524 | /* Move from free list to taken list */ | ||
525 | list_del(&blk->list); | ||
526 | blk->owner = owner; | ||
527 | |||
528 | start = blk->start; | ||
529 | attach_taken_block(info, blk); | ||
530 | |||
531 | return start; | ||
532 | |||
533 | } | ||
534 | |||
535 | /* blk still in free list, with updated start and/or size */ | ||
536 | if (bs == s || be == e) { | ||
537 | if (bs == s) | ||
538 | blk->start = (int8_t *)blk->start + size; | ||
539 | blk->size -= size; | ||
540 | |||
541 | } else { | ||
542 | /* The front free fragment */ | ||
543 | blk->size = s - bs; | ||
544 | |||
545 | /* The back free fragment */ | ||
546 | newblk2 = get_slot(info); | ||
547 | newblk2->start = (void *)e; | ||
548 | newblk2->size = be - e; | ||
549 | |||
550 | list_add(&newblk2->list, &blk->list); | ||
551 | } | ||
552 | |||
553 | newblk1 = get_slot(info); | ||
554 | newblk1->start = (void *)s; | ||
555 | newblk1->size = e - s; | ||
556 | newblk1->owner = owner; | ||
557 | |||
558 | start = newblk1->start; | ||
559 | attach_taken_block(info, newblk1); | ||
560 | |||
561 | return start; | ||
562 | } | ||
563 | |||
/*
 * Free a block previously handed out by rh_alloc()/rh_alloc_fixed().
 * Returns the size of the freed block, or -EINVAL if @start does not
 * fall within any taken block.
 */
int rh_free(rh_info_t * info, void *start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		/* stop at the first block past @start and keep the previous
		 * one (appears to assume taken_list is kept sorted by start
		 * address -- presumably by attach_taken_block; verify). */
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	/* NOTE(review): only checks that @start is not past the end of the
	 * candidate block; @start == blk->start is not required, so an
	 * interior pointer is accepted -- confirm this is intended. */
	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}
591 | |||
592 | int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats) | ||
593 | { | ||
594 | rh_block_t *blk; | ||
595 | struct list_head *l; | ||
596 | struct list_head *h; | ||
597 | int nr; | ||
598 | |||
599 | switch (what) { | ||
600 | |||
601 | case RHGS_FREE: | ||
602 | h = &info->free_list; | ||
603 | break; | ||
604 | |||
605 | case RHGS_TAKEN: | ||
606 | h = &info->taken_list; | ||
607 | break; | ||
608 | |||
609 | default: | ||
610 | return -EINVAL; | ||
611 | } | ||
612 | |||
613 | /* Linear search for block */ | ||
614 | nr = 0; | ||
615 | list_for_each(l, h) { | ||
616 | blk = list_entry(l, rh_block_t, list); | ||
617 | if (stats != NULL && nr < max_stats) { | ||
618 | stats->start = blk->start; | ||
619 | stats->size = blk->size; | ||
620 | stats->owner = blk->owner; | ||
621 | stats++; | ||
622 | } | ||
623 | nr++; | ||
624 | } | ||
625 | |||
626 | return nr; | ||
627 | } | ||
628 | |||
/*
 * Change the owner tag of the taken block containing @start.
 * Returns the block's size on success, or -EINVAL if no taken block
 * contains @start.
 */
int rh_set_owner(rh_info_t * info, void *start, const char *owner)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		/* same sorted-list walk as rh_free(): keep the last block
		 * whose start is <= @start */
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	/* NOTE(review): as in rh_free(), any @start up to the end of the
	 * block is accepted, not just the exact block start -- verify. */
	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	blk->owner = owner;
	size = blk->size;

	return size;
}
652 | |||
/*
 * Dump the free and taken lists of a heap to the kernel log.
 * Uses a fixed static buffer, so at most 32 blocks per list are shown,
 * and concurrent callers would share (and race on) the buffer.
 */
void rh_dump(rh_info_t * info)
{
	static rh_stats_t st[32];	/* XXX maximum 32 blocks */
	int maxnr;
	int i, nr;

	maxnr = sizeof(st) / sizeof(st[0]);

	printk(KERN_INFO
	       "info @0x%p (%d slots empty / %d max)\n",
	       info, info->empty_slots, info->max_blocks);

	printk(KERN_INFO " Free:\n");
	/* rh_get_stats returns the total count, which may exceed what was
	 * actually copied into st[]; clamp before printing. */
	nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       " 0x%p-0x%p (%u)\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size);
	printk(KERN_INFO "\n");

	printk(KERN_INFO " Taken:\n");
	nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
	if (nr > maxnr)
		nr = maxnr;
	for (i = 0; i < nr; i++)
		printk(KERN_INFO
		       " 0x%p-0x%p (%u) %s\n",
		       st[i].start, (int8_t *) st[i].start + st[i].size,
		       st[i].size, st[i].owner != NULL ? st[i].owner : "");
	printk(KERN_INFO "\n");
}
687 | |||
/* Dump a single block (address, extent and size) to the kernel log.
 * The info argument is currently unused. */
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
	printk(KERN_INFO
	       "blk @0x%p: 0x%p-0x%p (%u)\n",
	       blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
}
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c new file mode 100644 index 000000000000..e79123d1485c --- /dev/null +++ b/arch/powerpc/lib/sstep.c | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Single-step support. | ||
3 | * | ||
4 | * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/ptrace.h> | ||
13 | #include <asm/sstep.h> | ||
14 | #include <asm/processor.h> | ||
15 | |||
16 | extern char system_call_common[]; | ||
17 | |||
18 | /* Bits in SRR1 that are copied from MSR */ | ||
19 | #define MSR_MASK 0xffffffff87c0ffff | ||
20 | |||
21 | /* | ||
22 | * Determine whether a conditional branch instruction would branch. | ||
23 | */ | ||
24 | static int branch_taken(unsigned int instr, struct pt_regs *regs) | ||
25 | { | ||
26 | unsigned int bo = (instr >> 21) & 0x1f; | ||
27 | unsigned int bi; | ||
28 | |||
29 | if ((bo & 4) == 0) { | ||
30 | /* decrement counter */ | ||
31 | --regs->ctr; | ||
32 | if (((bo >> 1) & 1) ^ (regs->ctr == 0)) | ||
33 | return 0; | ||
34 | } | ||
35 | if ((bo & 0x10) == 0) { | ||
36 | /* check bit from CR */ | ||
37 | bi = (instr >> 16) & 0x1f; | ||
38 | if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) | ||
39 | return 0; | ||
40 | } | ||
41 | return 1; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * Emulate instructions that cause a transfer of control. | ||
46 | * Returns 1 if the step was emulated, 0 if not, | ||
47 | * or -1 if the instruction is one that should not be stepped, | ||
48 | * such as an rfid, or a mtmsrd that would clear MSR_RI. | ||
49 | */ | ||
50 | int emulate_step(struct pt_regs *regs, unsigned int instr) | ||
51 | { | ||
52 | unsigned int opcode, rd; | ||
53 | unsigned long int imm; | ||
54 | |||
55 | opcode = instr >> 26; | ||
56 | switch (opcode) { | ||
57 | case 16: /* bc */ | ||
58 | imm = (signed short)(instr & 0xfffc); | ||
59 | if ((instr & 2) == 0) | ||
60 | imm += regs->nip; | ||
61 | regs->nip += 4; | ||
62 | if ((regs->msr & MSR_SF) == 0) | ||
63 | regs->nip &= 0xffffffffUL; | ||
64 | if (instr & 1) | ||
65 | regs->link = regs->nip; | ||
66 | if (branch_taken(instr, regs)) | ||
67 | regs->nip = imm; | ||
68 | return 1; | ||
69 | case 17: /* sc */ | ||
70 | /* | ||
71 | * N.B. this uses knowledge about how the syscall | ||
72 | * entry code works. If that is changed, this will | ||
73 | * need to be changed also. | ||
74 | */ | ||
75 | regs->gpr[9] = regs->gpr[13]; | ||
76 | regs->gpr[11] = regs->nip + 4; | ||
77 | regs->gpr[12] = regs->msr & MSR_MASK; | ||
78 | regs->gpr[13] = (unsigned long) get_paca(); | ||
79 | regs->nip = (unsigned long) &system_call_common; | ||
80 | regs->msr = MSR_KERNEL; | ||
81 | return 1; | ||
82 | case 18: /* b */ | ||
83 | imm = instr & 0x03fffffc; | ||
84 | if (imm & 0x02000000) | ||
85 | imm -= 0x04000000; | ||
86 | if ((instr & 2) == 0) | ||
87 | imm += regs->nip; | ||
88 | if (instr & 1) { | ||
89 | regs->link = regs->nip + 4; | ||
90 | if ((regs->msr & MSR_SF) == 0) | ||
91 | regs->link &= 0xffffffffUL; | ||
92 | } | ||
93 | if ((regs->msr & MSR_SF) == 0) | ||
94 | imm &= 0xffffffffUL; | ||
95 | regs->nip = imm; | ||
96 | return 1; | ||
97 | case 19: | ||
98 | switch (instr & 0x7fe) { | ||
99 | case 0x20: /* bclr */ | ||
100 | case 0x420: /* bcctr */ | ||
101 | imm = (instr & 0x400)? regs->ctr: regs->link; | ||
102 | regs->nip += 4; | ||
103 | if ((regs->msr & MSR_SF) == 0) { | ||
104 | regs->nip &= 0xffffffffUL; | ||
105 | imm &= 0xffffffffUL; | ||
106 | } | ||
107 | if (instr & 1) | ||
108 | regs->link = regs->nip; | ||
109 | if (branch_taken(instr, regs)) | ||
110 | regs->nip = imm; | ||
111 | return 1; | ||
112 | case 0x24: /* rfid, scary */ | ||
113 | return -1; | ||
114 | } | ||
115 | case 31: | ||
116 | rd = (instr >> 21) & 0x1f; | ||
117 | switch (instr & 0x7fe) { | ||
118 | case 0xa6: /* mfmsr */ | ||
119 | regs->gpr[rd] = regs->msr & MSR_MASK; | ||
120 | regs->nip += 4; | ||
121 | if ((regs->msr & MSR_SF) == 0) | ||
122 | regs->nip &= 0xffffffffUL; | ||
123 | return 1; | ||
124 | case 0x164: /* mtmsrd */ | ||
125 | /* only MSR_EE and MSR_RI get changed if bit 15 set */ | ||
126 | /* mtmsrd doesn't change MSR_HV and MSR_ME */ | ||
127 | imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL; | ||
128 | imm = (regs->msr & MSR_MASK & ~imm) | ||
129 | | (regs->gpr[rd] & imm); | ||
130 | if ((imm & MSR_RI) == 0) | ||
131 | /* can't step mtmsrd that would clear MSR_RI */ | ||
132 | return -1; | ||
133 | regs->msr = imm; | ||
134 | regs->nip += 4; | ||
135 | if ((imm & MSR_SF) == 0) | ||
136 | regs->nip &= 0xffffffffUL; | ||
137 | return 1; | ||
138 | } | ||
139 | } | ||
140 | return 0; | ||
141 | } | ||
diff --git a/arch/powerpc/lib/strcase.c b/arch/powerpc/lib/strcase.c new file mode 100644 index 000000000000..36b521091bbc --- /dev/null +++ b/arch/powerpc/lib/strcase.c | |||
@@ -0,0 +1,23 @@ | |||
1 | #include <linux/ctype.h> | ||
2 | |||
/*
 * Case-insensitive string comparison.
 * Returns <0, 0 or >0 as s1 sorts before, equal to, or after s2.
 *
 * Each byte is converted through (unsigned char) before being handed to
 * tolower(): passing a plain (possibly negative) char to the <ctype.h>
 * functions is undefined behaviour on platforms where char is signed.
 */
int strcasecmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower((unsigned char)*s1++);
		c2 = tolower((unsigned char)*s2++);
	} while (c1 == c2 && c1 != 0);
	return c1 - c2;
}
13 | |||
/*
 * Case-insensitive comparison of at most n characters.
 * Returns <0, 0 or >0; comparing zero characters yields 0, matching the
 * standard strncasecmp semantics (the original returned the difference
 * of the first byte pair even when n == 0).
 *
 * Bytes go through (unsigned char) before tolower() to avoid undefined
 * behaviour for negative char values.
 */
int strncasecmp(const char *s1, const char *s2, int n)
{
	int c1, c2;

	if (n <= 0)
		return 0;
	do {
		c1 = tolower((unsigned char)*s1++);
		c2 = tolower((unsigned char)*s2++);
	} while ((--n > 0) && c1 == c2 && c1 != 0);
	return c1 - c2;
}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S new file mode 100644 index 000000000000..15d40e9ef8b1 --- /dev/null +++ b/arch/powerpc/lib/string.S | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * String handling functions for PowerPC. | ||
3 | * | ||
4 | * Copyright (C) 1996 Paul Mackerras. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/config.h> | ||
12 | #include <asm/processor.h> | ||
13 | #include <asm/errno.h> | ||
14 | #include <asm/ppc_asm.h> | ||
15 | |||
16 | .text | ||
17 | .stabs "arch/powerpc/lib/",N_SO,0,0,0f | ||
18 | .stabs "string.S",N_SO,0,0,0f | ||
19 | 0: | ||
20 | |||
21 | .section __ex_table,"a" | ||
22 | #ifdef CONFIG_PPC64 | ||
23 | .align 3 | ||
24 | #define EXTBL .llong | ||
25 | #else | ||
26 | .align 2 | ||
27 | #define EXTBL .long | ||
28 | #endif | ||
29 | .text | ||
30 | |||
/* strcpy(dst=r3, src=r4): copy NUL-terminated string including the NUL.
 * r3 is untouched and so is returned as the destination pointer. */
_GLOBAL(strcpy)
	addi	r5,r3,-1	/* pre-decrement cursors for lbzu/stbu */
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)	/* load next source byte */
	cmpwi	0,r0,0
	stbu	r0,1(r5)	/* store it (including the final NUL) */
	bne	1b
	blr
39 | |||
/* strncpy(dst=r3, src=r4, n=r5).
   This clears out any unused part of the destination buffer,
   just as the libc version does. -- paulus */
_GLOBAL(strncpy)
	cmpwi	0,r5,0
	beqlr			/* n == 0: nothing to do */
	mtctr	r5
	addi	r6,r3,-1	/* r6 walks dst so r3 stays as return value */
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	bnelr			/* if we didn't hit a null char, we're done */
	mfctr	r5
	cmpwi	0,r5,0		/* any space left in destination buffer? */
	beqlr			/* we know r0 == 0 here */
2:	stbu	r0,1(r6)	/* clear it out if so */
	bdnz	2b
	blr
59 | |||
/* strcat(dst=r3, src=r4): append src (with its NUL) at the end of dst.
 * Returns dst unchanged in r3. */
_GLOBAL(strcat)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)	/* scan dst for its terminating NUL */
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1	/* back up so the NUL gets overwritten */
1:	lbzu	r0,1(r4)	/* copy src, including its NUL */
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr
72 | |||
/* strcmp(s1=r3, s2=r4): returns <0/0/>0 byte difference in r3. */
_GLOBAL(strcmp)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)	/* next byte of s1 (clobbers r3) */
	cmpwi	1,r3,0		/* cr1: end of s1? */
	lbzu	r0,1(r4)	/* next byte of s2 */
	subf.	r3,r0,r3	/* r3 = byte1 - byte2, cr0 set */
	beqlr	1		/* s1 ended: return the difference */
	beq	1b		/* bytes equal: keep scanning */
	blr			/* differ: difference already in r3 */
83 | |||
/* strlen(s=r3): returns length (not counting the NUL) in r3. */
_GLOBAL(strlen)
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)	/* scan forward for the NUL terminator */
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4	/* length = &NUL - start */
	blr
91 | |||
/* memcmp(s1=r3, s2=r4, n=r5): returns <0/0/>0 in r3; n <= 0 compares
 * equal (returns 0). */
_GLOBAL(memcmp)
	cmpwi	0,r5,0
	ble-	2f		/* n <= 0: equal by definition */
	mtctr	r5
	addi	r6,r3,-1	/* r6 walks s1; r3 will hold the result */
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3	/* r3 = byte1 - byte2 */
	bdnzt	2,1b		/* continue while equal and count remains */
	blr			/* r3 = last difference (0 if all equal) */
2:	li	r3,0
	blr
105 | |||
/* memchr(s=r3, c=r4, n=r5): returns pointer to first matching byte in
 * r3, or NULL.  The loaded byte is compared against the whole of r4
 * with cmpw, so c is assumed to already be in the range 0..255. */
_GLOBAL(memchr)
	cmpwi	0,r5,0
	ble-	2f		/* n <= 0: not found */
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	bdnzf	2,1b		/* loop while count left and no match */
	beqlr			/* matched: r3 points at the byte */
2:	li	r3,0		/* not found: return NULL */
	blr
117 | |||
/* __clear_user(to=r3, n=r4): zero n bytes of user memory.
 * Returns the number of bytes NOT cleared in r3 (0 on success).
 * Faults on the stores are recovered through the __ex_table fixups
 * at labels 90/91/92 below. */
_GLOBAL(__clear_user)
	addi	r6,r3,-4
	li	r3,0
	li	r5,0		/* the zero we store */
	cmplwi	0,r4,4
	blt	7f		/* fewer than 4 bytes: byte loop only */
/* clear a single word */
11:	stwu	r5,4(r6)
	beqlr
/* clear word sized chunks */
	andi.	r0,r6,3		/* realign r6 to a word boundary */
	add	r4,r0,r4
	subf	r6,r0,r6
	srwi	r0,r4,2		/* r0 = whole words, r4 = leftover bytes */
	andi.	r4,r4,3
	mtctr	r0
	bdz	7f
1:	stwu	r5,4(r6)
	bdnz	1b
/* clear byte sized chunks */
7:	cmpwi	0,r4,0
	beqlr
	mtctr	r4
	addi	r6,r6,3
8:	stbu	r5,1(r6)
	bdnz	8b
	blr
90:	mr	r3,r4		/* fault on first word: nothing cleared */
	blr
91:	mfctr	r3		/* fault in word loop: 4*words left + tail */
	slwi	r3,r3,2
	add	r3,r3,r4
	blr
92:	mfctr	r3		/* fault in byte loop: bytes left */
	blr

	.section __ex_table,"a"
	EXTBL	11b,90b
	EXTBL	1b,91b
	EXTBL	8b,92b
	.text
159 | |||
/* __strncpy_from_user(dst=r3, user src=r4, max=r5).
 * Returns in r3 the number of bytes copied (not counting a terminating
 * NUL), or -EFAULT if a load from user space faulted. */
_GLOBAL(__strncpy_from_user)
	addi	r6,r3,-1
	addi	r4,r4,-1
	cmpwi	0,r5,0
	beq	2f		/* max == 0: copied nothing */
	mtctr	r5
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	beq	3f		/* hit the NUL: don't count it */
2:	addi	r6,r6,1
3:	subf	r3,r3,r6	/* bytes stored, minus the NUL if seen */
	blr
99:	li	r3,-EFAULT	/* fault reading user memory */
	blr

	.section __ex_table,"a"
	EXTBL	1b,99b
	.text
180 | |||
/* r3 = str, r4 = len (> 0), r5 = top (highest addr)
 * Returns in r3: length including NUL if found, len+1 if no NUL within
 * len bytes, or 0 if a load faulted (bad address). */
_GLOBAL(__strnlen_user)
	addi	r7,r3,-1
	subf	r6,r7,r5	/* top+1 - str */
	cmplw	0,r4,r6
	bge	0f
	mr	r6,r4
0:	mtctr	r6		/* ctr = min(len, top - str) */
1:	lbzu	r0,1(r7)	/* get next byte */
	cmpwi	0,r0,0
	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
	addi	r7,r7,1
	subf	r3,r3,r7	/* number of bytes we have looked at */
	beqlr			/* return if we found a 0 byte */
	cmpw	0,r3,r4		/* did we look at all len bytes? */
	blt	99f		/* if not, must have hit top */
	addi	r3,r4,1		/* return len + 1 to indicate no null found */
	blr
99:	li	r3,0		/* bad address, return 0 */
	blr

	.section __ex_table,"a"
	EXTBL	1b,99b
diff --git a/arch/powerpc/lib/usercopy.c b/arch/powerpc/lib/usercopy.c new file mode 100644 index 000000000000..5eea6f3c1e03 --- /dev/null +++ b/arch/powerpc/lib/usercopy.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Functions which are too large to be inlined. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | #include <linux/module.h> | ||
10 | #include <asm/uaccess.h> | ||
11 | |||
12 | unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) | ||
13 | { | ||
14 | if (likely(access_ok(VERIFY_READ, from, n))) | ||
15 | n = __copy_from_user(to, from, n); | ||
16 | else | ||
17 | memset(to, 0, n); | ||
18 | return n; | ||
19 | } | ||
20 | |||
21 | unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) | ||
22 | { | ||
23 | if (likely(access_ok(VERIFY_WRITE, to, n))) | ||
24 | n = __copy_to_user(to, from, n); | ||
25 | return n; | ||
26 | } | ||
27 | |||
28 | unsigned long copy_in_user(void __user *to, const void __user *from, | ||
29 | unsigned long n) | ||
30 | { | ||
31 | might_sleep(); | ||
32 | if (likely(access_ok(VERIFY_READ, from, n) && | ||
33 | access_ok(VERIFY_WRITE, to, n))) | ||
34 | n =__copy_tofrom_user(to, from, n); | ||
35 | return n; | ||
36 | } | ||
37 | |||
38 | EXPORT_SYMBOL(copy_from_user); | ||
39 | EXPORT_SYMBOL(copy_to_user); | ||
40 | EXPORT_SYMBOL(copy_in_user); | ||
41 | |||
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c new file mode 100644 index 000000000000..3d79ce281b67 --- /dev/null +++ b/arch/powerpc/mm/44x_mmu.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Modifications by Matt Porter (mporter@mvista.com) to support | ||
3 | * PPC44x Book E processors. | ||
4 | * | ||
5 | * This file contains the routines for initializing the MMU | ||
6 | * on the 4xx series of chips. | ||
7 | * -- paulus | ||
8 | * | ||
9 | * Derived from arch/ppc/mm/init.c: | ||
10 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
11 | * | ||
12 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
13 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
14 | * Copyright (C) 1996 Paul Mackerras | ||
15 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
16 | * | ||
17 | * Derived from "arch/i386/mm/init.c" | ||
18 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or | ||
21 | * modify it under the terms of the GNU General Public License | ||
22 | * as published by the Free Software Foundation; either version | ||
23 | * 2 of the License, or (at your option) any later version. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/config.h> | ||
28 | #include <linux/signal.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/ptrace.h> | ||
35 | #include <linux/mman.h> | ||
36 | #include <linux/mm.h> | ||
37 | #include <linux/swap.h> | ||
38 | #include <linux/stddef.h> | ||
39 | #include <linux/vmalloc.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/highmem.h> | ||
43 | |||
44 | #include <asm/pgalloc.h> | ||
45 | #include <asm/prom.h> | ||
46 | #include <asm/io.h> | ||
47 | #include <asm/mmu_context.h> | ||
48 | #include <asm/pgtable.h> | ||
49 | #include <asm/mmu.h> | ||
50 | #include <asm/uaccess.h> | ||
51 | #include <asm/smp.h> | ||
52 | #include <asm/bootx.h> | ||
53 | #include <asm/machdep.h> | ||
54 | #include <asm/setup.h> | ||
55 | |||
56 | #include "mmu_decl.h" | ||
57 | |||
58 | extern char etext[], _stext[]; | ||
59 | |||
60 | /* Used by the 44x TLB replacement exception handler. | ||
61 | * Just needed it declared someplace. | ||
62 | */ | ||
63 | unsigned int tlb_44x_index = 0; | ||
64 | unsigned int tlb_44x_hwater = 62; | ||
65 | |||
66 | /* | ||
67 | * "Pins" a 256MB TLB entry in AS0 for kernel lowmem | ||
68 | */ | ||
69 | static void __init | ||
70 | ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys) | ||
71 | { | ||
72 | unsigned long attrib = 0; | ||
73 | |||
74 | __asm__ __volatile__("\ | ||
75 | clrrwi %2,%2,10\n\ | ||
76 | ori %2,%2,%4\n\ | ||
77 | clrrwi %1,%1,10\n\ | ||
78 | li %0,0\n\ | ||
79 | ori %0,%0,%5\n\ | ||
80 | tlbwe %2,%3,%6\n\ | ||
81 | tlbwe %1,%3,%7\n\ | ||
82 | tlbwe %0,%3,%8" | ||
83 | : | ||
84 | : "r" (attrib), "r" (phys), "r" (virt), "r" (slot), | ||
85 | "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M), | ||
86 | "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), | ||
87 | "i" (PPC44x_TLB_PAGEID), | ||
88 | "i" (PPC44x_TLB_XLAT), | ||
89 | "i" (PPC44x_TLB_ATTRIB)); | ||
90 | } | ||
91 | |||
92 | /* | ||
93 | * MMU_init_hw does the chip-specific initialization of the MMU hardware. | ||
94 | */ | ||
95 | void __init MMU_init_hw(void) | ||
96 | { | ||
97 | flush_instruction_cache(); | ||
98 | } | ||
99 | |||
100 | unsigned long __init mmu_mapin_ram(void) | ||
101 | { | ||
102 | unsigned int pinned_tlbs = 1; | ||
103 | int i; | ||
104 | |||
105 | /* Determine number of entries necessary to cover lowmem */ | ||
106 | pinned_tlbs = (unsigned int) | ||
107 | (_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT); | ||
108 | |||
109 | /* Write upper watermark to save location */ | ||
110 | tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs; | ||
111 | |||
112 | /* If necessary, set additional pinned TLBs */ | ||
113 | if (pinned_tlbs > 1) | ||
114 | for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) { | ||
115 | unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE; | ||
116 | ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr); | ||
117 | } | ||
118 | |||
119 | return total_lowmem; | ||
120 | } | ||
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c new file mode 100644 index 000000000000..b7bcbc232f39 --- /dev/null +++ b/arch/powerpc/mm/4xx_mmu.c | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * This file contains the routines for initializing the MMU | ||
3 | * on the 4xx series of chips. | ||
4 | * -- paulus | ||
5 | * | ||
6 | * Derived from arch/ppc/mm/init.c: | ||
7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
8 | * | ||
9 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
10 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
11 | * Copyright (C) 1996 Paul Mackerras | ||
12 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
13 | * | ||
14 | * Derived from "arch/i386/mm/init.c" | ||
15 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <linux/signal.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/ptrace.h> | ||
32 | #include <linux/mman.h> | ||
33 | #include <linux/mm.h> | ||
34 | #include <linux/swap.h> | ||
35 | #include <linux/stddef.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/highmem.h> | ||
40 | |||
41 | #include <asm/pgalloc.h> | ||
42 | #include <asm/prom.h> | ||
43 | #include <asm/io.h> | ||
44 | #include <asm/mmu_context.h> | ||
45 | #include <asm/pgtable.h> | ||
46 | #include <asm/mmu.h> | ||
47 | #include <asm/uaccess.h> | ||
48 | #include <asm/smp.h> | ||
49 | #include <asm/bootx.h> | ||
50 | #include <asm/machdep.h> | ||
51 | #include <asm/setup.h> | ||
52 | #include "mmu_decl.h" | ||
53 | |||
54 | extern int __map_without_ltlbs; | ||
55 | /* | ||
56 | * MMU_init_hw does the chip-specific initialization of the MMU hardware. | ||
57 | */ | ||
58 | void __init MMU_init_hw(void) | ||
59 | { | ||
60 | /* | ||
61 | * The Zone Protection Register (ZPR) defines how protection will | ||
62 | * be applied to every page which is a member of a given zone. At | ||
63 | * present, we utilize only two of the 4xx's zones. | ||
64 | * The zone index bits (of ZSEL) in the PTE are used for software | ||
65 | * indicators, except the LSB. For user access, zone 1 is used, | ||
66 | * for kernel access, zone 0 is used. We set all but zone 1 | ||
67 | * to zero, allowing only kernel access as indicated in the PTE. | ||
68 | * For zone 1, we set a 01 binary (a value of 10 will not work) | ||
69 | * to allow user access as indicated in the PTE. This also allows | ||
70 | * kernel access as indicated in the PTE. | ||
71 | */ | ||
72 | |||
73 | mtspr(SPRN_ZPR, 0x10000000); | ||
74 | |||
75 | flush_instruction_cache(); | ||
76 | |||
77 | /* | ||
78 | * Set up the real-mode cache parameters for the exception vector | ||
79 | * handlers (which are run in real-mode). | ||
80 | */ | ||
81 | |||
82 | mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */ | ||
83 | |||
84 | /* | ||
85 | * Cache instruction and data space where the exception | ||
86 | * vectors and the kernel live in real-mode. | ||
87 | */ | ||
88 | |||
89 | mtspr(SPRN_DCCR, 0xF0000000); /* 512 MB of data space at 0x0. */ | ||
90 | mtspr(SPRN_ICCR, 0xF0000000); /* 512 MB of instr. space at 0x0. */ | ||
91 | } | ||
92 | |||
93 | #define LARGE_PAGE_SIZE_16M (1<<24) | ||
94 | #define LARGE_PAGE_SIZE_4M (1<<22) | ||
95 | |||
96 | unsigned long __init mmu_mapin_ram(void) | ||
97 | { | ||
98 | unsigned long v, s; | ||
99 | phys_addr_t p; | ||
100 | |||
101 | v = KERNELBASE; | ||
102 | p = PPC_MEMSTART; | ||
103 | s = 0; | ||
104 | |||
105 | if (__map_without_ltlbs) { | ||
106 | return s; | ||
107 | } | ||
108 | |||
109 | while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) { | ||
110 | pmd_t *pmdp; | ||
111 | unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE; | ||
112 | |||
113 | spin_lock(&init_mm.page_table_lock); | ||
114 | pmdp = pmd_offset(pgd_offset_k(v), v); | ||
115 | pmd_val(*pmdp++) = val; | ||
116 | pmd_val(*pmdp++) = val; | ||
117 | pmd_val(*pmdp++) = val; | ||
118 | pmd_val(*pmdp++) = val; | ||
119 | spin_unlock(&init_mm.page_table_lock); | ||
120 | |||
121 | v += LARGE_PAGE_SIZE_16M; | ||
122 | p += LARGE_PAGE_SIZE_16M; | ||
123 | s += LARGE_PAGE_SIZE_16M; | ||
124 | } | ||
125 | |||
126 | while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) { | ||
127 | pmd_t *pmdp; | ||
128 | unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE; | ||
129 | |||
130 | spin_lock(&init_mm.page_table_lock); | ||
131 | pmdp = pmd_offset(pgd_offset_k(v), v); | ||
132 | pmd_val(*pmdp) = val; | ||
133 | spin_unlock(&init_mm.page_table_lock); | ||
134 | |||
135 | v += LARGE_PAGE_SIZE_4M; | ||
136 | p += LARGE_PAGE_SIZE_4M; | ||
137 | s += LARGE_PAGE_SIZE_4M; | ||
138 | } | ||
139 | |||
140 | return s; | ||
141 | } | ||
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile new file mode 100644 index 000000000000..9f52c26acd86 --- /dev/null +++ b/arch/powerpc/mm/Makefile | |||
@@ -0,0 +1,12 @@ | |||
#
# Makefile for the linux ppc-specific parts of the memory manager.
#

# Built for every powerpc kernel.
obj-y				:= fault.o mem.o
# 32-bit-only core MM code.
obj-$(CONFIG_PPC32)		+= init.o pgtable.o mmu_context.o \
				   mem_pieces.o tlb.o
# 64-bit-only core MM code.
obj-$(CONFIG_PPC64)		+= init64.o pgtable64.o mmu_context64.o
# Classic 32-bit hash-table MMU support.
obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu.o hash_32.o
# Software-loaded-TLB processor families.
obj-$(CONFIG_40x)		+= 4xx_mmu.o
obj-$(CONFIG_44x)		+= 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE)		+= fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c new file mode 100644 index 000000000000..3df641fa789d --- /dev/null +++ b/arch/powerpc/mm/fault.c | |||
@@ -0,0 +1,391 @@ | |||
1 | /* | ||
2 | * arch/ppc/mm/fault.c | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Derived from "arch/i386/mm/fault.c" | ||
8 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
9 | * | ||
10 | * Modified by Cort Dougan and Paul Mackerras. | ||
11 | * | ||
12 | * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com) | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | */ | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/string.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/ptrace.h> | ||
28 | #include <linux/mman.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/highmem.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/kprobes.h> | ||
34 | |||
35 | #include <asm/page.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/mmu.h> | ||
38 | #include <asm/mmu_context.h> | ||
39 | #include <asm/system.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <asm/tlbflush.h> | ||
42 | #include <asm/kdebug.h> | ||
43 | #include <asm/siginfo.h> | ||
44 | |||
45 | /* | ||
46 | * Check whether the instruction at regs->nip is a store using | ||
47 | * an update addressing form which will update r1. | ||
48 | */ | ||
49 | static int store_updates_sp(struct pt_regs *regs) | ||
50 | { | ||
51 | unsigned int inst; | ||
52 | |||
53 | if (get_user(inst, (unsigned int __user *)regs->nip)) | ||
54 | return 0; | ||
55 | /* check for 1 in the rA field */ | ||
56 | if (((inst >> 16) & 0x1f) != 1) | ||
57 | return 0; | ||
58 | /* check major opcode */ | ||
59 | switch (inst >> 26) { | ||
60 | case 37: /* stwu */ | ||
61 | case 39: /* stbu */ | ||
62 | case 45: /* sthu */ | ||
63 | case 53: /* stfsu */ | ||
64 | case 55: /* stfdu */ | ||
65 | return 1; | ||
66 | case 62: /* std or stdu */ | ||
67 | return (inst & 3) == 1; | ||
68 | case 31: | ||
69 | /* check minor opcode */ | ||
70 | switch ((inst >> 1) & 0x3ff) { | ||
71 | case 181: /* stdux */ | ||
72 | case 183: /* stwux */ | ||
73 | case 247: /* stbux */ | ||
74 | case 439: /* sthux */ | ||
75 | case 695: /* stfsux */ | ||
76 | case 759: /* stfdux */ | ||
77 | return 1; | ||
78 | } | ||
79 | } | ||
80 | return 0; | ||
81 | } | ||
82 | |||
/*
 * Handle a DABR (data address breakpoint register) match: give
 * in-kernel debuggers first crack at it, then disarm the breakpoint
 * and report it to the task as a SIGTRAP hardware-breakpoint trap.
 */
static void do_dabr(struct pt_regs *regs, unsigned long error_code)
{
	siginfo_t info;

	/* Let any registered die-notifier (e.g. kprobes/kgdb) claim it first. */
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR so we don't immediately re-trap. */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)regs->nip;
	force_sig_info(SIGTRAP, &info, current);
}
104 | |||
/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int code = SEGV_MAPERR;
	int is_write = 0;
	int trap = TRAP(regs);		/* exception vector: 0x300 data, 0x400 instruction */
	int is_exec = trap == 0x400;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/*
	 * Fortunately the bit assignments in SRR1 for an instruction
	 * fault and DSISR for a data fault are mostly the same for the
	 * bits we are interested in.  But there are some bits which
	 * indicate errors in DSISR but can validly be set in SRR1.
	 */
	if (trap == 0x400)
		error_code &= 0x48200000;
	else
		is_write = error_code & DSISR_ISSTORE;
#else
	is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */

	/* Let die-notifier chains (kprobes etc.) claim the fault first. */
	if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
				11, SIGSEGV) == NOTIFY_STOP)
		return 0;

	/* 0x300 is a data access fault; give an attached debugger a look. */
	if (trap == 0x300) {
		if (debugger_fault_handler(regs))
			return 0;
	}

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (!user_mode(regs) && (address >= TASK_SIZE))
		return SIGSEGV;

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	if (error_code & DSISR_DABRMATCH) {
		/* DABR match */
		do_dabr(regs, error_code);
		return 0;
	}
#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/

	/* Faults are unhandleable from atomic context or without an mm. */
	if (in_atomic() || mm == NULL) {
		if (!user_mode(regs))
			return SIGSEGV;
		/* in_atomic() in user mode is really bad,
		   as is current->mm == NULL. */
		printk(KERN_EMERG "Page fault in user mode with"
		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
		printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
		       regs->nip, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunatly, in the case of an
	 * erroneous fault occuring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibilty of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->nip))
			goto bad_area_nosemaphore;

		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	/* Below here: the fault is just under a stack vma — decide whether
	 * it is a legitimate stack extension. */
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1]
		    && (!user_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
	if (error_code & 0x95700000)
		/* an error such as lwarx to I/O controller space,
		   address matching DABR, eciwx, etc. */
		goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
	/* The MPC8xx seems to always set 0x80000000, which is
	 * "undefined".  Of those that can be set, this is the only
	 * one which seems bad.
	 */
	if (error_code & 0x10000000)
		/* Guarded storage error. */
		goto bad_area;
#endif /* CONFIG_8xx */

	if (is_exec) {
#ifdef CONFIG_PPC64
		/* protection fault */
		if (error_code & DSISR_PROTFAULT)
			goto bad_area;
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
#endif
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		pte_t *ptep;

		/* Since 4xx/Book-E supports per-page execute permission,
		 * we lazily flush dcache to icache. */
		ptep = NULL;
		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
			struct page *page = pte_page(*ptep);

			/* PG_arch_1 records that the icache is already in
			 * sync for this page, so we only flush once. */
			if (! test_bit(PG_arch_1, &page->flags)) {
				flush_dcache_icache_page(page);
				set_bit(PG_arch_1, &page->flags);
			}
			pte_update(ptep, 0, _PAGE_HWEXEC);
			_tlbie(address);
			pte_unmap(ptep);
			up_read(&mm->mmap_sem);
			return 0;
		}
		if (ptep != NULL)
			pte_unmap(ptep);
#endif
	/* a write */
	} else if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	/* a read */
	} else {
		/* protection fault */
		if (error_code & 0x08000000)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
 survive:
	switch (handle_mm_fault(mm, vma, address, is_write)) {

	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return 0;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		return 0;
	}

	if (is_exec && (error_code & DSISR_PROTFAULT)
	    && printk_ratelimit())
		printk(KERN_CRIT "kernel tried to execute NX-protected"
		       " page (%lx) - exploit attempt? (uid: %d)\n",
		       address, current->uid);

	return SIGSEGV;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		/* Never OOM-kill init: wait for memory and retry. */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	return SIGKILL;

do_sigbus:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return 0;
	}
	return SIGBUS;
}
373 | |||
374 | /* | ||
375 | * bad_page_fault is called when we have a bad access from the kernel. | ||
376 | * It is called from the DSI and ISI handlers in head.S and from some | ||
377 | * of the procedures in traps.c. | ||
378 | */ | ||
379 | void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | ||
380 | { | ||
381 | const struct exception_table_entry *entry; | ||
382 | |||
383 | /* Are we prepared to handle this fault? */ | ||
384 | if ((entry = search_exception_tables(regs->nip)) != NULL) { | ||
385 | regs->nip = entry->fixup; | ||
386 | return; | ||
387 | } | ||
388 | |||
389 | /* kernel has accessed a bad area */ | ||
390 | die("Kernel access of bad area", regs, sig); | ||
391 | } | ||
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c new file mode 100644 index 000000000000..af9ca0eb6d55 --- /dev/null +++ b/arch/powerpc/mm/fsl_booke_mmu.c | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Modifications by Kumar Gala (kumar.gala@freescale.com) to support | ||
3 | * E500 Book E processors. | ||
4 | * | ||
5 | * Copyright 2004 Freescale Semiconductor, Inc | ||
6 | * | ||
7 | * This file contains the routines for initializing the MMU | ||
8 | * on the 4xx series of chips. | ||
9 | * -- paulus | ||
10 | * | ||
11 | * Derived from arch/ppc/mm/init.c: | ||
12 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
13 | * | ||
14 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
15 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
16 | * Copyright (C) 1996 Paul Mackerras | ||
17 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
18 | * | ||
19 | * Derived from "arch/i386/mm/init.c" | ||
20 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
21 | * | ||
22 | * This program is free software; you can redistribute it and/or | ||
23 | * modify it under the terms of the GNU General Public License | ||
24 | * as published by the Free Software Foundation; either version | ||
25 | * 2 of the License, or (at your option) any later version. | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/config.h> | ||
30 | #include <linux/signal.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/types.h> | ||
36 | #include <linux/ptrace.h> | ||
37 | #include <linux/mman.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/swap.h> | ||
40 | #include <linux/stddef.h> | ||
41 | #include <linux/vmalloc.h> | ||
42 | #include <linux/init.h> | ||
43 | #include <linux/delay.h> | ||
44 | #include <linux/highmem.h> | ||
45 | |||
46 | #include <asm/pgalloc.h> | ||
47 | #include <asm/prom.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/mmu_context.h> | ||
50 | #include <asm/pgtable.h> | ||
51 | #include <asm/mmu.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/smp.h> | ||
54 | #include <asm/bootx.h> | ||
55 | #include <asm/machdep.h> | ||
56 | #include <asm/setup.h> | ||
57 | |||
58 | extern void loadcam_entry(unsigned int index); | ||
59 | unsigned int tlbcam_index; | ||
60 | unsigned int num_tlbcam_entries; | ||
61 | static unsigned long __cam0, __cam1, __cam2; | ||
62 | extern unsigned long total_lowmem; | ||
63 | extern unsigned long __max_low_memory; | ||
64 | #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE | ||
65 | |||
66 | #define NUM_TLBCAMS (16) | ||
67 | |||
68 | struct tlbcam { | ||
69 | u32 MAS0; | ||
70 | u32 MAS1; | ||
71 | u32 MAS2; | ||
72 | u32 MAS3; | ||
73 | u32 MAS7; | ||
74 | } TLBCAM[NUM_TLBCAMS]; | ||
75 | |||
76 | struct tlbcamrange { | ||
77 | unsigned long start; | ||
78 | unsigned long limit; | ||
79 | phys_addr_t phys; | ||
80 | } tlbcam_addrs[NUM_TLBCAMS]; | ||
81 | |||
82 | extern unsigned int tlbcam_index; | ||
83 | |||
84 | /* | ||
85 | * Return PA for this VA if it is mapped by a CAM, or 0 | ||
86 | */ | ||
87 | unsigned long v_mapped_by_tlbcam(unsigned long va) | ||
88 | { | ||
89 | int b; | ||
90 | for (b = 0; b < tlbcam_index; ++b) | ||
91 | if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) | ||
92 | return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Return VA for a given PA or 0 if not mapped | ||
98 | */ | ||
99 | unsigned long p_mapped_by_tlbcam(unsigned long pa) | ||
100 | { | ||
101 | int b; | ||
102 | for (b = 0; b < tlbcam_index; ++b) | ||
103 | if (pa >= tlbcam_addrs[b].phys | ||
104 | && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) | ||
105 | +tlbcam_addrs[b].phys) | ||
106 | return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
/*
 * Set up one of the pinned TLB1 ("CAM") entries: a protected mapping
 * of @size bytes from physical @phys to virtual @virt for address
 * space @pid, with attributes derived from the Linux page @flags.
 * The parameters are not checked; in particular size must be a power
 * of 4 between 4k and 256M.
 */
void settlbcam(int index, unsigned long virt, phys_addr_t phys,
		unsigned int size, int flags, unsigned int pid)
{
	unsigned int tsize, lz;

	/* Derive the MAS1 TSIZE encoding from the position of size's
	 * leading set bit (cntlzw = count leading zeros). */
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	tsize = (21 - lz) / 2;

#ifdef CONFIG_SMP
	/* Cacheable mappings must be memory-coherent on SMP. */
	if ((flags & _PAGE_NO_CACHE) == 0)
		flags |= _PAGE_COHERENT;
#endif

	/* Select TLB1 entry @index; IPROT protects it from invalidation. */
	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
	TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
	TLBCAM[index].MAS2 = virt & PAGE_MASK;

	/* Translate Linux page flags into the WIMGE attribute bits. */
	TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;

	/* Supervisor permissions: always execute + read, write if _PAGE_RW. */
	TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);

#ifndef CONFIG_KGDB /* want user access for breakpoints */
	if (flags & _PAGE_USER) {
		TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
		TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
	}
#else
	TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
#endif

	/* Record the range so v/p_mapped_by_tlbcam() can translate it. */
	tlbcam_addrs[index].start = virt;
	tlbcam_addrs[index].limit = virt + size - 1;
	tlbcam_addrs[index].phys  = phys;

	loadcam_entry(index);
}
157 | |||
/* Invalidate the TLB1 (CAM) entry at @index by rewriting it without
 * MAS1_VALID. */
void invalidate_tlbcam_entry(int index)
{
	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
	/* NOTE(review): this stores ~MAS1_VALID (every bit except VALID
	 * set) rather than just clearing VALID; presumably the hardware
	 * only cares that VALID is 0 — confirm against the e500 manual. */
	TLBCAM[index].MAS1 = ~MAS1_VALID;

	loadcam_entry(index);
}
165 | |||
166 | void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1, | ||
167 | unsigned long cam2) | ||
168 | { | ||
169 | settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0); | ||
170 | tlbcam_index++; | ||
171 | if (cam1) { | ||
172 | tlbcam_index++; | ||
173 | settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0); | ||
174 | } | ||
175 | if (cam2) { | ||
176 | tlbcam_index++; | ||
177 | settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0); | ||
178 | } | ||
179 | } | ||
180 | |||
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* No hash table on Book-E; just make sure the icache is in sync
	 * with anything the boot path may have written. */
	flush_instruction_cache();
}
188 | |||
189 | unsigned long __init mmu_mapin_ram(void) | ||
190 | { | ||
191 | cam_mapin_ram(__cam0, __cam1, __cam2); | ||
192 | |||
193 | return __cam0 + __cam1 + __cam2; | ||
194 | } | ||
195 | |||
196 | |||
197 | void __init | ||
198 | adjust_total_lowmem(void) | ||
199 | { | ||
200 | unsigned long max_low_mem = MAX_LOW_MEM; | ||
201 | unsigned long cam_max = 0x10000000; | ||
202 | unsigned long ram; | ||
203 | |||
204 | /* adjust CAM size to max_low_mem */ | ||
205 | if (max_low_mem < cam_max) | ||
206 | cam_max = max_low_mem; | ||
207 | |||
208 | /* adjust lowmem size to max_low_mem */ | ||
209 | if (max_low_mem < total_lowmem) | ||
210 | ram = max_low_mem; | ||
211 | else | ||
212 | ram = total_lowmem; | ||
213 | |||
214 | /* Calculate CAM values */ | ||
215 | __cam0 = 1UL << 2 * (__ilog2(ram) / 2); | ||
216 | if (__cam0 > cam_max) | ||
217 | __cam0 = cam_max; | ||
218 | ram -= __cam0; | ||
219 | if (ram) { | ||
220 | __cam1 = 1UL << 2 * (__ilog2(ram) / 2); | ||
221 | if (__cam1 > cam_max) | ||
222 | __cam1 = cam_max; | ||
223 | ram -= __cam1; | ||
224 | } | ||
225 | if (ram) { | ||
226 | __cam2 = 1UL << 2 * (__ilog2(ram) / 2); | ||
227 | if (__cam2 > cam_max) | ||
228 | __cam2 = cam_max; | ||
229 | ram -= __cam2; | ||
230 | } | ||
231 | |||
232 | printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb," | ||
233 | " CAM2=%ldMb residual: %ldMb\n", | ||
234 | __cam0 >> 20, __cam1 >> 20, __cam2 >> 20, | ||
235 | (total_lowmem - __cam0 - __cam1 - __cam2) >> 20); | ||
236 | __max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2; | ||
237 | } | ||
diff --git a/arch/powerpc/mm/hash_32.S b/arch/powerpc/mm/hash_32.S new file mode 100644 index 000000000000..57278a8dd132 --- /dev/null +++ b/arch/powerpc/mm/hash_32.S | |||
@@ -0,0 +1,618 @@ | |||
1 | /* | ||
2 | * arch/ppc/kernel/hashtable.S | ||
3 | * | ||
4 | * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $ | ||
5 | * | ||
6 | * PowerPC version | ||
7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
8 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | ||
9 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | ||
10 | * Adapted for Power Macintosh by Paul Mackerras. | ||
11 | * Low-level exception handlers and MMU support | ||
12 | * rewritten by Paul Mackerras. | ||
13 | * Copyright (C) 1996 Paul Mackerras. | ||
14 | * | ||
15 | * This file contains low-level assembler routines for managing | ||
16 | * the PowerPC MMU hash table. (PPC 8xx processors don't use a | ||
17 | * hash table, so this file is not used on them.) | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License | ||
21 | * as published by the Free Software Foundation; either version | ||
22 | * 2 of the License, or (at your option) any later version. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <asm/processor.h> | ||
28 | #include <asm/page.h> | ||
29 | #include <asm/pgtable.h> | ||
30 | #include <asm/cputable.h> | ||
31 | #include <asm/ppc_asm.h> | ||
32 | #include <asm/thread_info.h> | ||
33 | #include <asm/asm-offsets.h> | ||
34 | |||
35 | #ifdef CONFIG_SMP | ||
36 | .comm mmu_hash_lock,4 | ||
37 | #endif /* CONFIG_SMP */ | ||
38 | |||
/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff		/* value to store as the lock token */
	b	10f
11:	lwz	r6,0(r8)		/* spin with plain loads while held */
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8			/* take the lock with a lwarx/ */
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8			/* stwcx. reservation pair */
	bne-	10b
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)		/* then release it immediately */
	blr
#endif
64 | |||
/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
	MTMSRD(r0)
	isync
#endif
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	/* Take mmu_hash_lock (same spin + lwarx/stwcx. pattern as
	 * hash_page_sync above). */
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 */
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)		/* create_hpte clobbers ctr; save it */
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	/* Release mmu_hash_lock. */
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	/* Failure path: drop mmu_hash_lock and return to the caller. */
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */
174 | |||
/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)		/* save LR in the caller's frame */

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)		/* spin until it looks free, */
	cmpi	0,r0,0
	beq	10b			/* then try the lwarx again */
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
	rlwimi	r8,r4,22,20,29		/* pte address: insert next 10 va bits
					   into pmdval (cf. hash_page) */
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync

	lwz	r0,4(r1)		/* restore LR and return */
	mtlr	r0
	blr
265 | |||
/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8		/* bytes per hardware PTE */
#define PTEG_SIZE	64		/* bytes per PTE group (8 PTEs) */
#define LG_PTEG_SIZE	6		/* log2(PTEG_SIZE) */
#define LDPTEu		lwzu		/* load PTE word with update */
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40		/* secondary-hash bit */
#define PTE_V		0x80000000	/* valid bit */
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#else
/* defines for the PTE format for 64-bit PPCs */
#define PTE_SIZE	16
#define PTEG_SIZE	128
#define LG_PTEG_SIZE	7
#define LDPTEu		ldu
#define STPTE		std
#define CMPPTE		cmpd
#define PTE_H		2
#define PTE_V		1
#define TST_V(r)	andi. r,r,PTE_V
#define SET_V(r)	ori r,r,PTE_V
#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
#endif /* CONFIG_PPC64BRIDGE */

/* rlwinm/rlwimi mask bounds used to fold a hash into a PTEG offset */
#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)

	/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r5,r3,12		/* shift vsid into position */
	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3).
	 * The addis/rlwimi/rlwinm below are the instructions patched at
	 * boot with the real Hash_base/Hash_bits (see comment above). */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4			/* flush any stale translation */

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* pre-decrement for LDPTEu */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
	addis	r4,r7,next_slot@ha	/* round-robin victim index */
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE
	andi.	r6,r6,7*PTE_SIZE	/* wrap within the 8-entry group */
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync			/* make sure pte updates get to memory */
	blr

	/* zero-initialized statistics / state shared by the code above */
	.comm	next_slot,4
	.comm	primary_pteg_full,4
	.comm	htab_hash_searches,4
474 | |||
/*
 * Flush the entries for a range of pages from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_pages)
	tophys(r7,0)		/* r7 = offset to make addresses physical */

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29	/* form address of linux pte from pmdval/va */
1:	lwz	r0,0(r5)
	cmpwi	cr1,r6,1	/* cr1: is this the last pte to check? */
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f		/* found one with HASHPTE set */
	ble	cr1,19f		/* none in range: restore MSR and return */
	addi	r4,r4,0x1000	/* advance va by one page */
	addi	r5,r5,4		/* advance to next linux pte */
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16	/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31	/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111	/* multiply by ESID skew */
	add	r3,r3,r0	/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r11,r3,7,1,24	/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31	/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8		/* reduce vsid to 24 bits */
	sldi	r11,r3,12	/* shift vsid into position */
	rlwimi	r11,r4,16,20,24	/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r11)		/* set V (valid) bit */

#ifdef CONFIG_SMP
	/* Take mmu_hash_lock, tagged with this cpu's number. */
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,18
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)	/* spin without a reservation */
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
33:	lwarx	r8,0,r5		/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f		/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29	/* clear HASHPTE bit */
	stwcx.	r8,0,r5		/* update the pte */
	bne-	33b		/* lost reservation: retry */

	/* Get the address of the primary PTE group in the hash table (r8).
	 * These are the boot-time-patched instructions (see create_hpte). */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8	/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8		/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE	/* pre-decrement for LDPTEu */
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b		/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H	/* set H (secondary hash) bit */
	li	r0,8		/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H	/* clear H again */
	bne-	4f		/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)	/* invalidate entry */
4:	sync
	tlbie	r4		/* in hw tlb too */
	sync

8:	ble	cr1,9f		/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,4		/* advance to next pte */
	addi	r4,r4,0x1000	/* advance va by one page */
	lwz	r0,0(r5)	/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b		/* this one needs flushing too */
	bgt	cr1,81b		/* skip ptes without _PAGE_HASHPTE */

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10		/* restore interrupts and data relocation */
	SYNC_601
	isync
	blr
diff --git a/arch/powerpc/mm/init.c b/arch/powerpc/mm/init.c new file mode 100644 index 000000000000..f4d983a6e521 --- /dev/null +++ b/arch/powerpc/mm/init.c | |||
@@ -0,0 +1,581 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
7 | * Copyright (C) 1996 Paul Mackerras | ||
8 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
9 | * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) | ||
10 | * | ||
11 | * Derived from "arch/i386/mm/init.c" | ||
12 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/config.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/string.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/stddef.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/bootmem.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/initrd.h> | ||
34 | #include <linux/pagemap.h> | ||
35 | |||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/prom.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/mmu.h> | ||
42 | #include <asm/smp.h> | ||
43 | #include <asm/machdep.h> | ||
44 | #include <asm/btext.h> | ||
45 | #include <asm/tlb.h> | ||
46 | #include <asm/bootinfo.h> | ||
47 | #include <asm/prom.h> | ||
48 | |||
49 | #include "mem_pieces.h" | ||
50 | #include "mmu_decl.h" | ||
51 | |||
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long total_memory;	/* all RAM, including highmem if any */
unsigned long total_lowmem;	/* RAM directly mapped (<= total_memory) */

unsigned long ppc_memstart;
unsigned long ppc_memoffset = PAGE_OFFSET;

int mem_init_done;		/* set at the end of mem_init() */
int init_bootmem_done;		/* set at the end of do_init_bootmem() */
int boot_mapsize;		/* size of the bootmem bitmap */
#ifdef CONFIG_PPC_PMAC
unsigned long agp_special_page;
#endif

/* linker-provided section boundaries */
extern char _end[];
extern char etext[], _stext[];
extern char __init_begin, __init_end;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;		/* kernel pte used for kmap_atomic */
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
#endif

void MMU_init(void);
void set_phys_avail(unsigned long total_ram);

/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];

char *klimit = _end;		/* first address beyond the kernel image */
struct mem_pieces phys_avail;	/* physical RAM not yet handed out */
struct device_node *memory_node;

/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of RAM to use */
unsigned long __max_memory;
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
109 | |||
110 | /* | ||
111 | * Read in a property describing some pieces of memory. | ||
112 | */ | ||
113 | static int __init get_mem_prop(char *name, struct mem_pieces *mp) | ||
114 | { | ||
115 | struct reg_property *rp; | ||
116 | int i, s; | ||
117 | unsigned int *ip; | ||
118 | int nac = prom_n_addr_cells(memory_node); | ||
119 | int nsc = prom_n_size_cells(memory_node); | ||
120 | |||
121 | ip = (unsigned int *) get_property(memory_node, name, &s); | ||
122 | if (ip == NULL) { | ||
123 | printk(KERN_ERR "error: couldn't get %s property on /memory\n", | ||
124 | name); | ||
125 | return 0; | ||
126 | } | ||
127 | s /= (nsc + nac) * 4; | ||
128 | rp = mp->regions; | ||
129 | for (i = 0; i < s; ++i, ip += nac+nsc) { | ||
130 | if (nac >= 2 && ip[nac-2] != 0) | ||
131 | continue; | ||
132 | rp->address = ip[nac-1]; | ||
133 | if (nsc >= 2 && ip[nac+nsc-2] != 0) | ||
134 | rp->size = ~0U; | ||
135 | else | ||
136 | rp->size = ip[nac+nsc-1]; | ||
137 | ++rp; | ||
138 | } | ||
139 | mp->n_regions = rp - mp->regions; | ||
140 | |||
141 | /* Make sure the pieces are sorted. */ | ||
142 | mem_pieces_sort(mp); | ||
143 | mem_pieces_coalesce(mp); | ||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | /* | ||
148 | * Collect information about physical RAM and which pieces are | ||
149 | * already in use from the device tree. | ||
150 | */ | ||
151 | unsigned long __init find_end_of_memory(void) | ||
152 | { | ||
153 | unsigned long a, total; | ||
154 | struct mem_pieces phys_mem; | ||
155 | |||
156 | /* | ||
157 | * Find out where physical memory is, and check that it | ||
158 | * starts at 0 and is contiguous. It seems that RAM is | ||
159 | * always physically contiguous on Power Macintoshes. | ||
160 | * | ||
161 | * Supporting discontiguous physical memory isn't hard, | ||
162 | * it just makes the virtual <-> physical mapping functions | ||
163 | * more complicated (or else you end up wasting space | ||
164 | * in mem_map). | ||
165 | */ | ||
166 | memory_node = find_devices("memory"); | ||
167 | if (memory_node == NULL || !get_mem_prop("reg", &phys_mem) | ||
168 | || phys_mem.n_regions == 0) | ||
169 | panic("No RAM??"); | ||
170 | a = phys_mem.regions[0].address; | ||
171 | if (a != 0) | ||
172 | panic("RAM doesn't start at physical address 0"); | ||
173 | total = phys_mem.regions[0].size; | ||
174 | |||
175 | if (phys_mem.n_regions > 1) { | ||
176 | printk("RAM starting at 0x%x is not contiguous\n", | ||
177 | phys_mem.regions[1].address); | ||
178 | printk("Using RAM from 0 to 0x%lx\n", total-1); | ||
179 | } | ||
180 | |||
181 | return total; | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Check for command-line options that affect what MMU_init will do. | ||
186 | */ | ||
187 | void MMU_setup(void) | ||
188 | { | ||
189 | /* Check for nobats option (used in mapin_ram). */ | ||
190 | if (strstr(cmd_line, "nobats")) { | ||
191 | __map_without_bats = 1; | ||
192 | } | ||
193 | |||
194 | if (strstr(cmd_line, "noltlbs")) { | ||
195 | __map_without_ltlbs = 1; | ||
196 | } | ||
197 | |||
198 | /* Look for mem= option on command line */ | ||
199 | if (strstr(cmd_line, "mem=")) { | ||
200 | char *p, *q; | ||
201 | unsigned long maxmem = 0; | ||
202 | |||
203 | for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) { | ||
204 | q = p + 4; | ||
205 | if (p > cmd_line && p[-1] != ' ') | ||
206 | continue; | ||
207 | maxmem = simple_strtoul(q, &q, 0); | ||
208 | if (*q == 'k' || *q == 'K') { | ||
209 | maxmem <<= 10; | ||
210 | ++q; | ||
211 | } else if (*q == 'm' || *q == 'M') { | ||
212 | maxmem <<= 20; | ||
213 | ++q; | ||
214 | } | ||
215 | } | ||
216 | __max_memory = maxmem; | ||
217 | } | ||
218 | } | ||
219 | |||
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* parse args from command line */
	MMU_setup();

	/*
	 * Figure out how much memory we have, how much
	 * is lowmem, and how much is highmem.  If we were
	 * passed the total memory size from the bootloader,
	 * just use it.
	 */
	if (boot_mem_size)
		total_memory = boot_mem_size;
	else
		total_memory = ppc_md.find_end_of_memory();

	/* honour any mem= limit from the command line */
	if (__max_memory && total_memory > __max_memory)
		total_memory = __max_memory;
	total_lowmem = total_memory;
#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
#ifndef CONFIG_HIGHMEM
		/* without highmem, memory above lowmem is unusable */
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
	set_phys_avail(total_lowmem);

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

#ifdef CONFIG_HIGHMEM
	ioremap_base = PKMAP_BASE;
#else
	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
	ioremap_bot = ioremap_base;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);
	if (ppc_md.setup_io_mappings)
		ppc_md.setup_io_mappings();

	/* Initialize the context management stuff */
	mmu_context_init();

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

#ifdef CONFIG_BOOTX_TEXT
	/* By default, we are no longer mapped */
	boot_text_mapped = 0;
	/* Must be done last, or ppc_md.progress will die. */
	map_boot_text();
#endif
}
297 | |||
298 | /* This is only called until mem_init is done. */ | ||
299 | void __init *early_get_page(void) | ||
300 | { | ||
301 | void *p; | ||
302 | |||
303 | if (init_bootmem_done) { | ||
304 | p = alloc_bootmem_pages(PAGE_SIZE); | ||
305 | } else { | ||
306 | p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE); | ||
307 | } | ||
308 | return p; | ||
309 | } | ||
310 | |||
311 | /* Free up now-unused memory */ | ||
312 | static void free_sec(unsigned long start, unsigned long end, const char *name) | ||
313 | { | ||
314 | unsigned long cnt = 0; | ||
315 | |||
316 | while (start < end) { | ||
317 | ClearPageReserved(virt_to_page(start)); | ||
318 | set_page_count(virt_to_page(start), 1); | ||
319 | free_page(start); | ||
320 | cnt++; | ||
321 | start += PAGE_SIZE; | ||
322 | } | ||
323 | if (cnt) { | ||
324 | printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name); | ||
325 | totalram_pages += cnt; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | void free_initmem(void) | ||
330 | { | ||
331 | #define FREESEC(TYPE) \ | ||
332 | free_sec((unsigned long)(&__ ## TYPE ## _begin), \ | ||
333 | (unsigned long)(&__ ## TYPE ## _end), \ | ||
334 | #TYPE); | ||
335 | |||
336 | printk ("Freeing unused kernel memory:"); | ||
337 | FREESEC(init); | ||
338 | printk("\n"); | ||
339 | ppc_md.progress = NULL; | ||
340 | #undef FREESEC | ||
341 | } | ||
342 | |||
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Give the pages holding the initial ramdisk back to the page allocator.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
}
#endif
356 | |||
357 | /* | ||
358 | * Initialize the bootmem system and give it all the memory we | ||
359 | * have available. | ||
360 | */ | ||
361 | void __init do_init_bootmem(void) | ||
362 | { | ||
363 | unsigned long start, size; | ||
364 | int i; | ||
365 | |||
366 | /* | ||
367 | * Find an area to use for the bootmem bitmap. | ||
368 | * We look for the first area which is at least | ||
369 | * 128kB in length (128kB is enough for a bitmap | ||
370 | * for 4GB of memory, using 4kB pages), plus 1 page | ||
371 | * (in case the address isn't page-aligned). | ||
372 | */ | ||
373 | start = 0; | ||
374 | size = 0; | ||
375 | for (i = 0; i < phys_avail.n_regions; ++i) { | ||
376 | unsigned long a = phys_avail.regions[i].address; | ||
377 | unsigned long s = phys_avail.regions[i].size; | ||
378 | if (s <= size) | ||
379 | continue; | ||
380 | start = a; | ||
381 | size = s; | ||
382 | if (s >= 33 * PAGE_SIZE) | ||
383 | break; | ||
384 | } | ||
385 | start = PAGE_ALIGN(start); | ||
386 | |||
387 | min_low_pfn = start >> PAGE_SHIFT; | ||
388 | max_low_pfn = (PPC_MEMSTART + total_lowmem) >> PAGE_SHIFT; | ||
389 | max_pfn = (PPC_MEMSTART + total_memory) >> PAGE_SHIFT; | ||
390 | boot_mapsize = init_bootmem_node(&contig_page_data, min_low_pfn, | ||
391 | PPC_MEMSTART >> PAGE_SHIFT, | ||
392 | max_low_pfn); | ||
393 | |||
394 | /* remove the bootmem bitmap from the available memory */ | ||
395 | mem_pieces_remove(&phys_avail, start, boot_mapsize, 1); | ||
396 | |||
397 | /* add everything in phys_avail into the bootmem map */ | ||
398 | for (i = 0; i < phys_avail.n_regions; ++i) | ||
399 | free_bootmem(phys_avail.regions[i].address, | ||
400 | phys_avail.regions[i].size); | ||
401 | |||
402 | init_bootmem_done = 1; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * paging_init() sets up the page tables - in fact we've already done this. | ||
407 | */ | ||
408 | void __init paging_init(void) | ||
409 | { | ||
410 | unsigned long zones_size[MAX_NR_ZONES], i; | ||
411 | |||
412 | #ifdef CONFIG_HIGHMEM | ||
413 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ | ||
414 | pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k | ||
415 | (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE); | ||
416 | map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */ | ||
417 | kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k | ||
418 | (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN); | ||
419 | kmap_prot = PAGE_KERNEL; | ||
420 | #endif /* CONFIG_HIGHMEM */ | ||
421 | |||
422 | /* | ||
423 | * All pages are DMA-able so we put them all in the DMA zone. | ||
424 | */ | ||
425 | zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT; | ||
426 | for (i = 1; i < MAX_NR_ZONES; i++) | ||
427 | zones_size[i] = 0; | ||
428 | |||
429 | #ifdef CONFIG_HIGHMEM | ||
430 | zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT; | ||
431 | #endif /* CONFIG_HIGHMEM */ | ||
432 | |||
433 | free_area_init(zones_size); | ||
434 | } | ||
435 | |||
/*
 * Final memory setup: hand all free pages to the page allocator,
 * account for reserved pages and print the memory summary.
 */
void __init mem_init(void)
{
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
#ifdef CONFIG_HIGHMEM
	unsigned long highmem_mapnr;	/* first highmem pfn */

	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */
	max_mapnr = total_memory >> PAGE_SHIFT;

	high_memory = (void *) __va(PPC_MEMSTART + total_lowmem);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_BLK_DEV_INITRD
	/* if we are booted from BootX with an initial ramdisk,
	   make sure the ramdisk pages aren't reserved. */
	if (initrd_start) {
		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
			ClearPageReserved(virt_to_page(addr));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC_OF
	/* mark the RTAS pages as reserved */
	if ( rtas_data )
		for (addr = (ulong)__va(rtas_data);
		     addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(virt_to_page(addr));
#endif
#ifdef CONFIG_PPC_PMAC
	if (agp_special_page)
		SetPageReserved(virt_to_page(agp_special_page));
#endif
	/* Classify reserved lowmem pages by the section they fall in. */
	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
	     addr += PAGE_SIZE) {
		if (!PageReserved(virt_to_page(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < (ulong) klimit)
			datapages++;
	}

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn;

		/* Highmem pages weren't in bootmem; free them by hand. */
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = mem_map + pfn;

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
	}
#endif /* CONFIG_HIGHMEM */

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
	       initpages<< (PAGE_SHIFT-10),
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

#ifdef CONFIG_PPC_PMAC
	if (agp_special_page)
		printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
#endif

	mem_init_done = 1;
}
517 | |||
/*
 * Set phys_avail to the amount of physical memory,
 * less the kernel text/data/bss.
 */
void __init
set_phys_avail(unsigned long total_memory)
{
	unsigned long kstart, ksize;

	/*
	 * Initially, available physical memory is equivalent to all
	 * physical memory.
	 */

	phys_avail.regions[0].address = PPC_MEMSTART;
	phys_avail.regions[0].size = total_memory;
	phys_avail.n_regions = 1;

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */

	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(klimit - _stext);

	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
	/* Also carve out the first 16kB of RAM (presumably the low
	 * exception-vector area -- confirm against head.S). */
	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		mem_pieces_remove(&phys_avail, __pa(initrd_start),
				  initrd_end - initrd_start, 1);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_PPC_OF
	/* remove the RTAS pages from the available memory */
	if (rtas_data)
		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
#endif
#ifdef CONFIG_PPC_PMAC
	/* Because of some uninorth weirdness, we need a page of
	 * memory as high as possible (it must be outside of the
	 * bus address seen as the AGP aperture). It will be used
	 * by the r128 DRM driver
	 *
	 * FIXME: We need to make sure that page doesn't overlap any of the
	 * above. This could be done by improving mem_pieces_find to be able
	 * to do a backward search from the end of the list.
	 */
	if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
		agp_special_page = (total_memory - PAGE_SIZE);
		mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
		agp_special_page = (unsigned long)__va(agp_special_page);
	}
#endif /* CONFIG_PPC_PMAC */
}
576 | |||
/* Mark some memory as reserved by removing it from phys_avail. */
void __init reserve_phys_mem(unsigned long start, unsigned long size)
{
	/* NOTE(review): the final argument presumably means the range must
	 * already be present in phys_avail -- confirm against mem_pieces.c. */
	mem_pieces_remove(&phys_avail, start, size, 1);
}
diff --git a/arch/powerpc/mm/init64.c b/arch/powerpc/mm/init64.c new file mode 100644 index 000000000000..81f6745b31ef --- /dev/null +++ b/arch/powerpc/mm/init64.c | |||
@@ -0,0 +1,385 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
7 | * Copyright (C) 1996 Paul Mackerras | ||
8 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
9 | * | ||
10 | * Derived from "arch/i386/mm/init.c" | ||
11 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
12 | * | ||
13 | * Dave Engebretsen <engebret@us.ibm.com> | ||
14 | * Rework for PPC64 port. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or | ||
17 | * modify it under the terms of the GNU General Public License | ||
18 | * as published by the Free Software Foundation; either version | ||
19 | * 2 of the License, or (at your option) any later version. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/signal.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/mman.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/swap.h> | ||
33 | #include <linux/stddef.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/bootmem.h> | ||
38 | #include <linux/highmem.h> | ||
39 | #include <linux/idr.h> | ||
40 | #include <linux/nodemask.h> | ||
41 | #include <linux/module.h> | ||
42 | |||
43 | #include <asm/pgalloc.h> | ||
44 | #include <asm/page.h> | ||
45 | #include <asm/prom.h> | ||
46 | #include <asm/lmb.h> | ||
47 | #include <asm/rtas.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/mmu_context.h> | ||
50 | #include <asm/pgtable.h> | ||
51 | #include <asm/mmu.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/smp.h> | ||
54 | #include <asm/machdep.h> | ||
55 | #include <asm/tlb.h> | ||
56 | #include <asm/eeh.h> | ||
57 | #include <asm/processor.h> | ||
58 | #include <asm/mmzone.h> | ||
59 | #include <asm/cputable.h> | ||
60 | #include <asm/ppcdebug.h> | ||
61 | #include <asm/sections.h> | ||
62 | #include <asm/system.h> | ||
63 | #include <asm/iommu.h> | ||
64 | #include <asm/abs_addr.h> | ||
65 | #include <asm/vdso.h> | ||
66 | #include <asm/imalloc.h> | ||
67 | |||
68 | #if PGTABLE_RANGE > USER_VSID_RANGE | ||
69 | #warning Limited user VSID range means pagetable space is wasted | ||
70 | #endif | ||
71 | |||
72 | #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) | ||
73 | #warning TASK_SIZE is smaller than it needs to be. | ||
74 | #endif | ||
75 | |||
/* Non-zero once mem_init() has completed. */
int mem_init_done;
/* Next free virtual address for ioremap-style allocations. */
unsigned long ioremap_bot = IMALLOC_BASE;
/* Bump pointer for PHB IO space; see reserve_phb_iospace(). */
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

/* First address beyond the kernel image. */
unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1=0;
unsigned long _ASR=0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
94 | |||
/*
 * Do very early mm setup.  Currently this only locates the presumed
 * IO hole (the first gap between LMB memory regions) and records it
 * in io_hole_start/io_hole_size.
 */
void __init mm_init_ppc64(void)
{
#ifndef CONFIG_PPC_ISERIES
	unsigned long i;
#endif

	ppc64_boot_msg(0x100, "MM Init");

	/* This is the story of the IO hole... please, keep seated,
	 * unfortunately, we are out of oxygen masks at the moment.
	 * So we need some rough way to tell where your big IO hole
	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
	 * that area as well, on POWER4 we don't have one, etc...
	 * We need that as a "hint" when sizing the TCE table on POWER3
	 * So far, the simplest way that seem work well enough for us it
	 * to just assume that the first discontinuity in our physical
	 * RAM layout is the IO hole. That may not be correct in the future
	 * (and isn't on iSeries but then we don't care ;)
	 */

#ifndef CONFIG_PPC_ISERIES
	/* Scan adjacent LMB regions; the first gap becomes the IO hole. */
	for (i = 1; i < lmb.memory.cnt; i++) {
		unsigned long base, prevbase, prevsize;

		prevbase = lmb.memory.region[i-1].base;
		prevsize = lmb.memory.region[i-1].size;
		base = lmb.memory.region[i].base;
		if (base > (prevbase + prevsize)) {
			io_hole_start = prevbase + prevsize;
			io_hole_size = base - (prevbase + prevsize);
			break;
		}
	}
#endif /* CONFIG_PPC_ISERIES */
	if (io_hole_start)
		printk("IO Hole assumed to be %lx -> %lx\n",
		       io_hole_start, io_hole_start + io_hole_size - 1);

	ppc64_boot_msg(0x100, "MM Init Done");
}
138 | |||
139 | void free_initmem(void) | ||
140 | { | ||
141 | unsigned long addr; | ||
142 | |||
143 | addr = (unsigned long)__init_begin; | ||
144 | for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { | ||
145 | memset((void *)addr, 0xcc, PAGE_SIZE); | ||
146 | ClearPageReserved(virt_to_page(addr)); | ||
147 | set_page_count(virt_to_page(addr), 1); | ||
148 | free_page(addr); | ||
149 | totalram_pages++; | ||
150 | } | ||
151 | printk ("Freeing unused kernel memory: %luk freed\n", | ||
152 | ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10); | ||
153 | } | ||
154 | |||
155 | #ifdef CONFIG_BLK_DEV_INITRD | ||
156 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
157 | { | ||
158 | if (start < end) | ||
159 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
160 | for (; start < end; start += PAGE_SIZE) { | ||
161 | ClearPageReserved(virt_to_page(start)); | ||
162 | set_page_count(virt_to_page(start), 1); | ||
163 | free_page(start); | ||
164 | totalram_pages++; | ||
165 | } | ||
166 | } | ||
167 | #endif | ||
168 | |||
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
	int boot_mapsize;

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	/* Carve the bitmap itself out of the LMB allocator. */
	start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	max_pfn = max_low_pfn;

	/* Add all physical memory to the bootmem map, mark each area
	 * present.  (Must precede the reservation pass below.)
	 */
	for (i=0; i < lmb.memory.cnt; i++)
		free_bootmem(lmb.memory.region[i].base,
			     lmb_size_bytes(&lmb.memory, i));

	/* reserve the sections we're already using */
	for (i=0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* Record every memory region's pfn range as present. */
	for (i=0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));
}
211 | |||
212 | /* | ||
213 | * paging_init() sets up the page tables - in fact we've already done this. | ||
214 | */ | ||
215 | void __init paging_init(void) | ||
216 | { | ||
217 | unsigned long zones_size[MAX_NR_ZONES]; | ||
218 | unsigned long zholes_size[MAX_NR_ZONES]; | ||
219 | unsigned long total_ram = lmb_phys_mem_size(); | ||
220 | unsigned long top_of_ram = lmb_end_of_DRAM(); | ||
221 | |||
222 | printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", | ||
223 | top_of_ram, total_ram); | ||
224 | printk(KERN_INFO "Memory hole size: %ldMB\n", | ||
225 | (top_of_ram - total_ram) >> 20); | ||
226 | /* | ||
227 | * All pages are DMA-able so we put them all in the DMA zone. | ||
228 | */ | ||
229 | memset(zones_size, 0, sizeof(zones_size)); | ||
230 | memset(zholes_size, 0, sizeof(zholes_size)); | ||
231 | |||
232 | zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; | ||
233 | zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; | ||
234 | |||
235 | free_area_init_node(0, NODE_DATA(0), zones_size, | ||
236 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); | ||
237 | } | ||
238 | #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ | ||
239 | |||
/* kcore entry covering the vmalloc region. */
static struct kcore_list kcore_vmem;

/*
 * Register every LMB memory region, plus the vmalloc area, on the
 * kcore list.  Runs as an initcall.
 */
static int __init setup_kcore(void)
{
	int i;

	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("mem_init: kmalloc failed\n");

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
266 | |||
/*
 * Late boot-time memory setup (64-bit): release bootmem pages (per
 * node when CONFIG_NEED_MULTIPLE_NODES) to the buddy allocator, count
 * reserved pages, print the memory banner, and initialize the vDSO.
 * Sets mem_init_done when finished.
 */
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	/* Hand each populated node's bootmem pages to the allocator. */
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif

	/* Count pages still marked reserved, for the banner below. */
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	/* Section sizes come from the linker-script symbols. */
	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;

	/* Initialize the vDSO */
	vdso_init();
}
321 | |||
322 | void __iomem * reserve_phb_iospace(unsigned long size) | ||
323 | { | ||
324 | void __iomem *virt_addr; | ||
325 | |||
326 | if (phbs_io_bot >= IMALLOC_BASE) | ||
327 | panic("reserve_phb_iospace(): phb io space overflow\n"); | ||
328 | |||
329 | virt_addr = (void __iomem *) phbs_io_bot; | ||
330 | phbs_io_bot += size; | ||
331 | |||
332 | return virt_addr; | ||
333 | } | ||
334 | |||
/* Slab constructor for the page-table caches: objects start zeroed. */
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}
339 | |||
/* Object sizes for the two page-table slab caches, indexed by cache
 * number (see the *_CACHE_NUM checks in pgtable_cache_init()). */
static const int pgtable_cache_size[2] = {
	PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
/* Names for the corresponding caches. */
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
	"pgd_pte_cache", "pud_pmd_cache",
};

kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
348 | |||
/*
 * Create the slab caches used to allocate page-table pages.  The
 * BUILD_BUG_ONs verify that every table level's size maps onto one of
 * the two cache sizes declared in pgtable_cache_size[].
 */
void pgtable_cache_init(void)
{
	int i;

	BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
	BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
	BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
	BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		/* zero_ctor keeps freshly allocated tables cleared. */
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_HWCACHE_ALIGN
						     | SLAB_MUST_HWCACHE_ALIGN,
						     zero_ctor,
						     NULL);
		if (! pgtable_cache[i])
			panic("pgtable_cache_init(): could not create %s!\n",
			      name);
	}
}
373 | |||
374 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, | ||
375 | unsigned long size, pgprot_t vma_prot) | ||
376 | { | ||
377 | if (ppc_md.phys_mem_access_prot) | ||
378 | return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot); | ||
379 | |||
380 | if (!page_is_ram(addr >> PAGE_SHIFT)) | ||
381 | vma_prot = __pgprot(pgprot_val(vma_prot) | ||
382 | | _PAGE_GUARDED | _PAGE_NO_CACHE); | ||
383 | return vma_prot; | ||
384 | } | ||
385 | EXPORT_SYMBOL(phys_mem_access_prot); | ||
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c new file mode 100644 index 000000000000..345db08e5d20 --- /dev/null +++ b/arch/powerpc/mm/mem.c | |||
@@ -0,0 +1,299 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
7 | * Copyright (C) 1996 Paul Mackerras | ||
8 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
9 | * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) | ||
10 | * | ||
11 | * Derived from "arch/i386/mm/init.c" | ||
12 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #include <linux/config.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/string.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/stddef.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/bootmem.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/initrd.h> | ||
34 | #include <linux/pagemap.h> | ||
35 | |||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/prom.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/mmu.h> | ||
42 | #include <asm/smp.h> | ||
43 | #include <asm/machdep.h> | ||
44 | #include <asm/btext.h> | ||
45 | #include <asm/tlb.h> | ||
46 | #include <asm/bootinfo.h> | ||
47 | #include <asm/prom.h> | ||
48 | |||
49 | #include "mem_pieces.h" | ||
50 | #include "mmu_decl.h" | ||
51 | |||
52 | #ifndef CPU_FTR_COHERENT_ICACHE | ||
53 | #define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */ | ||
54 | #define CPU_FTR_NOEXECUTE 0 | ||
55 | #endif | ||
56 | |||
57 | /* | ||
58 | * This is called by /dev/mem to know if a given address has to | ||
59 | * be mapped non-cacheable or not | ||
60 | */ | ||
61 | int page_is_ram(unsigned long pfn) | ||
62 | { | ||
63 | unsigned long paddr = (pfn << PAGE_SHIFT); | ||
64 | |||
65 | #ifndef CONFIG_PPC64 /* XXX for now */ | ||
66 | return paddr < __pa(high_memory); | ||
67 | #else | ||
68 | int i; | ||
69 | for (i=0; i < lmb.memory.cnt; i++) { | ||
70 | unsigned long base; | ||
71 | |||
72 | base = lmb.memory.region[i].base; | ||
73 | |||
74 | if ((paddr >= base) && | ||
75 | (paddr < (base + lmb.memory.region[i].size))) { | ||
76 | return 1; | ||
77 | } | ||
78 | } | ||
79 | |||
80 | return 0; | ||
81 | #endif | ||
82 | } | ||
83 | EXPORT_SYMBOL(page_is_ram); | ||
84 | |||
/*
 * Choose page protections for an mmap of physical memory: defer to
 * the platform hook if present; otherwise mark non-RAM addresses
 * guarded and non-cacheable.
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
97 | |||
/*
 * Print a summary of memory usage to the console: walks every page
 * of every node and tallies totals by state.
 */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				/* extra references beyond the first count
				 * as sharers */
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
132 | |||
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	/* Nothing to track when the i-cache snoops d-side stores. */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
147 | |||
/*
 * Make a page's instruction cache coherent with its data cache,
 * using whichever mapping strategy the platform requires.
 */
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	/* BookE flushes via an effective address, so map the page first. */
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	/* Otherwise flush directly by physical address. */
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif

}
/*
 * Zero a page destined for user space and note that its i-cache
 * state is stale (unless the CPU keeps the i-cache coherent itself).
 */
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
179 | |||
/*
 * Copy a page on behalf of user space and mark the destination's
 * i-cache state stale unless the hardware keeps it coherent.
 */
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
207 | |||
/*
 * Flush the i-cache over a byte range the kernel has just written
 * within a user page.
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	/* kmap in case the page lives in highmem. */
	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
218 | |||
219 | /* | ||
220 | * This is called at the end of handling a user page fault, when the | ||
221 | * fault has been handled by updating a PTE in the linux page tables. | ||
222 | * We use it to preload an HPTE into the hash table corresponding to | ||
223 | * the updated linux PTE. | ||
224 | * | ||
225 | * This must always be called with the mm->page_table_lock held | ||
226 | */ | ||
227 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, | ||
228 | pte_t pte) | ||
229 | { | ||
230 | /* handle i-cache coherency */ | ||
231 | unsigned long pfn = pte_pfn(pte); | ||
232 | #ifdef CONFIG_PPC32 | ||
233 | pmd_t *pmd; | ||
234 | #else | ||
235 | unsigned long vsid; | ||
236 | void *pgdir; | ||
237 | pte_t *ptep; | ||
238 | int local = 0; | ||
239 | cpumask_t tmp; | ||
240 | unsigned long flags; | ||
241 | #endif | ||
242 | |||
243 | /* handle i-cache coherency */ | ||
244 | if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && | ||
245 | !cpu_has_feature(CPU_FTR_NOEXECUTE) && | ||
246 | pfn_valid(pfn)) { | ||
247 | struct page *page = pfn_to_page(pfn); | ||
248 | if (!PageReserved(page) | ||
249 | && !test_bit(PG_arch_1, &page->flags)) { | ||
250 | if (vma->vm_mm == current->active_mm) { | ||
251 | #ifdef CONFIG_8xx | ||
252 | /* On 8xx, cache control instructions (particularly | ||
253 | * "dcbst" from flush_dcache_icache) fault as write | ||
254 | * operation if there is an unpopulated TLB entry | ||
255 | * for the address in question. To workaround that, | ||
256 | * we invalidate the TLB here, thus avoiding dcbst | ||
257 | * misbehaviour. | ||
258 | */ | ||
259 | _tlbie(address); | ||
260 | #endif | ||
261 | __flush_dcache_icache((void *) address); | ||
262 | } else | ||
263 | flush_dcache_icache_page(page); | ||
264 | set_bit(PG_arch_1, &page->flags); | ||
265 | } | ||
266 | } | ||
267 | |||
268 | #ifdef CONFIG_PPC_STD_MMU | ||
269 | /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ | ||
270 | if (!pte_young(pte) || address >= TASK_SIZE) | ||
271 | return; | ||
272 | #ifdef CONFIG_PPC32 | ||
273 | if (Hash == 0) | ||
274 | return; | ||
275 | pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address); | ||
276 | if (!pmd_none(*pmd)) | ||
277 | add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd)); | ||
278 | #else | ||
279 | pgdir = vma->vm_mm->pgd; | ||
280 | if (pgdir == NULL) | ||
281 | return; | ||
282 | |||
283 | ptep = find_linux_pte(pgdir, ea); | ||
284 | if (!ptep) | ||
285 | return; | ||
286 | |||
287 | vsid = get_vsid(vma->vm_mm->context.id, ea); | ||
288 | |||
289 | local_irq_save(flags); | ||
290 | tmp = cpumask_of_cpu(smp_processor_id()); | ||
291 | if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp)) | ||
292 | local = 1; | ||
293 | |||
294 | __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep, | ||
295 | 0x300, local); | ||
296 | local_irq_restore(flags); | ||
297 | #endif | ||
298 | #endif | ||
299 | } | ||
diff --git a/arch/powerpc/mm/mem64.c b/arch/powerpc/mm/mem64.c new file mode 100644 index 000000000000..ef765a84433f --- /dev/null +++ b/arch/powerpc/mm/mem64.c | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | * PowerPC version | ||
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
4 | * | ||
5 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
6 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
7 | * Copyright (C) 1996 Paul Mackerras | ||
8 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
9 | * | ||
10 | * Derived from "arch/i386/mm/init.c" | ||
11 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
12 | * | ||
13 | * Dave Engebretsen <engebret@us.ibm.com> | ||
14 | * Rework for PPC64 port. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or | ||
17 | * modify it under the terms of the GNU General Public License | ||
18 | * as published by the Free Software Foundation; either version | ||
19 | * 2 of the License, or (at your option) any later version. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/signal.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/mman.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/swap.h> | ||
33 | #include <linux/stddef.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/bootmem.h> | ||
38 | #include <linux/highmem.h> | ||
39 | #include <linux/idr.h> | ||
40 | #include <linux/nodemask.h> | ||
41 | #include <linux/module.h> | ||
42 | |||
43 | #include <asm/pgalloc.h> | ||
44 | #include <asm/page.h> | ||
45 | #include <asm/prom.h> | ||
46 | #include <asm/lmb.h> | ||
47 | #include <asm/rtas.h> | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/mmu_context.h> | ||
50 | #include <asm/pgtable.h> | ||
51 | #include <asm/mmu.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/smp.h> | ||
54 | #include <asm/machdep.h> | ||
55 | #include <asm/tlb.h> | ||
56 | #include <asm/eeh.h> | ||
57 | #include <asm/processor.h> | ||
58 | #include <asm/mmzone.h> | ||
59 | #include <asm/cputable.h> | ||
60 | #include <asm/ppcdebug.h> | ||
61 | #include <asm/sections.h> | ||
62 | #include <asm/system.h> | ||
63 | #include <asm/iommu.h> | ||
64 | #include <asm/abs_addr.h> | ||
65 | #include <asm/vdso.h> | ||
66 | #include <asm/imalloc.h> | ||
67 | |||
68 | /* | ||
69 | * This is called by /dev/mem to know if a given address has to | ||
70 | * be mapped non-cacheable or not | ||
71 | */ | ||
72 | int page_is_ram(unsigned long pfn) | ||
73 | { | ||
74 | int i; | ||
75 | unsigned long paddr = (pfn << PAGE_SHIFT); | ||
76 | |||
77 | for (i=0; i < lmb.memory.cnt; i++) { | ||
78 | unsigned long base; | ||
79 | |||
80 | base = lmb.memory.region[i].base; | ||
81 | |||
82 | if ((paddr >= base) && | ||
83 | (paddr < (base + lmb.memory.region[i].size))) { | ||
84 | return 1; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | EXPORT_SYMBOL(page_is_ram); | ||
91 | |||
/*
 * Decide the page protection for a /dev/mem-style mapping of the
 * physical range [addr, addr + size).
 *
 * If the platform supplies its own hook (ppc_md.phys_mem_access_prot),
 * defer to it entirely.  Otherwise, mark anything that is not system
 * RAM as guarded and non-cacheable, on the assumption that it is I/O
 * space.
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
104 | |||
/*
 * Print a summary of current memory usage to the console: the
 * free-area information, free swap, and per-node page counts broken
 * down into total, reserved, shared and swap-cached pages.
 */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		/* walk every page frame spanned by this node */
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				/* extra references beyond the first count as sharers */
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
133 | |||
134 | /* | ||
135 | * This is called when a page has been modified by the kernel. | ||
136 | * It just marks the page as not i-cache clean. We do the i-cache | ||
137 | * flush later when the page is given to a user process, if necessary. | ||
138 | */ | ||
139 | void flush_dcache_page(struct page *page) | ||
140 | { | ||
141 | if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) | ||
142 | return; | ||
143 | /* avoid an atomic op if possible */ | ||
144 | if (test_bit(PG_arch_1, &page->flags)) | ||
145 | clear_bit(PG_arch_1, &page->flags); | ||
146 | } | ||
147 | EXPORT_SYMBOL(flush_dcache_page); | ||
148 | |||
/*
 * Zero a page that is about to be mapped into a user process.
 *
 * @page:  kernel virtual address of the page to clear
 * @vaddr: user virtual address it will appear at (unused here)
 * @pg:    the struct page, used for i-cache state tracking
 */
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
166 | |||
/*
 * Copy a page destined for a user process and mark the destination
 * as no longer i-cache clean, so it gets flushed when mapped in.
 *
 * @vto:   kernel virtual address of the destination page
 * @vfrom: kernel virtual address of the source page
 * @vaddr: user virtual address the copy will appear at (unused here)
 * @pg:    the destination struct page, for i-cache state tracking
 */
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
194 | |||
195 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | ||
196 | unsigned long addr, int len) | ||
197 | { | ||
198 | unsigned long maddr; | ||
199 | |||
200 | maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK); | ||
201 | flush_icache_range(maddr, maddr + len); | ||
202 | } | ||
203 | EXPORT_SYMBOL(flush_icache_user_range); | ||
204 | |||
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
		      pte_t pte)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;		/* 1 if the mm runs only on this CPU */
	cpumask_t tmp;
	unsigned long flags;

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			/* flush only if the page isn't already i-cache clean */
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);
			}
		}
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;

	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, ea);

	local_irq_save(flags);
	/* if only this CPU has used the mm, a local (non-broadcast)
	 * hash insert is sufficient */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	/* 0x300 is the trap value passed down; NOTE(review): presumably
	 * the data storage interrupt vector -- confirm against hash_page */
	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
		    0x300, local);
	local_irq_restore(flags);
}
diff --git a/arch/powerpc/mm/mem_pieces.c b/arch/powerpc/mm/mem_pieces.c new file mode 100644 index 000000000000..3d639052017e --- /dev/null +++ b/arch/powerpc/mm/mem_pieces.c | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au> | ||
3 | * Changes to accommodate Power Macintoshes. | ||
4 | * Cort Dougan <cort@cs.nmt.edu> | ||
5 | * Rewrites. | ||
6 | * Grant Erickson <grant@lcse.umn.edu> | ||
7 | * General rework and split from mm/init.c. | ||
8 | * | ||
9 | * Module name: mem_pieces.c | ||
10 | * | ||
11 | * Description: | ||
12 | * Routines and data structures for manipulating and representing | ||
13 | * physical memory extents (i.e. address/length pairs). | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/stddef.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <asm/page.h> | ||
22 | |||
23 | #include "mem_pieces.h" | ||
24 | |||
25 | extern struct mem_pieces phys_avail; | ||
26 | |||
27 | static void mem_pieces_print(struct mem_pieces *); | ||
28 | |||
/*
 * Scan a region for a piece of a given size with the required alignment.
 *
 * Searches the global phys_avail list for the first region that can
 * hold @size bytes starting at an @align-aligned address, removes the
 * chosen piece from the list so it cannot be handed out again, and
 * returns its kernel virtual address.  Panics if no region fits
 * (this runs only at early boot, where failure is fatal).
 */
void __init *
mem_pieces_find(unsigned int size, unsigned int align)
{
	int i;
	unsigned a, e;
	struct mem_pieces *mp = &phys_avail;

	for (i = 0; i < mp->n_regions; ++i) {
		a = mp->regions[i].address;
		e = a + mp->regions[i].size;
		/* round the candidate start up to the requested alignment */
		a = (a + align - 1) & -align;
		if (a + size <= e) {
			mem_pieces_remove(mp, a, size, 1);
			return (void *) __va(a);
		}
	}
	panic("Couldn't find %u bytes at %u alignment\n", size, align);

	return NULL;	/* not reached: panic() does not return */
}
52 | |||
/*
 * Remove the range [start, start+size) from an array of pieces.
 *
 * @mp:         the piece list to modify
 * @start:      first address to remove
 * @size:       number of bytes to remove
 * @must_exist: if non-zero, complain when the range is not fully
 *              covered by existing regions
 *
 * Depending on how the range overlaps a region, that region may be
 * shrunk from either end, split into two, or deleted outright.
 */
void __init
mem_pieces_remove(struct mem_pieces *mp, unsigned int start, unsigned int size,
		  int must_exist)
{
	int i, j;
	unsigned int end, rs, re;
	struct reg_property *rp;

	end = start + size;
	/* find the first region that overlaps [start, end) */
	for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
		if (end > rp->address && start < rp->address + rp->size)
			break;
	}
	if (i >= mp->n_regions) {
		if (must_exist)
			printk("mem_pieces_remove: [%x,%x) not in any region\n",
			       start, end);
		return;
	}
	/* walk every region the range touches */
	for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
		rs = rp->address;
		re = rs + rp->size;
		if (must_exist && (start < rs || end > re)) {
			printk("mem_pieces_remove: bad overlap [%x,%x) with",
			       start, end);
			mem_pieces_print(mp);
			must_exist = 0;	/* only report once */
		}
		if (start > rs) {
			/* keep the head [rs, start) of this region */
			rp->size = start - rs;
			if (end < re) {
				/* need to split this entry */
				if (mp->n_regions >= MEM_PIECES_MAX)
					panic("eek... mem_pieces overflow");
				/* shift later entries up to open a slot */
				for (j = mp->n_regions; j > i + 1; --j)
					mp->regions[j] = mp->regions[j-1];
				++mp->n_regions;
				rp[1].address = end;
				rp[1].size = re - end;
			}
		} else {
			if (end < re) {
				/* keep the tail [end, re) of this region */
				rp->address = end;
				rp->size = re - end;
			} else {
				/* need to delete this entry */
				for (j = i; j < mp->n_regions - 1; ++j)
					mp->regions[j] = mp->regions[j+1];
				--mp->n_regions;
				/* stay on the same slot next iteration */
				--i;
				--rp;
			}
		}
	}
}
111 | |||
112 | static void __init | ||
113 | mem_pieces_print(struct mem_pieces *mp) | ||
114 | { | ||
115 | int i; | ||
116 | |||
117 | for (i = 0; i < mp->n_regions; ++i) | ||
118 | printk(" [%x, %x)", mp->regions[i].address, | ||
119 | mp->regions[i].address + mp->regions[i].size); | ||
120 | printk("\n"); | ||
121 | } | ||
122 | |||
123 | void __init | ||
124 | mem_pieces_sort(struct mem_pieces *mp) | ||
125 | { | ||
126 | unsigned long a, s; | ||
127 | int i, j; | ||
128 | |||
129 | for (i = 1; i < mp->n_regions; ++i) { | ||
130 | a = mp->regions[i].address; | ||
131 | s = mp->regions[i].size; | ||
132 | for (j = i - 1; j >= 0; --j) { | ||
133 | if (a >= mp->regions[j].address) | ||
134 | break; | ||
135 | mp->regions[j+1] = mp->regions[j]; | ||
136 | } | ||
137 | mp->regions[j+1].address = a; | ||
138 | mp->regions[j+1].size = s; | ||
139 | } | ||
140 | } | ||
141 | |||
/*
 * Merge adjacent or overlapping regions in *mp into single regions.
 * The forward-scan merge condition only looks at following entries,
 * so this presumably expects the list sorted by ascending address
 * (see mem_pieces_sort()) -- NOTE(review): confirm with callers.
 */
void __init
mem_pieces_coalesce(struct mem_pieces *mp)
{
	unsigned long a, s, ns;
	int i, j, d;

	d = 0;	/* write index for the compacted list */
	for (i = 0; i < mp->n_regions; i = j) {
		a = mp->regions[i].address;
		s = mp->regions[i].size;
		/* absorb every following region that touches [a, a+s) */
		for (j = i + 1; j < mp->n_regions
			     && mp->regions[j].address - a <= s; ++j) {
			ns = mp->regions[j].address + mp->regions[j].size - a;
			if (ns > s)
				s = ns;
		}
		mp->regions[d].address = a;
		mp->regions[d].size = s;
		++d;
	}
	mp->n_regions = d;
}
diff --git a/arch/powerpc/mm/mem_pieces.h b/arch/powerpc/mm/mem_pieces.h new file mode 100644 index 000000000000..e2b700dc7f18 --- /dev/null +++ b/arch/powerpc/mm/mem_pieces.h | |||
@@ -0,0 +1,48 @@ | |||
/*
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Changes to accommodate Power Macintoshes.
 * Cort Dougan <cort@cs.nmt.edu>
 *      Rewrites.
 * Grant Erickson <grant@lcse.umn.edu>
 *      General rework and split from mm/init.c.
 *
 * Module name: mem_pieces.h
 *
 * Description:
 *      Routines and data structures for manipulating and representing
 *      physical memory extents (i.e. address/length pairs).
 *
 */

#ifndef __MEM_PIECES_H__
#define __MEM_PIECES_H__

#include <asm/prom.h>

#ifdef __cplusplus
extern "C" {
#endif


/* Type Definitions */

/* maximum number of regions a mem_pieces list can track */
#define MEM_PIECES_MAX		32

struct mem_pieces {
	int n_regions;				/* regions[0..n_regions-1] are valid */
	struct reg_property regions[MEM_PIECES_MAX];
};

/* Function Prototypes */

/* carve an aligned piece out of phys_avail; panics if none fits */
extern void	*mem_pieces_find(unsigned int size, unsigned int align);
/* remove [start, start+size) from *mp, warning if must_exist is set
 * and the range is not fully covered */
extern void	mem_pieces_remove(struct mem_pieces *mp, unsigned int start,
				  unsigned int size, int must_exist);
/* merge touching/overlapping regions */
extern void	mem_pieces_coalesce(struct mem_pieces *mp);
/* sort regions by ascending base address */
extern void	mem_pieces_sort(struct mem_pieces *mp);

#ifdef __cplusplus
}
#endif

#endif /* __MEM_PIECES_H__ */
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c new file mode 100644 index 000000000000..a8816e0f6a86 --- /dev/null +++ b/arch/powerpc/mm/mmu_context.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * This file contains the routines for handling the MMU on those | ||
3 | * PowerPC implementations where the MMU substantially follows the | ||
4 | * architecture specification. This includes the 6xx, 7xx, 7xxx, | ||
5 | * 8260, and POWER3 implementations but excludes the 8xx and 4xx. | ||
6 | * -- paulus | ||
7 | * | ||
8 | * Derived from arch/ppc/mm/init.c: | ||
9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
10 | * | ||
11 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
12 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
13 | * Copyright (C) 1996 Paul Mackerras | ||
14 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
15 | * | ||
16 | * Derived from "arch/i386/mm/init.c" | ||
17 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License | ||
21 | * as published by the Free Software Foundation; either version | ||
22 | * 2 of the License, or (at your option) any later version. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/init.h> | ||
29 | |||
30 | #include <asm/mmu_context.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | |||
33 | mm_context_t next_mmu_context; | ||
34 | unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; | ||
35 | #ifdef FEW_CONTEXTS | ||
36 | atomic_t nr_free_contexts; | ||
37 | struct mm_struct *context_mm[LAST_CONTEXT+1]; | ||
38 | void steal_context(void); | ||
39 | #endif /* FEW_CONTEXTS */ | ||
40 | |||
/*
 * Initialize the context management stuff.
 */
void __init
mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	/* mark contexts 0..FIRST_CONTEXT-1 as permanently in use */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
#ifdef FEW_CONTEXTS
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
#endif /* FEW_CONTEXTS */
}
59 | |||
#ifdef FEW_CONTEXTS
/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP.  If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void
steal_context(void)
{
	struct mm_struct *mm;

	/* free up context `next_mmu_context' */
	/* if we shouldn't free context 0, don't... */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	/* evict the victim mm's translations, then release its context */
	flush_tlb_mm(mm);
	destroy_context(mm);
}
#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context64.c b/arch/powerpc/mm/mmu_context64.c new file mode 100644 index 000000000000..714a84dd8d5d --- /dev/null +++ b/arch/powerpc/mm/mmu_context64.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * MMU context allocation for 64-bit kernels. | ||
3 | * | ||
4 | * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/idr.h> | ||
22 | |||
23 | #include <asm/mmu_context.h> | ||
24 | |||
25 | static DEFINE_SPINLOCK(mmu_context_lock); | ||
26 | static DEFINE_IDR(mmu_context_idr); | ||
27 | |||
/*
 * Allocate an MMU context id for a new mm, using an IDR so ids are
 * unique.  Ids start at 1 (0 is never handed out here).
 *
 * Returns 0 on success, -ENOMEM when memory or context ids are
 * exhausted, or another negative errno from the IDR layer.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;

again:
	/* pre-fill the IDR's internal cache; may sleep (GFP_KERNEL) */
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;	/* cache was consumed by a racing allocator */
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		/* beyond what the hardware supports: give the id back */
		idr_remove(&mmu_context_idr, index);
		return -ENOMEM;
	}

	mm->context.id = index;

	return 0;
}
55 | |||
/*
 * Release the context id held by @mm back to the IDR and mark the
 * mm as having no context.
 */
void destroy_context(struct mm_struct *mm)
{
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, mm->context.id);
	spin_unlock(&mmu_context_lock);

	mm->context.id = NO_CONTEXT;
}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h new file mode 100644 index 000000000000..540f3292b229 --- /dev/null +++ b/arch/powerpc/mm/mmu_decl.h | |||
@@ -0,0 +1,85 @@ | |||
/*
 * Declarations of procedures and variables shared between files
 * in arch/ppc/mm/.
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/* shared low-level mapping helpers (defined in the mm/ C files) */
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, unsigned long phys,
		   unsigned int size, int flags);
extern void reserve_phys_mem(unsigned long start, unsigned long size);
extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
		      unsigned int size, int flags, unsigned int pid);
extern void invalidate_tlbcam_entry(int index);

extern int __map_without_bats;
extern unsigned long ioremap_base;
extern unsigned long ioremap_bot;
extern unsigned int rtas_data, rtas_size;

extern unsigned long total_memory;
extern unsigned long total_lowmem;
extern int mem_init_done;

/* the MMU hash table (classic 32-bit hash MMU) */
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;

extern unsigned int num_tlbcam_entries;

/* ...and now those things that may be slightly different between processor
 * architectures.  -- Dan
 */
#if defined(CONFIG_8xx)
#define flush_HPTE(X, va, pg)	_tlbie(va)
#define MMU_init_hw()		do { } while(0)
#define mmu_mapin_ram()		(0UL)

#elif defined(CONFIG_4xx)
#define flush_HPTE(X, va, pg)	_tlbie(va)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);

#elif defined(CONFIG_FSL_BOOKE)
#define flush_HPTE(X, va, pg)	_tlbie(va)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);

#else
/* anything except 4xx or 8xx */
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);

/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
 * which includes all new 82xx processors.  We need tlbie/tlbsync here
 * in that case (I think). -- Dan.
 */
static inline void flush_HPTE(unsigned context, unsigned long va,
			      unsigned long pdval)
{
	/* with a hash table present, evict the stale HPTE;
	 * otherwise just invalidate the TLB entry */
	if ((Hash != 0) &&
	    cpu_has_feature(CPU_FTR_HPTE_TABLE))
		flush_hash_pages(0, va, pdval, 1);
	else
		_tlbie(va);
}
#endif
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c new file mode 100644 index 000000000000..81a3d7446d37 --- /dev/null +++ b/arch/powerpc/mm/pgtable.c | |||
@@ -0,0 +1,470 @@ | |||
1 | /* | ||
2 | * This file contains the routines setting up the linux page tables. | ||
3 | * -- paulus | ||
4 | * | ||
5 | * Derived from arch/ppc/mm/init.c: | ||
6 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
7 | * | ||
8 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
9 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
10 | * Copyright (C) 1996 Paul Mackerras | ||
11 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
12 | * | ||
13 | * Derived from "arch/i386/mm/init.c" | ||
14 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or | ||
17 | * modify it under the terms of the GNU General Public License | ||
18 | * as published by the Free Software Foundation; either version | ||
19 | * 2 of the License, or (at your option) any later version. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/highmem.h> | ||
31 | |||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/pgalloc.h> | ||
34 | #include <asm/io.h> | ||
35 | |||
36 | #include "mmu_decl.h" | ||
37 | |||
38 | unsigned long ioremap_base; | ||
39 | unsigned long ioremap_bot; | ||
40 | int io_bat_index; | ||
41 | |||
42 | #if defined(CONFIG_6xx) || defined(CONFIG_POWER3) | ||
43 | #define HAVE_BATS 1 | ||
44 | #endif | ||
45 | |||
46 | #if defined(CONFIG_FSL_BOOKE) | ||
47 | #define HAVE_TLBCAM 1 | ||
48 | #endif | ||
49 | |||
50 | extern char etext[], _stext[]; | ||
51 | |||
52 | #ifdef CONFIG_SMP | ||
53 | extern void hash_page_sync(void); | ||
54 | #endif | ||
55 | |||
56 | #ifdef HAVE_BATS | ||
57 | extern unsigned long v_mapped_by_bats(unsigned long va); | ||
58 | extern unsigned long p_mapped_by_bats(unsigned long pa); | ||
59 | void setbat(int index, unsigned long virt, unsigned long phys, | ||
60 | unsigned int size, int flags); | ||
61 | |||
62 | #else /* !HAVE_BATS */ | ||
63 | #define v_mapped_by_bats(x) (0UL) | ||
64 | #define p_mapped_by_bats(x) (0UL) | ||
65 | #endif /* HAVE_BATS */ | ||
66 | |||
67 | #ifdef HAVE_TLBCAM | ||
68 | extern unsigned int tlbcam_index; | ||
69 | extern unsigned long v_mapped_by_tlbcam(unsigned long va); | ||
70 | extern unsigned long p_mapped_by_tlbcam(unsigned long pa); | ||
71 | #else /* !HAVE_TLBCAM */ | ||
72 | #define v_mapped_by_tlbcam(x) (0UL) | ||
73 | #define p_mapped_by_tlbcam(x) (0UL) | ||
74 | #endif /* HAVE_TLBCAM */ | ||
75 | |||
76 | #ifdef CONFIG_PTE_64BIT | ||
77 | /* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */ | ||
78 | #define PGDIR_ORDER 1 | ||
79 | #else | ||
80 | #define PGDIR_ORDER 0 | ||
81 | #endif | ||
82 | |||
83 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
84 | { | ||
85 | pgd_t *ret; | ||
86 | |||
87 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER); | ||
88 | return ret; | ||
89 | } | ||
90 | |||
/* Release a page directory obtained from pgd_alloc(). */
void pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGDIR_ORDER);
}
95 | |||
/*
 * Allocate one zeroed page to hold kernel PTEs.  Before mem_init()
 * the page allocator is not available, so fall back to the early
 * boot allocator and zero the page by hand.
 *
 * Returns the page's kernel virtual address, or NULL on failure.
 */
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		/* early pages are not pre-zeroed */
		if (pte)
			clear_page(pte);
	}
	return pte;
}
111 | |||
/*
 * Allocate one zeroed page to hold user PTEs, returned as a struct
 * page.  With CONFIG_HIGHPTE the page may come from highmem, hence
 * clear_highpage() rather than a direct clear.
 *
 * Returns NULL on allocation failure.
 */
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
	int flags = GFP_KERNEL | __GFP_REPEAT;
#endif

	ptepage = alloc_pages(flags, 0);
	if (ptepage)
		clear_highpage(ptepage);
	return ptepage;
}
127 | |||
/*
 * Free a kernel PTE page allocated by pte_alloc_one_kernel().
 * On SMP, hash_page_sync() is called first -- presumably to let any
 * in-flight hash-table miss handling on other CPUs finish before the
 * page is reused (NOTE(review): confirm against hash_page_sync).
 */
void pte_free_kernel(pte_t *pte)
{
#ifdef CONFIG_SMP
	hash_page_sync();
#endif
	free_page((unsigned long)pte);
}
135 | |||
/*
 * Free a user PTE page allocated by pte_alloc_one().  See
 * pte_free_kernel() regarding the SMP hash_page_sync() call.
 */
void pte_free(struct page *ptepage)
{
#ifdef CONFIG_SMP
	hash_page_sync();
#endif
	__free_page(ptepage);
}
143 | |||
#ifndef CONFIG_PHYS_64BIT
/*
 * Map a physical range as uncached I/O memory: a thin wrapper that
 * adds _PAGE_NO_CACHE and defers to __ioremap().
 */
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
#else /* CONFIG_PHYS_64BIT */
/* 64-bit-physical variant: maps a full unsigned long long address */
void __iomem *
ioremap64(unsigned long long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}

/*
 * 32-bit entry point for CONFIG_PHYS_64BIT platforms: widen the
 * address via the platform's fixup_bigphys_addr() first.
 */
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);

	return ioremap64(addr64, size);
}
#endif /* CONFIG_PHYS_64BIT */
165 | |||
166 | void __iomem * | ||
167 | __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) | ||
168 | { | ||
169 | unsigned long v, i; | ||
170 | phys_addr_t p; | ||
171 | int err; | ||
172 | |||
173 | /* | ||
174 | * Choose an address to map it to. | ||
175 | * Once the vmalloc system is running, we use it. | ||
176 | * Before then, we use space going down from ioremap_base | ||
177 | * (ioremap_bot records where we're up to). | ||
178 | */ | ||
179 | p = addr & PAGE_MASK; | ||
180 | size = PAGE_ALIGN(addr + size) - p; | ||
181 | |||
182 | /* | ||
183 | * If the address lies within the first 16 MB, assume it's in ISA | ||
184 | * memory space | ||
185 | */ | ||
186 | if (p < 16*1024*1024) | ||
187 | p += _ISA_MEM_BASE; | ||
188 | |||
189 | /* | ||
190 | * Don't allow anybody to remap normal RAM that we're using. | ||
191 | * mem_init() sets high_memory so only do the check after that. | ||
192 | */ | ||
193 | if ( mem_init_done && (p < virt_to_phys(high_memory)) ) | ||
194 | { | ||
195 | printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p, | ||
196 | __builtin_return_address(0)); | ||
197 | return NULL; | ||
198 | } | ||
199 | |||
200 | if (size == 0) | ||
201 | return NULL; | ||
202 | |||
203 | /* | ||
204 | * Is it already mapped? Perhaps overlapped by a previous | ||
205 | * BAT mapping. If the whole area is mapped then we're done, | ||
206 | * otherwise remap it since we want to keep the virt addrs for | ||
207 | * each request contiguous. | ||
208 | * | ||
209 | * We make the assumption here that if the bottom and top | ||
210 | * of the range we want are mapped then it's mapped to the | ||
211 | * same virt address (and this is contiguous). | ||
212 | * -- Cort | ||
213 | */ | ||
214 | if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ ) | ||
215 | goto out; | ||
216 | |||
217 | if ((v = p_mapped_by_tlbcam(p))) | ||
218 | goto out; | ||
219 | |||
220 | if (mem_init_done) { | ||
221 | struct vm_struct *area; | ||
222 | area = get_vm_area(size, VM_IOREMAP); | ||
223 | if (area == 0) | ||
224 | return NULL; | ||
225 | v = (unsigned long) area->addr; | ||
226 | } else { | ||
227 | v = (ioremap_bot -= size); | ||
228 | } | ||
229 | |||
230 | if ((flags & _PAGE_PRESENT) == 0) | ||
231 | flags |= _PAGE_KERNEL; | ||
232 | if (flags & _PAGE_NO_CACHE) | ||
233 | flags |= _PAGE_GUARDED; | ||
234 | |||
235 | /* | ||
236 | * Should check if it is a candidate for a BAT mapping | ||
237 | */ | ||
238 | |||
239 | err = 0; | ||
240 | for (i = 0; i < size && err == 0; i += PAGE_SIZE) | ||
241 | err = map_page(v+i, p+i, flags); | ||
242 | if (err) { | ||
243 | if (mem_init_done) | ||
244 | vunmap((void *)v); | ||
245 | return NULL; | ||
246 | } | ||
247 | |||
248 | out: | ||
249 | return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK)); | ||
250 | } | ||
251 | |||
void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_mapped_by_bats((unsigned long)addr)) return;

	/*
	 * Only unmap addresses inside the vmalloc-backed ioremap window;
	 * early (pre-mem_init) mappings at or above ioremap_bot are left
	 * in place permanently.
	 */
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
263 | |||
/*
 * Translate an I/O port number to its virtual address; the whole port
 * space is permanently mapped at _IO_BASE, so 'len' is unused.
 */
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
	return (void __iomem *) (port + _IO_BASE);
}
268 | |||
/*
 * Port space is permanently mapped (see ioport_map), so there is
 * nothing to undo here.
 */
void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
275 | |||
/*
 * Insert a single kernel translation: map virtual address 'va' to
 * physical address 'pa' with protection 'flags' in the kernel
 * (init_mm) page tables.  Returns 0 on success, -ENOMEM if a
 * page-table page could not be allocated.
 */
int
map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(&init_mm, pd, va);
	if (pg != 0) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
		/* Evict any stale hash-table entry for this address */
		if (mem_init_done)
			flush_HPTE(0, va, pmd_val(*pd));
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}
297 | |||
/*
 * Map in all of physical memory starting at KERNELBASE.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	/* Let the MMU-specific code (BATs/CAMs) cover what it can first;
	 * 's' is the number of bytes it already mapped. */
	s = mmu_mapin_ram();
	v = KERNELBASE + s;
	p = PPC_MEMSTART + s;
	/* Map the rest of lowmem page by page */
	for (; s < total_lowmem; s += PAGE_SIZE) {
		/* Kernel text pages get the _PAGE_RAM_TEXT protection */
		if ((char *) v >= _stext && (char *) v < etext)
			f = _PAGE_RAM_TEXT;
		else
			f = _PAGE_RAM;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
318 | |||
/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/* is x a power of 4?
 * A power of 4 is a power of 2 whose (1-based) lowest-set-bit position
 * is odd.  Every use of the argument is fully parenthesized so that
 * compound expressions such as is_power_of_4(a ^ b) expand correctly
 * (the original "(x-1)" bound the "- 1" into the argument expression).
 */
#define is_power_of_4(x)	((x) != 0 && (((x) & ((x) - 1)) == 0) && (ffs(x) & 1))
324 | |||
/*
 * Set up a mapping for a block of I/O.
 * virt, phys, size must all be page-aligned.
 * This should only be called before ioremap is called.
 */
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags)
{
	int i;

	/* Keep the dynamic ioremap window below any fixed block mapping */
	if (virt > KERNELBASE && virt < ioremap_bot)
		ioremap_bot = ioremap_base = virt;

#ifdef HAVE_BATS
	/*
	 * Use a BAT for this if possible...
	 * (requires a power-of-2 size with matching virt/phys alignment;
	 * only the first two IO BATs are handed out here)
	 */
	if (io_bat_index < 2 && is_power_of_2(size)
	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
		setbat(io_bat_index, virt, phys, size, flags);
		++io_bat_index;
		return;
	}
#endif /* HAVE_BATS */

#ifdef HAVE_TLBCAM
	/*
	 * Use a CAM for this if possible...
	 * (CAM entries on these cores require power-of-4 sizes)
	 */
	if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
		settlbcam(tlbcam_index, virt, phys, size, flags, 0);
		++tlbcam_index;
		return;
	}
#endif /* HAVE_TLBCAM */

	/* No BATs available, put it in the page tables. */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(virt + i, phys + i, flags);
}
366 | |||
/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 * On success the PTE is still mapped (pte_offset_map): the caller is
 * responsible for calling pte_unmap() on *ptep when done.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_map(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
				/* XXX caller needs to do pte_unmap, yuck */
			}
		}
	}
	return(retval);
}
394 | |||
/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 * Returns 0 if no translation exists.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	/* I don't know why this won't work on PMacs or CHRP.  It
	 * appears there is some bug, or there is some implicit
	 * mapping done not properly represented by BATs or in page
	 * tables.......I am actively working on resolving this, but
	 * can't hold up other stuff.  -- Dan
	 */
	pte_t *pte;
	struct mm_struct *mm;

	/* Check the BATs */
	pa = v_mapped_by_bats(addr);
	if (pa)
		return pa;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte)) {
		/* Combine the page frame address with the in-page offset */
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
		pte_unmap(pte);
	}

	return(pa);
}
432 | |||
/* This is will find the virtual address for a physical one....
 * Swiped from APUS, could be dangerous :-).
 * This is only a placeholder until I really find a way to make this
 * work. -- Dan
 * Current behavior: assumes paddr lies in the linear KERNELBASE map.
 */
unsigned long
mm_ptov (unsigned long paddr)
{
	unsigned long ret;
#if 0
	/* Dead APUS-era chunk-table lookup, kept for reference only
	 * (never compiled). */
	if (paddr < 16*1024*1024)
		ret = ZTWO_VADDR(paddr);
	else {
		int i;

		for (i = 0; i < kmap_chunk_count;){
			unsigned long phys = kmap_chunks[i++];
			unsigned long size = kmap_chunks[i++];
			unsigned long virt = kmap_chunks[i++];
			if (paddr >= phys
			    && paddr < (phys + size)){
				ret = virt + paddr - phys;
				goto exit;
			}
		}

		ret = (unsigned long) __va(paddr);
	}
exit:
#ifdef DEBUGPV
	printk ("PTOV(%lx)=%lx\n", paddr, ret);
#endif
#else
	ret = (unsigned long)paddr + KERNELBASE;
#endif
	return ret;
}
470 | |||
diff --git a/arch/powerpc/mm/pgtable64.c b/arch/powerpc/mm/pgtable64.c new file mode 100644 index 000000000000..724f97e5dee5 --- /dev/null +++ b/arch/powerpc/mm/pgtable64.c | |||
@@ -0,0 +1,357 @@ | |||
1 | /* | ||
2 | * This file contains ioremap and related functions for 64-bit machines. | ||
3 | * | ||
4 | * Derived from arch/ppc64/mm/init.c | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org) | ||
8 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
9 | * Copyright (C) 1996 Paul Mackerras | ||
10 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
11 | * | ||
12 | * Derived from "arch/i386/mm/init.c" | ||
13 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
14 | * | ||
15 | * Dave Engebretsen <engebret@us.ibm.com> | ||
16 | * Rework for PPC64 port. | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License | ||
20 | * as published by the Free Software Foundation; either version | ||
21 | * 2 of the License, or (at your option) any later version. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <linux/signal.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/string.h> | ||
31 | #include <linux/types.h> | ||
32 | #include <linux/mman.h> | ||
33 | #include <linux/mm.h> | ||
34 | #include <linux/swap.h> | ||
35 | #include <linux/stddef.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | #include <linux/init.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/bootmem.h> | ||
40 | #include <linux/highmem.h> | ||
41 | #include <linux/idr.h> | ||
42 | #include <linux/nodemask.h> | ||
43 | #include <linux/module.h> | ||
44 | |||
45 | #include <asm/pgalloc.h> | ||
46 | #include <asm/page.h> | ||
47 | #include <asm/prom.h> | ||
48 | #include <asm/lmb.h> | ||
49 | #include <asm/rtas.h> | ||
50 | #include <asm/io.h> | ||
51 | #include <asm/mmu_context.h> | ||
52 | #include <asm/pgtable.h> | ||
53 | #include <asm/mmu.h> | ||
54 | #include <asm/uaccess.h> | ||
55 | #include <asm/smp.h> | ||
56 | #include <asm/machdep.h> | ||
57 | #include <asm/tlb.h> | ||
58 | #include <asm/eeh.h> | ||
59 | #include <asm/processor.h> | ||
60 | #include <asm/mmzone.h> | ||
61 | #include <asm/cputable.h> | ||
62 | #include <asm/ppcdebug.h> | ||
63 | #include <asm/sections.h> | ||
64 | #include <asm/system.h> | ||
65 | #include <asm/iommu.h> | ||
66 | #include <asm/abs_addr.h> | ||
67 | #include <asm/vdso.h> | ||
68 | #include <asm/imalloc.h> | ||
69 | |||
70 | #if PGTABLE_RANGE > USER_VSID_RANGE | ||
71 | #warning Limited user VSID range means pagetable space is wasted | ||
72 | #endif | ||
73 | |||
74 | #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) | ||
75 | #warning TASK_SIZE is smaller than it needs to be. | ||
76 | #endif | ||
77 | |||
/* Set non-zero once mem_init() has run; gates allocation strategies. */
int mem_init_done;

/* Next address handed out for early ioremap; imalloc takes over later. */
unsigned long ioremap_bot = IMALLOC_BASE;

/* Bottom of the virtual window reserved for PHB IO space. */
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

/* First address beyond the kernel image (provided by the linker). */
unsigned long klimit = (unsigned long)_end;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
93 | |||
94 | #ifdef CONFIG_PPC_ISERIES | ||
95 | |||
/*
 * On iSeries the hypervisor already maps all of IO space into the
 * kernel's address space, so ioremap()/__ioremap() degenerate to the
 * identity and iounmap() has nothing to tear down.
 */
void __iomem *ioremap(unsigned long addr, unsigned long size)
{
	return (void __iomem *)addr;
}

/* NOTE(review): 'extern' on a definition is unusual but harmless. */
extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
			       unsigned long flags)
{
	return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
	return;
}
111 | |||
112 | #else | ||
113 | |||
114 | /* | ||
115 | * map_io_page currently only called by __ioremap | ||
116 | * map_io_page adds an entry to the ioremap page table | ||
117 | * and adds an entry to the HPT, possibly bolting it | ||
118 | */ | ||
119 | static int map_io_page(unsigned long ea, unsigned long pa, int flags) | ||
120 | { | ||
121 | pgd_t *pgdp; | ||
122 | pud_t *pudp; | ||
123 | pmd_t *pmdp; | ||
124 | pte_t *ptep; | ||
125 | unsigned long vsid; | ||
126 | |||
127 | if (mem_init_done) { | ||
128 | spin_lock(&init_mm.page_table_lock); | ||
129 | pgdp = pgd_offset_k(ea); | ||
130 | pudp = pud_alloc(&init_mm, pgdp, ea); | ||
131 | if (!pudp) | ||
132 | return -ENOMEM; | ||
133 | pmdp = pmd_alloc(&init_mm, pudp, ea); | ||
134 | if (!pmdp) | ||
135 | return -ENOMEM; | ||
136 | ptep = pte_alloc_kernel(&init_mm, pmdp, ea); | ||
137 | if (!ptep) | ||
138 | return -ENOMEM; | ||
139 | set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, | ||
140 | __pgprot(flags))); | ||
141 | spin_unlock(&init_mm.page_table_lock); | ||
142 | } else { | ||
143 | unsigned long va, vpn, hash, hpteg; | ||
144 | |||
145 | /* | ||
146 | * If the mm subsystem is not fully up, we cannot create a | ||
147 | * linux page table entry for this mapping. Simply bolt an | ||
148 | * entry in the hardware page table. | ||
149 | */ | ||
150 | vsid = get_kernel_vsid(ea); | ||
151 | va = (vsid << 28) | (ea & 0xFFFFFFF); | ||
152 | vpn = va >> PAGE_SHIFT; | ||
153 | |||
154 | hash = hpt_hash(vpn, 0); | ||
155 | |||
156 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | ||
157 | |||
158 | /* Panic if a pte grpup is full */ | ||
159 | if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, | ||
160 | HPTE_V_BOLTED, | ||
161 | _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX) | ||
162 | == -1) { | ||
163 | panic("map_io_page: could not insert mapping"); | ||
164 | } | ||
165 | } | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | |||
170 | static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, | ||
171 | unsigned long ea, unsigned long size, | ||
172 | unsigned long flags) | ||
173 | { | ||
174 | unsigned long i; | ||
175 | |||
176 | if ((flags & _PAGE_PRESENT) == 0) | ||
177 | flags |= pgprot_val(PAGE_KERNEL); | ||
178 | |||
179 | for (i = 0; i < size; i += PAGE_SIZE) | ||
180 | if (map_io_page(ea+i, pa+i, flags)) | ||
181 | return NULL; | ||
182 | |||
183 | return (void __iomem *) (ea + (addr & ~PAGE_MASK)); | ||
184 | } | ||
185 | |||
186 | |||
/*
 * Map a physical MMIO range: non-cacheable and guarded (no
 * speculative accesses).
 */
void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}
192 | |||
/*
 * Map 'size' bytes at physical 'addr' with protection 'flags'.
 * Returns the virtual address (including the sub-page offset of
 * 'addr'), or NULL on failure.
 */
void __iomem * __ioremap(unsigned long addr, unsigned long size,
			 unsigned long flags)
{
	unsigned long pa, ea;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through
	 * IMALLOC_END
	 *
	 */
	pa = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - pa;

	if (size == 0)
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;
		area = im_get_free_area(size);
		if (area == NULL)
			return NULL;
		ea = (unsigned long)(area->addr);
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (!ret)
			im_free(area->addr);	/* mapping failed: release VA */
	} else {
		ea = ioremap_bot;
		ret = __ioremap_com(addr, pa, ea, size, flags);
		if (ret)
			ioremap_bot += size;	/* claim the space on success */
	}
	return ret;
}
231 | |||
/* True iff _val is a multiple of PAGE_SIZE */
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

/*
 * Map the physical range [pa, pa+size) at the exact effective address
 * 'ea' (callers such as PHB hotplug need a fixed virtual layout).
 * Returns 0 on success, 1 on any failure.
 */
int __ioremap_explicit(unsigned long pa, unsigned long ea,
		       unsigned long size, unsigned long flags)
{
	struct vm_struct *area;
	void __iomem *ret;

	/* For now, require page-aligned values for pa, ea, and size */
	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
	    !IS_PAGE_ALIGNED(size)) {
		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
		return 1;
	}

	if (!mem_init_done) {
		/* Two things to consider in this case:
		 * 1) No records will be kept (imalloc, etc) that the region
		 *    has been remapped
		 * 2) It won't be easy to iounmap() the region later (because
		 *    of 1)
		 */
		;
	} else {
		/* Reserve the requested virtual range in the imalloc tree */
		area = im_get_area(ea, size,
			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
		if (area == NULL) {
			/* Expected when PHB-dlpar is in play */
			return 1;
		}
		if (ea != (unsigned long) area->addr) {
			printk(KERN_ERR "unexpected addr return from "
			       "im_get_area\n");
			return 1;
		}
	}

	/* pa is page-aligned, so the 'addr' argument carries no offset */
	ret = __ioremap_com(pa, pa, ea, size, flags);
	if (ret == NULL) {
		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
		return 1;
	}
	if (ret != (void *) ea) {
		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
		return 1;
	}

	return 0;
}
281 | |||
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX	what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	/* Early mappings were never recorded, so nothing can be freed */
	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

	/* im_free() both tears down the mapping and releases the record */
	im_free(addr);
}
300 | |||
/*
 * Unmap every imalloc region that lies inside [addr, addr+size).
 * Returns 0 if at least one such region existed (all were unmapped),
 * 1 if none did.
 */
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
	struct vm_struct *area;

	/* Check whether subsets of this region exist */
	area = im_get_area(addr, size, IM_REGION_SUPERSET);
	if (area == NULL)
		return 1;

	/* Each iounmap() removes the region, so the lookup advances */
	while (area) {
		iounmap((void __iomem *) area->addr);
		area = im_get_area(addr, size,
				IM_REGION_SUPERSET);
	}

	return 0;
}
318 | |||
/*
 * Unmap a region previously established with __ioremap_explicit().
 * The range must either exactly match an imalloc region, be a subset
 * of one (the parent is split), or be a superset of existing regions
 * (each is unmapped in turn).  Returns 0 on success, 1 on failure.
 */
int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;
	int rc;

	addr = (unsigned long __force) start & PAGE_MASK;

	/* Verify that the region either exists or is a subset of an existing
	 * region.  In the latter case, split the parent region to create
	 * the exact region
	 */
	area = im_get_area(addr, size,
			    IM_REGION_EXISTS | IM_REGION_SUBSET);
	if (area == NULL) {
		/* Determine whether subset regions exist.  If so, unmap. */
		rc = iounmap_subset_regions(addr, size);
		if (rc) {
			printk(KERN_ERR
			       "%s() cannot unmap nonexistent range 0x%lx\n",
				__FUNCTION__, addr);
			return 1;
		}
	} else {
		iounmap((void __iomem *) area->addr);
	}
	/*
	 * FIXME! This can't be right:
	iounmap(area->addr);
	 * Maybe it should be "iounmap(area);"
	 */
	return 0;
}
352 | |||
353 | #endif | ||
354 | |||
355 | EXPORT_SYMBOL(ioremap); | ||
356 | EXPORT_SYMBOL(__ioremap); | ||
357 | EXPORT_SYMBOL(iounmap); | ||
diff --git a/arch/powerpc/mm/ppc_mmu.c b/arch/powerpc/mm/ppc_mmu.c new file mode 100644 index 000000000000..9a381ed5eb21 --- /dev/null +++ b/arch/powerpc/mm/ppc_mmu.c | |||
@@ -0,0 +1,296 @@ | |||
1 | /* | ||
2 | * This file contains the routines for handling the MMU on those | ||
3 | * PowerPC implementations where the MMU substantially follows the | ||
4 | * architecture specification. This includes the 6xx, 7xx, 7xxx, | ||
5 | * 8260, and POWER3 implementations but excludes the 8xx and 4xx. | ||
6 | * -- paulus | ||
7 | * | ||
8 | * Derived from arch/ppc/mm/init.c: | ||
9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
10 | * | ||
11 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
12 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
13 | * Copyright (C) 1996 Paul Mackerras | ||
14 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
15 | * | ||
16 | * Derived from "arch/i386/mm/init.c" | ||
17 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License | ||
21 | * as published by the Free Software Foundation; either version | ||
22 | * 2 of the License, or (at your option) any later version. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/highmem.h> | ||
31 | |||
32 | #include <asm/prom.h> | ||
33 | #include <asm/mmu.h> | ||
34 | #include <asm/machdep.h> | ||
35 | |||
36 | #include "mmu_decl.h" | ||
37 | #include "mem_pieces.h" | ||
38 | |||
/* Hash-table location/geometry; filled in by MMU_init_hw() below. */
PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;		/* value to be written to the SDR1 SPR */

union ubat {			/* BAT register values to be loaded */
	BAT	bat;
#ifdef CONFIG_PPC64BRIDGE
	u64	word[2];
#else
	u32	word[2];
#endif
} BATS[4][2];			/* 4 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;	/* inclusive upper bound (see setbat) */
	unsigned long phys;
} bat_addrs[4];
57 | |||
58 | /* | ||
59 | * Return PA for this VA if it is mapped by a BAT, or 0 | ||
60 | */ | ||
61 | unsigned long v_mapped_by_bats(unsigned long va) | ||
62 | { | ||
63 | int b; | ||
64 | for (b = 0; b < 4; ++b) | ||
65 | if (va >= bat_addrs[b].start && va < bat_addrs[b].limit) | ||
66 | return bat_addrs[b].phys + (va - bat_addrs[b].start); | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Return VA for a given PA or 0 if not mapped | ||
72 | */ | ||
73 | unsigned long p_mapped_by_bats(unsigned long pa) | ||
74 | { | ||
75 | int b; | ||
76 | for (b = 0; b < 4; ++b) | ||
77 | if (pa >= bat_addrs[b].phys | ||
78 | && pa < (bat_addrs[b].limit-bat_addrs[b].start) | ||
79 | +bat_addrs[b].phys) | ||
80 | return bat_addrs[b].start+(pa-bat_addrs[b].phys); | ||
81 | return 0; | ||
82 | } | ||
83 | |||
/*
 * Cover as much of low memory as possible with BAT registers (BAT2
 * and, if needed, BAT3).  Returns the number of bytes of RAM mapped;
 * the caller maps the remainder via page tables.  Returns 0 on
 * POWER4 or when BAT use is disabled.
 */
unsigned long __init mmu_mapin_ram(void)
{
#ifdef CONFIG_POWER4
	return 0;
#else
	unsigned long tot, bl, done;
	unsigned long max_size = (256<<20);	/* architectural BAT maximum */
	unsigned long align;

	if (__map_without_bats)
		return 0;

	/* Set up BAT2 and if necessary BAT3 to cover RAM. */

	/* Make sure we don't map a block larger than the
	   smallest alignment of the physical address. */
	/* alignment of PPC_MEMSTART */
	align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
	/* set BAT block size to MIN(max_size, align) */
	if (align && align < max_size)
		max_size = align;

	/* Pick the largest power-of-2 block no bigger than half of lowmem */
	tot = total_lowmem;
	for (bl = 128<<10; bl < max_size; bl <<= 1) {
		if (bl * 2 > tot)
			break;
	}

	setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
	/* bat_addrs[].limit is inclusive, hence the +1 */
	done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
	if ((done < tot) && !bat_addrs[3].limit) {
		/* use BAT3 to cover a bit more */
		tot -= done;
		for (bl = 128<<10; bl < max_size; bl <<= 1)
			if (bl * 2 > tot)
				break;
		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
	}

	return done;
#endif
}
127 | |||
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, unsigned long phys,
		   unsigned int size, int flags)
{
	unsigned int bl;
	int wimgxpp;
	union ubat *bat = BATS[index];

	/* Some CPUs need the M (coherency) bit set on cacheable BATs */
	if (((flags & _PAGE_NO_CACHE) == 0) &&
	    cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags |= _PAGE_COHERENT;

	/* Block-length field: number of 128kB units, minus one */
	bl = (size >> 17) - 1;
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].word[1] = phys | wimgxpp;
#ifndef CONFIG_KGDB /* want user access for breakpoints */
		if (flags & _PAGE_USER)
#endif
			bat[1].bat.batu.vp = 1;
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].word[0] = bat[0].word[1] = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;	/* 601 BATs max out at 8MB */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->word[0] = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->word[1] = phys | bl | 0x40;	/* V=1 */
	}

	/* Record the range so v/p_mapped_by_bats() can find it.
	 * 'limit' is the address of the last byte (inclusive). */
	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
180 | |||
/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 * Sizes the hash table from total memory, allocates and clears it,
 * computes _SDR1, and patches the mask/shift immediates in the
 * hand-written hash assembly to match the chosen geometry.
 */
void __init MMU_init_hw(void)
{
	unsigned int hmask, mb, mb2;
	unsigned int n_hpteg, lg_n_hpteg;

	extern unsigned int hash_page_patch_A[];
	extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
	extern unsigned int hash_page[];
	extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];

	if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
		/*
		 * Put a blr (procedure return) instruction at the
		 * start of hash_page, since we can still get DSI
		 * exceptions on a 603.  0x4e800020 is the 'blr' opcode.
		 */
		hash_page[0] = 0x4e800020;
		flush_icache_range((unsigned long) &hash_page[0],
				   (unsigned long) &hash_page[1]);
		return;
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);

#ifdef CONFIG_PPC64BRIDGE
#define LG_HPTEG_SIZE	7		/* 128 bytes per HPTEG */
#define SDR1_LOW_BITS	(lg_n_hpteg - 11)
#define MIN_N_HPTEG	2048		/* min 256kB hash table */
#else
#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */
#endif

#ifdef CONFIG_POWER4
	/* The hash table has already been allocated and initialized
	   in prom.c */
	n_hpteg = Hash_size >> LG_HPTEG_SIZE;
	lg_n_hpteg = __ilog2(n_hpteg);

	/* Remove the hash table from the available memory */
	if (Hash)
		reserve_phys_mem(__pa(Hash), Hash_size);

#else /* CONFIG_POWER4 */
	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;

	/*
	 * Find some memory for the hash table.
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	Hash = mem_pieces_find(Hash_size, Hash_size);
	cacheable_memzero(Hash, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
#endif /* CONFIG_POWER4 */

	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);

	printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
	       total_memory >> 20, Hash_size >> 10, Hash);


	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 * (masks and shift counts depend on the hash table geometry
	 * chosen above and cannot be compile-time constants).
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
	Hash_mask = n_hpteg - 1;
	hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		mb2 = 16 - LG_HPTEG_SIZE;

	hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
	hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
	hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
	hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;

	/*
	 * Ensure that the locations we've patched have been written
	 * out from the data cache and invalidated in the instruction
	 * cache, on those machines with split caches.
	 */
	flush_icache_range((unsigned long) &hash_page_patch_A[0],
			   (unsigned long) &hash_page_patch_C[1]);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
	flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
	flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
	flush_icache_range((unsigned long) &flush_hash_patch_A[0],
			   (unsigned long) &flush_hash_patch_B[1]);

	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
diff --git a/arch/powerpc/mm/tlb.c b/arch/powerpc/mm/tlb.c new file mode 100644 index 000000000000..6c3dc3c44c86 --- /dev/null +++ b/arch/powerpc/mm/tlb.c | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * This file contains the routines for TLB flushing. | ||
3 | * On machines where the MMU uses a hash table to store virtual to | ||
4 | * physical translations, these routines flush entries from the | ||
5 | * hash table also. | ||
6 | * -- paulus | ||
7 | * | ||
8 | * Derived from arch/ppc/mm/init.c: | ||
9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
10 | * | ||
11 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) | ||
12 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) | ||
13 | * Copyright (C) 1996 Paul Mackerras | ||
14 | * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). | ||
15 | * | ||
16 | * Derived from "arch/i386/mm/init.c" | ||
17 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License | ||
21 | * as published by the Free Software Foundation; either version | ||
22 | * 2 of the License, or (at your option) any later version. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <asm/tlbflush.h> | ||
32 | #include <asm/tlb.h> | ||
33 | |||
34 | #include "mmu_decl.h" | ||
35 | |||
36 | /* | ||
37 | * Called when unmapping pages to flush entries from the TLB/hash table. | ||
38 | */ | ||
39 | void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) | ||
40 | { | ||
41 | unsigned long ptephys; | ||
42 | |||
43 | if (Hash != 0) { | ||
44 | ptephys = __pa(ptep) & PAGE_MASK; | ||
45 | flush_hash_pages(mm->context, addr, ptephys, 1); | ||
46 | } | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Called by ptep_set_access_flags, must flush on CPUs for which the | ||
51 | * DSI handler can't just "fixup" the TLB on a write fault | ||
52 | */ | ||
53 | void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr) | ||
54 | { | ||
55 | if (Hash != 0) | ||
56 | return; | ||
57 | _tlbie(addr); | ||
58 | } | ||
59 | |||
60 | /* | ||
61 | * Called at the end of a mmu_gather operation to make sure the | ||
62 | * TLB flush is completely done. | ||
63 | */ | ||
64 | void tlb_flush(struct mmu_gather *tlb) | ||
65 | { | ||
66 | if (Hash == 0) { | ||
67 | /* | ||
68 | * 603 needs to flush the whole TLB here since | ||
69 | * it doesn't use a hash table. | ||
70 | */ | ||
71 | _tlbia(); | ||
72 | } | ||
73 | } | ||
74 | |||
75 | /* | ||
76 | * TLB flushing: | ||
77 | * | ||
78 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
79 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
80 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
81 | * - flush_tlb_kernel_range(start, end) flushes kernel pages | ||
82 | * | ||
83 | * since the hardware hash table functions as an extension of the | ||
84 | * tlb as far as the linux tables are concerned, flush it too. | ||
85 | * -- Cort | ||
86 | */ | ||
87 | |||
88 | /* | ||
89 | * 750 SMP is a Bad Idea because the 750 doesn't broadcast all | ||
90 | * the cache operations on the bus. Hence we need to use an IPI | ||
91 | * to get the other CPU(s) to invalidate their TLBs. | ||
92 | */ | ||
93 | #ifdef CONFIG_SMP_750 | ||
94 | #define FINISH_FLUSH smp_send_tlb_invalidate(0) | ||
95 | #else | ||
96 | #define FINISH_FLUSH do { } while (0) | ||
97 | #endif | ||
98 | |||
99 | static void flush_range(struct mm_struct *mm, unsigned long start, | ||
100 | unsigned long end) | ||
101 | { | ||
102 | pmd_t *pmd; | ||
103 | unsigned long pmd_end; | ||
104 | int count; | ||
105 | unsigned int ctx = mm->context; | ||
106 | |||
107 | if (Hash == 0) { | ||
108 | _tlbia(); | ||
109 | return; | ||
110 | } | ||
111 | start &= PAGE_MASK; | ||
112 | if (start >= end) | ||
113 | return; | ||
114 | end = (end - 1) | ~PAGE_MASK; | ||
115 | pmd = pmd_offset(pgd_offset(mm, start), start); | ||
116 | for (;;) { | ||
117 | pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; | ||
118 | if (pmd_end > end) | ||
119 | pmd_end = end; | ||
120 | if (!pmd_none(*pmd)) { | ||
121 | count = ((pmd_end - start) >> PAGE_SHIFT) + 1; | ||
122 | flush_hash_pages(ctx, start, pmd_val(*pmd), count); | ||
123 | } | ||
124 | if (pmd_end == end) | ||
125 | break; | ||
126 | start = pmd_end + 1; | ||
127 | ++pmd; | ||
128 | } | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Flush kernel TLB entries in the given range | ||
133 | */ | ||
134 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
135 | { | ||
136 | flush_range(&init_mm, start, end); | ||
137 | FINISH_FLUSH; | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * Flush all the (user) entries for the address space described by mm. | ||
142 | */ | ||
143 | void flush_tlb_mm(struct mm_struct *mm) | ||
144 | { | ||
145 | struct vm_area_struct *mp; | ||
146 | |||
147 | if (Hash == 0) { | ||
148 | _tlbia(); | ||
149 | return; | ||
150 | } | ||
151 | |||
152 | for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) | ||
153 | flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); | ||
154 | FINISH_FLUSH; | ||
155 | } | ||
156 | |||
157 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) | ||
158 | { | ||
159 | struct mm_struct *mm; | ||
160 | pmd_t *pmd; | ||
161 | |||
162 | if (Hash == 0) { | ||
163 | _tlbie(vmaddr); | ||
164 | return; | ||
165 | } | ||
166 | mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; | ||
167 | pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr); | ||
168 | if (!pmd_none(*pmd)) | ||
169 | flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1); | ||
170 | FINISH_FLUSH; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * For each address in the range, find the pte for the address | ||
175 | * and check _PAGE_HASHPTE bit; if it is set, find and destroy | ||
176 | * the corresponding HPTE. | ||
177 | */ | ||
178 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
179 | unsigned long end) | ||
180 | { | ||
181 | flush_range(vma->vm_mm, start, end); | ||
182 | FINISH_FLUSH; | ||
183 | } | ||
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig new file mode 100644 index 000000000000..ed39d6a3d22a --- /dev/null +++ b/arch/powerpc/platforms/4xx/Kconfig | |||
@@ -0,0 +1,280 @@ | |||
1 | config 4xx | ||
2 | bool | ||
3 | depends on 40x || 44x | ||
4 | default y | ||
5 | |||
6 | config WANT_EARLY_SERIAL | ||
7 | bool | ||
8 | select SERIAL_8250 | ||
9 | default n | ||
10 | |||
11 | menu "AMCC 4xx options" | ||
12 | depends on 4xx | ||
13 | |||
14 | choice | ||
15 | prompt "Machine Type" | ||
16 | depends on 40x | ||
17 | default WALNUT | ||
18 | |||
19 | config BUBINGA | ||
20 | bool "Bubinga" | ||
21 | select WANT_EARLY_SERIAL | ||
22 | help | ||
23 | This option enables support for the IBM 405EP evaluation board. | ||
24 | |||
25 | config CPCI405 | ||
26 | bool "CPCI405" | ||
27 | help | ||
28 | This option enables support for the CPCI405 board. | ||
29 | |||
30 | config EP405 | ||
31 | bool "EP405/EP405PC" | ||
32 | help | ||
33 | This option enables support for the EP405/EP405PC boards. | ||
34 | |||
35 | config REDWOOD_5 | ||
36 | bool "Redwood-5" | ||
37 | help | ||
38 | This option enables support for the IBM STB04 evaluation board. | ||
39 | |||
40 | config REDWOOD_6 | ||
41 | bool "Redwood-6" | ||
42 | help | ||
43 | This option enables support for the IBM STBx25xx evaluation board. | ||
44 | |||
45 | config SYCAMORE | ||
46 | bool "Sycamore" | ||
47 | help | ||
48 | This option enables support for the IBM PPC405GPr evaluation board. | ||
49 | |||
50 | config WALNUT | ||
51 | bool "Walnut" | ||
52 | help | ||
53 | This option enables support for the IBM PPC405GP evaluation board. | ||
54 | |||
55 | config XILINX_ML300 | ||
56 | bool "Xilinx-ML300" | ||
57 | help | ||
58 | This option enables support for the Xilinx ML300 evaluation board. | ||
59 | |||
60 | endchoice | ||
61 | |||
62 | choice | ||
63 | prompt "Machine Type" | ||
64 | depends on 44x | ||
65 | default EBONY | ||
66 | |||
67 | config BAMBOO | ||
68 | bool "Bamboo" | ||
69 | select WANT_EARLY_SERIAL | ||
70 | help | ||
71 | This option enables support for the IBM PPC440EP evaluation board. | ||
72 | |||
73 | config EBONY | ||
74 | bool "Ebony" | ||
75 | select WANT_EARLY_SERIAL | ||
76 | help | ||
77 | This option enables support for the IBM PPC440GP evaluation board. | ||
78 | |||
79 | config LUAN | ||
80 | bool "Luan" | ||
81 | select WANT_EARLY_SERIAL | ||
82 | help | ||
83 | This option enables support for the IBM PPC440SP evaluation board. | ||
84 | |||
85 | config OCOTEA | ||
86 | bool "Ocotea" | ||
87 | select WANT_EARLY_SERIAL | ||
88 | help | ||
89 | This option enables support for the IBM PPC440GX evaluation board. | ||
90 | |||
91 | endchoice | ||
92 | |||
93 | config EP405PC | ||
94 | bool "EP405PC Support" | ||
95 | depends on EP405 | ||
96 | |||
97 | |||
98 | # It's often necessary to know the specific 4xx processor type. | ||
99 | # Fortunately, it is implied (so far) from the board type, so we | ||
100 | # don't need to ask more redundant questions. | ||
101 | config NP405H | ||
102 | bool | ||
103 | depends on ASH | ||
104 | default y | ||
105 | |||
106 | config 440EP | ||
107 | bool | ||
108 | depends on BAMBOO | ||
109 | select PPC_FPU | ||
110 | default y | ||
111 | |||
112 | config 440GP | ||
113 | bool | ||
114 | depends on EBONY | ||
115 | default y | ||
116 | |||
117 | config 440GX | ||
118 | bool | ||
119 | depends on OCOTEA | ||
120 | default y | ||
121 | |||
122 | config 440SP | ||
123 | bool | ||
124 | depends on LUAN | ||
125 | default y | ||
126 | |||
127 | config 440 | ||
128 | bool | ||
129 | depends on 440GP || 440SP || 440EP | ||
130 | default y | ||
131 | |||
132 | config 440A | ||
133 | bool | ||
134 | depends on 440GX | ||
135 | default y | ||
136 | |||
137 | config IBM440EP_ERR42 | ||
138 | bool | ||
139 | depends on 440EP | ||
140 | default y | ||
141 | |||
142 | # All 405-based cores up until the 405GPR and 405EP have this errata. | ||
143 | config IBM405_ERR77 | ||
144 | bool | ||
145 | depends on 40x && !403GCX && !405GPR && !405EP | ||
146 | default y | ||
147 | |||
148 | # All 40x-based cores, up until the 405GPR and 405EP have this errata. | ||
149 | config IBM405_ERR51 | ||
150 | bool | ||
151 | depends on 40x && !405GPR && !405EP | ||
152 | default y | ||
153 | |||
154 | config BOOKE | ||
155 | bool | ||
156 | depends on 44x | ||
157 | default y | ||
158 | |||
159 | config IBM_OCP | ||
160 | bool | ||
161 | depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT | ||
162 | default y | ||
163 | |||
164 | config XILINX_OCP | ||
165 | bool | ||
166 | depends on XILINX_ML300 | ||
167 | default y | ||
168 | |||
169 | config IBM_EMAC4 | ||
170 | bool | ||
171 | depends on 440GX || 440SP | ||
172 | default y | ||
173 | |||
174 | config BIOS_FIXUP | ||
175 | bool | ||
176 | depends on BUBINGA || EP405 || SYCAMORE || WALNUT | ||
177 | default y | ||
178 | |||
179 | # OAK doesn't exist but wanted to keep this around for any future 403GCX boards | ||
180 | config 403GCX | ||
181 | bool | ||
182 | depends OAK | ||
183 | default y | ||
184 | |||
185 | config 405EP | ||
186 | bool | ||
187 | depends on BUBINGA | ||
188 | default y | ||
189 | |||
190 | config 405GP | ||
191 | bool | ||
192 | depends on CPCI405 || EP405 || WALNUT | ||
193 | default y | ||
194 | |||
195 | config 405GPR | ||
196 | bool | ||
197 | depends on SYCAMORE | ||
198 | default y | ||
199 | |||
200 | config VIRTEX_II_PRO | ||
201 | bool | ||
202 | depends on XILINX_ML300 | ||
203 | default y | ||
204 | |||
205 | config STB03xxx | ||
206 | bool | ||
207 | depends on REDWOOD_5 || REDWOOD_6 | ||
208 | default y | ||
209 | |||
210 | config EMBEDDEDBOOT | ||
211 | bool | ||
212 | depends on EP405 || XILINX_ML300 | ||
213 | default y | ||
214 | |||
215 | config IBM_OPENBIOS | ||
216 | bool | ||
217 | depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT | ||
218 | default y | ||
219 | |||
220 | config PPC4xx_DMA | ||
221 | bool "PPC4xx DMA controller support" | ||
222 | depends on 4xx | ||
223 | |||
224 | config PPC4xx_EDMA | ||
225 | bool | ||
226 | depends on !STB03xxx && PPC4xx_DMA | ||
227 | default y | ||
228 | |||
229 | config PPC_GEN550 | ||
230 | bool | ||
231 | depends on 4xx | ||
232 | default y | ||
233 | |||
234 | choice | ||
235 | prompt "TTYS0 device and default console" | ||
236 | depends on 40x | ||
237 | default UART0_TTYS0 | ||
238 | |||
239 | config UART0_TTYS0 | ||
240 | bool "UART0" | ||
241 | |||
242 | config UART0_TTYS1 | ||
243 | bool "UART1" | ||
244 | |||
245 | endchoice | ||
246 | |||
247 | config SERIAL_SICC | ||
248 | bool "SICC Serial port support" | ||
249 | depends on STB03xxx | ||
250 | |||
251 | config UART1_DFLT_CONSOLE | ||
252 | bool | ||
253 | depends on SERIAL_SICC && UART0_TTYS1 | ||
254 | default y | ||
255 | |||
256 | config SERIAL_SICC_CONSOLE | ||
257 | bool | ||
258 | depends on SERIAL_SICC && UART0_TTYS1 | ||
259 | default y | ||
260 | endmenu | ||
261 | |||
262 | |||
263 | menu "IBM 40x options" | ||
264 | depends on 40x | ||
265 | |||
266 | config SERIAL_SICC | ||
267 | bool "SICC Serial port" | ||
268 | depends on STB03xxx | ||
269 | |||
270 | config UART1_DFLT_CONSOLE | ||
271 | bool | ||
272 | depends on SERIAL_SICC && UART0_TTYS1 | ||
273 | default y | ||
274 | |||
275 | config SERIAL_SICC_CONSOLE | ||
276 | bool | ||
277 | depends on SERIAL_SICC && UART0_TTYS1 | ||
278 | default y | ||
279 | |||
280 | endmenu | ||
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig new file mode 100644 index 000000000000..c5bc2821d991 --- /dev/null +++ b/arch/powerpc/platforms/85xx/Kconfig | |||
@@ -0,0 +1,86 @@ | |||
1 | config 85xx | ||
2 | bool | ||
3 | depends on E500 | ||
4 | default y | ||
5 | |||
6 | config PPC_INDIRECT_PCI_BE | ||
7 | bool | ||
8 | depends on 85xx | ||
9 | default y | ||
10 | |||
11 | menu "Freescale 85xx options" | ||
12 | depends on E500 | ||
13 | |||
14 | choice | ||
15 | prompt "Machine Type" | ||
16 | depends on 85xx | ||
17 | default MPC8540_ADS | ||
18 | |||
19 | config MPC8540_ADS | ||
20 | bool "Freescale MPC8540 ADS" | ||
21 | help | ||
22 | This option enables support for the MPC 8540 ADS evaluation board. | ||
23 | |||
24 | config MPC8548_CDS | ||
25 | bool "Freescale MPC8548 CDS" | ||
26 | help | ||
27 | This option enables support for the MPC8548 CDS evaluation board. | ||
28 | |||
29 | config MPC8555_CDS | ||
30 | bool "Freescale MPC8555 CDS" | ||
31 | help | ||
32 | This option enables support for the MPC8555 CDS evaluation board. | ||
33 | |||
34 | config MPC8560_ADS | ||
35 | bool "Freescale MPC8560 ADS" | ||
36 | help | ||
37 | This option enables support for the MPC 8560 ADS evaluation board. | ||
38 | |||
39 | config SBC8560 | ||
40 | bool "WindRiver PowerQUICC III SBC8560" | ||
41 | help | ||
42 | This option enables support for the WindRiver PowerQUICC III | ||
43 | SBC8560 board. | ||
44 | |||
45 | config STX_GP3 | ||
46 | bool "Silicon Turnkey Express GP3" | ||
47 | help | ||
48 | This option enables support for the Silicon Turnkey Express GP3 | ||
49 | board. | ||
50 | |||
51 | endchoice | ||
52 | |||
53 | # It's often necessary to know the specific 85xx processor type. | ||
54 | # Fortunately, it is implied (so far) from the board type, so we | ||
55 | # don't need to ask more redundant questions. | ||
56 | config MPC8540 | ||
57 | bool | ||
58 | depends on MPC8540_ADS | ||
59 | default y | ||
60 | |||
61 | config MPC8548 | ||
62 | bool | ||
63 | depends on MPC8548_CDS | ||
64 | default y | ||
65 | |||
66 | config MPC8555 | ||
67 | bool | ||
68 | depends on MPC8555_CDS | ||
69 | default y | ||
70 | |||
71 | config MPC8560 | ||
72 | bool | ||
73 | depends on SBC8560 || MPC8560_ADS || STX_GP3 | ||
74 | default y | ||
75 | |||
76 | config 85xx_PCI2 | ||
77 | bool "Support for 2nd PCI host controller" | ||
78 | depends on MPC8555_CDS | ||
79 | default y | ||
80 | |||
81 | config PPC_GEN550 | ||
82 | bool | ||
83 | depends on MPC8540 || SBC8560 || MPC8555 | ||
84 | default y | ||
85 | |||
86 | endmenu | ||
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig new file mode 100644 index 000000000000..c8c0ba3cf8e8 --- /dev/null +++ b/arch/powerpc/platforms/8xx/Kconfig | |||
@@ -0,0 +1,352 @@ | |||
1 | config FADS | ||
2 | bool | ||
3 | |||
4 | choice | ||
5 | prompt "8xx Machine Type" | ||
6 | depends on 8xx | ||
7 | default RPXLITE | ||
8 | |||
9 | config RPXLITE | ||
10 | bool "RPX-Lite" | ||
11 | ---help--- | ||
12 | Single-board computers based around the PowerPC MPC8xx chips and | ||
13 | intended for embedded applications. The following types are | ||
14 | supported: | ||
15 | |||
16 | RPX-Lite: | ||
17 | Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823. | ||
18 | |||
19 | RPX-Classic: | ||
20 | Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on | ||
21 | the MPC 860 | ||
22 | |||
23 | BSE-IP: | ||
24 | Bright Star Engineering ip-Engine. | ||
25 | |||
26 | TQM823L: | ||
27 | TQM850L: | ||
28 | TQM855L: | ||
29 | TQM860L: | ||
30 | MPC8xx based family of mini modules, half credit card size, | ||
31 | up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports, | ||
32 | 2 x CAN bus interface, ... | ||
33 | Manufacturer: TQ Components, www.tq-group.de | ||
34 | Date of Release: October (?) 1999 | ||
35 | End of Life: not yet :-) | ||
36 | URL: | ||
37 | - module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf> | ||
38 | - starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf> | ||
39 | - images: <http://www.denx.de/embedded-ppc-en.html> | ||
40 | |||
41 | FPS850L: | ||
42 | FingerPrint Sensor System (based on TQM850L) | ||
43 | Manufacturer: IKENDI AG, <http://www.ikendi.com/> | ||
44 | Date of Release: November 1999 | ||
45 | End of life: end 2000 ? | ||
46 | URL: see TQM850L | ||
47 | |||
48 | IVMS8: | ||
49 | MPC860 based board used in the "Integrated Voice Mail System", | ||
50 | Small Version (8 voice channels) | ||
51 | Manufacturer: Speech Design, <http://www.speech-design.de/> | ||
52 | Date of Release: December 2000 (?) | ||
53 | End of life: - | ||
54 | URL: <http://www.speech-design.de/> | ||
55 | |||
56 | IVML24: | ||
57 | MPC860 based board used in the "Integrated Voice Mail System", | ||
58 | Large Version (24 voice channels) | ||
59 | Manufacturer: Speech Design, <http://www.speech-design.de/> | ||
60 | Date of Release: March 2001 (?) | ||
61 | End of life: - | ||
62 | URL: <http://www.speech-design.de/> | ||
63 | |||
64 | HERMES: | ||
65 | Hermes-Pro ISDN/LAN router with integrated 8 x hub | ||
66 | Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik | ||
67 | <http://www.multidata.de/> | ||
68 | Date of Release: 2000 (?) | ||
69 | End of life: - | ||
70 | URL: <http://www.multidata.de/english/products/hpro.htm> | ||
71 | |||
72 | IP860: | ||
73 | VMEBus IP (Industry Pack) carrier board with MPC860 | ||
74 | Manufacturer: MicroSys GmbH, <http://www.microsys.de/> | ||
75 | Date of Release: ? | ||
76 | End of life: - | ||
77 | URL: <http://www.microsys.de/html/ip860.html> | ||
78 | |||
79 | PCU_E: | ||
80 | PCU = Peripheral Controller Unit, Extended | ||
81 | Manufacturer: Siemens AG, ICN (Information and Communication Networks) | ||
82 | <http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html> | ||
83 | Date of Release: April 2001 | ||
84 | End of life: August 2001 | ||
85 | URL: n. a. | ||
86 | |||
87 | config RPXCLASSIC | ||
88 | bool "RPX-Classic" | ||
89 | help | ||
90 | The RPX-Classic is a single-board computer based on the Motorola | ||
91 | MPC860. It features 16MB of DRAM and a variable amount of flash, | ||
92 | I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two | ||
93 | LEDs. Variants with Ethernet ports exist. Say Y here to support it | ||
94 | directly. | ||
95 | |||
96 | config BSEIP | ||
97 | bool "BSE-IP" | ||
98 | help | ||
99 | Say Y here to support the Bright Star Engineering ipEngine SBC. | ||
100 | This is a credit-card-sized device featuring a MPC823 processor, | ||
101 | 26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video | ||
102 | controller, and two RS232 ports. | ||
103 | |||
104 | config MPC8XXFADS | ||
105 | bool "FADS" | ||
106 | select FADS | ||
107 | |||
108 | config MPC86XADS | ||
109 | bool "MPC86XADS" | ||
110 | help | ||
111 | MPC86x Application Development System by Freescale Semiconductor. | ||
112 | The MPC86xADS is meant to serve as a platform for s/w and h/w | ||
113 | development around the MPC86X processor families. | ||
114 | select FADS | ||
115 | |||
116 | config MPC885ADS | ||
117 | bool "MPC885ADS" | ||
118 | help | ||
119 | Freescale Semiconductor MPC885 Application Development System (ADS). | ||
120 | Also known as DUET. | ||
121 | The MPC885ADS is meant to serve as a platform for s/w and h/w | ||
122 | development around the MPC885 processor family. | ||
123 | |||
124 | config TQM823L | ||
125 | bool "TQM823L" | ||
126 | help | ||
127 | Say Y here to support the TQM823L, one of an MPC8xx-based family of | ||
128 | mini SBCs (half credit-card size) from TQ Components first released | ||
129 | in late 1999. Technical references are at | ||
130 | <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and | ||
131 | <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at | ||
132 | <http://www.denx.de/embedded-ppc-en.html>. | ||
133 | |||
134 | config TQM850L | ||
135 | bool "TQM850L" | ||
136 | help | ||
137 | Say Y here to support the TQM850L, one of an MPC8xx-based family of | ||
138 | mini SBCs (half credit-card size) from TQ Components first released | ||
139 | in late 1999. Technical references are at | ||
140 | <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and | ||
141 | <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at | ||
142 | <http://www.denx.de/embedded-ppc-en.html>. | ||
143 | |||
144 | config TQM855L | ||
145 | bool "TQM855L" | ||
146 | help | ||
147 | Say Y here to support the TQM855L, one of an MPC8xx-based family of | ||
148 | mini SBCs (half credit-card size) from TQ Components first released | ||
149 | in late 1999. Technical references are at | ||
150 | <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and | ||
151 | <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at | ||
152 | <http://www.denx.de/embedded-ppc-en.html>. | ||
153 | |||
154 | config TQM860L | ||
155 | bool "TQM860L" | ||
156 | help | ||
157 | Say Y here to support the TQM860L, one of an MPC8xx-based family of | ||
158 | mini SBCs (half credit-card size) from TQ Components first released | ||
159 | in late 1999. Technical references are at | ||
160 | <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and | ||
161 | <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at | ||
162 | <http://www.denx.de/embedded-ppc-en.html>. | ||
163 | |||
164 | config FPS850L | ||
165 | bool "FPS850L" | ||
166 | |||
167 | config IVMS8 | ||
168 | bool "IVMS8" | ||
169 | help | ||
170 | Say Y here to support the Integrated Voice-Mail Small 8-channel SBC | ||
171 | from Speech Design, released March 2001. The manufacturer's website | ||
172 | is at <http://www.speech-design.de/>. | ||
173 | |||
174 | config IVML24 | ||
175 | bool "IVML24" | ||
176 | help | ||
177 | Say Y here to support the Integrated Voice-Mail Large 24-channel SBC | ||
178 | from Speech Design, released March 2001. The manufacturer's website | ||
179 | is at <http://www.speech-design.de/>. | ||
180 | |||
181 | config HERMES_PRO | ||
182 | bool "HERMES" | ||
183 | |||
184 | config IP860 | ||
185 | bool "IP860" | ||
186 | |||
187 | config LWMON | ||
188 | bool "LWMON" | ||
189 | |||
190 | config PCU_E | ||
191 | bool "PCU_E" | ||
192 | |||
193 | config CCM | ||
194 | bool "CCM" | ||
195 | |||
196 | config LANTEC | ||
197 | bool "LANTEC" | ||
198 | |||
199 | config MBX | ||
200 | bool "MBX" | ||
201 | help | ||
202 | MBX is a line of Motorola single-board computers based around the | ||
203 | MPC821 and MPC860 processors, and intended for embedded-controller | ||
204 | applications. Say Y here to support these boards directly. | ||
205 | |||
206 | config WINCEPT | ||
207 | bool "WinCept" | ||
208 | help | ||
209 | The Wincept 100/110 is a Motorola single-board computer based on the | ||
210 | MPC821 PowerPC, introduced in 1998 and designed to be used in | ||
211 | thin-client machines. Say Y to support it directly. | ||
212 | |||
213 | endchoice | ||
214 | |||
215 | # | ||
216 | # MPC8xx Communication options | ||
217 | # | ||
218 | |||
219 | menu "MPC8xx CPM Options" | ||
220 | depends on 8xx | ||
221 | |||
222 | config SCC_ENET | ||
223 | bool "CPM SCC Ethernet" | ||
224 | depends on NET_ETHERNET | ||
225 | help | ||
226 | Enable Ethernet support via the Motorola MPC8xx serial | ||
227 | communications controller. | ||
228 | |||
229 | choice | ||
230 | prompt "SCC used for Ethernet" | ||
231 | depends on SCC_ENET | ||
232 | default SCC1_ENET | ||
233 | |||
234 | config SCC1_ENET | ||
235 | bool "SCC1" | ||
236 | help | ||
237 | Use MPC8xx serial communications controller 1 to drive Ethernet | ||
238 | (default). | ||
239 | |||
240 | config SCC2_ENET | ||
241 | bool "SCC2" | ||
242 | help | ||
243 | Use MPC8xx serial communications controller 2 to drive Ethernet. | ||
244 | |||
245 | config SCC3_ENET | ||
246 | bool "SCC3" | ||
247 | help | ||
248 | Use MPC8xx serial communications controller 3 to drive Ethernet. | ||
249 | |||
250 | endchoice | ||
251 | |||
252 | config FEC_ENET | ||
253 | bool "860T FEC Ethernet" | ||
254 | depends on NET_ETHERNET | ||
255 | help | ||
256 | Enable Ethernet support via the Fast Ethernet Controller (FEC) on | ||
257 | the Motorola MPC8xx. | ||
258 | |||
259 | config USE_MDIO | ||
260 | bool "Use MDIO for PHY configuration" | ||
261 | depends on FEC_ENET | ||
262 | help | ||
263 | On some boards the hardware configuration of the ethernet PHY can be | ||
264 | used without any software interaction over the MDIO interface, so | ||
265 | all MII code can be omitted. Say N here if unsure or if you don't | ||
266 | need link status reports. | ||
267 | |||
268 | config FEC_AM79C874 | ||
269 | bool "Support AMD79C874 PHY" | ||
270 | depends on USE_MDIO | ||
271 | |||
272 | config FEC_LXT970 | ||
273 | bool "Support LXT970 PHY" | ||
274 | depends on USE_MDIO | ||
275 | |||
276 | config FEC_LXT971 | ||
277 | bool "Support LXT971 PHY" | ||
278 | depends on USE_MDIO | ||
279 | |||
280 | config FEC_QS6612 | ||
281 | bool "Support QS6612 PHY" | ||
282 | depends on USE_MDIO | ||
283 | |||
284 | config ENET_BIG_BUFFERS | ||
285 | bool "Use Big CPM Ethernet Buffers" | ||
286 | depends on SCC_ENET || FEC_ENET | ||
287 | help | ||
288 | Allocate large buffers for MPC8xx Ethernet. Increases throughput | ||
289 | and decreases the likelihood of dropped packets, but costs memory. | ||
290 | |||
291 | config HTDMSOUND | ||
292 | bool "Embedded Planet HIOX Audio" | ||
293 | depends on SOUND=y | ||
294 | |||
295 | # This doesn't really belong here, but it is convenient to ask | ||
296 | # 8xx specific questions. | ||
297 | comment "Generic MPC8xx Options" | ||
298 | |||
299 | config 8xx_COPYBACK | ||
300 | bool "Copy-Back Data Cache (else Writethrough)" | ||
301 | help | ||
302 | Saying Y here will cause the cache on an MPC8xx processor to be used | ||
303 | in Copy-Back mode. If you say N here, it is used in Writethrough | ||
304 | mode. | ||
305 | |||
306 | If in doubt, say Y here. | ||
307 | |||
308 | config 8xx_CPU6 | ||
309 | bool "CPU6 Silicon Errata (860 Pre Rev. C)" | ||
310 | help | ||
311 | MPC860 CPUs, prior to Rev C have some bugs in the silicon, which | ||
312 | require workarounds for Linux (and most other OSes to work). If you | ||
313 | get a BUG() very early in boot, this might fix the problem. For | ||
314 | more details read the document entitled "MPC860 Family Device Errata | ||
315 | Reference" on Motorola's website. This option also incurs a | ||
316 | performance hit. | ||
317 | |||
318 | If in doubt, say N here. | ||
319 | |||
320 | choice | ||
321 | prompt "Microcode patch selection" | ||
322 | default NO_UCODE_PATCH | ||
323 | help | ||
324 | Help not implemented yet, coming soon. | ||
325 | |||
326 | config NO_UCODE_PATCH | ||
327 | bool "None" | ||
328 | |||
329 | config USB_SOF_UCODE_PATCH | ||
330 | bool "USB SOF patch" | ||
331 | help | ||
332 | Help not implemented yet, coming soon. | ||
333 | |||
334 | config I2C_SPI_UCODE_PATCH | ||
335 | bool "I2C/SPI relocation patch" | ||
336 | help | ||
337 | Help not implemented yet, coming soon. | ||
338 | |||
339 | config I2C_SPI_SMC1_UCODE_PATCH | ||
340 | bool "I2C/SPI/SMC1 relocation patch" | ||
341 | help | ||
342 | Help not implemented yet, coming soon. | ||
343 | |||
344 | endchoice | ||
345 | |||
346 | config UCODE_PATCH | ||
347 | bool | ||
348 | default y | ||
349 | depends on !NO_UCODE_PATCH | ||
350 | |||
351 | endmenu | ||
352 | |||
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig new file mode 100644 index 000000000000..6bde3bffed86 --- /dev/null +++ b/arch/powerpc/platforms/apus/Kconfig | |||
@@ -0,0 +1,130 @@ | |||
1 | |||
2 | config AMIGA | ||
3 | bool | ||
4 | depends on APUS | ||
5 | default y | ||
6 | help | ||
7 | This option enables support for the Amiga series of computers. | ||
8 | |||
9 | config ZORRO | ||
10 | bool | ||
11 | depends on APUS | ||
12 | default y | ||
13 | help | ||
14 | This enables support for the Zorro bus in the Amiga. If you have | ||
15 | expansion cards in your Amiga that conform to the Amiga | ||
16 | AutoConfig(tm) specification, say Y, otherwise N. Note that even | ||
17 | expansion cards that do not fit in the Zorro slots but fit in e.g. | ||
18 | the CPU slot may fall in this category, so you have to say Y to let | ||
19 | Linux use these. | ||
20 | |||
21 | config ABSTRACT_CONSOLE | ||
22 | bool | ||
23 | depends on APUS | ||
24 | default y | ||
25 | |||
26 | config APUS_FAST_EXCEPT | ||
27 | bool | ||
28 | depends on APUS | ||
29 | default y | ||
30 | |||
31 | config AMIGA_PCMCIA | ||
32 | bool "Amiga 1200/600 PCMCIA support" | ||
33 | depends on APUS && EXPERIMENTAL | ||
34 | help | ||
35 | Include support in the kernel for pcmcia on Amiga 1200 and Amiga | ||
36 | 600. If you intend to use pcmcia cards say Y; otherwise say N. | ||
37 | |||
38 | config AMIGA_BUILTIN_SERIAL | ||
39 | tristate "Amiga builtin serial support" | ||
40 | depends on APUS | ||
41 | help | ||
42 | If you want to use your Amiga's built-in serial port in Linux, | ||
43 | answer Y. | ||
44 | |||
45 | To compile this driver as a module, choose M here. | ||
46 | |||
47 | config GVPIOEXT | ||
48 | tristate "GVP IO-Extender support" | ||
49 | depends on APUS | ||
50 | help | ||
51 | If you want to use a GVP IO-Extender serial card in Linux, say Y. | ||
52 | Otherwise, say N. | ||
53 | |||
54 | config GVPIOEXT_LP | ||
55 | tristate "GVP IO-Extender parallel printer support" | ||
56 | depends on GVPIOEXT | ||
57 | help | ||
58 | Say Y to enable driving a printer from the parallel port on your | ||
59 | GVP IO-Extender card, N otherwise. | ||
60 | |||
61 | config GVPIOEXT_PLIP | ||
62 | tristate "GVP IO-Extender PLIP support" | ||
63 | depends on GVPIOEXT | ||
64 | help | ||
65 | Say Y to enable doing IP over the parallel port on your GVP | ||
66 | IO-Extender card, N otherwise. | ||
67 | |||
68 | config MULTIFACE_III_TTY | ||
69 | tristate "Multiface Card III serial support" | ||
70 | depends on APUS | ||
71 | help | ||
72 | If you want to use a Multiface III card's serial port in Linux, | ||
73 | answer Y. | ||
74 | |||
75 | To compile this driver as a module, choose M here. | ||
76 | |||
77 | config A2232 | ||
78 | tristate "Commodore A2232 serial support (EXPERIMENTAL)" | ||
79 | depends on EXPERIMENTAL && APUS | ||
80 | ---help--- | ||
81 | This option supports the 2232 7-port serial card shipped with the | ||
82 | Amiga 2000 and other Zorro-bus machines, dating from 1989. At | ||
83 | a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip | ||
84 | each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The | ||
85 | ports were connected with 8 pin DIN connectors on the card bracket, | ||
86 | for which 8 pin to DB25 adapters were supplied. The card also had | ||
87 | jumpers internally to toggle various pinning configurations. | ||
88 | |||
89 | This driver can be built as a module; but then "generic_serial" | ||
90 | will also be built as a module. This has to be loaded before | ||
91 | "ser_a2232". If you want to do this, answer M here. | ||
92 | |||
93 | config WHIPPET_SERIAL | ||
94 | tristate "Hisoft Whippet PCMCIA serial support" | ||
95 | depends on AMIGA_PCMCIA | ||
96 | help | ||
97 | HiSoft has a web page at <http://www.hisoft.co.uk/>, but there | ||
98 | is no listing for the Whippet in their Amiga section. | ||
99 | |||
100 | config APNE | ||
101 | tristate "PCMCIA NE2000 support" | ||
102 | depends on AMIGA_PCMCIA | ||
103 | help | ||
104 | If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise, | ||
105 | say N. | ||
106 | |||
107 | To compile this driver as a module, choose M here: the | ||
108 | module will be called apne. | ||
109 | |||
110 | config SERIAL_CONSOLE | ||
111 | bool "Support for serial port console" | ||
112 | depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y) | ||
113 | |||
114 | config HEARTBEAT | ||
115 | bool "Use power LED as a heartbeat" | ||
116 | depends on APUS | ||
117 | help | ||
118 | Use the power-on LED on your machine as a load meter. The exact | ||
119 | behavior is platform-dependent, but normally the flash frequency is | ||
120 | a hyperbolic function of the 5-minute load average. | ||
121 | |||
122 | config PROC_HARDWARE | ||
123 | bool "/proc/hardware support" | ||
124 | depends on APUS | ||
125 | |||
126 | source "drivers/zorro/Kconfig" | ||
127 | |||
128 | config PCI_PERMEDIA | ||
129 | bool "PCI for Permedia2" | ||
130 | depends on !4xx && !8xx && APUS | ||
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig new file mode 100644 index 000000000000..4f3551430596 --- /dev/null +++ b/arch/powerpc/platforms/embedded6xx/Kconfig | |||
@@ -0,0 +1,313 @@ | |||
1 | choice | ||
2 | prompt "Machine Type" | ||
3 | depends on EMBEDDED6xx | ||
4 | |||
5 | config APUS | ||
6 | bool "Amiga-APUS" | ||
7 | depends on BROKEN | ||
8 | help | ||
9 | Select APUS if configuring for a PowerUP Amiga. | ||
10 | More information is available at: | ||
11 | <http://linux-apus.sourceforge.net/>. | ||
12 | |||
13 | config KATANA | ||
14 | bool "Artesyn-Katana" | ||
15 | help | ||
16 | Select KATANA if configuring an Artesyn KATANA 750i or 3750 | ||
17 | cPCI board. | ||
18 | |||
19 | config WILLOW | ||
20 | bool "Cogent-Willow" | ||
21 | |||
22 | config CPCI690 | ||
23 | bool "Force-CPCI690" | ||
24 | help | ||
25 | Select CPCI690 if configuring a Force CPCI690 cPCI board. | ||
26 | |||
27 | config POWERPMC250 | ||
28 | bool "Force-PowerPMC250" | ||
29 | |||
30 | config CHESTNUT | ||
31 | bool "IBM 750FX Eval board or 750GX Eval board" | ||
32 | help | ||
33 | Select CHESTNUT if configuring an IBM 750FX Eval Board or a | ||
34 | IBM 750GX Eval board. | ||
35 | |||
36 | config SPRUCE | ||
37 | bool "IBM-Spruce" | ||
38 | |||
39 | config HDPU | ||
40 | bool "Sky-HDPU" | ||
41 | help | ||
42 | Select HDPU if configuring a Sky Computers Compute Blade. | ||
43 | |||
config HDPU_FEATURES
	depends on HDPU
	tristate "HDPU-Features"
	help
	  Select to enable HDPU enhanced features.
49 | |||
50 | config EV64260 | ||
51 | bool "Marvell-EV64260BP" | ||
52 | help | ||
53 | Select EV64260 if configuring a Marvell (formerly Galileo) | ||
54 | EV64260BP Evaluation platform. | ||
55 | |||
56 | config LOPEC | ||
57 | bool "Motorola-LoPEC" | ||
58 | |||
59 | config MVME5100 | ||
60 | bool "Motorola-MVME5100" | ||
61 | |||
62 | config PPLUS | ||
63 | bool "Motorola-PowerPlus" | ||
64 | |||
65 | config PRPMC750 | ||
66 | bool "Motorola-PrPMC750" | ||
67 | |||
68 | config PRPMC800 | ||
69 | bool "Motorola-PrPMC800" | ||
70 | |||
71 | config SANDPOINT | ||
72 | bool "Motorola-Sandpoint" | ||
73 | help | ||
74 | Select SANDPOINT if configuring for a Motorola Sandpoint X3 | ||
75 | (any flavor). | ||
76 | |||
77 | config RADSTONE_PPC7D | ||
78 | bool "Radstone Technology PPC7D board" | ||
79 | |||
80 | config PAL4 | ||
81 | bool "SBS-Palomar4" | ||
82 | |||
83 | config GEMINI | ||
84 | bool "Synergy-Gemini" | ||
85 | depends on BROKEN | ||
86 | help | ||
87 | Select Gemini if configuring for a Synergy Microsystems' Gemini | ||
88 | series Single Board Computer. More information is available at: | ||
89 | <http://www.synergymicro.com/PressRel/97_10_15.html>. | ||
90 | |||
91 | config EST8260 | ||
92 | bool "EST8260" | ||
93 | ---help--- | ||
94 | The EST8260 is a single-board computer manufactured by Wind River | ||
95 | Systems, Inc. (formerly Embedded Support Tools Corp.) and based on | ||
96 | the MPC8260. Wind River Systems has a website at | ||
97 | <http://www.windriver.com/>, but the EST8260 cannot be found on it | ||
98 | and has probably been discontinued or rebadged. | ||
99 | |||
100 | config SBC82xx | ||
101 | bool "SBC82xx" | ||
102 | ---help--- | ||
103 | SBC PowerQUICC II, single-board computer with MPC82xx CPU | ||
104 | Manufacturer: Wind River Systems, Inc. | ||
105 | Date of Release: May 2003 | ||
106 | End of Life: - | ||
107 | URL: <http://www.windriver.com/> | ||
108 | |||
109 | config SBS8260 | ||
110 | bool "SBS8260" | ||
111 | |||
112 | config RPX8260 | ||
113 | bool "RPXSUPER" | ||
114 | |||
115 | config TQM8260 | ||
116 | bool "TQM8260" | ||
117 | ---help--- | ||
118 | MPC8260 based module, little larger than credit card, | ||
119 | up to 128 MB global + 64 MB local RAM, 32 MB Flash, | ||
120 | 32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet, | ||
121 | 2 x serial ports, ... | ||
122 | Manufacturer: TQ Components, www.tq-group.de | ||
123 | Date of Release: June 2001 | ||
124 | End of Life: not yet :-) | ||
125 | URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf> | ||
126 | |||
127 | config ADS8272 | ||
128 | bool "ADS8272" | ||
129 | |||
130 | config PQ2FADS | ||
131 | bool "Freescale-PQ2FADS" | ||
132 | help | ||
133 | Select PQ2FADS if you wish to configure for a Freescale | ||
134 | PQ2FADS board (-VR or -ZU). | ||
135 | |||
136 | config LITE5200 | ||
137 | bool "Freescale LITE5200 / (IceCube)" | ||
138 | select PPC_MPC52xx | ||
139 | help | ||
140 | Support for the LITE5200 dev board for the MPC5200 from Freescale. | ||
141 | This is for the LITE5200 version 2.0 board. Don't know if it changes | ||
142 | much but it's only been tested on this board version. I think this | ||
143 | board is also known as IceCube. | ||
144 | |||
145 | config MPC834x_SYS | ||
146 | bool "Freescale MPC834x SYS" | ||
147 | help | ||
148 | This option enables support for the MPC 834x SYS evaluation board. | ||
149 | |||
150 | Be aware that PCI buses can only function when SYS board is plugged | ||
151 | into the PIB (Platform IO Board) board from Freescale which provide | ||
152 | 3 PCI slots. The PIBs PCI initialization is the bootloader's | ||
153 | responsibility. | ||
154 | |||
155 | config EV64360 | ||
156 | bool "Marvell-EV64360BP" | ||
157 | help | ||
158 | Select EV64360 if configuring a Marvell EV64360BP Evaluation | ||
159 | platform. | ||
160 | endchoice | ||
161 | |||
162 | config PQ2ADS | ||
163 | bool | ||
164 | depends on ADS8272 | ||
165 | default y | ||
166 | |||
167 | config TQM8xxL | ||
168 | bool | ||
169 | depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L) | ||
170 | default y | ||
171 | |||
172 | config PPC_MPC52xx | ||
173 | bool | ||
174 | |||
175 | config 8260 | ||
176 | bool "CPM2 Support" if WILLOW | ||
177 | depends on 6xx | ||
178 | default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS | ||
179 | help | ||
180 | The MPC8260 is a typical embedded CPU made by Motorola. Selecting | ||
181 | this option means that you wish to build a kernel for a machine with | ||
182 | an 8260 class CPU. | ||
183 | |||
184 | config 8272 | ||
185 | bool | ||
186 | depends on 6xx | ||
187 | default y if ADS8272 | ||
188 | select 8260 | ||
189 | help | ||
190 | The MPC8272 CPM has a different internal dpram setup than other CPM2 | ||
191 | devices | ||
192 | |||
193 | config 83xx | ||
194 | bool | ||
195 | default y if MPC834x_SYS | ||
196 | |||
197 | config MPC834x | ||
198 | bool | ||
199 | default y if MPC834x_SYS | ||
200 | |||
201 | config CPM2 | ||
202 | bool | ||
203 | depends on 8260 || MPC8560 || MPC8555 | ||
204 | default y | ||
205 | help | ||
206 | The CPM2 (Communications Processor Module) is a coprocessor on | ||
207 | embedded CPUs made by Motorola. Selecting this option means that | ||
208 | you wish to build a kernel for a machine with a CPM2 coprocessor | ||
209 | on it (826x, 827x, 8560). | ||
210 | |||
211 | config PPC_GEN550 | ||
212 | bool | ||
213 | depends on SANDPOINT || SPRUCE || PPLUS || \ | ||
214 | PRPMC750 || PRPMC800 || LOPEC || \ | ||
215 | (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \ | ||
216 | 83xx | ||
217 | default y | ||
218 | |||
219 | config FORCE | ||
220 | bool | ||
221 | depends on 6xx && POWERPMC250 | ||
222 | default y | ||
223 | |||
224 | config GT64260 | ||
225 | bool | ||
226 | depends on EV64260 || CPCI690 | ||
227 | default y | ||
228 | |||
229 | config MV64360 # Really MV64360 & MV64460 | ||
230 | bool | ||
231 | depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360 | ||
232 | default y | ||
233 | |||
234 | config MV64X60 | ||
235 | bool | ||
236 | depends on (GT64260 || MV64360) | ||
237 | default y | ||
238 | |||
239 | menu "Set bridge options" | ||
240 | depends on MV64X60 | ||
241 | |||
242 | config NOT_COHERENT_CACHE | ||
243 | bool "Turn off Cache Coherency" | ||
244 | default n | ||
245 | help | ||
246 | Some 64x60 bridges lock up when trying to enforce cache coherency. | ||
247 | When this option is selected, cache coherency will be turned off. | ||
248 | Note that this can cause other problems (e.g., stale data being | ||
249 | speculatively loaded via a cached mapping). Use at your own risk. | ||
250 | |||
251 | config MV64X60_BASE | ||
252 | hex "Set bridge base used by firmware" | ||
253 | default "0xf1000000" | ||
254 | help | ||
255 | A firmware can leave the base address of the bridge's registers at | ||
256 | a non-standard location. If so, set this value to reflect the | ||
257 | address of that non-standard location. | ||
258 | |||
259 | config MV64X60_NEW_BASE | ||
260 | hex "Set bridge base used by kernel" | ||
261 | default "0xf1000000" | ||
262 | help | ||
263 | If the current base address of the bridge's registers is not where | ||
264 | you want it, set this value to the address that you want it moved to. | ||
265 | |||
266 | endmenu | ||
267 | |||
268 | config NONMONARCH_SUPPORT | ||
269 | bool "Enable Non-Monarch Support" | ||
270 | depends on PRPMC800 | ||
271 | |||
272 | config HARRIER | ||
273 | bool | ||
274 | depends on PRPMC800 | ||
275 | default y | ||
276 | |||
277 | config EPIC_SERIAL_MODE | ||
278 | bool | ||
279 | depends on 6xx && (LOPEC || SANDPOINT) | ||
280 | default y | ||
281 | |||
282 | config MPC10X_BRIDGE | ||
283 | bool | ||
284 | depends on POWERPMC250 || LOPEC || SANDPOINT | ||
285 | default y | ||
286 | |||
287 | config MPC10X_OPENPIC | ||
288 | bool | ||
289 | depends on POWERPMC250 || LOPEC || SANDPOINT | ||
290 | default y | ||
291 | |||
292 | config MPC10X_STORE_GATHERING | ||
293 | bool "Enable MPC10x store gathering" | ||
294 | depends on MPC10X_BRIDGE | ||
295 | |||
296 | config SANDPOINT_ENABLE_UART1 | ||
297 | bool "Enable DUART mode on Sandpoint" | ||
298 | depends on SANDPOINT | ||
299 | help | ||
300 | If this option is enabled then the MPC824x processor will run | ||
301 | in DUART mode instead of UART mode. | ||
302 | |||
303 | config HARRIER_STORE_GATHERING | ||
304 | bool "Enable Harrier store gathering" | ||
305 | depends on HARRIER | ||
306 | |||
307 | config MVME5100_IPMC761_PRESENT | ||
308 | bool "MVME5100 configured with an IPMC761" | ||
309 | depends on MVME5100 | ||
310 | |||
311 | config SPRUCE_BAUD_33M | ||
312 | bool "Spruce baud clock support" | ||
313 | depends on SPRUCE | ||
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig new file mode 100644 index 000000000000..3d957a30c8c2 --- /dev/null +++ b/arch/powerpc/platforms/iseries/Kconfig | |||
@@ -0,0 +1,31 @@ | |||
1 | |||
2 | menu "iSeries device drivers" | ||
3 | depends on PPC_ISERIES | ||
4 | |||
5 | config VIOCONS | ||
6 | tristate "iSeries Virtual Console Support" | ||
7 | |||
8 | config VIODASD | ||
9 | tristate "iSeries Virtual I/O disk support" | ||
10 | help | ||
11 | If you are running on an iSeries system and you want to use | ||
12 | virtual disks created and managed by OS/400, say Y. | ||
13 | |||
14 | config VIOCD | ||
15 | tristate "iSeries Virtual I/O CD support" | ||
16 | help | ||
17 | If you are running Linux on an IBM iSeries system and you want to | ||
18 | read a CD drive owned by OS/400, say Y here. | ||
19 | |||
20 | config VIOTAPE | ||
21 | tristate "iSeries Virtual Tape Support" | ||
22 | help | ||
23 | If you are running Linux on an iSeries system and you want Linux | ||
24 | to read and/or write a tape drive owned by OS/400, say Y here. | ||
25 | |||
26 | endmenu | ||
27 | |||
28 | config VIOPATH | ||
29 | bool | ||
30 | depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH | ||
31 | default y | ||
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile new file mode 100644 index 000000000000..37b7341396e4 --- /dev/null +++ b/arch/powerpc/platforms/powermac/Makefile | |||
@@ -0,0 +1,9 @@ | |||
# Kbuild rules for the PowerMac platform code.
obj-$(CONFIG_PPC_PMAC)		+= pmac_pic.o pmac_setup.o pmac_time.o \
				   pmac_feature.o pmac_pci.o pmac_sleep.o \
				   pmac_low_i2c.o pmac_cache.o
obj-$(CONFIG_PMAC_BACKLIGHT)	+= pmac_backlight.o
obj-$(CONFIG_CPU_FREQ_PMAC)	+= pmac_cpufreq.o
# CONFIG_NVRAM and CONFIG_SMP are generic symbols, so guard the
# PowerMac-specific objects with an explicit CONFIG_PPC_PMAC check.
ifeq ($(CONFIG_PPC_PMAC),y)
obj-$(CONFIG_NVRAM)		+= pmac_nvram.o
obj-$(CONFIG_SMP)		+= pmac_smp.o
endif
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h new file mode 100644 index 000000000000..40e1c5030f74 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -0,0 +1,31 @@ | |||
#ifndef __PMAC_H__
#define __PMAC_H__

#include <linux/pci.h>
#include <linux/ide.h>

/*
 * Declaration for the various functions exported by the
 * pmac_* files. Mostly for use by pmac_setup
 */

/* RTC and decrementer setup (pmac_time.c) */
extern void pmac_get_boot_time(struct rtc_time *tm);
extern void pmac_get_rtc_time(struct rtc_time *tm);
extern int pmac_set_rtc_time(struct rtc_time *tm);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);

/* PCI probing and fixups (pmac_pci.c) */
extern void pmac_pcibios_fixup(void);
extern void pmac_pci_init(void);
extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);

/* SMP bringup (pmac_smp.c) */
extern void pmac_setup_smp(void);

/* IDE port discovery helpers -- presumably consumed by the ppc IDE
 * layer; confirm against the drivers/ide callers. */
extern unsigned long pmac_ide_get_base(int index);
extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
	unsigned long data_port, unsigned long ctrl_port, int *irq);

/* NVRAM access setup (pmac_nvram.c) */
extern void pmac_nvram_init(void);

#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/pmac_backlight.c b/arch/powerpc/platforms/powermac/pmac_backlight.c new file mode 100644 index 000000000000..8be2f7d071f0 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_backlight.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * Miscellaneous procedures for dealing with the PowerMac hardware. | ||
3 | * Contains support for the backlight. | ||
4 | * | ||
5 | * Copyright (C) 2000 Benjamin Herrenschmidt | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/config.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/stddef.h> | ||
13 | #include <linux/reboot.h> | ||
14 | #include <linux/nvram.h> | ||
15 | #include <linux/console.h> | ||
16 | #include <asm/sections.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/system.h> | ||
21 | #include <asm/prom.h> | ||
22 | #include <asm/machdep.h> | ||
23 | #include <asm/nvram.h> | ||
24 | #include <asm/backlight.h> | ||
25 | |||
26 | #include <linux/adb.h> | ||
27 | #include <linux/pmu.h> | ||
28 | |||
/* Currently registered controller and its private cookie; NULL until
 * a controller registers. */
static struct backlight_controller *backlighter;
static void* backlighter_data;
/* Non-zero on old PowerBooks where the PMU saves the level itself. */
static int backlight_autosave;
/* Cached state, updated under the console semaphore. */
static int backlight_level = BACKLIGHT_MAX;
static int backlight_enabled = 1;
/* Requests queued for the workqueue callback; -1 means "none pending". */
static int backlight_req_level = -1;
static int backlight_req_enable = -1;

static void backlight_callback(void *);
static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39 | |||
40 | void register_backlight_controller(struct backlight_controller *ctrler, | ||
41 | void *data, char *type) | ||
42 | { | ||
43 | struct device_node* bk_node; | ||
44 | char *prop; | ||
45 | int valid = 0; | ||
46 | |||
47 | /* There's already a matching controller, bail out */ | ||
48 | if (backlighter != NULL) | ||
49 | return; | ||
50 | |||
51 | bk_node = find_devices("backlight"); | ||
52 | |||
53 | #ifdef CONFIG_ADB_PMU | ||
54 | /* Special case for the old PowerBook since I can't test on it */ | ||
55 | backlight_autosave = machine_is_compatible("AAPL,3400/2400") | ||
56 | || machine_is_compatible("AAPL,3500"); | ||
57 | if ((backlight_autosave | ||
58 | || machine_is_compatible("AAPL,PowerBook1998") | ||
59 | || machine_is_compatible("PowerBook1,1")) | ||
60 | && !strcmp(type, "pmu")) | ||
61 | valid = 1; | ||
62 | #endif | ||
63 | if (bk_node) { | ||
64 | prop = get_property(bk_node, "backlight-control", NULL); | ||
65 | if (prop && !strncmp(prop, type, strlen(type))) | ||
66 | valid = 1; | ||
67 | } | ||
68 | if (!valid) | ||
69 | return; | ||
70 | backlighter = ctrler; | ||
71 | backlighter_data = data; | ||
72 | |||
73 | if (bk_node && !backlight_autosave) | ||
74 | prop = get_property(bk_node, "bklt", NULL); | ||
75 | else | ||
76 | prop = NULL; | ||
77 | if (prop) { | ||
78 | backlight_level = ((*prop)+1) >> 1; | ||
79 | if (backlight_level > BACKLIGHT_MAX) | ||
80 | backlight_level = BACKLIGHT_MAX; | ||
81 | } | ||
82 | |||
83 | #ifdef CONFIG_ADB_PMU | ||
84 | if (backlight_autosave) { | ||
85 | struct adb_request req; | ||
86 | pmu_request(&req, NULL, 2, 0xd9, 0); | ||
87 | while (!req.complete) | ||
88 | pmu_poll(); | ||
89 | backlight_level = req.reply[0] >> 4; | ||
90 | } | ||
91 | #endif | ||
92 | acquire_console_sem(); | ||
93 | if (!backlighter->set_enable(1, backlight_level, data)) | ||
94 | backlight_enabled = 1; | ||
95 | release_console_sem(); | ||
96 | |||
97 | printk(KERN_INFO "Registered \"%s\" backlight controller," | ||
98 | "level: %d/15\n", type, backlight_level); | ||
99 | } | ||
100 | EXPORT_SYMBOL(register_backlight_controller); | ||
101 | |||
102 | void unregister_backlight_controller(struct backlight_controller | ||
103 | *ctrler, void *data) | ||
104 | { | ||
105 | /* We keep the current backlight level (for now) */ | ||
106 | if (ctrler == backlighter && data == backlighter_data) | ||
107 | backlighter = NULL; | ||
108 | } | ||
109 | EXPORT_SYMBOL(unregister_backlight_controller); | ||
110 | |||
111 | static int __set_backlight_enable(int enable) | ||
112 | { | ||
113 | int rc; | ||
114 | |||
115 | if (!backlighter) | ||
116 | return -ENODEV; | ||
117 | acquire_console_sem(); | ||
118 | rc = backlighter->set_enable(enable, backlight_level, | ||
119 | backlighter_data); | ||
120 | if (!rc) | ||
121 | backlight_enabled = enable; | ||
122 | release_console_sem(); | ||
123 | return rc; | ||
124 | } | ||
125 | int set_backlight_enable(int enable) | ||
126 | { | ||
127 | if (!backlighter) | ||
128 | return -ENODEV; | ||
129 | backlight_req_enable = enable; | ||
130 | schedule_work(&backlight_work); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | EXPORT_SYMBOL(set_backlight_enable); | ||
135 | |||
136 | int get_backlight_enable(void) | ||
137 | { | ||
138 | if (!backlighter) | ||
139 | return -ENODEV; | ||
140 | return backlight_enabled; | ||
141 | } | ||
142 | EXPORT_SYMBOL(get_backlight_enable); | ||
143 | |||
/*
 * Apply a level change under the console semaphore.  The level is
 * clamped to [BACKLIGHT_OFF/BACKLIGHT_MIN, BACKLIGHT_MAX]: anything
 * below BACKLIGHT_MIN turns the backlight off entirely.
 */
static int __set_backlight_level(int level)
{
	int rc = 0;

	if (!backlighter)
		return -ENODEV;
	if (level < BACKLIGHT_MIN)
		level = BACKLIGHT_OFF;
	if (level > BACKLIGHT_MAX)
		level = BACKLIGHT_MAX;
	acquire_console_sem();
	if (backlight_enabled)
		rc = backlighter->set_level(level, backlighter_data);
	if (!rc)
		backlight_level = level;
	release_console_sem();
	if (!rc && !backlight_autosave) {
		/* Re-encode the level for the "bklt" property; computed
		 * but not yet written back (see todo below). */
		level <<=1;
		if (level & 0x10)
			level |= 0x01;
		// -- todo: save to property "bklt"
	}
	return rc;
}
168 | int set_backlight_level(int level) | ||
169 | { | ||
170 | if (!backlighter) | ||
171 | return -ENODEV; | ||
172 | backlight_req_level = level; | ||
173 | schedule_work(&backlight_work); | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | EXPORT_SYMBOL(set_backlight_level); | ||
178 | |||
179 | int get_backlight_level(void) | ||
180 | { | ||
181 | if (!backlighter) | ||
182 | return -ENODEV; | ||
183 | return backlight_level; | ||
184 | } | ||
185 | EXPORT_SYMBOL(get_backlight_level); | ||
186 | |||
/*
 * Workqueue callback: apply the most recently queued level/enable
 * requests (-1 means "no request pending").  The cmpxchg loop retries
 * when a new request was posted while the previous one was being
 * applied, so no request is ever silently dropped.
 */
static void backlight_callback(void *dummy)
{
	int level, enable;

	do {
		/* Snapshot both requests before touching the hardware. */
		level = backlight_req_level;
		enable = backlight_req_enable;
		mb();

		if (level >= 0)
			__set_backlight_level(level);
		if (enable >= 0)
			__set_backlight_enable(enable);
		/* Clear each request only if it is still the one we
		 * serviced; otherwise loop and service the newer one. */
	} while(cmpxchg(&backlight_req_level, level, -1) != level ||
		cmpxchg(&backlight_req_enable, enable, -1) != enable);
}
diff --git a/arch/powerpc/platforms/powermac/pmac_cache.S b/arch/powerpc/platforms/powermac/pmac_cache.S new file mode 100644 index 000000000000..fb977de6b704 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_cache.S | |||
@@ -0,0 +1,359 @@ | |||
1 | /* | ||
2 | * This file contains low-level cache management functions | ||
3 | * used for sleep and CPU speed changes on Apple machines. | ||
4 | * (In fact the only thing that is Apple-specific is that we assume | ||
5 | * that we can read from ROM at physical address 0xfff00000.) | ||
6 | * | ||
7 | * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and | ||
8 | * Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/ppc_asm.h> | ||
20 | #include <asm/cputable.h> | ||
21 | |||
22 | /* | ||
23 | * Flush and disable all data caches (dL1, L2, L3). This is used | ||
24 | * when going to sleep, when doing a PMU based cpufreq transition, | ||
25 | * or when "offlining" a CPU on SMP machines. This code is over | ||
26 | * paranoid, but I've had enough issues with various CPU revs and | ||
27 | bugs that I decided it was worth being over cautious | ||
28 | */ | ||
29 | |||
/*
 * Entry point: dispatch on CPU type.  745x CPUs take their own path,
 * CPUs with an L2CR (feature CPU_FTR_L2CR) take flush_disable_75x,
 * anything else only flushes/disables the L1.  No-op on non-6xx.
 */
_GLOBAL(flush_disable_caches)
#ifndef CONFIG_6xx
	blr
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1
41 | |||
/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
	/* NOTE(review): r4 is unconditionally overwritten by the
	 * "li r4,0x4000" below, so both base values above look dead and
	 * the displacement loop always walks RAM starting at 0x4000
	 * (0x4000 lines * 32B = 512KB) -- confirm this is the intended
	 * form of the workaround described above. */
1:	li	r4,0x4000
	mtctr	r4
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* Disable / invalidate / enable L1 data */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4Mb to be safe
	 */
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	/* Same walk again with dcbf to push dirty lines out. */
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data cache */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Turn back HID0[DPM] */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr

192 | |||
/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */

	/* 0x20000 lines * 32B = 4MB displacement walk over low RAM. */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0	/* read from ROM for displacement flush */
	li	r4,0xfe		/* start with only way 0 unlocked */
	li	r5,128		/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)	/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30	/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff		/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0		/* check if it is enabled first */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate the (now disabled) L2 and wait for L2I to clear. */
	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0		/* check if it is enabled */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
	sync
	/* Invalidate the L3 and wait for L3I to clear. */
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11		/* restore DR and EE */
	isync
	blr
#endif	/* CONFIG_6xx */
diff --git a/arch/powerpc/platforms/powermac/pmac_cpufreq.c b/arch/powerpc/platforms/powermac/pmac_cpufreq.c new file mode 100644 index 000000000000..6d32d99402be --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_cpufreq.c | |||
@@ -0,0 +1,728 @@ | |||
1 | /* | ||
2 | * arch/ppc/platforms/pmac_cpufreq.c | ||
3 | * | ||
4 | * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> | ||
5 | * Copyright (C) 2004 John Steele Scott <toojays@toojays.net> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * TODO: Need a big cleanup here. Basically, we need to have different | ||
12 | * cpufreq_driver structures for the different type of HW instead of the | ||
13 | * current mess. We also need to better deal with the detection of the | ||
14 | * type of machine. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/adb.h> | ||
26 | #include <linux/pmu.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/cpufreq.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/sysdev.h> | ||
31 | #include <linux/i2c.h> | ||
32 | #include <linux/hardirq.h> | ||
33 | #include <asm/prom.h> | ||
34 | #include <asm/machdep.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/pmac_feature.h> | ||
37 | #include <asm/mmu_context.h> | ||
38 | #include <asm/sections.h> | ||
39 | #include <asm/cputable.h> | ||
40 | #include <asm/time.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/mpic.h> | ||
43 | #include <asm/keylargo.h> | ||
44 | |||
45 | /* WARNING !!! This will cause calibrate_delay() to be called, | ||
46 | * but this is an __init function ! So you MUST go edit | ||
47 | * init/main.c to make it non-init before enabling DEBUG_FREQ | ||
48 | */ | ||
49 | #undef DEBUG_FREQ | ||
50 | |||
51 | /* | ||
52 | * There is a problem with the core cpufreq code on SMP kernels, | ||
53 | * it won't recalculate the Bogomips properly | ||
54 | */ | ||
55 | #ifdef CONFIG_SMP | ||
56 | #warning "WARNING, CPUFREQ not recommended on SMP kernels" | ||
57 | #endif | ||
58 | |||
59 | extern void low_choose_7447a_dfs(int dfs); | ||
60 | extern void low_choose_750fx_pll(int pll); | ||
61 | extern void low_sleep_handler(void); | ||
62 | |||
63 | /* | ||
64 | * Currently, PowerMac cpufreq supports only high & low frequencies | ||
65 | * that are set by the firmware | ||
66 | */ | ||
67 | static unsigned int low_freq; | ||
68 | static unsigned int hi_freq; | ||
69 | static unsigned int cur_freq; | ||
70 | static unsigned int sleep_freq; | ||
71 | |||
72 | /* | ||
73 | * Different models uses different mecanisms to switch the frequency | ||
74 | */ | ||
75 | static int (*set_speed_proc)(int low_speed); | ||
76 | static unsigned int (*get_speed_proc)(void); | ||
77 | |||
78 | /* | ||
79 | * Some definitions used by the various speedprocs | ||
80 | */ | ||
81 | static u32 voltage_gpio; | ||
82 | static u32 frequency_gpio; | ||
83 | static u32 slew_done_gpio; | ||
84 | static int no_schedule; | ||
85 | static int has_cpu_l2lve; | ||
86 | static int is_pmu_based; | ||
87 | |||
88 | /* There are only two frequency states for each processor. Values | ||
89 | * are in kHz for the time being. | ||
90 | */ | ||
91 | #define CPUFREQ_HIGH 0 | ||
92 | #define CPUFREQ_LOW 1 | ||
93 | |||
94 | static struct cpufreq_frequency_table pmac_cpu_freqs[] = { | ||
95 | {CPUFREQ_HIGH, 0}, | ||
96 | {CPUFREQ_LOW, 0}, | ||
97 | {0, CPUFREQ_TABLE_END}, | ||
98 | }; | ||
99 | |||
100 | static struct freq_attr* pmac_cpu_freqs_attr[] = { | ||
101 | &cpufreq_freq_attr_scaling_available_freqs, | ||
102 | NULL, | ||
103 | }; | ||
104 | |||
105 | static inline void local_delay(unsigned long ms) | ||
106 | { | ||
107 | if (no_schedule) | ||
108 | mdelay(ms); | ||
109 | else | ||
110 | msleep(ms); | ||
111 | } | ||
112 | |||
113 | static inline void wakeup_decrementer(void) | ||
114 | { | ||
115 | set_dec(tb_ticks_per_jiffy); | ||
116 | /* No currently-supported powerbook has a 601, | ||
117 | * so use get_tbl, not native | ||
118 | */ | ||
119 | last_jiffy_stamp(0) = tb_last_stamp = get_tbl(); | ||
120 | } | ||
121 | |||
122 | #ifdef DEBUG_FREQ | ||
123 | static inline void debug_calc_bogomips(void) | ||
124 | { | ||
125 | /* This will cause a recalc of bogomips and display the | ||
126 | * result. We backup/restore the value to avoid affecting the | ||
127 | * core cpufreq framework's own calculation. | ||
128 | */ | ||
129 | extern void calibrate_delay(void); | ||
130 | |||
131 | unsigned long save_lpj = loops_per_jiffy; | ||
132 | calibrate_delay(); | ||
133 | loops_per_jiffy = save_lpj; | ||
134 | } | ||
135 | #endif /* DEBUG_FREQ */ | ||
136 | |||
137 | /* Switch CPU speed under 750FX CPU control | ||
138 | */ | ||
139 | static int cpu_750fx_cpu_speed(int low_speed) | ||
140 | { | ||
141 | u32 hid2; | ||
142 | |||
143 | if (low_speed == 0) { | ||
144 | /* ramping up, set voltage first */ | ||
145 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); | ||
146 | /* Make sure we sleep for at least 1ms */ | ||
147 | local_delay(10); | ||
148 | |||
149 | /* tweak L2 for high voltage */ | ||
150 | if (has_cpu_l2lve) { | ||
151 | hid2 = mfspr(SPRN_HID2); | ||
152 | hid2 &= ~0x2000; | ||
153 | mtspr(SPRN_HID2, hid2); | ||
154 | } | ||
155 | } | ||
156 | #ifdef CONFIG_6xx | ||
157 | low_choose_750fx_pll(low_speed); | ||
158 | #endif | ||
159 | if (low_speed == 1) { | ||
160 | /* tweak L2 for low voltage */ | ||
161 | if (has_cpu_l2lve) { | ||
162 | hid2 = mfspr(SPRN_HID2); | ||
163 | hid2 |= 0x2000; | ||
164 | mtspr(SPRN_HID2, hid2); | ||
165 | } | ||
166 | |||
167 | /* ramping down, set voltage last */ | ||
168 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); | ||
169 | local_delay(10); | ||
170 | } | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static unsigned int cpu_750fx_get_cpu_speed(void) | ||
176 | { | ||
177 | if (mfspr(SPRN_HID1) & HID1_PS) | ||
178 | return low_freq; | ||
179 | else | ||
180 | return hi_freq; | ||
181 | } | ||
182 | |||
183 | /* Switch CPU speed using DFS */ | ||
184 | static int dfs_set_cpu_speed(int low_speed) | ||
185 | { | ||
186 | if (low_speed == 0) { | ||
187 | /* ramping up, set voltage first */ | ||
188 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); | ||
189 | /* Make sure we sleep for at least 1ms */ | ||
190 | local_delay(1); | ||
191 | } | ||
192 | |||
193 | /* set frequency */ | ||
194 | #ifdef CONFIG_6xx | ||
195 | low_choose_7447a_dfs(low_speed); | ||
196 | #endif | ||
197 | udelay(100); | ||
198 | |||
199 | if (low_speed == 1) { | ||
200 | /* ramping down, set voltage last */ | ||
201 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); | ||
202 | local_delay(1); | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static unsigned int dfs_get_cpu_speed(void) | ||
209 | { | ||
210 | if (mfspr(SPRN_HID1) & HID1_DFS) | ||
211 | return low_freq; | ||
212 | else | ||
213 | return hi_freq; | ||
214 | } | ||
215 | |||
216 | |||
217 | /* Switch CPU speed using slewing GPIOs | ||
218 | */ | ||
219 | static int gpios_set_cpu_speed(int low_speed) | ||
220 | { | ||
221 | int gpio, timeout = 0; | ||
222 | |||
223 | /* If ramping up, set voltage first */ | ||
224 | if (low_speed == 0) { | ||
225 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05); | ||
226 | /* Delay is way too big but it's ok, we schedule */ | ||
227 | local_delay(10); | ||
228 | } | ||
229 | |||
230 | /* Set frequency */ | ||
231 | gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0); | ||
232 | if (low_speed == ((gpio & 0x01) == 0)) | ||
233 | goto skip; | ||
234 | |||
235 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio, | ||
236 | low_speed ? 0x04 : 0x05); | ||
237 | udelay(200); | ||
238 | do { | ||
239 | if (++timeout > 100) | ||
240 | break; | ||
241 | local_delay(1); | ||
242 | gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0); | ||
243 | } while((gpio & 0x02) == 0); | ||
244 | skip: | ||
245 | /* If ramping down, set voltage last */ | ||
246 | if (low_speed == 1) { | ||
247 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04); | ||
248 | /* Delay is way too big but it's ok, we schedule */ | ||
249 | local_delay(10); | ||
250 | } | ||
251 | |||
252 | #ifdef DEBUG_FREQ | ||
253 | debug_calc_bogomips(); | ||
254 | #endif | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | /* Switch CPU speed under PMU control | ||
260 | */ | ||
261 | static int pmu_set_cpu_speed(int low_speed) | ||
262 | { | ||
263 | struct adb_request req; | ||
264 | unsigned long save_l2cr; | ||
265 | unsigned long save_l3cr; | ||
266 | unsigned int pic_prio; | ||
267 | unsigned long flags; | ||
268 | |||
269 | preempt_disable(); | ||
270 | |||
271 | #ifdef DEBUG_FREQ | ||
272 | printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1)); | ||
273 | #endif | ||
274 | pmu_suspend(); | ||
275 | |||
276 | /* Disable all interrupt sources on openpic */ | ||
277 | pic_prio = mpic_cpu_get_priority(); | ||
278 | mpic_cpu_set_priority(0xf); | ||
279 | |||
280 | /* Make sure the decrementer won't interrupt us */ | ||
281 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
282 | /* Make sure any pending DEC interrupt occuring while we did | ||
283 | * the above didn't re-enable the DEC */ | ||
284 | mb(); | ||
285 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
286 | |||
287 | /* We can now disable MSR_EE */ | ||
288 | local_irq_save(flags); | ||
289 | |||
290 | /* Giveup the FPU & vec */ | ||
291 | enable_kernel_fp(); | ||
292 | |||
293 | #ifdef CONFIG_ALTIVEC | ||
294 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | ||
295 | enable_kernel_altivec(); | ||
296 | #endif /* CONFIG_ALTIVEC */ | ||
297 | |||
298 | /* Save & disable L2 and L3 caches */ | ||
299 | save_l3cr = _get_L3CR(); /* (returns -1 if not available) */ | ||
300 | save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ | ||
301 | |||
302 | /* Send the new speed command. My assumption is that this command | ||
303 | * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep | ||
304 | */ | ||
305 | pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed); | ||
306 | while (!req.complete) | ||
307 | pmu_poll(); | ||
308 | |||
309 | /* Prepare the northbridge for the speed transition */ | ||
310 | pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1); | ||
311 | |||
312 | /* Call low level code to backup CPU state and recover from | ||
313 | * hardware reset | ||
314 | */ | ||
315 | low_sleep_handler(); | ||
316 | |||
317 | /* Restore the northbridge */ | ||
318 | pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0); | ||
319 | |||
320 | /* Restore L2 cache */ | ||
321 | if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) | ||
322 | _set_L2CR(save_l2cr); | ||
323 | /* Restore L3 cache */ | ||
324 | if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0) | ||
325 | _set_L3CR(save_l3cr); | ||
326 | |||
327 | /* Restore userland MMU context */ | ||
328 | set_context(current->active_mm->context, current->active_mm->pgd); | ||
329 | |||
330 | #ifdef DEBUG_FREQ | ||
331 | printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1)); | ||
332 | #endif | ||
333 | |||
334 | /* Restore low level PMU operations */ | ||
335 | pmu_unlock(); | ||
336 | |||
337 | /* Restore decrementer */ | ||
338 | wakeup_decrementer(); | ||
339 | |||
340 | /* Restore interrupts */ | ||
341 | mpic_cpu_set_priority(pic_prio); | ||
342 | |||
343 | /* Let interrupts flow again ... */ | ||
344 | local_irq_restore(flags); | ||
345 | |||
346 | #ifdef DEBUG_FREQ | ||
347 | debug_calc_bogomips(); | ||
348 | #endif | ||
349 | |||
350 | pmu_resume(); | ||
351 | |||
352 | preempt_enable(); | ||
353 | |||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | static int do_set_cpu_speed(int speed_mode, int notify) | ||
358 | { | ||
359 | struct cpufreq_freqs freqs; | ||
360 | unsigned long l3cr; | ||
361 | static unsigned long prev_l3cr; | ||
362 | |||
363 | freqs.old = cur_freq; | ||
364 | freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; | ||
365 | freqs.cpu = smp_processor_id(); | ||
366 | |||
367 | if (freqs.old == freqs.new) | ||
368 | return 0; | ||
369 | |||
370 | if (notify) | ||
371 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
372 | if (speed_mode == CPUFREQ_LOW && | ||
373 | cpu_has_feature(CPU_FTR_L3CR)) { | ||
374 | l3cr = _get_L3CR(); | ||
375 | if (l3cr & L3CR_L3E) { | ||
376 | prev_l3cr = l3cr; | ||
377 | _set_L3CR(0); | ||
378 | } | ||
379 | } | ||
380 | set_speed_proc(speed_mode == CPUFREQ_LOW); | ||
381 | if (speed_mode == CPUFREQ_HIGH && | ||
382 | cpu_has_feature(CPU_FTR_L3CR)) { | ||
383 | l3cr = _get_L3CR(); | ||
384 | if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr) | ||
385 | _set_L3CR(prev_l3cr); | ||
386 | } | ||
387 | if (notify) | ||
388 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
389 | cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq; | ||
390 | |||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | static unsigned int pmac_cpufreq_get_speed(unsigned int cpu) | ||
395 | { | ||
396 | return cur_freq; | ||
397 | } | ||
398 | |||
399 | static int pmac_cpufreq_verify(struct cpufreq_policy *policy) | ||
400 | { | ||
401 | return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); | ||
402 | } | ||
403 | |||
404 | static int pmac_cpufreq_target( struct cpufreq_policy *policy, | ||
405 | unsigned int target_freq, | ||
406 | unsigned int relation) | ||
407 | { | ||
408 | unsigned int newstate = 0; | ||
409 | |||
410 | if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs, | ||
411 | target_freq, relation, &newstate)) | ||
412 | return -EINVAL; | ||
413 | |||
414 | return do_set_cpu_speed(newstate, 1); | ||
415 | } | ||
416 | |||
417 | unsigned int pmac_get_one_cpufreq(int i) | ||
418 | { | ||
419 | /* Supports only one CPU for now */ | ||
420 | return (i == 0) ? cur_freq : 0; | ||
421 | } | ||
422 | |||
423 | static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) | ||
424 | { | ||
425 | if (policy->cpu != 0) | ||
426 | return -ENODEV; | ||
427 | |||
428 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
429 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
430 | policy->cur = cur_freq; | ||
431 | |||
432 | cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu); | ||
433 | return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); | ||
434 | } | ||
435 | |||
436 | static u32 read_gpio(struct device_node *np) | ||
437 | { | ||
438 | u32 *reg = (u32 *)get_property(np, "reg", NULL); | ||
439 | u32 offset; | ||
440 | |||
441 | if (reg == NULL) | ||
442 | return 0; | ||
443 | /* That works for all keylargos but shall be fixed properly | ||
444 | * some day... The problem is that it seems we can't rely | ||
445 | * on the "reg" property of the GPIO nodes, they are either | ||
446 | * relative to the base of KeyLargo or to the base of the | ||
447 | * GPIO space, and the device-tree doesn't help. | ||
448 | */ | ||
449 | offset = *reg; | ||
450 | if (offset < KEYLARGO_GPIO_LEVELS0) | ||
451 | offset += KEYLARGO_GPIO_LEVELS0; | ||
452 | return offset; | ||
453 | } | ||
454 | |||
455 | static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) | ||
456 | { | ||
457 | /* Ok, this could be made a bit smarter, but let's be robust for now. We | ||
458 | * always force a speed change to high speed before sleep, to make sure | ||
459 | * we have appropriate voltage and/or bus speed for the wakeup process, | ||
460 | * and to make sure our loops_per_jiffies are "good enough", that is will | ||
461 | * not cause too short delays if we sleep in low speed and wake in high | ||
462 | * speed.. | ||
463 | */ | ||
464 | no_schedule = 1; | ||
465 | sleep_freq = cur_freq; | ||
466 | if (cur_freq == low_freq && !is_pmu_based) | ||
467 | do_set_cpu_speed(CPUFREQ_HIGH, 0); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int pmac_cpufreq_resume(struct cpufreq_policy *policy) | ||
472 | { | ||
473 | /* If we resume, first check if we have a get() function */ | ||
474 | if (get_speed_proc) | ||
475 | cur_freq = get_speed_proc(); | ||
476 | else | ||
477 | cur_freq = 0; | ||
478 | |||
479 | /* We don't, hrm... we don't really know our speed here, best | ||
480 | * is that we force a switch to whatever it was, which is | ||
481 | * probably high speed due to our suspend() routine | ||
482 | */ | ||
483 | do_set_cpu_speed(sleep_freq == low_freq ? | ||
484 | CPUFREQ_LOW : CPUFREQ_HIGH, 0); | ||
485 | |||
486 | no_schedule = 0; | ||
487 | return 0; | ||
488 | } | ||
489 | |||
490 | static struct cpufreq_driver pmac_cpufreq_driver = { | ||
491 | .verify = pmac_cpufreq_verify, | ||
492 | .target = pmac_cpufreq_target, | ||
493 | .get = pmac_cpufreq_get_speed, | ||
494 | .init = pmac_cpufreq_cpu_init, | ||
495 | .suspend = pmac_cpufreq_suspend, | ||
496 | .resume = pmac_cpufreq_resume, | ||
497 | .flags = CPUFREQ_PM_NO_WARN, | ||
498 | .attr = pmac_cpu_freqs_attr, | ||
499 | .name = "powermac", | ||
500 | .owner = THIS_MODULE, | ||
501 | }; | ||
502 | |||
503 | |||
504 | static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) | ||
505 | { | ||
506 | struct device_node *volt_gpio_np = of_find_node_by_name(NULL, | ||
507 | "voltage-gpio"); | ||
508 | struct device_node *freq_gpio_np = of_find_node_by_name(NULL, | ||
509 | "frequency-gpio"); | ||
510 | struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL, | ||
511 | "slewing-done"); | ||
512 | u32 *value; | ||
513 | |||
514 | /* | ||
515 | * Check to see if it's GPIO driven or PMU only | ||
516 | * | ||
517 | * The way we extract the GPIO address is slightly hackish, but it | ||
518 | * works well enough for now. We need to abstract the whole GPIO | ||
519 | * stuff sooner or later anyway | ||
520 | */ | ||
521 | |||
522 | if (volt_gpio_np) | ||
523 | voltage_gpio = read_gpio(volt_gpio_np); | ||
524 | if (freq_gpio_np) | ||
525 | frequency_gpio = read_gpio(freq_gpio_np); | ||
526 | if (slew_done_gpio_np) | ||
527 | slew_done_gpio = read_gpio(slew_done_gpio_np); | ||
528 | |||
529 | /* If we use the frequency GPIOs, calculate the min/max speeds based | ||
530 | * on the bus frequencies | ||
531 | */ | ||
532 | if (frequency_gpio && slew_done_gpio) { | ||
533 | int lenp, rc; | ||
534 | u32 *freqs, *ratio; | ||
535 | |||
536 | freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp); | ||
537 | lenp /= sizeof(u32); | ||
538 | if (freqs == NULL || lenp != 2) { | ||
539 | printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n"); | ||
540 | return 1; | ||
541 | } | ||
542 | ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL); | ||
543 | if (ratio == NULL) { | ||
544 | printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n"); | ||
545 | return 1; | ||
546 | } | ||
547 | |||
548 | /* Get the min/max bus frequencies */ | ||
549 | low_freq = min(freqs[0], freqs[1]); | ||
550 | hi_freq = max(freqs[0], freqs[1]); | ||
551 | |||
552 | /* Grrrr.. It _seems_ that the device-tree is lying on the low bus | ||
553 | * frequency, it claims it to be around 84Mhz on some models while | ||
554 | * it appears to be approx. 101Mhz on all. Let's hack around here... | ||
555 | * fortunately, we don't need to be too precise | ||
556 | */ | ||
557 | if (low_freq < 98000000) | ||
558 | low_freq = 101000000; | ||
559 | |||
560 | /* Convert those to CPU core clocks */ | ||
561 | low_freq = (low_freq * (*ratio)) / 2000; | ||
562 | hi_freq = (hi_freq * (*ratio)) / 2000; | ||
563 | |||
564 | /* Now we get the frequencies, we read the GPIO to see what is out current | ||
565 | * speed | ||
566 | */ | ||
567 | rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0); | ||
568 | cur_freq = (rc & 0x01) ? hi_freq : low_freq; | ||
569 | |||
570 | set_speed_proc = gpios_set_cpu_speed; | ||
571 | return 1; | ||
572 | } | ||
573 | |||
574 | /* If we use the PMU, look for the min & max frequencies in the | ||
575 | * device-tree | ||
576 | */ | ||
577 | value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL); | ||
578 | if (!value) | ||
579 | return 1; | ||
580 | low_freq = (*value) / 1000; | ||
581 | /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree | ||
582 | * here */ | ||
583 | if (low_freq < 100000) | ||
584 | low_freq *= 10; | ||
585 | |||
586 | value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL); | ||
587 | if (!value) | ||
588 | return 1; | ||
589 | hi_freq = (*value) / 1000; | ||
590 | set_speed_proc = pmu_set_cpu_speed; | ||
591 | is_pmu_based = 1; | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static int pmac_cpufreq_init_7447A(struct device_node *cpunode) | ||
597 | { | ||
598 | struct device_node *volt_gpio_np; | ||
599 | |||
600 | if (get_property(cpunode, "dynamic-power-step", NULL) == NULL) | ||
601 | return 1; | ||
602 | |||
603 | volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); | ||
604 | if (volt_gpio_np) | ||
605 | voltage_gpio = read_gpio(volt_gpio_np); | ||
606 | if (!voltage_gpio){ | ||
607 | printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n"); | ||
608 | return 1; | ||
609 | } | ||
610 | |||
611 | /* OF only reports the high frequency */ | ||
612 | hi_freq = cur_freq; | ||
613 | low_freq = cur_freq/2; | ||
614 | |||
615 | /* Read actual frequency from CPU */ | ||
616 | cur_freq = dfs_get_cpu_speed(); | ||
617 | set_speed_proc = dfs_set_cpu_speed; | ||
618 | get_speed_proc = dfs_get_cpu_speed; | ||
619 | |||
620 | return 0; | ||
621 | } | ||
622 | |||
623 | static int pmac_cpufreq_init_750FX(struct device_node *cpunode) | ||
624 | { | ||
625 | struct device_node *volt_gpio_np; | ||
626 | u32 pvr, *value; | ||
627 | |||
628 | if (get_property(cpunode, "dynamic-power-step", NULL) == NULL) | ||
629 | return 1; | ||
630 | |||
631 | hi_freq = cur_freq; | ||
632 | value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL); | ||
633 | if (!value) | ||
634 | return 1; | ||
635 | low_freq = (*value) / 1000; | ||
636 | |||
637 | volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select"); | ||
638 | if (volt_gpio_np) | ||
639 | voltage_gpio = read_gpio(volt_gpio_np); | ||
640 | |||
641 | pvr = mfspr(SPRN_PVR); | ||
642 | has_cpu_l2lve = !((pvr & 0xf00) == 0x100); | ||
643 | |||
644 | set_speed_proc = cpu_750fx_cpu_speed; | ||
645 | get_speed_proc = cpu_750fx_get_cpu_speed; | ||
646 | cur_freq = cpu_750fx_get_cpu_speed(); | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | /* Currently, we support the following machines: | ||
652 | * | ||
653 | * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz) | ||
654 | * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz) | ||
655 | * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz) | ||
656 | * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz) | ||
657 | * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz) | ||
658 | * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage) | ||
659 | * - Recent MacRISC3 laptops | ||
660 | * - All new machines with 7447A CPUs | ||
661 | */ | ||
662 | static int __init pmac_cpufreq_setup(void) | ||
663 | { | ||
664 | struct device_node *cpunode; | ||
665 | u32 *value; | ||
666 | |||
667 | if (strstr(cmd_line, "nocpufreq")) | ||
668 | return 0; | ||
669 | |||
670 | /* Assume only one CPU */ | ||
671 | cpunode = find_type_devices("cpu"); | ||
672 | if (!cpunode) | ||
673 | goto out; | ||
674 | |||
675 | /* Get current cpu clock freq */ | ||
676 | value = (u32 *)get_property(cpunode, "clock-frequency", NULL); | ||
677 | if (!value) | ||
678 | goto out; | ||
679 | cur_freq = (*value) / 1000; | ||
680 | |||
681 | /* Check for 7447A based MacRISC3 */ | ||
682 | if (machine_is_compatible("MacRISC3") && | ||
683 | get_property(cpunode, "dynamic-power-step", NULL) && | ||
684 | PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { | ||
685 | pmac_cpufreq_init_7447A(cpunode); | ||
686 | /* Check for other MacRISC3 machines */ | ||
687 | } else if (machine_is_compatible("PowerBook3,4") || | ||
688 | machine_is_compatible("PowerBook3,5") || | ||
689 | machine_is_compatible("MacRISC3")) { | ||
690 | pmac_cpufreq_init_MacRISC3(cpunode); | ||
691 | /* Else check for iBook2 500/600 */ | ||
692 | } else if (machine_is_compatible("PowerBook4,1")) { | ||
693 | hi_freq = cur_freq; | ||
694 | low_freq = 400000; | ||
695 | set_speed_proc = pmu_set_cpu_speed; | ||
696 | is_pmu_based = 1; | ||
697 | } | ||
698 | /* Else check for TiPb 400 & 500 */ | ||
699 | else if (machine_is_compatible("PowerBook3,2")) { | ||
700 | /* We only know about the 400 MHz and the 500Mhz model | ||
701 | * they both have 300 MHz as low frequency | ||
702 | */ | ||
703 | if (cur_freq < 350000 || cur_freq > 550000) | ||
704 | goto out; | ||
705 | hi_freq = cur_freq; | ||
706 | low_freq = 300000; | ||
707 | set_speed_proc = pmu_set_cpu_speed; | ||
708 | is_pmu_based = 1; | ||
709 | } | ||
710 | /* Else check for 750FX */ | ||
711 | else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000) | ||
712 | pmac_cpufreq_init_750FX(cpunode); | ||
713 | out: | ||
714 | if (set_speed_proc == NULL) | ||
715 | return -ENODEV; | ||
716 | |||
717 | pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq; | ||
718 | pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq; | ||
719 | |||
720 | printk(KERN_INFO "Registering PowerMac CPU frequency driver\n"); | ||
721 | printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n", | ||
722 | low_freq/1000, hi_freq/1000, cur_freq/1000); | ||
723 | |||
724 | return cpufreq_register_driver(&pmac_cpufreq_driver); | ||
725 | } | ||
726 | |||
727 | module_init(pmac_cpufreq_setup); | ||
728 | |||
diff --git a/arch/powerpc/platforms/powermac/pmac_feature.c b/arch/powerpc/platforms/powermac/pmac_feature.c new file mode 100644 index 000000000000..2cba670c71b7 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_feature.c | |||
@@ -0,0 +1,3062 @@ | |||
1 | /* | ||
2 | * arch/ppc/platforms/pmac_feature.c | ||
3 | * | ||
4 | * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au) | ||
5 | * Ben. Herrenschmidt (benh@kernel.crashing.org) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * TODO: | ||
13 | * | ||
14 | * - Replace mdelay with some schedule loop if possible | ||
15 | * - Shorten some obfuscated delays on some routines (like modem | ||
16 | * power) | ||
17 | * - Refcount some clocks (see darwin) | ||
18 | * - Split split split... | ||
19 | * | ||
20 | */ | ||
21 | #include <linux/config.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/adb.h> | ||
29 | #include <linux/pmu.h> | ||
30 | #include <linux/ioport.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <asm/sections.h> | ||
33 | #include <asm/errno.h> | ||
34 | #include <asm/ohare.h> | ||
35 | #include <asm/heathrow.h> | ||
36 | #include <asm/keylargo.h> | ||
37 | #include <asm/uninorth.h> | ||
38 | #include <asm/io.h> | ||
39 | #include <asm/prom.h> | ||
40 | #include <asm/machdep.h> | ||
41 | #include <asm/pmac_feature.h> | ||
42 | #include <asm/dbdma.h> | ||
43 | #include <asm/pci-bridge.h> | ||
44 | #include <asm/pmac_low_i2c.h> | ||
45 | |||
46 | #undef DEBUG_FEATURE | ||
47 | |||
48 | #ifdef DEBUG_FEATURE | ||
49 | #define DBG(fmt...) printk(KERN_DEBUG fmt) | ||
50 | #else | ||
51 | #define DBG(fmt...) | ||
52 | #endif | ||
53 | |||
54 | #ifdef CONFIG_6xx | ||
55 | extern int powersave_lowspeed; | ||
56 | #endif | ||
57 | |||
58 | extern int powersave_nap; | ||
59 | extern struct device_node *k2_skiplist[2]; | ||
60 | |||
61 | |||
62 | /* | ||
63 | * We use a single global lock to protect accesses. Each driver has | ||
64 | * to take care of its own locking | ||
65 | */ | ||
66 | static DEFINE_SPINLOCK(feature_lock); | ||
67 | |||
68 | #define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); | ||
69 | #define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); | ||
70 | |||
71 | |||
72 | /* | ||
73 | * Instance of some macio stuffs | ||
74 | */ | ||
75 | struct macio_chip macio_chips[MAX_MACIO_CHIPS]; | ||
76 | |||
77 | struct macio_chip *macio_find(struct device_node *child, int type) | ||
78 | { | ||
79 | while(child) { | ||
80 | int i; | ||
81 | |||
82 | for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++) | ||
83 | if (child == macio_chips[i].of_node && | ||
84 | (!type || macio_chips[i].type == type)) | ||
85 | return &macio_chips[i]; | ||
86 | child = child->parent; | ||
87 | } | ||
88 | return NULL; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(macio_find); | ||
91 | |||
92 | static const char *macio_names[] = | ||
93 | { | ||
94 | "Unknown", | ||
95 | "Grand Central", | ||
96 | "OHare", | ||
97 | "OHareII", | ||
98 | "Heathrow", | ||
99 | "Gatwick", | ||
100 | "Paddington", | ||
101 | "Keylargo", | ||
102 | "Pangea", | ||
103 | "Intrepid", | ||
104 | "K2" | ||
105 | }; | ||
106 | |||
107 | |||
108 | |||
109 | /* | ||
110 | * Uninorth reg. access. Note that Uni-N regs are big endian | ||
111 | */ | ||
112 | |||
113 | #define UN_REG(r) (uninorth_base + ((r) >> 2)) | ||
114 | #define UN_IN(r) (in_be32(UN_REG(r))) | ||
115 | #define UN_OUT(r,v) (out_be32(UN_REG(r), (v))) | ||
116 | #define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v))) | ||
117 | #define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v))) | ||
118 | |||
119 | static struct device_node *uninorth_node; | ||
120 | static u32 __iomem *uninorth_base; | ||
121 | static u32 uninorth_rev; | ||
122 | static int uninorth_u3; | ||
123 | static void __iomem *u3_ht; | ||
124 | |||
125 | /* | ||
126 | * For each motherboard family, we have a table of functions pointers | ||
127 | * that handle the various features. | ||
128 | */ | ||
129 | |||
130 | typedef long (*feature_call)(struct device_node *node, long param, long value); | ||
131 | |||
132 | struct feature_table_entry { | ||
133 | unsigned int selector; | ||
134 | feature_call function; | ||
135 | }; | ||
136 | |||
137 | struct pmac_mb_def | ||
138 | { | ||
139 | const char* model_string; | ||
140 | const char* model_name; | ||
141 | int model_id; | ||
142 | struct feature_table_entry* features; | ||
143 | unsigned long board_flags; | ||
144 | }; | ||
145 | static struct pmac_mb_def pmac_mb; | ||
146 | |||
147 | /* | ||
148 | * Here are the chip specific feature functions | ||
149 | */ | ||
150 | |||
151 | static inline int simple_feature_tweak(struct device_node *node, int type, | ||
152 | int reg, u32 mask, int value) | ||
153 | { | ||
154 | struct macio_chip* macio; | ||
155 | unsigned long flags; | ||
156 | |||
157 | macio = macio_find(node, type); | ||
158 | if (!macio) | ||
159 | return -ENODEV; | ||
160 | LOCK(flags); | ||
161 | if (value) | ||
162 | MACIO_BIS(reg, mask); | ||
163 | else | ||
164 | MACIO_BIC(reg, mask); | ||
165 | (void)MACIO_IN32(reg); | ||
166 | UNLOCK(flags); | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | #ifndef CONFIG_POWER4 | ||
172 | |||
/*
 * Enable or disable an SCC channel (serial/IrDA/modem) on OHare and
 * Heathrow-family chips.  @node must be the "ch-a" or "ch-b" escc child
 * node; @param carries the PMAC_SCC_* port type plus flags; @value != 0
 * enables.  When the whole SCC cell is powered up for the first time it
 * is also pulsed through its reset line with a 15 ms settle delay.
 * Returns 0, -ENODEV on bad node, or -EPERM if the SCC is locked by xmon.
 */
static long ohare_htw_scc_enable(struct device_node *node, long param,
				 long value)
{
	struct macio_chip* macio;
	unsigned long chan_mask;
	unsigned long fcr;
	unsigned long flags;
	int htw, trans;
	unsigned long rmask;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (!strcmp(node->name, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (!strcmp(node->name, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	/* Heathrow-family chips use a different reset bit than plain OHare */
	htw = (macio->type == macio_heathrow || macio->type == macio_paddington
	       || macio->type == macio_gatwick);
	/* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
	trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
		 pmac_mb.model_id != PMAC_TYPE_YIKES);
	if (value) {
#ifdef CONFIG_ADB_PMU
		/* IrDA also needs the IR LED powered via the PMU */
		if ((param & 0xfff) == PMAC_SCC_IRDA)
			pmu_enable_irled(1);
#endif /* CONFIG_ADB_PMU */
		LOCK(flags);
		fcr = MACIO_IN32(OHARE_FCR);
		/* Check if scc cell need enabling */
		if (!(fcr & OH_SCC_ENABLE)) {
			fcr |= OH_SCC_ENABLE;
			if (htw) {
				/* Side effect: this will also power up the
				 * modem, but it's too messy to figure out on which
				 * ports this controls the tranceiver and on which
				 * it controls the modem
				 */
				if (trans)
					fcr &= ~HRW_SCC_TRANS_EN_N;
				MACIO_OUT32(OHARE_FCR, fcr);
				fcr |= (rmask = HRW_RESET_SCC);
				MACIO_OUT32(OHARE_FCR, fcr);
			} else {
				fcr |= (rmask = OH_SCC_RESET);
				MACIO_OUT32(OHARE_FCR, fcr);
			}
			/* hold reset for 15 ms with the lock dropped */
			UNLOCK(flags);
			(void)MACIO_IN32(OHARE_FCR);
			mdelay(15);
			LOCK(flags);
			fcr &= ~rmask;
			MACIO_OUT32(OHARE_FCR, fcr);
		}
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr |= OH_SCCA_IO;
		if (chan_mask & MACIO_FLAG_SCCB_ON)
			fcr |= OH_SCCB_IO;
		MACIO_OUT32(OHARE_FCR, fcr);
		macio->flags |= chan_mask;
		UNLOCK(flags);
		/* xmon owns the port: forbid later disables */
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(OHARE_FCR);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~OH_SCCA_IO;
		if (chan_mask & MACIO_FLAG_SCCB_ON)
			fcr &= ~OH_SCCB_IO;
		MACIO_OUT32(OHARE_FCR, fcr);
		/* power the whole cell down once both channels are off */
		if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
			fcr &= ~OH_SCC_ENABLE;
			if (htw && trans)
				fcr |= HRW_SCC_TRANS_EN_N;
			MACIO_OUT32(OHARE_FCR, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
#ifdef CONFIG_ADB_PMU
		if ((param & 0xfff) == PMAC_SCC_IRDA)
			pmu_enable_irled(0);
#endif /* CONFIG_ADB_PMU */
	}
	return 0;
}
265 | |||
266 | static long ohare_floppy_enable(struct device_node *node, long param, | ||
267 | long value) | ||
268 | { | ||
269 | return simple_feature_tweak(node, macio_ohare, | ||
270 | OHARE_FCR, OH_FLOPPY_ENABLE, value); | ||
271 | } | ||
272 | |||
273 | static long ohare_mesh_enable(struct device_node *node, long param, long value) | ||
274 | { | ||
275 | return simple_feature_tweak(node, macio_ohare, | ||
276 | OHARE_FCR, OH_MESH_ENABLE, value); | ||
277 | } | ||
278 | |||
279 | static long ohare_ide_enable(struct device_node *node, long param, long value) | ||
280 | { | ||
281 | switch(param) { | ||
282 | case 0: | ||
283 | /* For some reason, setting the bit in set_initial_features() | ||
284 | * doesn't stick. I'm still investigating... --BenH. | ||
285 | */ | ||
286 | if (value) | ||
287 | simple_feature_tweak(node, macio_ohare, | ||
288 | OHARE_FCR, OH_IOBUS_ENABLE, 1); | ||
289 | return simple_feature_tweak(node, macio_ohare, | ||
290 | OHARE_FCR, OH_IDE0_ENABLE, value); | ||
291 | case 1: | ||
292 | return simple_feature_tweak(node, macio_ohare, | ||
293 | OHARE_FCR, OH_BAY_IDE_ENABLE, value); | ||
294 | default: | ||
295 | return -ENODEV; | ||
296 | } | ||
297 | } | ||
298 | |||
299 | static long ohare_ide_reset(struct device_node *node, long param, long value) | ||
300 | { | ||
301 | switch(param) { | ||
302 | case 0: | ||
303 | return simple_feature_tweak(node, macio_ohare, | ||
304 | OHARE_FCR, OH_IDE0_RESET_N, !value); | ||
305 | case 1: | ||
306 | return simple_feature_tweak(node, macio_ohare, | ||
307 | OHARE_FCR, OH_IDE1_RESET_N, !value); | ||
308 | default: | ||
309 | return -ENODEV; | ||
310 | } | ||
311 | } | ||
312 | |||
/*
 * Enter (@value == 1) or leave (@value == 0) sleep on OHare machines by
 * clearing/setting the IOBUS enable bit.  @param is unused.  Returns
 * -EPERM when the board is not flagged as sleep-capable, else 0.
 */
static long ohare_sleep_state(struct device_node *node, long param, long value)
{
	struct macio_chip* macio = &macio_chips[0];

	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;
	if (value == 1) {
		MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
	} else if (value == 0) {
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	}

	return 0;
}
327 | |||
/*
 * Power the Heathrow internal modem up (@value != 0) or down via its
 * reset GPIO, with the 250 ms settle delays the hardware needs.  On
 * machines other than Yosemite/Yikes the SCC transceiver enable bit is
 * toggled as well.  Power-up ends with a low/high pulse on the reset
 * line.  @param is unused.  Returns 0 or -ENODEV.
 */
static long heathrow_modem_enable(struct device_node *node, long param,
				  long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	macio = macio_find(node, macio_unknown);
	if (!macio)
		return -ENODEV;
	/* current GPIO value with the reset (bit 0) deasserted */
	gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
	if (!value) {
		LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		mdelay(250);
	}
	if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
	    pmac_mb.model_id != PMAC_TYPE_YIKES) {
		LOCK(flags);
		if (value)
			MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
		else
			MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
		mdelay(250);
	}
	if (value) {
		/* reset pulse: high, low, high, 250 ms apart */
		LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
		(void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
371 | |||
372 | static long heathrow_floppy_enable(struct device_node *node, long param, | ||
373 | long value) | ||
374 | { | ||
375 | return simple_feature_tweak(node, macio_unknown, | ||
376 | HEATHROW_FCR, | ||
377 | HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE, | ||
378 | value); | ||
379 | } | ||
380 | |||
/*
 * Enable (@value != 0) or disable the Heathrow MESH SCSI cell and its
 * SCSI termination power.  Note the termination bit in MBCR is
 * active-low: it is cleared to power termination on.  @param is unused.
 * Returns 0 or -ENODEV.
 */
static long heathrow_mesh_enable(struct device_node *node, long param,
				 long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, macio_unknown);
	if (!macio)
		return -ENODEV;
	LOCK(flags);
	/* Set clear mesh cell enable */
	if (value)
		MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
	else
		MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
	(void)MACIO_IN32(HEATHROW_FCR);
	udelay(10);
	/* Set/Clear termination power */
	if (value)
		MACIO_BIC(HEATHROW_MBCR, 0x04000000);
	else
		MACIO_BIS(HEATHROW_MBCR, 0x04000000);
	(void)MACIO_IN32(HEATHROW_MBCR);
	udelay(10);
	UNLOCK(flags);

	return 0;
}
409 | |||
410 | static long heathrow_ide_enable(struct device_node *node, long param, | ||
411 | long value) | ||
412 | { | ||
413 | switch(param) { | ||
414 | case 0: | ||
415 | return simple_feature_tweak(node, macio_unknown, | ||
416 | HEATHROW_FCR, HRW_IDE0_ENABLE, value); | ||
417 | case 1: | ||
418 | return simple_feature_tweak(node, macio_unknown, | ||
419 | HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value); | ||
420 | default: | ||
421 | return -ENODEV; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | static long heathrow_ide_reset(struct device_node *node, long param, | ||
426 | long value) | ||
427 | { | ||
428 | switch(param) { | ||
429 | case 0: | ||
430 | return simple_feature_tweak(node, macio_unknown, | ||
431 | HEATHROW_FCR, HRW_IDE0_RESET_N, !value); | ||
432 | case 1: | ||
433 | return simple_feature_tweak(node, macio_unknown, | ||
434 | HEATHROW_FCR, HRW_IDE1_RESET_N, !value); | ||
435 | default: | ||
436 | return -ENODEV; | ||
437 | } | ||
438 | } | ||
439 | |||
/*
 * Power the Heathrow BMAC ethernet cell up (@value != 0) or down.
 * Power-up pulses the BMAC reset line with 10 ms delays on either side.
 * @param is unused.  Returns 0 or -ENODEV.
 */
static long heathrow_bmac_enable(struct device_node *node, long param,
				 long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (value) {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
		MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
		mdelay(10);
		LOCK(flags);
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
		mdelay(10);
	} else {
		LOCK(flags);
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
		UNLOCK(flags);
	}
	return 0;
}
468 | |||
/*
 * Power the Heathrow sound chip up (@value != 0) or down: toggles the
 * sound clock enable and the (active-low) sound power bit together.
 * Silently does nothing on Yosemite/Yikes where power-cycling breaks
 * audio.  @param is unused.  Returns 0 or -ENODEV.
 */
static long heathrow_sound_enable(struct device_node *node, long param,
				  long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	/* B&W G3 and Yikes don't support that properly (the
	 * sound appear to never come back after beeing shut down).
	 */
	if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
	    pmac_mb.model_id == PMAC_TYPE_YIKES)
		return 0;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (value) {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
	} else {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		UNLOCK(flags);
	}
	return 0;
}
499 | |||
/* Chip state saved across a sleep/wake cycle by the *_sleep/*_wakeup
 * helpers below: FCR registers, media-bay control, GPIO banks, the
 * UniNorth clock control, and the 13 DBDMA channels of each macio
 * (primary and alternate/gatwick). */
static u32 save_fcr[6];
static u32 save_mbcr;
static u32 save_gpio_levels[2];
static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
static u32 save_unin_clock_ctl;
static struct dbdma_regs save_dbdma[13];
static struct dbdma_regs save_alt_dbdma[13];
508 | |||
/*
 * Save the configuration of the 13 DBDMA channels of @macio into @save.
 * Channel register blocks start at offset 0x8000, one every 0x100 bytes
 * (macio->base is a u32 pointer, hence the >>2 scaling).
 */
static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Save state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
			(macio->base + ((0x8000+i*0x100)>>2));
		save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
		save[i].cmdptr = in_le32(&chan->cmdptr);
		save[i].intr_sel = in_le32(&chan->intr_sel);
		save[i].br_sel = in_le32(&chan->br_sel);
		save[i].wait_sel = in_le32(&chan->wait_sel);
	}
}
524 | |||
/*
 * Restore the configuration of the 13 DBDMA channels of @macio from
 * @save (the counterpart of dbdma_save()).  Each channel is first
 * stopped and flushed, then spun on until it goes inactive, before its
 * saved registers are written back.
 */
static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Restore state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
			(macio->base + ((0x8000+i*0x100)>>2));
		out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
		/* wait for the channel to actually stop */
		while (in_le32(&chan->status) & ACTIVE)
			mb();
		out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
		out_le32(&chan->cmdptr, save[i].cmdptr);
		out_le32(&chan->intr_sel, save[i].intr_sel);
		out_le32(&chan->br_sel, save[i].br_sel);
		out_le32(&chan->wait_sel, save[i].wait_sel);
	}
}
543 | |||
/*
 * Put a Heathrow macio to sleep: save its DBDMA state and FCR
 * registers, then shut down sound, IOBUS, IDE, ethernet and the modem.
 * @secondary selects the gatwick (second macio) save slots instead of
 * the primary ones.
 *
 * NOTE(review): offsets 0x38/0x3c/0x34 are presumably the FCR0/FCR1/
 * MBCR registers addressed numerically — confirm against the register
 * definitions.
 */
static void heathrow_sleep(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		dbdma_save(macio, save_alt_dbdma);
		save_fcr[2] = MACIO_IN32(0x38);
		save_fcr[3] = MACIO_IN32(0x3c);
	} else {
		dbdma_save(macio, save_dbdma);
		save_fcr[0] = MACIO_IN32(0x38);
		save_fcr[1] = MACIO_IN32(0x3c);
		save_mbcr = MACIO_IN32(0x34);
		/* Make sure sound is shut down */
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		/* This seems to be necessary as well or the fan
		 * keeps coming up and battery drains fast */
		MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
		/* Make sure eth is down even if module or sleep
		 * won't work properly */
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
	}
	/* Make sure modem is shut down */
	MACIO_OUT8(HRW_GPIO_MODEM_RESET,
		MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
	MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
	MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);

	/* Let things settle */
	(void)MACIO_IN32(HEATHROW_FCR);
}
575 | |||
/*
 * Wake a Heathrow macio: restore the FCR registers saved by
 * heathrow_sleep() (forcing IOBUS back on for the primary chip), the
 * media-bay control register, and finally the DBDMA channel state.
 * Each write is flushed with a read-back and followed by a settle
 * delay.  @secondary selects the gatwick save slots.
 */
static void heathrow_wakeup(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		MACIO_OUT32(0x38, save_fcr[2]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[3]);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_alt_dbdma);
	} else {
		MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[1]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x34, save_mbcr);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_dbdma);
	}
}
599 | |||
/*
 * Sleep/wake entry point for Heathrow machines: @value == 1 sleeps,
 * @value == 0 wakes, handling the gatwick (secondary macio) before/after
 * the primary as appropriate.  Returns -EPERM when the board is not
 * sleep-capable, else 0.  @param is unused.
 *
 * NOTE(review): the gatwick path passes &macio_chips[0] with
 * secondary=1 rather than &macio_chips[1]; the save slots are selected
 * by the flag, but confirm the chip pointer is intentional.
 */
static long heathrow_sleep_state(struct device_node *node, long param,
				 long value)
{
	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;
	if (value == 1) {
		if (macio_chips[1].type == macio_gatwick)
			heathrow_sleep(&macio_chips[0], 1);
		heathrow_sleep(&macio_chips[0], 0);
	} else if (value == 0) {
		heathrow_wakeup(&macio_chips[0], 0);
		if (macio_chips[1].type == macio_gatwick)
			heathrow_wakeup(&macio_chips[0], 1);
	}
	return 0;
}
616 | |||
/*
 * Enable or disable an SCC channel on Core99 (KeyLargo-family) chips.
 * @node must be the "ch-a" or "ch-b" escc child node; @param carries the
 * PMAC_SCC_* port type plus flags; @value != 0 enables.  The SCC cell
 * (and, for channel B IrDA, the IrDA block) is pulse-reset with 15 ms
 * settle delays when first powered up.  Returns 0, -ENODEV on bad node,
 * or -EPERM when the SCC is locked by xmon.
 */
static long core99_scc_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	unsigned long chan_mask;
	u32 fcr;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (!strcmp(node->name, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (!strcmp(node->name, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	if (value) {
		int need_reset_scc = 0;
		int need_reset_irda = 0;

		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		/* Check if scc cell need enabling */
		if (!(fcr & KL0_SCC_CELL_ENABLE)) {
			fcr |= KL0_SCC_CELL_ENABLE;
			need_reset_scc = 1;
		}
		if (chan_mask & MACIO_FLAG_SCCA_ON) {
			fcr |= KL0_SCCA_ENABLE;
			/* Don't enable line drivers for I2S modem */
			if ((param & 0xfff) == PMAC_SCC_I2S1)
				fcr &= ~KL0_SCC_A_INTF_ENABLE;
			else
				fcr |= KL0_SCC_A_INTF_ENABLE;
		}
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr |= KL0_SCCB_ENABLE;
			/* Perform irda specific inits */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_SCC_B_INTF_ENABLE;
				fcr |= KL0_IRDA_ENABLE;
				fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
				fcr |= KL0_IRDA_SOURCE1_SEL;
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
				need_reset_irda = 1;
			} else
				fcr |= KL0_SCC_B_INTF_ENABLE;
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		macio->flags |= chan_mask;
		if (need_reset_scc)  {
			/* hold SCC reset for 15 ms with the lock dropped */
			MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
		}
		if (need_reset_irda)  {
			MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
		}
		UNLOCK(flags);
		/* xmon owns the port: forbid later disables */
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~KL0_SCCA_ENABLE;
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr &= ~KL0_SCCB_ENABLE;
			/* Perform irda specific clears */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_IRDA_ENABLE;
				fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
			}
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		/* power the whole cell down once both channels are off */
		if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
			fcr &= ~KL0_SCC_CELL_ENABLE;
			MACIO_OUT32(KEYLARGO_FCR0, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
	}
	return 0;
}
716 | |||
/*
 * Power the Core99 (KeyLargo) internal modem up (@value != 0) or down
 * via its reset GPIO and the FCR2 alternate-data-out bit, with the
 * 250 ms settle delays the hardware needs.  A NULL @node selects the
 * primary KeyLargo's own node (internal USB modem hack).  Power-up
 * ends with a high/low/high pulse on the reset GPIO.  @param is unused.
 * Returns 0 or -ENODEV.
 */
static long
core99_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_keylargo)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* GPIO driven as output, data bit initially clear */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
	}
	if (value) {
		/* reset pulse: high, low, high, 250 ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
768 | |||
/*
 * Power the Pangea/Intrepid internal modem up (@value != 0) or down.
 * Same sequence as core99_modem_enable() except that modem power is
 * controlled through a dedicated GPIO (KL_GPIO_MODEM_POWER, active-low
 * data bit) rather than the FCR2 alternate-data-out bit.  A NULL @node
 * selects the primary chip's own node.  @param is unused.  Returns 0
 * or -ENODEV.
 */
static long
pangea_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_pangea &&
		    macio_chips[0].type != macio_intrepid)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* GPIO driven as output, data bit initially clear */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
		UNLOCK(flags);
	}
	if (value) {
		/* reset pulse: high, low, high, 250 ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
823 | |||
/*
 * Gate the UniNorth-2 ATA-100 clock on (@value != 0) or off, then on
 * enable re-enable and re-master the cell's PCI device (the clock gate
 * drops its config state).  Requires UniNorth rev >= 0x24.  Returns 0
 * (also when the PCI device cannot be found) or -ENODEV.
 */
static long
core99_ata100_enable(struct device_node *node, long value)
{
	unsigned long flags;
	struct pci_dev *pdev = NULL;
	u8 pbus, pid;

	if (uninorth_rev < 0x24)
		return -ENODEV;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	if (value) {
		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
			pdev = pci_find_slot(pbus, pid);
		if (pdev == NULL)
			return 0;
		pci_enable_device(pdev);
		pci_set_master(pdev);
	}
	return 0;
}
853 | |||
854 | static long | ||
855 | core99_ide_enable(struct device_node *node, long param, long value) | ||
856 | { | ||
857 | /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2 | ||
858 | * based ata-100 | ||
859 | */ | ||
860 | switch(param) { | ||
861 | case 0: | ||
862 | return simple_feature_tweak(node, macio_unknown, | ||
863 | KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value); | ||
864 | case 1: | ||
865 | return simple_feature_tweak(node, macio_unknown, | ||
866 | KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value); | ||
867 | case 2: | ||
868 | return simple_feature_tweak(node, macio_unknown, | ||
869 | KEYLARGO_FCR1, KL1_UIDE_ENABLE, value); | ||
870 | case 3: | ||
871 | return core99_ata100_enable(node, value); | ||
872 | default: | ||
873 | return -ENODEV; | ||
874 | } | ||
875 | } | ||
876 | |||
877 | static long | ||
878 | core99_ide_reset(struct device_node *node, long param, long value) | ||
879 | { | ||
880 | switch(param) { | ||
881 | case 0: | ||
882 | return simple_feature_tweak(node, macio_unknown, | ||
883 | KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value); | ||
884 | case 1: | ||
885 | return simple_feature_tweak(node, macio_unknown, | ||
886 | KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value); | ||
887 | case 2: | ||
888 | return simple_feature_tweak(node, macio_unknown, | ||
889 | KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value); | ||
890 | default: | ||
891 | return -ENODEV; | ||
892 | } | ||
893 | } | ||
894 | |||
/*
 * Gate the UniNorth GMAC (gigabit ethernet) clock on (@value != 0) or
 * off.  @node and @param are unused.  Always returns 0.
 */
static long
core99_gmac_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	return 0;
}
911 | |||
/*
 * Pulse the GMAC ethernet PHY reset GPIO on the primary KeyLargo/
 * Pangea/Intrepid: drive it low (output enabled, data clear) for
 * 10 ms, then release it high for 10 ms.  @node, @param and @value are
 * unused.  Returns 0 or -ENODEV on an unsupported macio type.
 */
static long
core99_gmac_phy_reset(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
	UNLOCK(flags);
	mdelay(10);
	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
		   KEYLARGO_GPIO_OUTOUT_DATA);
	UNLOCK(flags);
	mdelay(10);

	return 0;
}
936 | |||
/*
 * Power the sound chip up (@value != 0) or down through its power GPIO.
 * Only acts on Pismo and Titanium models; a no-op elsewhere.  @param is
 * unused.  Returns 0 or -ENODEV.
 */
static long
core99_sound_chip_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Do a better probe code, screamer G4 desktops &
	 * iMacs can do that too, add a recalibrate  in
	 * the driver as well
	 */
	if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
	    pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
		LOCK(flags);
		if (value)
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				   KEYLARGO_GPIO_OUTPUT_ENABLE |
				   KEYLARGO_GPIO_OUTOUT_DATA);
		else
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				   KEYLARGO_GPIO_OUTPUT_ENABLE);
		(void)MACIO_IN8(KL_GPIO_SOUND_POWER);
		UNLOCK(flags);
	}
	return 0;
}
966 | |||
/*
 * Power the AirPort (wireless) card slot up (@value != 0) or down.
 * @node must be the macio node itself or a direct child of it (the
 * sleep code passes the macio node directly).  The power-up sequence
 * replays the Open Firmware enable-cardslot/init-wireless methods; the
 * shutdown clears the cardsel bit and all five AirPort GPIOs.  The
 * current state is tracked in macio->flags so redundant calls are
 * no-ops.  @param is unused.  Returns 0 or -ENODEV.
 */
static long
core99_airport_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	int state;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Hint: we allow passing of macio itself for the sake of the
	 * sleep code
	 */
	if (node != macio->of_node &&
	    (!node->parent || node->parent != macio->of_node))
		return -ENODEV;
	state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
	if (value == state)
		return 0;
	if (value) {
		/* This code is a reproduction of OF enable-cardslot
		 * and init-wireless methods, slightly hacked until
		 * I got it working.
		 */
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);

		mdelay(10);

		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
		UNLOCK(flags);
		udelay(10);
		MACIO_OUT32(0x1c000, 0);
		mdelay(1);
		MACIO_OUT8(0x1a3e0, 0x41);
		(void)MACIO_IN8(0x1a3e0);
		udelay(10);
		LOCK(flags);
		MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		UNLOCK(flags);
		mdelay(100);

		macio->flags |= MACIO_FLAG_AIRPORT_ON;
	} else {
		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
		(void)MACIO_IN8(KL_GPIO_AIRPORT_4);
		UNLOCK(flags);

		macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
	}
	return 0;
}
1052 | |||
#ifdef CONFIG_SMP
/*
 * Pulse the soft-reset GPIO of CPU number @param on a KeyLargo-based
 * SMP machine: drive the line (output-enable) for 1 us, then release
 * it.  The GPIO offset is taken from the CPU node's "soft-reset"
 * property when present, falling back to the per-CPU default table.
 * @node and @value are unused.  Returns 0 or -ENODEV.
 *
 * Fix vs. original: @param was used to index the 4-entry fallback
 * table without validation, so a bogus CPU number read past the end
 * of dflt_reset_lines[] (undefined behavior).  Out-of-range values
 * now return -ENODEV.
 */
static long
core99_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;
	const int dflt_reset_lines[] = {	KL_GPIO_RESET_CPU0,
						KL_GPIO_RESET_CPU1,
						KL_GPIO_RESET_CPU2,
						KL_GPIO_RESET_CPU3 };

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo)
		return -ENODEV;

	/* Prefer the "soft-reset" property of the matching cpu node */
	np = find_path_device("/cpus");
	if (np == NULL)
		return -ENODEV;
	for (np = np->child; np != NULL; np = np->sibling) {
		u32 *num = (u32 *)get_property(np, "reg", NULL);
		u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	if (np == NULL || reset_io == 0) {
		/* Fall back to the default line for this CPU, but guard
		 * against an out-of-range CPU number indexing past the
		 * table. */
		if (param < 0 ||
		    param >= (long)(sizeof(dflt_reset_lines) /
				    sizeof(dflt_reset_lines[0])))
			return -ENODEV;
		reset_io = dflt_reset_lines[param];
	}

	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
#endif /* CONFIG_SMP */
1097 | |||
/*
 * Enable or disable one of the USB cells on a Core99-family mac-io
 * (KeyLargo, Pangea or Intrepid).
 *
 * The cell is identified via the node's "AAPL,clock-id" property:
 * usb0u048 -> 0, usb1u148 -> 2, usb2u248 -> 4 (the value doubles as
 * the first of the two FCR4 port indices for that cell).
 *
 * value != 0 turns the cell on (un-suspend pads, enable cell, clear
 * wake bits); value == 0 turns it off (set wake bits, disable cell,
 * suspend pads).  Returns 0 on success, -ENODEV otherwise.
 */
static long
core99_usb_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio;
	unsigned long flags;
	char *prop;
	int number;
	u32 reg;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	prop = (char *)get_property(node, "AAPL,clock-id", NULL);
	if (!prop)
		return -ENODEV;
	if (strncmp(prop, "usb0u048", 8) == 0)
		number = 0;
	else if (strncmp(prop, "usb1u148", 8) == 0)
		number = 2;
	else if (strncmp(prop, "usb2u248", 8) == 0)
		number = 4;
	else
		return -ENODEV;

	/* Sorry for the brute-force locking, but this is only used during
	 * sleep and the timing seem to be critical
	 */
	LOCK(flags);
	if (value) {
		/* Turn ON: release the pad suspend bits, wait 1ms with the
		 * lock dropped, then enable the cell. */
		if (number == 0) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
		} else if (number == 2) {
			/* NOTE(review): unlike the number==0 case, the flush
			 * read here happens after UNLOCK — looks inconsistent;
			 * confirm whether the ordering matters. */
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
		} else if (number == 4) {
			MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR1);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
		}
		/* Clear the wake-up enable bits for both ports of the cell:
		 * cells 0/2 live in FCR4, cell 4 in FCR3 (Intrepid layout). */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg &= ~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
				 KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
			reg &= ~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
				 KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(10);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
				 KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
			reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
				 KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(10);
		}
		if (macio->type == macio_intrepid) {
			/* wait for clock stopped bits to clear */
			u32 test0 = 0, test1 = 0;
			u32 status0, status1;
			int timeout = 1000;

			/* Lock dropped: the poll below mdelay()s up to 1s. */
			UNLOCK(flags);
			switch (number) {
			case 0:
				test0 = UNI_N_CLOCK_STOPPED_USB0;
				test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
				break;
			case 2:
				test0 = UNI_N_CLOCK_STOPPED_USB1;
				test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
				break;
			case 4:
				test0 = UNI_N_CLOCK_STOPPED_USB2;
				test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
				break;
			}
			do {
				if (--timeout <= 0) {
					printk(KERN_ERR "core99_usb_enable: "
					       "Timeout waiting for clocks\n");
					break;
				}
				mdelay(1);
				status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
				status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
			} while ((status0 & test0) | (status1 & test1));
			LOCK(flags);
		}
	} else {
		/* Turn OFF: first arm all wake-up bits for both ports ... */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg |= KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
			       KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
			reg |= KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
			       KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(1);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg |= KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
			       KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
			reg |= KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
			       KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(1);
		}
		/* ... then disable the cell (not on Intrepid) and suspend
		 * the pads. */
		if (number == 0) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 2) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 4) {
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR1);
		}
		udelay(1);
	}
	UNLOCK(flags);

	return 0;
}
1250 | |||
1251 | static long | ||
1252 | core99_firewire_enable(struct device_node *node, long param, long value) | ||
1253 | { | ||
1254 | unsigned long flags; | ||
1255 | struct macio_chip *macio; | ||
1256 | |||
1257 | macio = &macio_chips[0]; | ||
1258 | if (macio->type != macio_keylargo && macio->type != macio_pangea && | ||
1259 | macio->type != macio_intrepid) | ||
1260 | return -ENODEV; | ||
1261 | if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED)) | ||
1262 | return -ENODEV; | ||
1263 | |||
1264 | LOCK(flags); | ||
1265 | if (value) { | ||
1266 | UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW); | ||
1267 | (void)UN_IN(UNI_N_CLOCK_CNTL); | ||
1268 | } else { | ||
1269 | UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW); | ||
1270 | (void)UN_IN(UNI_N_CLOCK_CNTL); | ||
1271 | } | ||
1272 | UNLOCK(flags); | ||
1273 | mdelay(1); | ||
1274 | |||
1275 | return 0; | ||
1276 | } | ||
1277 | |||
1278 | static long | ||
1279 | core99_firewire_cable_power(struct device_node *node, long param, long value) | ||
1280 | { | ||
1281 | unsigned long flags; | ||
1282 | struct macio_chip *macio; | ||
1283 | |||
1284 | /* Trick: we allow NULL node */ | ||
1285 | if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0) | ||
1286 | return -ENODEV; | ||
1287 | macio = &macio_chips[0]; | ||
1288 | if (macio->type != macio_keylargo && macio->type != macio_pangea && | ||
1289 | macio->type != macio_intrepid) | ||
1290 | return -ENODEV; | ||
1291 | if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED)) | ||
1292 | return -ENODEV; | ||
1293 | |||
1294 | LOCK(flags); | ||
1295 | if (value) { | ||
1296 | MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0); | ||
1297 | MACIO_IN8(KL_GPIO_FW_CABLE_POWER); | ||
1298 | udelay(10); | ||
1299 | } else { | ||
1300 | MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4); | ||
1301 | MACIO_IN8(KL_GPIO_FW_CABLE_POWER); udelay(10); | ||
1302 | } | ||
1303 | UNLOCK(flags); | ||
1304 | mdelay(1); | ||
1305 | |||
1306 | return 0; | ||
1307 | } | ||
1308 | |||
/*
 * Toggle the AACK delay feature in the UniNorth bridge.  Only available
 * on UniNorth revisions >= 0xd2; param != 0 enables the delay, 0
 * disables it.  Returns 0 on success, -ENODEV on older revisions.
 */
static long
intrepid_aack_delay_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	if (uninorth_rev < 0xd2)
		return -ENODEV;

	LOCK(flags);
	/* NOTE: the selector is 'param' here, not 'value' as in most
	 * other feature calls in this file. */
	if (param)
		UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	else
		UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	UNLOCK(flags);

	return 0;
}
1326 | |||
1327 | |||
1328 | #endif /* CONFIG_POWER4 */ | ||
1329 | |||
/*
 * Read one mac-io GPIO register; 'param' is the register offset.
 * Returns the raw 8-bit GPIO value.
 */
static long
core99_read_gpio(struct device_node *node, long param, long value)
{
	/* 'macio' looks unused but is presumably referenced implicitly by
	 * the MACIO_IN8() accessor macro — TODO confirm; do not remove. */
	struct macio_chip *macio = &macio_chips[0];

	return MACIO_IN8(param);
}
1337 | |||
1338 | |||
/*
 * Write the low 8 bits of 'value' to the mac-io GPIO register at
 * offset 'param'.  Always returns 0.
 */
static long
core99_write_gpio(struct device_node *node, long param, long value)
{
	/* 'macio' looks unused but is presumably referenced implicitly by
	 * the MACIO_OUT8() accessor macro — TODO confirm; do not remove. */
	struct macio_chip *macio = &macio_chips[0];

	MACIO_OUT8(param, (u8)(value & 0xff));
	return 0;
}
1347 | |||
1348 | #ifdef CONFIG_POWER4 | ||
/*
 * Gate the GMAC (ethernet) clock on K2 (G5) machines and keep the
 * k2_skiplist entry in sync so the device is skipped while its clock
 * is off.  value != 0 enables the clock; 0 disables it.
 */
static long g5_gmac_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		/* Enable clock first, then remove the node from the
		 * skiplist; mb() orders the two updates. */
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
		mb();
		k2_skiplist[0] = NULL;
	} else {
		/* Mirror order on disable: publish the skiplist entry
		 * before gating the clock. */
		k2_skiplist[0] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
	}

	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1373 | |||
/*
 * Gate the FireWire clock on K2 (G5) machines; same skiplist protocol
 * as g5_gmac_enable() but using k2_skiplist[1].
 */
static long g5_fw_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		/* Clock on, then drop the node from the skiplist */
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
		mb();
		k2_skiplist[1] = NULL;
	} else {
		/* Publish the skiplist entry before gating the clock */
		k2_skiplist[1] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
	}

	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1398 | |||
1399 | static long g5_mpic_enable(struct device_node *node, long param, long value) | ||
1400 | { | ||
1401 | unsigned long flags; | ||
1402 | |||
1403 | if (node->parent == NULL || strcmp(node->parent->name, "u3")) | ||
1404 | return 0; | ||
1405 | |||
1406 | LOCK(flags); | ||
1407 | UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE); | ||
1408 | UNLOCK(flags); | ||
1409 | |||
1410 | return 0; | ||
1411 | } | ||
1412 | |||
/*
 * Reset the ethernet PHY on G5 machines — but only the BCM5221 part
 * (iMac G5); combo PHYs must not be reset.  Pulses GPIO 29 low-high
 * with a 10ms hold.  Returns 0 (also when no reset is needed),
 * -ENODEV if the MAC node has no PHY child.
 */
static long g5_eth_phy_reset(struct device_node *node, long param, long value)
{
	/* 'macio' looks unused but is presumably referenced implicitly by
	 * the MACIO_OUT8() accessor macro — TODO confirm; do not remove. */
	struct macio_chip *macio = &macio_chips[0];
	struct device_node *phy;
	int need_reset;

	/*
	 * We must not reset the combo PHYs, only the BCM5221 found in
	 * the iMac G5.
	 */
	phy = of_get_next_child(node, NULL);
	if (!phy)
		return -ENODEV;
	need_reset = device_is_compatible(phy, "B5221");
	of_node_put(phy);
	if (!need_reset)
		return 0;

	/* PHY reset is GPIO 29, not in device-tree unfortunately */
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
		   KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
	/* Thankfully, this is now always called at a time when we can
	 * schedule by sungem.
	 */
	msleep(10);
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);

	return 0;
}
1442 | |||
/*
 * Enable the I2S0 audio cell on K2 (G5): clocks first, then cell/clock
 * enables, then release reset — with 10us settling delays between
 * steps.  Disabling (value == 0) is deliberately not implemented yet.
 */
static long g5_i2s_enable(struct device_node *node, long param, long value)
{
	/* Very crude implementation for now */
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (value == 0)
		return 0; /* don't disable yet */

	LOCK(flags);
	MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
		  KL3_I2S0_CLK18_ENABLE);
	udelay(10);
	MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
		  K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
	udelay(10);
	MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
	UNLOCK(flags);
	udelay(10);

	return 0;
}
1465 | |||
1466 | |||
1467 | #ifdef CONFIG_SMP | ||
/*
 * Pulse the soft-reset GPIO of the CPU selected by 'param' on
 * KeyLargo2 (G5) SMP machines.  Unlike the Core99 variant there is no
 * hard-coded fallback table: the CPU node must carry a "soft-reset"
 * property or we return -ENODEV.
 */
static long g5_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo2)
		return -ENODEV;

	np = find_path_device("/cpus");
	if (np == NULL)
		return -ENODEV;
	/* Find the CPU node whose "reg" matches param and read the GPIO
	 * address from its "soft-reset" property. */
	for (np = np->child; np != NULL; np = np->sibling) {
		u32 *num = (u32 *)get_property(np, "reg", NULL);
		u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	if (np == NULL || reset_io == 0)
		return -ENODEV;

	/* Assert then release the reset line; reads flush posted writes */
	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
1505 | #endif /* CONFIG_SMP */ | ||
1506 | |||
1507 | /* | ||
1508 | * This can be called from pmac_smp so isn't static | ||
1509 | * | ||
1510 | * This takes the second CPU off the bus on dual CPU machines | ||
1511 | * running UP | ||
1512 | */ | ||
1513 | void g5_phy_disable_cpu1(void) | ||
1514 | { | ||
1515 | UN_OUT(U3_API_PHY_CONFIG_1, 0); | ||
1516 | } | ||
1517 | #endif /* CONFIG_POWER4 */ | ||
1518 | |||
1519 | #ifndef CONFIG_POWER4 | ||
1520 | |||
/*
 * Power down as much of a KeyLargo mac-io as possible, used on the
 * sleep path (sleep_mode != 0 shuts down more clocks/PLLs than the
 * plain shutdown).  Callers are expected to have saved the FCR state
 * beforehand (see core99_sleep()).
 */
static void
keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	if (sleep_mode) {
		/* Suspend the USB reference clock first */
		mdelay(1);
		MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
		(void)MACIO_IN32(KEYLARGO_FCR0);
		mdelay(1);
	}

	/* Kill SCC and IrDA cells */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE |
		  KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
		  KL0_IRDA_CLK19_ENABLE);

	/* Force media-bay 0 into IDE mode */
	MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
	MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);

	/* Kill audio, I2S and IDE cells */
	MACIO_BIC(KEYLARGO_FCR1,
		  KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		  KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		  KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		  KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		  KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		  KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
		  KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
		  KL1_UIDE_ENABLE);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
	MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);

	/* Shut down PLLs and clocks in FCR3; rev >= 2 chips support a
	 * few extra shutdown bits. */
	temp = MACIO_IN32(KEYLARGO_FCR3);
	if (macio->rev >= 2) {
		temp |= KL3_SHUTDOWN_PLL2X;
		if (sleep_mode)
			temp |= KL3_SHUTDOWN_PLL_TOTAL;
	}

	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	if (sleep_mode)
		temp |= KL3_SHUTDOWN_PLLKW12;
	temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
		| KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		/* Timer/VIA clocks only go down for a real sleep */
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
1574 | |||
/*
 * Pangea variant of the mac-io shutdown: same idea as
 * keylargo_shutdown() with the Pangea register layout (no media-bay,
 * USB cells live in FCR0).
 */
static void
pangea_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	/* Kill SCC and both USB cells */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE |
		  KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);

	/* Kill audio, I2S and Ultra-IDE cells */
	MACIO_BIC(KEYLARGO_FCR1,
		  KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		  KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		  KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		  KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		  KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		  KL1_UIDE_ENABLE);
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		/* Laptops additionally hold the IDE cell in reset */
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);

	/* Shut down PLLs and clocks in FCR3 */
	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
		| KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
1608 | |||
/*
 * Intrepid variant of the mac-io shutdown; the USB2 cell is left alone
 * (commented out below), and the VIA clock bit differs from the other
 * chips (KL3_IT_VIA_CLK32_ENABLE).
 */
static void
intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	/* Kill the SCC cell */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE);

	/* Kill the I2S cells */
	MACIO_BIC(KEYLARGO_FCR1,
		  /*KL1_USB2_CELL_ENABLE |*/
		  KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		  KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		  KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		/* Laptops additionally hold the IDE cell in reset */
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	/* Stop clocks in FCR3 */
	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
		  KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(10);
}
1636 | |||
1637 | |||
void pmac_tweak_clock_spreading(int enable)
{
	struct macio_chip *macio = &macio_chips[0];

	/* Hack for doing clock spreading on some machines PowerBooks and
	 * iBooks. This implements the "platform-do-clockspreading" OF
	 * property as decoded manually on various models. For safety, we also
	 * check the product ID in the device-tree in cases we'll whack the i2c
	 * chip to make reasonably sure we won't set wrong values in there
	 *
	 * Of course, ultimately, we have to implement a real parser for
	 * the platform-do-* stuff...
	 */

	/* Intrepid has a dedicated UniNorth register for spreading */
	if (macio->type == macio_intrepid) {
		if (enable)
			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
		else
			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
		mdelay(40);
	}

	/* NOTE: 'while' is used here as a breakable one-shot block — every
	 * path ends in 'break'; it never actually loops. */
	while (machine_is_compatible("PowerBook5,2") ||
	       machine_is_compatible("PowerBook5,3") ||
	       machine_is_compatible("PowerBook6,2") ||
	       machine_is_compatible("PowerBook6,3")) {
		struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
		struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
		u8 buffer[9];
		u32 *productID;
		int i, rc, changed = 0;

		if (dt == NULL)
			break;
		productID = (u32 *)get_property(dt, "pid#", NULL);
		if (productID == NULL)
			break;
		/* Find the i2c bus hanging off the "uni-n" bridge */
		while(ui2c) {
			struct device_node *p = of_get_parent(ui2c);
			if (p && !strcmp(p->name, "uni-n"))
				break;
			ui2c = of_find_node_by_type(ui2c, "i2c");
		}
		if (ui2c == NULL)
			break;
		DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
		rc = pmac_low_i2c_open(ui2c, 1);
		if (rc != 0)
			break;
		/* Read the 9 bytes of clock-chip state at subaddress 0x80
		 * from i2c device 0xd2 */
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
		DBG("read result: %d,", rc);
		if (rc != 0) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		for (i=0; i<9; i++)
			DBG(" %02x", buffer[i]);
		DBG("\n");

		/* Per-model magic register values, decoded manually from
		 * the OF "platform-do-clockspreading" property (see the
		 * comment at the top of the function). */
		switch(*productID) {
		case 0x1182:	/* AlBook 12" rev 2 */
		case 0x1183:	/* iBook G4 12" */
			buffer[0] = (buffer[0] & 0x8f) | 0x70;
			buffer[2] = (buffer[2] & 0x7f) | 0x00;
			buffer[5] = (buffer[5] & 0x80) | 0x31;
			buffer[6] = (buffer[6] & 0x40) | 0xb0;
			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
			buffer[8] = (buffer[8] & 0x00) | 0x30;
			changed = 1;
			break;
		case 0x3142:	/* AlBook 15" (ATI M10) */
		case 0x3143:	/* AlBook 17" (ATI M10) */
			buffer[0] = (buffer[0] & 0xaf) | 0x50;
			buffer[2] = (buffer[2] & 0x7f) | 0x00;
			buffer[5] = (buffer[5] & 0x80) | 0x31;
			buffer[6] = (buffer[6] & 0x40) | 0xb0;
			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
			buffer[8] = (buffer[8] & 0x00) | 0x30;
			changed = 1;
			break;
		default:
			DBG("i2c-hwclock: Machine model not handled\n");
			break;
		}
		if (!changed) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		/* Write back the modified bytes, then read them again for
		 * debug verification */
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
		DBG("write result: %d,", rc);
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
		DBG("read result: %d,", rc);
		if (rc != 0) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		for (i=0; i<9; i++)
			DBG(" %02x", buffer[i]);
		pmac_low_i2c_close(ui2c);
		break;
	}
}
1743 | |||
1744 | |||
1745 | static int | ||
1746 | core99_sleep(void) | ||
1747 | { | ||
1748 | struct macio_chip *macio; | ||
1749 | int i; | ||
1750 | |||
1751 | macio = &macio_chips[0]; | ||
1752 | if (macio->type != macio_keylargo && macio->type != macio_pangea && | ||
1753 | macio->type != macio_intrepid) | ||
1754 | return -ENODEV; | ||
1755 | |||
1756 | /* We power off the wireless slot in case it was not done | ||
1757 | * by the driver. We don't power it on automatically however | ||
1758 | */ | ||
1759 | if (macio->flags & MACIO_FLAG_AIRPORT_ON) | ||
1760 | core99_airport_enable(macio->of_node, 0, 0); | ||
1761 | |||
1762 | /* We power off the FW cable. Should be done by the driver... */ | ||
1763 | if (macio->flags & MACIO_FLAG_FW_SUPPORTED) { | ||
1764 | core99_firewire_enable(NULL, 0, 0); | ||
1765 | core99_firewire_cable_power(NULL, 0, 0); | ||
1766 | } | ||
1767 | |||
1768 | /* We make sure int. modem is off (in case driver lost it) */ | ||
1769 | if (macio->type == macio_keylargo) | ||
1770 | core99_modem_enable(macio->of_node, 0, 0); | ||
1771 | else | ||
1772 | pangea_modem_enable(macio->of_node, 0, 0); | ||
1773 | |||
1774 | /* We make sure the sound is off as well */ | ||
1775 | core99_sound_chip_enable(macio->of_node, 0, 0); | ||
1776 | |||
1777 | /* | ||
1778 | * Save various bits of KeyLargo | ||
1779 | */ | ||
1780 | |||
1781 | /* Save the state of the various GPIOs */ | ||
1782 | save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0); | ||
1783 | save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1); | ||
1784 | for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++) | ||
1785 | save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i); | ||
1786 | for (i=0; i<KEYLARGO_GPIO_CNT; i++) | ||
1787 | save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i); | ||
1788 | |||
1789 | /* Save the FCRs */ | ||
1790 | if (macio->type == macio_keylargo) | ||
1791 | save_mbcr = MACIO_IN32(KEYLARGO_MBCR); | ||
1792 | save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0); | ||
1793 | save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1); | ||
1794 | save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2); | ||
1795 | save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3); | ||
1796 | save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4); | ||
1797 | if (macio->type == macio_pangea || macio->type == macio_intrepid) | ||
1798 | save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5); | ||
1799 | |||
1800 | /* Save state & config of DBDMA channels */ | ||
1801 | dbdma_save(macio, save_dbdma); | ||
1802 | |||
1803 | /* | ||
1804 | * Turn off as much as we can | ||
1805 | */ | ||
1806 | if (macio->type == macio_pangea) | ||
1807 | pangea_shutdown(macio, 1); | ||
1808 | else if (macio->type == macio_intrepid) | ||
1809 | intrepid_shutdown(macio, 1); | ||
1810 | else if (macio->type == macio_keylargo) | ||
1811 | keylargo_shutdown(macio, 1); | ||
1812 | |||
1813 | /* | ||
1814 | * Put the host bridge to sleep | ||
1815 | */ | ||
1816 | |||
1817 | save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL); | ||
1818 | /* Note: do not switch GMAC off, driver does it when necessary, WOL must keep it | ||
1819 | * enabled ! | ||
1820 | */ | ||
1821 | UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl & | ||
1822 | ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/)); | ||
1823 | udelay(100); | ||
1824 | UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING); | ||
1825 | UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP); | ||
1826 | mdelay(10); | ||
1827 | |||
1828 | /* | ||
1829 | * FIXME: A bit of black magic with OpenPIC (don't ask me why) | ||
1830 | */ | ||
1831 | if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) { | ||
1832 | MACIO_BIS(0x506e0, 0x00400000); | ||
1833 | MACIO_BIS(0x506e0, 0x80000000); | ||
1834 | } | ||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
/*
 * Wake a Core99-family machine: bring the UniNorth host bridge back to
 * its running state, then restore the FCR/DBDMA/GPIO state saved by
 * core99_sleep() (reverse order of the save).
 * Returns 0 on success, -ENODEV for unsupported mac-io types.
 */
static int
core99_wake_up(void)
{
	struct macio_chip *macio;
	int i;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/*
	 * Wakeup the host bridge
	 */
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
	udelay(10);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
	udelay(10);

	/*
	 * Restore KeyLargo
	 */

	if (macio->type == macio_keylargo) {
		MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
		(void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
	}
	/* Each restore is followed by a flush read and a short settle */
	MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
	(void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
	(void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
	(void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
	(void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
	(void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
	if (macio->type == macio_pangea || macio->type == macio_intrepid) {
		MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
		(void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
	}

	dbdma_restore(macio, save_dbdma);

	MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
	MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);

	/* FIXME more black magic with OpenPIC ... */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIC(0x506e0, 0x00400000);
		MACIO_BIC(0x506e0, 0x80000000);
	}

	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
	udelay(100);

	return 0;
}
1900 | |||
1901 | static long | ||
1902 | core99_sleep_state(struct device_node *node, long param, long value) | ||
1903 | { | ||
1904 | /* Param == 1 means to enter the "fake sleep" mode that is | ||
1905 | * used for CPU speed switch | ||
1906 | */ | ||
1907 | if (param == 1) { | ||
1908 | if (value == 1) { | ||
1909 | UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING); | ||
1910 | UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2); | ||
1911 | } else { | ||
1912 | UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL); | ||
1913 | udelay(10); | ||
1914 | UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING); | ||
1915 | udelay(10); | ||
1916 | } | ||
1917 | return 0; | ||
1918 | } | ||
1919 | if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) | ||
1920 | return -EPERM; | ||
1921 | |||
1922 | if (value == 1) | ||
1923 | return core99_sleep(); | ||
1924 | else if (value == 0) | ||
1925 | return core99_wake_up(); | ||
1926 | return 0; | ||
1927 | } | ||
1928 | |||
1929 | #endif /* CONFIG_POWER4 */ | ||
1930 | |||
1931 | static long | ||
1932 | generic_dev_can_wake(struct device_node *node, long param, long value) | ||
1933 | { | ||
1934 | /* Todo: eventually check we are really dealing with on-board | ||
1935 | * video device ... | ||
1936 | */ | ||
1937 | |||
1938 | if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP) | ||
1939 | pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP; | ||
1940 | return 0; | ||
1941 | } | ||
1942 | |||
1943 | static long generic_get_mb_info(struct device_node *node, long param, long value) | ||
1944 | { | ||
1945 | switch(param) { | ||
1946 | case PMAC_MB_INFO_MODEL: | ||
1947 | return pmac_mb.model_id; | ||
1948 | case PMAC_MB_INFO_FLAGS: | ||
1949 | return pmac_mb.board_flags; | ||
1950 | case PMAC_MB_INFO_NAME: | ||
1951 | /* hack hack hack... but should work */ | ||
1952 | *((const char **)value) = pmac_mb.model_name; | ||
1953 | return 0; | ||
1954 | } | ||
1955 | return -EINVAL; | ||
1956 | } | ||
1957 | |||
1958 | |||
1959 | /* | ||
1960 | * Table definitions | ||
1961 | */ | ||
1962 | |||
/* Used on any machine
 *
 * Maps PMAC_FTR_* selectors to handlers; terminated by a { 0, NULL }
 * sentinel like all feature tables in this file.
 */
static struct feature_table_entry any_features[] = {
	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
	{ PMAC_FTR_DEVICE_CAN_WAKE,	generic_dev_can_wake },
	{ 0, NULL }
};
1970 | |||
1971 | #ifndef CONFIG_POWER4 | ||
1972 | |||
/* OHare based motherboards. Currently, we only use these on the
 * 2400,3400 and 3500 series powerbooks. Some older desktops seem
 * to have issues with turning on/off those asic cells
 *
 * Feature table: PMAC_FTR_* selector -> handler; { 0, NULL } terminated.
 */
static struct feature_table_entry ohare_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	ohare_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		ohare_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		ohare_ide_enable},
	{ PMAC_FTR_IDE_RESET,		ohare_ide_reset},
	{ PMAC_FTR_SLEEP_STATE,		ohare_sleep_state },
	{ 0, NULL }
};
1986 | |||
/* Heathrow desktop machines (Beige G3).
 * Separated as some features couldn't be properly tested
 * and the serial port control bits appear to confuse it.
 * Note: no SCC, sound or sleep entries, unlike the laptop table.
 */
static struct feature_table_entry heathrow_desktop_features[] = {
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ 0, NULL }
};
1999 | |||
/* Heathrow based laptop, that is the Wallstreet and mainstreet
 * powerbooks. Adds SCC/modem/sound/sleep handling on top of the
 * desktop set.
 */
static struct feature_table_entry heathrow_laptop_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
	{ 0, NULL }
};
2015 | |||
/* Paddington based machines.
 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
 * Same handler set as the heathrow laptops (Paddington is a close
 * relative of Heathrow).
 */
static struct feature_table_entry paddington_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
	{ 0, NULL }
};
2031 | |||
/* Core99 & MacRISC 2 machines (all machines released since the
 * iBook (included), that is all AGP machines, except pangea
 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
 * used on iBook2 & iMac "flow power".
 * CPU reset is only available on SMP builds.
 */
static struct feature_table_entry core99_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	core99_modem_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		core99_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }
};
2057 | |||
/* RackMac (XServe).
 * Core99 subset: no modem, sound, or airport handlers on the
 * rack-mount servers.
 */
static struct feature_table_entry rackmac_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		core99_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }
};
2077 | |||
/* Pangea features.
 * Like core99 but with the pangea-specific modem handler and no
 * CPU-reset entry.
 */
static struct feature_table_entry pangea_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }
};
2097 | |||
/* Intrepid features.
 * Same set as pangea plus the Intrepid-only AACK delay control.
 */
static struct feature_table_entry intrepid_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	pangea_modem_enable },
	{ PMAC_FTR_IDE_ENABLE,		core99_ide_enable },
	{ PMAC_FTR_IDE_RESET,		core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE,		core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE,	core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE,		core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE,		core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER,	core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE,		core99_sleep_state },
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ PMAC_FTR_AACK_DELAY_ENABLE,	intrepid_aack_delay_enable },
	{ 0, NULL }
};
2118 | |||
2119 | #else /* CONFIG_POWER4 */ | ||
2120 | |||
/* G5 features (CONFIG_POWER4 / K2 mac-io).
 * GPIO handlers are shared with core99; the rest are G5-specific.
 */
static struct feature_table_entry g5_features[] = {
	{ PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	g5_eth_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	g5_i2s_enable },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		g5_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }
};
2136 | |||
2137 | #endif /* CONFIG_POWER4 */ | ||
2138 | |||
/* Known motherboard database, matched by probe_motherboard() against
 * the device-tree "model" property (exact match first, then
 * "compatible"). Each entry carries: the device-tree model string,
 * a human-readable name, a model id, the feature table to install in
 * pmac_mb.features, and the initial board_flags.
 */
static struct pmac_mb_def pmac_mb_defs[] = {
#ifndef CONFIG_POWER4
	/*
	 * Desktops
	 */

	{	"AAPL,8500",			"PowerMac 8500/8600",
		PMAC_TYPE_PSURGE,		NULL,
		0
	},
	{	"AAPL,9500",			"PowerMac 9500/9600",
		PMAC_TYPE_PSURGE,		NULL,
		0
	},
	{	"AAPL,7200",			"PowerMac 7200",
		PMAC_TYPE_PSURGE,		NULL,
		0
	},
	{	"AAPL,7300",			"PowerMac 7200/7300",
		PMAC_TYPE_PSURGE,		NULL,
		0
	},
	{	"AAPL,7500",			"PowerMac 7500",
		PMAC_TYPE_PSURGE,		NULL,
		0
	},
	{	"AAPL,ShinerESB",		"Apple Network Server",
		PMAC_TYPE_ANS,			NULL,
		0
	},
	{	"AAPL,e407",			"Alchemy",
		PMAC_TYPE_ALCHEMY,		NULL,
		0
	},
	{	"AAPL,e411",			"Gazelle",
		PMAC_TYPE_GAZELLE,		NULL,
		0
	},
	{	"AAPL,Gossamer",		"PowerMac G3 (Gossamer)",
		PMAC_TYPE_GOSSAMER,		heathrow_desktop_features,
		0
	},
	{	"AAPL,PowerMac G3",		"PowerMac G3 (Silk)",
		PMAC_TYPE_SILK,			heathrow_desktop_features,
		0
	},
	{	"PowerMac1,1",			"Blue&White G3",
		PMAC_TYPE_YOSEMITE,		paddington_features,
		0
	},
	{	"PowerMac1,2",			"PowerMac G4 PCI Graphics",
		PMAC_TYPE_YIKES,		paddington_features,
		0
	},
	{	"PowerMac2,1",			"iMac FireWire",
		PMAC_TYPE_FW_IMAC,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{	"PowerMac2,2",			"iMac FireWire",
		PMAC_TYPE_FW_IMAC,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{	"PowerMac3,1",			"PowerMac G4 AGP Graphics",
		PMAC_TYPE_SAWTOOTH,		core99_features,
		PMAC_MB_OLD_CORE99
	},
	{	"PowerMac3,2",			"PowerMac G4 AGP Graphics",
		PMAC_TYPE_SAWTOOTH,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{	"PowerMac3,3",			"PowerMac G4 AGP Graphics",
		PMAC_TYPE_SAWTOOTH,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{	"PowerMac3,4",			"PowerMac G4 Silver",
		PMAC_TYPE_QUICKSILVER,		core99_features,
		PMAC_MB_MAY_SLEEP
	},
	{	"PowerMac3,5",			"PowerMac G4 Silver",
		PMAC_TYPE_QUICKSILVER,		core99_features,
		PMAC_MB_MAY_SLEEP
	},
	{	"PowerMac3,6",			"PowerMac G4 Windtunnel",
		PMAC_TYPE_WINDTUNNEL,		core99_features,
		PMAC_MB_MAY_SLEEP,
	},
	{	"PowerMac4,1",			"iMac \"Flower Power\"",
		PMAC_TYPE_PANGEA_IMAC,		pangea_features,
		PMAC_MB_MAY_SLEEP
	},
	{	"PowerMac4,2",			"Flat panel iMac",
		PMAC_TYPE_FLAT_PANEL_IMAC,	pangea_features,
		PMAC_MB_CAN_SLEEP
	},
	{	"PowerMac4,4",			"eMac",
		PMAC_TYPE_EMAC,			core99_features,
		PMAC_MB_MAY_SLEEP
	},
	{	"PowerMac5,1",			"PowerMac G4 Cube",
		PMAC_TYPE_CUBE,			core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{	"PowerMac6,1",			"Flat panel iMac",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP,
	},
	{	"PowerMac6,3",			"Flat panel iMac",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP,
	},
	{	"PowerMac6,4",			"eMac",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP,
	},
	{	"PowerMac10,1",			"Mac mini",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
	},
	{	"iMac,1",			"iMac (first generation)",
		PMAC_TYPE_ORIG_IMAC,		paddington_features,
		0
	},

	/*
	 * Xserve's
	 */

	{	"RackMac1,1",			"XServe",
		PMAC_TYPE_RACKMAC,		rackmac_features,
		0,
	},
	{	"RackMac1,2",			"XServe rev. 2",
		PMAC_TYPE_RACKMAC,		rackmac_features,
		0,
	},

	/*
	 * Laptops
	 */

	{	"AAPL,3400/2400",		"PowerBook 3400",
		PMAC_TYPE_HOOPER,		ohare_features,
		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{	"AAPL,3500",			"PowerBook 3500",
		PMAC_TYPE_KANGA,		ohare_features,
		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{	"AAPL,PowerBook1998",		"PowerBook Wallstreet",
		PMAC_TYPE_WALLSTREET,		heathrow_laptop_features,
		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{	"PowerBook1,1",			"PowerBook 101 (Lombard)",
		PMAC_TYPE_101_PBOOK,		paddington_features,
		PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{	"PowerBook2,1",			"iBook (first generation)",
		PMAC_TYPE_ORIG_IBOOK,		core99_features,
		PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{	"PowerBook2,2",			"iBook FireWire",
		PMAC_TYPE_FW_IBOOK,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
		PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{	"PowerBook3,1",			"PowerBook Pismo",
		PMAC_TYPE_PISMO,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
		PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{	"PowerBook3,2",			"PowerBook Titanium",
		PMAC_TYPE_TITANIUM,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook3,3",			"PowerBook Titanium II",
		PMAC_TYPE_TITANIUM2,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook3,4",			"PowerBook Titanium III",
		PMAC_TYPE_TITANIUM3,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook3,5",			"PowerBook Titanium IV",
		PMAC_TYPE_TITANIUM4,		core99_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook4,1",			"iBook 2",
		PMAC_TYPE_IBOOK2,		pangea_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook4,2",			"iBook 2",
		PMAC_TYPE_IBOOK2,		pangea_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook4,3",			"iBook 2 rev. 2",
		PMAC_TYPE_IBOOK2,		pangea_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{	"PowerBook5,1",			"PowerBook G4 17\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook5,2",			"PowerBook G4 15\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook5,3",			"PowerBook G4 17\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook5,4",			"PowerBook G4 15\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook5,5",			"PowerBook G4 17\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook5,6",			"PowerBook G4 15\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook5,7",			"PowerBook G4 17\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook6,1",			"PowerBook G4 12\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook6,2",			"PowerBook G4",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook6,3",			"iBook G4",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook6,4",			"PowerBook G4 12\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook6,5",			"iBook G4",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{	"PowerBook6,8",			"PowerBook G4 12\"",
		PMAC_TYPE_UNKNOWN_INTREPID,	intrepid_features,
		PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
#else /* CONFIG_POWER4 */
	{	"PowerMac7,2",			"PowerMac G5",
		PMAC_TYPE_POWERMAC_G5,		g5_features,
		0,
	},
#ifdef CONFIG_PPC64
	{	"PowerMac7,3",			"PowerMac G5",
		PMAC_TYPE_POWERMAC_G5,		g5_features,
		0,
	},
	{	"PowerMac8,1",			"iMac G5",
		PMAC_TYPE_IMAC_G5,		g5_features,
		0,
	},
	{	"PowerMac9,1",			"PowerMac G5",
		PMAC_TYPE_POWERMAC_G5_U3L,	g5_features,
		0,
	},
	{	"RackMac3,1",			"XServe G5",
		PMAC_TYPE_XSERVE_G5,		g5_features,
		0,
	},
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_POWER4 */
};
2414 | |||
2415 | /* | ||
2416 | * The toplevel feature_call callback | ||
2417 | */ | ||
2418 | long pmac_do_feature_call(unsigned int selector, ...) | ||
2419 | { | ||
2420 | struct device_node *node; | ||
2421 | long param, value; | ||
2422 | int i; | ||
2423 | feature_call func = NULL; | ||
2424 | va_list args; | ||
2425 | |||
2426 | if (pmac_mb.features) | ||
2427 | for (i=0; pmac_mb.features[i].function; i++) | ||
2428 | if (pmac_mb.features[i].selector == selector) { | ||
2429 | func = pmac_mb.features[i].function; | ||
2430 | break; | ||
2431 | } | ||
2432 | if (!func) | ||
2433 | for (i=0; any_features[i].function; i++) | ||
2434 | if (any_features[i].selector == selector) { | ||
2435 | func = any_features[i].function; | ||
2436 | break; | ||
2437 | } | ||
2438 | if (!func) | ||
2439 | return -ENODEV; | ||
2440 | |||
2441 | va_start(args, selector); | ||
2442 | node = (struct device_node*)va_arg(args, void*); | ||
2443 | param = va_arg(args, long); | ||
2444 | value = va_arg(args, long); | ||
2445 | va_end(args); | ||
2446 | |||
2447 | return func(node, param, value); | ||
2448 | } | ||
2449 | |||
/* Identify the motherboard and fill in the global pmac_mb descriptor
 * (model id, name, feature table, board flags). Also sets the default
 * power-saving knobs. Returns 0 on success, -ENODEV when neither the
 * device-tree nor the mac-io chip type allows an identification.
 */
static int __init probe_motherboard(void)
{
	int i;
	struct macio_chip *macio = &macio_chips[0];
	const char *model = NULL;
	struct device_node *dt;

	/* Lookup known motherboard type in device-tree. First try an
	 * exact match on the "model" property, then try a "compatible"
	 * match if none is found.
	 */
	dt = find_devices("device-tree");
	if (dt != NULL)
		model = (const char *) get_property(dt, "model", NULL);
	for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
		if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
			pmac_mb = pmac_mb_defs[i];
			goto found;
		}
	}
	for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
		if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
			pmac_mb = pmac_mb_defs[i];
			goto found;
		}
	}

	/* Fallback to selection depending on mac-io chip type */
	switch(macio->type) {
#ifndef CONFIG_POWER4
	case macio_grand_central:
		pmac_mb.model_id = PMAC_TYPE_PSURGE;
		pmac_mb.model_name = "Unknown PowerSurge";
		break;
	case macio_ohare:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
		pmac_mb.model_name = "Unknown OHare-based";
		break;
	case macio_heathrow:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
		pmac_mb.model_name = "Unknown Heathrow-based";
		pmac_mb.features = heathrow_desktop_features;
		break;
	case macio_paddington:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
		pmac_mb.model_name = "Unknown Paddington-based";
		pmac_mb.features = paddington_features;
		break;
	case macio_keylargo:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
		pmac_mb.model_name = "Unknown Keylargo-based";
		pmac_mb.features = core99_features;
		break;
	case macio_pangea:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
		pmac_mb.model_name = "Unknown Pangea-based";
		pmac_mb.features = pangea_features;
		break;
	case macio_intrepid:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
		pmac_mb.model_name = "Unknown Intrepid-based";
		pmac_mb.features = intrepid_features;
		break;
#else /* CONFIG_POWER4 */
	case macio_keylargo2:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
		pmac_mb.model_name = "Unknown K2-based";
		pmac_mb.features = g5_features;
		break;
#endif /* CONFIG_POWER4 */
	default:
		return -ENODEV;
	}
found:
#ifndef CONFIG_POWER4
	/* Fixup Hooper vs. Comet: both report as Hooper in the table;
	 * a bit in the machine-id register at 0xf3000034 tells them apart.
	 */
	if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
		u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
		if (!mach_id_ptr)
			return -ENODEV;
		/* Here, I used to disable the media-bay on comet. It
		 * appears this is wrong, the floppy connector is actually
		 * a kind of media-bay and works with the current driver.
		 */
		if (__raw_readl(mach_id_ptr) & 0x20000000UL)
			pmac_mb.model_id = PMAC_TYPE_COMET;
		iounmap(mach_id_ptr);
	}
#endif /* CONFIG_POWER4 */

#ifdef CONFIG_6xx
	/* Set default value of powersave_nap on machines that support it.
	 * It appears that uninorth rev 3 has a problem with it, we don't
	 * enable it on those. In theory, the flush-on-lock property is
	 * supposed to be set when not supported, but I'm not very confident
	 * that all Apple OF revs did it properly, I do it the paranoid way.
	 *
	 * The while(...) { ... break; } construct below is a run-once
	 * block that "break" exits early from, not a real loop.
	 */
	while (uninorth_base && uninorth_rev > 3) {
		struct device_node *np = find_path_device("/cpus");
		if (!np || !np->child) {
			printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
			break;
		}
		np = np->child;
		/* Nap mode not supported on SMP */
		if (np->sibling)
			break;
		/* Nap mode not supported if flush-on-lock property is present */
		if (get_property(np, "flush-on-lock", NULL))
			break;
		powersave_nap = 1;
		printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
		break;
	}

	/* On CPUs that support it (750FX), lowspeed by default during
	 * NAP mode
	 */
	powersave_lowspeed = 1;
#endif /* CONFIG_6xx */
#ifdef CONFIG_POWER4
	powersave_nap = 1;
#endif
	/* Check for "mobile" machine */
	if (model && (strncmp(model, "PowerBook", 9) == 0
		   || strncmp(model, "iBook", 5) == 0))
		pmac_mb.board_flags |= PMAC_MB_MOBILE;


	printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
	return 0;
}
2582 | |||
/* Initialize the Core99 UniNorth host bridge and memory controller.
 * Locates the "uni-n" (or G5 "u3") node, maps its registers into
 * uninorth_base (and u3_ht for U3), reads the revision, and applies
 * the same arbiter/timeout tweaks Apple's firmware and MacOS X do.
 */
static void __init probe_uninorth(void)
{
	unsigned long actrl;

	/* Locate core99 Uni-N */
	uninorth_node = of_find_node_by_name(NULL, "uni-n");
	/* Locate G5 u3 */
	if (uninorth_node == NULL) {
		uninorth_node = of_find_node_by_name(NULL, "u3");
		uninorth_u3 = 1;
	}
	if (uninorth_node && uninorth_node->n_addrs > 0) {
		unsigned long address = uninorth_node->addrs[0].address;
		uninorth_base = ioremap(address, 0x40000);
		uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
		if (uninorth_u3)
			u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
	} else
		uninorth_node = NULL;

	if (!uninorth_node)
		return;

	printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
	       uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
	printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);

	/* Set the arbiter QAck delay according to what Apple does
	 */
	if (uninorth_rev < 0x11) {
		actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
		actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
			UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
		UN_OUT(UNI_N_ARB_CTRL, actrl);
	}

	/* Some more magic as done by them in recent MacOS X on UniNorth
	 * revs 1.5 to 2.0 and Pangea. Seem to toggle the UniN Maxbus/PCI
	 * memory timeout
	 */
	if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
		UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
}
2628 | |||
2629 | static void __init probe_one_macio(const char *name, const char *compat, int type) | ||
2630 | { | ||
2631 | struct device_node* node; | ||
2632 | int i; | ||
2633 | volatile u32 __iomem * base; | ||
2634 | u32* revp; | ||
2635 | |||
2636 | node = find_devices(name); | ||
2637 | if (!node || !node->n_addrs) | ||
2638 | return; | ||
2639 | if (compat) | ||
2640 | do { | ||
2641 | if (device_is_compatible(node, compat)) | ||
2642 | break; | ||
2643 | node = node->next; | ||
2644 | } while (node); | ||
2645 | if (!node) | ||
2646 | return; | ||
2647 | for(i=0; i<MAX_MACIO_CHIPS; i++) { | ||
2648 | if (!macio_chips[i].of_node) | ||
2649 | break; | ||
2650 | if (macio_chips[i].of_node == node) | ||
2651 | return; | ||
2652 | } | ||
2653 | if (i >= MAX_MACIO_CHIPS) { | ||
2654 | printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); | ||
2655 | printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name); | ||
2656 | return; | ||
2657 | } | ||
2658 | base = ioremap(node->addrs[0].address, node->addrs[0].size); | ||
2659 | if (!base) { | ||
2660 | printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n"); | ||
2661 | return; | ||
2662 | } | ||
2663 | if (type == macio_keylargo) { | ||
2664 | u32 *did = (u32 *)get_property(node, "device-id", NULL); | ||
2665 | if (*did == 0x00000025) | ||
2666 | type = macio_pangea; | ||
2667 | if (*did == 0x0000003e) | ||
2668 | type = macio_intrepid; | ||
2669 | } | ||
2670 | macio_chips[i].of_node = node; | ||
2671 | macio_chips[i].type = type; | ||
2672 | macio_chips[i].base = base; | ||
2673 | macio_chips[i].flags = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON; | ||
2674 | macio_chips[i].name = macio_names[type]; | ||
2675 | revp = (u32 *)get_property(node, "revision-id", NULL); | ||
2676 | if (revp) | ||
2677 | macio_chips[i].rev = *revp; | ||
2678 | printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n", | ||
2679 | macio_names[type], macio_chips[i].rev, macio_chips[i].base); | ||
2680 | } | ||
2681 | |||
2682 | static int __init | ||
2683 | probe_macios(void) | ||
2684 | { | ||
2685 | /* Warning, ordering is important */ | ||
2686 | probe_one_macio("gc", NULL, macio_grand_central); | ||
2687 | probe_one_macio("ohare", NULL, macio_ohare); | ||
2688 | probe_one_macio("pci106b,7", NULL, macio_ohareII); | ||
2689 | probe_one_macio("mac-io", "keylargo", macio_keylargo); | ||
2690 | probe_one_macio("mac-io", "paddington", macio_paddington); | ||
2691 | probe_one_macio("mac-io", "gatwick", macio_gatwick); | ||
2692 | probe_one_macio("mac-io", "heathrow", macio_heathrow); | ||
2693 | probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2); | ||
2694 | |||
2695 | /* Make sure the "main" macio chip appear first */ | ||
2696 | if (macio_chips[0].type == macio_gatwick | ||
2697 | && macio_chips[1].type == macio_heathrow) { | ||
2698 | struct macio_chip temp = macio_chips[0]; | ||
2699 | macio_chips[0] = macio_chips[1]; | ||
2700 | macio_chips[1] = temp; | ||
2701 | } | ||
2702 | if (macio_chips[0].type == macio_ohareII | ||
2703 | && macio_chips[1].type == macio_ohare) { | ||
2704 | struct macio_chip temp = macio_chips[0]; | ||
2705 | macio_chips[0] = macio_chips[1]; | ||
2706 | macio_chips[1] = temp; | ||
2707 | } | ||
2708 | macio_chips[0].lbus.index = 0; | ||
2709 | macio_chips[1].lbus.index = 1; | ||
2710 | |||
2711 | return (macio_chips[0].of_node == NULL) ? -ENODEV : 0; | ||
2712 | } | ||
2713 | |||
2714 | static void __init | ||
2715 | initial_serial_shutdown(struct device_node *np) | ||
2716 | { | ||
2717 | int len; | ||
2718 | struct slot_names_prop { | ||
2719 | int count; | ||
2720 | char name[1]; | ||
2721 | } *slots; | ||
2722 | char *conn; | ||
2723 | int port_type = PMAC_SCC_ASYNC; | ||
2724 | int modem = 0; | ||
2725 | |||
2726 | slots = (struct slot_names_prop *)get_property(np, "slot-names", &len); | ||
2727 | conn = get_property(np, "AAPL,connector", &len); | ||
2728 | if (conn && (strcmp(conn, "infrared") == 0)) | ||
2729 | port_type = PMAC_SCC_IRDA; | ||
2730 | else if (device_is_compatible(np, "cobalt")) | ||
2731 | modem = 1; | ||
2732 | else if (slots && slots->count > 0) { | ||
2733 | if (strcmp(slots->name, "IrDA") == 0) | ||
2734 | port_type = PMAC_SCC_IRDA; | ||
2735 | else if (strcmp(slots->name, "Modem") == 0) | ||
2736 | modem = 1; | ||
2737 | } | ||
2738 | if (modem) | ||
2739 | pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0); | ||
2740 | pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0); | ||
2741 | } | ||
2742 | |||
/* Apply one-time power/clock setup right after the mac-io chips have been
 * probed: force some cells on for PCI probing (GMAC, FireWire, ATA-100),
 * force others off (AirPort, sound, serial ports) so drivers find them in
 * a known state and can power them up themselves later.
 */
static void __init
set_initial_features(void)
{
	struct device_node *np;

	/* That hack appears to be necessary for some StarMax motherboards
	 * but I'm not too sure it was audited for side-effects on other
	 * ohare based machines...
	 * Since I still have difficulties figuring the right way to
	 * differenciate them all and since that hack was there for a long
	 * time, I'll keep it around
	 */
	if (macio_chips[0].type == macio_ohare && !find_devices("via-pmu")) {
		/* "macio" looks unused but is referenced by the MACIO_*
		 * macros below.
		 */
		struct macio_chip *macio = &macio_chips[0];
		MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
	} else if (macio_chips[0].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	} else if (macio_chips[1].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[1];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	}

#ifdef CONFIG_POWER4
	if (macio_chips[0].type == macio_keylargo2) {
#ifndef CONFIG_SMP
		/* On SMP machines running UP, we have the second CPU eating
		 * bus cycles. We need to take it off the bus. This is done
		 * from pmac_smp for SMP kernels running on one CPU
		 */
		/* Two lookups: the second of_find_node_by_type() call gets
		 * the SECOND "cpu" node, i.e. CPU 1.
		 */
		np = of_find_node_by_type(NULL, "cpu");
		if (np != NULL)
			np = of_find_node_by_type(np, "cpu");
		if (np != NULL) {
			g5_phy_disable_cpu1();
			of_node_put(np);
		}
#endif /* CONFIG_SMP */
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		np = of_find_node_by_name(NULL, "ethernet");
		while(np) {
			if (device_is_compatible(np, "K2-GMAC"))
				g5_gmac_enable(np, 0, 1);
			np = of_find_node_by_name(np, "ethernet");
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a better way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		np = of_find_node_by_name(NULL, "firewire");
		while(np) {
			if (device_is_compatible(np, "pci106b,5811")) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				g5_fw_enable(np, 0, 1);
			}
			np = of_find_node_by_name(np, "firewire");
		}
	}
#else /* CONFIG_POWER4 */

	if (macio_chips[0].type == macio_keylargo ||
	    macio_chips[0].type == macio_pangea ||
	    macio_chips[0].type == macio_intrepid) {
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		np = of_find_node_by_name(NULL, "ethernet");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && device_is_compatible(np, "gmac"))
				core99_gmac_enable(np, 0, 1);
			np = of_find_node_by_name(np, "ethernet");
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a better way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		np = of_find_node_by_name(NULL, "firewire");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && (device_is_compatible(np, "pci106b,18") ||
			        device_is_compatible(np, "pci106b,30") ||
			        device_is_compatible(np, "pci11c1,5811"))) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				core99_firewire_enable(np, 0, 1);
			}
			np = of_find_node_by_name(np, "firewire");
		}

		/* Enable ATA-100 before PCI probe. */
		np = of_find_node_by_name(NULL, "ata-6");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && device_is_compatible(np, "kauai-ata")) {
				core99_ata100_enable(np, 1);
			}
			np = of_find_node_by_name(np, "ata-6");
		}

		/* Switch airport off */
		/* NOTE(review): the inner "np &&" test is redundant —
		 * the while condition already guarantees np != NULL.
		 */
		np = find_devices("radio");
		while(np) {
			if (np && np->parent == macio_chips[0].of_node) {
				macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
				core99_airport_enable(np, 0, 0);
			}
			np = np->next;
		}
	}

	/* On all machines that support sound PM, switch sound off */
	if (macio_chips[0].of_node)
		pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
				     macio_chips[0].of_node, 0, 0);

	/* While on some desktop G3s, we turn it back on */
	if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
	    && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
		pmac_mb.model_id == PMAC_TYPE_SILK)) {
		/* "macio" is used by the MACIO_BIS/MACIO_BIC macros */
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
	}

	/* Some machine models need the clock chip to be properly setup for
	 * clock spreading now. This should be a platform function but we
	 * don't do these at the moment
	 */
	pmac_tweak_clock_spreading(1);

#endif /* CONFIG_POWER4 */

	/* On all machines, switch modem & serial ports off */
	np = find_devices("ch-a");
	while(np) {
		initial_serial_shutdown(np);
		np = np->next;
	}
	np = find_devices("ch-b");
	while(np) {
		initial_serial_shutdown(np);
		np = np->next;
	}
}
2896 | |||
/* Top-level entry point of the PowerMac feature code, called early at
 * boot.  The call order below matters: motherboard probing needs the
 * mac-io chips and the low-level i2c layer to be up first.
 */
void __init
pmac_feature_init(void)
{
	/* Detect the UniNorth memory controller */
	probe_uninorth();

	/* Probe mac-io controllers; without one there is nothing we can
	 * drive, so bail out.
	 */
	if (probe_macios()) {
		printk(KERN_WARNING "No mac-io chip found\n");
		return;
	}

	/* Setup low-level i2c stuffs (polled accesses used before the
	 * real i2c driver is available)
	 */
	pmac_init_low_i2c();

	/* Probe machine type; unknown models are not fatal, we just warn */
	if (probe_motherboard())
		printk(KERN_WARNING "Unknown PowerMac !\n");

	/* Set some initial features (turn off some chips that will
	 * be later turned on)
	 */
	set_initial_features();
}
2921 | |||
/* Late fixups run as a device_initcall.  The OF resource reservation
 * below is currently compiled out (#if 0), so this is a no-op that
 * always succeeds.
 */
int __init pmac_feature_late_init(void)
{
#if 0
	struct device_node *np;

	/* Request some resources late */
	if (uninorth_node)
		request_OF_resource(uninorth_node, 0, NULL);
	np = find_devices("hammerhead");
	if (np)
		request_OF_resource(np, 0, NULL);
	np = find_devices("interrupt-controller");
	if (np)
		request_OF_resource(np, 0, NULL);
#endif
	return 0;
}

device_initcall(pmac_feature_late_init);
2941 | |||
/* Entire HyperTransport diagnostic block below is compiled out. */
#if 0
/* Decode and print a HyperTransport link's frequency and in/out widths
 * from its config (cfg) and frequency (frq) registers.
 */
static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
{
	int	freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
	int	bits[8] = { 8,16,0,32,2,4,0,0 };
	int	freq = (frq >> 8) & 0xf;

	if (freqs[freq] == 0)
		printk("%s: Unknown HT link frequency %x\n", name, freq);
	else
		printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
		       name, freqs[freq],
		       bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
}

/* Dump the U3 HT link and the PCI-X bridge's up/down link speeds */
void __init pmac_check_ht_link(void)
{
#if 0 /* Disabled for now */
	u32	ufreq, freq, ucfg, cfg;
	struct device_node *pcix_node;
	u8	px_bus, px_devfn;
	struct pci_controller *px_hose;

	(void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
	ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
	ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
	dump_HT_speeds("U3 HyperTransport", cfg, freq);

	pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
	if (pcix_node == NULL) {
		printk("No PCI-X bridge found\n");
		return;
	}
	if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
		printk("PCI-X bridge found but not matched to pci\n");
		return;
	}
	px_hose = pci_find_hose_for_OF_device(pcix_node);
	if (px_hose == NULL) {
		printk("PCI-X bridge found but not matched to host\n");
		return;
	}
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
	dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
	dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
#endif
}

#endif /* 0 -- NOTE(review): this closes the "#if 0" above, not a
	* CONFIG_POWER4 conditional as the old comment claimed. */
2994 | |||
2995 | /* | ||
2996 | * Early video resume hook | ||
2997 | */ | ||
2998 | |||
2999 | static void (*pmac_early_vresume_proc)(void *data); | ||
3000 | static void *pmac_early_vresume_data; | ||
3001 | |||
3002 | void pmac_set_early_video_resume(void (*proc)(void *data), void *data) | ||
3003 | { | ||
3004 | if (_machine != _MACH_Pmac) | ||
3005 | return; | ||
3006 | preempt_disable(); | ||
3007 | pmac_early_vresume_proc = proc; | ||
3008 | pmac_early_vresume_data = data; | ||
3009 | preempt_enable(); | ||
3010 | } | ||
3011 | EXPORT_SYMBOL(pmac_set_early_video_resume); | ||
3012 | |||
3013 | void pmac_call_early_video_resume(void) | ||
3014 | { | ||
3015 | if (pmac_early_vresume_proc) | ||
3016 | pmac_early_vresume_proc(pmac_early_vresume_data); | ||
3017 | } | ||
3018 | |||
3019 | /* | ||
3020 | * AGP related suspend/resume code | ||
3021 | */ | ||
3022 | |||
3023 | static struct pci_dev *pmac_agp_bridge; | ||
3024 | static int (*pmac_agp_suspend)(struct pci_dev *bridge); | ||
3025 | static int (*pmac_agp_resume)(struct pci_dev *bridge); | ||
3026 | |||
3027 | void pmac_register_agp_pm(struct pci_dev *bridge, | ||
3028 | int (*suspend)(struct pci_dev *bridge), | ||
3029 | int (*resume)(struct pci_dev *bridge)) | ||
3030 | { | ||
3031 | if (suspend || resume) { | ||
3032 | pmac_agp_bridge = bridge; | ||
3033 | pmac_agp_suspend = suspend; | ||
3034 | pmac_agp_resume = resume; | ||
3035 | return; | ||
3036 | } | ||
3037 | if (bridge != pmac_agp_bridge) | ||
3038 | return; | ||
3039 | pmac_agp_suspend = pmac_agp_resume = NULL; | ||
3040 | return; | ||
3041 | } | ||
3042 | EXPORT_SYMBOL(pmac_register_agp_pm); | ||
3043 | |||
3044 | void pmac_suspend_agp_for_card(struct pci_dev *dev) | ||
3045 | { | ||
3046 | if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL) | ||
3047 | return; | ||
3048 | if (pmac_agp_bridge->bus != dev->bus) | ||
3049 | return; | ||
3050 | pmac_agp_suspend(pmac_agp_bridge); | ||
3051 | } | ||
3052 | EXPORT_SYMBOL(pmac_suspend_agp_for_card); | ||
3053 | |||
3054 | void pmac_resume_agp_for_card(struct pci_dev *dev) | ||
3055 | { | ||
3056 | if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL) | ||
3057 | return; | ||
3058 | if (pmac_agp_bridge->bus != dev->bus) | ||
3059 | return; | ||
3060 | pmac_agp_resume(pmac_agp_bridge); | ||
3061 | } | ||
3062 | EXPORT_SYMBOL(pmac_resume_agp_for_card); | ||
diff --git a/arch/powerpc/platforms/powermac/pmac_low_i2c.c b/arch/powerpc/platforms/powermac/pmac_low_i2c.c new file mode 100644 index 000000000000..f3f39e8e337a --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_low_i2c.c | |||
@@ -0,0 +1,523 @@ | |||
1 | /* | ||
2 | * arch/ppc/platforms/pmac_low_i2c.c | ||
3 | * | ||
4 | * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This file contains some low-level i2c access routines that | ||
12 | * need to be used by various bits of the PowerMac platform code | ||
13 | * at times where the real asynchronous & interrupt driven driver | ||
14 | * cannot be used. The API borrows some semantics from the darwin | ||
15 | * driver in order to ease the implementation of the platform | ||
16 | * properties parser | ||
17 | */ | ||
18 | |||
19 | #undef DEBUG | ||
20 | |||
21 | #include <linux/config.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/adb.h> | ||
27 | #include <linux/pmu.h> | ||
28 | #include <asm/keylargo.h> | ||
29 | #include <asm/uninorth.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/prom.h> | ||
32 | #include <asm/machdep.h> | ||
33 | #include <asm/pmac_low_i2c.h> | ||
34 | |||
35 | #define MAX_LOW_I2C_HOST 4 | ||
36 | |||
37 | #ifdef DEBUG | ||
38 | #define DBG(x...) do {\ | ||
39 | printk(KERN_DEBUG "KW:" x); \ | ||
40 | } while(0) | ||
41 | #else | ||
42 | #define DBG(x...) | ||
43 | #endif | ||
44 | |||
struct low_i2c_host;

/* Per-bus access method: transfer @len bytes to/from @data at i2c
 * address @addr (R/W bit in bit 0) and sub-address @sub.
 */
typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len);

/* One polled i2c bus (a keywest cell or the PMU) */
struct low_i2c_host
{
	struct device_node	*np;		/* OF device node */
	struct semaphore	mutex;		/* Access mutex for use by i2c-keywest */
	low_i2c_func_t		func;		/* Access function */
	unsigned int		is_open : 1;	/* Poor man's access control */
	int			mode;		/* Current mode */
	int			channel;	/* Current channel */
	int			num_channels;	/* Number of channels */
	void __iomem		*base;		/* For keywest-i2c, base address */
	int			bsteps;		/* And register stepping */
	int			speed;		/* And speed */
};

/* Static host table; a slot with np == NULL is free (see
 * find_low_i2c_host(NULL)).
 */
static struct low_i2c_host	low_i2c_hosts[MAX_LOW_I2C_HOST];
64 | |||
65 | /* No locking is necessary on allocation, we are running way before | ||
66 | * anything can race with us | ||
67 | */ | ||
68 | static struct low_i2c_host *find_low_i2c_host(struct device_node *np) | ||
69 | { | ||
70 | int i; | ||
71 | |||
72 | for (i = 0; i < MAX_LOW_I2C_HOST; i++) | ||
73 | if (low_i2c_hosts[i].np == np) | ||
74 | return &low_i2c_hosts[i]; | ||
75 | return NULL; | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * | ||
80 | * i2c-keywest implementation (UniNorth, U2, U3, Keylargo's) | ||
81 | * | ||
82 | */ | ||
83 | |||
84 | /* | ||
85 | * Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h, | ||
86 | * should be moved somewhere in include/asm-ppc/ | ||
87 | */ | ||
88 | /* Register indices */ | ||
89 | typedef enum { | ||
90 | reg_mode = 0, | ||
91 | reg_control, | ||
92 | reg_status, | ||
93 | reg_isr, | ||
94 | reg_ier, | ||
95 | reg_addr, | ||
96 | reg_subaddr, | ||
97 | reg_data | ||
98 | } reg_t; | ||
99 | |||
100 | |||
101 | /* Mode register */ | ||
102 | #define KW_I2C_MODE_100KHZ 0x00 | ||
103 | #define KW_I2C_MODE_50KHZ 0x01 | ||
104 | #define KW_I2C_MODE_25KHZ 0x02 | ||
105 | #define KW_I2C_MODE_DUMB 0x00 | ||
106 | #define KW_I2C_MODE_STANDARD 0x04 | ||
107 | #define KW_I2C_MODE_STANDARDSUB 0x08 | ||
108 | #define KW_I2C_MODE_COMBINED 0x0C | ||
109 | #define KW_I2C_MODE_MODE_MASK 0x0C | ||
110 | #define KW_I2C_MODE_CHAN_MASK 0xF0 | ||
111 | |||
112 | /* Control register */ | ||
113 | #define KW_I2C_CTL_AAK 0x01 | ||
114 | #define KW_I2C_CTL_XADDR 0x02 | ||
115 | #define KW_I2C_CTL_STOP 0x04 | ||
116 | #define KW_I2C_CTL_START 0x08 | ||
117 | |||
118 | /* Status register */ | ||
119 | #define KW_I2C_STAT_BUSY 0x01 | ||
120 | #define KW_I2C_STAT_LAST_AAK 0x02 | ||
121 | #define KW_I2C_STAT_LAST_RW 0x04 | ||
122 | #define KW_I2C_STAT_SDA 0x08 | ||
123 | #define KW_I2C_STAT_SCL 0x10 | ||
124 | |||
125 | /* IER & ISR registers */ | ||
126 | #define KW_I2C_IRQ_DATA 0x01 | ||
127 | #define KW_I2C_IRQ_ADDR 0x02 | ||
128 | #define KW_I2C_IRQ_STOP 0x04 | ||
129 | #define KW_I2C_IRQ_START 0x08 | ||
130 | #define KW_I2C_IRQ_MASK 0x0F | ||
131 | |||
132 | /* State machine states */ | ||
133 | enum { | ||
134 | state_idle, | ||
135 | state_addr, | ||
136 | state_read, | ||
137 | state_write, | ||
138 | state_stop, | ||
139 | state_dead | ||
140 | }; | ||
141 | |||
142 | #define WRONG_STATE(name) do {\ | ||
143 | printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \ | ||
144 | name, __kw_state_names[state], isr); \ | ||
145 | } while(0) | ||
146 | |||
147 | static const char *__kw_state_names[] = { | ||
148 | "state_idle", | ||
149 | "state_addr", | ||
150 | "state_read", | ||
151 | "state_write", | ||
152 | "state_stop", | ||
153 | "state_dead" | ||
154 | }; | ||
155 | |||
/* MMIO register accessors: registers are spaced (1 << bsteps) bytes
 * apart.  The dummy read-back after each write presumably flushes the
 * posted write to the cell (standard MMIO idiom) — confirm against the
 * keywest cell docs.
 */
static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg)
{
	return readb(host->base + (((unsigned int)reg) << host->bsteps));
}

static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val)
{
	writeb(val, host->base + (((unsigned)reg) << host->bsteps));
	(void)__kw_read_reg(host, reg_subaddr);
}

/* Shorthand for use inside functions that have a "host" local */
#define kw_write_reg(reg, val)	__kw_write_reg(host, reg, val)
#define kw_read_reg(reg)	__kw_read_reg(host, reg)
169 | |||
170 | |||
/* Don't schedule, the g5 fan controller is too
 * timing sensitive
 */
/* Busy-wait until an event bit appears in the ISR.  Returns the masked
 * ISR value, or 0 if nothing happened within the (uncalibrated)
 * busy-wait budget — callers treat 0 as a timeout.
 */
static u8 kw_wait_interrupt(struct low_i2c_host* host)
{
	int i, j;
	u8 isr;

	for (i = 0; i < 100000; i++) {
		isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
		if (isr != 0)
			return isr;

		/* This code is used with the timebase frozen, we cannot rely
		 * on udelay ! For now, just use a bogus loop
		 */
		for (j = 1; j < 10000; j++)
			mb();
	}
	return isr;
}
192 | |||
/* Advance the polled transfer state machine one step based on the ISR
 * bits in @isr.  @rw selects read (non-zero) vs write; *@data and *@len
 * track the remaining buffer and are advanced in place.  Returns the
 * next state; *@rc is set to an error code on failure.
 */
static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr)
{
	u8 ack;

	DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr);

	if (isr == 0) {
		/* No event: a timeout, unless we are merely waiting for the
		 * bus to go idle after the stop condition.
		 */
		if (state != state_stop) {
			DBG("KW: Timeout !\n");
			*rc = -EIO;
			goto stop;
		}
		/* NOTE(review): always true here, given the test above */
		if (state == state_stop) {
			ack = kw_read_reg(reg_status);
			if (!(ack & KW_I2C_STAT_BUSY)) {
				state = state_idle;
				kw_write_reg(reg_ier, 0x00);
			}
		}
		return state;
	}

	if (isr & KW_I2C_IRQ_ADDR) {
		/* Address phase completed; check the slave's ACK */
		ack = kw_read_reg(reg_status);
		if (state != state_addr) {
			kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
			WRONG_STATE("KW_I2C_IRQ_ADDR");
			*rc = -EIO;
			goto stop;
		}
		if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
			/* NAK on address: no device there */
			*rc = -ENODEV;
			DBG("KW: NAK on address\n");
			return state_stop;
		} else {
			if (rw) {
				state = state_read;
				/* Auto-ACK all but the final byte */
				if (*len > 1)
					kw_write_reg(reg_control, KW_I2C_CTL_AAK);
			} else {
				state = state_write;
				/* Prime the first data byte */
				kw_write_reg(reg_data, **data);
				(*data)++; (*len)--;
			}
		}
		kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
	}

	if (isr & KW_I2C_IRQ_DATA) {
		if (state == state_read) {
			**data = kw_read_reg(reg_data);
			(*data)++; (*len)--;
			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
			if ((*len) == 0)
				state = state_stop;
			else if ((*len) == 1)
				/* Drop auto-ACK so the last byte gets NAKed */
				kw_write_reg(reg_control, 0);
		} else if (state == state_write) {
			/* Check ack status */
			ack = kw_read_reg(reg_status);
			if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
				DBG("KW: nack on data write\n");
				*rc = -EIO;
				goto stop;
			} else if (*len) {
				kw_write_reg(reg_data, **data);
				(*data)++; (*len)--;
			} else {
				/* All bytes sent: issue the stop condition */
				kw_write_reg(reg_control, KW_I2C_CTL_STOP);
				state = state_stop;
				*rc = 0;
			}
			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
		} else {
			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
			WRONG_STATE("KW_I2C_IRQ_DATA");
			if (state != state_stop) {
				*rc = -EIO;
				goto stop;
			}
		}
	}

	if (isr & KW_I2C_IRQ_STOP) {
		kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
		if (state != state_stop) {
			WRONG_STATE("KW_I2C_IRQ_STOP");
			*rc = -EIO;
		}
		return state_idle;
	}

	if (isr & KW_I2C_IRQ_START)
		kw_write_reg(reg_isr, KW_I2C_IRQ_START);

	return state;

 stop:
	/* Abort the transfer: request a stop condition on the bus */
	kw_write_reg(reg_control, KW_I2C_CTL_STOP);
	return state_stop;
}
293 | |||
/* Perform one polled transfer on a keywest cell: program mode/channel,
 * send @addr (R/W bit in bit 0) and, in sub-address modes, @subaddr,
 * then move @len bytes to/from @data.  Interrupts stay masked; we spin
 * in kw_wait_interrupt() and drive the state machine by hand.
 * Returns 0 on success or a negative errno.
 */
static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len)
{
	u8 mode_reg = host->speed;
	int state = state_addr;
	int rc = 0;

	/* Setup mode & subaddress if any */
	switch(host->mode) {
	case pmac_low_i2c_mode_dumb:
		printk(KERN_ERR "low_i2c: Dumb mode not supported !\n");
		return -EINVAL;
	case pmac_low_i2c_mode_std:
		mode_reg |= KW_I2C_MODE_STANDARD;
		break;
	case pmac_low_i2c_mode_stdsub:
		mode_reg |= KW_I2C_MODE_STANDARDSUB;
		break;
	case pmac_low_i2c_mode_combined:
		mode_reg |= KW_I2C_MODE_COMBINED;
		break;
	}

	/* Setup channel & clear pending irqs (write-1-to-clear) */
	kw_write_reg(reg_isr, kw_read_reg(reg_isr));
	kw_write_reg(reg_mode, mode_reg | (host->channel << 4));
	kw_write_reg(reg_status, 0);

	/* Set up address and r/w bit */
	kw_write_reg(reg_addr, addr);

	/* Set up the sub address */
	if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
	    || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
		kw_write_reg(reg_subaddr, subaddr);

	/* Start sending address & disable interrupt (we poll instead) */
	kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/);
	kw_write_reg(reg_control, KW_I2C_CTL_XADDR);

	/* State machine, to turn into an interrupt handler */
	while(state != state_idle) {
		u8 isr = kw_wait_interrupt(host);
		state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr);
	}

	return rc;
}
341 | |||
342 | static void keywest_low_i2c_add(struct device_node *np) | ||
343 | { | ||
344 | struct low_i2c_host *host = find_low_i2c_host(NULL); | ||
345 | u32 *psteps, *prate, steps, aoffset = 0; | ||
346 | struct device_node *parent; | ||
347 | |||
348 | if (host == NULL) { | ||
349 | printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", | ||
350 | np->full_name); | ||
351 | return; | ||
352 | } | ||
353 | memset(host, 0, sizeof(*host)); | ||
354 | |||
355 | init_MUTEX(&host->mutex); | ||
356 | host->np = of_node_get(np); | ||
357 | psteps = (u32 *)get_property(np, "AAPL,address-step", NULL); | ||
358 | steps = psteps ? (*psteps) : 0x10; | ||
359 | for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) | ||
360 | steps >>= 1; | ||
361 | parent = of_get_parent(np); | ||
362 | host->num_channels = 1; | ||
363 | if (parent && parent->name[0] == 'u') { | ||
364 | host->num_channels = 2; | ||
365 | aoffset = 3; | ||
366 | } | ||
367 | /* Select interface rate */ | ||
368 | host->speed = KW_I2C_MODE_100KHZ; | ||
369 | prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL); | ||
370 | if (prate) switch(*prate) { | ||
371 | case 100: | ||
372 | host->speed = KW_I2C_MODE_100KHZ; | ||
373 | break; | ||
374 | case 50: | ||
375 | host->speed = KW_I2C_MODE_50KHZ; | ||
376 | break; | ||
377 | case 25: | ||
378 | host->speed = KW_I2C_MODE_25KHZ; | ||
379 | break; | ||
380 | } | ||
381 | |||
382 | host->mode = pmac_low_i2c_mode_std; | ||
383 | host->base = ioremap(np->addrs[0].address + aoffset, | ||
384 | np->addrs[0].size); | ||
385 | host->func = keywest_low_i2c_func; | ||
386 | } | ||
387 | |||
388 | /* | ||
389 | * | ||
390 | * PMU implementation | ||
391 | * | ||
392 | */ | ||
393 | |||
394 | |||
395 | #ifdef CONFIG_ADB_PMU | ||
396 | |||
/* PMU bus access method — not implemented yet, always fails. */
static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len)
{
	// TODO
	return -ENODEV;
}
402 | |||
/* Register the PMU i2c bus (3 channels).  Transfers go through the
 * pmu_low_i2c_func() stub above, which currently returns -ENODEV.
 */
static void pmu_low_i2c_add(struct device_node *np)
{
	struct low_i2c_host	*host = find_low_i2c_host(NULL);

	if (host == NULL) {
		printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
		       np->full_name);
		return;
	}
	memset(host, 0, sizeof(*host));

	init_MUTEX(&host->mutex);
	host->np = of_node_get(np);
	host->num_channels = 3;
	host->mode = pmac_low_i2c_mode_std;
	host->func = pmu_low_i2c_func;
}
420 | |||
421 | #endif /* CONFIG_ADB_PMU */ | ||
422 | |||
423 | void __init pmac_init_low_i2c(void) | ||
424 | { | ||
425 | struct device_node *np; | ||
426 | |||
427 | /* Probe keywest-i2c busses */ | ||
428 | np = of_find_compatible_node(NULL, "i2c", "keywest-i2c"); | ||
429 | while(np) { | ||
430 | keywest_low_i2c_add(np); | ||
431 | np = of_find_compatible_node(np, "i2c", "keywest-i2c"); | ||
432 | } | ||
433 | |||
434 | #ifdef CONFIG_ADB_PMU | ||
435 | /* Probe PMU busses */ | ||
436 | np = of_find_node_by_name(NULL, "via-pmu"); | ||
437 | if (np) | ||
438 | pmu_low_i2c_add(np); | ||
439 | #endif /* CONFIG_ADB_PMU */ | ||
440 | |||
441 | /* TODO: Add CUDA support as well */ | ||
442 | } | ||
443 | |||
444 | int pmac_low_i2c_lock(struct device_node *np) | ||
445 | { | ||
446 | struct low_i2c_host *host = find_low_i2c_host(np); | ||
447 | |||
448 | if (!host) | ||
449 | return -ENODEV; | ||
450 | down(&host->mutex); | ||
451 | return 0; | ||
452 | } | ||
453 | EXPORT_SYMBOL(pmac_low_i2c_lock); | ||
454 | |||
455 | int pmac_low_i2c_unlock(struct device_node *np) | ||
456 | { | ||
457 | struct low_i2c_host *host = find_low_i2c_host(np); | ||
458 | |||
459 | if (!host) | ||
460 | return -ENODEV; | ||
461 | up(&host->mutex); | ||
462 | return 0; | ||
463 | } | ||
464 | EXPORT_SYMBOL(pmac_low_i2c_unlock); | ||
465 | |||
466 | |||
467 | int pmac_low_i2c_open(struct device_node *np, int channel) | ||
468 | { | ||
469 | struct low_i2c_host *host = find_low_i2c_host(np); | ||
470 | |||
471 | if (!host) | ||
472 | return -ENODEV; | ||
473 | |||
474 | if (channel >= host->num_channels) | ||
475 | return -EINVAL; | ||
476 | |||
477 | down(&host->mutex); | ||
478 | host->is_open = 1; | ||
479 | host->channel = channel; | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | EXPORT_SYMBOL(pmac_low_i2c_open); | ||
484 | |||
485 | int pmac_low_i2c_close(struct device_node *np) | ||
486 | { | ||
487 | struct low_i2c_host *host = find_low_i2c_host(np); | ||
488 | |||
489 | if (!host) | ||
490 | return -ENODEV; | ||
491 | |||
492 | host->is_open = 0; | ||
493 | up(&host->mutex); | ||
494 | |||
495 | return 0; | ||
496 | } | ||
497 | EXPORT_SYMBOL(pmac_low_i2c_close); | ||
498 | |||
499 | int pmac_low_i2c_setmode(struct device_node *np, int mode) | ||
500 | { | ||
501 | struct low_i2c_host *host = find_low_i2c_host(np); | ||
502 | |||
503 | if (!host) | ||
504 | return -ENODEV; | ||
505 | WARN_ON(!host->is_open); | ||
506 | host->mode = mode; | ||
507 | |||
508 | return 0; | ||
509 | } | ||
510 | EXPORT_SYMBOL(pmac_low_i2c_setmode); | ||
511 | |||
512 | int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len) | ||
513 | { | ||
514 | struct low_i2c_host *host = find_low_i2c_host(np); | ||
515 | |||
516 | if (!host) | ||
517 | return -ENODEV; | ||
518 | WARN_ON(!host->is_open); | ||
519 | |||
520 | return host->func(host, addrdir, subaddr, data, len); | ||
521 | } | ||
522 | EXPORT_SYMBOL(pmac_low_i2c_xfer); | ||
523 | |||
diff --git a/arch/powerpc/platforms/powermac/pmac_nvram.c b/arch/powerpc/platforms/powermac/pmac_nvram.c new file mode 100644 index 000000000000..8c9b008c7226 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_nvram.c | |||
@@ -0,0 +1,584 @@ | |||
1 | /* | ||
2 | * arch/ppc/platforms/pmac_nvram.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * Todo: - add support for the OF persistent properties | ||
12 | */ | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/stddef.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/nvram.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/adb.h> | ||
24 | #include <linux/pmu.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | #include <linux/completion.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <asm/sections.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/system.h> | ||
31 | #include <asm/prom.h> | ||
32 | #include <asm/machdep.h> | ||
33 | #include <asm/nvram.h> | ||
34 | |||
35 | #define DEBUG | ||
36 | |||
37 | #ifdef DEBUG | ||
38 | #define DBG(x...) printk(x) | ||
39 | #else | ||
40 | #define DBG(x...) | ||
41 | #endif | ||
42 | |||
43 | #define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ | ||
44 | |||
45 | #define CORE99_SIGNATURE 0x5a | ||
46 | #define CORE99_ADLER_START 0x14 | ||
47 | |||
48 | /* On Core99, nvram is either a sharp, a micron or an AMD flash */ | ||
49 | #define SM_FLASH_STATUS_DONE 0x80 | ||
50 | #define SM_FLASH_STATUS_ERR 0x38 | ||
51 | #define SM_FLASH_CMD_ERASE_CONFIRM 0xd0 | ||
52 | #define SM_FLASH_CMD_ERASE_SETUP 0x20 | ||
53 | #define SM_FLASH_CMD_RESET 0xff | ||
54 | #define SM_FLASH_CMD_WRITE_SETUP 0x40 | ||
55 | #define SM_FLASH_CMD_CLEAR_STATUS 0x50 | ||
56 | #define SM_FLASH_CMD_READ_STATUS 0x70 | ||
57 | |||
/* CHRP NVRAM header
 *
 * One of these starts every partition in a CHRP-style NVRAM.  The
 * header is exactly 16 bytes; 'len' is the total partition length in
 * 16-byte units (header included, see lookup_partitions()), and 'data'
 * marks the start of the variable-length payload that follows.
 */
struct chrp_header {
	u8	signature;	/* partition type/signature byte */
	u8	cksum;		/* header checksum, see chrp_checksum() */
	u16	len;		/* partition length in 16-byte blocks */
	char	name[12];	/* partition name, not always NUL-terminated */
	u8	data[0];	/* start of partition payload */
};
66 | |||
/* Header of a Core99 flash nvram bank: a CHRP header followed by an
 * Adler-style checksum of the rest of the bank and a generation
 * counter used to pick the most recent of the two banks. */
struct core99_header {
	struct chrp_header	hdr;
	u32	adler;		/* checksum, see core99_calc_adler() */
	u32	generation;	/* bumped on every flash update */
	u32	reserved[2];
};
73 | |||
/*
 * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
 */
static int nvram_naddrs;			/* address count of the nvram node (-1 = PMU) */
static volatile unsigned char *nvram_addr;	/* address register (indirect access only) */
static volatile unsigned char *nvram_data;	/* mapped nvram data (or flash banks) */
static int nvram_mult, is_core_99;		/* byte spacing multiplier; Core99 flash flag */
static int core99_bank = 0;			/* currently active flash bank (0 or 1) */
static int nvram_partitions[3];			/* OF/XPRAM/NR partition offsets, -1 if absent */
static DEFINE_SPINLOCK(nv_lock);		/* serializes indirect access and bank sync */

extern int pmac_newworld;
extern int system_running;

/* Flash write/erase hooks, bound at init to the AMD or Sharp/Micron
 * implementation depending on the chip found */
static int (*core99_write_bank)(int bank, u8* datas);
static int (*core99_erase_bank)(int bank);

/* In-RAM working copy of the active Core99 flash bank */
static char *nvram_image;
92 | |||
93 | |||
94 | static unsigned char core99_nvram_read_byte(int addr) | ||
95 | { | ||
96 | if (nvram_image == NULL) | ||
97 | return 0xff; | ||
98 | return nvram_image[addr]; | ||
99 | } | ||
100 | |||
101 | static void core99_nvram_write_byte(int addr, unsigned char val) | ||
102 | { | ||
103 | if (nvram_image == NULL) | ||
104 | return; | ||
105 | nvram_image[addr] = val; | ||
106 | } | ||
107 | |||
108 | |||
/* Directly mapped nvram: bytes are spaced nvram_mult apart in the
 * mapped range; the address is wrapped to NVRAM_SIZE. */
static unsigned char direct_nvram_read_byte(int addr)
{
	return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
}
113 | |||
/* Directly mapped nvram write, mirroring direct_nvram_read_byte(). */
static void direct_nvram_write_byte(int addr, unsigned char val)
{
	out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
}
118 | |||
119 | |||
/* Indirectly mapped nvram: latch the high address bits into the
 * address register, then read the byte through the data window.
 * nv_lock keeps the address/data register pair coherent against
 * concurrent callers and interrupts. */
static unsigned char indirect_nvram_read_byte(int addr)
{
	unsigned char val;
	unsigned long flags;

	spin_lock_irqsave(&nv_lock, flags);
	out_8(nvram_addr, addr >> 5);
	val = in_8(&nvram_data[(addr & 0x1f) << 4]);
	spin_unlock_irqrestore(&nv_lock, flags);

	return val;
}
132 | |||
/* Indirectly mapped nvram write; see indirect_nvram_read_byte() for
 * the address-latch scheme and locking rationale. */
static void indirect_nvram_write_byte(int addr, unsigned char val)
{
	unsigned long flags;

	spin_lock_irqsave(&nv_lock, flags);
	out_8(nvram_addr, addr >> 5);
	out_8(&nvram_data[(addr & 0x1f) << 4], val);
	spin_unlock_irqrestore(&nv_lock, flags);
}
142 | |||
143 | |||
144 | #ifdef CONFIG_ADB_PMU | ||
145 | |||
146 | static void pmu_nvram_complete(struct adb_request *req) | ||
147 | { | ||
148 | if (req->arg) | ||
149 | complete((struct completion *)req->arg); | ||
150 | } | ||
151 | |||
/* Read one nvram byte through the PMU.
 * Once the system is running we sleep on a completion instead of
 * busy-waiting; during early boot (no scheduler) we poll the PMU.
 * The final poll loop also covers the race where the request has not
 * completed by the time the completion fires.  Returns 0xff when the
 * PMU request cannot be queued. */
static unsigned char pmu_nvram_read_byte(int addr)
{
	struct adb_request req;
	DECLARE_COMPLETION(req_complete);

	/* Only hand the callback a completion when we can sleep */
	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
	if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
			(addr >> 8) & 0xff, addr & 0xff))
		return 0xff;
	if (system_state == SYSTEM_RUNNING)
		wait_for_completion(&req_complete);
	while (!req.complete)
		pmu_poll();
	return req.reply[0];
}
167 | |||
/* Write one nvram byte through the PMU; same sleep-vs-poll logic as
 * pmu_nvram_read_byte().  A queueing failure is silently dropped. */
static void pmu_nvram_write_byte(int addr, unsigned char val)
{
	struct adb_request req;
	DECLARE_COMPLETION(req_complete);

	/* Only hand the callback a completion when we can sleep */
	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
	if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
			(addr >> 8) & 0xff, addr & 0xff, val))
		return;
	if (system_state == SYSTEM_RUNNING)
		wait_for_completion(&req_complete);
	while (!req.complete)
		pmu_poll();
}
182 | |||
183 | #endif /* CONFIG_ADB_PMU */ | ||
184 | |||
185 | |||
186 | static u8 chrp_checksum(struct chrp_header* hdr) | ||
187 | { | ||
188 | u8 *ptr; | ||
189 | u16 sum = hdr->signature; | ||
190 | for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++) | ||
191 | sum += *ptr; | ||
192 | while (sum > 0xFF) | ||
193 | sum = (sum & 0xFF) + (sum>>8); | ||
194 | return sum; | ||
195 | } | ||
196 | |||
/*
 * Compute an Adler-32-style checksum of the nvram image, skipping the
 * first CORE99_ADLER_START bytes (they contain the header fields,
 * including this checksum itself).
 *
 * NOTE(review): textbook Adler-32 reduces 'low' as well in the
 * periodic fold; the duplicated "high %= 65521UL" below looks like a
 * typo for "low %= 65521UL".  It is deliberately left as-is: the same
 * routine is used both to validate and to regenerate bank checksums,
 * so the code is self-consistent, and "fixing" it would invalidate
 * every checksum already in flash.
 */
static u32 core99_calc_adler(u8 *buffer)
{
	int cnt;
	u32 low, high;

	buffer += CORE99_ADLER_START;
	low = 1;
	high = 0;
	for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
		if ((cnt % 5000) == 0) {
			high %= 65521UL;
			high %= 65521UL;
		}
		low += buffer[cnt];
		high += low;
	}
	low %= 65521UL;
	high %= 65521UL;

	return (high << 16) | low;
}
218 | |||
219 | static u32 core99_check(u8* datas) | ||
220 | { | ||
221 | struct core99_header* hdr99 = (struct core99_header*)datas; | ||
222 | |||
223 | if (hdr99->hdr.signature != CORE99_SIGNATURE) { | ||
224 | DBG("Invalid signature\n"); | ||
225 | return 0; | ||
226 | } | ||
227 | if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) { | ||
228 | DBG("Invalid checksum\n"); | ||
229 | return 0; | ||
230 | } | ||
231 | if (hdr99->adler != core99_calc_adler(datas)) { | ||
232 | DBG("Invalid adler\n"); | ||
233 | return 0; | ||
234 | } | ||
235 | return hdr99->generation; | ||
236 | } | ||
237 | |||
238 | static int sm_erase_bank(int bank) | ||
239 | { | ||
240 | int stat, i; | ||
241 | unsigned long timeout; | ||
242 | |||
243 | u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE; | ||
244 | |||
245 | DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank); | ||
246 | |||
247 | out_8(base, SM_FLASH_CMD_ERASE_SETUP); | ||
248 | out_8(base, SM_FLASH_CMD_ERASE_CONFIRM); | ||
249 | timeout = 0; | ||
250 | do { | ||
251 | if (++timeout > 1000000) { | ||
252 | printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n"); | ||
253 | break; | ||
254 | } | ||
255 | out_8(base, SM_FLASH_CMD_READ_STATUS); | ||
256 | stat = in_8(base); | ||
257 | } while (!(stat & SM_FLASH_STATUS_DONE)); | ||
258 | |||
259 | out_8(base, SM_FLASH_CMD_CLEAR_STATUS); | ||
260 | out_8(base, SM_FLASH_CMD_RESET); | ||
261 | |||
262 | for (i=0; i<NVRAM_SIZE; i++) | ||
263 | if (base[i] != 0xff) { | ||
264 | printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n"); | ||
265 | return -ENXIO; | ||
266 | } | ||
267 | return 0; | ||
268 | } | ||
269 | |||
/*
 * Program one (previously erased) bank of a Sharp/Micron flash part
 * with the NVRAM_SIZE bytes in 'datas', then verify.
 *
 * NOTE(review): like sm_erase_bank(), the base address comes from the
 * core99_bank global rather than the 'bank' argument; the only caller
 * always passes core99_bank so they agree.
 *
 * Returns 0 on success or -ENXIO when verification fails.
 */
static int sm_write_bank(int bank, u8* datas)
{
	int i, stat = 0;
	unsigned long timeout;

	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);

	for (i=0; i<NVRAM_SIZE; i++) {
		/* One write-setup/data cycle per byte */
		out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
		udelay(1);
		out_8(base+i, datas[i]);
		/* Poll the status register until this byte completes */
		timeout = 0;
		do {
			if (++timeout > 1000000) {
				printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
				break;
			}
			out_8(base, SM_FLASH_CMD_READ_STATUS);
			stat = in_8(base);
		} while (!(stat & SM_FLASH_STATUS_DONE));
		/* Give up on the whole bank after a timeout */
		if (!(stat & SM_FLASH_STATUS_DONE))
			break;
	}
	out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
	out_8(base, SM_FLASH_CMD_RESET);
	/* Read back and verify the whole bank */
	for (i=0; i<NVRAM_SIZE; i++)
		if (base[i] != datas[i]) {
			printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
			return -ENXIO;
		}
	return 0;
}
304 | |||
/*
 * Erase one bank of an AMD flash part using the standard AMD
 * unlock/sector-erase command sequence, then verify the bank reads as
 * all-ones.  Completion is detected with the "toggle bit" technique:
 * while an operation is in progress a status bit flips on every read,
 * so two consecutive reads differ until it finishes.
 *
 * NOTE(review): addresses the bank via the core99_bank global, not
 * the 'bank' argument (the only caller passes core99_bank).
 *
 * Returns 0 on success or -ENXIO on failure.
 */
static int amd_erase_bank(int bank)
{
	int i, stat = 0;
	unsigned long timeout;

	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: AMD Erasing bank %d...\n", bank);

	/* Unlock 1 */
	out_8(base+0x555, 0xaa);
	udelay(1);
	/* Unlock 2 */
	out_8(base+0x2aa, 0x55);
	udelay(1);

	/* Sector-Erase */
	out_8(base+0x555, 0x80);
	udelay(1);
	out_8(base+0x555, 0xaa);
	udelay(1);
	out_8(base+0x2aa, 0x55);
	udelay(1);
	out_8(base, 0x30);
	udelay(1);

	/* Wait for the toggle bit to stop toggling */
	timeout = 0;
	do {
		if (++timeout > 1000000) {
			printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
			break;
		}
		stat = in_8(base) ^ in_8(base);
	} while (stat != 0);

	/* Reset */
	out_8(base, 0xf0);
	udelay(1);

	/* Verify the bank is fully erased */
	for (i=0; i<NVRAM_SIZE; i++)
		if (base[i] != 0xff) {
			printk(KERN_ERR "nvram: AMD flash erase failed !\n");
			return -ENXIO;
		}
	return 0;
}
351 | |||
/*
 * Program one bank of an AMD flash part byte-by-byte using the AMD
 * unlock/program sequence, then verify.  Per-byte completion uses the
 * same toggle-bit detection as amd_erase_bank().
 *
 * NOTE(review): addresses the bank via the core99_bank global, not
 * the 'bank' argument (the only caller passes core99_bank).
 *
 * Returns 0 on success or -ENXIO when verification fails.
 */
static int amd_write_bank(int bank, u8* datas)
{
	int i, stat = 0;
	unsigned long timeout;

	u8* base = (u8 *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: AMD Writing bank %d...\n", bank);

	for (i=0; i<NVRAM_SIZE; i++) {
		/* Unlock 1 */
		out_8(base+0x555, 0xaa);
		udelay(1);
		/* Unlock 2 */
		out_8(base+0x2aa, 0x55);
		udelay(1);

		/* Write single word */
		out_8(base+0x555, 0xa0);
		udelay(1);
		out_8(base+i, datas[i]);

		/* Toggle-bit poll for this byte to complete */
		timeout = 0;
		do {
			if (++timeout > 1000000) {
				printk(KERN_ERR "nvram: AMD flash write timeout !\n");
				break;
			}
			stat = in_8(base) ^ in_8(base);
		} while (stat != 0);
		/* Give up on the whole bank after a timeout */
		if (stat != 0)
			break;
	}

	/* Reset */
	out_8(base, 0xf0);
	udelay(1);

	/* Verify the whole bank against the source image */
	for (i=0; i<NVRAM_SIZE; i++)
		if (base[i] != datas[i]) {
			printk(KERN_ERR "nvram: AMD flash write failed !\n");
			return -ENXIO;
		}
	return 0;
}
397 | |||
/*
 * Locate the OF, XPRAM and name-registry partitions in nvram and
 * record their offsets in nvram_partitions[] (-1 when absent).
 *
 * NewWorld machines store CHRP-style partition headers which we walk;
 * OldWorld machines use fixed, well-known offsets.
 *
 * NOTE(review): the walk trusts hdr->len read from nvram - a corrupt
 * zero length would loop forever on the same offset.  Name matching
 * relies on the 12-byte name field plus the forced terminator placed
 * at buffer[16].
 */
static void __init lookup_partitions(void)
{
	u8 buffer[17];
	int i, offset;
	struct chrp_header* hdr;

	if (pmac_newworld) {
		nvram_partitions[pmac_nvram_OF] = -1;
		nvram_partitions[pmac_nvram_XPRAM] = -1;
		nvram_partitions[pmac_nvram_NR] = -1;
		hdr = (struct chrp_header *)buffer;

		offset = 0;
		buffer[16] = 0;	/* terminator so strcmp on hdr->name is safe */
		do {
			/* Read one 16-byte partition header */
			for (i=0;i<16;i++)
				buffer[i] = nvram_read_byte(offset+i);
			if (!strcmp(hdr->name, "common"))
				nvram_partitions[pmac_nvram_OF] = offset + 0x10;
			if (!strcmp(hdr->name, "APL,MacOS75")) {
				nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
				nvram_partitions[pmac_nvram_NR] = offset + 0x110;
			}
			/* hdr->len counts 16-byte blocks, header included */
			offset += (hdr->len * 0x10);
		} while(offset < NVRAM_SIZE);
	} else {
		/* OldWorld: fixed layout */
		nvram_partitions[pmac_nvram_OF] = 0x1800;
		nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
		nvram_partitions[pmac_nvram_NR] = 0x1400;
	}
	DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
	DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
	DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
}
432 | |||
/*
 * Flush the in-RAM nvram image to flash if it changed: bump the
 * generation counter, recompute the header checksum and adler, then
 * erase and program the *other* bank, so a power loss mid-write still
 * leaves the previous bank intact.
 *
 * NOTE(review): if the erase or write fails, core99_bank has already
 * been flipped and stays pointing at the bad bank - verify whether
 * that is intended (the on-flash generation still favours the old
 * bank on next boot).
 * NOTE(review): DEBUG is defined at the top of this file, so the
 * mdelay(2000) below adds a 2-second delay to every sync.
 */
static void core99_nvram_sync(void)
{
	struct core99_header* hdr99;
	unsigned long flags;

	if (!is_core_99 || !nvram_data || !nvram_image)
		return;

	spin_lock_irqsave(&nv_lock, flags);
	/* Nothing to do if the image still matches the active bank */
	if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
		NVRAM_SIZE))
		goto bail;

	DBG("Updating nvram...\n");

	hdr99 = (struct core99_header*)nvram_image;
	hdr99->generation++;
	hdr99->hdr.signature = CORE99_SIGNATURE;
	hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
	hdr99->adler = core99_calc_adler(nvram_image);
	/* Switch to the other bank before erasing/writing */
	core99_bank = core99_bank ? 0 : 1;
	if (core99_erase_bank)
		if (core99_erase_bank(core99_bank)) {
			printk("nvram: Error erasing bank %d\n", core99_bank);
			goto bail;
		}
	if (core99_write_bank)
		if (core99_write_bank(core99_bank, nvram_image))
			printk("nvram: Error writing bank %d\n", core99_bank);
 bail:
	spin_unlock_irqrestore(&nv_lock, flags);

#ifdef DEBUG
	mdelay(2000);
#endif
}
469 | |||
/*
 * Probe the "nvram" device-tree node and install the matching access
 * methods in ppc_md:
 *
 *  - Core99 ("nvram,flash"): nvram lives in one of two flash banks;
 *    pick the bank with the highest valid generation, copy it into a
 *    RAM image, and register RAM-backed accessors plus a sync routine
 *    bound to the AMD or Sharp/Micron flash ops.
 *  - CHRP, or any machine with a single mapping: direct byte access.
 *  - Two mappings: indirect (address/data register) access.
 *  - No mapping but a PMU system controller: access via PMU requests.
 */
void __init pmac_nvram_init(void)
{
	struct device_node *dp;

	nvram_naddrs = 0;

	dp = find_devices("nvram");
	if (dp == NULL) {
		printk(KERN_ERR "Can't find NVRAM device\n");
		return;
	}
	nvram_naddrs = dp->n_addrs;
	is_core_99 = device_is_compatible(dp, "nvram,flash");
	if (is_core_99) {
		int i;
		u32 gen_bank0, gen_bank1;

		if (nvram_naddrs < 1) {
			printk(KERN_ERR "nvram: no address\n");
			return;
		}
		nvram_image = alloc_bootmem(NVRAM_SIZE);
		if (nvram_image == NULL) {
			printk(KERN_ERR "nvram: can't allocate ram image\n");
			return;
		}
		/* Map both flash banks back-to-back.
		 * NOTE(review): the ioremap result is not checked here. */
		nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
		nvram_naddrs = 1; /* Make sure we get the correct case */

		DBG("nvram: Checking bank 0...\n");

		/* Validate both banks; use the newest valid one */
		gen_bank0 = core99_check((u8 *)nvram_data);
		gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
		core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;

		DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
		DBG("nvram: Active bank is: %d\n", core99_bank);

		/* Work from a RAM copy; flash is only touched on sync */
		for (i=0; i<NVRAM_SIZE; i++)
			nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];

		ppc_md.nvram_read_val = core99_nvram_read_byte;
		ppc_md.nvram_write_val = core99_nvram_write_byte;
		ppc_md.nvram_sync = core99_nvram_sync;
		/*
		 * Maybe we could be smarter here though making an exclusive list
		 * of known flash chips is a bit nasty as older OF didn't provide us
		 * with a useful "compatible" entry. A solution would be to really
		 * identify the chip using flash id commands and base ourselves on
		 * a list of known chips IDs
		 */
		if (device_is_compatible(dp, "amd-0137")) {
			core99_erase_bank = amd_erase_bank;
			core99_write_bank = amd_write_bank;
		} else {
			core99_erase_bank = sm_erase_bank;
			core99_write_bank = sm_write_bank;
		}
	} else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
		/* CHRP: single mapping in ISA memory space */
		nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
				     dp->addrs[0].size);
		nvram_mult = 1;
		ppc_md.nvram_read_val = direct_nvram_read_byte;
		ppc_md.nvram_write_val = direct_nvram_write_byte;
	} else if (nvram_naddrs == 1) {
		/* Single mapping; bytes may be spaced nvram_mult apart */
		nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
		nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
		ppc_md.nvram_read_val = direct_nvram_read_byte;
		ppc_md.nvram_write_val = direct_nvram_write_byte;
	} else if (nvram_naddrs == 2) {
		/* Separate address and data registers */
		nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
		nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
		ppc_md.nvram_read_val = indirect_nvram_read_byte;
		ppc_md.nvram_write_val = indirect_nvram_write_byte;
	} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
#ifdef CONFIG_ADB_PMU
		nvram_naddrs = -1;	/* flag PMU-based access */
		ppc_md.nvram_read_val = pmu_nvram_read_byte;
		ppc_md.nvram_write_val = pmu_nvram_write_byte;
#endif /* CONFIG_ADB_PMU */
	} else {
		printk(KERN_ERR "Don't know how to access NVRAM with %d addresses\n",
		       nvram_naddrs);
	}
	lookup_partitions();
}
556 | |||
/* Return the nvram offset of the given partition (pmac_nvram_OF,
 * pmac_nvram_XPRAM or pmac_nvram_NR), or -1 when it was not found. */
int pmac_get_partition(int partition)
{
	return nvram_partitions[partition];
}
561 | |||
562 | u8 pmac_xpram_read(int xpaddr) | ||
563 | { | ||
564 | int offset = nvram_partitions[pmac_nvram_XPRAM]; | ||
565 | |||
566 | if (offset < 0) | ||
567 | return 0xff; | ||
568 | |||
569 | return ppc_md.nvram_read_val(xpaddr + offset); | ||
570 | } | ||
571 | |||
572 | void pmac_xpram_write(int xpaddr, u8 data) | ||
573 | { | ||
574 | int offset = nvram_partitions[pmac_nvram_XPRAM]; | ||
575 | |||
576 | if (offset < 0) | ||
577 | return; | ||
578 | |||
579 | ppc_md.nvram_write_val(xpaddr + offset, data); | ||
580 | } | ||
581 | |||
582 | EXPORT_SYMBOL(pmac_get_partition); | ||
583 | EXPORT_SYMBOL(pmac_xpram_read); | ||
584 | EXPORT_SYMBOL(pmac_xpram_write); | ||
diff --git a/arch/powerpc/platforms/powermac/pmac_pci.c b/arch/powerpc/platforms/powermac/pmac_pci.c new file mode 100644 index 000000000000..40bcd3e55afb --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_pci.c | |||
@@ -0,0 +1,1341 @@ | |||
1 | /* | ||
2 | * Support for PCI bridges found on Power Macintoshes. | ||
3 | * At present the "bandit" and "chaos" bridges are supported. | ||
4 | * Fortunately you access configuration space in the same | ||
5 | * way with either bridge. | ||
6 | * | ||
7 | * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org) | ||
8 | * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | |||
23 | #include <asm/sections.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/prom.h> | ||
26 | #include <asm/pci-bridge.h> | ||
27 | #include <asm/machdep.h> | ||
28 | #include <asm/pmac_feature.h> | ||
29 | |||
30 | #undef DEBUG | ||
31 | |||
32 | #ifdef DEBUG | ||
33 | #define DBG(x...) printk(x) | ||
34 | #else | ||
35 | #define DBG(x...) | ||
36 | #endif | ||
37 | |||
38 | static int add_bridge(struct device_node *dev); | ||
39 | extern void pmac_check_ht_link(void); | ||
40 | |||
41 | /* XXX Could be per-controller, but I don't think we risk anything by | ||
42 | * assuming we won't have both UniNorth and Bandit */ | ||
43 | static int has_uninorth; | ||
44 | #ifdef CONFIG_POWER4 | ||
45 | static struct pci_controller *u3_agp; | ||
46 | #endif /* CONFIG_POWER4 */ | ||
47 | |||
48 | extern u8 pci_cache_line_size; | ||
49 | extern int pcibios_assign_bus_offset; | ||
50 | |||
51 | struct device_node *k2_skiplist[2]; | ||
52 | |||
53 | /* | ||
54 | * Magic constants for enabling cache coherency in the bandit/PSX bridge. | ||
55 | */ | ||
56 | #define BANDIT_DEVID_2 8 | ||
57 | #define BANDIT_REVID 3 | ||
58 | |||
59 | #define BANDIT_DEVNUM 11 | ||
60 | #define BANDIT_MAGIC 0x50 | ||
61 | #define BANDIT_COHERENT 0x40 | ||
62 | |||
63 | static int __init fixup_one_level_bus_range(struct device_node *node, int higher) | ||
64 | { | ||
65 | for (; node != 0;node = node->sibling) { | ||
66 | int * bus_range; | ||
67 | unsigned int *class_code; | ||
68 | int len; | ||
69 | |||
70 | /* For PCI<->PCI bridges or CardBus bridges, we go down */ | ||
71 | class_code = (unsigned int *) get_property(node, "class-code", NULL); | ||
72 | if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && | ||
73 | (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) | ||
74 | continue; | ||
75 | bus_range = (int *) get_property(node, "bus-range", &len); | ||
76 | if (bus_range != NULL && len > 2 * sizeof(int)) { | ||
77 | if (bus_range[1] > higher) | ||
78 | higher = bus_range[1]; | ||
79 | } | ||
80 | higher = fixup_one_level_bus_range(node->child, higher); | ||
81 | } | ||
82 | return higher; | ||
83 | } | ||
84 | |||
85 | /* This routine fixes the "bus-range" property of all bridges in the | ||
86 | * system since they tend to have their "last" member wrong on macs | ||
87 | * | ||
88 | * Note that the bus numbers manipulated here are OF bus numbers, they | ||
89 | * are not Linux bus numbers. | ||
90 | */ | ||
91 | static void __init fixup_bus_range(struct device_node *bridge) | ||
92 | { | ||
93 | int * bus_range; | ||
94 | int len; | ||
95 | |||
96 | /* Lookup the "bus-range" property for the hose */ | ||
97 | bus_range = (int *) get_property(bridge, "bus-range", &len); | ||
98 | if (bus_range == NULL || len < 2 * sizeof(int)) { | ||
99 | printk(KERN_WARNING "Can't get bus-range for %s\n", | ||
100 | bridge->full_name); | ||
101 | return; | ||
102 | } | ||
103 | bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers. | ||
108 | * | ||
109 | * The "Bandit" version is present in all early PCI PowerMacs, | ||
110 | * and up to the first ones using Grackle. Some machines may | ||
111 | * have 2 bandit controllers (2 PCI busses). | ||
112 | * | ||
113 | * "Chaos" is used in some "Bandit"-type machines as a bridge | ||
114 | * for the separate display bus. It is accessed the same | ||
115 | * way as bandit, but cannot be probed for devices. It therefore | ||
116 | * has its own config access functions. | ||
117 | * | ||
118 | * The "UniNorth" version is present in all Core99 machines | ||
119 | * (iBook, G4, new IMacs, and all the recent Apple machines). | ||
120 | * It contains 3 controllers in one ASIC. | ||
121 | * | ||
122 | * The U3 is the bridge used on G5 machines. It contains an | ||
123 | * AGP bus which is dealt with the old UniNorth access routines | ||
124 | * and a HyperTransport bus which uses its own set of access | ||
125 | * functions. | ||
126 | */ | ||
127 | |||
/*
 * Config-address encodings for MacRISC bridges.
 *
 * CFA0 is the type-0 (root bus) encoding: one-hot slot select in the
 * upper bits, function number in bits 8-10, register offset below.
 * CFA1 is the type-1 (behind a bridge) encoding: bus/devfn/offset plus
 * the low "type 1" marker bit.
 *
 * The original CFA0 body referenced 'dev_fn' instead of its 'devfn'
 * parameter and only worked because the sole expansion site happened
 * to pass a variable of that exact name; use the macro parameter.
 */
#define MACRISC_CFA0(devfn, off)	\
	((1 << (unsigned long)PCI_SLOT(devfn))	\
	| (((unsigned long)PCI_FUNC(devfn)) << 8)	\
	| (((unsigned long)(off)) & 0xFCUL))

#define MACRISC_CFA1(bus, devfn, off)	\
	((((unsigned long)(bus)) << 16) \
	|(((unsigned long)(devfn)) << 8) \
	|(((unsigned long)(off)) & 0xFCUL) \
	|1UL)
138 | |||
/*
 * Program the bridge's config-address register and return the virtual
 * address to use for the data access, or 0 when the device cannot be
 * addressed (slots below 11 on the root bus are not decoded).
 */
static unsigned long macrisc_cfg_access(struct pci_controller* hose,
					u8 bus, u8 dev_fn, u8 offset)
{
	unsigned int caddr;

	if (bus == hose->first_busno) {
		/* Devices below slot 11 don't exist on the root bus */
		if (dev_fn < (11 << 3))
			return 0;
		caddr = MACRISC_CFA0(dev_fn, offset);
	} else
		caddr = MACRISC_CFA1(bus, dev_fn, offset);

	/* Uninorth will return garbage if we don't read back the value ! */
	do {
		out_le32(hose->cfg_addr, caddr);
	} while (in_le32(hose->cfg_addr) != caddr);

	/* Uninorth decodes 8 bytes of data window, older chips 4 */
	offset &= has_uninorth ? 0x07 : 0x03;
	return ((unsigned long)hose->cfg_data) + offset;
}
159 | |||
160 | static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn, | ||
161 | int offset, int len, u32 *val) | ||
162 | { | ||
163 | struct pci_controller *hose = bus->sysdata; | ||
164 | unsigned long addr; | ||
165 | |||
166 | addr = macrisc_cfg_access(hose, bus->number, devfn, offset); | ||
167 | if (!addr) | ||
168 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
169 | /* | ||
170 | * Note: the caller has already checked that offset is | ||
171 | * suitably aligned and that len is 1, 2 or 4. | ||
172 | */ | ||
173 | switch (len) { | ||
174 | case 1: | ||
175 | *val = in_8((u8 *)addr); | ||
176 | break; | ||
177 | case 2: | ||
178 | *val = in_le16((u16 *)addr); | ||
179 | break; | ||
180 | default: | ||
181 | *val = in_le32((u32 *)addr); | ||
182 | break; | ||
183 | } | ||
184 | return PCIBIOS_SUCCESSFUL; | ||
185 | } | ||
186 | |||
187 | static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn, | ||
188 | int offset, int len, u32 val) | ||
189 | { | ||
190 | struct pci_controller *hose = bus->sysdata; | ||
191 | unsigned long addr; | ||
192 | |||
193 | addr = macrisc_cfg_access(hose, bus->number, devfn, offset); | ||
194 | if (!addr) | ||
195 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
196 | /* | ||
197 | * Note: the caller has already checked that offset is | ||
198 | * suitably aligned and that len is 1, 2 or 4. | ||
199 | */ | ||
200 | switch (len) { | ||
201 | case 1: | ||
202 | out_8((u8 *)addr, val); | ||
203 | (void) in_8((u8 *)addr); | ||
204 | break; | ||
205 | case 2: | ||
206 | out_le16((u16 *)addr, val); | ||
207 | (void) in_le16((u16 *)addr); | ||
208 | break; | ||
209 | default: | ||
210 | out_le32((u32 *)addr, val); | ||
211 | (void) in_le32((u32 *)addr); | ||
212 | break; | ||
213 | } | ||
214 | return PCIBIOS_SUCCESSFUL; | ||
215 | } | ||
216 | |||
217 | static struct pci_ops macrisc_pci_ops = | ||
218 | { | ||
219 | macrisc_read_config, | ||
220 | macrisc_write_config | ||
221 | }; | ||
222 | |||
223 | /* | ||
224 | * Verifiy that a specific (bus, dev_fn) exists on chaos | ||
225 | */ | ||
226 | static int | ||
227 | chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) | ||
228 | { | ||
229 | struct device_node *np; | ||
230 | u32 *vendor, *device; | ||
231 | |||
232 | np = pci_busdev_to_OF_node(bus, devfn); | ||
233 | if (np == NULL) | ||
234 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
235 | |||
236 | vendor = (u32 *)get_property(np, "vendor-id", NULL); | ||
237 | device = (u32 *)get_property(np, "device-id", NULL); | ||
238 | if (vendor == NULL || device == NULL) | ||
239 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
240 | |||
241 | if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10) | ||
242 | && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24)) | ||
243 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
244 | |||
245 | return PCIBIOS_SUCCESSFUL; | ||
246 | } | ||
247 | |||
248 | static int | ||
249 | chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset, | ||
250 | int len, u32 *val) | ||
251 | { | ||
252 | int result = chaos_validate_dev(bus, devfn, offset); | ||
253 | if (result == PCIBIOS_BAD_REGISTER_NUMBER) | ||
254 | *val = ~0U; | ||
255 | if (result != PCIBIOS_SUCCESSFUL) | ||
256 | return result; | ||
257 | return macrisc_read_config(bus, devfn, offset, len, val); | ||
258 | } | ||
259 | |||
260 | static int | ||
261 | chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset, | ||
262 | int len, u32 val) | ||
263 | { | ||
264 | int result = chaos_validate_dev(bus, devfn, offset); | ||
265 | if (result != PCIBIOS_SUCCESSFUL) | ||
266 | return result; | ||
267 | return macrisc_write_config(bus, devfn, offset, len, val); | ||
268 | } | ||
269 | |||
270 | static struct pci_ops chaos_pci_ops = | ||
271 | { | ||
272 | chaos_read_config, | ||
273 | chaos_write_config | ||
274 | }; | ||
275 | |||
276 | #ifdef CONFIG_POWER4 | ||
277 | |||
278 | /* | ||
279 | * These versions of U3 HyperTransport config space access ops do not | ||
280 | * implement self-view of the HT host yet | ||
281 | */ | ||
282 | |||
283 | /* | ||
284 | * This function deals with some "special cases" devices. | ||
285 | * | ||
286 | * 0 -> No special case | ||
287 | * 1 -> Skip the device but act as if the access was successfull | ||
288 | * (return 0xff's on reads, eventually, cache config space | ||
289 | * accesses in a later version) | ||
290 | * -1 -> Hide the device (unsuccessful acess) | ||
291 | */ | ||
/* Classify a device on the U3 HT bus before a config access:
 * returns 0 for a normal access, 1 for a device on the K2 skip list
 * (fake success, no hardware touched), -1 for a device that is not in
 * the OF tree (hidden). */
static int u3_ht_skip_device(struct pci_controller *hose,
			     struct pci_bus *bus, unsigned int devfn)
{
	struct device_node *busdn, *dn;
	int i;

	/* We only allow config cycles to devices that are in OF device-tree
	 * as we are apparently having some weird things going on with some
	 * revs of K2 on recent G5s
	 */
	if (bus->self)
		busdn = pci_device_to_OF_node(bus->self);
	else
		busdn = hose->arch_data;	/* root bus: use the hose's node */
	for (dn = busdn->child; dn; dn = dn->sibling)
		if (dn->data && PCI_DN(dn)->devfn == devfn)
			break;
	if (dn == NULL)
		return -1;

	/*
	 * When a device in K2 is powered down, we die on config
	 * cycle accesses. Fix that here.
	 */
	for (i=0; i<2; i++)
		if (k2_skiplist[i] == dn)
			return 1;

	return 0;
}
322 | |||
/*
 * Config-address encodings for the U3 HyperTransport bridge.
 * CFA0 addresses a device on the host bus; CFA1 adds the bus number
 * and the type-1 marker for devices behind a bridge.
 *
 * The original CFA0 body used 'offset' instead of its 'off' parameter
 * and only worked because every expansion site happened to pass a
 * variable named 'offset'; reference the macro parameter properly.
 */
#define U3_HT_CFA0(devfn, off)		\
		((((unsigned long)(devfn)) << 8) | ((unsigned long)(off)))
#define U3_HT_CFA1(bus, devfn, off)	\
		(U3_HT_CFA0(devfn, off) \
		+ (((unsigned long)(bus)) << 16) \
		+ 0x01000000UL)
329 | |||
/*
 * Compute the mapped address for a U3 HyperTransport config access,
 * or 0 to reject the cycle (the bridge at slot 0 of the root bus is
 * not self-probed for now).
 */
static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
				      u8 bus, u8 devfn, u8 offset)
{
	if (bus == hose->first_busno) {
		/* For now, we don't self probe U3 HT bridge */
		if (PCI_SLOT(devfn) == 0)
			return 0;
		return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
	} else
		return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
}
341 | |||
/*
 * Read a config register through the U3 HT bridge.
 *
 * Devices absent from the OF device-tree are reported as not found;
 * devices on the K2 skip list return all-ones data without touching
 * the hardware (see u3_ht_skip_device()).
 */
static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
			     int offset, int len, u32 *val)
{
	struct pci_controller *hose = bus->sysdata;
	unsigned long addr;

	struct device_node *np = pci_busdev_to_OF_node(bus, devfn);
	if (np == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (u3_ht_skip_device(hose, bus, devfn)) {
	case 0:
		break;
	case 1:
		/* Skipped device: fake a successful read of all-ones */
		switch (len) {
		case 1:
			*val = 0xff; break;
		case 2:
			*val = 0xffff; break;
		default:
			*val = 0xfffffffful; break;
		}
		return PCIBIOS_SUCCESSFUL;
	default:
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		*val = in_8((u8 *)addr);
		break;
	case 2:
		*val = in_le16((u16 *)addr);
		break;
	default:
		*val = in_le32((u32 *)addr);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
390 | |||
/*
 * Write a config register through the U3 HT bridge.
 *
 * Devices absent from the OF device-tree are reported as not found;
 * writes to devices on the K2 skip list are silently dropped while
 * reporting success (see u3_ht_skip_device()).
 */
static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
			      int offset, int len, u32 val)
{
	struct pci_controller *hose = bus->sysdata;
	unsigned long addr;

	struct device_node *np = pci_busdev_to_OF_node(bus, devfn);
	if (np == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (u3_ht_skip_device(hose, bus, devfn)) {
	case 0:
		break;
	case 1:
		/* Skipped device: drop the write but pretend success */
		return PCIBIOS_SUCCESSFUL;
	default:
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		out_8((u8 *)addr, val);
		/* read back — presumably flushes the posted write; confirm */
		(void) in_8((u8 *)addr);
		break;
	case 2:
		out_le16((u16 *)addr, val);
		(void) in_le16((u16 *)addr);
		break;
	default:
		out_le32((u32 *)addr, val);
		(void) in_le32((u32 *)addr);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
434 | |||
/* Config-space ops for the U3 HT host: read accessor first, write second */
static struct pci_ops u3_ht_pci_ops =
{
	u3_ht_read_config,
	u3_ht_write_config
};
440 | |||
441 | #endif /* CONFIG_POWER4 */ | ||
442 | |||
443 | /* | ||
444 | * For a bandit bridge, turn on cache coherency if necessary. | ||
445 | * N.B. we could clean this up using the hose ops directly. | ||
446 | */ | ||
/*
 * For a bandit bridge, turn on cache coherency if necessary.
 * N.B. we could clean this up using the hose ops directly.
 *
 * Config access here is done by writing a device/register selector
 * to cfg_addr and then reading/writing cfg_data; the udelay(2)
 * between the two is presumably settle time for the address latch —
 * confirm against Bandit documentation.
 */
static void __init
init_bandit(struct pci_controller *bp)
{
	unsigned int vendev, magic;
	int rev;

	/* read the word at offset 0 in config space for device 11 */
	out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
	udelay(2);
	vendev = in_le32(bp->cfg_data);
	if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
			PCI_VENDOR_ID_APPLE) {
		/* read the revision id */
		out_le32(bp->cfg_addr,
			 (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
		udelay(2);
		rev = in_8(bp->cfg_data);
		if (rev != BANDIT_REVID)
			printk(KERN_WARNING
			       "Unknown revision %d for bandit\n", rev);
	} else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
		/* neither Bandit ID matched: not a bandit after all */
		printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
		return;
	}

	/* read the word at offset 0x50 */
	out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
	udelay(2);
	magic = in_le32(bp->cfg_data);
	if ((magic & BANDIT_COHERENT) != 0)
		return;		/* coherency already on, nothing to do */
	magic |= BANDIT_COHERENT;
	udelay(2);
	out_le32(bp->cfg_data, magic);
	printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
}
483 | |||
484 | |||
485 | /* | ||
486 | * Tweak the PCI-PCI bridge chip on the blue & white G3s. | ||
487 | */ | ||
/*
 * Tweak the PCI-PCI bridge chip on the blue & white G3s:
 * clear the Master Abort Mode bit in its bridge control register.
 */
static void __init
init_p2pbridge(void)
{
	struct device_node *p2pbridge;
	struct pci_controller* hose;
	u8 bus, devfn;
	u16 val;

	/* XXX it would be better here to identify the specific
	   PCI-PCI bridge chip we have. */
	if ((p2pbridge = find_devices("pci-bridge")) == 0
	    || p2pbridge->parent == NULL
	    || strcmp(p2pbridge->parent->name, "pci") != 0)
		return;
	if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
		DBG("Can't find PCI infos for PCI<->PCI bridge\n");
		return;
	}
	/* Warning: At this point, we have not yet renumbered all busses.
	 * So we must use OF walking to find out hose
	 */
	hose = pci_find_hose_for_OF_device(p2pbridge);
	if (!hose) {
		DBG("Can't find hose for PCI<->PCI bridge\n");
		return;
	}
	if (early_read_config_word(hose, bus, devfn,
				   PCI_BRIDGE_CONTROL, &val) < 0) {
		printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
		return;
	}
	/* read-modify-write: only the Master Abort Mode bit changes */
	val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
	early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
}
522 | |||
523 | /* | ||
524 | * Some Apple desktop machines have a NEC PD720100A USB2 controller | ||
525 | * on the motherboard. Open Firmware, on these, will disable the | ||
526 | * EHCI part of it so it behaves like a pair of OHCI's. This fixup | ||
527 | * code re-enables it ;) | ||
528 | */ | ||
/*
 * Some Apple desktop machines have a NEC PD720100A USB2 controller
 * on the motherboard. Open Firmware, on these, will disable the
 * EHCI part of it so it behaves like a pair of OHCI's. This fixup
 * code re-enables it ;)
 */
static void __init
fixup_nec_usb2(void)
{
	struct device_node *nec;

	for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
		struct pci_controller *hose;
		u32 data, *prop;
		u8 bus, devfn;

		/* Match PCI vendor 0x1033 (NEC), device 0x0035 only */
		prop = (u32 *)get_property(nec, "vendor-id", NULL);
		if (prop == NULL)
			continue;
		if (0x1033 != *prop)
			continue;
		prop = (u32 *)get_property(nec, "device-id", NULL);
		if (prop == NULL)
			continue;
		if (0x0035 != *prop)
			continue;
		prop = (u32 *)get_property(nec, "reg", NULL);
		if (prop == NULL)
			continue;
		/* First "reg" cell is the config address:
		 * bus in bits 16-23, devfn in bits 8-15 */
		devfn = (prop[0] >> 8) & 0xff;
		bus = (prop[0] >> 16) & 0xff;
		/* only fix up via function 0 of the chip */
		if (PCI_FUNC(devfn) != 0)
			continue;
		hose = pci_find_hose_for_OF_device(nec);
		if (!hose)
			continue;
		/* 0xe4 is a vendor-specific config register; bit 0 set
		 * appears to mean "EHCI disabled" — confirm against the
		 * PD720100A datasheet */
		early_read_config_dword(hose, bus, devfn, 0xe4, &data);
		if (data & 1UL) {
			printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
			data &= ~1UL;
			early_write_config_dword(hose, bus, devfn, 0xe4, data);
			/* devfn | 2: program the interrupt line of the
			 * (re-enabled) function 2 */
			early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
						nec->intrs[0].line);
		}
	}
}
569 | |||
/*
 * Scan the top level of the OF device tree and register a host
 * bridge for every bandit / chaos / pci node found; the /ht node,
 * if any, is probed last because its resource carving depends on
 * AGP having been set up first.
 */
void __init
pmac_find_bridges(void)
{
	struct device_node *np, *root;
	struct device_node *ht = NULL;

	root = of_find_node_by_path("/");
	if (root == NULL) {
		printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
		return;
	}
	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
		if (np->name == NULL)
			continue;
		if (strcmp(np->name, "bandit") == 0
		    || strcmp(np->name, "chaos") == 0
		    || strcmp(np->name, "pci") == 0) {
			/* keep a reference on nodes successfully added */
			if (add_bridge(np) == 0)
				of_node_get(np);
		}
		if (strcmp(np->name, "ht") == 0) {
			of_node_get(np);
			ht = np;
		}
	}
	of_node_put(root);

	/* Probe HT last as it relies on the agp resources to be already
	 * setup
	 */
	if (ht && add_bridge(ht) != 0)
		of_node_put(ht);

	init_p2pbridge();
	fixup_nec_usb2();

	/* We are still having some issues with the Xserve G4, enabling
	 * some offset between bus number and domains for now when we
	 * assign all busses should help for now
	 */
	if (pci_assign_all_busses)
		pcibios_assign_bus_offset = 0x10;

#ifdef CONFIG_POWER4
	/* There is something wrong with DMA on U3/HT. I haven't figured out
	 * the details yet, but if I set the cache line size to 128 bytes like
	 * it should, I'm getting memory corruption caused by devices like
	 * sungem (even without the MWI bit set, but maybe sungem doesn't
	 * care). Right now, it appears that setting up a 64 bytes line size
	 * works properly, 64 bytes being the max transfer size of HT, I
	 * suppose this is related the way HT/PCI are hooked together. I still
	 * need to dive into more specs though to be really sure of what's
	 * going on. --BenH.
	 *
	 * Ok, apparently, it's just that HT can't do more than 64 bytes
	 * transactions. MWI seem to be meaningless there as well, it may
	 * be worth nop'ing out pci_set_mwi too though I haven't done that
	 * yet.
	 *
	 * Note that it's a bit different for whatever is in the AGP slot.
	 * For now, I don't care, but this can become a real issue, we
	 * should probably hook pci_set_mwi anyway to make sure it sets
	 * the real cache line size in there.
	 */
	if (machine_is_compatible("MacRISC4"))
		pci_cache_line_size = 16; /* 64 bytes */

	pmac_check_ht_link();
#endif /* CONFIG_POWER4 */
}
640 | |||
/* Build the Grackle (MPC106) CONFIG_ADDR value for register `o' of
 * device `d' on bus `b'; the offset is forced to dword alignment and
 * 0x80 is the enable bit.
 */
#define GRACKLE_CFA(b, d, o)	(0x80 | ((b) << 8) | ((d) << 16) \
				 | (((o) & ~3) << 24))

/* Bits in the register at config offset 0xa8 (see the setters below) */
#define GRACKLE_PICR1_STG		0x00000040
#define GRACKLE_PICR1_LOOPSNOOP		0x00000010
646 | |||
647 | /* N.B. this is called before bridges is initialized, so we can't | ||
648 | use grackle_pcibios_{read,write}_config_dword. */ | ||
649 | static inline void grackle_set_stg(struct pci_controller* bp, int enable) | ||
650 | { | ||
651 | unsigned int val; | ||
652 | |||
653 | out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); | ||
654 | val = in_le32(bp->cfg_data); | ||
655 | val = enable? (val | GRACKLE_PICR1_STG) : | ||
656 | (val & ~GRACKLE_PICR1_STG); | ||
657 | out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); | ||
658 | out_le32(bp->cfg_data, val); | ||
659 | (void)in_le32(bp->cfg_data); | ||
660 | } | ||
661 | |||
662 | static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable) | ||
663 | { | ||
664 | unsigned int val; | ||
665 | |||
666 | out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); | ||
667 | val = in_le32(bp->cfg_data); | ||
668 | val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) : | ||
669 | (val & ~GRACKLE_PICR1_LOOPSNOOP); | ||
670 | out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); | ||
671 | out_le32(bp->cfg_data, val); | ||
672 | (void)in_le32(bp->cfg_data); | ||
673 | } | ||
674 | |||
675 | static int __init | ||
676 | setup_uninorth(struct pci_controller* hose, struct reg_property* addr) | ||
677 | { | ||
678 | pci_assign_all_busses = 1; | ||
679 | has_uninorth = 1; | ||
680 | hose->ops = ¯isc_pci_ops; | ||
681 | hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); | ||
682 | hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); | ||
683 | /* We "know" that the bridge at f2000000 has the PCI slots. */ | ||
684 | return addr->address == 0xf2000000; | ||
685 | } | ||
686 | |||
687 | static void __init | ||
688 | setup_bandit(struct pci_controller* hose, struct reg_property* addr) | ||
689 | { | ||
690 | hose->ops = ¯isc_pci_ops; | ||
691 | hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); | ||
692 | hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); | ||
693 | init_bandit(hose); | ||
694 | } | ||
695 | |||
696 | static void __init | ||
697 | setup_chaos(struct pci_controller* hose, struct reg_property* addr) | ||
698 | { | ||
699 | /* assume a `chaos' bridge */ | ||
700 | hose->ops = &chaos_pci_ops; | ||
701 | hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); | ||
702 | hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); | ||
703 | } | ||
704 | |||
705 | #ifdef CONFIG_POWER4 | ||
706 | |||
707 | static void __init setup_u3_agp(struct pci_controller* hose) | ||
708 | { | ||
709 | /* On G5, we move AGP up to high bus number so we don't need | ||
710 | * to reassign bus numbers for HT. If we ever have P2P bridges | ||
711 | * on AGP, we'll have to move pci_assign_all_busses to the | ||
712 | * pci_controller structure so we enable it for AGP and not for | ||
713 | * HT childs. | ||
714 | * We hard code the address because of the different size of | ||
715 | * the reg address cell, we shall fix that by killing struct | ||
716 | * reg_property and using some accessor functions instead | ||
717 | */ | ||
718 | hose->first_busno = 0xf0; | ||
719 | hose->last_busno = 0xff; | ||
720 | has_uninorth = 1; | ||
721 | hose->ops = ¯isc_pci_ops; | ||
722 | hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); | ||
723 | hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); | ||
724 | |||
725 | u3_agp = hose; | ||
726 | } | ||
727 | |||
/*
 * Set up the U3 HyperTransport host bridge (G5).
 *
 * The 32MB config window at 0xf2000000 is mapped up front and config
 * cycles go through u3_ht_pci_ops.  The bridge's memory resources
 * are then "carved" so they do not overlap what has already been
 * assigned to the AGP bridge (u3_agp); setup_u3_agp() must therefore
 * have run first when AGP is present.
 */
static void __init setup_u3_ht(struct pci_controller* hose)
{
	struct device_node *np = (struct device_node *)hose->arch_data;
	int i, cur;

	hose->ops = &u3_ht_pci_ops;

	/* We hard code the address because of the different size of
	 * the reg address cell, we shall fix that by killing struct
	 * reg_property and using some accessor functions instead
	 */
	hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);

	/*
	 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
	 * have been allocated to AGP. So far, this version of the code doesn't assign
	 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
	 * We need to fix that sooner or later by either parsing all child "ranges"
	 * properties or figuring out the U3 address space decoding logic and
	 * then read its configuration register (if any).
	 */
	hose->io_base_phys = 0xf4000000;
	hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
	isa_io_base = (unsigned long) hose->io_base_virt;
	hose->io_resource.name = np->full_name;
	hose->io_resource.start = 0;
	hose->io_resource.end = 0x003fffff;
	hose->io_resource.flags = IORESOURCE_IO;
	hose->pci_mem_offset = 0;
	hose->first_busno = 0;
	hose->last_busno = 0xef;
	hose->mem_resources[0].name = np->full_name;
	hose->mem_resources[0].start = 0x80000000;
	hose->mem_resources[0].end = 0xefffffff;
	hose->mem_resources[0].flags = IORESOURCE_MEM;

	if (u3_agp == NULL) {
		DBG("U3 has no AGP, using full resource range\n");
		return;
	}

	/* We "remove" the AGP resources from the resources allocated to HT, that
	 * is we create "holes". However, that code does assumptions that so far
	 * happen to be true (cross fingers...), typically that resources in the
	 * AGP node are properly ordered
	 */
	cur = 0;	/* index of the HT mem resource being carved */
	for (i=0; i<3; i++) {
		struct resource *res = &u3_agp->mem_resources[i];
		if (res->flags != IORESOURCE_MEM)
			continue;
		/* We don't care about "fine" resources */
		if (res->start >= 0xf0000000)
			continue;
		/* Check if it's just a matter of "shrinking" us in one direction */
		if (hose->mem_resources[cur].start == res->start) {
			DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
			    cur, hose->mem_resources[cur].start, res->end + 1);
			hose->mem_resources[cur].start = res->end + 1;
			continue;
		}
		if (hose->mem_resources[cur].end == res->end) {
			DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
			    cur, hose->mem_resources[cur].end, res->start - 1);
			hose->mem_resources[cur].end = res->start - 1;
			continue;
		}
		/* No, it's not the case, we need a hole */
		if (cur == 2) {
			/* not enough resources to make a hole, we drop part of the range */
			printk(KERN_WARNING "Running out of resources for /ht host !\n");
			hose->mem_resources[cur].end = res->start - 1;
			continue;
		}
		cur++;
		DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
		    cur-1, res->start - 1, cur, res->end + 1);
		/* Split: new resource [cur] takes the part above the AGP
		 * range, [cur-1] keeps the part below it */
		hose->mem_resources[cur].name = np->full_name;
		hose->mem_resources[cur].flags = IORESOURCE_MEM;
		hose->mem_resources[cur].start = res->end + 1;
		hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
		hose->mem_resources[cur-1].end = res->start - 1;
	}
}
812 | |||
813 | #endif /* CONFIG_POWER4 */ | ||
814 | |||
/*
 * Set up a Grackle (MPC106) host bridge: standard indirect config
 * access at 0xfec00000/0xfee00000, plus loop snooping on the 1998
 * PowerBooks.
 */
void __init
setup_grackle(struct pci_controller *hose)
{
	setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
	if (machine_is_compatible("AAPL,PowerBook1998"))
		grackle_set_loop_snoop(hose, 1);
#if 0	/* Disabled for now, HW problems ??? */
	grackle_set_stg(hose, 1);
#endif
}
825 | |||
/*
 * Parse the bridge's OF "ranges" property and fill in the hose's
 * io_resource / mem_resources (and I/O mappings) accordingly.
 * Contiguous ranges are merged first to cope with pmacs exposing
 * more than 3 memory ranges.
 *
 * NOTE(review): this static function appears unused in this view —
 * add_bridge() calls the generic pci_process_bridge_OF_ranges()
 * instead; confirm before removing.
 */
static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
			   struct device_node *dev, int primary)
{
	static unsigned int static_lc_ranges[2024];
	unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
	unsigned int size;
	int rlen = 0, orig_rlen;
	int memno = 0;
	struct resource *res;
	int np, na = prom_n_addr_cells(dev);

	/* cells per "ranges" entry: 3 PCI address cells, na CPU address
	 * cells, 2 size cells */
	np = na + 5;

	/* First we try to merge ranges to fix a problem with some pmacs
	 * that can have more than 3 ranges, fortunately using contiguous
	 * addresses -- BenH
	 */
	dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
	if (!dt_ranges)
		return;
	/* lc_ranges = alloc_bootmem(rlen);*/
	lc_ranges = static_lc_ranges;
	if (!lc_ranges)
		return; /* what can we do here ? */
	memcpy(lc_ranges, dt_ranges, rlen);
	orig_rlen = rlen;

	/* Let's work on a copy of the "ranges" property instead of damaging
	 * the device-tree image in memory
	 */
	ranges = lc_ranges;
	prev = NULL;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		if (prev) {
			/* Merge an entry that continues the previous one:
			 * same space/high address, PCI and CPU addresses
			 * both contiguous; absorbed entries get their
			 * first cell zeroed so they match nothing below */
			if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
				(prev[2] + prev[na+4]) == ranges[2] &&
				(prev[na+2] + prev[na+4]) == ranges[na+2]) {
				prev[na+4] += ranges[na+4];
				ranges[0] = 0;
				ranges += np;
				continue;
			}
		}
		prev = ranges;
		ranges += np;
	}

	/*
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = lc_ranges;
	rlen = orig_rlen;
	while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		size = ranges[na+4];
		/* top byte of the first cell encodes the space type */
		switch (ranges[0] >> 24) {
		case 1:		/* I/O space */
			if (ranges[2] != 0)
				break;
			hose->io_base_phys = ranges[na+2];
			/* limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;
			hose->io_base_virt = ioremap(ranges[na+2], size);
			if (primary)
				isa_io_base = (unsigned long) hose->io_base_virt;
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = ranges[2];
			break;
		case 2:		/* memory space */
			memno = 0;
			if (ranges[1] == 0 && ranges[2] == 0
			    && ranges[na+4] <= (16 << 20)) {
				/* 1st 16MB, i.e. ISA memory area */
#if 0
				if (primary)
					isa_mem_base = ranges[na+2];
#endif
				memno = 1;
			}
			/* take the first free mem_resources slot */
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;
			if (memno == 0)
				hose->pci_mem_offset = ranges[na+2] - ranges[2];
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = ranges[na+2];
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
		ranges += np;
	}
}
933 | |||
934 | /* | ||
935 | * We assume that if we have a G3 powermac, we have one bridge called | ||
936 | * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise, | ||
937 | * if we have one or more bandit or chaos bridges, we don't have a MPC106. | ||
938 | */ | ||
939 | static int __init add_bridge(struct device_node *dev) | ||
940 | { | ||
941 | int len; | ||
942 | struct pci_controller *hose; | ||
943 | struct reg_property *addr; | ||
944 | char* disp_name; | ||
945 | int *bus_range; | ||
946 | int primary = 1; | ||
947 | |||
948 | DBG("Adding PCI host bridge %s\n", dev->full_name); | ||
949 | |||
950 | addr = (struct reg_property *) get_property(dev, "reg", &len); | ||
951 | if (addr == NULL || len < sizeof(*addr)) { | ||
952 | printk(KERN_WARNING "Can't use %s: no address\n", | ||
953 | dev->full_name); | ||
954 | return -ENODEV; | ||
955 | } | ||
956 | bus_range = (int *) get_property(dev, "bus-range", &len); | ||
957 | if (bus_range == NULL || len < 2 * sizeof(int)) { | ||
958 | printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n", | ||
959 | dev->full_name); | ||
960 | } | ||
961 | |||
962 | hose = pcibios_alloc_controller(); | ||
963 | if (!hose) | ||
964 | return -ENOMEM; | ||
965 | hose->arch_data = dev; | ||
966 | hose->first_busno = bus_range ? bus_range[0] : 0; | ||
967 | hose->last_busno = bus_range ? bus_range[1] : 0xff; | ||
968 | |||
969 | disp_name = NULL; | ||
970 | #ifdef CONFIG_POWER4 | ||
971 | if (device_is_compatible(dev, "u3-agp")) { | ||
972 | setup_u3_agp(hose, addr); | ||
973 | disp_name = "U3-AGP"; | ||
974 | primary = 0; | ||
975 | } else if (device_is_compatible(dev, "u3-ht")) { | ||
976 | setup_u3_ht(hose, addr); | ||
977 | disp_name = "U3-HT"; | ||
978 | primary = 1; | ||
979 | } else | ||
980 | #endif /* CONFIG_POWER4 */ | ||
981 | if (device_is_compatible(dev, "uni-north")) { | ||
982 | primary = setup_uninorth(hose, addr); | ||
983 | disp_name = "UniNorth"; | ||
984 | } else if (strcmp(dev->name, "pci") == 0) { | ||
985 | /* XXX assume this is a mpc106 (grackle) */ | ||
986 | setup_grackle(hose); | ||
987 | disp_name = "Grackle (MPC106)"; | ||
988 | } else if (strcmp(dev->name, "bandit") == 0) { | ||
989 | setup_bandit(hose, addr); | ||
990 | disp_name = "Bandit"; | ||
991 | } else if (strcmp(dev->name, "chaos") == 0) { | ||
992 | setup_chaos(hose, addr); | ||
993 | disp_name = "Chaos"; | ||
994 | primary = 0; | ||
995 | } | ||
996 | printk(KERN_INFO "Found %s PCI host bridge at 0x%08x. Firmware bus number: %d->%d\n", | ||
997 | disp_name, addr->address, hose->first_busno, hose->last_busno); | ||
998 | DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", | ||
999 | hose, hose->cfg_addr, hose->cfg_data); | ||
1000 | |||
1001 | /* Interpret the "ranges" property */ | ||
1002 | /* This also maps the I/O region and sets isa_io/mem_base */ | ||
1003 | pci_process_bridge_OF_ranges(hose, dev, primary); | ||
1004 | |||
1005 | /* Fixup "bus-range" OF property */ | ||
1006 | fixup_bus_range(dev); | ||
1007 | |||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
/* Apply OF device-tree interrupt numbers to every PCI device. */
static void __init
pcibios_fixup_OF_interrupts(void)
{
	struct pci_dev* dev = NULL;

	/*
	 * Open Firmware often doesn't initialize the
	 * PCI_INTERRUPT_LINE config register properly, so we
	 * should find the device node and apply the interrupt
	 * obtained from the OF device-tree
	 */
	for_each_pci_dev(dev) {
		struct device_node *node;
		node = pci_device_to_OF_node(dev);
		/* this is the node, see if it has interrupts */
		if (node && node->n_intrs > 0)
			dev->irq = node->intrs[0].line;
		/* note: written unconditionally — when there is no OF
		 * interrupt, the current dev->irq is written back */
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	}
}
1031 | |||
/* Arch hook: currently only fixes up PCI interrupts from the OF tree. */
void __init
pmac_pcibios_fixup(void)
{
	/* Fixup interrupts according to OF tree */
	pcibios_fixup_OF_interrupts();
}
1038 | |||
/*
 * Per-device enable hook.
 *
 * Refuses to enable Apple USB OHCI controllers that have no OF node
 * (iBook second controller), re-powers FireWire and GMAC cells that
 * were shut down after probing, and when it does so also rewrites
 * basic config registers (command, latency, cache line size).
 *
 * Returns 0 on success, -EINVAL when the device must not be enabled.
 * The `initial' argument is currently unused here.
 */
int
pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
{
	struct device_node* node;
	int updatecfg = 0;
	int uninorth_child;

	node = pci_device_to_OF_node(dev);

	/* We don't want to enable USB controllers absent from the OF tree
	 * (iBook second controller)
	 */
	if (dev->vendor == PCI_VENDOR_ID_APPLE
	    && (dev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10))
	    && !node) {
		printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
		       pci_name(dev));
		return -EINVAL;
	}

	if (!node)
		return 0;

	/* true when the device sits directly under a UniNorth bridge */
	uninorth_child = node->parent &&
		device_is_compatible(node->parent, "uni-north");

	/* Firewire & GMAC were disabled after PCI probe, the driver is
	 * claiming them, we must re-enable them now.
	 */
	if (uninorth_child && !strcmp(node->name, "firewire") &&
	    (device_is_compatible(node, "pci106b,18") ||
	     device_is_compatible(node, "pci106b,30") ||
	     device_is_compatible(node, "pci11c1,5811"))) {
		/* cable power first, then the link — mirrors the
		 * power-down order in pmac_pcibios_after_init() */
		pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
		pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
		updatecfg = 1;
	}
	if (uninorth_child && !strcmp(node->name, "ethernet") &&
	    device_is_compatible(node, "gmac")) {
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
		updatecfg = 1;
	}

	if (updatecfg) {
		u16 cmd;

		/*
		 * Make sure PCI is correctly configured
		 *
		 * We use old pci_bios versions of the function since, by
		 * default, gmac is not powered up, and so will be absent
		 * from the kernel initial PCI lookup.
		 *
		 * Should be replaced by 2.4 new PCI mechanisms and really
		 * register the device.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	}

	return 0;
}
1104 | |||
1105 | /* We power down some devices after they have been probed. They'll | ||
1106 | * be powered back on later on | ||
1107 | */ | ||
/* We power down some devices after they have been probed. They'll
 * be powered back on later on (by pmac_pci_enable_device_hook when
 * a driver claims them)
 */
void __init
pmac_pcibios_after_init(void)
{
	struct device_node* nd;

#ifdef CONFIG_BLK_DEV_IDE
	struct pci_dev *dev = NULL;

	/* OF fails to initialize IDE controllers on macs
	 * (and maybe other machines)
	 *
	 * Ideally, this should be moved to the IDE layer, but we need
	 * to check specifically with Andre Hedrick how to do it cleanly
	 * since the common IDE code seem to care about the fact that the
	 * BIOS may have disabled a controller.
	 *
	 * -- BenH
	 */
	for_each_pci_dev(dev) {
		if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
			pci_enable_device(dev);
	}
#endif /* CONFIG_BLK_DEV_IDE */

	/* Power down UniNorth FireWire cells (link first, then cable
	 * power — the reverse of the power-up order) */
	nd = find_devices("firewire");
	while (nd) {
		if (nd->parent && (device_is_compatible(nd, "pci106b,18") ||
				   device_is_compatible(nd, "pci106b,30") ||
				   device_is_compatible(nd, "pci11c1,5811"))
		    && device_is_compatible(nd->parent, "uni-north")) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
		}
		nd = nd->next;
	}
	/* Power down UniNorth GMAC ethernet cells */
	nd = find_devices("ethernet");
	while (nd) {
		if (nd->parent && device_is_compatible(nd, "gmac")
		    && device_is_compatible(nd->parent, "uni-north"))
			pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
		nd = nd->next;
	}
}
1151 | |||
1152 | #ifdef CONFIG_PPC64 | ||
/*
 * Shift each host bridge's I/O resource by the offset of its mapped
 * I/O base from the global pci_io_base, so the resources end up
 * expressed in the kernel's common I/O space.
 */
static void __init pmac_fixup_phb_resources(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
		hose->io_resource.start += offset;
		hose->io_resource.end += offset;
		printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
		       hose->global_number,
		       hose->io_resource.start, hose->io_resource.end);
	}
}
1166 | |||
/*
 * 64-bit PowerMac PCI initialization: probe the root host bridges,
 * fix up PHB I/O resources, link OF nodes to PHBs, and renumber the
 * U3 AGP bus.
 */
void __init pmac_pci_init(void)
{
	struct device_node *np, *root;
	struct device_node *ht = NULL;

	/* Probe root PCI hosts, that is on U3 the AGP host and the
	 * HyperTransport host. That one is actually "kept" around
	 * and actually added last as its resource management relies
	 * on the AGP resources to have been setup first
	 */
	root = of_find_node_by_path("/");
	if (root == NULL) {
		printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
		return;
	}
	for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
		if (np->name == NULL)
			continue;
		if (strcmp(np->name, "pci") == 0) {
			/* keep a reference on nodes successfully added */
			if (add_bridge(np) == 0)
				of_node_get(np);
		}
		if (strcmp(np->name, "ht") == 0) {
			of_node_get(np);
			ht = np;
		}
	}
	of_node_put(root);

	/* Now setup the HyperTransport host if we found any
	 */
	if (ht && add_bridge(ht) != 0)
		of_node_put(ht);

	/* Fixup the IO resources on our host bridges as the common code
	 * does it only for childs of the host bridges
	 */
	pmac_fixup_phb_resources();

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
	 * assume there is no P2P bridge on the AGP bus, which should be a
	 * safe assumptions hopefully.
	 */
	if (u3_agp) {
		struct device_node *np = u3_agp->arch_data;
		PCI_DN(np)->busno = 0xf0;
		for (np = np->child; np; np = np->sibling)
			PCI_DN(np)->busno = 0xf0;
	}

	pmac_check_ht_link();

	/* Tell pci.c to not use the common resource allocation mechanism */
	pci_probe_only = 1;

	/* Allow all IO */
	io_page_mask = -1;
}
1228 | #endif | ||
1229 | |||
1230 | #ifdef CONFIG_PPC32 | ||
1231 | void pmac_pci_fixup_cardbus(struct pci_dev* dev) | ||
1232 | { | ||
1233 | if (_machine != _MACH_Pmac) | ||
1234 | return; | ||
1235 | /* | ||
1236 | * Fix the interrupt routing on the various cardbus bridges | ||
1237 | * used on powerbooks | ||
1238 | */ | ||
1239 | if (dev->vendor != PCI_VENDOR_ID_TI) | ||
1240 | return; | ||
1241 | if (dev->device == PCI_DEVICE_ID_TI_1130 || | ||
1242 | dev->device == PCI_DEVICE_ID_TI_1131) { | ||
1243 | u8 val; | ||
1244 | /* Enable PCI interrupt */ | ||
1245 | if (pci_read_config_byte(dev, 0x91, &val) == 0) | ||
1246 | pci_write_config_byte(dev, 0x91, val | 0x30); | ||
1247 | /* Disable ISA interrupt mode */ | ||
1248 | if (pci_read_config_byte(dev, 0x92, &val) == 0) | ||
1249 | pci_write_config_byte(dev, 0x92, val & ~0x06); | ||
1250 | } | ||
1251 | if (dev->device == PCI_DEVICE_ID_TI_1210 || | ||
1252 | dev->device == PCI_DEVICE_ID_TI_1211 || | ||
1253 | dev->device == PCI_DEVICE_ID_TI_1410 || | ||
1254 | dev->device == PCI_DEVICE_ID_TI_1510) { | ||
1255 | u8 val; | ||
1256 | /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA | ||
1257 | signal out the MFUNC0 pin */ | ||
1258 | if (pci_read_config_byte(dev, 0x8c, &val) == 0) | ||
1259 | pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2); | ||
1260 | /* Disable ISA interrupt mode */ | ||
1261 | if (pci_read_config_byte(dev, 0x92, &val) == 0) | ||
1262 | pci_write_config_byte(dev, 0x92, val & ~0x06); | ||
1263 | } | ||
1264 | } | ||
1265 | |||
1266 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus); | ||
1267 | |||
/*
 * Force any PCI ATA controller on a PowerMac into fully native mode
 * by setting the "native" bits (0x5) in the programming-interface
 * byte (PCI_CLASS_PROG), then reading it back to verify the write
 * stuck (some chips have it read-only).
 */
void pmac_pci_fixup_pciata(struct pci_dev* dev)
{
	u8 progif = 0;

	/*
	 * On PowerMacs, we try to switch any PCI ATA controller to
	 * fully native mode
	 */
	if (_machine != _MACH_Pmac)
		return;
	/* Some controllers don't have the class IDE */
	if (dev->vendor == PCI_VENDOR_ID_PROMISE)
		switch(dev->device) {
		case PCI_DEVICE_ID_PROMISE_20246:
		case PCI_DEVICE_ID_PROMISE_20262:
		case PCI_DEVICE_ID_PROMISE_20263:
		case PCI_DEVICE_ID_PROMISE_20265:
		case PCI_DEVICE_ID_PROMISE_20267:
		case PCI_DEVICE_ID_PROMISE_20268:
		case PCI_DEVICE_ID_PROMISE_20269:
		case PCI_DEVICE_ID_PROMISE_20270:
		case PCI_DEVICE_ID_PROMISE_20271:
		case PCI_DEVICE_ID_PROMISE_20275:
		case PCI_DEVICE_ID_PROMISE_20276:
		case PCI_DEVICE_ID_PROMISE_20277:
			goto good;
		}
	/* Others, check PCI class */
	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
		return;
good:
	pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
	if ((progif & 5) != 5) {
		printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
		(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
		/* read back: some chips ignore writes to PROGIF */
		if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
		    (progif & 5) != 5)
			printk(KERN_ERR "Rewrite of PROGIF failed !\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1309 | #endif | ||
1310 | |||
1311 | /* | ||
1312 | * Disable second function on K2-SATA, it's broken | ||
1313 | * and disable IO BARs on first one | ||
1314 | */ | ||
1315 | static void fixup_k2_sata(struct pci_dev* dev) | ||
1316 | { | ||
1317 | int i; | ||
1318 | u16 cmd; | ||
1319 | |||
1320 | if (PCI_FUNC(dev->devfn) > 0) { | ||
1321 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
1322 | cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY); | ||
1323 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
1324 | for (i = 0; i < 6; i++) { | ||
1325 | dev->resource[i].start = dev->resource[i].end = 0; | ||
1326 | dev->resource[i].flags = 0; | ||
1327 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); | ||
1328 | } | ||
1329 | } else { | ||
1330 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
1331 | cmd &= ~PCI_COMMAND_IO; | ||
1332 | pci_write_config_word(dev, PCI_COMMAND, cmd); | ||
1333 | for (i = 0; i < 5; i++) { | ||
1334 | dev->resource[i].start = dev->resource[i].end = 0; | ||
1335 | dev->resource[i].flags = 0; | ||
1336 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); | ||
1337 | } | ||
1338 | } | ||
1339 | } | ||
1340 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata); | ||
1341 | |||
diff --git a/arch/powerpc/platforms/powermac/pmac_pic.c b/arch/powerpc/platforms/powermac/pmac_pic.c new file mode 100644 index 000000000000..bf3e1899a4cc --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_pic.c | |||
@@ -0,0 +1,655 @@ | |||
1 | /* | ||
2 | * Support for the interrupt controllers found on Power Macintosh, | ||
3 | * currently Apple's "Grand Central" interrupt controller in all | ||
4 | * it's incarnations. OpenPIC support used on newer machines is | ||
5 | * in a separate file | ||
6 | * | ||
7 | * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) | ||
8 | * | ||
9 | * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/stddef.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/signal.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/sysdev.h> | ||
26 | #include <linux/adb.h> | ||
27 | #include <linux/pmu.h> | ||
28 | |||
29 | #include <asm/sections.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/smp.h> | ||
32 | #include <asm/prom.h> | ||
33 | #include <asm/pci-bridge.h> | ||
34 | #include <asm/time.h> | ||
35 | #include <asm/open_pic.h> | ||
36 | #include <asm/xmon.h> | ||
37 | #include <asm/pmac_feature.h> | ||
38 | #include <asm/mpic.h> | ||
39 | |||
40 | #include "pmac_pic.h" | ||
41 | |||
42 | /* | ||
43 | * XXX this should be in xmon.h, but putting it there means xmon.h | ||
44 | * has to include <linux/interrupt.h> (to get irqreturn_t), which | ||
45 | * causes all sorts of problems. -- paulus | ||
46 | */ | ||
47 | extern irqreturn_t xmon_irq(int, void *, struct pt_regs *); | ||
48 | |||
49 | struct pmac_irq_hw { | ||
50 | unsigned int event; | ||
51 | unsigned int enable; | ||
52 | unsigned int ack; | ||
53 | unsigned int level; | ||
54 | }; | ||
55 | |||
56 | /* Default addresses */ | ||
57 | static volatile struct pmac_irq_hw *pmac_irq_hw[4] = { | ||
58 | (struct pmac_irq_hw *) 0xf3000020, | ||
59 | (struct pmac_irq_hw *) 0xf3000010, | ||
60 | (struct pmac_irq_hw *) 0xf4000020, | ||
61 | (struct pmac_irq_hw *) 0xf4000010, | ||
62 | }; | ||
63 | |||
64 | #define GC_LEVEL_MASK 0x3ff00000 | ||
65 | #define OHARE_LEVEL_MASK 0x1ff00000 | ||
66 | #define HEATHROW_LEVEL_MASK 0x1ff00000 | ||
67 | |||
68 | static int max_irqs; | ||
69 | static int max_real_irqs; | ||
70 | static u32 level_mask[4]; | ||
71 | |||
72 | static DEFINE_SPINLOCK(pmac_pic_lock); | ||
73 | |||
74 | |||
75 | #define GATWICK_IRQ_POOL_SIZE 10 | ||
76 | static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE]; | ||
77 | |||
78 | /* | ||
79 | * Mark an irq as "lost". This is only used on the pmac | ||
80 | * since it can lose interrupts (see pmac_set_irq_mask). | ||
81 | * -- Cort | ||
82 | */ | ||
83 | void | ||
84 | __set_lost(unsigned long irq_nr, int nokick) | ||
85 | { | ||
86 | if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { | ||
87 | atomic_inc(&ppc_n_lost_interrupts); | ||
88 | if (!nokick) | ||
89 | set_dec(1); | ||
90 | } | ||
91 | } | ||
92 | |||
/*
 * Mask and ack an interrupt: drop it from the cached enable mask,
 * forget any "lost" state, then push the new enable word and ack
 * the event bit, spinning until the controller has taken the write.
 *
 * NOTE(review): the cached-mask/lost-interrupt updates happen before
 * the spinlock is taken — presumably relying on the atomic bitops;
 * kept as-is.
 */
static void
pmac_mask_and_ack_irq(unsigned int irq_nr)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;	/* which 32-bit word of the mask */
	unsigned long flags;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	clear_bit(irq_nr, ppc_cached_irq_mask);
	if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	spin_lock_irqsave(&pmac_pic_lock, flags);
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	do {
		/* make sure ack gets to controller before we enable
		   interrupts */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
117 | |||
/*
 * Push the cached enable mask for irq_nr's word out to the controller
 * and spin until the write has taken effect.
 *
 * If the device is already asserting a level interrupt while its
 * enable bit is being set, the controller does not latch a new event,
 * so the interrupt would be dropped: detect that via the level
 * register and mark the irq "lost" so it gets replayed.
 */
static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;	/* which 32-bit word of the mask */
	unsigned long flags;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	spin_lock_irqsave(&pmac_pic_lock, flags);
	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we
		   return to user */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 */
	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
		__set_lost((ulong)irq_nr, nokicklost);
	spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
147 | |||
148 | /* When an irq gets requested for the first client, if it's an | ||
149 | * edge interrupt, we clear any previous one on the controller | ||
150 | */ | ||
151 | static unsigned int pmac_startup_irq(unsigned int irq_nr) | ||
152 | { | ||
153 | unsigned long bit = 1UL << (irq_nr & 0x1f); | ||
154 | int i = irq_nr >> 5; | ||
155 | |||
156 | if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0) | ||
157 | out_le32(&pmac_irq_hw[i]->ack, bit); | ||
158 | set_bit(irq_nr, ppc_cached_irq_mask); | ||
159 | pmac_set_irq_mask(irq_nr, 0); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static void pmac_mask_irq(unsigned int irq_nr) | ||
165 | { | ||
166 | clear_bit(irq_nr, ppc_cached_irq_mask); | ||
167 | pmac_set_irq_mask(irq_nr, 0); | ||
168 | mb(); | ||
169 | } | ||
170 | |||
171 | static void pmac_unmask_irq(unsigned int irq_nr) | ||
172 | { | ||
173 | set_bit(irq_nr, ppc_cached_irq_mask); | ||
174 | pmac_set_irq_mask(irq_nr, 0); | ||
175 | } | ||
176 | |||
177 | static void pmac_end_irq(unsigned int irq_nr) | ||
178 | { | ||
179 | if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) | ||
180 | && irq_desc[irq_nr].action) { | ||
181 | set_bit(irq_nr, ppc_cached_irq_mask); | ||
182 | pmac_set_irq_mask(irq_nr, 1); | ||
183 | } | ||
184 | } | ||
185 | |||
186 | |||
/* irq controller ops for the primary (Grand Central / OHare /
 * Heathrow) interrupt controller */
struct hw_interrupt_type pmac_pic = {
	.typename = " PMAC-PIC ",
	.startup = pmac_startup_irq,
	.enable = pmac_unmask_irq,
	.disable = pmac_mask_irq,
	.ack = pmac_mask_and_ack_irq,
	.end = pmac_end_irq,
};
195 | |||
/* irq controller ops for the cascaded Gatwick (second mac-io)
 * controller; same handlers, higher irq numbers */
struct hw_interrupt_type gatwick_pic = {
	.typename = " GATWICK ",
	.startup = pmac_startup_irq,
	.enable = pmac_unmask_irq,
	.disable = pmac_mask_irq,
	.ack = pmac_mask_and_ack_irq,
	.end = pmac_end_irq,
};
204 | |||
/*
 * Cascade handler for the Gatwick controller: scan its irq words
 * (from max_irqs down to max_real_irqs) for a pending source and
 * dispatch the highest-numbered one through __do_IRQ().
 */
static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	int irq, bits;

	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
		int i = irq >> 5;
		/* pending events, plus previously "lost" interrupts */
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		__do_IRQ(irq, regs);
		return IRQ_HANDLED;
	}
	printk("gatwick irq not from gatwick pic\n");
	return IRQ_NONE;
}
224 | |||
/*
 * Return the number of the highest pending interrupt on the primary
 * controller(s), a negative value when nothing is pending, or -2 on
 * a secondary CPU where the event was a powersurge IPI (already
 * handled here).
 */
int
pmac_get_irq(struct pt_regs *regs)
{
	int irq;
	unsigned long bits = 0;

#ifdef CONFIG_SMP
	void psurge_smp_message_recv(struct pt_regs *);

	/* IPI's are a hack on the powersurge -- Cort */
	if ( smp_processor_id() != 0 ) {
		psurge_smp_message_recv(regs);
		return -2;	/* ignore, already handled */
	}
#endif /* CONFIG_SMP */
	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		/* pending events, plus previously "lost" interrupts */
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		/* We must read level interrupts from the level register */
		bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		break;
	}

	return irq;
}
254 | |||
255 | /* This routine will fix some missing interrupt values in the device tree | ||
256 | * on the gatwick mac-io controller used by some PowerBooks | ||
257 | */ | ||
258 | static void __init | ||
259 | pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) | ||
260 | { | ||
261 | struct device_node *node; | ||
262 | int count; | ||
263 | |||
264 | memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); | ||
265 | node = gw->child; | ||
266 | count = 0; | ||
267 | while(node) | ||
268 | { | ||
269 | /* Fix SCC */ | ||
270 | if (strcasecmp(node->name, "escc") == 0) | ||
271 | if (node->child) { | ||
272 | if (node->child->n_intrs < 3) { | ||
273 | node->child->intrs = &gatwick_int_pool[count]; | ||
274 | count += 3; | ||
275 | } | ||
276 | node->child->n_intrs = 3; | ||
277 | node->child->intrs[0].line = 15+irq_base; | ||
278 | node->child->intrs[1].line = 4+irq_base; | ||
279 | node->child->intrs[2].line = 5+irq_base; | ||
280 | printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n", | ||
281 | node->child->intrs[0].line, | ||
282 | node->child->intrs[1].line, | ||
283 | node->child->intrs[2].line); | ||
284 | } | ||
285 | /* Fix media-bay & left SWIM */ | ||
286 | if (strcasecmp(node->name, "media-bay") == 0) { | ||
287 | struct device_node* ya_node; | ||
288 | |||
289 | if (node->n_intrs == 0) | ||
290 | node->intrs = &gatwick_int_pool[count++]; | ||
291 | node->n_intrs = 1; | ||
292 | node->intrs[0].line = 29+irq_base; | ||
293 | printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n", | ||
294 | node->intrs[0].line); | ||
295 | |||
296 | ya_node = node->child; | ||
297 | while(ya_node) | ||
298 | { | ||
299 | if (strcasecmp(ya_node->name, "floppy") == 0) { | ||
300 | if (ya_node->n_intrs < 2) { | ||
301 | ya_node->intrs = &gatwick_int_pool[count]; | ||
302 | count += 2; | ||
303 | } | ||
304 | ya_node->n_intrs = 2; | ||
305 | ya_node->intrs[0].line = 19+irq_base; | ||
306 | ya_node->intrs[1].line = 1+irq_base; | ||
307 | printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n", | ||
308 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
309 | } | ||
310 | if (strcasecmp(ya_node->name, "ata4") == 0) { | ||
311 | if (ya_node->n_intrs < 2) { | ||
312 | ya_node->intrs = &gatwick_int_pool[count]; | ||
313 | count += 2; | ||
314 | } | ||
315 | ya_node->n_intrs = 2; | ||
316 | ya_node->intrs[0].line = 14+irq_base; | ||
317 | ya_node->intrs[1].line = 3+irq_base; | ||
318 | printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n", | ||
319 | ya_node->intrs[0].line, ya_node->intrs[1].line); | ||
320 | } | ||
321 | ya_node = ya_node->sibling; | ||
322 | } | ||
323 | } | ||
324 | node = node->sibling; | ||
325 | } | ||
326 | if (count > 10) { | ||
327 | printk("WARNING !! Gatwick interrupt pool overflow\n"); | ||
328 | printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE); | ||
329 | printk(" requested = %d\n", count); | ||
330 | } | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * The PowerBook 3400/2400/3500 can have a combo ethernet/modem | ||
335 | * card which includes an ohare chip that acts as a second interrupt | ||
336 | * controller. If we find this second ohare, set it up and fix the | ||
337 | * interrupt value in the device tree for the ethernet chip. | ||
338 | */ | ||
339 | static int __init enable_second_ohare(void) | ||
340 | { | ||
341 | unsigned char bus, devfn; | ||
342 | unsigned short cmd; | ||
343 | unsigned long addr; | ||
344 | struct device_node *irqctrler = find_devices("pci106b,7"); | ||
345 | struct device_node *ether; | ||
346 | |||
347 | if (irqctrler == NULL || irqctrler->n_addrs <= 0) | ||
348 | return -1; | ||
349 | addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40); | ||
350 | pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20); | ||
351 | max_irqs = 64; | ||
352 | if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) { | ||
353 | struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler); | ||
354 | if (!hose) | ||
355 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | ||
356 | else { | ||
357 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
358 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
359 | cmd &= ~PCI_COMMAND_IO; | ||
360 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
361 | } | ||
362 | } | ||
363 | |||
364 | /* Fix interrupt for the modem/ethernet combo controller. The number | ||
365 | in the device tree (27) is bogus (correct for the ethernet-only | ||
366 | board but not the combo ethernet/modem board). | ||
367 | The real interrupt is 28 on the second controller -> 28+32 = 60. | ||
368 | */ | ||
369 | ether = find_devices("pci1011,14"); | ||
370 | if (ether && ether->n_intrs > 0) { | ||
371 | ether->intrs[0].line = 60; | ||
372 | printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", | ||
373 | ether->intrs[0].line); | ||
374 | } | ||
375 | |||
376 | /* Return the interrupt number of the cascade */ | ||
377 | return irqctrler->intrs[0].line; | ||
378 | } | ||
379 | |||
/* Cascade callback: poll the slave U3 MPIC for its pending irq. */
static int pmac_u3_cascade(struct pt_regs *regs, void *data)
{
	struct mpic *slave = data;

	return mpic_get_one_irq(slave, regs);
}
384 | |||
#ifdef CONFIG_XMON
/* NMI action hooked on irq 20 below to drop into the xmon debugger */
static struct irqaction xmon_action = {
	.handler = xmon_irq,
	.flags = 0,
	.mask = CPU_MASK_NONE,
	.name = "NMI - XMON"
};
#endif
393 | |||
/* Action installed on the irq line the Gatwick cascades through */
static struct irqaction gatwick_cascade_action = {
	.handler = gatwick_action,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "cascade",
};
400 | |||
/*
 * Probe and initialize the PowerMac interrupt controller(s).
 *
 * Newer (Core99) machines carry one or two OpenPICs (MPIC), handled
 * first; otherwise we fall back to the Grand Central / OHare /
 * Heathrow scheme, possibly with a cascaded second controller
 * (a Gatwick, or a second OHare on some PowerBooks).
 */
void __init pmac_pic_init(void)
{
	int i;
	struct device_node *irqctrler = NULL;
	struct device_node *irqctrler2 = NULL;
	struct device_node *np;
	unsigned long addr;
	int irq_cascade = -1;
	struct mpic *mpic1, *mpic2;

	/* We first try to detect Apple's new Core99 chipset, since mac-io
	 * is quite different on those machines and contains an IBM MPIC2.
	 */
	np = find_type_devices("open-pic");
	while (np) {
		if (np->parent && !strcmp(np->parent->name, "u3"))
			irqctrler2 = np;
		else
			irqctrler = np;
		np = np->next;
	}
	if (irqctrler != NULL && irqctrler->n_addrs > 0) {
		unsigned char senses[128];

		printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
		       (unsigned int)irqctrler->addrs[0].address);

		prom_get_irq_senses(senses, 0, 128);
		mpic1 = mpic_alloc(irqctrler->addrs[0].address,
				   MPIC_PRIMARY | MPIC_WANTS_RESET,
				   0, 0, 128, 256, senses, 128, " K2-MPIC ");
		BUG_ON(mpic1 == NULL);
		mpic_init(mpic1);

		if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
		    irqctrler2->n_addrs > 0) {
			printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
			       (u32)irqctrler2->addrs[0].address,
			       irqctrler2->intrs[0].line);

			pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
			prom_get_irq_senses(senses, 128, 128 + 128);

			/* We don't need to set MPIC_BROKEN_U3 here since we don't have
			 * hypertransport interrupts routed to it
			 */
			mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
					   MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
					   0, 128, 128, 0, senses, 128, " U3-MPIC ");
			BUG_ON(mpic2 == NULL);
			mpic_init(mpic2);
			mpic_setup_cascade(irqctrler2->intrs[0].line,
					   pmac_u3_cascade, mpic2);
		}
	}

	/* Get the level/edge settings, assume if it's not
	 * a Grand Central nor an OHare, then it's an Heathrow
	 * (or Paddington).
	 */
	if (find_devices("gc"))
		level_mask[0] = GC_LEVEL_MASK;
	else if (find_devices("ohare")) {
		level_mask[0] = OHARE_LEVEL_MASK;
		/* We might have a second cascaded ohare */
		level_mask[1] = OHARE_LEVEL_MASK;
	} else {
		level_mask[0] = HEATHROW_LEVEL_MASK;
		level_mask[1] = 0;
		/* We might have a second cascaded heathrow */
		level_mask[2] = HEATHROW_LEVEL_MASK;
		level_mask[3] = 0;
	}

	/*
	 * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
	 * 1998 G3 Series PowerBooks have 128,
	 * other powermacs have 32.
	 * The combo ethernet/modem card for the Powerstar powerbooks
	 * (2400/3400/3500, ohare based) has a second ohare chip
	 * effectively making a total of 64.
	 */
	max_irqs = max_real_irqs = 32;
	irqctrler = find_devices("mac-io");
	if (irqctrler)
	{
		max_real_irqs = 64;
		if (irqctrler->next)
			max_irqs = 128;
		else
			max_irqs = 64;
	}
	for ( i = 0; i < max_real_irqs ; i++ )
		irq_desc[i].handler = &pmac_pic;

	/* get addresses of first controller */
	if (irqctrler) {
		if (irqctrler->n_addrs > 0) {
			addr = (unsigned long)
				ioremap(irqctrler->addrs[0].address, 0x40);
			for (i = 0; i < 2; ++i)
				pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
					(addr + (2 - i) * 0x10);
		}

		/* get addresses of second controller */
		irqctrler = irqctrler->next;
		if (irqctrler && irqctrler->n_addrs > 0) {
			addr = (unsigned long)
				ioremap(irqctrler->addrs[0].address, 0x40);
			for (i = 2; i < 4; ++i)
				pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
					(addr + (4 - i) * 0x10);
			irq_cascade = irqctrler->intrs[0].line;
			if (device_is_compatible(irqctrler, "gatwick"))
				pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
		}
	} else {
		/* older powermacs have a GC (grand central) or ohare at
		   f3000000, with interrupt control registers at f3000020. */
		addr = (unsigned long) ioremap(0xf3000000, 0x40);
		pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
	}

	/* PowerBooks 3400 and 3500 can have a second controller in a second
	   ohare chip, on the combo ethernet/modem card */
	if (machine_is_compatible("AAPL,3400/2400")
	     || machine_is_compatible("AAPL,3500"))
		irq_cascade = enable_second_ohare();

	/* disable all interrupts in all controllers */
	for (i = 0; i * 32 < max_irqs; ++i)
		out_le32(&pmac_irq_hw[i]->enable, 0);
	/* mark level interrupts */
	for (i = 0; i < max_irqs; i++)
		if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
			irq_desc[i].status = IRQ_LEVEL;

	/* get interrupt line of secondary interrupt controller */
	if (irq_cascade >= 0) {
		printk(KERN_INFO "irq: secondary controller on irq %d\n",
			(int)irq_cascade);
		for ( i = max_real_irqs ; i < max_irqs ; i++ )
			irq_desc[i].handler = &gatwick_pic;
		setup_irq(irq_cascade, &gatwick_cascade_action);
	}
	printk("System has %d possible interrupts\n", max_irqs);
	if (max_irqs != max_real_irqs)
		printk(KERN_DEBUG "%d interrupts on main controller\n",
		       max_real_irqs);

#ifdef CONFIG_XMON
	setup_irq(20, &xmon_action);
#endif	/* CONFIG_XMON */
}
556 | |||
557 | #ifdef CONFIG_PM | ||
558 | /* | ||
559 | * These procedures are used in implementing sleep on the powerbooks. | ||
560 | * sleep_save_intrs() saves the states of all interrupt enables | ||
561 | * and disables all interrupts except for the nominated one. | ||
562 | * sleep_restore_intrs() restores the states of all interrupt enables. | ||
563 | */ | ||
/* Copy of ppc_cached_irq_mask saved across a sleep cycle */
unsigned long sleep_save_mask[2];
565 | |||
/* This used to be passed by the PMU driver but that link got
 * broken with the new driver model. We use this tweak for now...
 *
 * Returns the via-pmu interrupt line on OHare-based machines, or -1
 * when it cannot be determined (or PMU support is not configured).
 *
 * Restructured without the goto so that no unused "not_found" label
 * remains (and triggers a compiler warning) when CONFIG_ADB_PMU is
 * not set.
 */
static int pmacpic_find_viaint(void)
{
	int viaint = -1;

#ifdef CONFIG_ADB_PMU
	struct device_node *np;

	if (pmu_get_model() == PMU_OHARE_BASED) {
		np = of_find_node_by_name(NULL, "via-pmu");
		if (np != NULL)
			viaint = np->intrs[0].line;
	}
#endif /* CONFIG_ADB_PMU */

	return viaint;
}
587 | |||
/*
 * Suspend: save the current interrupt enables in sleep_save_mask and
 * mask everything except (possibly) the via-pmu interrupt, so the PMU
 * can still signal on OHare-based machines during sleep.
 */
static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
{
	int viaint = pmacpic_find_viaint();

	sleep_save_mask[0] = ppc_cached_irq_mask[0];
	sleep_save_mask[1] = ppc_cached_irq_mask[1];
	ppc_cached_irq_mask[0] = 0;
	ppc_cached_irq_mask[1] = 0;
	if (viaint > 0)
		set_bit(viaint, ppc_cached_irq_mask);
	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
	(void)in_le32(&pmac_irq_hw[0]->event);
	/* make sure mask gets to controller before we return to caller */
	mb();
	(void)in_le32(&pmac_irq_hw[0]->enable);

	return 0;
}
608 | |||
609 | static int pmacpic_resume(struct sys_device *sysdev) | ||
610 | { | ||
611 | int i; | ||
612 | |||
613 | out_le32(&pmac_irq_hw[0]->enable, 0); | ||
614 | if (max_real_irqs > 32) | ||
615 | out_le32(&pmac_irq_hw[1]->enable, 0); | ||
616 | mb(); | ||
617 | for (i = 0; i < max_real_irqs; ++i) | ||
618 | if (test_bit(i, sleep_save_mask)) | ||
619 | pmac_unmask_irq(i); | ||
620 | |||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | #endif /* CONFIG_PM */ | ||
625 | |||
/* sysdev glue: registers the PIC so its suspend/resume hooks run
 * with the rest of the system devices */
static struct sysdev_class pmacpic_sysclass = {
	set_kset_name("pmac_pic"),
};

static struct sys_device device_pmacpic = {
	.id = 0,
	.cls = &pmacpic_sysclass,
};

static struct sysdev_driver driver_pmacpic = {
#ifdef CONFIG_PM
	.suspend = &pmacpic_suspend,
	.resume = &pmacpic_resume,
#endif /* CONFIG_PM */
};
641 | |||
/*
 * Register the PIC with sysfs so the suspend/resume callbacks run.
 * Returns -ENODEV when no pmac PIC was probed (max_irqs == 0).
 *
 * NOTE(review): registration return codes are ignored here.
 */
static int __init init_pmacpic_sysfs(void)
{
	if (max_irqs == 0)
		return -ENODEV;

	printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
	sysdev_class_register(&pmacpic_sysclass);
	sysdev_register(&device_pmacpic);
	sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
	return 0;
}

subsys_initcall(init_pmacpic_sysfs);
655 | |||
diff --git a/arch/powerpc/platforms/powermac/pmac_pic.h b/arch/powerpc/platforms/powermac/pmac_pic.h new file mode 100644 index 000000000000..664103dfeef9 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_pic.h | |||
@@ -0,0 +1,11 @@ | |||
#ifndef __PPC_PLATFORMS_PMAC_PIC_H
#define __PPC_PLATFORMS_PMAC_PIC_H

#include <linux/irq.h>

/* irq controller ops for the PowerMac (Grand Central / OHare /
 * Heathrow) interrupt controller */
extern struct hw_interrupt_type pmac_pic;

/* Probe and initialize the PowerMac interrupt controller(s) */
void pmac_pic_init(void);
/* Return the pending irq number (negative when none is pending) */
int pmac_get_irq(struct pt_regs *regs);

#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac_setup.c b/arch/powerpc/platforms/powermac/pmac_setup.c new file mode 100644 index 000000000000..dbc921a084cd --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_setup.c | |||
@@ -0,0 +1,662 @@ | |||
1 | /* | ||
2 | * arch/ppc/platforms/setup.c | ||
3 | * | ||
4 | * PowerPC version | ||
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
6 | * | ||
7 | * Adapted for Power Macintosh by Paul Mackerras | ||
8 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) | ||
9 | * | ||
10 | * Derived from "arch/alpha/kernel/setup.c" | ||
11 | * Copyright (C) 1995 Linus Torvalds | ||
12 | * | ||
13 | * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * bootup setup stuff.. | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/sched.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/stddef.h> | ||
33 | #include <linux/unistd.h> | ||
34 | #include <linux/ptrace.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/user.h> | ||
37 | #include <linux/a.out.h> | ||
38 | #include <linux/tty.h> | ||
39 | #include <linux/string.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/ioport.h> | ||
42 | #include <linux/major.h> | ||
43 | #include <linux/initrd.h> | ||
44 | #include <linux/vt_kern.h> | ||
45 | #include <linux/console.h> | ||
46 | #include <linux/ide.h> | ||
47 | #include <linux/pci.h> | ||
48 | #include <linux/adb.h> | ||
49 | #include <linux/cuda.h> | ||
50 | #include <linux/pmu.h> | ||
51 | #include <linux/irq.h> | ||
52 | #include <linux/seq_file.h> | ||
53 | #include <linux/root_dev.h> | ||
54 | #include <linux/bitops.h> | ||
55 | #include <linux/suspend.h> | ||
56 | |||
57 | #include <asm/reg.h> | ||
58 | #include <asm/sections.h> | ||
59 | #include <asm/prom.h> | ||
60 | #include <asm/system.h> | ||
61 | #include <asm/pgtable.h> | ||
62 | #include <asm/io.h> | ||
63 | #include <asm/pci-bridge.h> | ||
64 | #include <asm/ohare.h> | ||
65 | #include <asm/mediabay.h> | ||
66 | #include <asm/machdep.h> | ||
67 | #include <asm/dma.h> | ||
68 | #include <asm/bootx.h> | ||
69 | #include <asm/cputable.h> | ||
70 | #include <asm/btext.h> | ||
71 | #include <asm/pmac_feature.h> | ||
72 | #include <asm/time.h> | ||
73 | #include <asm/of_device.h> | ||
74 | #include <asm/mmu_context.h> | ||
75 | |||
76 | #include "pmac_pic.h" | ||
77 | |||
78 | #undef SHOW_GATWICK_IRQS | ||
79 | |||
80 | extern long pmac_time_init(void); | ||
81 | extern unsigned long pmac_get_rtc_time(void); | ||
82 | extern int pmac_set_rtc_time(unsigned long nowtime); | ||
83 | extern void pmac_read_rtc_time(void); | ||
84 | extern void pmac_calibrate_decr(void); | ||
85 | extern void pmac_pcibios_fixup(void); | ||
86 | extern void pmac_find_bridges(void); | ||
87 | extern unsigned long pmac_ide_get_base(int index); | ||
88 | extern void pmac_ide_init_hwif_ports(hw_regs_t *hw, | ||
89 | unsigned long data_port, unsigned long ctrl_port, int *irq); | ||
90 | |||
91 | extern void pmac_nvram_update(void); | ||
92 | extern unsigned char pmac_nvram_read_byte(int addr); | ||
93 | extern void pmac_nvram_write_byte(int addr, unsigned char val); | ||
94 | extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial); | ||
95 | extern void pmac_pcibios_after_init(void); | ||
96 | extern int of_show_percpuinfo(struct seq_file *m, int i); | ||
97 | |||
98 | unsigned char drive_info; | ||
99 | |||
100 | int ppc_override_l2cr = 0; | ||
101 | int ppc_override_l2cr_value; | ||
102 | int has_l2cache = 0; | ||
103 | |||
104 | static int current_root_goodness = -1; | ||
105 | |||
106 | extern int pmac_newworld; | ||
107 | |||
108 | #define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ | ||
109 | |||
110 | extern void zs_kgdb_hook(int tty_num); | ||
111 | static void ohare_init(void); | ||
112 | #ifdef CONFIG_BOOTX_TEXT | ||
113 | static void pmac_progress(char *s, unsigned short hex); | ||
114 | #endif | ||
115 | |||
116 | sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; | ||
117 | |||
118 | #ifdef CONFIG_SMP | ||
119 | extern struct smp_ops_t psurge_smp_ops; | ||
120 | extern struct smp_ops_t core99_smp_ops; | ||
121 | #endif /* CONFIG_SMP */ | ||
122 | |||
123 | static int | ||
124 | pmac_show_cpuinfo(struct seq_file *m) | ||
125 | { | ||
126 | struct device_node *np; | ||
127 | char *pp; | ||
128 | int plen; | ||
129 | int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, | ||
130 | NULL, PMAC_MB_INFO_MODEL, 0); | ||
131 | unsigned int mbflags = (unsigned int)pmac_call_feature(PMAC_FTR_GET_MB_INFO, | ||
132 | NULL, PMAC_MB_INFO_FLAGS, 0); | ||
133 | char* mbname; | ||
134 | |||
135 | if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (int)&mbname) != 0) | ||
136 | mbname = "Unknown"; | ||
137 | |||
138 | /* find motherboard type */ | ||
139 | seq_printf(m, "machine\t\t: "); | ||
140 | np = find_devices("device-tree"); | ||
141 | if (np != NULL) { | ||
142 | pp = (char *) get_property(np, "model", NULL); | ||
143 | if (pp != NULL) | ||
144 | seq_printf(m, "%s\n", pp); | ||
145 | else | ||
146 | seq_printf(m, "PowerMac\n"); | ||
147 | pp = (char *) get_property(np, "compatible", &plen); | ||
148 | if (pp != NULL) { | ||
149 | seq_printf(m, "motherboard\t:"); | ||
150 | while (plen > 0) { | ||
151 | int l = strlen(pp) + 1; | ||
152 | seq_printf(m, " %s", pp); | ||
153 | plen -= l; | ||
154 | pp += l; | ||
155 | } | ||
156 | seq_printf(m, "\n"); | ||
157 | } | ||
158 | } else | ||
159 | seq_printf(m, "PowerMac\n"); | ||
160 | |||
161 | /* print parsed model */ | ||
162 | seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname); | ||
163 | seq_printf(m, "pmac flags\t: %08x\n", mbflags); | ||
164 | |||
165 | /* find l2 cache info */ | ||
166 | np = find_devices("l2-cache"); | ||
167 | if (np == 0) | ||
168 | np = find_type_devices("cache"); | ||
169 | if (np != 0) { | ||
170 | unsigned int *ic = (unsigned int *) | ||
171 | get_property(np, "i-cache-size", NULL); | ||
172 | unsigned int *dc = (unsigned int *) | ||
173 | get_property(np, "d-cache-size", NULL); | ||
174 | seq_printf(m, "L2 cache\t:"); | ||
175 | has_l2cache = 1; | ||
176 | if (get_property(np, "cache-unified", NULL) != 0 && dc) { | ||
177 | seq_printf(m, " %dK unified", *dc / 1024); | ||
178 | } else { | ||
179 | if (ic) | ||
180 | seq_printf(m, " %dK instruction", *ic / 1024); | ||
181 | if (dc) | ||
182 | seq_printf(m, "%s %dK data", | ||
183 | (ic? " +": ""), *dc / 1024); | ||
184 | } | ||
185 | pp = get_property(np, "ram-type", NULL); | ||
186 | if (pp) | ||
187 | seq_printf(m, " %s", pp); | ||
188 | seq_printf(m, "\n"); | ||
189 | } | ||
190 | |||
191 | /* find ram info */ | ||
192 | np = find_devices("memory"); | ||
193 | if (np != 0) { | ||
194 | int n; | ||
195 | struct reg_property *reg = (struct reg_property *) | ||
196 | get_property(np, "reg", &n); | ||
197 | |||
198 | if (reg != 0) { | ||
199 | unsigned long total = 0; | ||
200 | |||
201 | for (n /= sizeof(struct reg_property); n > 0; --n) | ||
202 | total += (reg++)->size; | ||
203 | seq_printf(m, "memory\t\t: %luMB\n", total >> 20); | ||
204 | } | ||
205 | } | ||
206 | |||
207 | /* Checks "l2cr-value" property in the registry */ | ||
208 | np = find_devices("cpus"); | ||
209 | if (np == 0) | ||
210 | np = find_type_devices("cpu"); | ||
211 | if (np != 0) { | ||
212 | unsigned int *l2cr = (unsigned int *) | ||
213 | get_property(np, "l2cr-value", NULL); | ||
214 | if (l2cr != 0) { | ||
215 | seq_printf(m, "l2cr override\t: 0x%x\n", *l2cr); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* Indicate newworld/oldworld */ | ||
220 | seq_printf(m, "pmac-generation\t: %s\n", | ||
221 | pmac_newworld ? "NewWorld" : "OldWorld"); | ||
222 | |||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
/*
 * Per-CPU /proc/cpuinfo hook: report the live clock from cpufreq when
 * it is available, otherwise fall back to the generic device-tree
 * per-CPU report.
 */
static int
pmac_show_percpuinfo(struct seq_file *m, int i)
{
#ifdef CONFIG_CPU_FREQ_PMAC
	extern unsigned int pmac_get_one_cpufreq(int i);
	unsigned int khz = pmac_get_one_cpufreq(i);

	if (khz) {
		seq_printf(m, "clock\t\t: %dMHz\n", khz / 1000);
		return 0;
	}
#endif /* CONFIG_CPU_FREQ_PMAC */
	return of_show_percpuinfo(m, i);
}
240 | |||
241 | static volatile u32 *sysctrl_regs; | ||
242 | |||
/*
 * Main platform setup (installed as ppc_md.setup_arch): estimates
 * loops_per_jiffy from the device tree, maps the sysctrl register
 * block, probes the PCI host bridges, applies any "l2cr-value"
 * override, hooks up the CUDA/PMU system controllers, picks the
 * root device, and selects the SMP ops.
 */
void __init
pmac_setup_arch(void)
{
	struct device_node *cpu;
	int *fp;
	unsigned long pvr;

	pvr = PVR_VER(mfspr(SPRN_PVR));

	/* Set loops_per_jiffy to a half-way reasonable value,
	   for use until calibrate_delay gets called. */
	cpu = find_type_devices("cpu");
	if (cpu != 0) {
		fp = (int *) get_property(cpu, "clock-frequency", NULL);
		if (fp != 0) {
			if (pvr == 4 || pvr >= 8)
				/* 604, G3, G4 etc. */
				loops_per_jiffy = *fp / HZ;
			else
				/* 601, 603, etc. */
				loops_per_jiffy = *fp / (2*HZ);
		} else
			loops_per_jiffy = 50000000 / HZ;
	}

	/* this area has the CPU identification register
	   and some registers used by smp boards */
	sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
	ohare_init();

	/* Lookup PCI hosts */
	pmac_find_bridges();

	/* Checks "l2cr-value" property in the registry */
	if (cpu_has_feature(CPU_FTR_L2CR)) {
		struct device_node *np = find_devices("cpus");
		if (np == 0)
			np = find_type_devices("cpu");
		if (np != 0) {
			unsigned int *l2cr = (unsigned int *)
				get_property(np, "l2cr-value", NULL);
			if (l2cr != 0) {
				ppc_override_l2cr = 1;
				ppc_override_l2cr_value = *l2cr;
				/* reset, then program the firmware-supplied value */
				_set_L2CR(0);
				_set_L2CR(ppc_override_l2cr_value);
			}
		}
	}

	if (ppc_override_l2cr)
		printk(KERN_INFO "L2CR overriden (0x%x), backside cache is %s\n",
			ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000)
				? "enabled" : "disabled");

#ifdef CONFIG_KGDB
	zs_kgdb_hook(0);
#endif

	/* Probe (or warn about) the system controller */
#ifdef CONFIG_ADB_CUDA
	find_via_cuda();
#else
	if (find_devices("via-cuda")) {
		printk("WARNING ! Your machine is Cuda based but your kernel\n");
		printk("          wasn't compiled with CONFIG_ADB_CUDA option !\n");
	}
#endif
#ifdef CONFIG_ADB_PMU
	find_via_pmu();
#else
	if (find_devices("via-pmu")) {
		printk("WARNING ! Your machine is PMU based but your kernel\n");
		printk("          wasn't compiled with CONFIG_ADB_PMU option !\n");
	}
#endif
#ifdef CONFIG_NVRAM
	pmac_nvram_init();
#endif
	/* note_bootable_part() may refine this choice later */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
		ROOT_DEV = DEFAULT_ROOT_DEVICE;

#ifdef CONFIG_SMP
	/* Check for Core99 */
	if (find_devices("uni-n") || find_devices("u3"))
		ppc_md.smp_ops = &core99_smp_ops;
	else
		ppc_md.smp_ops = &psurge_smp_ops;
#endif /* CONFIG_SMP */

	pci_create_OF_bus_map();
}
338 | |||
339 | static void __init ohare_init(void) | ||
340 | { | ||
341 | /* | ||
342 | * Turn on the L2 cache. | ||
343 | * We assume that we have a PSX memory controller iff | ||
344 | * we have an ohare I/O controller. | ||
345 | */ | ||
346 | if (find_devices("ohare") != NULL) { | ||
347 | if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) { | ||
348 | if (sysctrl_regs[4] & 0x10) | ||
349 | sysctrl_regs[4] |= 0x04000020; | ||
350 | else | ||
351 | sysctrl_regs[4] |= 0x04000000; | ||
352 | if(has_l2cache) | ||
353 | printk(KERN_INFO "Level 2 cache enabled\n"); | ||
354 | } | ||
355 | } | ||
356 | } | ||
357 | |||
358 | extern char *bootpath; | ||
359 | extern char *bootdevice; | ||
360 | void *boot_host; | ||
361 | int boot_target; | ||
362 | int boot_part; | ||
363 | extern dev_t boot_dev; | ||
364 | |||
#ifdef CONFIG_SCSI
/*
 * Called as each SCSI host is probed; remembers the host that owns
 * the OF boot device (plus its target and partition numbers) so the
 * root device can be located later.
 */
void __init
note_scsi_host(struct device_node *node, void *host)
{
	size_t len;
	char *s;

	if (bootpath == NULL || bootdevice == NULL)
		return;
	len = strlen(node->full_name);
	if (strncmp(node->full_name, bootdevice, len) != 0)
		return;
	if (bootdevice[len] != '/' && bootdevice[len] != 0)
		return;

	boot_host = host;
	/*
	 * There's a bug in OF 1.0.5. (Why am I not surprised.)
	 * If you pass a path like scsi/sd@1:0 to canon, it returns
	 * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
	 * That is, the scsi target number doesn't get preserved.
	 * So we pick the target number out of bootpath and use that.
	 */
	s = strstr(bootpath, "/sd@");
	if (s == NULL)
		return;
	s += 4;
	boot_target = simple_strtoul(s, NULL, 10);
	s = strchr(s, ':');
	if (s != NULL)
		boot_part = simple_strtoul(s + 1, NULL, 10);
}
#endif
395 | |||
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
/*
 * Translate the OF boot device path into a dev_t for an IDE device;
 * returns 0 when the boot device is unknown or not IDE.
 */
static dev_t __init
find_ide_boot(void)
{
	dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
	char *slash;

	if (bootdevice == NULL)
		return 0;
	slash = strrchr(bootdevice, '/');
	if (slash == NULL)
		return 0;

	/* compare only the directory part of the path */
	return pmac_find_ide_boot(bootdevice, slash - bootdevice);
}
#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
414 | |||
/* Resolve the firmware boot device into boot_dev (IDE only, for now). */
static void __init
find_boot_device(void)
{
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
	boot_dev = find_ide_boot();
#endif
}
422 | |||
423 | static int initializing = 1; | ||
424 | /* TODO: Merge the suspend-to-ram with the common code !!! | ||
425 | * currently, this is a stub implementation for suspend-to-disk | ||
426 | * only | ||
427 | */ | ||
428 | |||
429 | #ifdef CONFIG_SOFTWARE_SUSPEND | ||
430 | |||
/* pm_ops.prepare hook (suspend-to-disk stub): trace only, always 0. */
static int pmac_pm_prepare(suspend_state_t state)
{
	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);

	return 0;
}
437 | |||
/*
 * pm_ops.enter hook (suspend-to-disk stub): flushes lazy FPU/AltiVec
 * state into the thread structs so the low-level code need not save it.
 */
static int pmac_pm_enter(suspend_state_t state)
{
	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);

	/* Giveup the lazy FPU & vec so we don't have to back them
	 * up from the low level code
	 */
	enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
	if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC)
		enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

	return 0;
}
454 | |||
/*
 * pm_ops.finish hook (suspend-to-disk stub): re-installs the current
 * task's MMU context after resume.
 */
static int pmac_pm_finish(suspend_state_t state)
{
	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);

	/* Restore userland MMU context */
	set_context(current->active_mm->context, current->active_mm->pgd);

	return 0;
}
464 | |||
/* Suspend-to-disk ops registered by pmac_late_init(); the machine is
 * simply shut down once the image has been written. */
static struct pm_ops pmac_pm_ops = {
	.pm_disk_mode	= PM_DISK_SHUTDOWN,
	.prepare	= pmac_pm_prepare,
	.enter		= pmac_pm_enter,
	.finish		= pmac_pm_finish,
};
471 | |||
472 | #endif /* CONFIG_SOFTWARE_SUSPEND */ | ||
473 | |||
474 | static int pmac_late_init(void) | ||
475 | { | ||
476 | initializing = 0; | ||
477 | #ifdef CONFIG_SOFTWARE_SUSPEND | ||
478 | pm_set_ops(&pmac_pm_ops); | ||
479 | #endif /* CONFIG_SOFTWARE_SUSPEND */ | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | late_initcall(pmac_late_init); | ||
484 | |||
/* can't be __init - can be called whenever a disk is first accessed */
/*
 * Elect the root device: called for each bootable partition found
 * while disks are probed during boot.  A partition wins if its
 * "goodness" beats the current best, unless the user supplied an
 * explicit root= on the command line or boot has finished.
 */
void
note_bootable_part(dev_t dev, int part, int goodness)
{
	static int found_boot = 0;
	char *p;

	if (!initializing)
		return;
	if ((goodness <= current_root_goodness) &&
	    ROOT_DEV != DEFAULT_ROOT_DEVICE)
		return;
	/* respect an explicit root= option on the command line */
	p = strstr(saved_command_line, "root=");
	if (p != NULL && (p == saved_command_line || p[-1] == ' '))
		return;

	if (!found_boot) {
		find_boot_device();
		found_boot = 1;
	}
	/* accept any candidate when the firmware boot device is unknown */
	if (!boot_dev || dev == boot_dev) {
		ROOT_DEV = dev + part;
		boot_dev = 0;
		current_root_goodness = goodness;
	}
}
511 | |||
/*
 * Reboot through whichever system controller (CUDA or PMU) this
 * machine uses; silently does nothing when the controller is unknown.
 */
static void
pmac_restart(char *cmd)
{
#ifdef CONFIG_ADB_CUDA
	struct adb_request req;

	if (sys_ctrler == SYS_CTRLER_CUDA) {
		cuda_request(&req, NULL, 2, CUDA_PACKET,
			     CUDA_RESET_SYSTEM);
		/* spin until the controller cuts power to us */
		for (;;)
			cuda_poll();
	}
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
	if (sys_ctrler == SYS_CTRLER_PMU)
		pmu_restart();
#endif /* CONFIG_ADB_PMU */
}
536 | |||
/*
 * Power down through whichever system controller (CUDA or PMU) this
 * machine uses; silently does nothing when the controller is unknown.
 */
static void
pmac_power_off(void)
{
#ifdef CONFIG_ADB_CUDA
	struct adb_request req;

	if (sys_ctrler == SYS_CTRLER_CUDA) {
		cuda_request(&req, NULL, 2, CUDA_PACKET,
			     CUDA_POWERDOWN);
		/* spin until the controller cuts power to us */
		for (;;)
			cuda_poll();
	}
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
	if (sys_ctrler == SYS_CTRLER_PMU)
		pmu_shutdown();
#endif /* CONFIG_ADB_PMU */
}
561 | |||
/* Halting a PowerMac is implemented as a power-off. */
static void
pmac_halt(void)
{
	pmac_power_off();
}
567 | |||
/*
 * Early platform entry point: records the ISA/PCI address constants
 * and fills in the ppc_md machine-dependent function table with the
 * PowerMac implementations.  r3-r7 carry the boot parameters but are
 * unused here.
 */
void __init
pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
	  unsigned long r6, unsigned long r7)
{
	/* isa_io_base gets set in pmac_find_bridges */
	isa_mem_base = PMAC_ISA_MEM_BASE;
	pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
	ISA_DMA_THRESHOLD = ~0L;
	DMA_MODE_READ = 1;
	DMA_MODE_WRITE = 2;

	ppc_md.setup_arch = pmac_setup_arch;
	ppc_md.show_cpuinfo = pmac_show_cpuinfo;
	ppc_md.show_percpuinfo = pmac_show_percpuinfo;
	ppc_md.irq_canonicalize = NULL;
	ppc_md.init_IRQ = pmac_pic_init;
	ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */

	ppc_md.pcibios_fixup = pmac_pcibios_fixup;
	ppc_md.pcibios_enable_device_hook = pmac_pci_enable_device_hook;
	ppc_md.pcibios_after_init = pmac_pcibios_after_init;
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	ppc_md.restart = pmac_restart;
	ppc_md.power_off = pmac_power_off;
	ppc_md.halt = pmac_halt;

	ppc_md.time_init = pmac_time_init;
	ppc_md.set_rtc_time = pmac_set_rtc_time;
	ppc_md.get_rtc_time = pmac_get_rtc_time;
	ppc_md.calibrate_decr = pmac_calibrate_decr;

	ppc_md.feature_call = pmac_do_feature_call;

	/* IDE port hooks, only when the pmac IDE driver is configured in */
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
#ifdef CONFIG_BLK_DEV_IDE_PMAC
	ppc_ide_md.ide_init_hwif = pmac_ide_init_hwif_ports;
	ppc_ide_md.default_io_base = pmac_ide_get_base;
#endif /* CONFIG_BLK_DEV_IDE_PMAC */
#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */

#ifdef CONFIG_BOOTX_TEXT
	ppc_md.progress = pmac_progress;
#endif /* CONFIG_BOOTX_TEXT */

	if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);

}
616 | |||
#ifdef CONFIG_BOOTX_TEXT
/*
 * Early-boot progress hook: draws the message on the btext console
 * when one is mapped.  The hex code argument is ignored.
 */
static void __init
pmac_progress(char *s, unsigned short hex)
{
	if (!boot_text_mapped)
		return;
	btext_drawstring(s);
	btext_drawchar('\n');
}
#endif /* CONFIG_BOOTX_TEXT */
627 | |||
628 | static int __init | ||
629 | pmac_declare_of_platform_devices(void) | ||
630 | { | ||
631 | struct device_node *np; | ||
632 | |||
633 | np = find_devices("uni-n"); | ||
634 | if (np) { | ||
635 | for (np = np->child; np != NULL; np = np->sibling) | ||
636 | if (strncmp(np->name, "i2c", 3) == 0) { | ||
637 | of_platform_device_create(np, "uni-n-i2c", | ||
638 | NULL); | ||
639 | break; | ||
640 | } | ||
641 | } | ||
642 | np = find_devices("u3"); | ||
643 | if (np) { | ||
644 | for (np = np->child; np != NULL; np = np->sibling) | ||
645 | if (strncmp(np->name, "i2c", 3) == 0) { | ||
646 | of_platform_device_create(np, "u3-i2c", | ||
647 | NULL); | ||
648 | break; | ||
649 | } | ||
650 | } | ||
651 | |||
652 | np = find_devices("valkyrie"); | ||
653 | if (np) | ||
654 | of_platform_device_create(np, "valkyrie", NULL); | ||
655 | np = find_devices("platinum"); | ||
656 | if (np) | ||
657 | of_platform_device_create(np, "platinum", NULL); | ||
658 | |||
659 | return 0; | ||
660 | } | ||
661 | |||
662 | device_initcall(pmac_declare_of_platform_devices); | ||
diff --git a/arch/powerpc/platforms/powermac/pmac_sleep.S b/arch/powerpc/platforms/powermac/pmac_sleep.S new file mode 100644 index 000000000000..88419c77ac43 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_sleep.S | |||
@@ -0,0 +1,396 @@ | |||
1 | /* | ||
2 | * This file contains sleep low-level functions for PowerBook G3. | ||
3 | * Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
4 | * and Paul Mackerras (paulus@samba.org). | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <asm/processor.h> | ||
15 | #include <asm/page.h> | ||
16 | #include <asm/ppc_asm.h> | ||
17 | #include <asm/cputable.h> | ||
18 | #include <asm/cache.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/asm-offsets.h> | ||
21 | |||
22 | #define MAGIC 0x4c617273 /* 'Lars' */ | ||
23 | |||
24 | /* | ||
25 | * Structure for storing CPU registers on the stack. | ||
26 | */ | ||
27 | #define SL_SP 0 | ||
28 | #define SL_PC 4 | ||
29 | #define SL_MSR 8 | ||
30 | #define SL_SDR1 0xc | ||
31 | #define SL_SPRG0 0x10 /* 4 sprg's */ | ||
32 | #define SL_DBAT0 0x20 | ||
33 | #define SL_IBAT0 0x28 | ||
34 | #define SL_DBAT1 0x30 | ||
35 | #define SL_IBAT1 0x38 | ||
36 | #define SL_DBAT2 0x40 | ||
37 | #define SL_IBAT2 0x48 | ||
38 | #define SL_DBAT3 0x50 | ||
39 | #define SL_IBAT3 0x58 | ||
40 | #define SL_TB 0x60 | ||
41 | #define SL_R2 0x68 | ||
42 | #define SL_CR 0x6c | ||
43 | #define SL_R12 0x70 /* r12 to r31 */ | ||
44 | #define SL_SIZE (SL_R12 + 80) | ||
45 | |||
46 | .section .text | ||
47 | .align 5 | ||
48 | |||
49 | #if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) | ||
50 | |||
/* This gets called by via-pmu.c late during the sleep process.
 * The PMU has already been sent the sleep command and will shut us
 * down soon.  We need to save all that is needed and set up the
 * wakeup vector that will be called by the ROM on wakeup.
 */
_GLOBAL(low_sleep_handler)
#ifndef CONFIG_6xx
	blr
#else
	/* Save LR, CR, r2 and the callee-saved r12-r31 in our stack frame */
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-SL_SIZE(r1)
	mfcr	r0
	stw	r0,SL_CR(r1)
	stw	r2,SL_R2(r1)
	stmw	r12,SL_R12(r1)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r1)
	mfsdr1	r4
	stw	r4,SL_SDR1(r1)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r1)
	mftb	r5
	stw	r5,SL_TB+4(r1)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r1)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r1)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r1)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r1)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r1)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r1)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r1)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r1)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r1)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r1)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r1)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r1)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r1)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r1)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r1)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r1)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r1)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r1)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r1)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r1)

	/* Backup various CPU config stuffs */
	bl	__save_cpu_setup

	/* The ROM can wake us up via 2 different vectors:
	 * - On wallstreet & lombard, we must write a magic
	 *   value 'Lars' at address 4 and a pointer to a
	 *   memory location containing the PC to resume from
	 *   at address 0.
	 * - On Core99, we must store the wakeup vector at
	 *   address 0x80 and eventually its parameters
	 *   at address 0x84.  I've had some trouble with those
	 *   parameters however and I no longer use them.
	 */
	lis	r5,grackle_wake_up@ha
	addi	r5,r5,grackle_wake_up@l
	tophys(r5,r5)
	stw	r5,SL_PC(r1)
	lis	r4,KERNELBASE@h
	tophys(r5,r1)
	addi	r5,r5,SL_PC
	lis	r6,MAGIC@ha
	addi	r6,r6,MAGIC@l
	stw	r5,0(r4)
	stw	r6,4(r4)
	/* Setup stuffs at 0x80-0x84 for Core99 */
	lis	r3,core99_wake_up@ha
	addi	r3,r3,core99_wake_up@l
	tophys(r3,r3)
	stw	r3,0x80(r4)
	stw	r5,0x84(r4)
	/* Store a pointer to our backup storage into
	 * a kernel global
	 */
	lis	r3,sleep_storage@ha
	addi	r3,r3,sleep_storage@l
	stw	r5,0(r3)

	.globl	low_cpu_die
low_cpu_die:
	/* Flush & disable all caches */
	bl	flush_disable_caches

	/* Turn off data relocation. */
	mfmsr	r3		/* read current MSR into r3 (old comment
				 * said "Save MSR in r7", which was stale) */
	rlwinm	r3,r3,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r3
	isync

BEGIN_FTR_SECTION
	/* Flush any pending L2 data prefetches to work around HW bug */
	sync
	lis	r3,0xfff0
	lwz	r0,0(r3)	/* perform cache-inhibited load to ROM */
	sync			/* (caches are disabled at this point) */
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/*
	 * Set the HID0 and MSR for sleep.
	 */
	mfspr	r2,SPRN_HID0
	rlwinm	r2,r2,0,10,7	/* clear doze, nap */
	oris	r2,r2,HID0_SLEEP@h
	sync
	isync
	mtspr	SPRN_HID0,r2
	sync

	/* This loop puts us back to sleep in case we have a spurious
	 * wakeup so that the host bridge properly stays asleep.  The
	 * CPU will be turned off, either after a known time (about 1
	 * second) on wallstreet & lombard, or as soon as the CPU enters
	 * SLEEP mode on core99
	 */
	mfmsr	r2
	oris	r2,r2,MSR_POW@h
1:	sync
	mtmsr	r2
	isync
	b	1b
207 | |||
/*
 * Here is the resume code.
 */


/*
 * Core99 machines resume here
 * r4 has the physical address of SL_PC(sp) (unused)
 */
_GLOBAL(core99_wake_up)
	/* Make sure HID0 no longer contains any sleep bit and that data cache
	 * is disabled
	 */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,11,7		/* clear SLEEP, NAP, DOZE bits */
	rlwinm	3,r3,0,18,15		/* clear DCE, ICE */
					/* NOTE(review): bare "3" destination
					 * assembles the same as "r3" but is
					 * inconsistent with the rest of the
					 * file -- confirm and normalize. */
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* sanitize MSR (force EE and IP off) */
	mfmsr	r3
	ori	r3,r3,MSR_EE|MSR_IP
	xori	r3,r3,MSR_EE|MSR_IP
	sync
	isync
	mtmsr	r3
	sync
	isync

	/* Recover sleep storage */
	lis	r3,sleep_storage@ha
	addi	r3,r3,sleep_storage@l
	tophys(r3,r3)
	lwz	r1,0(r3)

	/* Pass thru to older resume code ... */
/*
 * Here is the resume code for older machines.
 * r1 has the physical address of SL_PC(sp).
 */

grackle_wake_up:

	/* Restore the kernel's segment registers before
	 * we do any r1 memory access as we are not sure they
	 * are in a sane state above the first 256Mb region
	 */
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync

	subi	r1,r1,SL_PC

	/* Restore various CPU config stuffs */
	bl	__restore_cpu_setup

	/* Make sure all FPRs have been initialized */
	bl	reloc_offset
	bl	__init_fpu_registers

	/* Invalidate & enable L1 cache, we don't care about
	 * whatever the ROM may have tried to write to memory
	 */
	bl	__inval_enable_L1

	/* Restore the BATs, and SDR1.  Then we can turn on the MMU. */
	lwz	r4,SL_SDR1(r1)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r1)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r1)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r1)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r1)
	mtsprg	3,r4

	lwz	r4,SL_DBAT0(r1)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r1)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r1)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r1)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r1)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r1)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r1)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r1)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r1)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r1)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r1)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r1)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r1)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r1)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r1)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r1)
	mtibatl	3,r4

BEGIN_FTR_SECTION
	/* Zero the extra BAT pairs present on 745x-class CPUs */
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	blt	1b			/* NOTE(review): the first addic.
					 * result is positive, so LT is clear
					 * and this loop body runs only once;
					 * a full flush would need bgt --
					 * confirm intended behavior. */
	sync

	/* restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r1)
	bl	turn_on_mmu

	/* get back the stack pointer */
	tovirt(r1,r1)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r1)
	lwz	r4,SL_TB+4(r1)
	mttbu	r3
	mttbl	r4

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r1)
	mtcr	r0
	lwz	r2,SL_R2(r1)
	lmw	r12,SL_R12(r1)
	addi	r1,r1,SL_SIZE
	lwz	r0,4(r1)
	mtlr	r0
	blr

/* Switch MSR to r3 (MMU on) and continue at the caller's virtual LR */
turn_on_mmu:
	mflr	r4
	tovirt(r4,r4)
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
386 | |||
#endif /* CONFIG_6xx */

	/* Physical pointer to the saved-state frame, written by
	 * low_sleep_handler and read back by core99_wake_up. */
	.section .data
	.balign	L1_CACHE_LINE_SIZE
sleep_storage:
	.long 0
	.balign	L1_CACHE_LINE_SIZE, 0

#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) */
	.section .text
diff --git a/arch/powerpc/platforms/powermac/pmac_smp.c b/arch/powerpc/platforms/powermac/pmac_smp.c new file mode 100644 index 000000000000..995e9095d865 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_smp.c | |||
@@ -0,0 +1,716 @@ | |||
1 | /* | ||
2 | * SMP support for power macintosh. | ||
3 | * | ||
4 | * We support both the old "powersurge" SMP architecture | ||
5 | * and the current Core99 (G4 PowerMac) machines. | ||
6 | * | ||
7 | * Note that we don't support the very first rev. of | ||
8 | * Apple/DayStar 2 CPUs board, the one with the funky | ||
9 | * watchdog. Hopefully, none of these should be there except | ||
10 | * maybe internally to Apple. I should probably still add some | ||
11 | * code to detect this card though and disable SMP. --BenH. | ||
12 | * | ||
13 | * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net) | ||
14 | * and Ben Herrenschmidt <benh@kernel.crashing.org>. | ||
15 | * | ||
16 | * Support for DayStar quad CPU cards | ||
17 | * Copyright (C) XLR8, Inc. 1994-2000 | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License | ||
21 | * as published by the Free Software Foundation; either version | ||
22 | * 2 of the License, or (at your option) any later version. | ||
23 | */ | ||
24 | #include <linux/config.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/smp.h> | ||
28 | #include <linux/smp_lock.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/kernel_stat.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/hardirq.h> | ||
36 | #include <linux/cpu.h> | ||
37 | |||
38 | #include <asm/ptrace.h> | ||
39 | #include <asm/atomic.h> | ||
40 | #include <asm/irq.h> | ||
41 | #include <asm/page.h> | ||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/sections.h> | ||
44 | #include <asm/io.h> | ||
45 | #include <asm/prom.h> | ||
46 | #include <asm/smp.h> | ||
47 | #include <asm/residual.h> | ||
48 | #include <asm/machdep.h> | ||
49 | #include <asm/pmac_feature.h> | ||
50 | #include <asm/time.h> | ||
51 | #include <asm/open_pic.h> | ||
52 | #include <asm/cacheflush.h> | ||
53 | #include <asm/keylargo.h> | ||
54 | |||
55 | /* | ||
56 | * Powersurge (old powermac SMP) support. | ||
57 | */ | ||
58 | |||
59 | extern void __secondary_start_pmac_0(void); | ||
60 | |||
61 | /* Addresses for powersurge registers */ | ||
62 | #define HAMMERHEAD_BASE 0xf8000000 | ||
63 | #define HHEAD_CONFIG 0x90 | ||
64 | #define HHEAD_SEC_INTR 0xc0 | ||
65 | |||
66 | /* register for interrupting the primary processor on the powersurge */ | ||
67 | /* N.B. this is actually the ethernet ROM! */ | ||
68 | #define PSURGE_PRI_INTR 0xf3019000 | ||
69 | |||
70 | /* register for storing the start address for the secondary processor */ | ||
71 | /* N.B. this is the PCI config space address register for the 1st bridge */ | ||
72 | #define PSURGE_START 0xf2800000 | ||
73 | |||
74 | /* Daystar/XLR8 4-CPU card */ | ||
75 | #define PSURGE_QUAD_REG_ADDR 0xf8800000 | ||
76 | |||
77 | #define PSURGE_QUAD_IRQ_SET 0 | ||
78 | #define PSURGE_QUAD_IRQ_CLR 1 | ||
79 | #define PSURGE_QUAD_IRQ_PRIMARY 2 | ||
80 | #define PSURGE_QUAD_CKSTOP_CTL 3 | ||
81 | #define PSURGE_QUAD_PRIMARY_ARB 4 | ||
82 | #define PSURGE_QUAD_BOARD_ID 6 | ||
83 | #define PSURGE_QUAD_WHICH_CPU 7 | ||
84 | #define PSURGE_QUAD_CKSTOP_RDBK 8 | ||
85 | #define PSURGE_QUAD_RESET_CTL 11 | ||
86 | |||
87 | #define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v))) | ||
88 | #define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f) | ||
89 | #define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v))) | ||
90 | #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v))) | ||
91 | |||
92 | /* virtual addresses for the above */ | ||
93 | static volatile u8 __iomem *hhead_base; | ||
94 | static volatile u8 __iomem *quad_base; | ||
95 | static volatile u32 __iomem *psurge_pri_intr; | ||
96 | static volatile u8 __iomem *psurge_sec_intr; | ||
97 | static volatile u32 __iomem *psurge_start; | ||
98 | |||
99 | /* values for psurge_type */ | ||
100 | #define PSURGE_NONE -1 | ||
101 | #define PSURGE_DUAL 0 | ||
102 | #define PSURGE_QUAD_OKEE 1 | ||
103 | #define PSURGE_QUAD_COTTON 2 | ||
104 | #define PSURGE_QUAD_ICEGRASS 3 | ||
105 | |||
106 | /* what sort of powersurge board we have */ | ||
107 | static int psurge_type = PSURGE_NONE; | ||
108 | |||
109 | /* L2 and L3 cache settings to pass from CPU0 to CPU1 */ | ||
110 | volatile static long int core99_l2_cache; | ||
111 | volatile static long int core99_l3_cache; | ||
112 | |||
113 | /* Timebase freeze GPIO */ | ||
114 | static unsigned int core99_tb_gpio; | ||
115 | |||
116 | /* Sync flag for HW tb sync */ | ||
117 | static volatile int sec_tb_reset = 0; | ||
118 | static unsigned int pri_tb_hi, pri_tb_lo; | ||
119 | static unsigned int pri_tb_stamp; | ||
120 | |||
/*
 * Copy the boot CPU's L2CR/L3CR cache settings to a secondary CPU.
 * CPU 0 records its register values into core99_l2_cache/core99_l3_cache;
 * any other CPU first disables the cache, then re-enables it with
 * CPU 0's configuration.  CPUs without the corresponding feature bit
 * are skipped.
 */
static void __devinit core99_init_caches(int cpu)
{
	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;

	if (cpu == 0) {
		core99_l2_cache = _get_L2CR();
		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
	} else {
		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
		_set_L2CR(0);			/* disable before reprogramming */
		_set_L2CR(core99_l2_cache);
		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
	}

	if (!cpu_has_feature(CPU_FTR_L3CR))
		return;

	if (cpu == 0){
		core99_l3_cache = _get_L3CR();
		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
	} else {
		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
		_set_L3CR(0);			/* disable before reprogramming */
		_set_L3CR(core99_l3_cache);
		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
	}
}
149 | |||
150 | /* | ||
151 | * Set and clear IPIs for powersurge. | ||
152 | */ | ||
153 | static inline void psurge_set_ipi(int cpu) | ||
154 | { | ||
155 | if (psurge_type == PSURGE_NONE) | ||
156 | return; | ||
157 | if (cpu == 0) | ||
158 | in_be32(psurge_pri_intr); | ||
159 | else if (psurge_type == PSURGE_DUAL) | ||
160 | out_8(psurge_sec_intr, 0); | ||
161 | else | ||
162 | PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu); | ||
163 | } | ||
164 | |||
165 | static inline void psurge_clr_ipi(int cpu) | ||
166 | { | ||
167 | if (cpu > 0) { | ||
168 | switch(psurge_type) { | ||
169 | case PSURGE_DUAL: | ||
170 | out_8(psurge_sec_intr, ~0); | ||
171 | case PSURGE_NONE: | ||
172 | break; | ||
173 | default: | ||
174 | PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu); | ||
175 | } | ||
176 | } | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * On powersurge (old SMP powermac architecture) we don't have | ||
181 | * separate IPIs for separate messages like openpic does. Instead | ||
182 | * we have a bitmap for each processor, where a 1 bit means that | ||
183 | * the corresponding message is pending for that processor. | ||
184 | * Ideally each cpu's entry would be in a different cache line. | ||
185 | * -- paulus. | ||
186 | */ | ||
187 | static unsigned long psurge_smp_message[NR_CPUS]; | ||
188 | |||
189 | void psurge_smp_message_recv(struct pt_regs *regs) | ||
190 | { | ||
191 | int cpu = smp_processor_id(); | ||
192 | int msg; | ||
193 | |||
194 | /* clear interrupt */ | ||
195 | psurge_clr_ipi(cpu); | ||
196 | |||
197 | if (num_online_cpus() < 2) | ||
198 | return; | ||
199 | |||
200 | /* make sure there is a message there */ | ||
201 | for (msg = 0; msg < 4; msg++) | ||
202 | if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) | ||
203 | smp_message_recv(msg, regs); | ||
204 | } | ||
205 | |||
/* Interrupt handler for the primary CPU's IPI line (registered on irq 30). */
irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
{
	psurge_smp_message_recv(regs);
	return IRQ_HANDLED;
}
211 | |||
212 | static void smp_psurge_message_pass(int target, int msg, unsigned long data, | ||
213 | int wait) | ||
214 | { | ||
215 | int i; | ||
216 | |||
217 | if (num_online_cpus() < 2) | ||
218 | return; | ||
219 | |||
220 | for (i = 0; i < NR_CPUS; i++) { | ||
221 | if (!cpu_online(i)) | ||
222 | continue; | ||
223 | if (target == MSG_ALL | ||
224 | || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) | ||
225 | || target == i) { | ||
226 | set_bit(msg, &psurge_smp_message[i]); | ||
227 | psurge_set_ipi(i); | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | |||
/*
 * Determine whether a quad CPU card is present.  We read the board ID
 * register, force the data bus to change to something else, and read
 * it again.  If it's stable, then the register probably exists (ugh !)
 * Returns the detected PSURGE_QUAD_* type, or PSURGE_DUAL when no
 * quad card responds.
 */
static int __init psurge_quad_probe(void)
{
	int type;
	unsigned int i;

	type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
	if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
	    || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
		return PSURGE_DUAL;

	/* looks OK, try a slightly more rigorous test */
	/* bogus is not necessarily cacheline-aligned,
	   though I don't suppose that really matters. -- paulus */
	for (i = 0; i < 100; i++) {
		volatile u32 bogus[8];
		/* write a rotating pattern, then flush it out to drive
		   the data bus before re-reading the ID register */
		bogus[(0+i)%8] = 0x00000000;
		bogus[(1+i)%8] = 0x55555555;
		bogus[(2+i)%8] = 0xFFFFFFFF;
		bogus[(3+i)%8] = 0xAAAAAAAA;
		bogus[(4+i)%8] = 0x33333333;
		bogus[(5+i)%8] = 0xCCCCCCCC;
		bogus[(6+i)%8] = 0xCCCCCCCC;
		bogus[(7+i)%8] = 0x33333333;
		wmb();
		asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
		mb();
		/* an unstable reading means there is no real register */
		if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
			return PSURGE_DUAL;
	}
	return type;
}
268 | |||
/*
 * Bring the secondary CPUs on a quad (Daystar/XLR8) card out of their
 * power-on state: stop their clocks or assert reset (board-dependent),
 * clear pending interrupts, pulse the reset line and hand back bus
 * arbitration, with settling delays between the steps.
 * NOTE(review): the exact ordering appears hardware-mandated — do not
 * reorder these register accesses.
 */
static void __init psurge_quad_init(void)
{
	int procbits;

	if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
	/* complement of the WHICH_CPU register: bits for the other CPUs */
	procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
	if (psurge_type == PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	else
		PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
	mdelay(33);
	out_8(psurge_sec_intr, ~0);
	PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
	PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
	if (psurge_type != PSURGE_QUAD_ICEGRASS)
		PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
	PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
	PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
	mdelay(33);
	PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
	mdelay(33);
}
292 | |||
/*
 * Detect a powersurge SMP configuration and map its registers.
 * Returns the number of CPUs found (4 for a quad card, 2 for a dual
 * board), or 1 when the machine is not an SMP-capable powersurge.
 */
static int __init smp_psurge_probe(void)
{
	int i, ncpus;

	/* We don't do SMP on the PPC601 -- paulus */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		return 1;

	/*
	 * The powersurge cpu board can be used in the generation
	 * of powermacs that have a socket for an upgradeable cpu card,
	 * including the 7500, 8500, 9500, 9600.
	 * The device tree doesn't tell you if you have 2 cpus because
	 * OF doesn't know anything about the 2nd processor.
	 * Instead we look for magic bits in magic registers,
	 * in the hammerhead memory controller in the case of the
	 * dual-cpu powersurge board. -- paulus.
	 */
	if (find_devices("hammerhead") == NULL)
		return 1;

	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;

	psurge_type = psurge_quad_probe();
	if (psurge_type != PSURGE_DUAL) {
		psurge_quad_init();
		/* All released cards using this HW design have 4 CPUs */
		ncpus = 4;
	} else {
		/* not a quad board: the quad register mapping is unused */
		iounmap(quad_base);
		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
			/* not a dual-cpu card */
			iounmap(hhead_base);
			psurge_type = PSURGE_NONE;
			return 1;
		}
		ncpus = 2;
	}

	psurge_start = ioremap(PSURGE_START, 4);
	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

	/* this is not actually strictly necessary -- paulus. */
	for (i = 1; i < ncpus; ++i)
		smp_hw_index[i] = i;

	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

	return ncpus;
}
345 | |||
/*
 * Start secondary CPU @nr: publish its entry point (physical address
 * of __secondary_start_pmac_0 + nr*8) through the PSURGE_START
 * register, then pulse the CPU's IPI line to wake it.
 */
static void __init smp_psurge_kick_cpu(int nr)
{
	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
	unsigned long a;

	/* may need to flush here if secondary bats aren't setup */
	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
	asm volatile("sync");

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

	out_be32(psurge_start, start);
	mb();

	/* brief IPI pulse to wake the secondary */
	psurge_set_ipi(nr);
	udelay(10);
	psurge_clr_ipi(nr);

	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
367 | |||
/*
 * With the dual-cpu powersurge board, the decrementers and timebases
 * of both cpus are frozen after the secondary cpu is started up,
 * until we give the secondary cpu another interrupt. This routine
 * uses this to get the timebases synchronized.
 * -- paulus.
 */
static void __init psurge_dual_sync_tb(int cpu_nr)
{
	int t;

	set_dec(tb_ticks_per_jiffy);
	set_tb(0, 0);
	last_jiffy_stamp(cpu_nr) = 0;

	if (cpu_nr > 0) {
		/* secondary: tell the primary our TB is zeroed, then leave */
		mb();
		sec_tb_reset = 1;
		return;
	}

	/* wait for the secondary to have reset its TB before proceeding */
	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
		;

	/* now interrupt the secondary, starting both TBs */
	psurge_set_ipi(1);

	smp_tb_synchronized = 1;
}
398 | |||
/* Registration record for the primary CPU's IPI handler (wired to irq 30
 * in smp_psurge_setup_cpu()). */
static struct irqaction psurge_irqaction = {
	.handler = psurge_primary_intr,
	.flags = SA_INTERRUPT,
	.mask = CPU_MASK_NONE,
	.name = "primary IPI",
};
405 | |||
/*
 * Per-cpu setup hook for powersurge (smp_ops.setup_cpu).  On the boot
 * CPU this clears the secondary start vector and registers the primary
 * IPI handler; on dual boards every CPU then runs the TB sync protocol.
 */
static void __init smp_psurge_setup_cpu(int cpu_nr)
{

	if (cpu_nr == 0) {
		/* If we failed to start the second CPU, we should still
		 * send it an IPI to start the timebase & DEC or we might
		 * have them stuck.
		 */
		if (num_online_cpus() < 2) {
			if (psurge_type == PSURGE_DUAL)
				psurge_set_ipi(1);
			return;
		}
		/* reset the entry point so if we get another intr we won't
		 * try to startup again */
		out_be32(psurge_start, 0x100);
		if (setup_irq(30, &psurge_irqaction))
			printk(KERN_ERR "Couldn't get primary IPI interrupt");
	}

	if (psurge_type == PSURGE_DUAL)
		psurge_dual_sync_tb(cpu_nr);
}
429 | |||
/* smp_ops hook: no-op on powersurge — the actual timebase sync is done
 * by psurge_dual_sync_tb(), called from smp_psurge_setup_cpu(). */
void __init smp_psurge_take_timebase(void)
{
	/* Dummy implementation */
}
434 | |||
/* smp_ops hook: no-op on powersurge — see smp_psurge_take_timebase(). */
void __init smp_psurge_give_timebase(void)
{
	/* Dummy implementation */
}
439 | |||
/*
 * Probe for SMP on Core99 machines: count "cpu" nodes in the device
 * tree, identify the boot CPU, pick a timebase-sync method (generic
 * hooks on MacRISC4, a timebase-freeze GPIO otherwise), and when more
 * than one CPU is found request MPIC IPIs and record the boot CPU's
 * cache settings.  Returns the number of CPUs found.
 */
static int __init smp_core99_probe(void)
{
#ifdef CONFIG_6xx
	extern int powersave_nap;
#endif
	struct device_node *cpus, *firstcpu;
	int i, ncpus = 0, boot_cpu = -1;
	u32 *tbprop = NULL;

	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
	cpus = firstcpu = find_type_devices("cpu");
	while(cpus != NULL) {
		u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
		char *stateprop = (char *)get_property(cpus, "state", NULL);
		/* the node already marked "running" is the CPU we booted on */
		if (regprop != NULL && stateprop != NULL &&
		    !strncmp(stateprop, "running", 7))
			boot_cpu = *regprop;
		++ncpus;
		cpus = cpus->next;
	}
	if (boot_cpu == -1)
		printk(KERN_WARNING "Couldn't detect boot CPU !\n");
	if (boot_cpu != 0)
		printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);

	if (machine_is_compatible("MacRISC4")) {
		extern struct smp_ops_t core99_smp_ops;

		core99_smp_ops.take_timebase = smp_generic_take_timebase;
		core99_smp_ops.give_timebase = smp_generic_give_timebase;
	} else {
		/* older Core99: TB is frozen through a keylargo GPIO */
		if (firstcpu != NULL)
			tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
		if (tbprop)
			core99_tb_gpio = *tbprop;
		else
			core99_tb_gpio = KL_GPIO_TB_ENABLE;
	}

	if (ncpus > 1) {
		mpic_request_ipis();
		for (i = 1; i < ncpus; ++i)
			smp_hw_index[i] = i;
#ifdef CONFIG_6xx
		/* disable CPU nap mode when running SMP */
		powersave_nap = 0;
#endif
		core99_init_caches(0);
	}

	return ncpus;
}
491 | |||
492 | static void __devinit smp_core99_kick_cpu(int nr) | ||
493 | { | ||
494 | unsigned long save_vector, new_vector; | ||
495 | unsigned long flags; | ||
496 | |||
497 | volatile unsigned long *vector | ||
498 | = ((volatile unsigned long *)(KERNELBASE+0x100)); | ||
499 | if (nr < 0 || nr > 3) | ||
500 | return; | ||
501 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); | ||
502 | |||
503 | local_irq_save(flags); | ||
504 | local_irq_disable(); | ||
505 | |||
506 | /* Save reset vector */ | ||
507 | save_vector = *vector; | ||
508 | |||
509 | /* Setup fake reset vector that does | ||
510 | * b __secondary_start_pmac_0 + nr*8 - KERNELBASE | ||
511 | */ | ||
512 | new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8; | ||
513 | *vector = 0x48000002 + new_vector - KERNELBASE; | ||
514 | |||
515 | /* flush data cache and inval instruction cache */ | ||
516 | flush_icache_range((unsigned long) vector, (unsigned long) vector + 4); | ||
517 | |||
518 | /* Put some life in our friend */ | ||
519 | pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); | ||
520 | |||
521 | /* FIXME: We wait a bit for the CPU to take the exception, I should | ||
522 | * instead wait for the entry code to set something for me. Well, | ||
523 | * ideally, all that crap will be done in prom.c and the CPU left | ||
524 | * in a RAM-based wait loop like CHRP. | ||
525 | */ | ||
526 | mdelay(1); | ||
527 | |||
528 | /* Restore our exception vector */ | ||
529 | *vector = save_vector; | ||
530 | flush_icache_range((unsigned long) vector, (unsigned long) vector + 4); | ||
531 | |||
532 | local_irq_restore(flags); | ||
533 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); | ||
534 | } | ||
535 | |||
/*
 * Per-cpu setup hook for Core99 (smp_ops.setup_cpu): program the
 * caches on secondaries, set up the MPIC for this CPU, and on the boot
 * CPU of a G5 take a never-started CPU 1 off the bus.
 */
static void __devinit smp_core99_setup_cpu(int cpu_nr)
{
	/* Setup L2/L3 */
	if (cpu_nr != 0)
		core99_init_caches(cpu_nr);

	/* Setup openpic */
	mpic_setup_this_cpu();

	if (cpu_nr == 0) {
#ifdef CONFIG_POWER4
		extern void g5_phy_disable_cpu1(void);

		/* If we didn't start the second CPU, we must take
		 * it off the bus
		 */
		if (machine_is_compatible("MacRISC4") &&
		    num_online_cpus() < 2)
			g5_phy_disable_cpu1();
#endif /* CONFIG_POWER4 */
		if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
	}
}
559 | |||
/* not __init, called in sleep/wakeup code */
/*
 * Secondary side of the Core99 timebase handshake: announce ourselves
 * (sec_tb_reset = 1), spin until the primary publishes the frozen TB
 * value (sec_tb_reset == 2), copy pri_tb_hi/lo/stamp into our own
 * DEC/TB, then clear the flag to release the primary.  Pairs with
 * smp_core99_give_timebase().
 */
void smp_core99_take_timebase(void)
{
	unsigned long flags;

	/* tell the primary we're here */
	sec_tb_reset = 1;
	mb();

	/* wait for the primary to set pri_tb_hi/lo */
	while (sec_tb_reset < 2)
		mb();

	/* set our stuff the same as the primary */
	local_irq_save(flags);
	set_dec(1);
	set_tb(pri_tb_hi, pri_tb_lo);
	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
	mb();

	/* tell the primary we're done */
	sec_tb_reset = 0;
	mb();
	local_irq_restore(flags);
}
585 | |||
/* not __init, called in sleep/wakeup code */
/*
 * Primary side of the Core99 timebase handshake: wait for the
 * secondary to check in (sec_tb_reset == 1), freeze the timebase via
 * the GPIO, publish the frozen TB value and advance sec_tb_reset to 2,
 * wait for the secondary to take it (flag back to 0), then restart the
 * timebase.  Pairs with smp_core99_take_timebase().
 */
void smp_core99_give_timebase(void)
{
	unsigned long flags;
	unsigned int t;

	/* wait for the secondary to be in take_timebase */
	for (t = 100000; t > 0 && !sec_tb_reset; --t)
		udelay(10);
	if (!sec_tb_reset) {
		printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
		return;
	}

	/* freeze the timebase and read it */
	/* disable interrupts so the timebase is disabled for the
	   shortest possible time */
	local_irq_save(flags);
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	mb();
	pri_tb_hi = get_tbu();
	pri_tb_lo = get_tbl();
	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
	mb();

	/* tell the secondary we're ready */
	sec_tb_reset = 2;
	mb();

	/* wait for the secondary to have taken it */
	for (t = 100000; t > 0 && sec_tb_reset; --t)
		udelay(10);
	if (sec_tb_reset)
		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
	else
		smp_tb_synchronized = 1;

	/* Now, restart the timebase by leaving the GPIO to an open collector */
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
	local_irq_restore(flags);
}
629 | |||
630 | void smp_core99_message_pass(int target, int msg, unsigned long data, int wait) | ||
631 | { | ||
632 | cpumask_t mask = CPU_MASK_ALL; | ||
633 | /* make sure we're sending something that translates to an IPI */ | ||
634 | if (msg > 0x3) { | ||
635 | printk("SMP %d: smp_message_pass: unknown msg %d\n", | ||
636 | smp_processor_id(), msg); | ||
637 | return; | ||
638 | } | ||
639 | switch (target) { | ||
640 | case MSG_ALL: | ||
641 | mpic_send_ipi(msg, mask); | ||
642 | break; | ||
643 | case MSG_ALL_BUT_SELF: | ||
644 | cpu_clear(smp_processor_id(), mask); | ||
645 | mpic_send_ipi(msg, mask); | ||
646 | break; | ||
647 | default: | ||
648 | mpic_send_ipi(msg, cpumask_of_cpu(target)); | ||
649 | break; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | |||
/* PowerSurge-style Macs: smp_ops hooks used by the generic SMP code */
struct smp_ops_t psurge_smp_ops = {
	.message_pass	= smp_psurge_message_pass,
	.probe		= smp_psurge_probe,
	.kick_cpu	= smp_psurge_kick_cpu,
	.setup_cpu	= smp_psurge_setup_cpu,
	.give_timebase	= smp_psurge_give_timebase,
	.take_timebase	= smp_psurge_take_timebase,
};
663 | |||
/* Core99 Macs (dual G4s): the timebase hooks are overridden with the
 * generic versions on MacRISC4 machines — see smp_core99_probe(). */
struct smp_ops_t core99_smp_ops = {
	.message_pass	= smp_core99_message_pass,
	.probe		= smp_core99_probe,
	.kick_cpu	= smp_core99_kick_cpu,
	.setup_cpu	= smp_core99_setup_cpu,
	.give_timebase	= smp_core99_give_timebase,
	.take_timebase	= smp_core99_take_timebase,
};
673 | |||
674 | #ifdef CONFIG_HOTPLUG_CPU | ||
675 | |||
/*
 * Take the calling CPU out of service for hotplug: remove it from the
 * online map, raise the openpic task priority so it stops taking
 * external interrupts, and push the decrementer far into the future.
 * NOTE(review): the second mtdec after the delay presumably re-arms
 * DEC in case it expired meanwhile — confirm against the interrupt
 * path.  Always returns 0 (success).
 */
int __cpu_disable(void)
{
	cpu_clear(smp_processor_id(), cpu_online_map);

	/* XXX reset cpu affinity here */
	openpic_set_priority(0xf);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	mb();
	udelay(20);
	asm volatile("mtdec %0" : : "r" (0x7fffffff));
	return 0;
}
688 | |||
extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */
/* per-cpu "I have stopped" flags, polled by __cpu_die() below */
static int cpu_dead[NR_CPUS];

/*
 * Runs on the dying CPU itself: flag ourselves dead so __cpu_die()
 * can proceed, then drop into the low-level sleep code, which never
 * returns.
 */
void cpu_die(void)
{
	local_irq_disable();
	cpu_dead[smp_processor_id()] = 1;
	mb();
	low_cpu_die();
}
699 | |||
700 | void __cpu_die(unsigned int cpu) | ||
701 | { | ||
702 | int timeout; | ||
703 | |||
704 | timeout = 1000; | ||
705 | while (!cpu_dead[cpu]) { | ||
706 | if (--timeout == 0) { | ||
707 | printk("CPU %u refused to die!\n", cpu); | ||
708 | break; | ||
709 | } | ||
710 | msleep(1); | ||
711 | } | ||
712 | cpu_callin_map[cpu] = 0; | ||
713 | cpu_dead[cpu] = 0; | ||
714 | } | ||
715 | |||
716 | #endif | ||
diff --git a/arch/powerpc/platforms/powermac/pmac_time.c b/arch/powerpc/platforms/powermac/pmac_time.c new file mode 100644 index 000000000000..ff6adff36cb8 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pmac_time.c | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * Support for periodic interrupts (100 per second) and for getting | ||
3 | * the current time from the RTC on Power Macintoshes. | ||
4 | * | ||
5 | * We use the decrementer register for our periodic interrupts. | ||
6 | * | ||
7 | * Paul Mackerras August 1996. | ||
8 | * Copyright (C) 1996 Paul Mackerras. | ||
9 | */ | ||
10 | #include <linux/config.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/param.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/time.h> | ||
19 | #include <linux/adb.h> | ||
20 | #include <linux/cuda.h> | ||
21 | #include <linux/pmu.h> | ||
22 | #include <linux/hardirq.h> | ||
23 | |||
24 | #include <asm/sections.h> | ||
25 | #include <asm/prom.h> | ||
26 | #include <asm/system.h> | ||
27 | #include <asm/io.h> | ||
28 | #include <asm/pgtable.h> | ||
29 | #include <asm/machdep.h> | ||
30 | #include <asm/time.h> | ||
31 | #include <asm/nvram.h> | ||
32 | |||
33 | /* Apparently the RTC stores seconds since 1 Jan 1904 */ | ||
34 | #define RTC_OFFSET 2082844800 | ||
35 | |||
36 | /* | ||
37 | * Calibrate the decrementer frequency with the VIA timer 1. | ||
38 | */ | ||
39 | #define VIA_TIMER_FREQ_6 4700000 /* time 1 frequency * 6 */ | ||
40 | |||
41 | /* VIA registers */ | ||
42 | #define RS 0x200 /* skip between registers */ | ||
43 | #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */ | ||
44 | #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */ | ||
45 | #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */ | ||
46 | #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */ | ||
47 | #define ACR (11*RS) /* Auxiliary control register */ | ||
48 | #define IFR (13*RS) /* Interrupt flag register */ | ||
49 | |||
50 | /* Bits in ACR */ | ||
51 | #define T1MODE 0xc0 /* Timer 1 mode */ | ||
52 | #define T1MODE_CONT 0x40 /* continuous interrupts */ | ||
53 | |||
54 | /* Bits in IFR and IER */ | ||
55 | #define T1_INT 0x40 /* Timer 1 interrupt */ | ||
56 | |||
57 | extern struct timezone sys_tz; | ||
58 | |||
/*
 * Read the GMT offset (a signed 24-bit value in seconds) and the DST
 * flag from XPRAM.  Returns the offset in seconds, or 0 when NVRAM
 * support is not configured.
 */
long __init
pmac_time_init(void)
{
#ifdef CONFIG_NVRAM
	s32 delta = 0;
	int dst;

	/* assemble the 24-bit value from three XPRAM bytes (big-endian) */
	delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
	delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
	delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
	/* sign-extend from 24 to 32 bits */
	if (delta & 0x00800000UL)
		delta |= 0xFF000000UL;
	dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
	printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
		dst ? "on" : "off");
	return delta;
#else
	return 0;
#endif
}
79 | |||
/*
 * Read the current time from the RTC through whichever system
 * controller (CUDA or PMU) the machine has.  The RTC counts seconds
 * since 1 Jan 1904, so RTC_OFFSET is subtracted to get a Unix-epoch
 * value.  Returns 0 on failure or when no controller is present.
 */
unsigned long
pmac_get_rtc_time(void)
{
#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
	struct adb_request req;
	unsigned long now;
#endif

	/* Get the time from the RTC */
	switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
	case SYS_CTRLER_CUDA:
		if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
			return 0;
		/* busy-wait for the reply */
		while (!req.complete)
			cuda_poll();
		if (req.reply_len != 7)
			printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
			       req.reply_len);
		/* 32-bit big-endian seconds value in reply bytes 3..6 */
		now = (req.reply[3] << 24) + (req.reply[4] << 16)
			+ (req.reply[5] << 8) + req.reply[6];
		return now - RTC_OFFSET;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
	case SYS_CTRLER_PMU:
		if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
			return 0;
		while (!req.complete)
			pmu_poll();
		if (req.reply_len != 4)
			printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
			       req.reply_len);
		/* 32-bit big-endian seconds value in reply bytes 0..3 */
		now = (req.reply[0] << 24) + (req.reply[1] << 16)
			+ (req.reply[2] << 8) + req.reply[3];
		return now - RTC_OFFSET;
#endif /* CONFIG_ADB_PMU */
	default: ;	/* no known controller: fall through and return 0 */
	}
	return 0;
}
120 | |||
/*
 * Write @nowtime (seconds since the Unix epoch) to the RTC through the
 * CUDA or PMU controller, converting to the RTC's 1904-based count.
 * Returns 1 on success, 0 on failure or when no controller is present.
 */
int
pmac_set_rtc_time(unsigned long nowtime)
{
#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
	struct adb_request req;
#endif

	/* convert to the RTC's epoch (1 Jan 1904) */
	nowtime += RTC_OFFSET;

	switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
	case SYS_CTRLER_CUDA:
		/* time is sent as four big-endian bytes */
		if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
				 nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0)
			return 0;
		while (!req.complete)
			cuda_poll();
		if ((req.reply_len != 3) && (req.reply_len != 7))
			printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
			       req.reply_len);
		return 1;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
	case SYS_CTRLER_PMU:
		if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
				nowtime >> 24, nowtime >> 16, nowtime >> 8, nowtime) < 0)
			return 0;
		while (!req.complete)
			pmu_poll();
		if (req.reply_len != 0)
			printk(KERN_ERR "pmac_set_rtc_time: got %d byte reply\n",
			       req.reply_len);
		return 1;
#endif /* CONFIG_ADB_PMU */
	default:
		return 0;
	}
}
159 | |||
/*
 * Calibrate the decrementer register using VIA timer 1.
 * This is used both on powermacs and CHRP machines.
 * Timer 1 is programmed for continuous interrupts with a known-
 * frequency period; the decrementer is sampled at two consecutive
 * timer expiries and tb_ticks_per_jiffy / tb_to_us are derived from
 * the difference.  Returns 1 on success, 0 if no VIA device is found.
 */
int __init
via_calibrate_decr(void)
{
	struct device_node *vias;
	volatile unsigned char __iomem *via;
	int count = VIA_TIMER_FREQ_6 / 100;	/* timer ticks (x6) per 1/100 s */
	unsigned int dstart, dend;

	/* any of the VIA flavours will do */
	vias = find_devices("via-cuda");
	if (vias == 0)
		vias = find_devices("via-pmu");
	if (vias == 0)
		vias = find_devices("via");
	if (vias == 0 || vias->n_addrs == 0)
		return 0;
	via = ioremap(vias->addrs[0].address, vias->addrs[0].size);

	/* set timer 1 for continuous interrupts */
	out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
	/* set the counter to a small value */
	out_8(&via[T1CH], 2);
	/* set the latch to `count' */
	out_8(&via[T1LL], count);
	out_8(&via[T1LH], count >> 8);
	/* wait until it hits 0 */
	while ((in_8(&via[IFR]) & T1_INT) == 0)
		;
	dstart = get_dec();
	/* clear the interrupt & wait until it hits 0 again */
	in_8(&via[T1CL]);
	while ((in_8(&via[IFR]) & T1_INT) == 0)
		;
	dend = get_dec();

	/* the decrementer counts down: dstart - dend ticks per period */
	tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100));
	tb_to_us = mulhwu_scale_factor(dstart - dend, 60000);

	printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n",
	       tb_ticks_per_jiffy, dstart - dend);

	iounmap(via);

	return 1;
}
208 | |||
209 | #ifdef CONFIG_PM | ||
/*
 * Reset the time after a sleep.
 *
 * At PBOOK_SLEEP_NOW the offset between xtime and the hardware RTC is
 * recorded; at PBOOK_WAKE xtime is rebuilt from the RTC plus that
 * offset, so wall-clock time keeps advancing across the suspend.
 */
static int
time_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
	/* offset captured at sleep and consumed at the matching wake */
	static unsigned long time_diff;
	unsigned long flags;
	unsigned long seq;

	switch (when) {
	case PBOOK_SLEEP_NOW:
		/* seqlock read loop: retry if a writer updated xtime under us */
		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			time_diff = xtime.tv_sec - pmac_get_rtc_time();
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		break;
	case PBOOK_WAKE:
		write_seqlock_irqsave(&xtime_lock, flags);
		xtime.tv_sec = pmac_get_rtc_time() + time_diff;
		xtime.tv_nsec = 0;
		/* NOTE(review): presumably suppresses an immediate RTC
		 * write-back by the timer code -- confirm against the
		 * user of last_rtc_update */
		last_rtc_update = xtime.tv_sec;
		write_sequnlock_irqrestore(&xtime_lock, flags);
		break;
	}
	return PBOOK_SLEEP_OK;
}
237 | |||
/* Re-syncs the wall clock around PMU sleep/wake; registered from
 * pmac_calibrate_decr(). */
static struct pmu_sleep_notifier time_sleep_notifier = {
	time_sleep_notify, SLEEP_LEVEL_MISC,
};
241 | #endif /* CONFIG_PM */ | ||
242 | |||
243 | /* | ||
244 | * Query the OF and get the decr frequency. | ||
245 | * This was taken from the pmac time_init() when merging the prep/pmac | ||
246 | * time functions. | ||
247 | */ | ||
248 | void __init | ||
249 | pmac_calibrate_decr(void) | ||
250 | { | ||
251 | struct device_node *cpu; | ||
252 | unsigned int freq, *fp; | ||
253 | |||
254 | #ifdef CONFIG_PM | ||
255 | pmu_register_sleep_notifier(&time_sleep_notifier); | ||
256 | #endif /* CONFIG_PM */ | ||
257 | |||
258 | /* We assume MacRISC2 machines have correct device-tree | ||
259 | * calibration. That's better since the VIA itself seems | ||
260 | * to be slightly off. --BenH | ||
261 | */ | ||
262 | if (!machine_is_compatible("MacRISC2") && | ||
263 | !machine_is_compatible("MacRISC3") && | ||
264 | !machine_is_compatible("MacRISC4")) | ||
265 | if (via_calibrate_decr()) | ||
266 | return; | ||
267 | |||
268 | /* Special case: QuickSilver G4s seem to have a badly calibrated | ||
269 | * timebase-frequency in OF, VIA is much better on these. We should | ||
270 | * probably implement calibration based on the KL timer on these | ||
271 | * machines anyway... -BenH | ||
272 | */ | ||
273 | if (machine_is_compatible("PowerMac3,5")) | ||
274 | if (via_calibrate_decr()) | ||
275 | return; | ||
276 | /* | ||
277 | * The cpu node should have a timebase-frequency property | ||
278 | * to tell us the rate at which the decrementer counts. | ||
279 | */ | ||
280 | cpu = find_type_devices("cpu"); | ||
281 | if (cpu == 0) | ||
282 | panic("can't find cpu node in time_init"); | ||
283 | fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL); | ||
284 | if (fp == 0) | ||
285 | panic("can't get cpu timebase frequency"); | ||
286 | freq = *fp; | ||
287 | printk("time_init: decrementer frequency = %u.%.6u MHz\n", | ||
288 | freq/1000000, freq%1000000); | ||
289 | tb_ticks_per_jiffy = freq / HZ; | ||
290 | tb_to_us = mulhwu_scale_factor(freq, 1000000); | ||
291 | } | ||
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig new file mode 100644 index 000000000000..673ac47a1626 --- /dev/null +++ b/arch/powerpc/platforms/prep/Kconfig | |||
@@ -0,0 +1,22 @@ | |||
1 | |||
# PReP (PowerPC Reference Platform) residual-data options.

config PREP_RESIDUAL
	bool "Support for PReP Residual Data"
	depends on PPC_PREP
	help
	  Some PReP systems have residual data passed to the kernel by the
	  firmware.  This allows detection of memory size, devices present and
	  other useful pieces of information.  Sometimes this information is
	  not present or incorrect, in which case it could lead to the machine
	  behaving incorrectly.  If this happens, either disable PREP_RESIDUAL
	  or pass the 'noresidual' option to the kernel.

	  If you are running a PReP system, say Y here, otherwise say N.

config PROC_PREPRESIDUAL
	bool "Support for reading of PReP Residual Data in /proc"
	depends on PREP_RESIDUAL && PROC_FS
	help
	  Enabling this option will create a /proc/residual file which allows
	  you to get at the residual data on PReP systems.  You will need a tool
	  (lsresidual) to parse it.  If you aren't on a PReP system, you don't
	  want this.
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig new file mode 100644 index 000000000000..7a3b6fc4d976 --- /dev/null +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -0,0 +1,47 @@ | |||
1 | |||
# pSeries platform options: partitioning, RTAS firmware services and
# related /proc interfaces.

config PPC_SPLPAR
	depends on PPC_PSERIES
	bool "Support for shared-processor logical partitions"
	default n
	help
	  Enabling this option will make the kernel run more efficiently
	  on logically-partitioned pSeries systems which use shared
	  processors, that is, which share physical processors between
	  two or more partitions.

config HMT
	bool "Hardware multithreading"
	depends on SMP && PPC_PSERIES && BROKEN
	help
	  This option enables hardware multithreading on RS64 cpus.
	  pSeries systems p620 and p660 have such a cpu type.

config EEH
	bool "PCI Extended Error Handling (EEH)" if EMBEDDED
	depends on PPC_PSERIES
	default y if !EMBEDDED

config PPC_RTAS
	bool
	depends on PPC_PSERIES || PPC_BPA
	default y

config RTAS_PROC
	bool "Proc interface to RTAS"
	depends on PPC_RTAS
	default y

config RTAS_FLASH
	tristate "Firmware flash interface"
	depends on PPC64 && RTAS_PROC

config SCANLOG
	tristate "Scanlog dump interface"
	depends on RTAS_PROC && PPC_PSERIES

config LPARCFG
	tristate "LPAR Configuration Data"
	depends on PPC_PSERIES || PPC_ISERIES
	help
	  Provide system capacity information via human readable
	  <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile new file mode 100644 index 000000000000..26bdcd9a2a43 --- /dev/null +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -0,0 +1 @@ | |||
# Makefile for the common PowerPC system-device drivers (arch/powerpc/sysdev).
obj-$(CONFIG_MPIC) += mpic.o
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c new file mode 100644 index 000000000000..c660e7d7c643 --- /dev/null +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -0,0 +1,904 @@ | |||
/*
 * arch/powerpc/sysdev/mpic.c
 *
 * Driver for interrupt controllers following the OpenPIC standard, the
 * common implementation being IBM's MPIC. This driver also can deal
 * with various broken implementations of this HW.
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
14 | |||
15 | #undef DEBUG | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/irq.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/bootmem.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | #include <linux/pci.h> | ||
27 | |||
28 | #include <asm/ptrace.h> | ||
29 | #include <asm/signal.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/irq.h> | ||
33 | #include <asm/machdep.h> | ||
34 | #include <asm/mpic.h> | ||
35 | #include <asm/smp.h> | ||
36 | |||
37 | #ifdef DEBUG | ||
38 | #define DBG(fmt...) printk(fmt) | ||
39 | #else | ||
40 | #define DBG(fmt...) | ||
41 | #endif | ||
42 | |||
/* Singly-linked list of all registered MPIC instances (see mpic_alloc()) */
static struct mpic *mpics;
/* The controller registered with MPIC_PRIMARY, set in mpic_alloc() */
static struct mpic *mpic_primary;
/* Serialises cascade handler updates (see mpic_setup_cascade()) */
static DEFINE_SPINLOCK(mpic_lock);
46 | |||
47 | |||
48 | /* | ||
49 | * Register accessor functions | ||
50 | */ | ||
51 | |||
52 | |||
53 | static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base, | ||
54 | unsigned int reg) | ||
55 | { | ||
56 | if (be) | ||
57 | return in_be32(base + (reg >> 2)); | ||
58 | else | ||
59 | return in_le32(base + (reg >> 2)); | ||
60 | } | ||
61 | |||
62 | static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base, | ||
63 | unsigned int reg, u32 value) | ||
64 | { | ||
65 | if (be) | ||
66 | out_be32(base + (reg >> 2), value); | ||
67 | else | ||
68 | out_le32(base + (reg >> 2), value); | ||
69 | } | ||
70 | |||
71 | static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) | ||
72 | { | ||
73 | unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0; | ||
74 | unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); | ||
75 | |||
76 | if (mpic->flags & MPIC_BROKEN_IPI) | ||
77 | be = !be; | ||
78 | return _mpic_read(be, mpic->gregs, offset); | ||
79 | } | ||
80 | |||
81 | static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) | ||
82 | { | ||
83 | unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); | ||
84 | |||
85 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value); | ||
86 | } | ||
87 | |||
88 | static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) | ||
89 | { | ||
90 | unsigned int cpu = 0; | ||
91 | |||
92 | if (mpic->flags & MPIC_PRIMARY) | ||
93 | cpu = hard_smp_processor_id(); | ||
94 | |||
95 | return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg); | ||
96 | } | ||
97 | |||
98 | static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) | ||
99 | { | ||
100 | unsigned int cpu = 0; | ||
101 | |||
102 | if (mpic->flags & MPIC_PRIMARY) | ||
103 | cpu = hard_smp_processor_id(); | ||
104 | |||
105 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value); | ||
106 | } | ||
107 | |||
108 | static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) | ||
109 | { | ||
110 | unsigned int isu = src_no >> mpic->isu_shift; | ||
111 | unsigned int idx = src_no & mpic->isu_mask; | ||
112 | |||
113 | return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], | ||
114 | reg + (idx * MPIC_IRQ_STRIDE)); | ||
115 | } | ||
116 | |||
117 | static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, | ||
118 | unsigned int reg, u32 value) | ||
119 | { | ||
120 | unsigned int isu = src_no >> mpic->isu_shift; | ||
121 | unsigned int idx = src_no & mpic->isu_mask; | ||
122 | |||
123 | _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], | ||
124 | reg + (idx * MPIC_IRQ_STRIDE), value); | ||
125 | } | ||
126 | |||
/* Shorthand accessors.  NOTE: all of these implicitly reference a local
 * variable named `mpic' (struct mpic *) at the call site. */
#define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
135 | |||
136 | |||
137 | /* | ||
138 | * Low level utility functions | ||
139 | */ | ||
140 | |||
141 | |||
142 | |||
/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	/* write a known pattern, then read it back: getting the
	 * byte-swapped value back means reads are endian-reversed */
	mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}
158 | |||
159 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
160 | |||
161 | /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) | ||
162 | * to force the edge setting on the MPIC and do the ack workaround. | ||
163 | */ | ||
164 | static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no) | ||
165 | { | ||
166 | if (source_no >= 128 || !mpic->fixups) | ||
167 | return 0; | ||
168 | return mpic->fixups[source_no].base != NULL; | ||
169 | } | ||
170 | |||
/* Ack a HyperTransport-sourced interrupt through the bridge's IO-APIC
 * config-space registers (the broken-U3 workaround). */
static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
	u32 tmp;

	/* fixup_lock serialises the index/data register pair below */
	spin_lock(&mpic->fixup_lock);
	/* select this source's entry, then set the top bit of its data
	 * word.  NOTE(review): the 0x11 + 2*irq index encoding mirrors
	 * the 0x10 + 2*i writes in the mpic_amd*_read_irq() scans --
	 * confirm against the bridge documentation. */
	writeb(0x11 + 2 * fixup->irq, fixup->base);
	tmp = readl(fixup->base + 2);
	writel(tmp | 0x80000000ul, fixup->base + 2);
	/* config writes shouldn't be posted but let's be safe ... */
	(void)readl(fixup->base + 2);
	spin_unlock(&mpic->fixup_lock);
}
184 | |||
185 | |||
/* Scan the AMD 8111's IO-APIC entries and record, per active entry, the
 * config-space location needed later to ack it (see mpic_apic_end_irq()). */
static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase)
{
	int i, irq;
	u32 tmp;

	printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase);

	/* 24 entries, accessed through the index (0xf2) / data (0xf4)
	 * register pair in the device's config space */
	for (i=0; i < 24; i++) {
		writeb(0x10 + 2*i, devbase + 0xf2);
		tmp = readl(devbase + 0xf4);
		/* skip entries with bit 0 set or bit 5 clear --
		 * NOTE(review): bit semantics inferred from usage, confirm
		 * against the 8111 documentation */
		if ((tmp & 0x1) || !(tmp & 0x20))
			continue;
		/* bits 23:16 carry the MPIC source this entry drives */
		irq = (tmp >> 16) & 0xff;
		mpic->fixups[irq].irq = i;
		mpic->fixups[irq].base = devbase + 0xf2;
	}
}
203 | |||
/* Same as mpic_amd8111_read_irq() but for the AMD 8131 bridge: 4 entries,
 * index/data registers at 0xba/0xbc. */
static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase)
{
	int i, irq;
	u32 tmp;

	printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase);

	for (i=0; i < 4; i++) {
		writeb(0x10 + 2*i, devbase + 0xba);
		tmp = readl(devbase + 0xbc);
		/* skip entries with bit 0 set or bit 5 clear (see 8111 scan) */
		if ((tmp & 0x1) || !(tmp & 0x20))
			continue;
		irq = (tmp >> 16) & 0xff;
		mpic->fixups[irq].irq = i;
		mpic->fixups[irq].base = devbase + 0xba;
	}
}
221 | |||
/* Scan the primary PCI bus for AMD 8111/8131 bridges and build the
 * fixups table used to ack HT-sourced interrupts on broken U3 machines. */
static void __init mpic_scan_ioapics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");

	/* Allocate fixups array */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map u3 config space. We assume all IO-APICs are on the primary bus
	 * and slot will never be above "0xf" so we only need to map 32k
	 */
	cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header type,
	 * vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) {
		/* each devfn owns a 256-byte config-space window */
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 vendor_id, device_id;
		int multifunc = 0;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;

		/* Check if it's a multifunction device (only really used
		 * to function 0 though
		 */
		multifunc = !!(hdr_type & 0x80);
		vendor_id = l & 0xffff;
		device_id = (l >> 16) & 0xffff;

		/* If a known device, go to fixup setup code */
		if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
			mpic_amd8111_read_irq(mpic, devbase);
		if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
			mpic_amd8131_read_irq(mpic, devbase);
	next:
		/* next device, if function 0: single-function devices get
		 * the remaining 7 function slots skipped */
		if ((PCI_FUNC(devfn) == 0) && !multifunc)
			devfn += 7;
	}
}
278 | |||
279 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
280 | |||
281 | |||
282 | /* Find an mpic associated with a given linux interrupt */ | ||
283 | static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) | ||
284 | { | ||
285 | struct mpic *mpic = mpics; | ||
286 | |||
287 | while(mpic) { | ||
288 | /* search IPIs first since they may override the main interrupts */ | ||
289 | if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { | ||
290 | if (is_ipi) | ||
291 | *is_ipi = 1; | ||
292 | return mpic; | ||
293 | } | ||
294 | if (irq >= mpic->irq_offset && | ||
295 | irq < (mpic->irq_offset + mpic->irq_count)) { | ||
296 | if (is_ipi) | ||
297 | *is_ipi = 0; | ||
298 | return mpic; | ||
299 | } | ||
300 | mpic = mpic -> next; | ||
301 | } | ||
302 | return NULL; | ||
303 | } | ||
304 | |||
305 | /* Convert a cpu mask from logical to physical cpu numbers. */ | ||
306 | static inline u32 mpic_physmask(u32 cpumask) | ||
307 | { | ||
308 | int i; | ||
309 | u32 mask = 0; | ||
310 | |||
311 | for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) | ||
312 | mask |= (cpumask & 1) << get_hard_smp_processor_id(i); | ||
313 | return mask; | ||
314 | } | ||
315 | |||
#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number: irq_desc[ipi].handler
 * points at the mpic's embedded hc_ipi ops, so container_of recovers
 * the owning mpic. */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
	return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
}
#endif
323 | |||
/* Get the mpic structure from the irq number: irq_desc[irq].handler
 * points at the mpic's embedded hc_irq ops. */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
}
329 | |||
/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_CPU_EOI, 0);
	/* NOTE(review): read-back presumably forces the posted EOI write
	 * out to the controller -- WHOAMI is just a harmless register to
	 * read; confirm intent */
	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}
336 | |||
#ifdef CONFIG_SMP
/* Interrupt handler for the IPI vectors: forwards to the generic SMP
 * message dispatcher with the mpic-relative IPI number (0..3). */
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mpic *mpic = dev_id;

	smp_message_recv(irq - mpic->ipi_offset, regs);
	return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */
346 | |||
347 | /* | ||
348 | * Linux descriptor level callbacks | ||
349 | */ | ||
350 | |||
351 | |||
/* Unmask an external interrupt source, then spin (bounded) until the
 * controller has actually cleared the mask bit. */
static void mpic_enable_irq(unsigned int irq)
{
	unsigned int loops = 100000;	/* bound on the readback spin below */
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;

	DBG("%s: enable_irq: %d (src %d)\n", mpic->name, irq, src);

	/* clear the MASK bit in the source's vector/priority register */
	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);

	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "mpic_enable_irq timeout\n");
			break;
		}
	} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
}
371 | |||
372 | static void mpic_disable_irq(unsigned int irq) | ||
373 | { | ||
374 | unsigned int loops = 100000; | ||
375 | struct mpic *mpic = mpic_from_irq(irq); | ||
376 | unsigned int src = irq - mpic->irq_offset; | ||
377 | |||
378 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); | ||
379 | |||
380 | mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, | ||
381 | mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK); | ||
382 | |||
383 | /* make sure mask gets to controller before we return to user */ | ||
384 | do { | ||
385 | if (!loops--) { | ||
386 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | ||
387 | break; | ||
388 | } | ||
389 | } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); | ||
390 | } | ||
391 | |||
/* End-of-interrupt for external sources: issue any broken-U3 HT ack
 * needed, then EOI the controller. */
static void mpic_end_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);

	DBG("%s: end_irq: %d\n", mpic->name, irq);

	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

#ifdef CONFIG_MPIC_BROKEN_U3
	/* HT-sourced interrupts additionally need an ack written into the
	 * bridge's config space (see mpic_apic_end_irq()) */
	if (mpic->flags & MPIC_BROKEN_U3) {
		unsigned int src = irq - mpic->irq_offset;
		if (mpic_is_ht_interrupt(mpic, src))
			mpic_apic_end_irq(mpic, src);
	}
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_eoi(mpic);
}
413 | |||
414 | #ifdef CONFIG_SMP | ||
415 | |||
/* Unmask one of the 4 IPI vectors by clearing the MASK bit in its
 * vector/priority register. */
static void mpic_enable_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);
	unsigned int src = irq - mpic->ipi_offset;

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
424 | |||
/* Intentional no-op: IPIs must always stay deliverable. */
static void mpic_disable_ipi(unsigned int irq)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}
429 | |||
/* End-of-interrupt for IPI vectors: just EOI the controller. */
static void mpic_end_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with SA_INTERRUPT as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}
443 | |||
444 | #endif /* CONFIG_SMP */ | ||
445 | |||
446 | static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | ||
447 | { | ||
448 | struct mpic *mpic = mpic_from_irq(irq); | ||
449 | |||
450 | cpumask_t tmp; | ||
451 | |||
452 | cpus_and(tmp, cpumask, cpu_online_map); | ||
453 | |||
454 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, | ||
455 | mpic_physmask(cpus_addr(tmp)[0])); | ||
456 | } | ||
457 | |||
458 | |||
459 | /* | ||
460 | * Exported functions | ||
461 | */ | ||
462 | |||
463 | |||
/*
 * Allocate, map and minimally set up a new MPIC instance.
 *
 * phys_addr:    physical base of the controller's register block
 * flags:        MPIC_* flags (MPIC_PRIMARY, MPIC_BIG_ENDIAN, ...)
 * isu_size:     sources per external ISU, or 0 to use the internal one
 * irq_offset:   first linux irq number served by this controller
 * irq_count:    number of linux irqs (0 = use the hardware source count,
 *               resolved later in mpic_init())
 * ipi_offset:   first linux irq number used for the 4 IPIs
 * senses:       optional per-source sense/polarity table
 * senses_count: number of entries in `senses'
 * name:         controller name, used in irq_desc and printks
 *
 * Returns the new mpic (bootmem-allocated, never freed) or NULL on
 * allocation failure.  The controller is registered on the global list
 * but is not operational until mpic_init() runs.
 */
struct mpic * __init mpic_alloc(unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_offset,
				unsigned int irq_count,
				unsigned int ipi_offset,
				unsigned char *senses,
				unsigned int senses_count,
				const char *name)
{
	struct mpic *mpic;
	u32 reg;
	const char *vers;
	int i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;

	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;

	/* hook up the linux irq_desc callbacks */
	mpic->hc_irq.typename = name;
	mpic->hc_irq.enable = mpic_enable_irq;
	mpic->hc_irq.disable = mpic_disable_irq;
	mpic->hc_irq.end = mpic_end_irq;
	/* only the primary controller gets affinity control */
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
	mpic->hc_ipi.typename = name;
	mpic->hc_ipi.enable = mpic_enable_ipi;
	mpic->hc_ipi.disable = mpic_disable_ipi;
	mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_offset = irq_offset;
	mpic->irq_count = irq_count;
	mpic->ipi_offset = ipi_offset;
	mpic->num_sources = 0; /* so far */
	mpic->senses = senses;
	mpic->senses_count = senses_count;

	/* Map the global registers */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + (MPIC_TIMER_BASE >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		/* the RESET bit self-clears when the controller is done */
		while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	/* shift/mask used by _mpic_irq_read/write to split a source
	 * number into (isu, index-within-isu) */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
	       mpic->isu_shift, mpic->isu_mask);

	/* link into the global list */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY)
		mpic_primary = mpic;

	return mpic;
}
580 | |||
/*
 * Map an external interrupt source unit (ISU) at phys_addr into slot
 * isu_num and grow the controller's total source count to cover it.
 * Each ISU holds mpic->isu_size sources.
 */
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    unsigned long phys_addr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
	/* extend num_sources if this ISU reaches past the current count */
	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}
592 | |||
/*
 * Install (or replace) the cascade handler for the mpic serving `irq'.
 * Any previous cascade irq is masked first; the new handler is
 * published last so a concurrent interrupt never sees a half-updated
 * (handler, vec, data) triple.
 */
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
			       void *data)
{
	struct mpic *mpic = mpic_find(irq, NULL);
	unsigned long flags;

	/* Synchronization here is a bit dodgy, so don't try to replace cascade
	 * interrupts on the fly too often ... but normally it's set up at boot.
	 */
	spin_lock_irqsave(&mpic_lock, flags);
	if (mpic->cascade)
		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
	/* clear handler before touching vec/data; wmb orders the writes */
	mpic->cascade = NULL;
	wmb();
	mpic->cascade_vec = irq - mpic->irq_offset;
	mpic->cascade_data = data;
	wmb();
	mpic->cascade = handler;
	mpic_enable_irq(irq);
	spin_unlock_irqrestore(&mpic_lock, flags);
}
614 | |||
615 | void __init mpic_init(struct mpic *mpic) | ||
616 | { | ||
617 | int i; | ||
618 | |||
619 | BUG_ON(mpic->num_sources == 0); | ||
620 | |||
621 | printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); | ||
622 | |||
623 | /* Set current processor priority to max */ | ||
624 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); | ||
625 | |||
626 | /* Initialize timers: just disable them all */ | ||
627 | for (i = 0; i < 4; i++) { | ||
628 | mpic_write(mpic->tmregs, | ||
629 | i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0); | ||
630 | mpic_write(mpic->tmregs, | ||
631 | i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI, | ||
632 | MPIC_VECPRI_MASK | | ||
633 | (MPIC_VEC_TIMER_0 + i)); | ||
634 | } | ||
635 | |||
636 | /* Initialize IPIs to our reserved vectors and mark them disabled for now */ | ||
637 | mpic_test_broken_ipi(mpic); | ||
638 | for (i = 0; i < 4; i++) { | ||
639 | mpic_ipi_write(i, | ||
640 | MPIC_VECPRI_MASK | | ||
641 | (10 << MPIC_VECPRI_PRIORITY_SHIFT) | | ||
642 | (MPIC_VEC_IPI_0 + i)); | ||
643 | #ifdef CONFIG_SMP | ||
644 | if (!(mpic->flags & MPIC_PRIMARY)) | ||
645 | continue; | ||
646 | irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU; | ||
647 | irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi; | ||
648 | |||
649 | #endif /* CONFIG_SMP */ | ||
650 | } | ||
651 | |||
652 | /* Initialize interrupt sources */ | ||
653 | if (mpic->irq_count == 0) | ||
654 | mpic->irq_count = mpic->num_sources; | ||
655 | |||
656 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
657 | /* Do the ioapic fixups on U3 broken mpic */ | ||
658 | DBG("MPIC flags: %x\n", mpic->flags); | ||
659 | if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) | ||
660 | mpic_scan_ioapics(mpic); | ||
661 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
662 | |||
663 | for (i = 0; i < mpic->num_sources; i++) { | ||
664 | /* start with vector = source number, and masked */ | ||
665 | u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | ||
666 | int level = 0; | ||
667 | |||
668 | /* if it's an IPI, we skip it */ | ||
669 | if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) && | ||
670 | (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4)) | ||
671 | continue; | ||
672 | |||
673 | /* do senses munging */ | ||
674 | if (mpic->senses && i < mpic->senses_count) { | ||
675 | if (mpic->senses[i] & IRQ_SENSE_LEVEL) | ||
676 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | ||
677 | if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) | ||
678 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | ||
679 | } else | ||
680 | vecpri |= MPIC_VECPRI_SENSE_LEVEL; | ||
681 | |||
682 | /* remember if it was a level interrupt */ | ||
683 | level = (vecpri & MPIC_VECPRI_SENSE_LEVEL); | ||
684 | |||
685 | /* deal with broken U3 */ | ||
686 | if (mpic->flags & MPIC_BROKEN_U3) { | ||
687 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
688 | if (mpic_is_ht_interrupt(mpic, i)) { | ||
689 | vecpri &= ~(MPIC_VECPRI_SENSE_MASK | | ||
690 | MPIC_VECPRI_POLARITY_MASK); | ||
691 | vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; | ||
692 | } | ||
693 | #else | ||
694 | printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n"); | ||
695 | #endif | ||
696 | } | ||
697 | |||
698 | DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri, | ||
699 | (level != 0)); | ||
700 | |||
701 | /* init hw */ | ||
702 | mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); | ||
703 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | ||
704 | 1 << hard_smp_processor_id()); | ||
705 | |||
706 | /* init linux descriptors */ | ||
707 | if (i < mpic->irq_count) { | ||
708 | irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0; | ||
709 | irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq; | ||
710 | } | ||
711 | } | ||
712 | |||
713 | /* Init spurious vector */ | ||
714 | mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS); | ||
715 | |||
716 | /* Disable 8259 passthrough */ | ||
717 | mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, | ||
718 | mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) | ||
719 | | MPIC_GREG_GCONF_8259_PTHROU_DIS); | ||
720 | |||
721 | /* Set current processor priority to 0 */ | ||
722 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); | ||
723 | } | ||
724 | |||
725 | |||
726 | |||
727 | void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | ||
728 | { | ||
729 | int is_ipi; | ||
730 | struct mpic *mpic = mpic_find(irq, &is_ipi); | ||
731 | unsigned long flags; | ||
732 | u32 reg; | ||
733 | |||
734 | spin_lock_irqsave(&mpic_lock, flags); | ||
735 | if (is_ipi) { | ||
736 | reg = mpic_ipi_read(irq - mpic->ipi_offset) & MPIC_VECPRI_PRIORITY_MASK; | ||
737 | mpic_ipi_write(irq - mpic->ipi_offset, | ||
738 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | ||
739 | } else { | ||
740 | reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI) | ||
741 | & MPIC_VECPRI_PRIORITY_MASK; | ||
742 | mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, | ||
743 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | ||
744 | } | ||
745 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
746 | } | ||
747 | |||
748 | unsigned int mpic_irq_get_priority(unsigned int irq) | ||
749 | { | ||
750 | int is_ipi; | ||
751 | struct mpic *mpic = mpic_find(irq, &is_ipi); | ||
752 | unsigned long flags; | ||
753 | u32 reg; | ||
754 | |||
755 | spin_lock_irqsave(&mpic_lock, flags); | ||
756 | if (is_ipi) | ||
757 | reg = mpic_ipi_read(irq - mpic->ipi_offset); | ||
758 | else | ||
759 | reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); | ||
760 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
761 | return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; | ||
762 | } | ||
763 | |||
764 | void mpic_setup_this_cpu(void) | ||
765 | { | ||
766 | #ifdef CONFIG_SMP | ||
767 | struct mpic *mpic = mpic_primary; | ||
768 | unsigned long flags; | ||
769 | u32 msk = 1 << hard_smp_processor_id(); | ||
770 | unsigned int i; | ||
771 | |||
772 | BUG_ON(mpic == NULL); | ||
773 | |||
774 | DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); | ||
775 | |||
776 | spin_lock_irqsave(&mpic_lock, flags); | ||
777 | |||
778 | /* let the mpic know we want intrs. default affinity is 0xffffffff | ||
779 | * until changed via /proc. That's how it's done on x86. If we want | ||
780 | * it differently, then we should make sure we also change the default | ||
781 | * values of irq_affinity in irq.c. | ||
782 | */ | ||
783 | if (distribute_irqs) { | ||
784 | for (i = 0; i < mpic->num_sources ; i++) | ||
785 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | ||
786 | mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk); | ||
787 | } | ||
788 | |||
789 | /* Set current processor priority to 0 */ | ||
790 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); | ||
791 | |||
792 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
793 | #endif /* CONFIG_SMP */ | ||
794 | } | ||
795 | |||
796 | int mpic_cpu_get_priority(void) | ||
797 | { | ||
798 | struct mpic *mpic = mpic_primary; | ||
799 | |||
800 | return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI); | ||
801 | } | ||
802 | |||
803 | void mpic_cpu_set_priority(int prio) | ||
804 | { | ||
805 | struct mpic *mpic = mpic_primary; | ||
806 | |||
807 | prio &= MPIC_CPU_TASKPRI_MASK; | ||
808 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio); | ||
809 | } | ||
810 | |||
811 | /* | ||
812 | * XXX: someone who knows mpic should check this. | ||
813 | * do we need to eoi the ipi including for kexec cpu here (see xics comments)? | ||
814 | * or can we reset the mpic in the new kernel? | ||
815 | */ | ||
816 | void mpic_teardown_this_cpu(int secondary) | ||
817 | { | ||
818 | struct mpic *mpic = mpic_primary; | ||
819 | unsigned long flags; | ||
820 | u32 msk = 1 << hard_smp_processor_id(); | ||
821 | unsigned int i; | ||
822 | |||
823 | BUG_ON(mpic == NULL); | ||
824 | |||
825 | DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); | ||
826 | spin_lock_irqsave(&mpic_lock, flags); | ||
827 | |||
828 | /* let the mpic know we don't want intrs. */ | ||
829 | for (i = 0; i < mpic->num_sources ; i++) | ||
830 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | ||
831 | mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk); | ||
832 | |||
833 | /* Set current processor priority to max */ | ||
834 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); | ||
835 | |||
836 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
837 | } | ||
838 | |||
839 | |||
840 | void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) | ||
841 | { | ||
842 | struct mpic *mpic = mpic_primary; | ||
843 | |||
844 | BUG_ON(mpic == NULL); | ||
845 | |||
846 | DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); | ||
847 | |||
848 | mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10, | ||
849 | mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); | ||
850 | } | ||
851 | |||
852 | int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) | ||
853 | { | ||
854 | u32 irq; | ||
855 | |||
856 | irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; | ||
857 | DBG("%s: get_one_irq(): %d\n", mpic->name, irq); | ||
858 | |||
859 | if (mpic->cascade && irq == mpic->cascade_vec) { | ||
860 | DBG("%s: cascading ...\n", mpic->name); | ||
861 | irq = mpic->cascade(regs, mpic->cascade_data); | ||
862 | mpic_eoi(mpic); | ||
863 | return irq; | ||
864 | } | ||
865 | if (unlikely(irq == MPIC_VEC_SPURRIOUS)) | ||
866 | return -1; | ||
867 | if (irq < MPIC_VEC_IPI_0) | ||
868 | return irq + mpic->irq_offset; | ||
869 | DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0); | ||
870 | return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset; | ||
871 | } | ||
872 | |||
873 | int mpic_get_irq(struct pt_regs *regs) | ||
874 | { | ||
875 | struct mpic *mpic = mpic_primary; | ||
876 | |||
877 | BUG_ON(mpic == NULL); | ||
878 | |||
879 | return mpic_get_one_irq(mpic, regs); | ||
880 | } | ||
881 | |||
882 | |||
883 | #ifdef CONFIG_SMP | ||
884 | void mpic_request_ipis(void) | ||
885 | { | ||
886 | struct mpic *mpic = mpic_primary; | ||
887 | |||
888 | BUG_ON(mpic == NULL); | ||
889 | |||
890 | printk("requesting IPIs ... \n"); | ||
891 | |||
892 | /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ | ||
893 | request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT, | ||
894 | "IPI0 (call function)", mpic); | ||
895 | request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT, | ||
896 | "IPI1 (reschedule)", mpic); | ||
897 | request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT, | ||
898 | "IPI2 (unused)", mpic); | ||
899 | request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT, | ||
900 | "IPI3 (debugger break)", mpic); | ||
901 | |||
902 | printk("IPIs requested... \n"); | ||
903 | } | ||
904 | #endif /* CONFIG_SMP */ | ||
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile index ce166e3de53b..0649540bc7d9 100644 --- a/arch/ppc/kernel/Makefile +++ b/arch/ppc/kernel/Makefile | |||
@@ -1,6 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | ifneq ($(CONFIG_PPC_MERGE),y) | ||
4 | 5 | ||
5 | extra-$(CONFIG_PPC_STD_MMU) := head.o | 6 | extra-$(CONFIG_PPC_STD_MMU) := head.o |
6 | extra-$(CONFIG_40x) := head_4xx.o | 7 | extra-$(CONFIG_40x) := head_4xx.o |
@@ -37,3 +38,23 @@ endif | |||
37 | 38 | ||
38 | # These are here while we do the architecture merge | 39 | # These are here while we do the architecture merge |
39 | vecemu-y += ../../powerpc/kernel/vecemu.o | 40 | vecemu-y += ../../powerpc/kernel/vecemu.o |
41 | |||
42 | else | ||
43 | obj-y := entry.o irq.o idle.o time.o misc.o \ | ||
44 | signal.o ptrace.o align.o \ | ||
45 | syscalls.o setup.o \ | ||
46 | cputable.o perfmon.o | ||
47 | obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o | ||
48 | obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o | ||
49 | obj-$(CONFIG_POWER4) += cpu_setup_power4.o | ||
50 | obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o | ||
51 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o | ||
52 | obj-$(CONFIG_PCI) += pci.o | ||
53 | obj-$(CONFIG_KGDB) += ppc-stub.o | ||
54 | obj-$(CONFIG_SMP) += smp.o smp-tbsync.o | ||
55 | obj-$(CONFIG_TAU) += temp.o | ||
56 | ifndef CONFIG_E200 | ||
57 | obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o | ||
58 | endif | ||
59 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | ||
60 | endif | ||
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index 77fecfbabe88..1b891b806f3d 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c | |||
@@ -83,6 +83,8 @@ extern void pmac_init(unsigned long r3, unsigned long r4, | |||
83 | unsigned long r5, unsigned long r6, unsigned long r7); | 83 | unsigned long r5, unsigned long r6, unsigned long r7); |
84 | extern void chrp_init(unsigned long r3, unsigned long r4, | 84 | extern void chrp_init(unsigned long r3, unsigned long r4, |
85 | unsigned long r5, unsigned long r6, unsigned long r7); | 85 | unsigned long r5, unsigned long r6, unsigned long r7); |
86 | |||
87 | dev_t boot_dev; | ||
86 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | 88 | #endif /* CONFIG_PPC_MULTIPLATFORM */ |
87 | 89 | ||
88 | #ifdef CONFIG_MAGIC_SYSRQ | 90 | #ifdef CONFIG_MAGIC_SYSRQ |
@@ -405,11 +407,13 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
405 | _machine = _MACH_prep; | 407 | _machine = _MACH_prep; |
406 | } | 408 | } |
407 | 409 | ||
410 | #ifdef CONFIG_PPC_PREP | ||
408 | /* not much more to do here, if prep */ | 411 | /* not much more to do here, if prep */ |
409 | if (_machine == _MACH_prep) { | 412 | if (_machine == _MACH_prep) { |
410 | prep_init(r3, r4, r5, r6, r7); | 413 | prep_init(r3, r4, r5, r6, r7); |
411 | return; | 414 | return; |
412 | } | 415 | } |
416 | #endif | ||
413 | 417 | ||
414 | /* prom_init has already been called from __start */ | 418 | /* prom_init has already been called from __start */ |
415 | if (boot_infos) | 419 | if (boot_infos) |
@@ -480,12 +484,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | |||
480 | #endif /* CONFIG_ADB */ | 484 | #endif /* CONFIG_ADB */ |
481 | 485 | ||
482 | switch (_machine) { | 486 | switch (_machine) { |
487 | #ifdef CONFIG_PPC_PMAC | ||
483 | case _MACH_Pmac: | 488 | case _MACH_Pmac: |
484 | pmac_init(r3, r4, r5, r6, r7); | 489 | pmac_init(r3, r4, r5, r6, r7); |
485 | break; | 490 | break; |
491 | #endif | ||
492 | #ifdef CONFIG_PPC_CHRP | ||
486 | case _MACH_chrp: | 493 | case _MACH_chrp: |
487 | chrp_init(r3, r4, r5, r6, r7); | 494 | chrp_init(r3, r4, r5, r6, r7); |
488 | break; | 495 | break; |
496 | #endif | ||
489 | } | 497 | } |
490 | } | 498 | } |
491 | 499 | ||
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c index fccafbcd4b58..8bc734fe6682 100644 --- a/arch/ppc/platforms/prep_setup.c +++ b/arch/ppc/platforms/prep_setup.c | |||
@@ -89,9 +89,6 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi | |||
89 | #define cached_21 (((char *)(ppc_cached_irq_mask))[3]) | 89 | #define cached_21 (((char *)(ppc_cached_irq_mask))[3]) |
90 | #define cached_A1 (((char *)(ppc_cached_irq_mask))[2]) | 90 | #define cached_A1 (((char *)(ppc_cached_irq_mask))[2]) |
91 | 91 | ||
92 | /* for the mac fs */ | ||
93 | dev_t boot_dev; | ||
94 | |||
95 | #ifdef CONFIG_SOUND_CS4232 | 92 | #ifdef CONFIG_SOUND_CS4232 |
96 | long ppc_cs4232_dma, ppc_cs4232_dma2; | 93 | long ppc_cs4232_dma, ppc_cs4232_dma2; |
97 | #endif | 94 | #endif |
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile index b8d08f33f7ee..1b0a84931afc 100644 --- a/arch/ppc/syslib/Makefile +++ b/arch/ppc/syslib/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | CFLAGS_prom_init.o += -fPIC | 5 | CFLAGS_prom_init.o += -fPIC |
6 | CFLAGS_btext.o += -fPIC | 6 | CFLAGS_btext.o += -fPIC |
7 | 7 | ||
8 | ifneq ($(CONFIG_PPC_MERGE),y) | ||
8 | wdt-mpc8xx-$(CONFIG_8xx_WDT) += m8xx_wdt.o | 9 | wdt-mpc8xx-$(CONFIG_8xx_WDT) += m8xx_wdt.o |
9 | 10 | ||
10 | obj-$(CONFIG_PPCBUG_NVRAM) += prep_nvram.o | 11 | obj-$(CONFIG_PPCBUG_NVRAM) += prep_nvram.o |
@@ -109,3 +110,16 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc52xx_setup.o mpc52xx_pic.o \ | |||
109 | ifeq ($(CONFIG_PPC_MPC52xx),y) | 110 | ifeq ($(CONFIG_PPC_MPC52xx),y) |
110 | obj-$(CONFIG_PCI) += mpc52xx_pci.o | 111 | obj-$(CONFIG_PCI) += mpc52xx_pci.o |
111 | endif | 112 | endif |
113 | |||
114 | else | ||
115 | # Stuff still needed by the merged powerpc sources | ||
116 | |||
117 | obj-$(CONFIG_PPCBUG_NVRAM) += prep_nvram.o | ||
118 | obj-$(CONFIG_PPC_OF) += prom_init.o prom.o of_device.o | ||
119 | obj-$(CONFIG_PPC_PMAC) += indirect_pci.o | ||
120 | obj-$(CONFIG_PPC_CHRP) += indirect_pci.o i8259.o | ||
121 | obj-$(CONFIG_PPC_PREP) += indirect_pci.o i8259.o todc_time.o | ||
122 | obj-$(CONFIG_BOOTX_TEXT) += btext.o | ||
123 | obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o indirect_pci.o ppc_sys.o | ||
124 | |||
125 | endif | ||
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 76719451e384..503461884528 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -405,7 +405,7 @@ static int __init via_pmu_start(void) | |||
405 | bright_req_2.complete = 1; | 405 | bright_req_2.complete = 1; |
406 | batt_req.complete = 1; | 406 | batt_req.complete = 1; |
407 | 407 | ||
408 | #ifdef CONFIG_PPC32 | 408 | #if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE) |
409 | if (pmu_kind == PMU_KEYLARGO_BASED) | 409 | if (pmu_kind == PMU_KEYLARGO_BASED) |
410 | openpic_set_irq_priority(vias->intrs[0].line, | 410 | openpic_set_irq_priority(vias->intrs[0].line, |
411 | OPENPIC_PRIORITY_DEFAULT + 1); | 411 | OPENPIC_PRIORITY_DEFAULT + 1); |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index a3453555a94e..5b6b0b6038a7 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -629,12 +629,4 @@ void __init proc_misc_init(void) | |||
629 | if (entry) | 629 | if (entry) |
630 | entry->proc_fops = &proc_sysrq_trigger_operations; | 630 | entry->proc_fops = &proc_sysrq_trigger_operations; |
631 | #endif | 631 | #endif |
632 | #ifdef CONFIG_PPC32 | ||
633 | { | ||
634 | extern struct file_operations ppc_htab_operations; | ||
635 | entry = create_proc_entry("ppc_htab", S_IRUGO|S_IWUSR, NULL); | ||
636 | if (entry) | ||
637 | entry->proc_fops = &ppc_htab_operations; | ||
638 | } | ||
639 | #endif | ||
640 | } | 632 | } |
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h new file mode 100644 index 000000000000..7c55abf597f6 --- /dev/null +++ b/include/asm-powerpc/kdebug.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef _POWERPC_KDEBUG_H | ||
2 | #define _POWERPC_KDEBUG_H 1 | ||
3 | |||
4 | /* nearly identical to x86_64/i386 code */ | ||
5 | |||
6 | #include <linux/notifier.h> | ||
7 | |||
8 | struct pt_regs; | ||
9 | |||
10 | struct die_args { | ||
11 | struct pt_regs *regs; | ||
12 | const char *str; | ||
13 | long err; | ||
14 | int trapnr; | ||
15 | int signr; | ||
16 | }; | ||
17 | |||
18 | /* | ||
19 | Note - you should never unregister because that can race with NMIs. | ||
20 | If you really want to do it first unregister - then synchronize_sched - | ||
21 | then free. | ||
22 | */ | ||
23 | int register_die_notifier(struct notifier_block *nb); | ||
24 | extern struct notifier_block *powerpc_die_chain; | ||
25 | |||
26 | /* Grossly misnamed. */ | ||
27 | enum die_val { | ||
28 | DIE_OOPS = 1, | ||
29 | DIE_IABR_MATCH, | ||
30 | DIE_DABR_MATCH, | ||
31 | DIE_BPT, | ||
32 | DIE_SSTEP, | ||
33 | DIE_PAGE_FAULT, | ||
34 | }; | ||
35 | |||
36 | static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) | ||
37 | { | ||
38 | struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; | ||
39 | return notifier_call_chain(&powerpc_die_chain, val, &args); | ||
40 | } | ||
41 | |||
42 | #endif | ||
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h new file mode 100644 index 000000000000..d9129d2b038e --- /dev/null +++ b/include/asm-powerpc/kprobes.h | |||
@@ -0,0 +1,67 @@ | |||
1 | #ifndef _ASM_KPROBES_H | ||
2 | #define _ASM_KPROBES_H | ||
3 | /* | ||
4 | * Kernel Probes (KProbes) | ||
5 | * include/asm-ppc64/kprobes.h | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
22 | * | ||
23 | * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel | ||
24 | * Probes initial implementation ( includes suggestions from | ||
25 | * Rusty Russell). | ||
26 | * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli | ||
27 | * <ananth@in.ibm.com> | ||
28 | */ | ||
29 | #include <linux/types.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | |||
32 | struct pt_regs; | ||
33 | |||
34 | typedef unsigned int kprobe_opcode_t; | ||
35 | #define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */ | ||
36 | #define MAX_INSN_SIZE 1 | ||
37 | |||
38 | #define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008) | ||
39 | #define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088) | ||
40 | #define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000) | ||
41 | #define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000) | ||
42 | |||
43 | #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry) | ||
44 | |||
45 | #define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \ | ||
46 | IS_TWI(instr) || IS_TDI(instr)) | ||
47 | |||
48 | #define ARCH_SUPPORTS_KRETPROBES | ||
49 | void kretprobe_trampoline(void); | ||
50 | |||
51 | /* Architecture specific copy of original instruction */ | ||
52 | struct arch_specific_insn { | ||
53 | /* copy of original instruction */ | ||
54 | kprobe_opcode_t *insn; | ||
55 | }; | ||
56 | |||
57 | #ifdef CONFIG_KPROBES | ||
58 | extern int kprobe_exceptions_notify(struct notifier_block *self, | ||
59 | unsigned long val, void *data); | ||
60 | #else /* !CONFIG_KPROBES */ | ||
61 | static inline int kprobe_exceptions_notify(struct notifier_block *self, | ||
62 | unsigned long val, void *data) | ||
63 | { | ||
64 | return 0; | ||
65 | } | ||
66 | #endif | ||
67 | #endif /* _ASM_KPROBES_H */ | ||
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h new file mode 100644 index 000000000000..f1e24f4b2d1c --- /dev/null +++ b/include/asm-powerpc/mpic.h | |||
@@ -0,0 +1,279 @@ | |||
1 | #include <linux/irq.h> | ||
2 | |||
3 | /* | ||
4 | * Global registers | ||
5 | */ | ||
6 | |||
7 | #define MPIC_GREG_BASE 0x01000 | ||
8 | |||
9 | #define MPIC_GREG_FEATURE_0 0x00000 | ||
10 | #define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000 | ||
11 | #define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16 | ||
12 | #define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00 | ||
13 | #define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8 | ||
14 | #define MPIC_GREG_FEATURE_VERSION_MASK 0xff | ||
15 | #define MPIC_GREG_FEATURE_1 0x00010 | ||
16 | #define MPIC_GREG_GLOBAL_CONF_0 0x00020 | ||
17 | #define MPIC_GREG_GCONF_RESET 0x80000000 | ||
18 | #define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000 | ||
19 | #define MPIC_GREG_GCONF_BASE_MASK 0x000fffff | ||
20 | #define MPIC_GREG_GLOBAL_CONF_1 0x00030 | ||
21 | #define MPIC_GREG_VENDOR_0 0x00040 | ||
22 | #define MPIC_GREG_VENDOR_1 0x00050 | ||
23 | #define MPIC_GREG_VENDOR_2 0x00060 | ||
24 | #define MPIC_GREG_VENDOR_3 0x00070 | ||
25 | #define MPIC_GREG_VENDOR_ID 0x00080 | ||
26 | #define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000 | ||
27 | #define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16 | ||
28 | #define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00 | ||
29 | #define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8 | ||
30 | #define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff | ||
31 | #define MPIC_GREG_PROCESSOR_INIT 0x00090 | ||
32 | #define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0 | ||
33 | #define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0 | ||
34 | #define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0 | ||
35 | #define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0 | ||
36 | #define MPIC_GREG_SPURIOUS 0x000e0 | ||
37 | #define MPIC_GREG_TIMER_FREQ 0x000f0 | ||
38 | |||
39 | /* | ||
40 | * | ||
41 | * Timer registers | ||
42 | */ | ||
43 | #define MPIC_TIMER_BASE 0x01100 | ||
44 | #define MPIC_TIMER_STRIDE 0x40 | ||
45 | |||
46 | #define MPIC_TIMER_CURRENT_CNT 0x00000 | ||
47 | #define MPIC_TIMER_BASE_CNT 0x00010 | ||
48 | #define MPIC_TIMER_VECTOR_PRI 0x00020 | ||
49 | #define MPIC_TIMER_DESTINATION 0x00030 | ||
50 | |||
51 | /* | ||
52 | * Per-Processor registers | ||
53 | */ | ||
54 | |||
55 | #define MPIC_CPU_THISBASE 0x00000 | ||
56 | #define MPIC_CPU_BASE 0x20000 | ||
57 | #define MPIC_CPU_STRIDE 0x01000 | ||
58 | |||
59 | #define MPIC_CPU_IPI_DISPATCH_0 0x00040 | ||
60 | #define MPIC_CPU_IPI_DISPATCH_1 0x00050 | ||
61 | #define MPIC_CPU_IPI_DISPATCH_2 0x00060 | ||
62 | #define MPIC_CPU_IPI_DISPATCH_3 0x00070 | ||
63 | #define MPIC_CPU_CURRENT_TASK_PRI 0x00080 | ||
64 | #define MPIC_CPU_TASKPRI_MASK 0x0000000f | ||
65 | #define MPIC_CPU_WHOAMI 0x00090 | ||
66 | #define MPIC_CPU_WHOAMI_MASK 0x0000001f | ||
67 | #define MPIC_CPU_INTACK 0x000a0 | ||
68 | #define MPIC_CPU_EOI 0x000b0 | ||
69 | |||
70 | /* | ||
71 | * Per-source registers | ||
72 | */ | ||
73 | |||
74 | #define MPIC_IRQ_BASE 0x10000 | ||
75 | #define MPIC_IRQ_STRIDE 0x00020 | ||
76 | #define MPIC_IRQ_VECTOR_PRI 0x00000 | ||
77 | #define MPIC_VECPRI_MASK 0x80000000 | ||
78 | #define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */ | ||
79 | #define MPIC_VECPRI_PRIORITY_MASK 0x000f0000 | ||
80 | #define MPIC_VECPRI_PRIORITY_SHIFT 16 | ||
81 | #define MPIC_VECPRI_VECTOR_MASK 0x000007ff | ||
82 | #define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000 | ||
83 | #define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000 | ||
84 | #define MPIC_VECPRI_POLARITY_MASK 0x00800000 | ||
85 | #define MPIC_VECPRI_SENSE_LEVEL 0x00400000 | ||
86 | #define MPIC_VECPRI_SENSE_EDGE 0x00000000 | ||
87 | #define MPIC_VECPRI_SENSE_MASK 0x00400000 | ||
88 | #define MPIC_IRQ_DESTINATION 0x00010 | ||
89 | |||
90 | #define MPIC_MAX_IRQ_SOURCES 2048 | ||
91 | #define MPIC_MAX_CPUS 32 | ||
92 | #define MPIC_MAX_ISU 32 | ||
93 | |||
94 | /* | ||
95 | * Special vector numbers (internal use only) | ||
96 | */ | ||
97 | #define MPIC_VEC_SPURRIOUS 255 | ||
98 | #define MPIC_VEC_IPI_3 254 | ||
99 | #define MPIC_VEC_IPI_2 253 | ||
100 | #define MPIC_VEC_IPI_1 252 | ||
101 | #define MPIC_VEC_IPI_0 251 | ||
102 | |||
103 | /* unused */ | ||
104 | #define MPIC_VEC_TIMER_3 250 | ||
105 | #define MPIC_VEC_TIMER_2 249 | ||
106 | #define MPIC_VEC_TIMER_1 248 | ||
107 | #define MPIC_VEC_TIMER_0 247 | ||
108 | |||
109 | /* Type definition of the cascade handler */ | ||
110 | typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data); | ||
111 | |||
112 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
113 | /* Fixup table entry */ | ||
114 | struct mpic_irq_fixup | ||
115 | { | ||
116 | u8 __iomem *base; | ||
117 | unsigned int irq; | ||
118 | }; | ||
119 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
120 | |||
121 | |||
122 | /* The instance data of a given MPIC */ | ||
123 | struct mpic | ||
124 | { | ||
125 | /* The "linux" controller struct */ | ||
126 | hw_irq_controller hc_irq; | ||
127 | #ifdef CONFIG_SMP | ||
128 | hw_irq_controller hc_ipi; | ||
129 | #endif | ||
130 | const char *name; | ||
131 | /* Flags */ | ||
132 | unsigned int flags; | ||
133 | /* How many irq sources in a given ISU */ | ||
134 | unsigned int isu_size; | ||
135 | unsigned int isu_shift; | ||
136 | unsigned int isu_mask; | ||
137 | /* Offset of irq vector numbers */ | ||
138 | unsigned int irq_offset; | ||
139 | unsigned int irq_count; | ||
140 | /* Offset of ipi vector numbers */ | ||
141 | unsigned int ipi_offset; | ||
142 | /* Number of sources */ | ||
143 | unsigned int num_sources; | ||
144 | /* Number of CPUs */ | ||
145 | unsigned int num_cpus; | ||
146 | /* cascade handler */ | ||
147 | mpic_cascade_t cascade; | ||
148 | void *cascade_data; | ||
149 | unsigned int cascade_vec; | ||
150 | /* senses array */ | ||
151 | unsigned char *senses; | ||
152 | unsigned int senses_count; | ||
153 | |||
154 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
155 | /* The fixup table */ | ||
156 | struct mpic_irq_fixup *fixups; | ||
157 | spinlock_t fixup_lock; | ||
158 | #endif | ||
159 | |||
160 | /* The various ioremap'ed bases */ | ||
161 | volatile u32 __iomem *gregs; | ||
162 | volatile u32 __iomem *tmregs; | ||
163 | volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS]; | ||
164 | volatile u32 __iomem *isus[MPIC_MAX_ISU]; | ||
165 | |||
166 | /* link */ | ||
167 | struct mpic *next; | ||
168 | }; | ||
169 | |||
170 | /* This is the primary controller, only that one has IPIs and | ||
171 | * has affinity control. A non-primary MPIC always uses CPU0 | ||
172 | * registers only | ||
173 | */ | ||
174 | #define MPIC_PRIMARY 0x00000001 | ||
175 | /* Set this for a big-endian MPIC */ | ||
176 | #define MPIC_BIG_ENDIAN 0x00000002 | ||
177 | /* Broken U3 MPIC */ | ||
178 | #define MPIC_BROKEN_U3 0x00000004 | ||
179 | /* Broken IPI registers (autodetected) */ | ||
180 | #define MPIC_BROKEN_IPI 0x00000008 | ||
181 | /* MPIC wants a reset */ | ||
182 | #define MPIC_WANTS_RESET 0x00000010 | ||
183 | |||
184 | /* Allocate the controller structure and setup the linux irq descs | ||
186 | * for the range of interrupts passed in. No HW initialization is | ||
186 | * actually performed. | ||
187 | * | ||
188 | * @phys_addr: physical base address of the MPIC | ||
189 | * @flags: flags, see constants above | ||
190 | * @isu_size: number of interrupts in an ISU. Use 0 to use a | ||
191 | * standard ISU-less setup (aka powermac) | ||
192 | * @irq_offset: first irq number to assign to this mpic | ||
193 | * @irq_count: number of irqs to use with this mpic IRQ sources. Pass 0 | ||
194 | * to match the number of sources | ||
195 | * @ipi_offset: first irq number to assign to this mpic IPI sources, | ||
196 | * used only on primary mpic | ||
197 | * @senses: array of sense values | ||
198 | * @senses_num: number of entries in the array | ||
199 | * | ||
200 | * Note about the sense array. If none is passed, all interrupts are | ||
201 | * setup to be level negative unless MPIC_BROKEN_U3 is set in which | ||
202 | * case they are edge positive (and the array is ignored anyway). | ||
203 | * The values in the array start at the first source of the MPIC, | ||
204 | * that is senses[0] correspond to linux irq "irq_offset". | ||
205 | */ | ||
206 | extern struct mpic *mpic_alloc(unsigned long phys_addr, | ||
207 | unsigned int flags, | ||
208 | unsigned int isu_size, | ||
209 | unsigned int irq_offset, | ||
210 | unsigned int irq_count, | ||
211 | unsigned int ipi_offset, | ||
212 | unsigned char *senses, | ||
213 | unsigned int senses_num, | ||
214 | const char *name); | ||
215 | |||
216 | /* Assign ISUs, to call before mpic_init() | ||
217 | * | ||
218 | * @mpic: controller structure as returned by mpic_alloc() | ||
219 | * @isu_num: ISU number | ||
220 | * @phys_addr: physical address of the ISU | ||
221 | */ | ||
222 | extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, | ||
223 | unsigned long phys_addr); | ||
224 | |||
225 | /* Initialize the controller. After this has been called, none of the above | ||
226 | * should be called again for this mpic | ||
227 | */ | ||
228 | extern void mpic_init(struct mpic *mpic); | ||
229 | |||
230 | /* Setup a cascade. Currently, only one cascade is supported this | ||
231 | * way, though you can always do a normal request_irq() and add | ||
232 | * other cascades this way. You should call this _after_ having | ||
233 | * added all the ISUs | ||
234 | * | ||
235 | * @irq_no: "linux" irq number of the cascade (that is offset'ed vector) | ||
236 | * @handler: cascade handler function | ||
237 | */ | ||
238 | extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler, | ||
239 | void *data); | ||
240 | |||
241 | /* | ||
242 | * All of the following functions must only be used after the | ||
243 | * ISUs have been assigned and the controller fully initialized | ||
244 | * with mpic_init() | ||
245 | */ | ||
246 | |||
247 | |||
248 | /* Change/Read the priority of an interrupt. Default is 8 for irqs and | ||
249 | * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the | ||
250 | * IPI number is then the offset'ed (linux irq number mapped to the IPI) | ||
251 | */ | ||
252 | extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri); | ||
253 | extern unsigned int mpic_irq_get_priority(unsigned int irq); | ||
254 | |||
255 | /* Setup a non-boot CPU */ | ||
256 | extern void mpic_setup_this_cpu(void); | ||
257 | |||
258 | /* Clean up for kexec (or cpu offline or ...) */ | ||
259 | extern void mpic_teardown_this_cpu(int secondary); | ||
260 | |||
261 | /* Get the current cpu priority for this cpu (0..15) */ | ||
262 | extern int mpic_cpu_get_priority(void); | ||
263 | |||
264 | /* Set the current cpu priority for this cpu */ | ||
265 | extern void mpic_cpu_set_priority(int prio); | ||
266 | |||
267 | /* Request IPIs on primary mpic */ | ||
268 | extern void mpic_request_ipis(void); | ||
269 | |||
270 | /* Send an IPI (non offseted number 0..3) */ | ||
271 | extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); | ||
272 | |||
273 | /* Fetch interrupt from a given mpic */ | ||
274 | extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); | ||
275 | /* This one gets to the primary mpic */ | ||
276 | extern int mpic_get_irq(struct pt_regs *regs); | ||
277 | |||
278 | /* global mpic for pSeries */ | ||
279 | extern struct mpic *pSeries_mpic; | ||
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h new file mode 100644 index 000000000000..f97a5f1761b4 --- /dev/null +++ b/include/asm-powerpc/reg.h | |||
@@ -0,0 +1,446 @@ | |||
1 | /* | ||
2 | * Contains the definition of registers common to all PowerPC variants. | ||
3 | * If a register definition has been changed in a different PowerPC | ||
4 | * variant, we will case it in #ifndef XXX ... #endif, and have the | ||
5 | * number used in the Programming Environments Manual For 32-Bit | ||
6 | * Implementations of the PowerPC Architecture (a.k.a. Green Book) here. | ||
7 | */ | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | #ifndef __ASM_PPC_REGS_H__ | ||
11 | #define __ASM_PPC_REGS_H__ | ||
12 | |||
13 | #include <linux/stringify.h> | ||
14 | |||
15 | /* Pickup Book E specific registers. */ | ||
16 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | ||
17 | #include <asm/reg_booke.h> | ||
18 | #endif | ||
19 | |||
20 | /* Machine State Register (MSR) Fields */ | ||
21 | #define MSR_SF (1<<63) | ||
22 | #define MSR_ISF (1<<61) | ||
23 | #define MSR_VEC (1<<25) /* Enable AltiVec */ | ||
24 | #define MSR_POW (1<<18) /* Enable Power Management */ | ||
25 | #define MSR_WE (1<<18) /* Wait State Enable */ | ||
26 | #define MSR_TGPR (1<<17) /* TLB Update registers in use */ | ||
27 | #define MSR_CE (1<<17) /* Critical Interrupt Enable */ | ||
28 | #define MSR_ILE (1<<16) /* Interrupt Little Endian */ | ||
29 | #define MSR_EE (1<<15) /* External Interrupt Enable */ | ||
30 | #define MSR_PR (1<<14) /* Problem State / Privilege Level */ | ||
31 | #define MSR_FP (1<<13) /* Floating Point enable */ | ||
32 | #define MSR_ME (1<<12) /* Machine Check Enable */ | ||
33 | #define MSR_FE0 (1<<11) /* Floating Exception mode 0 */ | ||
34 | #define MSR_SE (1<<10) /* Single Step */ | ||
35 | #define MSR_BE (1<<9) /* Branch Trace */ | ||
36 | #define MSR_DE (1<<9) /* Debug Exception Enable */ | ||
37 | #define MSR_FE1 (1<<8) /* Floating Exception mode 1 */ | ||
38 | #define MSR_IP (1<<6) /* Exception prefix 0x000/0xFFF */ | ||
39 | #define MSR_IR (1<<5) /* Instruction Relocate */ | ||
40 | #define MSR_DR (1<<4) /* Data Relocate */ | ||
41 | #define MSR_PE (1<<3) /* Protection Enable */ | ||
42 | #define MSR_PX (1<<2) /* Protection Exclusive Mode */ | ||
43 | #define MSR_RI (1<<1) /* Recoverable Exception */ | ||
44 | #define MSR_LE (1<<0) /* Little Endian */ | ||
45 | |||
46 | /* Default MSR for kernel mode. */ | ||
47 | #ifdef CONFIG_APUS_FAST_EXCEPT | ||
48 | #define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR) | ||
49 | #endif | ||
50 | |||
51 | #ifndef MSR_KERNEL | ||
52 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) | ||
53 | #endif | ||
54 | |||
55 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) | ||
56 | |||
57 | /* Floating Point Status and Control Register (FPSCR) Fields */ | ||
58 | #define FPSCR_FX 0x80000000 /* FPU exception summary */ | ||
59 | #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ | ||
60 | #define FPSCR_VX 0x20000000 /* Invalid operation summary */ | ||
61 | #define FPSCR_OX 0x10000000 /* Overflow exception summary */ | ||
62 | #define FPSCR_UX 0x08000000 /* Underflow exception summary */ | ||
63 | #define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */ | ||
64 | #define FPSCR_XX 0x02000000 /* Inexact exception summary */ | ||
65 | #define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */ | ||
66 | #define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */ | ||
67 | #define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */ | ||
68 | #define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */ | ||
69 | #define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */ | ||
70 | #define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */ | ||
71 | #define FPSCR_FR 0x00040000 /* Fraction rounded */ | ||
72 | #define FPSCR_FI 0x00020000 /* Fraction inexact */ | ||
73 | #define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */ | ||
74 | #define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */ | ||
75 | #define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */ | ||
76 | #define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */ | ||
77 | #define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */ | ||
78 | #define FPSCR_VE 0x00000080 /* Invalid op exception enable */ | ||
79 | #define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */ | ||
80 | #define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */ | ||
81 | #define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */ | ||
82 | #define FPSCR_XE 0x00000008 /* FP inexact exception enable */ | ||
83 | #define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */ | ||
84 | #define FPSCR_RN 0x00000003 /* FPU rounding control */ | ||
85 | |||
86 | /* Special Purpose Registers (SPRNs)*/ | ||
87 | #define SPRN_CTR 0x009 /* Count Register */ | ||
88 | #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ | ||
89 | #define DABR_TRANSLATION (1UL << 2) | ||
90 | #define SPRN_DAR 0x013 /* Data Address Register */ | ||
91 | #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ | ||
92 | #define DSISR_NOHPTE 0x40000000 /* no translation found */ | ||
93 | #define DSISR_PROTFAULT 0x08000000 /* protection fault */ | ||
94 | #define DSISR_ISSTORE 0x02000000 /* access was a store */ | ||
95 | #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ | ||
96 | #define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ | ||
97 | #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ | ||
98 | #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ | ||
99 | #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ | ||
100 | #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ | ||
101 | #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ | ||
102 | #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ | ||
103 | #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ | ||
104 | #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ | ||
105 | #define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */ | ||
106 | #define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */ | ||
107 | #define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */ | ||
108 | #define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */ | ||
109 | #define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */ | ||
110 | #define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */ | ||
111 | #define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */ | ||
112 | #define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */ | ||
113 | #define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */ | ||
114 | #define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */ | ||
115 | #define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */ | ||
116 | #define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */ | ||
117 | #define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */ | ||
118 | |||
119 | #define SPRN_DEC 0x016 /* Decrement Register */ | ||
120 | #define SPRN_DER 0x095 /* Debug Enable Register */ | ||
121 | #define DER_RSTE 0x40000000 /* Reset Interrupt */ | ||
122 | #define DER_CHSTPE 0x20000000 /* Check Stop */ | ||
123 | #define DER_MCIE 0x10000000 /* Machine Check Interrupt */ | ||
124 | #define DER_EXTIE 0x02000000 /* External Interrupt */ | ||
125 | #define DER_ALIE 0x01000000 /* Alignment Interrupt */ | ||
126 | #define DER_PRIE 0x00800000 /* Program Interrupt */ | ||
127 | #define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */ | ||
128 | #define DER_DECIE 0x00200000 /* Decrementer Interrupt */ | ||
129 | #define DER_SYSIE 0x00040000 /* System Call Interrupt */ | ||
130 | #define DER_TRE 0x00020000 /* Trace Interrupt */ | ||
131 | #define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */ | ||
132 | #define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */ | ||
133 | #define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */ | ||
134 | #define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */ | ||
135 | #define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */ | ||
136 | #define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */ | ||
137 | #define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */ | ||
138 | #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ | ||
139 | #define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ | ||
140 | #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ | ||
141 | #define SPRN_EAR 0x11A /* External Address Register */ | ||
142 | #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ | ||
143 | #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */ | ||
144 | #define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */ | ||
145 | #define HID0_EMCP (1<<31) /* Enable Machine Check pin */ | ||
146 | #define HID0_EBA (1<<29) /* Enable Bus Address Parity */ | ||
147 | #define HID0_EBD (1<<28) /* Enable Bus Data Parity */ | ||
148 | #define HID0_SBCLK (1<<27) | ||
149 | #define HID0_EICE (1<<26) | ||
150 | #define HID0_TBEN (1<<26) /* Timebase enable - 745x */ | ||
151 | #define HID0_ECLK (1<<25) | ||
152 | #define HID0_PAR (1<<24) | ||
153 | #define HID0_STEN (1<<24) /* Software table search enable - 745x */ | ||
154 | #define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */ | ||
155 | #define HID0_DOZE (1<<23) | ||
156 | #define HID0_NAP (1<<22) | ||
157 | #define HID0_SLEEP (1<<21) | ||
158 | #define HID0_DPM (1<<20) | ||
159 | #define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */ | ||
160 | #define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */ | ||
161 | #define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/ | ||
162 | #define HID0_ICE (1<<15) /* Instruction Cache Enable */ | ||
163 | #define HID0_DCE (1<<14) /* Data Cache Enable */ | ||
164 | #define HID0_ILOCK (1<<13) /* Instruction Cache Lock */ | ||
165 | #define HID0_DLOCK (1<<12) /* Data Cache Lock */ | ||
166 | #define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */ | ||
167 | #define HID0_DCI (1<<10) /* Data Cache Invalidate */ | ||
168 | #define HID0_SPD (1<<9) /* Speculative disable */ | ||
169 | #define HID0_DAPUEN (1<<8) /* Debug APU enable */ | ||
170 | #define HID0_SGE (1<<7) /* Store Gathering Enable */ | ||
171 | #define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */ | ||
172 | #define HID0_DFCA (1<<6) /* Data Cache Flush Assist */ | ||
173 | #define HID0_LRSTK (1<<4) /* Link register stack - 745x */ | ||
174 | #define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */ | ||
175 | #define HID0_ABE (1<<3) /* Address Broadcast Enable */ | ||
176 | #define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */ | ||
177 | #define HID0_BHTE (1<<2) /* Branch History Table Enable */ | ||
178 | #define HID0_BTCD (1<<1) /* Branch target cache disable */ | ||
179 | #define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */ | ||
180 | #define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */ | ||
181 | |||
182 | #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ | ||
183 | #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ | ||
184 | #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ | ||
185 | #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ | ||
186 | #define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */ | ||
187 | #define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */ | ||
188 | #define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */ | ||
189 | #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ | ||
190 | #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ | ||
191 | #define HID1_PS (1<<16) /* 750FX PLL selection */ | ||
192 | #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ | ||
193 | #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ | ||
194 | #define SPRN_HID4 0x3F4 /* 970 HID4 */ | ||
195 | #define SPRN_HID5 0x3F6 /* 970 HID5 */ | ||
196 | #if !defined(SPRN_IAC1) && !defined(SPRN_IAC2) | ||
197 | #define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */ | ||
198 | #define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */ | ||
199 | #endif | ||
200 | #define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */ | ||
201 | #define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */ | ||
202 | #define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */ | ||
203 | #define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */ | ||
204 | #define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */ | ||
205 | #define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */ | ||
206 | #define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */ | ||
207 | #define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */ | ||
208 | #define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */ | ||
209 | #define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */ | ||
210 | #define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */ | ||
211 | #define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */ | ||
212 | #define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */ | ||
213 | #define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */ | ||
214 | #define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */ | ||
215 | #define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */ | ||
216 | #define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ | ||
217 | #define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ | ||
218 | #define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */ | ||
219 | #define ICTRL_EICE 0x08000000 /* enable icache parity errs */ | ||
220 | #define ICTRL_EDC 0x04000000 /* enable dcache parity errs */ | ||
221 | #define ICTRL_EICP 0x00000100 /* enable icache par. check */ | ||
222 | #define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */ | ||
223 | #define SPRN_IMMR 0x27E /* Internal Memory Map Register */ | ||
224 | #define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */ | ||
225 | #define SPRN_L2CR2 0x3f8 | ||
226 | #define L2CR_L2E 0x80000000 /* L2 enable */ | ||
227 | #define L2CR_L2PE 0x40000000 /* L2 parity enable */ | ||
228 | #define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */ | ||
229 | #define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */ | ||
230 | #define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */ | ||
231 | #define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */ | ||
232 | #define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */ | ||
233 | #define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */ | ||
234 | #define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */ | ||
235 | #define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */ | ||
236 | #define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */ | ||
237 | #define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */ | ||
238 | #define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */ | ||
239 | #define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */ | ||
240 | #define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */ | ||
241 | #define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */ | ||
242 | #define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */ | ||
243 | #define L2CR_L2DO 0x00400000 /* L2 data only */ | ||
244 | #define L2CR_L2I 0x00200000 /* L2 global invalidate */ | ||
245 | #define L2CR_L2CTL 0x00100000 /* L2 RAM control */ | ||
246 | #define L2CR_L2WT 0x00080000 /* L2 write-through */ | ||
247 | #define L2CR_L2TS 0x00040000 /* L2 test support */ | ||
248 | #define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */ | ||
249 | #define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */ | ||
250 | #define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */ | ||
251 | #define L2CR_L2SL 0x00008000 /* L2 DLL slow */ | ||
252 | #define L2CR_L2DF 0x00004000 /* L2 differential clock */ | ||
253 | #define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */ | ||
254 | #define L2CR_L2IP 0x00000001 /* L2 GI in progress */ | ||
255 | #define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */ | ||
256 | #define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */ | ||
257 | #define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */ | ||
258 | #define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */ | ||
259 | #define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */ | ||
260 | #define L3CR_L3E 0x80000000 /* L3 enable */ | ||
261 | #define L3CR_L3PE 0x40000000 /* L3 data parity enable */ | ||
262 | #define L3CR_L3APE 0x20000000 /* L3 addr parity enable */ | ||
263 | #define L3CR_L3SIZ 0x10000000 /* L3 size */ | ||
264 | #define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */ | ||
265 | #define L3CR_L3RES 0x04000000 /* L3 special reserved bit */ | ||
266 | #define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */ | ||
267 | #define L3CR_L3IO 0x00400000 /* L3 instruction only */ | ||
268 | #define L3CR_L3SPO 0x00040000 /* L3 sample point override */ | ||
269 | #define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */ | ||
270 | #define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */ | ||
271 | #define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */ | ||
272 | #define L3CR_L3HWF 0x00000800 /* L3 hardware flush */ | ||
273 | #define L3CR_L3I 0x00000400 /* L3 global invalidate */ | ||
274 | #define L3CR_L3RT 0x00000300 /* L3 SRAM type */ | ||
275 | #define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */ | ||
276 | #define L3CR_L3DO 0x00000040 /* L3 data only mode */ | ||
277 | #define L3CR_PMEN 0x00000004 /* L3 private memory enable */ | ||
278 | #define L3CR_PMSIZ 0x00000001 /* L3 private memory size */ | ||
279 | #define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */ | ||
280 | #define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 1 */ | ||
281 | #define SPRN_LDSTCR 0x3f8 /* Load/Store control register */ | ||
282 | #define SPRN_LDSTDB 0x3f4 /* */ | ||
283 | #define SPRN_LR 0x008 /* Link Register */ | ||
284 | #define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */ | ||
285 | #define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */ | ||
286 | #ifndef SPRN_PIR | ||
287 | #define SPRN_PIR 0x3FF /* Processor Identification Register */ | ||
288 | #endif | ||
289 | #define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */ | ||
290 | #define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */ | ||
291 | #define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */ | ||
292 | #define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */ | ||
293 | #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ | ||
294 | #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ | ||
295 | #define SPRN_PVR 0x11F /* Processor Version Register */ | ||
296 | #define SPRN_RPA 0x3D6 /* Required Physical Address Register */ | ||
297 | #define SPRN_SDA 0x3BF /* Sampled Data Address Register */ | ||
298 | #define SPRN_SDR1 0x019 /* MMU Hash Base Register */ | ||
299 | #define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */ | ||
300 | #define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */ | ||
301 | #define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */ | ||
302 | #define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */ | ||
303 | #define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */ | ||
304 | #define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */ | ||
305 | #define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */ | ||
306 | #define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */ | ||
307 | #define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */ | ||
308 | #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ | ||
309 | #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ | ||
310 | #ifndef SPRN_SVR | ||
311 | #define SPRN_SVR 0x11E /* System Version Register */ | ||
312 | #endif | ||
313 | #define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */ | ||
314 | /* these bits were defined in inverted endian sense originally, ugh, confusing */ | ||
315 | #define THRM1_TIN (1 << 31) | ||
316 | #define THRM1_TIV (1 << 30) | ||
317 | #define THRM1_THRES(x) ((x&0x7f)<<23) | ||
318 | #define THRM3_SITV(x) ((x&0x3fff)<<1) | ||
319 | #define THRM1_TID (1<<2) | ||
320 | #define THRM1_TIE (1<<1) | ||
321 | #define THRM1_V (1<<0) | ||
322 | #define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */ | ||
323 | #define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */ | ||
324 | #define THRM3_E (1<<0) | ||
325 | #define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */ | ||
326 | #define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */ | ||
327 | #define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */ | ||
328 | #define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */ | ||
329 | #define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */ | ||
330 | #define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */ | ||
331 | #define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */ | ||
332 | #define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */ | ||
333 | #define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ | ||
334 | #define SPRN_XER 0x001 /* Fixed Point Exception Register */ | ||
335 | |||
336 | /* Bit definitions for MMCR0 and PMC1 / PMC2. */ | ||
337 | #define MMCR0_PMC1_CYCLES (1 << 7) | ||
338 | #define MMCR0_PMC1_ICACHEMISS (5 << 7) | ||
339 | #define MMCR0_PMC1_DTLB (6 << 7) | ||
340 | #define MMCR0_PMC2_DCACHEMISS 0x6 | ||
341 | #define MMCR0_PMC2_CYCLES 0x1 | ||
342 | #define MMCR0_PMC2_ITLB 0x7 | ||
343 | #define MMCR0_PMC2_LOADMISSTIME 0x5 | ||
344 | #define MMCR0_PMXE (1 << 26) | ||
345 | |||
346 | /* Processor Version Register */ | ||
347 | |||
348 | /* Processor Version Register (PVR) field extraction */ | ||
349 | |||
350 | #define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ | ||
351 | #define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ | ||
352 | |||
353 | /* | ||
354 | * IBM has further subdivided the standard PowerPC 16-bit version and | ||
355 | * revision subfields of the PVR for the PowerPC 403s into the following: | ||
356 | */ | ||
357 | |||
358 | #define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */ | ||
359 | #define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */ | ||
360 | #define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */ | ||
361 | #define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */ | ||
362 | #define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */ | ||
363 | #define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */ | ||
364 | |||
365 | /* Processor Version Numbers */ | ||
366 | |||
367 | #define PVR_403GA 0x00200000 | ||
368 | #define PVR_403GB 0x00200100 | ||
369 | #define PVR_403GC 0x00200200 | ||
370 | #define PVR_403GCX 0x00201400 | ||
371 | #define PVR_405GP 0x40110000 | ||
372 | #define PVR_STB03XXX 0x40310000 | ||
373 | #define PVR_NP405H 0x41410000 | ||
374 | #define PVR_NP405L 0x41610000 | ||
375 | #define PVR_601 0x00010000 | ||
376 | #define PVR_602 0x00050000 | ||
377 | #define PVR_603 0x00030000 | ||
378 | #define PVR_603e 0x00060000 | ||
379 | #define PVR_603ev 0x00070000 | ||
380 | #define PVR_603r 0x00071000 | ||
381 | #define PVR_604 0x00040000 | ||
382 | #define PVR_604e 0x00090000 | ||
383 | #define PVR_604r 0x000A0000 | ||
384 | #define PVR_620 0x00140000 | ||
385 | #define PVR_740 0x00080000 | ||
386 | #define PVR_750 PVR_740 | ||
387 | #define PVR_740P 0x10080000 | ||
388 | #define PVR_750P PVR_740P | ||
389 | #define PVR_7400 0x000C0000 | ||
390 | #define PVR_7410 0x800C0000 | ||
391 | #define PVR_7450 0x80000000 | ||
392 | #define PVR_8540 0x80200000 | ||
393 | #define PVR_8560 0x80200000 | ||
394 | /* | ||
395 | * For the 8xx processors, all of them report the same PVR family for | ||
396 | * the PowerPC core. The various versions of these processors must be | ||
397 | * differentiated by the version number in the Communication Processor | ||
398 | * Module (CPM). | ||
399 | */ | ||
400 | #define PVR_821 0x00500000 | ||
401 | #define PVR_823 PVR_821 | ||
402 | #define PVR_850 PVR_821 | ||
403 | #define PVR_860 PVR_821 | ||
404 | #define PVR_8240 0x00810100 | ||
405 | #define PVR_8245 0x80811014 | ||
406 | #define PVR_8260 PVR_8240 | ||
407 | |||
408 | #if 0 | ||
409 | /* Segment Registers */ | ||
410 | #define SR0 0 | ||
411 | #define SR1 1 | ||
412 | #define SR2 2 | ||
413 | #define SR3 3 | ||
414 | #define SR4 4 | ||
415 | #define SR5 5 | ||
416 | #define SR6 6 | ||
417 | #define SR7 7 | ||
418 | #define SR8 8 | ||
419 | #define SR9 9 | ||
420 | #define SR10 10 | ||
421 | #define SR11 11 | ||
422 | #define SR12 12 | ||
423 | #define SR13 13 | ||
424 | #define SR14 14 | ||
425 | #define SR15 15 | ||
426 | #endif | ||
427 | |||
428 | /* Macros for setting and retrieving special purpose registers */ | ||
429 | #ifndef __ASSEMBLY__ | ||
430 | #define mfmsr() ({unsigned int rval; \ | ||
431 | asm volatile("mfmsr %0" : "=r" (rval)); rval;}) | ||
432 | #define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) | ||
433 | |||
434 | #define mfspr(rn) ({unsigned int rval; \ | ||
435 | asm volatile("mfspr %0," __stringify(rn) \ | ||
436 | : "=r" (rval)); rval;}) | ||
437 | #define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) | ||
438 | |||
439 | #define mfsrin(v) ({unsigned int rval; \ | ||
440 | asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ | ||
441 | rval;}) | ||
442 | |||
443 | #define proc_trap() asm volatile("trap") | ||
444 | #endif /* __ASSEMBLY__ */ | ||
445 | #endif /* __ASM_PPC_REGS_H__ */ | ||
446 | #endif /* __KERNEL__ */ | ||
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h new file mode 100644 index 000000000000..be542efb32d3 --- /dev/null +++ b/include/asm-powerpc/system.h | |||
@@ -0,0 +1,350 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
3 | */ | ||
4 | #ifndef __PPC_SYSTEM_H | ||
5 | #define __PPC_SYSTEM_H | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <linux/kernel.h> | ||
9 | |||
10 | #include <asm/hw_irq.h> | ||
11 | #include <asm/ppc_asm.h> | ||
12 | |||
13 | /* | ||
14 | * Memory barrier. | ||
15 | * The sync instruction guarantees that all memory accesses initiated | ||
16 | * by this processor have been performed (with respect to all other | ||
17 | * mechanisms that access memory). The eieio instruction is a barrier | ||
18 | * providing an ordering (separately) for (a) cacheable stores and (b) | ||
19 | * loads and stores to non-cacheable memory (e.g. I/O devices). | ||
20 | * | ||
21 | * mb() prevents loads and stores being reordered across this point. | ||
22 | * rmb() prevents loads being reordered across this point. | ||
23 | * wmb() prevents stores being reordered across this point. | ||
24 | * read_barrier_depends() prevents data-dependent loads being reordered | ||
25 | * across this point (nop on PPC). | ||
26 | * | ||
27 | * We have to use the sync instructions for mb(), since lwsync doesn't | ||
28 | * order loads with respect to previous stores. Lwsync is fine for | ||
29 | * rmb(), though. Note that lwsync is interpreted as sync by | ||
30 | * 32-bit and older 64-bit CPUs. | ||
31 | * | ||
32 | * For wmb(), we use sync since wmb is used in drivers to order | ||
33 | * stores to system memory with respect to writes to the device. | ||
34 | * However, smp_wmb() can be a lighter-weight eieio barrier on | ||
35 | * SMP since it is only used to order updates to system memory. | ||
36 | */ | ||
37 | #define mb() __asm__ __volatile__ ("sync" : : : "memory") | ||
38 | #define rmb() __asm__ __volatile__ ("lwsync" : : : "memory") | ||
39 | #define wmb() __asm__ __volatile__ ("sync" : : : "memory") | ||
40 | #define read_barrier_depends() do { } while(0) | ||
41 | |||
42 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
43 | #define set_wmb(var, value) do { var = value; wmb(); } while (0) | ||
44 | |||
45 | #ifdef CONFIG_SMP | ||
46 | #define smp_mb() mb() | ||
47 | #define smp_rmb() rmb() | ||
48 | #define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory") | ||
49 | #define smp_read_barrier_depends() read_barrier_depends() | ||
50 | #else | ||
51 | #define smp_mb() barrier() | ||
52 | #define smp_rmb() barrier() | ||
53 | #define smp_wmb() barrier() | ||
54 | #define smp_read_barrier_depends() do { } while(0) | ||
55 | #endif /* CONFIG_SMP */ | ||
56 | |||
57 | #ifdef __KERNEL__ | ||
58 | struct task_struct; | ||
59 | struct pt_regs; | ||
60 | |||
61 | #ifdef CONFIG_DEBUGGER | ||
62 | |||
63 | extern int (*__debugger)(struct pt_regs *regs); | ||
64 | extern int (*__debugger_ipi)(struct pt_regs *regs); | ||
65 | extern int (*__debugger_bpt)(struct pt_regs *regs); | ||
66 | extern int (*__debugger_sstep)(struct pt_regs *regs); | ||
67 | extern int (*__debugger_iabr_match)(struct pt_regs *regs); | ||
68 | extern int (*__debugger_dabr_match)(struct pt_regs *regs); | ||
69 | extern int (*__debugger_fault_handler)(struct pt_regs *regs); | ||
70 | |||
/* DEBUGGER_BOILERPLATE(x) expands to a static inline wrapper x(regs) that
 * dispatches to the corresponding __x function-pointer hook (declared
 * extern above) when one has been installed, and returns 0 ("not
 * handled") otherwise.  The expansions below generate one wrapper per
 * debugger entry point. */
71 | #define DEBUGGER_BOILERPLATE(__NAME) \ | ||
72 | static inline int __NAME(struct pt_regs *regs) \ | ||
73 | { \ | ||
74 | if (unlikely(__ ## __NAME)) \ | ||
75 | return __ ## __NAME(regs); \ | ||
76 | return 0; \ | ||
77 | } | ||
78 | |||
79 | DEBUGGER_BOILERPLATE(debugger) | ||
80 | DEBUGGER_BOILERPLATE(debugger_ipi) | ||
81 | DEBUGGER_BOILERPLATE(debugger_bpt) | ||
82 | DEBUGGER_BOILERPLATE(debugger_sstep) | ||
83 | DEBUGGER_BOILERPLATE(debugger_iabr_match) | ||
84 | DEBUGGER_BOILERPLATE(debugger_dabr_match) | ||
85 | DEBUGGER_BOILERPLATE(debugger_fault_handler) | ||
86 | |||
87 | #ifdef CONFIG_XMON | ||
88 | extern void xmon_init(int enable); | ||
89 | #endif | ||
90 | |||
91 | #else | ||
/* CONFIG_DEBUGGER disabled: every debugger hook collapses to a no-op
 * stub returning 0 ("not handled"), keeping callers #ifdef-free. */
92 | static inline int debugger(struct pt_regs *regs) { return 0; } | ||
93 | static inline int debugger_ipi(struct pt_regs *regs) { return 0; } | ||
94 | static inline int debugger_bpt(struct pt_regs *regs) { return 0; } | ||
95 | static inline int debugger_sstep(struct pt_regs *regs) { return 0; } | ||
96 | static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } | ||
97 | static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } | ||
98 | static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } | ||
99 | #endif | ||
100 | |||
101 | extern int set_dabr(unsigned long dabr); | ||
102 | extern void print_backtrace(unsigned long *); | ||
103 | extern void show_regs(struct pt_regs * regs); | ||
104 | extern void flush_instruction_cache(void); | ||
105 | extern void hard_reset_now(void); | ||
106 | extern void poweroff_now(void); | ||
107 | |||
108 | #ifdef CONFIG_6xx | ||
109 | extern long _get_L2CR(void); | ||
110 | extern long _get_L3CR(void); | ||
111 | extern void _set_L2CR(unsigned long); | ||
112 | extern void _set_L3CR(unsigned long); | ||
113 | #else | ||
114 | #define _get_L2CR() 0L | ||
115 | #define _get_L3CR() 0L | ||
116 | #define _set_L2CR(val) do { } while(0) | ||
117 | #define _set_L3CR(val) do { } while(0) | ||
118 | #endif | ||
119 | |||
120 | extern void via_cuda_init(void); | ||
121 | extern void pmac_nvram_init(void); | ||
122 | extern void read_rtc_time(void); | ||
123 | extern void pmac_find_display(void); | ||
124 | extern void giveup_fpu(struct task_struct *); | ||
125 | extern void enable_kernel_fp(void); | ||
126 | extern void flush_fp_to_thread(struct task_struct *); | ||
127 | extern void enable_kernel_altivec(void); | ||
128 | extern void giveup_altivec(struct task_struct *); | ||
129 | extern void load_up_altivec(struct task_struct *); | ||
130 | extern void giveup_spe(struct task_struct *); | ||
131 | extern void load_up_spe(struct task_struct *); | ||
132 | extern int fix_alignment(struct pt_regs *); | ||
133 | extern void cvt_fd(float *from, double *to, unsigned long *fpscr); | ||
134 | extern void cvt_df(double *from, float *to, unsigned long *fpscr); | ||
135 | |||
136 | #ifdef CONFIG_ALTIVEC | ||
137 | extern void flush_altivec_to_thread(struct task_struct *); | ||
138 | #else | ||
/* No AltiVec support configured: nothing to flush, so the stub lets
 * callers avoid #ifdefs. */
139 | static inline void flush_altivec_to_thread(struct task_struct *t) | ||
140 | { | ||
141 | } | ||
142 | #endif | ||
143 | |||
144 | #ifdef CONFIG_SPE | ||
145 | extern void flush_spe_to_thread(struct task_struct *); | ||
146 | #else | ||
/* No SPE support configured: no-op stub, mirroring the AltiVec pattern
 * above so callers need no #ifdefs. */
147 | static inline void flush_spe_to_thread(struct task_struct *t) | ||
148 | { | ||
149 | } | ||
150 | #endif | ||
151 | |||
152 | extern int call_rtas(const char *, int, int, unsigned long *, ...); | ||
153 | extern void cacheable_memzero(void *p, unsigned int nb); | ||
154 | extern void *cacheable_memcpy(void *, const void *, unsigned int); | ||
155 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | ||
156 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | ||
157 | extern int die(const char *, struct pt_regs *, long); | ||
158 | extern void _exception(int, struct pt_regs *, int, unsigned long); | ||
159 | #ifdef CONFIG_BOOKE_WDT | ||
160 | extern u32 booke_wdt_enabled; | ||
161 | extern u32 booke_wdt_period; | ||
162 | #endif /* CONFIG_BOOKE_WDT */ | ||
163 | |||
164 | /* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ | ||
165 | extern unsigned char e2a(unsigned char); | ||
166 | |||
167 | struct device_node; | ||
168 | extern void note_scsi_host(struct device_node *, void *); | ||
169 | |||
170 | extern struct task_struct *__switch_to(struct task_struct *, | ||
171 | struct task_struct *); | ||
172 | #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) | ||
173 | |||
174 | struct thread_struct; | ||
175 | extern struct task_struct *_switch(struct thread_struct *prev, | ||
176 | struct thread_struct *next); | ||
177 | |||
178 | extern unsigned int rtas_data; | ||
179 | |||
180 | /* | ||
181 | * Atomic exchange | ||
182 | * | ||
183 | * Changes the memory location '*ptr' to be val and returns | ||
184 | * the previous value stored there. | ||
185 | */ | ||
/* Atomic 32-bit exchange: store val into *p and return the previous
 * value, via a lwarx/stwcx. load-reserve/store-conditional retry loop
 * (bne- 1b retries if the reservation was lost).  EIEIO_ON_SMP and
 * ISYNC_ON_SMP expand to acquire/release-style barriers on SMP builds,
 * and PPC405_ERR77 inserts a 405-erratum workaround; all three come from
 * elsewhere — presumably asm/ppc_asm.h, included above (TODO confirm).
 * The "memory" clobber makes this a full compiler barrier. */
186 | static __inline__ unsigned long | ||
187 | __xchg_u32(volatile void *p, unsigned long val) | ||
188 | { | ||
189 | unsigned long prev; | ||
190 | |||
191 | __asm__ __volatile__( | ||
192 | EIEIO_ON_SMP | ||
193 | "1: lwarx %0,0,%2 \n" | ||
194 | PPC405_ERR77(0,%2) | ||
195 | " stwcx. %3,0,%2 \n\ | ||
196 | bne- 1b" | ||
197 | ISYNC_ON_SMP | ||
198 | : "=&r" (prev), "=m" (*(volatile unsigned int *)p) | ||
199 | : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p) | ||
200 | : "cc", "memory"); | ||
201 | |||
202 | return prev; | ||
203 | } | ||
204 | |||
205 | #ifdef CONFIG_PPC64 | ||
/* Atomic 64-bit exchange, PPC64 only: same lwarx/stwcx.-style retry loop
 * as __xchg_u32 but using the doubleword forms ldarx/stdcx.  The
 * PPC405_ERR77 line appears here too even though the 405 is a 32-bit
 * part — NOTE(review): likely a harmless copy from the u32 variant;
 * confirm the macro expands to nothing on 64-bit builds. */
206 | static __inline__ unsigned long | ||
207 | __xchg_u64(volatile void *p, unsigned long val) | ||
208 | { | ||
209 | unsigned long prev; | ||
210 | |||
211 | __asm__ __volatile__( | ||
212 | EIEIO_ON_SMP | ||
213 | "1: ldarx %0,0,%2 \n" | ||
214 | PPC405_ERR77(0,%2) | ||
215 | " stdcx. %3,0,%2 \n\ | ||
216 | bne- 1b" | ||
217 | ISYNC_ON_SMP | ||
218 | : "=&r" (prev), "=m" (*(volatile unsigned long *)p) | ||
219 | : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p) | ||
220 | : "cc", "memory"); | ||
221 | |||
222 | return prev; | ||
223 | } | ||
224 | #endif | ||
225 | |||
226 | /* | ||
227 | * This function doesn't exist, so you'll get a linker error | ||
228 | * if something tries to do an invalid xchg(). | ||
229 | */ | ||
230 | extern void __xchg_called_with_bad_pointer(void); | ||
231 | |||
/* Size dispatcher for xchg(): routes to the 4-byte (and on PPC64,
 * 8-byte) primitive.  Any other size falls through to
 * __xchg_called_with_bad_pointer(), which is deliberately never defined,
 * turning an invalid xchg() into a link-time error rather than silent
 * misbehavior.  The trailing "return x" is unreachable at runtime and
 * only satisfies the compiler. */
232 | static __inline__ unsigned long | ||
233 | __xchg(volatile void *ptr, unsigned long x, unsigned int size) | ||
234 | { | ||
235 | switch (size) { | ||
236 | case 4: | ||
237 | return __xchg_u32(ptr, x); | ||
238 | #ifdef CONFIG_PPC64 | ||
239 | case 8: | ||
240 | return __xchg_u64(ptr, x); | ||
241 | #endif | ||
242 | } | ||
243 | __xchg_called_with_bad_pointer(); | ||
244 | return x; | ||
245 | } | ||
246 | |||
/* Type-generic xchg(): captures x once into _x_ (avoiding double
 * evaluation of the argument), dispatches on sizeof(*(ptr)), and casts
 * the result back to the pointee type.  sizeof does not evaluate ptr, so
 * ptr is evaluated once per expansion inside __xchg(). */
247 | #define xchg(ptr,x) \ | ||
248 | ({ \ | ||
249 | __typeof__(*(ptr)) _x_ = (x); \ | ||
250 | (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ | ||
251 | }) | ||
252 | |||
/* tas(): test-and-set expressed as an atomic exchange with 1. */
253 | #define tas(ptr) (xchg((ptr),1)) | ||
254 | |||
255 | /* | ||
256 | * Compare and exchange - if *p == old, set it to new, | ||
257 | * and return the old value of *p. | ||
258 | */ | ||
259 | #define __HAVE_ARCH_CMPXCHG 1 | ||
260 | |||
/* Atomic 32-bit compare-and-swap: if *p == old, store new into *p;
 * always returns the value read (equal to old exactly when the swap
 * happened).  lwarx/stwcx. retry loop: cmpw + "bne- 2f" skips the store
 * when the comparison fails; "bne- 1b" retries when the reservation was
 * lost.  EIEIO_ON_SMP / ISYNC_ON_SMP supply SMP barriers and
 * PPC405_ERR77 a 405-erratum workaround (macros defined elsewhere —
 * presumably asm/ppc_asm.h; TODO confirm). */
261 | static __inline__ unsigned long | ||
262 | __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) | ||
263 | { | ||
264 | unsigned int prev; | ||
265 | |||
266 | __asm__ __volatile__ ( | ||
267 | EIEIO_ON_SMP | ||
268 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ | ||
269 | cmpw 0,%0,%3\n\ | ||
270 | bne- 2f\n" | ||
271 | PPC405_ERR77(0,%2) | ||
272 | " stwcx. %4,0,%2\n\ | ||
273 | bne- 1b" | ||
274 | ISYNC_ON_SMP | ||
275 | "\n\ | ||
276 | 2:" | ||
277 | : "=&r" (prev), "=m" (*p) | ||
278 | : "r" (p), "r" (old), "r" (new), "m" (*p) | ||
279 | : "cc", "memory"); | ||
280 | |||
281 | return prev; | ||
282 | } | ||
283 | |||
284 | #ifdef CONFIG_PPC64 | ||
/* Atomic 64-bit compare-and-swap, PPC64 only: doubleword analogue of
 * __cmpxchg_u32 using ldarx/cmpd/stdcx.  Unlike the u32 variant there is
 * no PPC405_ERR77 line here (the 405 erratum concerns a 32-bit part). */
285 | static __inline__ unsigned long | ||
286 | __cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new) | ||
287 | { | ||
288 | unsigned long prev; | ||
289 | |||
290 | __asm__ __volatile__ ( | ||
291 | EIEIO_ON_SMP | ||
292 | "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ | ||
293 | cmpd 0,%0,%3\n\ | ||
294 | bne- 2f\n\ | ||
295 | stdcx. %4,0,%2\n\ | ||
296 | bne- 1b" | ||
297 | ISYNC_ON_SMP | ||
298 | "\n\ | ||
299 | 2:" | ||
300 | : "=&r" (prev), "=m" (*p) | ||
301 | : "r" (p), "r" (old), "r" (new), "m" (*p) | ||
302 | : "cc", "memory"); | ||
303 | |||
304 | return prev; | ||
305 | } | ||
306 | #endif | ||
307 | |||
308 | /* This function doesn't exist, so you'll get a linker error | ||
309 | if something tries to do an invalid cmpxchg(). */ | ||
310 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
311 | |||
/* Size dispatcher for cmpxchg(), mirroring __xchg above: 4 bytes always,
 * 8 bytes on PPC64.  Invalid sizes reach
 * __cmpxchg_called_with_bad_pointer(), which has no definition anywhere,
 * so a bad cmpxchg() fails at link time.  "return old" is an unreachable
 * compiler-pacifying fallback. */
312 | static __inline__ unsigned long | ||
313 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, | ||
314 | unsigned int size) | ||
315 | { | ||
316 | switch (size) { | ||
317 | case 4: | ||
318 | return __cmpxchg_u32(ptr, old, new); | ||
319 | #ifdef CONFIG_PPC64 | ||
320 | case 8: | ||
321 | return __cmpxchg_u64(ptr, old, new); | ||
322 | #endif | ||
323 | } | ||
324 | __cmpxchg_called_with_bad_pointer(); | ||
325 | return old; | ||
326 | } | ||
327 | |||
/* Type-generic cmpxchg(): captures o and n exactly once into typed
 * locals (so argument expressions are not re-evaluated), dispatches on
 * sizeof(*(ptr)), and casts the previous value back to the pointee
 * type. */
328 | #define cmpxchg(ptr,o,n) \ | ||
329 | ({ \ | ||
330 | __typeof__(*(ptr)) _o_ = (o); \ | ||
331 | __typeof__(*(ptr)) _n_ = (n); \ | ||
332 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
333 | (unsigned long)_n_, sizeof(*(ptr))); \ | ||
334 | }) | ||
335 | |||
336 | #ifdef CONFIG_PPC64 | ||
337 | /* | ||
338 | * We handle most unaligned accesses in hardware. On the other hand | ||
339 | * unaligned DMA can be very expensive on some ppc64 IO chips (it does | ||
340 | * powers of 2 writes until it reaches sufficient alignment). | ||
341 | * | ||
342 | * Based on this we disable the IP header alignment in network drivers. | ||
343 | */ | ||
344 | #define NET_IP_ALIGN 0 | ||
345 | #endif | ||
346 | |||
347 | #define arch_align_stack(x) (x) | ||
348 | |||
349 | #endif /* __KERNEL__ */ | ||
350 | #endif /* __PPC_SYSTEM_H */ | ||
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h index 829481c0a9dc..79c1be3dfe61 100644 --- a/include/asm-ppc/smp.h +++ b/include/asm-ppc/smp.h | |||
@@ -45,30 +45,21 @@ extern int __cpu_disable(void); | |||
45 | extern void __cpu_die(unsigned int cpu); | 45 | extern void __cpu_die(unsigned int cpu); |
46 | extern void cpu_die(void) __attribute__((noreturn)); | 46 | extern void cpu_die(void) __attribute__((noreturn)); |
47 | 47 | ||
48 | #define NO_PROC_ID 0xFF /* No processor magic marker */ | ||
49 | #define PROC_CHANGE_PENALTY 20 | ||
50 | |||
51 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 48 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
52 | 49 | ||
53 | extern int __cpu_up(unsigned int cpu); | 50 | extern int __cpu_up(unsigned int cpu); |
54 | 51 | ||
55 | extern int smp_hw_index[]; | 52 | extern int smp_hw_index[]; |
56 | #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) | 53 | #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) |
57 | 54 | #define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)]) | |
58 | struct klock_info_struct { | ||
59 | unsigned long kernel_flag; | ||
60 | unsigned char akp; | ||
61 | }; | ||
62 | |||
63 | extern struct klock_info_struct klock_info; | ||
64 | #define KLOCK_HELD 0xffffffff | ||
65 | #define KLOCK_CLEAR 0x0 | ||
66 | 55 | ||
67 | #endif /* __ASSEMBLY__ */ | 56 | #endif /* __ASSEMBLY__ */ |
68 | 57 | ||
69 | #else /* !(CONFIG_SMP) */ | 58 | #else /* !(CONFIG_SMP) */ |
70 | 59 | ||
71 | static inline void cpu_die(void) { } | 60 | static inline void cpu_die(void) { } |
61 | #define get_hard_smp_processor_id(cpu) 0 | ||
62 | #define hard_smp_processor_id() 0 | ||
72 | 63 | ||
73 | #endif /* !(CONFIG_SMP) */ | 64 | #endif /* !(CONFIG_SMP) */ |
74 | 65 | ||