aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/defconfig2
-rw-r--r--arch/alpha/mm/numa.c16
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/boot/install.sh4
-rw-r--r--arch/arm/configs/s3c2410_defconfig27
-rw-r--r--arch/arm/mach-ixp2000/core.c85
-rw-r--r--arch/arm/mach-ixp4xx/common.c8
-rw-r--r--arch/arm/mach-s3c2410/mach-bast.c49
-rw-r--r--arch/arm/mach-s3c2410/mach-vr1000.c77
-rw-r--r--arch/arm/mm/proc-v6.S6
-rw-r--r--arch/arm/nwfpe/softfloat-macros22
-rw-r--r--arch/arm/nwfpe/softfloat.c12
-rw-r--r--arch/arm26/Kconfig2
-rw-r--r--arch/arm26/boot/install.sh4
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/frv/Kconfig2
-rw-r--r--arch/h8300/Kconfig.cpu3
-rw-r--r--arch/h8300/platform/h8300h/ptrace_h8300h.c4
-rw-r--r--arch/i386/Kconfig40
-rw-r--r--arch/i386/Makefile7
-rw-r--r--arch/i386/boot/install.sh4
-rw-r--r--arch/i386/kernel/apic.c2
-rw-r--r--arch/i386/kernel/cpu/common.c2
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c23
-rw-r--r--arch/i386/kernel/cpu/proc.c2
-rw-r--r--arch/i386/kernel/i386_ksyms.c160
-rw-r--r--arch/i386/kernel/i387.c3
-rw-r--r--arch/i386/kernel/io_apic.c13
-rw-r--r--arch/i386/kernel/kprobes.c176
-rw-r--r--arch/i386/kernel/mpparse.c31
-rw-r--r--arch/i386/kernel/nmi.c24
-rw-r--r--arch/i386/kernel/pci-dma.c3
-rw-r--r--arch/i386/kernel/process.c36
-rw-r--r--arch/i386/kernel/ptrace.c2
-rw-r--r--arch/i386/kernel/reboot.c5
-rw-r--r--arch/i386/kernel/setup.c28
-rw-r--r--arch/i386/kernel/signal.c31
-rw-r--r--arch/i386/kernel/smp.c3
-rw-r--r--arch/i386/kernel/smpboot.c14
-rw-r--r--arch/i386/kernel/time.c6
-rw-r--r--arch/i386/kernel/timers/common.c12
-rw-r--r--arch/i386/kernel/timers/timer.c9
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c3
-rw-r--r--arch/i386/kernel/timers/timer_pm.c1
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c8
-rw-r--r--arch/i386/kernel/traps.c21
-rw-r--r--arch/i386/lib/dec_and_lock.c2
-rw-r--r--arch/i386/lib/delay.c6
-rw-r--r--arch/i386/lib/mmx.c5
-rw-r--r--arch/i386/lib/usercopy.c8
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c2
-rw-r--r--arch/i386/mm/Makefile2
-rw-r--r--arch/i386/mm/discontig.c127
-rw-r--r--arch/i386/mm/highmem.c6
-rw-r--r--arch/i386/mm/init.c21
-rw-r--r--arch/i386/mm/ioremap.c5
-rw-r--r--arch/i386/mm/pgtable.c10
-rw-r--r--arch/i386/oprofile/backtrace.c2
-rw-r--r--arch/i386/pci/irq.c22
-rw-r--r--arch/i386/pci/pcbios.c4
-rw-r--r--arch/i386/power/cpu.c14
-rw-r--r--arch/ia64/Kconfig6
-rw-r--r--arch/ia64/Kconfig.debug11
-rw-r--r--arch/ia64/configs/sn2_defconfig2
-rw-r--r--arch/ia64/defconfig2
-rw-r--r--arch/ia64/ia32/ia32priv.h2
-rw-r--r--arch/ia64/kernel/Makefile1
-rw-r--r--arch/ia64/kernel/jprobes.S61
-rw-r--r--arch/ia64/kernel/kprobes.c601
-rw-r--r--arch/ia64/kernel/traps.c33
-rw-r--r--arch/ia64/mm/discontig.c9
-rw-r--r--arch/ia64/mm/fault.c8
-rw-r--r--arch/m32r/Kconfig4
-rw-r--r--arch/m32r/mm/init.c4
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68knommu/Kconfig2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/configs/ip27_defconfig2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c5
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/ppc/Kconfig2
-rw-r--r--arch/ppc/boot/simple/misc.c2
-rw-r--r--arch/ppc/boot/simple/mpc10x_memory.c2
-rw-r--r--arch/ppc64/Kconfig63
-rw-r--r--arch/ppc64/Makefile2
-rw-r--r--arch/ppc64/boot/install.sh4
-rw-r--r--arch/ppc64/configs/pSeries_defconfig2
-rw-r--r--arch/ppc64/defconfig2
-rw-r--r--arch/ppc64/kernel/Makefile11
-rw-r--r--arch/ppc64/kernel/bpa_iic.c270
-rw-r--r--arch/ppc64/kernel/bpa_iic.h62
-rw-r--r--arch/ppc64/kernel/bpa_iommu.c377
-rw-r--r--arch/ppc64/kernel/bpa_iommu.h65
-rw-r--r--arch/ppc64/kernel/bpa_nvram.c118
-rw-r--r--arch/ppc64/kernel/bpa_setup.c140
-rw-r--r--arch/ppc64/kernel/cpu_setup_power4.S16
-rw-r--r--arch/ppc64/kernel/cputable.c11
-rw-r--r--arch/ppc64/kernel/iSeries_setup.c5
-rw-r--r--arch/ppc64/kernel/irq.c3
-rw-r--r--arch/ppc64/kernel/kprobes.c61
-rw-r--r--arch/ppc64/kernel/maple_setup.c64
-rw-r--r--arch/ppc64/kernel/maple_time.c51
-rw-r--r--arch/ppc64/kernel/mpic.h3
-rw-r--r--arch/ppc64/kernel/pSeries_pci.c497
-rw-r--r--arch/ppc64/kernel/pSeries_setup.c184
-rw-r--r--arch/ppc64/kernel/pSeries_smp.c69
-rw-r--r--arch/ppc64/kernel/pci.c3
-rw-r--r--arch/ppc64/kernel/pci.h6
-rw-r--r--arch/ppc64/kernel/pmac_time.c8
-rw-r--r--arch/ppc64/kernel/proc_ppc64.c2
-rw-r--r--arch/ppc64/kernel/prom_init.c4
-rw-r--r--arch/ppc64/kernel/ptrace.c4
-rw-r--r--arch/ppc64/kernel/rtas-proc.c4
-rw-r--r--arch/ppc64/kernel/rtas.c121
-rw-r--r--arch/ppc64/kernel/rtas_pci.c495
-rw-r--r--arch/ppc64/kernel/rtc.c6
-rw-r--r--arch/ppc64/kernel/setup.c34
-rw-r--r--arch/ppc64/kernel/smp.c4
-rw-r--r--arch/ppc64/kernel/spider-pic.c191
-rw-r--r--arch/ppc64/kernel/time.c63
-rw-r--r--arch/ppc64/kernel/traps.c4
-rw-r--r--arch/ppc64/mm/Makefile2
-rw-r--r--arch/ppc64/mm/init.c22
-rw-r--r--arch/ppc64/mm/numa.c3
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/boot/install.sh4
-rw-r--r--arch/s390/kernel/compat_linux.h2
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh64/Kconfig2
-rw-r--r--arch/sparc/Kconfig18
-rw-r--r--arch/sparc64/Kconfig2
-rw-r--r--arch/sparc64/kernel/kprobes.c83
-rw-r--r--arch/sparc64/kernel/signal32.c2
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/kernel/process_kern.c8
-rw-r--r--arch/v850/Kconfig2
-rw-r--r--arch/x86_64/Kconfig25
-rw-r--r--arch/x86_64/boot/install.sh4
-rw-r--r--arch/x86_64/ia32/ia32_signal.c15
-rw-r--r--arch/x86_64/kernel/aperture.c4
-rw-r--r--arch/x86_64/kernel/early_printk.c13
-rw-r--r--arch/x86_64/kernel/head64.c2
-rw-r--r--arch/x86_64/kernel/i8259.c15
-rw-r--r--arch/x86_64/kernel/kprobes.c190
-rw-r--r--arch/x86_64/kernel/mpparse.c25
-rw-r--r--arch/x86_64/kernel/process.c16
-rw-r--r--arch/x86_64/kernel/setup.c33
-rw-r--r--arch/x86_64/kernel/signal.c30
-rw-r--r--arch/x86_64/kernel/time.c42
-rw-r--r--arch/x86_64/kernel/traps.c22
-rw-r--r--arch/x86_64/lib/delay.c7
-rw-r--r--arch/x86_64/mm/Makefile2
-rw-r--r--arch/x86_64/mm/fault.c4
-rw-r--r--arch/x86_64/mm/init.c9
-rw-r--r--arch/x86_64/mm/ioremap.c2
-rw-r--r--arch/x86_64/mm/numa.c8
-rw-r--r--arch/x86_64/pci/k8-bus.c16
-rw-r--r--arch/xtensa/Kconfig258
-rw-r--r--arch/xtensa/Kconfig.debug7
-rw-r--r--arch/xtensa/Makefile102
-rw-r--r--arch/xtensa/boot/Makefile37
-rw-r--r--arch/xtensa/boot/boot-elf/Makefile52
-rw-r--r--arch/xtensa/boot/boot-elf/boot.ld71
-rw-r--r--arch/xtensa/boot/boot-elf/bootstrap.S37
-rw-r--r--arch/xtensa/boot/boot-redboot/Makefile35
-rw-r--r--arch/xtensa/boot/boot-redboot/boot.ld66
-rw-r--r--arch/xtensa/boot/boot-redboot/bootstrap.S246
-rw-r--r--arch/xtensa/boot/include/zlib.h433
-rw-r--r--arch/xtensa/boot/lib/Makefile6
-rw-r--r--arch/xtensa/boot/lib/memcpy.S36
-rw-r--r--arch/xtensa/boot/lib/zlib.c2150
-rw-r--r--arch/xtensa/boot/lib/zmem.c87
-rw-r--r--arch/xtensa/boot/ramdisk/Makefile23
-rw-r--r--arch/xtensa/configs/common_defconfig662
-rw-r--r--arch/xtensa/configs/iss_defconfig531
-rw-r--r--arch/xtensa/kernel/Makefile18
-rw-r--r--arch/xtensa/kernel/align.S459
-rw-r--r--arch/xtensa/kernel/asm-offsets.c94
-rw-r--r--arch/xtensa/kernel/coprocessor.S201
-rw-r--r--arch/xtensa/kernel/entry.S1996
-rw-r--r--arch/xtensa/kernel/head.S237
-rw-r--r--arch/xtensa/kernel/irq.c192
-rw-r--r--arch/xtensa/kernel/module.c78
-rw-r--r--arch/xtensa/kernel/pci-dma.c73
-rw-r--r--arch/xtensa/kernel/pci.c563
-rw-r--r--arch/xtensa/kernel/platform.c49
-rw-r--r--arch/xtensa/kernel/process.c482
-rw-r--r--arch/xtensa/kernel/ptrace.c407
-rw-r--r--arch/xtensa/kernel/semaphore.c226
-rw-r--r--arch/xtensa/kernel/setup.c520
-rw-r--r--arch/xtensa/kernel/signal.c713
-rw-r--r--arch/xtensa/kernel/syscalls.c418
-rw-r--r--arch/xtensa/kernel/syscalls.h248
-rw-r--r--arch/xtensa/kernel/time.c227
-rw-r--r--arch/xtensa/kernel/traps.c498
-rw-r--r--arch/xtensa/kernel/vectors.S464
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S341
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c123
-rw-r--r--arch/xtensa/lib/Makefile7
-rw-r--r--arch/xtensa/lib/checksum.S410
-rw-r--r--arch/xtensa/lib/memcopy.S315
-rw-r--r--arch/xtensa/lib/memset.S160
-rw-r--r--arch/xtensa/lib/pci-auto.c352
-rw-r--r--arch/xtensa/lib/strcasecmp.c32
-rw-r--r--arch/xtensa/lib/strncpy_user.S224
-rw-r--r--arch/xtensa/lib/strnlen_user.S147
-rw-r--r--arch/xtensa/lib/usercopy.S321
-rw-r--r--arch/xtensa/mm/Makefile13
-rw-r--r--arch/xtensa/mm/fault.c241
-rw-r--r--arch/xtensa/mm/init.c551
-rw-r--r--arch/xtensa/mm/misc.S374
-rw-r--r--arch/xtensa/mm/pgtable.c76
-rw-r--r--arch/xtensa/mm/tlb.c545
-rw-r--r--arch/xtensa/platform-iss/Makefile13
-rw-r--r--arch/xtensa/platform-iss/console.c303
-rw-r--r--arch/xtensa/platform-iss/io.c32
-rw-r--r--arch/xtensa/platform-iss/network.c855
-rw-r--r--arch/xtensa/platform-iss/setup.c112
220 files changed, 22883 insertions, 1343 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f7c96635d3b4..c5739d6309df 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -509,7 +509,7 @@ config NR_CPUS
509 depends on SMP 509 depends on SMP
510 default "64" 510 default "64"
511 511
512config DISCONTIGMEM 512config ARCH_DISCONTIGMEM_ENABLE
513 bool "Discontiguous Memory Support (EXPERIMENTAL)" 513 bool "Discontiguous Memory Support (EXPERIMENTAL)"
514 depends on EXPERIMENTAL 514 depends on EXPERIMENTAL
515 help 515 help
@@ -518,6 +518,8 @@ config DISCONTIGMEM
518 or have huge holes in the physical address space for other reasons. 518 or have huge holes in the physical address space for other reasons.
519 See <file:Documentation/vm/numa> for more. 519 See <file:Documentation/vm/numa> for more.
520 520
521source "mm/Kconfig"
522
521config NUMA 523config NUMA
522 bool "NUMA Support (EXPERIMENTAL)" 524 bool "NUMA Support (EXPERIMENTAL)"
523 depends on DISCONTIGMEM 525 depends on DISCONTIGMEM
diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig
index 5e39b7a7c8f4..6da9c3dbde44 100644
--- a/arch/alpha/defconfig
+++ b/arch/alpha/defconfig
@@ -96,7 +96,7 @@ CONFIG_ALPHA_CORE_AGP=y
96CONFIG_ALPHA_BROKEN_IRQ_MASK=y 96CONFIG_ALPHA_BROKEN_IRQ_MASK=y
97CONFIG_EISA=y 97CONFIG_EISA=y
98# CONFIG_SMP is not set 98# CONFIG_SMP is not set
99# CONFIG_DISCONTIGMEM is not set 99# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
100CONFIG_VERBOSE_MCHECK=y 100CONFIG_VERBOSE_MCHECK=y
101CONFIG_VERBOSE_MCHECK_ON=1 101CONFIG_VERBOSE_MCHECK_ON=1
102CONFIG_PCI_LEGACY_PROC=y 102CONFIG_PCI_LEGACY_PROC=y
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index ba81c4422aaf..c7481d59b6df 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -327,8 +327,6 @@ void __init mem_init(void)
327 extern char _text, _etext, _data, _edata; 327 extern char _text, _etext, _data, _edata;
328 extern char __init_begin, __init_end; 328 extern char __init_begin, __init_end;
329 unsigned long nid, i; 329 unsigned long nid, i;
330 struct page * lmem_map;
331
332 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 330 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
333 331
334 reservedpages = 0; 332 reservedpages = 0;
@@ -338,10 +336,10 @@ void __init mem_init(void)
338 */ 336 */
339 totalram_pages += free_all_bootmem_node(NODE_DATA(nid)); 337 totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
340 338
341 lmem_map = node_mem_map(nid);
342 pfn = NODE_DATA(nid)->node_start_pfn; 339 pfn = NODE_DATA(nid)->node_start_pfn;
343 for (i = 0; i < node_spanned_pages(nid); i++, pfn++) 340 for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
344 if (page_is_ram(pfn) && PageReserved(lmem_map+i)) 341 if (page_is_ram(pfn) &&
342 PageReserved(nid_page_nr(nid, i)))
345 reservedpages++; 343 reservedpages++;
346 } 344 }
347 345
@@ -373,18 +371,18 @@ show_mem(void)
373 show_free_areas(); 371 show_free_areas();
374 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 372 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
375 for_each_online_node(nid) { 373 for_each_online_node(nid) {
376 struct page * lmem_map = node_mem_map(nid);
377 i = node_spanned_pages(nid); 374 i = node_spanned_pages(nid);
378 while (i-- > 0) { 375 while (i-- > 0) {
376 struct page *page = nid_page_nr(nid, i);
379 total++; 377 total++;
380 if (PageReserved(lmem_map+i)) 378 if (PageReserved(page))
381 reserved++; 379 reserved++;
382 else if (PageSwapCache(lmem_map+i)) 380 else if (PageSwapCache(page))
383 cached++; 381 cached++;
384 else if (!page_count(lmem_map+i)) 382 else if (!page_count(page))
385 free++; 383 free++;
386 else 384 else
387 shared += page_count(lmem_map + i) - 1; 385 shared += page_count(page) - 1;
388 } 386 }
389 } 387 }
390 printk("%ld pages of RAM\n",total); 388 printk("%ld pages of RAM\n",total);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ee8a9ad7bbd9..07ba77c19f6c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -346,7 +346,7 @@ config PREEMPT
346 Say Y here if you are building a kernel for a desktop, embedded 346 Say Y here if you are building a kernel for a desktop, embedded
347 or real-time system. Say N if you are unsure. 347 or real-time system. Say N if you are unsure.
348 348
349config DISCONTIGMEM 349config ARCH_DISCONTIGMEM_ENABLE
350 bool 350 bool
351 default (ARCH_LH7A40X && !LH7A40X_CONTIGMEM) 351 default (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
352 help 352 help
@@ -355,6 +355,8 @@ config DISCONTIGMEM
355 or have huge holes in the physical address space for other reasons. 355 or have huge holes in the physical address space for other reasons.
356 See <file:Documentation/vm/numa> for more. 356 See <file:Documentation/vm/numa> for more.
357 357
358source "mm/Kconfig"
359
358config LEDS 360config LEDS
359 bool "Timer and CPU usage LEDs" 361 bool "Timer and CPU usage LEDs"
360 depends on ARCH_CDB89712 || ARCH_CO285 || ARCH_EBSA110 || \ 362 depends on ARCH_CDB89712 || ARCH_CO285 || ARCH_EBSA110 || \
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 935bb27369e9..9f9bed207345 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -21,8 +21,8 @@
21# 21#
22 22
23# User may have a custom install script 23# User may have a custom install script
24if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi 24if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
25if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi 25if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
26 26
27if [ "$(basename $2)" = "zImage" ]; then 27if [ "$(basename $2)" = "zImage" ]; then
28# Compressed install 28# Compressed install
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2a63fb277196..98b72ff38832 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,14 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.12-rc1-bk2 3# Linux kernel version: 2.6.12-git4
4# Sun Mar 27 17:47:45 2005 4# Wed Jun 22 15:56:42 2005
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_MMU=y 7CONFIG_MMU=y
8CONFIG_UID16=y 8CONFIG_UID16=y
9CONFIG_RWSEM_GENERIC_SPINLOCK=y 9CONFIG_RWSEM_GENERIC_SPINLOCK=y
10CONFIG_GENERIC_CALIBRATE_DELAY=y 10CONFIG_GENERIC_CALIBRATE_DELAY=y
11CONFIG_GENERIC_IOMAP=y
12 11
13# 12#
14# Code maturity level options 13# Code maturity level options
@@ -17,6 +16,7 @@ CONFIG_EXPERIMENTAL=y
17# CONFIG_CLEAN_COMPILE is not set 16# CONFIG_CLEAN_COMPILE is not set
18CONFIG_BROKEN=y 17CONFIG_BROKEN=y
19CONFIG_BROKEN_ON_SMP=y 18CONFIG_BROKEN_ON_SMP=y
19CONFIG_INIT_ENV_ARG_LIMIT=32
20 20
21# 21#
22# General setup 22# General setup
@@ -35,6 +35,8 @@ CONFIG_KOBJECT_UEVENT=y
35CONFIG_KALLSYMS=y 35CONFIG_KALLSYMS=y
36# CONFIG_KALLSYMS_ALL is not set 36# CONFIG_KALLSYMS_ALL is not set
37# CONFIG_KALLSYMS_EXTRA_PASS is not set 37# CONFIG_KALLSYMS_EXTRA_PASS is not set
38CONFIG_PRINTK=y
39CONFIG_BUG=y
38CONFIG_BASE_FULL=y 40CONFIG_BASE_FULL=y
39CONFIG_FUTEX=y 41CONFIG_FUTEX=y
40CONFIG_EPOLL=y 42CONFIG_EPOLL=y
@@ -81,6 +83,7 @@ CONFIG_ARCH_S3C2410=y
81# CONFIG_ARCH_VERSATILE is not set 83# CONFIG_ARCH_VERSATILE is not set
82# CONFIG_ARCH_IMX is not set 84# CONFIG_ARCH_IMX is not set
83# CONFIG_ARCH_H720X is not set 85# CONFIG_ARCH_H720X is not set
86# CONFIG_ARCH_AAEC2000 is not set
84 87
85# 88#
86# S3C24XX Implementations 89# S3C24XX Implementations
@@ -134,6 +137,7 @@ CONFIG_CPU_TLB_V4WBI=y
134# 137#
135# Bus support 138# Bus support
136# 139#
140CONFIG_ISA_DMA_API=y
137 141
138# 142#
139# PCCARD (PCMCIA/CardBus) support 143# PCCARD (PCMCIA/CardBus) support
@@ -143,7 +147,9 @@ CONFIG_CPU_TLB_V4WBI=y
143# 147#
144# Kernel Features 148# Kernel Features
145# 149#
150# CONFIG_SMP is not set
146# CONFIG_PREEMPT is not set 151# CONFIG_PREEMPT is not set
152# CONFIG_DISCONTIGMEM is not set
147CONFIG_ALIGNMENT_TRAP=y 153CONFIG_ALIGNMENT_TRAP=y
148 154
149# 155#
@@ -297,7 +303,6 @@ CONFIG_PARPORT_1284=y
297# 303#
298# Block devices 304# Block devices
299# 305#
300# CONFIG_BLK_DEV_FD is not set
301# CONFIG_PARIDE is not set 306# CONFIG_PARIDE is not set
302# CONFIG_BLK_DEV_COW_COMMON is not set 307# CONFIG_BLK_DEV_COW_COMMON is not set
303CONFIG_BLK_DEV_LOOP=y 308CONFIG_BLK_DEV_LOOP=y
@@ -359,6 +364,7 @@ CONFIG_BLK_DEV_IDE_BAST=y
359# 364#
360# Fusion MPT device support 365# Fusion MPT device support
361# 366#
367# CONFIG_FUSION is not set
362 368
363# 369#
364# IEEE 1394 (FireWire) support 370# IEEE 1394 (FireWire) support
@@ -378,10 +384,11 @@ CONFIG_NET=y
378# Networking options 384# Networking options
379# 385#
380# CONFIG_PACKET is not set 386# CONFIG_PACKET is not set
381# CONFIG_NETLINK_DEV is not set
382CONFIG_UNIX=y 387CONFIG_UNIX=y
383# CONFIG_NET_KEY is not set 388# CONFIG_NET_KEY is not set
384CONFIG_INET=y 389CONFIG_INET=y
390CONFIG_IP_FIB_HASH=y
391# CONFIG_IP_FIB_TRIE is not set
385# CONFIG_IP_MULTICAST is not set 392# CONFIG_IP_MULTICAST is not set
386# CONFIG_IP_ADVANCED_ROUTER is not set 393# CONFIG_IP_ADVANCED_ROUTER is not set
387CONFIG_IP_PNP=y 394CONFIG_IP_PNP=y
@@ -443,8 +450,9 @@ CONFIG_NETDEVICES=y
443# Ethernet (10 or 100Mbit) 450# Ethernet (10 or 100Mbit)
444# 451#
445CONFIG_NET_ETHERNET=y 452CONFIG_NET_ETHERNET=y
446# CONFIG_MII is not set 453CONFIG_MII=m
447# CONFIG_SMC91X is not set 454# CONFIG_SMC91X is not set
455CONFIG_DM9000=m
448 456
449# 457#
450# Ethernet (1000 Mbit) 458# Ethernet (1000 Mbit)
@@ -521,7 +529,6 @@ CONFIG_SERIO_SERPORT=y
521CONFIG_SERIO_LIBPS2=y 529CONFIG_SERIO_LIBPS2=y
522# CONFIG_SERIO_RAW is not set 530# CONFIG_SERIO_RAW is not set
523# CONFIG_GAMEPORT is not set 531# CONFIG_GAMEPORT is not set
524CONFIG_SOUND_GAMEPORT=y
525 532
526# 533#
527# Character devices 534# Character devices
@@ -605,7 +612,6 @@ CONFIG_S3C2410_RTC=y
605# 612#
606# TPM devices 613# TPM devices
607# 614#
608# CONFIG_TCG_TPM is not set
609 615
610# 616#
611# I2C support 617# I2C support
@@ -654,6 +660,7 @@ CONFIG_SENSORS_LM78=m
654CONFIG_SENSORS_LM85=m 660CONFIG_SENSORS_LM85=m
655# CONFIG_SENSORS_LM87 is not set 661# CONFIG_SENSORS_LM87 is not set
656# CONFIG_SENSORS_LM90 is not set 662# CONFIG_SENSORS_LM90 is not set
663# CONFIG_SENSORS_LM92 is not set
657# CONFIG_SENSORS_MAX1619 is not set 664# CONFIG_SENSORS_MAX1619 is not set
658# CONFIG_SENSORS_PC87360 is not set 665# CONFIG_SENSORS_PC87360 is not set
659# CONFIG_SENSORS_SMSC47B397 is not set 666# CONFIG_SENSORS_SMSC47B397 is not set
@@ -665,6 +672,7 @@ CONFIG_SENSORS_LM85=m
665# 672#
666# Other I2C Chip support 673# Other I2C Chip support
667# 674#
675# CONFIG_SENSORS_DS1337 is not set
668CONFIG_SENSORS_EEPROM=m 676CONFIG_SENSORS_EEPROM=m
669# CONFIG_SENSORS_PCF8574 is not set 677# CONFIG_SENSORS_PCF8574 is not set
670# CONFIG_SENSORS_PCF8591 is not set 678# CONFIG_SENSORS_PCF8591 is not set
@@ -696,8 +704,10 @@ CONFIG_FB=y
696# CONFIG_FB_CFB_COPYAREA is not set 704# CONFIG_FB_CFB_COPYAREA is not set
697# CONFIG_FB_CFB_IMAGEBLIT is not set 705# CONFIG_FB_CFB_IMAGEBLIT is not set
698# CONFIG_FB_SOFT_CURSOR is not set 706# CONFIG_FB_SOFT_CURSOR is not set
707# CONFIG_FB_MACMODES is not set
699CONFIG_FB_MODE_HELPERS=y 708CONFIG_FB_MODE_HELPERS=y
700# CONFIG_FB_TILEBLITTING is not set 709# CONFIG_FB_TILEBLITTING is not set
710# CONFIG_FB_S1D13XXX is not set
701# CONFIG_FB_VIRTUAL is not set 711# CONFIG_FB_VIRTUAL is not set
702 712
703# 713#
@@ -782,7 +792,6 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
782# 792#
783CONFIG_PROC_FS=y 793CONFIG_PROC_FS=y
784CONFIG_SYSFS=y 794CONFIG_SYSFS=y
785# CONFIG_DEVFS_FS is not set
786# CONFIG_DEVPTS_FS_XATTR is not set 795# CONFIG_DEVPTS_FS_XATTR is not set
787# CONFIG_TMPFS is not set 796# CONFIG_TMPFS is not set
788# CONFIG_HUGETLBFS is not set 797# CONFIG_HUGETLBFS is not set
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index fc0555596d6d..0ee34acb8d7b 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -40,6 +40,8 @@
40#include <asm/mach/time.h> 40#include <asm/mach/time.h>
41#include <asm/mach/irq.h> 41#include <asm/mach/irq.h>
42 42
43#include <asm/arch/gpio.h>
44
43static DEFINE_SPINLOCK(ixp2000_slowport_lock); 45static DEFINE_SPINLOCK(ixp2000_slowport_lock);
44static unsigned long ixp2000_slowport_irq_flags; 46static unsigned long ixp2000_slowport_irq_flags;
45 47
@@ -179,7 +181,7 @@ static int ixp2000_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
179 181
180 /* clear timer 1 */ 182 /* clear timer 1 */
181 ixp2000_reg_write(IXP2000_T1_CLR, 1); 183 ixp2000_reg_write(IXP2000_T1_CLR, 1);
182 184
183 while ((next_jiffy_time - *missing_jiffy_timer_csr) > ticks_per_jiffy) { 185 while ((next_jiffy_time - *missing_jiffy_timer_csr) > ticks_per_jiffy) {
184 timer_tick(regs); 186 timer_tick(regs);
185 next_jiffy_time -= ticks_per_jiffy; 187 next_jiffy_time -= ticks_per_jiffy;
@@ -238,35 +240,40 @@ void __init ixp2000_init_time(unsigned long tick_rate)
238/************************************************************************* 240/*************************************************************************
239 * GPIO helpers 241 * GPIO helpers
240 *************************************************************************/ 242 *************************************************************************/
241static unsigned long GPIO_IRQ_rising_edge;
242static unsigned long GPIO_IRQ_falling_edge; 243static unsigned long GPIO_IRQ_falling_edge;
244static unsigned long GPIO_IRQ_rising_edge;
243static unsigned long GPIO_IRQ_level_low; 245static unsigned long GPIO_IRQ_level_low;
244static unsigned long GPIO_IRQ_level_high; 246static unsigned long GPIO_IRQ_level_high;
245 247
246void gpio_line_config(int line, int style) 248static void update_gpio_int_csrs(void)
249{
250 ixp2000_reg_write(IXP2000_GPIO_FEDR, GPIO_IRQ_falling_edge);
251 ixp2000_reg_write(IXP2000_GPIO_REDR, GPIO_IRQ_rising_edge);
252 ixp2000_reg_write(IXP2000_GPIO_LSLR, GPIO_IRQ_level_low);
253 ixp2000_reg_write(IXP2000_GPIO_LSHR, GPIO_IRQ_level_high);
254}
255
256void gpio_line_config(int line, int direction)
247{ 257{
248 unsigned long flags; 258 unsigned long flags;
249 259
250 local_irq_save(flags); 260 local_irq_save(flags);
261 if (direction == GPIO_OUT) {
262 irq_desc[line + IRQ_IXP2000_GPIO0].valid = 0;
251 263
252 if(style == GPIO_OUT) {
253 /* if it's an output, it ain't an interrupt anymore */ 264 /* if it's an output, it ain't an interrupt anymore */
254 ixp2000_reg_write(IXP2000_GPIO_PDSR, (1 << line));
255 GPIO_IRQ_falling_edge &= ~(1 << line); 265 GPIO_IRQ_falling_edge &= ~(1 << line);
256 GPIO_IRQ_rising_edge &= ~(1 << line); 266 GPIO_IRQ_rising_edge &= ~(1 << line);
257 GPIO_IRQ_level_low &= ~(1 << line); 267 GPIO_IRQ_level_low &= ~(1 << line);
258 GPIO_IRQ_level_high &= ~(1 << line); 268 GPIO_IRQ_level_high &= ~(1 << line);
259 ixp2000_reg_write(IXP2000_GPIO_FEDR, GPIO_IRQ_falling_edge); 269 update_gpio_int_csrs();
260 ixp2000_reg_write(IXP2000_GPIO_REDR, GPIO_IRQ_rising_edge); 270
261 ixp2000_reg_write(IXP2000_GPIO_LSHR, GPIO_IRQ_level_high); 271 ixp2000_reg_write(IXP2000_GPIO_PDSR, 1 << line);
262 ixp2000_reg_write(IXP2000_GPIO_LSLR, GPIO_IRQ_level_low); 272 } else if (direction == GPIO_IN) {
263 irq_desc[line+IRQ_IXP2000_GPIO0].valid = 0; 273 ixp2000_reg_write(IXP2000_GPIO_PDCR, 1 << line);
264 } else if(style == GPIO_IN) {
265 ixp2000_reg_write(IXP2000_GPIO_PDCR, (1 << line));
266 } 274 }
267
268 local_irq_restore(flags); 275 local_irq_restore(flags);
269} 276}
270 277
271 278
272/************************************************************************* 279/*************************************************************************
@@ -285,9 +292,50 @@ static void ixp2000_GPIO_irq_handler(unsigned int irq, struct irqdesc *desc, str
285 } 292 }
286} 293}
287 294
295static int ixp2000_GPIO_irq_type(unsigned int irq, unsigned int type)
296{
297 int line = irq - IRQ_IXP2000_GPIO0;
298
299 /*
300 * First, configure this GPIO line as an input.
301 */
302 ixp2000_reg_write(IXP2000_GPIO_PDCR, 1 << line);
303
304 /*
305 * Then, set the proper trigger type.
306 */
307 if (type & IRQT_FALLING)
308 GPIO_IRQ_falling_edge |= 1 << line;
309 else
310 GPIO_IRQ_falling_edge &= ~(1 << line);
311 if (type & IRQT_RISING)
312 GPIO_IRQ_rising_edge |= 1 << line;
313 else
314 GPIO_IRQ_rising_edge &= ~(1 << line);
315 if (type & IRQT_LOW)
316 GPIO_IRQ_level_low |= 1 << line;
317 else
318 GPIO_IRQ_level_low &= ~(1 << line);
319 if (type & IRQT_HIGH)
320 GPIO_IRQ_level_high |= 1 << line;
321 else
322 GPIO_IRQ_level_high &= ~(1 << line);
323 update_gpio_int_csrs();
324
325 /*
326 * Finally, mark the corresponding IRQ as valid.
327 */
328 irq_desc[irq].valid = 1;
329
330 return 0;
331}
332
288static void ixp2000_GPIO_irq_mask_ack(unsigned int irq) 333static void ixp2000_GPIO_irq_mask_ack(unsigned int irq)
289{ 334{
290 ixp2000_reg_write(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0))); 335 ixp2000_reg_write(IXP2000_GPIO_INCR, (1 << (irq - IRQ_IXP2000_GPIO0)));
336
337 ixp2000_reg_write(IXP2000_GPIO_EDSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
338 ixp2000_reg_write(IXP2000_GPIO_LDSR, (1 << (irq - IRQ_IXP2000_GPIO0)));
291 ixp2000_reg_write(IXP2000_GPIO_INST, (1 << (irq - IRQ_IXP2000_GPIO0))); 339 ixp2000_reg_write(IXP2000_GPIO_INST, (1 << (irq - IRQ_IXP2000_GPIO0)));
292} 340}
293 341
@@ -302,6 +350,7 @@ static void ixp2000_GPIO_irq_unmask(unsigned int irq)
302} 350}
303 351
304static struct irqchip ixp2000_GPIO_irq_chip = { 352static struct irqchip ixp2000_GPIO_irq_chip = {
353 .type = ixp2000_GPIO_irq_type,
305 .ack = ixp2000_GPIO_irq_mask_ack, 354 .ack = ixp2000_GPIO_irq_mask_ack,
306 .mask = ixp2000_GPIO_irq_mask, 355 .mask = ixp2000_GPIO_irq_mask,
307 .unmask = ixp2000_GPIO_irq_unmask 356 .unmask = ixp2000_GPIO_irq_unmask
@@ -338,7 +387,7 @@ static void ixp2000_irq_mask(unsigned int irq)
338 387
339static void ixp2000_irq_unmask(unsigned int irq) 388static void ixp2000_irq_unmask(unsigned int irq)
340{ 389{
341 ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << irq)); 390 ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << irq));
342} 391}
343 392
344static struct irqchip ixp2000_irq_chip = { 393static struct irqchip ixp2000_irq_chip = {
@@ -375,16 +424,16 @@ void __init ixp2000_init_irq(void)
375 * our mask/unmask code much simpler. 424 * our mask/unmask code much simpler.
376 */ 425 */
377 for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) { 426 for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) {
378 if((1 << irq) & IXP2000_VALID_IRQ_MASK) { 427 if ((1 << irq) & IXP2000_VALID_IRQ_MASK) {
379 set_irq_chip(irq, &ixp2000_irq_chip); 428 set_irq_chip(irq, &ixp2000_irq_chip);
380 set_irq_handler(irq, do_level_IRQ); 429 set_irq_handler(irq, do_level_IRQ);
381 set_irq_flags(irq, IRQF_VALID); 430 set_irq_flags(irq, IRQF_VALID);
382 } else set_irq_flags(irq, 0); 431 } else set_irq_flags(irq, 0);
383 } 432 }
384 433
385 /* 434 /*
386 * GPIO IRQs are invalid until someone sets the interrupt mode 435 * GPIO IRQs are invalid until someone sets the interrupt mode
387 * by calling gpio_line_set(); 436 * by calling set_irq_type().
388 */ 437 */
389 for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) { 438 for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) {
390 set_irq_chip(irq, &ixp2000_GPIO_irq_chip); 439 set_irq_chip(irq, &ixp2000_GPIO_irq_chip);
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 267ba02d77dc..f39e8408488f 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -141,7 +141,15 @@ static struct map_desc ixp4xx_io_desc[] __initdata = {
141 .physical = IXP4XX_PCI_CFG_BASE_PHYS, 141 .physical = IXP4XX_PCI_CFG_BASE_PHYS,
142 .length = IXP4XX_PCI_CFG_REGION_SIZE, 142 .length = IXP4XX_PCI_CFG_REGION_SIZE,
143 .type = MT_DEVICE 143 .type = MT_DEVICE
144 },
145#ifdef CONFIG_DEBUG_LL
146 { /* Debug UART mapping */
147 .virtual = IXP4XX_DEBUG_UART_BASE_VIRT,
148 .physical = IXP4XX_DEBUG_UART_BASE_PHYS,
149 .length = IXP4XX_DEBUG_UART_REGION_SIZE,
150 .type = MT_DEVICE
144 } 151 }
152#endif
145}; 153};
146 154
147void __init ixp4xx_map_io(void) 155void __init ixp4xx_map_io(void)
diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c
index 3bb97eb6e693..f3e970039b65 100644
--- a/arch/arm/mach-s3c2410/mach-bast.c
+++ b/arch/arm/mach-s3c2410/mach-bast.c
@@ -26,6 +26,7 @@
26 * 03-Mar-2005 BJD Ensured that bast-cpld.h is included 26 * 03-Mar-2005 BJD Ensured that bast-cpld.h is included
27 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA 27 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
28 * 14-Mar-2006 BJD Updated for __iomem changes 28 * 14-Mar-2006 BJD Updated for __iomem changes
29 * 22-Jun-2006 BJD Added DM9000 platform information
29*/ 30*/
30 31
31#include <linux/kernel.h> 32#include <linux/kernel.h>
@@ -35,6 +36,7 @@
35#include <linux/timer.h> 36#include <linux/timer.h>
36#include <linux/init.h> 37#include <linux/init.h>
37#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/dm9000.h>
38 40
39#include <asm/mach/arch.h> 41#include <asm/mach/arch.h>
40#include <asm/mach/map.h> 42#include <asm/mach/map.h>
@@ -53,6 +55,7 @@
53#include <asm/arch/regs-serial.h> 55#include <asm/arch/regs-serial.h>
54#include <asm/arch/regs-gpio.h> 56#include <asm/arch/regs-gpio.h>
55#include <asm/arch/regs-mem.h> 57#include <asm/arch/regs-mem.h>
58#include <asm/arch/regs-lcd.h>
56#include <asm/arch/nand.h> 59#include <asm/arch/nand.h>
57 60
58#include <linux/mtd/mtd.h> 61#include <linux/mtd/mtd.h>
@@ -112,7 +115,6 @@ static struct map_desc bast_iodesc[] __initdata = {
112 { VA_C2(BAST_VA_ISAMEM), PA_CS2(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE }, 115 { VA_C2(BAST_VA_ISAMEM), PA_CS2(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
113 { VA_C2(BAST_VA_ASIXNET), PA_CS3(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE }, 116 { VA_C2(BAST_VA_ASIXNET), PA_CS3(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
114 { VA_C2(BAST_VA_SUPERIO), PA_CS2(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE }, 117 { VA_C2(BAST_VA_SUPERIO), PA_CS2(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
115 { VA_C2(BAST_VA_DM9000), PA_CS2(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
116 { VA_C2(BAST_VA_IDEPRI), PA_CS3(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE }, 118 { VA_C2(BAST_VA_IDEPRI), PA_CS3(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
117 { VA_C2(BAST_VA_IDESEC), PA_CS3(BAST_PA_IDESEC), SZ_1M, MT_DEVICE }, 119 { VA_C2(BAST_VA_IDESEC), PA_CS3(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
118 { VA_C2(BAST_VA_IDEPRIAUX), PA_CS3(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 120 { VA_C2(BAST_VA_IDEPRIAUX), PA_CS3(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -123,7 +125,6 @@ static struct map_desc bast_iodesc[] __initdata = {
123 { VA_C3(BAST_VA_ISAMEM), PA_CS3(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE }, 125 { VA_C3(BAST_VA_ISAMEM), PA_CS3(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
124 { VA_C3(BAST_VA_ASIXNET), PA_CS3(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE }, 126 { VA_C3(BAST_VA_ASIXNET), PA_CS3(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
125 { VA_C3(BAST_VA_SUPERIO), PA_CS3(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE }, 127 { VA_C3(BAST_VA_SUPERIO), PA_CS3(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
126 { VA_C3(BAST_VA_DM9000), PA_CS3(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
127 { VA_C3(BAST_VA_IDEPRI), PA_CS3(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE }, 128 { VA_C3(BAST_VA_IDEPRI), PA_CS3(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
128 { VA_C3(BAST_VA_IDESEC), PA_CS3(BAST_PA_IDESEC), SZ_1M, MT_DEVICE }, 129 { VA_C3(BAST_VA_IDESEC), PA_CS3(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
129 { VA_C3(BAST_VA_IDEPRIAUX), PA_CS3(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 130 { VA_C3(BAST_VA_IDEPRIAUX), PA_CS3(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -134,7 +135,6 @@ static struct map_desc bast_iodesc[] __initdata = {
134 { VA_C4(BAST_VA_ISAMEM), PA_CS4(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE }, 135 { VA_C4(BAST_VA_ISAMEM), PA_CS4(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
135 { VA_C4(BAST_VA_ASIXNET), PA_CS5(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE }, 136 { VA_C4(BAST_VA_ASIXNET), PA_CS5(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
136 { VA_C4(BAST_VA_SUPERIO), PA_CS4(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE }, 137 { VA_C4(BAST_VA_SUPERIO), PA_CS4(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
137 { VA_C4(BAST_VA_DM9000), PA_CS4(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
138 { VA_C4(BAST_VA_IDEPRI), PA_CS5(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE }, 138 { VA_C4(BAST_VA_IDEPRI), PA_CS5(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
139 { VA_C4(BAST_VA_IDESEC), PA_CS5(BAST_PA_IDESEC), SZ_1M, MT_DEVICE }, 139 { VA_C4(BAST_VA_IDESEC), PA_CS5(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
140 { VA_C4(BAST_VA_IDEPRIAUX), PA_CS5(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 140 { VA_C4(BAST_VA_IDEPRIAUX), PA_CS5(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -145,7 +145,6 @@ static struct map_desc bast_iodesc[] __initdata = {
145 { VA_C5(BAST_VA_ISAMEM), PA_CS5(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE }, 145 { VA_C5(BAST_VA_ISAMEM), PA_CS5(BAST_PA_ISAMEM), SZ_16M, MT_DEVICE },
146 { VA_C5(BAST_VA_ASIXNET), PA_CS5(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE }, 146 { VA_C5(BAST_VA_ASIXNET), PA_CS5(BAST_PA_ASIXNET), SZ_1M, MT_DEVICE },
147 { VA_C5(BAST_VA_SUPERIO), PA_CS5(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE }, 147 { VA_C5(BAST_VA_SUPERIO), PA_CS5(BAST_PA_SUPERIO), SZ_1M, MT_DEVICE },
148 { VA_C5(BAST_VA_DM9000), PA_CS5(BAST_PA_DM9000), SZ_1M, MT_DEVICE },
149 { VA_C5(BAST_VA_IDEPRI), PA_CS5(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE }, 148 { VA_C5(BAST_VA_IDEPRI), PA_CS5(BAST_PA_IDEPRI), SZ_1M, MT_DEVICE },
150 { VA_C5(BAST_VA_IDESEC), PA_CS5(BAST_PA_IDESEC), SZ_1M, MT_DEVICE }, 149 { VA_C5(BAST_VA_IDESEC), PA_CS5(BAST_PA_IDESEC), SZ_1M, MT_DEVICE },
151 { VA_C5(BAST_VA_IDEPRIAUX), PA_CS5(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 150 { VA_C5(BAST_VA_IDEPRIAUX), PA_CS5(BAST_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -313,6 +312,45 @@ static struct s3c2410_platform_nand bast_nand_info = {
313 .select_chip = bast_nand_select, 312 .select_chip = bast_nand_select,
314}; 313};
315 314
315/* DM9000 */
316
317static struct resource bast_dm9k_resource[] = {
318 [0] = {
319 .start = S3C2410_CS5 + BAST_PA_DM9000,
320 .end = S3C2410_CS5 + BAST_PA_DM9000 + 3,
321 .flags = IORESOURCE_MEM
322 },
323 [1] = {
324 .start = S3C2410_CS5 + BAST_PA_DM9000 + 0x40,
325 .end = S3C2410_CS5 + BAST_PA_DM9000 + 0x40 + 0x3f,
326 .flags = IORESOURCE_MEM
327 },
328 [2] = {
329 .start = IRQ_DM9000,
330 .end = IRQ_DM9000,
331 .flags = IORESOURCE_IRQ
332 }
333
334};
335
336/* for the moment we limit ourselves to 16bit IO until some
337 * better IO routines can be written and tested
338*/
339
340struct dm9000_plat_data bast_dm9k_platdata = {
341 .flags = DM9000_PLATF_16BITONLY
342};
343
344static struct platform_device bast_device_dm9k = {
345 .name = "dm9000",
346 .id = 0,
347 .num_resources = ARRAY_SIZE(bast_dm9k_resource),
348 .resource = bast_dm9k_resource,
349 .dev = {
350 .platform_data = &bast_dm9k_platdata,
351 }
352};
353
316 354
317/* Standard BAST devices */ 355/* Standard BAST devices */
318 356
@@ -324,7 +362,8 @@ static struct platform_device *bast_devices[] __initdata = {
324 &s3c_device_iis, 362 &s3c_device_iis,
325 &s3c_device_rtc, 363 &s3c_device_rtc,
326 &s3c_device_nand, 364 &s3c_device_nand,
327 &bast_device_nor 365 &bast_device_nor,
366 &bast_device_dm9k,
328}; 367};
329 368
330static struct clk *bast_clocks[] = { 369static struct clk *bast_clocks[] = {
diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c
index 5512146b1ce4..76be074944a0 100644
--- a/arch/arm/mach-s3c2410/mach-vr1000.c
+++ b/arch/arm/mach-s3c2410/mach-vr1000.c
@@ -27,6 +27,7 @@
27 * 10-Feb-2005 BJD Added power-off capability 27 * 10-Feb-2005 BJD Added power-off capability
28 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA 28 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
29 * 14-Mar-2006 BJD void __iomem fixes 29 * 14-Mar-2006 BJD void __iomem fixes
30 * 22-Jun-2006 BJD Added DM9000 platform information
30*/ 31*/
31 32
32#include <linux/kernel.h> 33#include <linux/kernel.h>
@@ -35,6 +36,7 @@
35#include <linux/list.h> 36#include <linux/list.h>
36#include <linux/timer.h> 37#include <linux/timer.h>
37#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/dm9000.h>
38 40
39#include <linux/serial.h> 41#include <linux/serial.h>
40#include <linux/tty.h> 42#include <linux/tty.h>
@@ -98,28 +100,24 @@ static struct map_desc vr1000_iodesc[] __initdata = {
98 * are only 8bit */ 100 * are only 8bit */
99 101
100 /* slow, byte */ 102 /* slow, byte */
101 { VA_C2(VR1000_VA_DM9000), PA_CS2(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
102 { VA_C2(VR1000_VA_IDEPRI), PA_CS3(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE }, 103 { VA_C2(VR1000_VA_IDEPRI), PA_CS3(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
103 { VA_C2(VR1000_VA_IDESEC), PA_CS3(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE }, 104 { VA_C2(VR1000_VA_IDESEC), PA_CS3(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
104 { VA_C2(VR1000_VA_IDEPRIAUX), PA_CS3(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 105 { VA_C2(VR1000_VA_IDEPRIAUX), PA_CS3(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
105 { VA_C2(VR1000_VA_IDESECAUX), PA_CS3(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE }, 106 { VA_C2(VR1000_VA_IDESECAUX), PA_CS3(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE },
106 107
107 /* slow, word */ 108 /* slow, word */
108 { VA_C3(VR1000_VA_DM9000), PA_CS3(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
109 { VA_C3(VR1000_VA_IDEPRI), PA_CS3(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE }, 109 { VA_C3(VR1000_VA_IDEPRI), PA_CS3(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
110 { VA_C3(VR1000_VA_IDESEC), PA_CS3(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE }, 110 { VA_C3(VR1000_VA_IDESEC), PA_CS3(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
111 { VA_C3(VR1000_VA_IDEPRIAUX), PA_CS3(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 111 { VA_C3(VR1000_VA_IDEPRIAUX), PA_CS3(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
112 { VA_C3(VR1000_VA_IDESECAUX), PA_CS3(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE }, 112 { VA_C3(VR1000_VA_IDESECAUX), PA_CS3(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE },
113 113
114 /* fast, byte */ 114 /* fast, byte */
115 { VA_C4(VR1000_VA_DM9000), PA_CS4(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
116 { VA_C4(VR1000_VA_IDEPRI), PA_CS5(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE }, 115 { VA_C4(VR1000_VA_IDEPRI), PA_CS5(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
117 { VA_C4(VR1000_VA_IDESEC), PA_CS5(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE }, 116 { VA_C4(VR1000_VA_IDESEC), PA_CS5(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
118 { VA_C4(VR1000_VA_IDEPRIAUX), PA_CS5(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 117 { VA_C4(VR1000_VA_IDEPRIAUX), PA_CS5(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
119 { VA_C4(VR1000_VA_IDESECAUX), PA_CS5(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE }, 118 { VA_C4(VR1000_VA_IDESECAUX), PA_CS5(VR1000_PA_IDESECAUX), SZ_1M, MT_DEVICE },
120 119
121 /* fast, word */ 120 /* fast, word */
122 { VA_C5(VR1000_VA_DM9000), PA_CS5(VR1000_PA_DM9000), SZ_1M, MT_DEVICE },
123 { VA_C5(VR1000_VA_IDEPRI), PA_CS5(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE }, 121 { VA_C5(VR1000_VA_IDEPRI), PA_CS5(VR1000_PA_IDEPRI), SZ_1M, MT_DEVICE },
124 { VA_C5(VR1000_VA_IDESEC), PA_CS5(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE }, 122 { VA_C5(VR1000_VA_IDESEC), PA_CS5(VR1000_PA_IDESEC), SZ_1M, MT_DEVICE },
125 { VA_C5(VR1000_VA_IDEPRIAUX), PA_CS5(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE }, 123 { VA_C5(VR1000_VA_IDEPRIAUX), PA_CS5(VR1000_PA_IDEPRIAUX), SZ_1M, MT_DEVICE },
@@ -246,6 +244,74 @@ static struct platform_device vr1000_nor = {
246 .resource = vr1000_nor_resource, 244 .resource = vr1000_nor_resource,
247}; 245};
248 246
247/* DM9000 ethernet devices */
248
249static struct resource vr1000_dm9k0_resource[] = {
250 [0] = {
251 .start = S3C2410_CS5 + VR1000_PA_DM9000,
252 .end = S3C2410_CS5 + VR1000_PA_DM9000 + 3,
253 .flags = IORESOURCE_MEM
254 },
255 [1] = {
256 .start = S3C2410_CS5 + VR1000_PA_DM9000 + 0x40,
257 .end = S3C2410_CS5 + VR1000_PA_DM9000 + 0x7f,
258 .flags = IORESOURCE_MEM
259 },
260 [2] = {
261 .start = IRQ_VR1000_DM9000A,
262 .end = IRQ_VR1000_DM9000A,
263 .flags = IORESOURCE_IRQ
264 }
265
266};
267
268static struct resource vr1000_dm9k1_resource[] = {
269 [0] = {
270 .start = S3C2410_CS5 + VR1000_PA_DM9000 + 0x80,
271 .end = S3C2410_CS5 + VR1000_PA_DM9000 + 0x83,
272 .flags = IORESOURCE_MEM
273 },
274 [1] = {
275 .start = S3C2410_CS5 + VR1000_PA_DM9000 + 0xC0,
276 .end = S3C2410_CS5 + VR1000_PA_DM9000 + 0xFF,
277 .flags = IORESOURCE_MEM
278 },
279 [2] = {
280 .start = IRQ_VR1000_DM9000N,
281 .end = IRQ_VR1000_DM9000N,
282 .flags = IORESOURCE_IRQ
283 }
284};
285
286/* for the moment we limit ourselves to 16bit IO until some
287 * better IO routines can be written and tested
288*/
289
290struct dm9000_plat_data vr1000_dm9k_platdata = {
291 .flags = DM9000_PLATF_16BITONLY,
292};
293
294static struct platform_device vr1000_dm9k0 = {
295 .name = "dm9000",
296 .id = 0,
297 .num_resources = ARRAY_SIZE(vr1000_dm9k0_resource),
298 .resource = vr1000_dm9k0_resource,
299 .dev = {
300 .platform_data = &vr1000_dm9k_platdata,
301 }
302};
303
304static struct platform_device vr1000_dm9k1 = {
305 .name = "dm9000",
306 .id = 1,
307 .num_resources = ARRAY_SIZE(vr1000_dm9k1_resource),
308 .resource = vr1000_dm9k1_resource,
309 .dev = {
310 .platform_data = &vr1000_dm9k_platdata,
311 }
312};
313
314/* devices for this board */
249 315
250static struct platform_device *vr1000_devices[] __initdata = { 316static struct platform_device *vr1000_devices[] __initdata = {
251 &s3c_device_usb, 317 &s3c_device_usb,
@@ -253,8 +319,11 @@ static struct platform_device *vr1000_devices[] __initdata = {
253 &s3c_device_wdt, 319 &s3c_device_wdt,
254 &s3c_device_i2c, 320 &s3c_device_i2c,
255 &s3c_device_iis, 321 &s3c_device_iis,
322 &s3c_device_adc,
256 &serial_device, 323 &serial_device,
257 &vr1000_nor, 324 &vr1000_nor,
325 &vr1000_dm9k0,
326 &vr1000_dm9k1
258}; 327};
259 328
260static struct clk *vr1000_clocks[] = { 329static struct clk *vr1000_clocks[] = {
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 0aa73d414783..e3d8510f4340 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -132,8 +132,8 @@ ENTRY(cpu_v6_switch_mm)
132 * 100x 1 0 1 r/o no acc 132 * 100x 1 0 1 r/o no acc
133 * 10x0 1 0 1 r/o no acc 133 * 10x0 1 0 1 r/o no acc
134 * 1011 0 0 1 r/w no acc 134 * 1011 0 0 1 r/w no acc
135 * 110x 1 1 0 r/o r/o 135 * 110x 0 1 0 r/w r/o
136 * 11x0 1 1 0 r/o r/o 136 * 11x0 0 1 0 r/w r/o
137 * 1111 0 1 1 r/w r/w 137 * 1111 0 1 1 r/w r/w
138 */ 138 */
139ENTRY(cpu_v6_set_pte) 139ENTRY(cpu_v6_set_pte)
@@ -150,7 +150,7 @@ ENTRY(cpu_v6_set_pte)
150 tst r1, #L_PTE_USER 150 tst r1, #L_PTE_USER
151 orrne r2, r2, #AP1 | nG 151 orrne r2, r2, #AP1 | nG
152 tstne r2, #APX 152 tstne r2, #APX
153 eorne r2, r2, #AP0 153 bicne r2, r2, #APX | AP0
154 154
155 tst r1, #L_PTE_YOUNG 155 tst r1, #L_PTE_YOUNG
156 biceq r2, r2, #APX | AP1 | AP0 156 biceq r2, r2, #APX | AP1 | AP0
diff --git a/arch/arm/nwfpe/softfloat-macros b/arch/arm/nwfpe/softfloat-macros
index 5469989f2c5e..5a060f95a58f 100644
--- a/arch/arm/nwfpe/softfloat-macros
+++ b/arch/arm/nwfpe/softfloat-macros
@@ -563,8 +563,14 @@ static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
563 bits64 rem0, rem1, term0, term1; 563 bits64 rem0, rem1, term0, term1;
564 bits64 z; 564 bits64 z;
565 if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF ); 565 if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF );
566 b0 = b>>32; 566 b0 = b>>32; /* hence b0 is 32 bits wide now */
567 z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32; 567 if ( b0<<32 <= a0 ) {
568 z = LIT64( 0xFFFFFFFF00000000 );
569 } else {
570 z = a0;
571 do_div( z, b0 );
572 z <<= 32;
573 }
568 mul64To128( b, z, &term0, &term1 ); 574 mul64To128( b, z, &term0, &term1 );
569 sub128( a0, a1, term0, term1, &rem0, &rem1 ); 575 sub128( a0, a1, term0, term1, &rem0, &rem1 );
570 while ( ( (sbits64) rem0 ) < 0 ) { 576 while ( ( (sbits64) rem0 ) < 0 ) {
@@ -573,7 +579,12 @@ static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
573 add128( rem0, rem1, b0, b1, &rem0, &rem1 ); 579 add128( rem0, rem1, b0, b1, &rem0, &rem1 );
574 } 580 }
575 rem0 = ( rem0<<32 ) | ( rem1>>32 ); 581 rem0 = ( rem0<<32 ) | ( rem1>>32 );
576 z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0; 582 if ( b0<<32 <= rem0 ) {
583 z |= 0xFFFFFFFF;
584 } else {
585 do_div( rem0, b0 );
586 z |= rem0;
587 }
577 return z; 588 return z;
578 589
579} 590}
@@ -601,6 +612,7 @@ static bits32 estimateSqrt32( int16 aExp, bits32 a )
601 }; 612 };
602 int8 index; 613 int8 index;
603 bits32 z; 614 bits32 z;
615 bits64 A;
604 616
605 index = ( a>>27 ) & 15; 617 index = ( a>>27 ) & 15;
606 if ( aExp & 1 ) { 618 if ( aExp & 1 ) {
@@ -614,7 +626,9 @@ static bits32 estimateSqrt32( int16 aExp, bits32 a )
614 z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 ); 626 z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
615 if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 ); 627 if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 );
616 } 628 }
617 return ( (bits32) ( ( ( (bits64) a )<<31 ) / z ) ) + ( z>>1 ); 629 A = ( (bits64) a )<<31;
630 do_div( A, z );
631 return ( (bits32) A ) + ( z>>1 );
618 632
619} 633}
620 634
diff --git a/arch/arm/nwfpe/softfloat.c b/arch/arm/nwfpe/softfloat.c
index 9d743ae29062..e038dd3be9b3 100644
--- a/arch/arm/nwfpe/softfloat.c
+++ b/arch/arm/nwfpe/softfloat.c
@@ -28,6 +28,8 @@ this code that are retained.
28=============================================================================== 28===============================================================================
29*/ 29*/
30 30
31#include <asm/div64.h>
32
31#include "fpa11.h" 33#include "fpa11.h"
32//#include "milieu.h" 34//#include "milieu.h"
33//#include "softfloat.h" 35//#include "softfloat.h"
@@ -1331,7 +1333,11 @@ float32 float32_div( float32 a, float32 b )
1331 aSig >>= 1; 1333 aSig >>= 1;
1332 ++zExp; 1334 ++zExp;
1333 } 1335 }
1334 zSig = ( ( (bits64) aSig )<<32 ) / bSig; 1336 {
1337 bits64 tmp = ( (bits64) aSig )<<32;
1338 do_div( tmp, bSig );
1339 zSig = tmp;
1340 }
1335 if ( ( zSig & 0x3F ) == 0 ) { 1341 if ( ( zSig & 0x3F ) == 0 ) {
1336 zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 ); 1342 zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 );
1337 } 1343 }
@@ -1397,7 +1403,9 @@ float32 float32_rem( float32 a, float32 b )
1397 q = ( bSig <= aSig ); 1403 q = ( bSig <= aSig );
1398 if ( q ) aSig -= bSig; 1404 if ( q ) aSig -= bSig;
1399 if ( 0 < expDiff ) { 1405 if ( 0 < expDiff ) {
1400 q = ( ( (bits64) aSig )<<32 ) / bSig; 1406 bits64 tmp = ( (bits64) aSig )<<32;
1407 do_div( tmp, bSig );
1408 q = tmp;
1401 q >>= 32 - expDiff; 1409 q >>= 32 - expDiff;
1402 bSig >>= 2; 1410 bSig >>= 2;
1403 aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; 1411 aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index 6caed90661fc..dc0c1936969b 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -179,6 +179,8 @@ config CMDLINE
179 time by entering them here. As a minimum, you should specify the 179 time by entering them here. As a minimum, you should specify the
180 memory size and the root device (e.g., mem=64M root=/dev/nfs). 180 memory size and the root device (e.g., mem=64M root=/dev/nfs).
181 181
182source "mm/Kconfig"
183
182endmenu 184endmenu
183 185
184source "drivers/base/Kconfig" 186source "drivers/base/Kconfig"
diff --git a/arch/arm26/boot/install.sh b/arch/arm26/boot/install.sh
index c628328dd9ec..8a8399b26cf7 100644
--- a/arch/arm26/boot/install.sh
+++ b/arch/arm26/boot/install.sh
@@ -23,8 +23,8 @@
23 23
24# User may have a custom install script 24# User may have a custom install script
25 25
26if [ -x /sbin/installkernel ]; then 26if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then
27 exec /sbin/installkernel "$@" 27 exec /sbin/${CROSS_COMPILE}installkernel "$@"
28fi 28fi
29 29
30if [ "$2" = "zImage" ]; then 30if [ "$2" = "zImage" ]; then
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 4332ca348d51..f848e3761491 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -74,6 +74,8 @@ config PREEMPT
74 Say Y here if you are building a kernel for a desktop, embedded 74 Say Y here if you are building a kernel for a desktop, embedded
75 or real-time system. Say N if you are unsure. 75 or real-time system. Say N if you are unsure.
76 76
77source mm/Kconfig
78
77endmenu 79endmenu
78 80
79menu "Hardware setup" 81menu "Hardware setup"
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 2b19372767eb..c93f95146cc2 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -74,6 +74,8 @@ config HIGHPTE
74 with a lot of RAM, this can be wasteful of precious low memory. 74 with a lot of RAM, this can be wasteful of precious low memory.
75 Setting this option will put user-space page tables in high memory. 75 Setting this option will put user-space page tables in high memory.
76 76
77source "mm/Kconfig"
78
77choice 79choice
78 prompt "uClinux kernel load address" 80 prompt "uClinux kernel load address"
79 depends on !MMU 81 depends on !MMU
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index d9dd62a565a9..a380167a13cf 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -180,4 +180,7 @@ config CPU_H8S
180config PREEMPT 180config PREEMPT
181 bool "Preemptible Kernel" 181 bool "Preemptible Kernel"
182 default n 182 default n
183
184source "mm/Kconfig"
185
183endmenu 186endmenu
diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c
index 18e51a7167d3..6ac93c05a1ae 100644
--- a/arch/h8300/platform/h8300h/ptrace_h8300h.c
+++ b/arch/h8300/platform/h8300h/ptrace_h8300h.c
@@ -245,12 +245,12 @@ static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc)
245 addr = h8300_get_reg(child, regno-1+PT_ER1); 245 addr = h8300_get_reg(child, regno-1+PT_ER1);
246 return (unsigned short *)addr; 246 return (unsigned short *)addr;
247 case relb: 247 case relb:
248 if ((inst = 0x55) || isbranch(child,inst & 0x0f)) 248 if (inst == 0x55 || isbranch(child,inst & 0x0f))
249 pc = (unsigned short *)((unsigned long)pc + 249 pc = (unsigned short *)((unsigned long)pc +
250 ((signed char)(*fetch_p))); 250 ((signed char)(*fetch_p)));
251 return pc+1; /* skip myself */ 251 return pc+1; /* skip myself */
252 case relw: 252 case relw:
253 if ((inst = 0x5c) || isbranch(child,(*fetch_p & 0xf0) >> 4)) 253 if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4))
254 pc = (unsigned short *)((unsigned long)pc + 254 pc = (unsigned short *)((unsigned long)pc +
255 ((signed short)(*(pc+1)))); 255 ((signed short)(*(pc+1))));
256 return pc+2; /* skip myself */ 256 return pc+2; /* skip myself */
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index dfd904f6883b..d4ae5f9ceae6 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -68,7 +68,6 @@ config X86_VOYAGER
68 68
69config X86_NUMAQ 69config X86_NUMAQ
70 bool "NUMAQ (IBM/Sequent)" 70 bool "NUMAQ (IBM/Sequent)"
71 select DISCONTIGMEM
72 select NUMA 71 select NUMA
73 help 72 help
74 This option is used for getting Linux to run on a (IBM/Sequent) NUMA 73 This option is used for getting Linux to run on a (IBM/Sequent) NUMA
@@ -783,25 +782,48 @@ comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
783comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" 782comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
784 depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) 783 depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
785 784
786config DISCONTIGMEM
787 bool
788 depends on NUMA
789 default y
790
791config HAVE_ARCH_BOOTMEM_NODE 785config HAVE_ARCH_BOOTMEM_NODE
792 bool 786 bool
793 depends on NUMA 787 depends on NUMA
794 default y 788 default y
795 789
796config HAVE_MEMORY_PRESENT 790config ARCH_HAVE_MEMORY_PRESENT
797 bool 791 bool
798 depends on DISCONTIGMEM 792 depends on DISCONTIGMEM
799 default y 793 default y
800 794
801config NEED_NODE_MEMMAP_SIZE 795config NEED_NODE_MEMMAP_SIZE
802 bool 796 bool
803 depends on DISCONTIGMEM 797 depends on DISCONTIGMEM || SPARSEMEM
798 default y
799
800config HAVE_ARCH_ALLOC_REMAP
801 bool
802 depends on NUMA
803 default y
804
805config ARCH_DISCONTIGMEM_ENABLE
806 def_bool y
807 depends on NUMA
808
809config ARCH_DISCONTIGMEM_DEFAULT
810 def_bool y
811 depends on NUMA
812
813config ARCH_SPARSEMEM_ENABLE
814 def_bool y
815 depends on NUMA
816
817config ARCH_SELECT_MEMORY_MODEL
818 def_bool y
819 depends on ARCH_SPARSEMEM_ENABLE
820
821source "mm/Kconfig"
822
823config HAVE_ARCH_EARLY_PFN_TO_NID
824 bool
804 default y 825 default y
826 depends on NUMA
805 827
806config HIGHPTE 828config HIGHPTE
807 bool "Allocate 3rd-level pagetables from highmem" 829 bool "Allocate 3rd-level pagetables from highmem"
@@ -939,6 +961,8 @@ config SECCOMP
939 961
940 If unsure, say Y. Only embedded should say N here. 962 If unsure, say Y. Only embedded should say N here.
941 963
964source kernel/Kconfig.hz
965
942endmenu 966endmenu
943 967
944 968
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 1c36ca332a96..bf7c9ba709f3 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -17,6 +17,13 @@
17# 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net> 17# 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net>
18# Added support for GEODE CPU 18# Added support for GEODE CPU
19 19
20HAS_BIARCH := $(call cc-option-yn, -m32)
21ifeq ($(HAS_BIARCH),y)
22AS := $(AS) --32
23LD := $(LD) -m elf_i386
24CC := $(CC) -m32
25endif
26
20LDFLAGS := -m elf_i386 27LDFLAGS := -m elf_i386
21OBJCOPYFLAGS := -O binary -R .note -R .comment -S 28OBJCOPYFLAGS := -O binary -R .note -R .comment -S
22LDFLAGS_vmlinux := 29LDFLAGS_vmlinux :=
diff --git a/arch/i386/boot/install.sh b/arch/i386/boot/install.sh
index 90f2452b3b9e..f17b40dfc0f4 100644
--- a/arch/i386/boot/install.sh
+++ b/arch/i386/boot/install.sh
@@ -21,8 +21,8 @@
21 21
22# User may have a custom install script 22# User may have a custom install script
23 23
24if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi 24if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
25if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi 25if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
26 26
27# Default install - same as make zlilo 27# Default install - same as make zlilo
28 28
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index d509836b70c3..8d993fa71754 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1133,7 +1133,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
1133 } 1133 }
1134 1134
1135#ifdef CONFIG_SMP 1135#ifdef CONFIG_SMP
1136 update_process_times(user_mode(regs)); 1136 update_process_times(user_mode_vm(regs));
1137#endif 1137#endif
1138 } 1138 }
1139 1139
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d199e525680a..b9954248d0aa 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -635,7 +635,7 @@ void __init cpu_init (void)
635 635
636 /* Clear all 6 debug registers: */ 636 /* Clear all 6 debug registers: */
637 637
638#define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) ); 638#define CD(register) set_debugreg(0, register)
639 639
640 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7); 640 CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
641 641
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index e1c2042b9b7e..d66b09e0c820 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -375,6 +375,19 @@ int mtrr_add_page(unsigned long base, unsigned long size,
375 return error; 375 return error;
376} 376}
377 377
378static int mtrr_check(unsigned long base, unsigned long size)
379{
380 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
381 printk(KERN_WARNING
382 "mtrr: size and base must be multiples of 4 kiB\n");
383 printk(KERN_DEBUG
384 "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
385 dump_stack();
386 return -1;
387 }
388 return 0;
389}
390
378/** 391/**
379 * mtrr_add - Add a memory type region 392 * mtrr_add - Add a memory type region
380 * @base: Physical base address of region 393 * @base: Physical base address of region
@@ -415,11 +428,8 @@ int
415mtrr_add(unsigned long base, unsigned long size, unsigned int type, 428mtrr_add(unsigned long base, unsigned long size, unsigned int type,
416 char increment) 429 char increment)
417{ 430{
418 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { 431 if (mtrr_check(base, size))
419 printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
420 printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
421 return -EINVAL; 432 return -EINVAL;
422 }
423 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, 433 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
424 increment); 434 increment);
425} 435}
@@ -511,11 +521,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
511int 521int
512mtrr_del(int reg, unsigned long base, unsigned long size) 522mtrr_del(int reg, unsigned long base, unsigned long size)
513{ 523{
514 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { 524 if (mtrr_check(base, size))
515 printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
516 printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
517 return -EINVAL; 525 return -EINVAL;
518 }
519 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); 526 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
520} 527}
521 528
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 7323c19f354e..8bd77d948a84 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -86,7 +86,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
86 seq_printf(m, "stepping\t: unknown\n"); 86 seq_printf(m, "stepping\t: unknown\n");
87 87
88 if ( cpu_has(c, X86_FEATURE_TSC) ) { 88 if ( cpu_has(c, X86_FEATURE_TSC) ) {
89 seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", 89 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
90 cpu_khz / 1000, (cpu_khz % 1000)); 90 cpu_khz / 1000, (cpu_khz % 1000));
91 } 91 }
92 92
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 903190a4b3ff..180f070d03cb 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -1,97 +1,17 @@
1#include <linux/config.h> 1#include <linux/config.h>
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/smp.h>
4#include <linux/user.h>
5#include <linux/elfcore.h>
6#include <linux/mca.h>
7#include <linux/sched.h>
8#include <linux/in6.h>
9#include <linux/interrupt.h>
10#include <linux/smp_lock.h>
11#include <linux/pm.h>
12#include <linux/pci.h>
13#include <linux/apm_bios.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/tty.h>
17#include <linux/highmem.h>
18#include <linux/time.h>
19
20#include <asm/semaphore.h>
21#include <asm/processor.h>
22#include <asm/i387.h>
23#include <asm/uaccess.h>
24#include <asm/checksum.h> 3#include <asm/checksum.h>
25#include <asm/io.h>
26#include <asm/delay.h>
27#include <asm/irq.h>
28#include <asm/mmx.h>
29#include <asm/desc.h> 4#include <asm/desc.h>
30#include <asm/pgtable.h>
31#include <asm/tlbflush.h>
32#include <asm/nmi.h>
33#include <asm/ist.h>
34#include <asm/kdebug.h>
35
36extern void dump_thread(struct pt_regs *, struct user *);
37extern spinlock_t rtc_lock;
38 5
39/* This is definitely a GPL-only symbol */ 6/* This is definitely a GPL-only symbol */
40EXPORT_SYMBOL_GPL(cpu_gdt_table); 7EXPORT_SYMBOL_GPL(cpu_gdt_table);
41 8
42#if defined(CONFIG_APM_MODULE)
43extern void machine_real_restart(unsigned char *, int);
44EXPORT_SYMBOL(machine_real_restart);
45extern void default_idle(void);
46EXPORT_SYMBOL(default_idle);
47#endif
48
49#ifdef CONFIG_SMP
50extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
51extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
52#endif
53
54#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
55extern struct drive_info_struct drive_info;
56EXPORT_SYMBOL(drive_info);
57#endif
58
59extern unsigned long cpu_khz;
60extern unsigned long get_cmos_time(void);
61
62/* platform dependent support */
63EXPORT_SYMBOL(boot_cpu_data);
64#ifdef CONFIG_DISCONTIGMEM
65EXPORT_SYMBOL(node_data);
66EXPORT_SYMBOL(physnode_map);
67#endif
68#ifdef CONFIG_X86_NUMAQ
69EXPORT_SYMBOL(xquad_portio);
70#endif
71EXPORT_SYMBOL(dump_thread);
72EXPORT_SYMBOL(dump_fpu);
73EXPORT_SYMBOL_GPL(kernel_fpu_begin);
74EXPORT_SYMBOL(__ioremap);
75EXPORT_SYMBOL(ioremap_nocache);
76EXPORT_SYMBOL(iounmap);
77EXPORT_SYMBOL(kernel_thread);
78EXPORT_SYMBOL(pm_idle);
79EXPORT_SYMBOL(pm_power_off);
80EXPORT_SYMBOL(get_cmos_time);
81EXPORT_SYMBOL(cpu_khz);
82EXPORT_SYMBOL(apm_info);
83
84EXPORT_SYMBOL(__down_failed); 9EXPORT_SYMBOL(__down_failed);
85EXPORT_SYMBOL(__down_failed_interruptible); 10EXPORT_SYMBOL(__down_failed_interruptible);
86EXPORT_SYMBOL(__down_failed_trylock); 11EXPORT_SYMBOL(__down_failed_trylock);
87EXPORT_SYMBOL(__up_wakeup); 12EXPORT_SYMBOL(__up_wakeup);
88/* Networking helper routines. */ 13/* Networking helper routines. */
89EXPORT_SYMBOL(csum_partial_copy_generic); 14EXPORT_SYMBOL(csum_partial_copy_generic);
90/* Delay loops */
91EXPORT_SYMBOL(__ndelay);
92EXPORT_SYMBOL(__udelay);
93EXPORT_SYMBOL(__delay);
94EXPORT_SYMBOL(__const_udelay);
95 15
96EXPORT_SYMBOL(__get_user_1); 16EXPORT_SYMBOL(__get_user_1);
97EXPORT_SYMBOL(__get_user_2); 17EXPORT_SYMBOL(__get_user_2);
@@ -105,87 +25,11 @@ EXPORT_SYMBOL(__put_user_8);
105EXPORT_SYMBOL(strpbrk); 25EXPORT_SYMBOL(strpbrk);
106EXPORT_SYMBOL(strstr); 26EXPORT_SYMBOL(strstr);
107 27
108EXPORT_SYMBOL(strncpy_from_user);
109EXPORT_SYMBOL(__strncpy_from_user);
110EXPORT_SYMBOL(clear_user);
111EXPORT_SYMBOL(__clear_user);
112EXPORT_SYMBOL(__copy_from_user_ll);
113EXPORT_SYMBOL(__copy_to_user_ll);
114EXPORT_SYMBOL(strnlen_user);
115
116EXPORT_SYMBOL(dma_alloc_coherent);
117EXPORT_SYMBOL(dma_free_coherent);
118
119#ifdef CONFIG_PCI
120EXPORT_SYMBOL(pci_mem_start);
121#endif
122
123#ifdef CONFIG_PCI_BIOS
124EXPORT_SYMBOL(pcibios_set_irq_routing);
125EXPORT_SYMBOL(pcibios_get_irq_routing_table);
126#endif
127
128#ifdef CONFIG_X86_USE_3DNOW
129EXPORT_SYMBOL(_mmx_memcpy);
130EXPORT_SYMBOL(mmx_clear_page);
131EXPORT_SYMBOL(mmx_copy_page);
132#endif
133
134#ifdef CONFIG_X86_HT
135EXPORT_SYMBOL(smp_num_siblings);
136EXPORT_SYMBOL(cpu_sibling_map);
137#endif
138
139#ifdef CONFIG_SMP 28#ifdef CONFIG_SMP
140EXPORT_SYMBOL(cpu_data); 29extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
141EXPORT_SYMBOL(cpu_online_map); 30extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
142EXPORT_SYMBOL(cpu_callout_map);
143EXPORT_SYMBOL(__write_lock_failed); 31EXPORT_SYMBOL(__write_lock_failed);
144EXPORT_SYMBOL(__read_lock_failed); 32EXPORT_SYMBOL(__read_lock_failed);
145
146/* Global SMP stuff */
147EXPORT_SYMBOL(smp_call_function);
148
149/* TLB flushing */
150EXPORT_SYMBOL(flush_tlb_page);
151#endif
152
153#ifdef CONFIG_X86_IO_APIC
154EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
155#endif
156
157#ifdef CONFIG_MCA
158EXPORT_SYMBOL(machine_id);
159#endif
160
161#ifdef CONFIG_VT
162EXPORT_SYMBOL(screen_info);
163#endif
164
165EXPORT_SYMBOL(get_wchan);
166
167EXPORT_SYMBOL(rtc_lock);
168
169EXPORT_SYMBOL_GPL(set_nmi_callback);
170EXPORT_SYMBOL_GPL(unset_nmi_callback);
171
172EXPORT_SYMBOL(register_die_notifier);
173#ifdef CONFIG_HAVE_DEC_LOCK
174EXPORT_SYMBOL(_atomic_dec_and_lock);
175#endif
176
177EXPORT_SYMBOL(__PAGE_KERNEL);
178
179#ifdef CONFIG_HIGHMEM
180EXPORT_SYMBOL(kmap);
181EXPORT_SYMBOL(kunmap);
182EXPORT_SYMBOL(kmap_atomic);
183EXPORT_SYMBOL(kunmap_atomic);
184EXPORT_SYMBOL(kmap_atomic_to_page);
185#endif
186
187#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
188EXPORT_SYMBOL(ist_info);
189#endif 33#endif
190 34
191EXPORT_SYMBOL(csum_partial); 35EXPORT_SYMBOL(csum_partial);
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index c55e037f08f7..b817168d9c62 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/config.h> 11#include <linux/config.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/module.h>
13#include <asm/processor.h> 14#include <asm/processor.h>
14#include <asm/i387.h> 15#include <asm/i387.h>
15#include <asm/math_emu.h> 16#include <asm/math_emu.h>
@@ -79,6 +80,7 @@ void kernel_fpu_begin(void)
79 } 80 }
80 clts(); 81 clts();
81} 82}
83EXPORT_SYMBOL_GPL(kernel_fpu_begin);
82 84
83void restore_fpu( struct task_struct *tsk ) 85void restore_fpu( struct task_struct *tsk )
84{ 86{
@@ -526,6 +528,7 @@ int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
526 528
527 return fpvalid; 529 return fpvalid;
528} 530}
531EXPORT_SYMBOL(dump_fpu);
529 532
530int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu) 533int dump_task_fpu(struct task_struct *tsk, struct user_i387_struct *fpu)
531{ 534{
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7a324e8b86f9..08540bc4ba3e 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -31,7 +31,7 @@
31#include <linux/mc146818rtc.h> 31#include <linux/mc146818rtc.h>
32#include <linux/compiler.h> 32#include <linux/compiler.h>
33#include <linux/acpi.h> 33#include <linux/acpi.h>
34 34#include <linux/module.h>
35#include <linux/sysdev.h> 35#include <linux/sysdev.h>
36#include <asm/io.h> 36#include <asm/io.h>
37#include <asm/smp.h> 37#include <asm/smp.h>
@@ -812,6 +812,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
812 } 812 }
813 return best_guess; 813 return best_guess;
814} 814}
815EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
815 816
816/* 817/*
817 * This function currently is only a helper for the i386 smp boot process where 818 * This function currently is only a helper for the i386 smp boot process where
@@ -1659,6 +1660,12 @@ static void __init setup_ioapic_ids_from_mpc(void)
1659 unsigned long flags; 1660 unsigned long flags;
1660 1661
1661 /* 1662 /*
1663 * Don't check I/O APIC IDs for xAPIC systems. They have
1664 * no meaning without the serial APIC bus.
1665 */
1666 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15))
1667 return;
1668 /*
1662 * This is broken; anything with a real cpu count has to 1669 * This is broken; anything with a real cpu count has to
1663 * circumvent this idiocy regardless. 1670 * circumvent this idiocy regardless.
1664 */ 1671 */
@@ -1684,10 +1691,6 @@ static void __init setup_ioapic_ids_from_mpc(void)
1684 mp_ioapics[apic].mpc_apicid = reg_00.bits.ID; 1691 mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
1685 } 1692 }
1686 1693
1687 /* Don't check I/O APIC IDs for some xAPIC systems. They have
1688 * no meaning without the serial APIC bus. */
1689 if (NO_IOAPIC_CHECK)
1690 continue;
1691 /* 1694 /*
1692 * Sanity check, is the ID really free? Every APIC in a 1695 * Sanity check, is the ID really free? Every APIC in a
1693 * system must have a unique ID or we get lots of nice 1696 * system must have a unique ID or we get lots of nice
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b455069..3762f6b35ab2 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
23 * Rusty Russell). 23 * Rusty Russell).
24 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 24 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
25 * interface to access function arguments. 25 * interface to access function arguments.
26 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
27 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
28 * <prasanna@in.ibm.com> added function-return probes.
26 */ 29 */
27 30
28#include <linux/config.h> 31#include <linux/config.h>
@@ -30,15 +33,14 @@
30#include <linux/ptrace.h> 33#include <linux/ptrace.h>
31#include <linux/spinlock.h> 34#include <linux/spinlock.h>
32#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <asm/cacheflush.h>
33#include <asm/kdebug.h> 37#include <asm/kdebug.h>
34#include <asm/desc.h> 38#include <asm/desc.h>
35 39
36/* kprobe_status settings */
37#define KPROBE_HIT_ACTIVE 0x00000001
38#define KPROBE_HIT_SS 0x00000002
39
40static struct kprobe *current_kprobe; 40static struct kprobe *current_kprobe;
41static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags; 41static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
42static struct kprobe *kprobe_prev;
43static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev;
42static struct pt_regs jprobe_saved_regs; 44static struct pt_regs jprobe_saved_regs;
43static long *jprobe_saved_esp; 45static long *jprobe_saved_esp;
44/* copy of the kernel stack at the probe fire time */ 46/* copy of the kernel stack at the probe fire time */
@@ -68,16 +70,50 @@ int arch_prepare_kprobe(struct kprobe *p)
68void arch_copy_kprobe(struct kprobe *p) 70void arch_copy_kprobe(struct kprobe *p)
69{ 71{
70 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 72 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
73 p->opcode = *p->addr;
71} 74}
72 75
73void arch_remove_kprobe(struct kprobe *p) 76void arch_arm_kprobe(struct kprobe *p)
74{ 77{
78 *p->addr = BREAKPOINT_INSTRUCTION;
79 flush_icache_range((unsigned long) p->addr,
80 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
75} 81}
76 82
77static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs) 83void arch_disarm_kprobe(struct kprobe *p)
78{ 84{
79 *p->addr = p->opcode; 85 *p->addr = p->opcode;
80 regs->eip = (unsigned long)p->addr; 86 flush_icache_range((unsigned long) p->addr,
87 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
88}
89
90void arch_remove_kprobe(struct kprobe *p)
91{
92}
93
94static inline void save_previous_kprobe(void)
95{
96 kprobe_prev = current_kprobe;
97 kprobe_status_prev = kprobe_status;
98 kprobe_old_eflags_prev = kprobe_old_eflags;
99 kprobe_saved_eflags_prev = kprobe_saved_eflags;
100}
101
102static inline void restore_previous_kprobe(void)
103{
104 current_kprobe = kprobe_prev;
105 kprobe_status = kprobe_status_prev;
106 kprobe_old_eflags = kprobe_old_eflags_prev;
107 kprobe_saved_eflags = kprobe_saved_eflags_prev;
108}
109
110static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
111{
112 current_kprobe = p;
113 kprobe_saved_eflags = kprobe_old_eflags
114 = (regs->eflags & (TF_MASK | IF_MASK));
115 if (is_IF_modifier(p->opcode))
116 kprobe_saved_eflags &= ~IF_MASK;
81} 117}
82 118
83static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 119static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -91,6 +127,50 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
91 regs->eip = (unsigned long)&p->ainsn.insn; 127 regs->eip = (unsigned long)&p->ainsn.insn;
92} 128}
93 129
130struct task_struct *arch_get_kprobe_task(void *ptr)
131{
132 return ((struct thread_info *) (((unsigned long) ptr) &
133 (~(THREAD_SIZE -1))))->task;
134}
135
136void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
137{
138 unsigned long *sara = (unsigned long *)&regs->esp;
139 struct kretprobe_instance *ri;
140 static void *orig_ret_addr;
141
142 /*
143 * Save the return address when the return probe hits
144 * the first time, and use it to populate the (krprobe
145 * instance)->ret_addr for subsequent return probes at
146 * the same addrress since stack address would have
147 * the kretprobe_trampoline by then.
148 */
149 if (((void*) *sara) != kretprobe_trampoline)
150 orig_ret_addr = (void*) *sara;
151
152 if ((ri = get_free_rp_inst(rp)) != NULL) {
153 ri->rp = rp;
154 ri->stack_addr = sara;
155 ri->ret_addr = orig_ret_addr;
156 add_rp_inst(ri);
157 /* Replace the return addr with trampoline addr */
158 *sara = (unsigned long) &kretprobe_trampoline;
159 } else {
160 rp->nmissed++;
161 }
162}
163
164void arch_kprobe_flush_task(struct task_struct *tk)
165{
166 struct kretprobe_instance *ri;
167 while ((ri = get_rp_inst_tsk(tk)) != NULL) {
168 *((unsigned long *)(ri->stack_addr)) =
169 (unsigned long) ri->ret_addr;
170 recycle_rp_inst(ri);
171 }
172}
173
94/* 174/*
95 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 175 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
96 * remain disabled thorough out this function. 176 * remain disabled thorough out this function.
@@ -127,8 +207,18 @@ static int kprobe_handler(struct pt_regs *regs)
127 unlock_kprobes(); 207 unlock_kprobes();
128 goto no_kprobe; 208 goto no_kprobe;
129 } 209 }
130 disarm_kprobe(p, regs); 210 /* We have reentered the kprobe_handler(), since
131 ret = 1; 211 * another probe was hit while within the handler.
212 * We here save the original kprobes variables and
213 * just single step on the instruction of the new probe
214 * without calling any user handlers.
215 */
216 save_previous_kprobe();
217 set_current_kprobe(p, regs);
218 p->nmissed++;
219 prepare_singlestep(p, regs);
220 kprobe_status = KPROBE_REENTER;
221 return 1;
132 } else { 222 } else {
133 p = current_kprobe; 223 p = current_kprobe;
134 if (p->break_handler && p->break_handler(p, regs)) { 224 if (p->break_handler && p->break_handler(p, regs)) {
@@ -163,11 +253,7 @@ static int kprobe_handler(struct pt_regs *regs)
163 } 253 }
164 254
165 kprobe_status = KPROBE_HIT_ACTIVE; 255 kprobe_status = KPROBE_HIT_ACTIVE;
166 current_kprobe = p; 256 set_current_kprobe(p, regs);
167 kprobe_saved_eflags = kprobe_old_eflags
168 = (regs->eflags & (TF_MASK | IF_MASK));
169 if (is_IF_modifier(p->opcode))
170 kprobe_saved_eflags &= ~IF_MASK;
171 257
172 if (p->pre_handler && p->pre_handler(p, regs)) 258 if (p->pre_handler && p->pre_handler(p, regs))
173 /* handler has already set things up, so skip ss setup */ 259 /* handler has already set things up, so skip ss setup */
@@ -184,6 +270,55 @@ no_kprobe:
184} 270}
185 271
186/* 272/*
273 * For function-return probes, init_kprobes() establishes a probepoint
274 * here. When a retprobed function returns, this probe is hit and
275 * trampoline_probe_handler() runs, calling the kretprobe's handler.
276 */
277 void kretprobe_trampoline_holder(void)
278 {
279 asm volatile ( ".global kretprobe_trampoline\n"
280 "kretprobe_trampoline: \n"
281 "nop\n");
282 }
283
284/*
285 * Called when we hit the probe point at kretprobe_trampoline
286 */
287int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
288{
289 struct task_struct *tsk;
290 struct kretprobe_instance *ri;
291 struct hlist_head *head;
292 struct hlist_node *node;
293 unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
294
295 tsk = arch_get_kprobe_task(sara);
296 head = kretprobe_inst_table_head(tsk);
297
298 hlist_for_each_entry(ri, node, head, hlist) {
299 if (ri->stack_addr == sara && ri->rp) {
300 if (ri->rp->handler)
301 ri->rp->handler(ri, regs);
302 }
303 }
304 return 0;
305}
306
307void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
308 unsigned long flags)
309{
310 struct kretprobe_instance *ri;
311 /* RA already popped */
312 unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
313
314 while ((ri = get_rp_inst(sara))) {
315 regs->eip = (unsigned long)ri->ret_addr;
316 recycle_rp_inst(ri);
317 }
318 regs->eflags &= ~TF_MASK;
319}
320
321/*
187 * Called after single-stepping. p->addr is the address of the 322 * Called after single-stepping. p->addr is the address of the
188 * instruction whose first byte has been replaced by the "int 3" 323 * instruction whose first byte has been replaced by the "int 3"
189 * instruction. To avoid the SMP problems that can occur when we 324 * instruction. To avoid the SMP problems that can occur when we
@@ -263,13 +398,22 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
263 if (!kprobe_running()) 398 if (!kprobe_running())
264 return 0; 399 return 0;
265 400
266 if (current_kprobe->post_handler) 401 if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
402 kprobe_status = KPROBE_HIT_SSDONE;
267 current_kprobe->post_handler(current_kprobe, regs, 0); 403 current_kprobe->post_handler(current_kprobe, regs, 0);
404 }
268 405
269 resume_execution(current_kprobe, regs); 406 if (current_kprobe->post_handler != trampoline_post_handler)
407 resume_execution(current_kprobe, regs);
270 regs->eflags |= kprobe_saved_eflags; 408 regs->eflags |= kprobe_saved_eflags;
271 409
410 /*Restore back the original saved kprobes variables and continue. */
411 if (kprobe_status == KPROBE_REENTER) {
412 restore_previous_kprobe();
413 goto out;
414 }
272 unlock_kprobes(); 415 unlock_kprobes();
416out:
273 preempt_enable_no_resched(); 417 preempt_enable_no_resched();
274 418
275 /* 419 /*
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 1347ab4939e7..383a11600d2c 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -914,7 +914,10 @@ void __init mp_register_ioapic (
914 mp_ioapics[idx].mpc_apicaddr = address; 914 mp_ioapics[idx].mpc_apicaddr = address;
915 915
916 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 916 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
917 mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); 917 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15))
918 mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
919 else
920 mp_ioapics[idx].mpc_apicid = id;
918 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); 921 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
919 922
920 /* 923 /*
@@ -1055,11 +1058,20 @@ void __init mp_config_acpi_legacy_irqs (void)
1055 } 1058 }
1056} 1059}
1057 1060
1061#define MAX_GSI_NUM 4096
1062
1058int mp_register_gsi (u32 gsi, int edge_level, int active_high_low) 1063int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
1059{ 1064{
1060 int ioapic = -1; 1065 int ioapic = -1;
1061 int ioapic_pin = 0; 1066 int ioapic_pin = 0;
1062 int idx, bit = 0; 1067 int idx, bit = 0;
1068 static int pci_irq = 16;
1069 /*
1070 * Mapping between Global System Interrups, which
1071 * represent all possible interrupts, and IRQs
1072 * assigned to actual devices.
1073 */
1074 static int gsi_to_irq[MAX_GSI_NUM];
1063 1075
1064#ifdef CONFIG_ACPI_BUS 1076#ifdef CONFIG_ACPI_BUS
1065 /* Don't set up the ACPI SCI because it's already set up */ 1077 /* Don't set up the ACPI SCI because it's already set up */
@@ -1094,11 +1106,26 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
1094 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { 1106 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
1095 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 1107 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
1096 mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 1108 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
1097 return gsi; 1109 return gsi_to_irq[gsi];
1098 } 1110 }
1099 1111
1100 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); 1112 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
1101 1113
1114 if (edge_level) {
1115 /*
1116 * For PCI devices assign IRQs in order, avoiding gaps
1117 * due to unused I/O APIC pins.
1118 */
1119 int irq = gsi;
1120 if (gsi < MAX_GSI_NUM) {
1121 gsi = pci_irq++;
1122 gsi_to_irq[irq] = gsi;
1123 } else {
1124 printk(KERN_ERR "GSI %u is too high\n", gsi);
1125 return gsi;
1126 }
1127 }
1128
1102 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, 1129 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
1103 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1, 1130 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
1104 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1); 1131 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2c0ee9c2d020..da6c46d667cb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -28,8 +28,7 @@
28#include <linux/sysctl.h> 28#include <linux/sysctl.h>
29 29
30#include <asm/smp.h> 30#include <asm/smp.h>
31#include <asm/mtrr.h> 31#include <asm/div64.h>
32#include <asm/mpspec.h>
33#include <asm/nmi.h> 32#include <asm/nmi.h>
34 33
35#include "mach_traps.h" 34#include "mach_traps.h"
@@ -324,6 +323,16 @@ static void clear_msr_range(unsigned int base, unsigned int n)
324 wrmsr(base+i, 0, 0); 323 wrmsr(base+i, 0, 0);
325} 324}
326 325
326static inline void write_watchdog_counter(const char *descr)
327{
328 u64 count = (u64)cpu_khz * 1000;
329
330 do_div(count, nmi_hz);
331 if(descr)
332 Dprintk("setting %s to -0x%08Lx\n", descr, count);
333 wrmsrl(nmi_perfctr_msr, 0 - count);
334}
335
327static void setup_k7_watchdog(void) 336static void setup_k7_watchdog(void)
328{ 337{
329 unsigned int evntsel; 338 unsigned int evntsel;
@@ -339,8 +348,7 @@ static void setup_k7_watchdog(void)
339 | K7_NMI_EVENT; 348 | K7_NMI_EVENT;
340 349
341 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); 350 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
342 Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000)); 351 write_watchdog_counter("K7_PERFCTR0");
343 wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
344 apic_write(APIC_LVTPC, APIC_DM_NMI); 352 apic_write(APIC_LVTPC, APIC_DM_NMI);
345 evntsel |= K7_EVNTSEL_ENABLE; 353 evntsel |= K7_EVNTSEL_ENABLE;
346 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); 354 wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
@@ -361,8 +369,7 @@ static void setup_p6_watchdog(void)
361 | P6_NMI_EVENT; 369 | P6_NMI_EVENT;
362 370
363 wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); 371 wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
364 Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000)); 372 write_watchdog_counter("P6_PERFCTR0");
365 wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
366 apic_write(APIC_LVTPC, APIC_DM_NMI); 373 apic_write(APIC_LVTPC, APIC_DM_NMI);
367 evntsel |= P6_EVNTSEL0_ENABLE; 374 evntsel |= P6_EVNTSEL0_ENABLE;
368 wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); 375 wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
@@ -402,8 +409,7 @@ static int setup_p4_watchdog(void)
402 409
403 wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); 410 wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
404 wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); 411 wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
405 Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz/nmi_hz*1000)); 412 write_watchdog_counter("P4_IQ_COUNTER0");
406 wrmsr(MSR_P4_IQ_COUNTER0, -(cpu_khz/nmi_hz*1000), -1);
407 apic_write(APIC_LVTPC, APIC_DM_NMI); 413 apic_write(APIC_LVTPC, APIC_DM_NMI);
408 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); 414 wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
409 return 1; 415 return 1;
@@ -518,7 +524,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
518 * other P6 variant */ 524 * other P6 variant */
519 apic_write(APIC_LVTPC, APIC_DM_NMI); 525 apic_write(APIC_LVTPC, APIC_DM_NMI);
520 } 526 }
521 wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1); 527 write_watchdog_counter(NULL);
522 } 528 }
523} 529}
524 530
diff --git a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
index 4de2e03c7b45..1e51427cc9eb 100644
--- a/arch/i386/kernel/pci-dma.c
+++ b/arch/i386/kernel/pci-dma.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h>
14#include <asm/io.h> 15#include <asm/io.h>
15 16
16struct dma_coherent_mem { 17struct dma_coherent_mem {
@@ -54,6 +55,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
54 } 55 }
55 return ret; 56 return ret;
56} 57}
58EXPORT_SYMBOL(dma_alloc_coherent);
57 59
58void dma_free_coherent(struct device *dev, size_t size, 60void dma_free_coherent(struct device *dev, size_t size,
59 void *vaddr, dma_addr_t dma_handle) 61 void *vaddr, dma_addr_t dma_handle)
@@ -68,6 +70,7 @@ void dma_free_coherent(struct device *dev, size_t size,
68 } else 70 } else
69 free_pages((unsigned long)vaddr, order); 71 free_pages((unsigned long)vaddr, order);
70} 72}
73EXPORT_SYMBOL(dma_free_coherent);
71 74
72int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 75int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
73 dma_addr_t device_addr, size_t size, int flags) 76 dma_addr_t device_addr, size_t size, int flags)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 96e3ea6b17c7..aea2ce1145df 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -37,6 +37,7 @@
37#include <linux/kallsyms.h> 37#include <linux/kallsyms.h>
38#include <linux/ptrace.h> 38#include <linux/ptrace.h>
39#include <linux/random.h> 39#include <linux/random.h>
40#include <linux/kprobes.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -73,6 +74,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
73 * Powermanagement idle function, if any.. 74 * Powermanagement idle function, if any..
74 */ 75 */
75void (*pm_idle)(void); 76void (*pm_idle)(void);
77EXPORT_SYMBOL(pm_idle);
76static DEFINE_PER_CPU(unsigned int, cpu_idle_state); 78static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
77 79
78void disable_hlt(void) 80void disable_hlt(void)
@@ -105,6 +107,9 @@ void default_idle(void)
105 cpu_relax(); 107 cpu_relax();
106 } 108 }
107} 109}
110#ifdef CONFIG_APM_MODULE
111EXPORT_SYMBOL(default_idle);
112#endif
108 113
109/* 114/*
110 * On SMP it's slightly faster (but much more power-consuming!) 115 * On SMP it's slightly faster (but much more power-consuming!)
@@ -262,7 +267,7 @@ void show_regs(struct pt_regs * regs)
262 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); 267 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
263 print_symbol("EIP is at %s\n", regs->eip); 268 print_symbol("EIP is at %s\n", regs->eip);
264 269
265 if (regs->xcs & 3) 270 if (user_mode(regs))
266 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); 271 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
267 printk(" EFLAGS: %08lx %s (%s)\n", 272 printk(" EFLAGS: %08lx %s (%s)\n",
268 regs->eflags, print_tainted(), system_utsname.release); 273 regs->eflags, print_tainted(), system_utsname.release);
@@ -325,6 +330,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
325 /* Ok, create the new process.. */ 330 /* Ok, create the new process.. */
326 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); 331 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
327} 332}
333EXPORT_SYMBOL(kernel_thread);
328 334
329/* 335/*
330 * Free current thread data structures etc.. 336 * Free current thread data structures etc..
@@ -334,6 +340,13 @@ void exit_thread(void)
334 struct task_struct *tsk = current; 340 struct task_struct *tsk = current;
335 struct thread_struct *t = &tsk->thread; 341 struct thread_struct *t = &tsk->thread;
336 342
343 /*
344 * Remove function-return probe instances associated with this task
345 * and put them back on the free list. Do not insert an exit probe for
346 * this function, it will be disabled by kprobe_flush_task if you do.
347 */
348 kprobe_flush_task(tsk);
349
337 /* The process may have allocated an io port bitmap... nuke it. */ 350 /* The process may have allocated an io port bitmap... nuke it. */
338 if (unlikely(NULL != t->io_bitmap_ptr)) { 351 if (unlikely(NULL != t->io_bitmap_ptr)) {
339 int cpu = get_cpu(); 352 int cpu = get_cpu();
@@ -357,6 +370,13 @@ void flush_thread(void)
357{ 370{
358 struct task_struct *tsk = current; 371 struct task_struct *tsk = current;
359 372
373 /*
374 * Remove function-return probe instances associated with this task
375 * and put them back on the free list. Do not insert an exit probe for
376 * this function, it will be disabled by kprobe_flush_task if you do.
377 */
378 kprobe_flush_task(tsk);
379
360 memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); 380 memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
361 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); 381 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
362 /* 382 /*
@@ -508,6 +528,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
508 528
509 dump->u_fpvalid = dump_fpu (regs, &dump->i387); 529 dump->u_fpvalid = dump_fpu (regs, &dump->i387);
510} 530}
531EXPORT_SYMBOL(dump_thread);
511 532
512/* 533/*
513 * Capture the user space registers if the task is not running (in user space) 534 * Capture the user space registers if the task is not running (in user space)
@@ -627,13 +648,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
627 * Now maybe reload the debug registers 648 * Now maybe reload the debug registers
628 */ 649 */
629 if (unlikely(next->debugreg[7])) { 650 if (unlikely(next->debugreg[7])) {
630 loaddebug(next, 0); 651 set_debugreg(current->thread.debugreg[0], 0);
631 loaddebug(next, 1); 652 set_debugreg(current->thread.debugreg[1], 1);
632 loaddebug(next, 2); 653 set_debugreg(current->thread.debugreg[2], 2);
633 loaddebug(next, 3); 654 set_debugreg(current->thread.debugreg[3], 3);
634 /* no 4 and 5 */ 655 /* no 4 and 5 */
635 loaddebug(next, 6); 656 set_debugreg(current->thread.debugreg[6], 6);
636 loaddebug(next, 7); 657 set_debugreg(current->thread.debugreg[7], 7);
637 } 658 }
638 659
639 if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) 660 if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
@@ -731,6 +752,7 @@ unsigned long get_wchan(struct task_struct *p)
731 } while (count++ < 16); 752 } while (count++ < 16);
732 return 0; 753 return 0;
733} 754}
755EXPORT_SYMBOL(get_wchan);
734 756
735/* 757/*
736 * sys_alloc_thread_area: get a yet unused TLS descriptor index. 758 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index e34f651fa13c..0da59b42843c 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -668,7 +668,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
668 info.si_code = TRAP_BRKPT; 668 info.si_code = TRAP_BRKPT;
669 669
670 /* User-mode eip? */ 670 /* User-mode eip? */
671 info.si_addr = user_mode(regs) ? (void __user *) regs->eip : NULL; 671 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
672 672
673 /* Send us the fakey SIGTRAP */ 673 /* Send us the fakey SIGTRAP */
674 force_sig_info(SIGTRAP, &info, tsk); 674 force_sig_info(SIGTRAP, &info, tsk);
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 6dc27eb70ee7..db912209a8d3 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -2,6 +2,7 @@
2 * linux/arch/i386/kernel/reboot.c 2 * linux/arch/i386/kernel/reboot.c
3 */ 3 */
4 4
5#include <linux/config.h>
5#include <linux/mm.h> 6#include <linux/mm.h>
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/delay.h> 8#include <linux/delay.h>
@@ -19,6 +20,7 @@
19 * Power off function, if any 20 * Power off function, if any
20 */ 21 */
21void (*pm_power_off)(void); 22void (*pm_power_off)(void);
23EXPORT_SYMBOL(pm_power_off);
22 24
23static int reboot_mode; 25static int reboot_mode;
24static int reboot_thru_bios; 26static int reboot_thru_bios;
@@ -295,6 +297,9 @@ void machine_real_restart(unsigned char *code, int length)
295 : 297 :
296 : "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100))); 298 : "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
297} 299}
300#ifdef CONFIG_APM_MODULE
301EXPORT_SYMBOL(machine_real_restart);
302#endif
298 303
299void machine_restart(char * __unused) 304void machine_restart(char * __unused)
300{ 305{
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 2bfbddebdbf8..30406fd0b64c 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -23,8 +23,10 @@
23 * This file handles the architecture-dependent parts of initialization 23 * This file handles the architecture-dependent parts of initialization
24 */ 24 */
25 25
26#include <linux/config.h>
26#include <linux/sched.h> 27#include <linux/sched.h>
27#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/mmzone.h>
28#include <linux/tty.h> 30#include <linux/tty.h>
29#include <linux/ioport.h> 31#include <linux/ioport.h>
30#include <linux/acpi.h> 32#include <linux/acpi.h>
@@ -73,6 +75,7 @@ EXPORT_SYMBOL(efi_enabled);
73struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 75struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
74/* common cpu data for all cpus */ 76/* common cpu data for all cpus */
75struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 77struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
78EXPORT_SYMBOL(boot_cpu_data);
76 79
77unsigned long mmu_cr4_features; 80unsigned long mmu_cr4_features;
78 81
@@ -90,12 +93,18 @@ extern acpi_interrupt_flags acpi_sci_flags;
90 93
91/* for MCA, but anyone else can use it if they want */ 94/* for MCA, but anyone else can use it if they want */
92unsigned int machine_id; 95unsigned int machine_id;
96#ifdef CONFIG_MCA
97EXPORT_SYMBOL(machine_id);
98#endif
93unsigned int machine_submodel_id; 99unsigned int machine_submodel_id;
94unsigned int BIOS_revision; 100unsigned int BIOS_revision;
95unsigned int mca_pentium_flag; 101unsigned int mca_pentium_flag;
96 102
97/* For PCI or other memory-mapped resources */ 103/* For PCI or other memory-mapped resources */
98unsigned long pci_mem_start = 0x10000000; 104unsigned long pci_mem_start = 0x10000000;
105#ifdef CONFIG_PCI
106EXPORT_SYMBOL(pci_mem_start);
107#endif
99 108
100/* Boot loader ID as an integer, for the benefit of proc_dointvec */ 109/* Boot loader ID as an integer, for the benefit of proc_dointvec */
101int bootloader_type; 110int bootloader_type;
@@ -107,14 +116,26 @@ static unsigned int highmem_pages = -1;
107 * Setup options 116 * Setup options
108 */ 117 */
109struct drive_info_struct { char dummy[32]; } drive_info; 118struct drive_info_struct { char dummy[32]; } drive_info;
119#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
120 defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
121EXPORT_SYMBOL(drive_info);
122#endif
110struct screen_info screen_info; 123struct screen_info screen_info;
124#ifdef CONFIG_VT
125EXPORT_SYMBOL(screen_info);
126#endif
111struct apm_info apm_info; 127struct apm_info apm_info;
128EXPORT_SYMBOL(apm_info);
112struct sys_desc_table_struct { 129struct sys_desc_table_struct {
113 unsigned short length; 130 unsigned short length;
114 unsigned char table[0]; 131 unsigned char table[0];
115}; 132};
116struct edid_info edid_info; 133struct edid_info edid_info;
117struct ist_info ist_info; 134struct ist_info ist_info;
135#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
136 defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
137EXPORT_SYMBOL(ist_info);
138#endif
118struct e820map e820; 139struct e820map e820;
119 140
120extern void early_cpu_init(void); 141extern void early_cpu_init(void);
@@ -1022,7 +1043,7 @@ static void __init reserve_ebda_region(void)
1022 reserve_bootmem(addr, PAGE_SIZE); 1043 reserve_bootmem(addr, PAGE_SIZE);
1023} 1044}
1024 1045
1025#ifndef CONFIG_DISCONTIGMEM 1046#ifndef CONFIG_NEED_MULTIPLE_NODES
1026void __init setup_bootmem_allocator(void); 1047void __init setup_bootmem_allocator(void);
1027static unsigned long __init setup_memory(void) 1048static unsigned long __init setup_memory(void)
1028{ 1049{
@@ -1072,9 +1093,9 @@ void __init zone_sizes_init(void)
1072 free_area_init(zones_size); 1093 free_area_init(zones_size);
1073} 1094}
1074#else 1095#else
1075extern unsigned long setup_memory(void); 1096extern unsigned long __init setup_memory(void);
1076extern void zone_sizes_init(void); 1097extern void zone_sizes_init(void);
1077#endif /* !CONFIG_DISCONTIGMEM */ 1098#endif /* !CONFIG_NEED_MULTIPLE_NODES */
1078 1099
1079void __init setup_bootmem_allocator(void) 1100void __init setup_bootmem_allocator(void)
1080{ 1101{
@@ -1475,6 +1496,7 @@ void __init setup_arch(char **cmdline_p)
1475#endif 1496#endif
1476 paging_init(); 1497 paging_init();
1477 remapped_pgdat_init(); 1498 remapped_pgdat_init();
1499 sparse_init();
1478 zone_sizes_init(); 1500 zone_sizes_init();
1479 1501
1480 /* 1502 /*
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index ea46d028af08..b9b8f4e20fad 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -346,8 +346,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
346extern void __user __kernel_sigreturn; 346extern void __user __kernel_sigreturn;
347extern void __user __kernel_rt_sigreturn; 347extern void __user __kernel_rt_sigreturn;
348 348
349static void setup_frame(int sig, struct k_sigaction *ka, 349static int setup_frame(int sig, struct k_sigaction *ka,
350 sigset_t *set, struct pt_regs * regs) 350 sigset_t *set, struct pt_regs * regs)
351{ 351{
352 void __user *restorer; 352 void __user *restorer;
353 struct sigframe __user *frame; 353 struct sigframe __user *frame;
@@ -429,13 +429,14 @@ static void setup_frame(int sig, struct k_sigaction *ka,
429 current->comm, current->pid, frame, regs->eip, frame->pretcode); 429 current->comm, current->pid, frame, regs->eip, frame->pretcode);
430#endif 430#endif
431 431
432 return; 432 return 1;
433 433
434give_sigsegv: 434give_sigsegv:
435 force_sigsegv(sig, current); 435 force_sigsegv(sig, current);
436 return 0;
436} 437}
437 438
438static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 439static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
439 sigset_t *set, struct pt_regs * regs) 440 sigset_t *set, struct pt_regs * regs)
440{ 441{
441 void __user *restorer; 442 void __user *restorer;
@@ -522,20 +523,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
522 current->comm, current->pid, frame, regs->eip, frame->pretcode); 523 current->comm, current->pid, frame, regs->eip, frame->pretcode);
523#endif 524#endif
524 525
525 return; 526 return 1;
526 527
527give_sigsegv: 528give_sigsegv:
528 force_sigsegv(sig, current); 529 force_sigsegv(sig, current);
530 return 0;
529} 531}
530 532
531/* 533/*
532 * OK, we're invoking a handler 534 * OK, we're invoking a handler
533 */ 535 */
534 536
535static void 537static int
536handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 538handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
537 sigset_t *oldset, struct pt_regs * regs) 539 sigset_t *oldset, struct pt_regs * regs)
538{ 540{
541 int ret;
542
539 /* Are we from a system call? */ 543 /* Are we from a system call? */
540 if (regs->orig_eax >= 0) { 544 if (regs->orig_eax >= 0) {
541 /* If so, check system call restarting.. */ 545 /* If so, check system call restarting.. */
@@ -569,17 +573,19 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
569 573
570 /* Set up the stack frame */ 574 /* Set up the stack frame */
571 if (ka->sa.sa_flags & SA_SIGINFO) 575 if (ka->sa.sa_flags & SA_SIGINFO)
572 setup_rt_frame(sig, ka, info, oldset, regs); 576 ret = setup_rt_frame(sig, ka, info, oldset, regs);
573 else 577 else
574 setup_frame(sig, ka, oldset, regs); 578 ret = setup_frame(sig, ka, oldset, regs);
575 579
576 if (!(ka->sa.sa_flags & SA_NODEFER)) { 580 if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
577 spin_lock_irq(&current->sighand->siglock); 581 spin_lock_irq(&current->sighand->siglock);
578 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 582 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
579 sigaddset(&current->blocked,sig); 583 sigaddset(&current->blocked,sig);
580 recalc_sigpending(); 584 recalc_sigpending();
581 spin_unlock_irq(&current->sighand->siglock); 585 spin_unlock_irq(&current->sighand->siglock);
582 } 586 }
587
588 return ret;
583} 589}
584 590
585/* 591/*
@@ -599,7 +605,7 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
599 * kernel mode. Just return without doing anything 605 * kernel mode. Just return without doing anything
600 * if so. 606 * if so.
601 */ 607 */
602 if ((regs->xcs & 3) != 3) 608 if (!user_mode(regs))
603 return 1; 609 return 1;
604 610
605 if (current->flags & PF_FREEZE) { 611 if (current->flags & PF_FREEZE) {
@@ -618,12 +624,11 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
618 * inside the kernel. 624 * inside the kernel.
619 */ 625 */
620 if (unlikely(current->thread.debugreg[7])) { 626 if (unlikely(current->thread.debugreg[7])) {
621 loaddebug(&current->thread, 7); 627 set_debugreg(current->thread.debugreg[7], 7);
622 } 628 }
623 629
624 /* Whee! Actually deliver the signal. */ 630 /* Whee! Actually deliver the signal. */
625 handle_signal(signr, &info, &ka, oldset, regs); 631 return handle_signal(signr, &info, &ka, oldset, regs);
626 return 1;
627 } 632 }
628 633
629 no_signal: 634 no_signal:
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 6223c33ac91c..68be7d0c7238 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -19,6 +19,7 @@
19#include <linux/mc146818rtc.h> 19#include <linux/mc146818rtc.h>
20#include <linux/cache.h> 20#include <linux/cache.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/module.h>
22 23
23#include <asm/mtrr.h> 24#include <asm/mtrr.h>
24#include <asm/tlbflush.h> 25#include <asm/tlbflush.h>
@@ -452,6 +453,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
452 453
453 preempt_enable(); 454 preempt_enable();
454} 455}
456EXPORT_SYMBOL(flush_tlb_page);
455 457
456static void do_flush_tlb_all(void* info) 458static void do_flush_tlb_all(void* info)
457{ 459{
@@ -547,6 +549,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
547 549
548 return 0; 550 return 0;
549} 551}
552EXPORT_SYMBOL(smp_call_function);
550 553
551static void stop_this_cpu (void * dummy) 554static void stop_this_cpu (void * dummy)
552{ 555{
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bc1bb6919e6a..c20d96d5c15c 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -60,6 +60,9 @@ static int __initdata smp_b_stepping;
60 60
61/* Number of siblings per CPU package */ 61/* Number of siblings per CPU package */
62int smp_num_siblings = 1; 62int smp_num_siblings = 1;
63#ifdef CONFIG_X86_HT
64EXPORT_SYMBOL(smp_num_siblings);
65#endif
63int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */ 66int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
64EXPORT_SYMBOL(phys_proc_id); 67EXPORT_SYMBOL(phys_proc_id);
65int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */ 68int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
@@ -67,13 +70,16 @@ EXPORT_SYMBOL(cpu_core_id);
67 70
68/* bitmap of online cpus */ 71/* bitmap of online cpus */
69cpumask_t cpu_online_map; 72cpumask_t cpu_online_map;
73EXPORT_SYMBOL(cpu_online_map);
70 74
71cpumask_t cpu_callin_map; 75cpumask_t cpu_callin_map;
72cpumask_t cpu_callout_map; 76cpumask_t cpu_callout_map;
77EXPORT_SYMBOL(cpu_callout_map);
73static cpumask_t smp_commenced_mask; 78static cpumask_t smp_commenced_mask;
74 79
75/* Per CPU bogomips and other parameters */ 80/* Per CPU bogomips and other parameters */
76struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; 81struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
82EXPORT_SYMBOL(cpu_data);
77 83
78u8 x86_cpu_to_apicid[NR_CPUS] = 84u8 x86_cpu_to_apicid[NR_CPUS] =
79 { [0 ... NR_CPUS-1] = 0xff }; 85 { [0 ... NR_CPUS-1] = 0xff };
@@ -199,7 +205,7 @@ static void __init synchronize_tsc_bp (void)
199 unsigned long long t0; 205 unsigned long long t0;
200 unsigned long long sum, avg; 206 unsigned long long sum, avg;
201 long long delta; 207 long long delta;
202 unsigned long one_usec; 208 unsigned int one_usec;
203 int buggy = 0; 209 int buggy = 0;
204 210
205 printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus()); 211 printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
@@ -885,8 +891,14 @@ static void smp_tune_scheduling (void)
885static int boot_cpu_logical_apicid; 891static int boot_cpu_logical_apicid;
886/* Where the IO area was mapped on multiquad, always 0 otherwise */ 892/* Where the IO area was mapped on multiquad, always 0 otherwise */
887void *xquad_portio; 893void *xquad_portio;
894#ifdef CONFIG_X86_NUMAQ
895EXPORT_SYMBOL(xquad_portio);
896#endif
888 897
889cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; 898cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
899#ifdef CONFIG_X86_HT
900EXPORT_SYMBOL(cpu_sibling_map);
901#endif
890cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 902cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
891EXPORT_SYMBOL(cpu_core_map); 903EXPORT_SYMBOL(cpu_core_map);
892 904
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index a0dcb7c87c30..e68d9fdb0759 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -77,11 +77,13 @@ u64 jiffies_64 = INITIAL_JIFFIES;
77 77
78EXPORT_SYMBOL(jiffies_64); 78EXPORT_SYMBOL(jiffies_64);
79 79
80unsigned long cpu_khz; /* Detected as we calibrate the TSC */ 80unsigned int cpu_khz; /* Detected as we calibrate the TSC */
81EXPORT_SYMBOL(cpu_khz);
81 82
82extern unsigned long wall_jiffies; 83extern unsigned long wall_jiffies;
83 84
84DEFINE_SPINLOCK(rtc_lock); 85DEFINE_SPINLOCK(rtc_lock);
86EXPORT_SYMBOL(rtc_lock);
85 87
86DEFINE_SPINLOCK(i8253_lock); 88DEFINE_SPINLOCK(i8253_lock);
87EXPORT_SYMBOL(i8253_lock); 89EXPORT_SYMBOL(i8253_lock);
@@ -324,6 +326,8 @@ unsigned long get_cmos_time(void)
324 326
325 return retval; 327 return retval;
326} 328}
329EXPORT_SYMBOL(get_cmos_time);
330
327static void sync_cmos_clock(unsigned long dummy); 331static void sync_cmos_clock(unsigned long dummy);
328 332
329static struct timer_list sync_cmos_timer = 333static struct timer_list sync_cmos_timer =
diff --git a/arch/i386/kernel/timers/common.c b/arch/i386/kernel/timers/common.c
index 8e201219f525..37353bd31803 100644
--- a/arch/i386/kernel/timers/common.c
+++ b/arch/i386/kernel/timers/common.c
@@ -139,6 +139,15 @@ bad_calibration:
139} 139}
140#endif 140#endif
141 141
142
143unsigned long read_timer_tsc(void)
144{
145 unsigned long retval;
146 rdtscl(retval);
147 return retval;
148}
149
150
142/* calculate cpu_khz */ 151/* calculate cpu_khz */
143void init_cpu_khz(void) 152void init_cpu_khz(void)
144{ 153{
@@ -154,7 +163,8 @@ void init_cpu_khz(void)
154 :"=a" (cpu_khz), "=d" (edx) 163 :"=a" (cpu_khz), "=d" (edx)
155 :"r" (tsc_quotient), 164 :"r" (tsc_quotient),
156 "0" (eax), "1" (edx)); 165 "0" (eax), "1" (edx));
157 printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); 166 printk("Detected %u.%03u MHz processor.\n",
167 cpu_khz / 1000, cpu_khz % 1000);
158 } 168 }
159 } 169 }
160 } 170 }
diff --git a/arch/i386/kernel/timers/timer.c b/arch/i386/kernel/timers/timer.c
index a3d6a288088b..7e39ed8e33f8 100644
--- a/arch/i386/kernel/timers/timer.c
+++ b/arch/i386/kernel/timers/timer.c
@@ -64,3 +64,12 @@ struct timer_opts* __init select_timer(void)
64 panic("select_timer: Cannot find a suitable timer\n"); 64 panic("select_timer: Cannot find a suitable timer\n");
65 return NULL; 65 return NULL;
66} 66}
67
68int read_current_timer(unsigned long *timer_val)
69{
70 if (cur_timer->read_timer) {
71 *timer_val = cur_timer->read_timer();
72 return 0;
73 }
74 return -1;
75}
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index f778f471a09a..d766e0963ac1 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -158,7 +158,7 @@ static int __init init_hpet(char* override)
158 { unsigned long eax=0, edx=1000; 158 { unsigned long eax=0, edx=1000;
159 ASM_DIV64_REG(cpu_khz, edx, tsc_quotient, 159 ASM_DIV64_REG(cpu_khz, edx, tsc_quotient,
160 eax, edx); 160 eax, edx);
161 printk("Detected %lu.%03lu MHz processor.\n", 161 printk("Detected %u.%03u MHz processor.\n",
162 cpu_khz / 1000, cpu_khz % 1000); 162 cpu_khz / 1000, cpu_khz % 1000);
163 } 163 }
164 set_cyc2ns_scale(cpu_khz/1000); 164 set_cyc2ns_scale(cpu_khz/1000);
@@ -186,6 +186,7 @@ static struct timer_opts timer_hpet = {
186 .get_offset = get_offset_hpet, 186 .get_offset = get_offset_hpet,
187 .monotonic_clock = monotonic_clock_hpet, 187 .monotonic_clock = monotonic_clock_hpet,
188 .delay = delay_hpet, 188 .delay = delay_hpet,
189 .read_timer = read_timer_tsc,
189}; 190};
190 191
191struct init_timer_opts __initdata timer_hpet_init = { 192struct init_timer_opts __initdata timer_hpet_init = {
diff --git a/arch/i386/kernel/timers/timer_pm.c b/arch/i386/kernel/timers/timer_pm.c
index d77f22030fe6..4ef20e663498 100644
--- a/arch/i386/kernel/timers/timer_pm.c
+++ b/arch/i386/kernel/timers/timer_pm.c
@@ -246,6 +246,7 @@ static struct timer_opts timer_pmtmr = {
246 .get_offset = get_offset_pmtmr, 246 .get_offset = get_offset_pmtmr,
247 .monotonic_clock = monotonic_clock_pmtmr, 247 .monotonic_clock = monotonic_clock_pmtmr,
248 .delay = delay_pmtmr, 248 .delay = delay_pmtmr,
249 .read_timer = read_timer_tsc,
249}; 250};
250 251
251struct init_timer_opts __initdata timer_pmtmr_init = { 252struct init_timer_opts __initdata timer_pmtmr_init = {
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 180444d87824..54c36b182021 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -256,7 +256,7 @@ static unsigned long loops_per_jiffy_ref = 0;
256 256
257#ifndef CONFIG_SMP 257#ifndef CONFIG_SMP
258static unsigned long fast_gettimeoffset_ref = 0; 258static unsigned long fast_gettimeoffset_ref = 0;
259static unsigned long cpu_khz_ref = 0; 259static unsigned int cpu_khz_ref = 0;
260#endif 260#endif
261 261
262static int 262static int
@@ -323,7 +323,7 @@ static inline void cpufreq_delayed_get(void) { return; }
323int recalibrate_cpu_khz(void) 323int recalibrate_cpu_khz(void)
324{ 324{
325#ifndef CONFIG_SMP 325#ifndef CONFIG_SMP
326 unsigned long cpu_khz_old = cpu_khz; 326 unsigned int cpu_khz_old = cpu_khz;
327 327
328 if (cpu_has_tsc) { 328 if (cpu_has_tsc) {
329 init_cpu_khz(); 329 init_cpu_khz();
@@ -534,7 +534,8 @@ static int __init init_tsc(char* override)
534 :"=a" (cpu_khz), "=d" (edx) 534 :"=a" (cpu_khz), "=d" (edx)
535 :"r" (tsc_quotient), 535 :"r" (tsc_quotient),
536 "0" (eax), "1" (edx)); 536 "0" (eax), "1" (edx));
537 printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000); 537 printk("Detected %u.%03u MHz processor.\n",
538 cpu_khz / 1000, cpu_khz % 1000);
538 } 539 }
539 set_cyc2ns_scale(cpu_khz/1000); 540 set_cyc2ns_scale(cpu_khz/1000);
540 return 0; 541 return 0;
@@ -572,6 +573,7 @@ static struct timer_opts timer_tsc = {
572 .get_offset = get_offset_tsc, 573 .get_offset = get_offset_tsc,
573 .monotonic_clock = monotonic_clock_tsc, 574 .monotonic_clock = monotonic_clock_tsc,
574 .delay = delay_tsc, 575 .delay = delay_tsc,
576 .read_timer = read_timer_tsc,
575}; 577};
576 578
577struct init_timer_opts __initdata timer_tsc_init = { 579struct init_timer_opts __initdata timer_tsc_init = {
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 83c579e82a81..e4d4e2162c7a 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -104,6 +104,7 @@ int register_die_notifier(struct notifier_block *nb)
104 spin_unlock_irqrestore(&die_notifier_lock, flags); 104 spin_unlock_irqrestore(&die_notifier_lock, flags);
105 return err; 105 return err;
106} 106}
107EXPORT_SYMBOL(register_die_notifier);
107 108
108static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) 109static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
109{ 110{
@@ -209,7 +210,7 @@ void show_registers(struct pt_regs *regs)
209 210
210 esp = (unsigned long) (&regs->esp); 211 esp = (unsigned long) (&regs->esp);
211 ss = __KERNEL_DS; 212 ss = __KERNEL_DS;
212 if (regs->xcs & 3) { 213 if (user_mode(regs)) {
213 in_kernel = 0; 214 in_kernel = 0;
214 esp = regs->esp; 215 esp = regs->esp;
215 ss = regs->xss & 0xffff; 216 ss = regs->xss & 0xffff;
@@ -265,7 +266,7 @@ static void handle_BUG(struct pt_regs *regs)
265 char c; 266 char c;
266 unsigned long eip; 267 unsigned long eip;
267 268
268 if (regs->xcs & 3) 269 if (user_mode(regs))
269 goto no_bug; /* Not in kernel */ 270 goto no_bug; /* Not in kernel */
270 271
271 eip = regs->eip; 272 eip = regs->eip;
@@ -353,7 +354,7 @@ void die(const char * str, struct pt_regs * regs, long err)
353 354
354static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) 355static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
355{ 356{
356 if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs)) 357 if (!user_mode_vm(regs))
357 die(str, regs, err); 358 die(str, regs, err);
358} 359}
359 360
@@ -366,7 +367,7 @@ static void do_trap(int trapnr, int signr, char *str, int vm86,
366 goto trap_signal; 367 goto trap_signal;
367 } 368 }
368 369
369 if (!(regs->xcs & 3)) 370 if (!user_mode(regs))
370 goto kernel_trap; 371 goto kernel_trap;
371 372
372 trap_signal: { 373 trap_signal: {
@@ -488,7 +489,7 @@ fastcall void do_general_protection(struct pt_regs * regs, long error_code)
488 if (regs->eflags & VM_MASK) 489 if (regs->eflags & VM_MASK)
489 goto gp_in_vm86; 490 goto gp_in_vm86;
490 491
491 if (!(regs->xcs & 3)) 492 if (!user_mode(regs))
492 goto gp_in_kernel; 493 goto gp_in_kernel;
493 494
494 current->thread.error_code = error_code; 495 current->thread.error_code = error_code;
@@ -636,11 +637,13 @@ void set_nmi_callback(nmi_callback_t callback)
636{ 637{
637 nmi_callback = callback; 638 nmi_callback = callback;
638} 639}
640EXPORT_SYMBOL_GPL(set_nmi_callback);
639 641
640void unset_nmi_callback(void) 642void unset_nmi_callback(void)
641{ 643{
642 nmi_callback = dummy_nmi_callback; 644 nmi_callback = dummy_nmi_callback;
643} 645}
646EXPORT_SYMBOL_GPL(unset_nmi_callback);
644 647
645#ifdef CONFIG_KPROBES 648#ifdef CONFIG_KPROBES
646fastcall void do_int3(struct pt_regs *regs, long error_code) 649fastcall void do_int3(struct pt_regs *regs, long error_code)
@@ -682,7 +685,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
682 unsigned int condition; 685 unsigned int condition;
683 struct task_struct *tsk = current; 686 struct task_struct *tsk = current;
684 687
685 __asm__ __volatile__("movl %%db6,%0" : "=r" (condition)); 688 get_debugreg(condition, 6);
686 689
687 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, 690 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
688 SIGTRAP) == NOTIFY_STOP) 691 SIGTRAP) == NOTIFY_STOP)
@@ -713,7 +716,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
713 * check for kernel mode by just checking the CPL 716 * check for kernel mode by just checking the CPL
714 * of CS. 717 * of CS.
715 */ 718 */
716 if ((regs->xcs & 3) == 0) 719 if (!user_mode(regs))
717 goto clear_TF_reenable; 720 goto clear_TF_reenable;
718 } 721 }
719 722
@@ -724,9 +727,7 @@ fastcall void do_debug(struct pt_regs * regs, long error_code)
724 * the signal is delivered. 727 * the signal is delivered.
725 */ 728 */
726clear_dr7: 729clear_dr7:
727 __asm__("movl %0,%%db7" 730 set_debugreg(0, 7);
728 : /* no output */
729 : "r" (0));
730 return; 731 return;
731 732
732debug_vm86: 733debug_vm86:
diff --git a/arch/i386/lib/dec_and_lock.c b/arch/i386/lib/dec_and_lock.c
index ab43394dc775..8b81b2524fa6 100644
--- a/arch/i386/lib/dec_and_lock.c
+++ b/arch/i386/lib/dec_and_lock.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/module.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12 13
13int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) 14int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
@@ -38,3 +39,4 @@ slow_path:
38 spin_unlock(lock); 39 spin_unlock(lock);
39 return 0; 40 return 0;
40} 41}
42EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index eb0cdfe9280f..c49a6acbee56 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -13,6 +13,7 @@
13#include <linux/config.h> 13#include <linux/config.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/module.h>
16#include <asm/processor.h> 17#include <asm/processor.h>
17#include <asm/delay.h> 18#include <asm/delay.h>
18#include <asm/timer.h> 19#include <asm/timer.h>
@@ -47,3 +48,8 @@ void __ndelay(unsigned long nsecs)
47{ 48{
48 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 49 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
49} 50}
51
52EXPORT_SYMBOL(__delay);
53EXPORT_SYMBOL(__const_udelay);
54EXPORT_SYMBOL(__udelay);
55EXPORT_SYMBOL(__ndelay);
diff --git a/arch/i386/lib/mmx.c b/arch/i386/lib/mmx.c
index 01f8b1a2cc84..2afda94dffd3 100644
--- a/arch/i386/lib/mmx.c
+++ b/arch/i386/lib/mmx.c
@@ -3,6 +3,7 @@
3#include <linux/string.h> 3#include <linux/string.h>
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/hardirq.h> 5#include <linux/hardirq.h>
6#include <linux/module.h>
6 7
7#include <asm/i387.h> 8#include <asm/i387.h>
8 9
@@ -397,3 +398,7 @@ void mmx_copy_page(void *to, void *from)
397 else 398 else
398 fast_copy_page(to, from); 399 fast_copy_page(to, from);
399} 400}
401
402EXPORT_SYMBOL(_mmx_memcpy);
403EXPORT_SYMBOL(mmx_clear_page);
404EXPORT_SYMBOL(mmx_copy_page);
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 51aa2bbb0269..4cf981d70f45 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -84,6 +84,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
84 __do_strncpy_from_user(dst, src, count, res); 84 __do_strncpy_from_user(dst, src, count, res);
85 return res; 85 return res;
86} 86}
87EXPORT_SYMBOL(__strncpy_from_user);
87 88
88/** 89/**
89 * strncpy_from_user: - Copy a NUL terminated string from userspace. 90 * strncpy_from_user: - Copy a NUL terminated string from userspace.
@@ -111,7 +112,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
111 __do_strncpy_from_user(dst, src, count, res); 112 __do_strncpy_from_user(dst, src, count, res);
112 return res; 113 return res;
113} 114}
114 115EXPORT_SYMBOL(strncpy_from_user);
115 116
116/* 117/*
117 * Zero Userspace 118 * Zero Userspace
@@ -157,6 +158,7 @@ clear_user(void __user *to, unsigned long n)
157 __do_clear_user(to, n); 158 __do_clear_user(to, n);
158 return n; 159 return n;
159} 160}
161EXPORT_SYMBOL(clear_user);
160 162
161/** 163/**
162 * __clear_user: - Zero a block of memory in user space, with less checking. 164 * __clear_user: - Zero a block of memory in user space, with less checking.
@@ -175,6 +177,7 @@ __clear_user(void __user *to, unsigned long n)
175 __do_clear_user(to, n); 177 __do_clear_user(to, n);
176 return n; 178 return n;
177} 179}
180EXPORT_SYMBOL(__clear_user);
178 181
179/** 182/**
180 * strlen_user: - Get the size of a string in user space. 183 * strlen_user: - Get the size of a string in user space.
@@ -218,6 +221,7 @@ long strnlen_user(const char __user *s, long n)
218 :"cc"); 221 :"cc");
219 return res & mask; 222 return res & mask;
220} 223}
224EXPORT_SYMBOL(strnlen_user);
221 225
222#ifdef CONFIG_X86_INTEL_USERCOPY 226#ifdef CONFIG_X86_INTEL_USERCOPY
223static unsigned long 227static unsigned long
@@ -570,6 +574,7 @@ survive:
570 n = __copy_user_intel(to, from, n); 574 n = __copy_user_intel(to, from, n);
571 return n; 575 return n;
572} 576}
577EXPORT_SYMBOL(__copy_to_user_ll);
573 578
574unsigned long 579unsigned long
575__copy_from_user_ll(void *to, const void __user *from, unsigned long n) 580__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
@@ -581,6 +586,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
581 n = __copy_user_zeroing_intel(to, from, n); 586 n = __copy_user_zeroing_intel(to, from, n);
582 return n; 587 return n;
583} 588}
589EXPORT_SYMBOL(__copy_from_user_ll);
584 590
585/** 591/**
586 * copy_to_user: - Copy a block of data into user space. 592 * copy_to_user: - Copy a block of data into user space.
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index a6e0ddd65bd0..8c8527593da0 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1288,7 +1288,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
1288 per_cpu(prof_counter, cpu); 1288 per_cpu(prof_counter, cpu);
1289 } 1289 }
1290 1290
1291 update_process_times(user_mode(regs)); 1291 update_process_times(user_mode_vm(regs));
1292 } 1292 }
1293 1293
1294 if( ((1<<cpu) & voyager_extended_vic_processors) == 0) 1294 if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
diff --git a/arch/i386/mm/Makefile b/arch/i386/mm/Makefile
index fc3272506846..80908b5aa60f 100644
--- a/arch/i386/mm/Makefile
+++ b/arch/i386/mm/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o 5obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o mmap.o
6 6
7obj-$(CONFIG_DISCONTIGMEM) += discontig.o 7obj-$(CONFIG_NUMA) += discontig.o
8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
9obj-$(CONFIG_HIGHMEM) += highmem.o 9obj-$(CONFIG_HIGHMEM) += highmem.o
10obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o 10obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 1726b4096b10..f429c871e845 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -29,12 +29,14 @@
29#include <linux/highmem.h> 29#include <linux/highmem.h>
30#include <linux/initrd.h> 30#include <linux/initrd.h>
31#include <linux/nodemask.h> 31#include <linux/nodemask.h>
32#include <linux/module.h>
32#include <asm/e820.h> 33#include <asm/e820.h>
33#include <asm/setup.h> 34#include <asm/setup.h>
34#include <asm/mmzone.h> 35#include <asm/mmzone.h>
35#include <bios_ebda.h> 36#include <bios_ebda.h>
36 37
37struct pglist_data *node_data[MAX_NUMNODES]; 38struct pglist_data *node_data[MAX_NUMNODES];
39EXPORT_SYMBOL(node_data);
38bootmem_data_t node0_bdata; 40bootmem_data_t node0_bdata;
39 41
40/* 42/*
@@ -42,12 +44,16 @@ bootmem_data_t node0_bdata;
42 * populated the following initialisation. 44 * populated the following initialisation.
43 * 45 *
44 * 1) node_online_map - the map of all nodes configured (online) in the system 46 * 1) node_online_map - the map of all nodes configured (online) in the system
45 * 2) physnode_map - the mapping between a pfn and owning node 47 * 2) node_start_pfn - the starting page frame number for a node
46 * 3) node_start_pfn - the starting page frame number for a node
47 * 3) node_end_pfn - the ending page fram number for a node 48 * 3) node_end_pfn - the ending page fram number for a node
48 */ 49 */
50unsigned long node_start_pfn[MAX_NUMNODES];
51unsigned long node_end_pfn[MAX_NUMNODES];
52
49 53
54#ifdef CONFIG_DISCONTIGMEM
50/* 55/*
56 * 4) physnode_map - the mapping between a pfn and owning node
51 * physnode_map keeps track of the physical memory layout of a generic 57 * physnode_map keeps track of the physical memory layout of a generic
52 * numa node on a 256Mb break (each element of the array will 58 * numa node on a 256Mb break (each element of the array will
53 * represent 256Mb of memory and will be marked by the node id. so, 59 * represent 256Mb of memory and will be marked by the node id. so,
@@ -59,6 +65,7 @@ bootmem_data_t node0_bdata;
59 * physnode_map[8- ] = -1; 65 * physnode_map[8- ] = -1;
60 */ 66 */
61s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1}; 67s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};
68EXPORT_SYMBOL(physnode_map);
62 69
63void memory_present(int nid, unsigned long start, unsigned long end) 70void memory_present(int nid, unsigned long start, unsigned long end)
64{ 71{
@@ -85,9 +92,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
85 92
86 return (nr_pages + 1) * sizeof(struct page); 93 return (nr_pages + 1) * sizeof(struct page);
87} 94}
88 95#endif
89unsigned long node_start_pfn[MAX_NUMNODES];
90unsigned long node_end_pfn[MAX_NUMNODES];
91 96
92extern unsigned long find_max_low_pfn(void); 97extern unsigned long find_max_low_pfn(void);
93extern void find_max_pfn(void); 98extern void find_max_pfn(void);
@@ -108,6 +113,9 @@ unsigned long node_remap_offset[MAX_NUMNODES];
108void *node_remap_start_vaddr[MAX_NUMNODES]; 113void *node_remap_start_vaddr[MAX_NUMNODES];
109void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 114void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
110 115
116void *node_remap_end_vaddr[MAX_NUMNODES];
117void *node_remap_alloc_vaddr[MAX_NUMNODES];
118
111/* 119/*
112 * FLAT - support for basic PC memory model with discontig enabled, essentially 120 * FLAT - support for basic PC memory model with discontig enabled, essentially
113 * a single node with all available processors in it with a flat 121 * a single node with all available processors in it with a flat
@@ -146,6 +154,21 @@ static void __init find_max_pfn_node(int nid)
146 BUG(); 154 BUG();
147} 155}
148 156
157/* Find the owning node for a pfn. */
158int early_pfn_to_nid(unsigned long pfn)
159{
160 int nid;
161
162 for_each_node(nid) {
163 if (node_end_pfn[nid] == 0)
164 break;
165 if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
166 return nid;
167 }
168
169 return 0;
170}
171
149/* 172/*
150 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem 173 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
151 * method. For node zero take this from the bottom of memory, for 174 * method. For node zero take this from the bottom of memory, for
@@ -163,6 +186,21 @@ static void __init allocate_pgdat(int nid)
163 } 186 }
164} 187}
165 188
189void *alloc_remap(int nid, unsigned long size)
190{
191 void *allocation = node_remap_alloc_vaddr[nid];
192
193 size = ALIGN(size, L1_CACHE_BYTES);
194
195 if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
196 return 0;
197
198 node_remap_alloc_vaddr[nid] += size;
199 memset(allocation, 0, size);
200
201 return allocation;
202}
203
166void __init remap_numa_kva(void) 204void __init remap_numa_kva(void)
167{ 205{
168 void *vaddr; 206 void *vaddr;
@@ -170,8 +208,6 @@ void __init remap_numa_kva(void)
170 int node; 208 int node;
171 209
172 for_each_online_node(node) { 210 for_each_online_node(node) {
173 if (node == 0)
174 continue;
175 for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) { 211 for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
176 vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT); 212 vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
177 set_pmd_pfn((ulong) vaddr, 213 set_pmd_pfn((ulong) vaddr,
@@ -185,13 +221,9 @@ static unsigned long calculate_numa_remap_pages(void)
185{ 221{
186 int nid; 222 int nid;
187 unsigned long size, reserve_pages = 0; 223 unsigned long size, reserve_pages = 0;
224 unsigned long pfn;
188 225
189 for_each_online_node(nid) { 226 for_each_online_node(nid) {
190 if (nid == 0)
191 continue;
192 if (!node_remap_size[nid])
193 continue;
194
195 /* 227 /*
196 * The acpi/srat node info can show hot-add memroy zones 228 * The acpi/srat node info can show hot-add memroy zones
197 * where memory could be added but not currently present. 229 * where memory could be added but not currently present.
@@ -208,11 +240,24 @@ static unsigned long calculate_numa_remap_pages(void)
208 size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES; 240 size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
209 /* now the roundup is correct, convert to PAGE_SIZE pages */ 241 /* now the roundup is correct, convert to PAGE_SIZE pages */
210 size = size * PTRS_PER_PTE; 242 size = size * PTRS_PER_PTE;
243
244 /*
245 * Validate the region we are allocating only contains valid
246 * pages.
247 */
248 for (pfn = node_end_pfn[nid] - size;
249 pfn < node_end_pfn[nid]; pfn++)
250 if (!page_is_ram(pfn))
251 break;
252
253 if (pfn != node_end_pfn[nid])
254 size = 0;
255
211 printk("Reserving %ld pages of KVA for lmem_map of node %d\n", 256 printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
212 size, nid); 257 size, nid);
213 node_remap_size[nid] = size; 258 node_remap_size[nid] = size;
214 reserve_pages += size;
215 node_remap_offset[nid] = reserve_pages; 259 node_remap_offset[nid] = reserve_pages;
260 reserve_pages += size;
216 printk("Shrinking node %d from %ld pages to %ld pages\n", 261 printk("Shrinking node %d from %ld pages to %ld pages\n",
217 nid, node_end_pfn[nid], node_end_pfn[nid] - size); 262 nid, node_end_pfn[nid], node_end_pfn[nid] - size);
218 node_end_pfn[nid] -= size; 263 node_end_pfn[nid] -= size;
@@ -265,12 +310,18 @@ unsigned long __init setup_memory(void)
265 (ulong) pfn_to_kaddr(max_low_pfn)); 310 (ulong) pfn_to_kaddr(max_low_pfn));
266 for_each_online_node(nid) { 311 for_each_online_node(nid) {
267 node_remap_start_vaddr[nid] = pfn_to_kaddr( 312 node_remap_start_vaddr[nid] = pfn_to_kaddr(
268 (highstart_pfn + reserve_pages) - node_remap_offset[nid]); 313 highstart_pfn + node_remap_offset[nid]);
314 /* Init the node remap allocator */
315 node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
316 (node_remap_size[nid] * PAGE_SIZE);
317 node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
318 ALIGN(sizeof(pg_data_t), PAGE_SIZE);
319
269 allocate_pgdat(nid); 320 allocate_pgdat(nid);
270 printk ("node %d will remap to vaddr %08lx - %08lx\n", nid, 321 printk ("node %d will remap to vaddr %08lx - %08lx\n", nid,
271 (ulong) node_remap_start_vaddr[nid], 322 (ulong) node_remap_start_vaddr[nid],
272 (ulong) pfn_to_kaddr(highstart_pfn + reserve_pages 323 (ulong) pfn_to_kaddr(highstart_pfn
273 - node_remap_offset[nid] + node_remap_size[nid])); 324 + node_remap_offset[nid] + node_remap_size[nid]));
274 } 325 }
275 printk("High memory starts at vaddr %08lx\n", 326 printk("High memory starts at vaddr %08lx\n",
276 (ulong) pfn_to_kaddr(highstart_pfn)); 327 (ulong) pfn_to_kaddr(highstart_pfn));
@@ -333,23 +384,9 @@ void __init zone_sizes_init(void)
333 } 384 }
334 385
335 zholes_size = get_zholes_size(nid); 386 zholes_size = get_zholes_size(nid);
336 /* 387
337 * We let the lmem_map for node 0 be allocated from the 388 free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
338 * normal bootmem allocator, but other nodes come from the 389 zholes_size);
339 * remapped KVA area - mbligh
340 */
341 if (!nid)
342 free_area_init_node(nid, NODE_DATA(nid),
343 zones_size, start, zholes_size);
344 else {
345 unsigned long lmem_map;
346 lmem_map = (unsigned long)node_remap_start_vaddr[nid];
347 lmem_map += sizeof(pg_data_t) + PAGE_SIZE - 1;
348 lmem_map &= PAGE_MASK;
349 NODE_DATA(nid)->node_mem_map = (struct page *)lmem_map;
350 free_area_init_node(nid, NODE_DATA(nid), zones_size,
351 start, zholes_size);
352 }
353 } 390 }
354 return; 391 return;
355} 392}
@@ -358,24 +395,26 @@ void __init set_highmem_pages_init(int bad_ppro)
358{ 395{
359#ifdef CONFIG_HIGHMEM 396#ifdef CONFIG_HIGHMEM
360 struct zone *zone; 397 struct zone *zone;
398 struct page *page;
361 399
362 for_each_zone(zone) { 400 for_each_zone(zone) {
363 unsigned long node_pfn, node_high_size, zone_start_pfn; 401 unsigned long node_pfn, zone_start_pfn, zone_end_pfn;
364 struct page * zone_mem_map; 402
365
366 if (!is_highmem(zone)) 403 if (!is_highmem(zone))
367 continue; 404 continue;
368 405
369 printk("Initializing %s for node %d\n", zone->name,
370 zone->zone_pgdat->node_id);
371
372 node_high_size = zone->spanned_pages;
373 zone_mem_map = zone->zone_mem_map;
374 zone_start_pfn = zone->zone_start_pfn; 406 zone_start_pfn = zone->zone_start_pfn;
407 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
408
409 printk("Initializing %s for node %d (%08lx:%08lx)\n",
410 zone->name, zone->zone_pgdat->node_id,
411 zone_start_pfn, zone_end_pfn);
375 412
376 for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) { 413 for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
377 one_highpage_init((struct page *)(zone_mem_map + node_pfn), 414 if (!pfn_valid(node_pfn))
378 zone_start_pfn + node_pfn, bad_ppro); 415 continue;
416 page = pfn_to_page(node_pfn);
417 one_highpage_init(page, node_pfn, bad_ppro);
379 } 418 }
380 } 419 }
381 totalram_pages += totalhigh_pages; 420 totalram_pages += totalhigh_pages;
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index fc4c4cad4e98..4b7aaf99d7ea 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -1,4 +1,5 @@
1#include <linux/highmem.h> 1#include <linux/highmem.h>
2#include <linux/module.h>
2 3
3void *kmap(struct page *page) 4void *kmap(struct page *page)
4{ 5{
@@ -87,3 +88,8 @@ struct page *kmap_atomic_to_page(void *ptr)
87 return pte_page(*pte); 88 return pte_page(*pte);
88} 89}
89 90
91EXPORT_SYMBOL(kmap);
92EXPORT_SYMBOL(kunmap);
93EXPORT_SYMBOL(kmap_atomic);
94EXPORT_SYMBOL(kunmap_atomic);
95EXPORT_SYMBOL(kmap_atomic_to_page);
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 8766c771bb45..3672e2ef51ae 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -191,7 +191,7 @@ static inline int page_kills_ppro(unsigned long pagenr)
191 191
192extern int is_available_memory(efi_memory_desc_t *); 192extern int is_available_memory(efi_memory_desc_t *);
193 193
194static inline int page_is_ram(unsigned long pagenr) 194int page_is_ram(unsigned long pagenr)
195{ 195{
196 int i; 196 int i;
197 unsigned long addr, end; 197 unsigned long addr, end;
@@ -276,7 +276,9 @@ void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
276 SetPageReserved(page); 276 SetPageReserved(page);
277} 277}
278 278
279#ifndef CONFIG_DISCONTIGMEM 279#ifdef CONFIG_NUMA
280extern void set_highmem_pages_init(int);
281#else
280static void __init set_highmem_pages_init(int bad_ppro) 282static void __init set_highmem_pages_init(int bad_ppro)
281{ 283{
282 int pfn; 284 int pfn;
@@ -284,9 +286,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
284 one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro); 286 one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
285 totalram_pages += totalhigh_pages; 287 totalram_pages += totalhigh_pages;
286} 288}
287#else 289#endif /* CONFIG_FLATMEM */
288extern void set_highmem_pages_init(int);
289#endif /* !CONFIG_DISCONTIGMEM */
290 290
291#else 291#else
292#define kmap_init() do { } while (0) 292#define kmap_init() do { } while (0)
@@ -295,12 +295,13 @@ extern void set_highmem_pages_init(int);
295#endif /* CONFIG_HIGHMEM */ 295#endif /* CONFIG_HIGHMEM */
296 296
297unsigned long long __PAGE_KERNEL = _PAGE_KERNEL; 297unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
298EXPORT_SYMBOL(__PAGE_KERNEL);
298unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC; 299unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
299 300
300#ifndef CONFIG_DISCONTIGMEM 301#ifdef CONFIG_NUMA
301#define remap_numa_kva() do {} while (0)
302#else
303extern void __init remap_numa_kva(void); 302extern void __init remap_numa_kva(void);
303#else
304#define remap_numa_kva() do {} while (0)
304#endif 305#endif
305 306
306static void __init pagetable_init (void) 307static void __init pagetable_init (void)
@@ -525,7 +526,7 @@ static void __init set_max_mapnr_init(void)
525#else 526#else
526 num_physpages = max_low_pfn; 527 num_physpages = max_low_pfn;
527#endif 528#endif
528#ifndef CONFIG_DISCONTIGMEM 529#ifdef CONFIG_FLATMEM
529 max_mapnr = num_physpages; 530 max_mapnr = num_physpages;
530#endif 531#endif
531} 532}
@@ -539,7 +540,7 @@ void __init mem_init(void)
539 int tmp; 540 int tmp;
540 int bad_ppro; 541 int bad_ppro;
541 542
542#ifndef CONFIG_DISCONTIGMEM 543#ifdef CONFIG_FLATMEM
543 if (!mem_map) 544 if (!mem_map)
544 BUG(); 545 BUG();
545#endif 546#endif
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index ab542792b27b..d393eefc7052 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -11,6 +11,7 @@
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h>
14#include <asm/io.h> 15#include <asm/io.h>
15#include <asm/fixmap.h> 16#include <asm/fixmap.h>
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
@@ -165,7 +166,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
165 } 166 }
166 return (void __iomem *) (offset + (char __iomem *)addr); 167 return (void __iomem *) (offset + (char __iomem *)addr);
167} 168}
168 169EXPORT_SYMBOL(__ioremap);
169 170
170/** 171/**
171 * ioremap_nocache - map bus memory into CPU space 172 * ioremap_nocache - map bus memory into CPU space
@@ -222,6 +223,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
222 223
223 return p; 224 return p;
224} 225}
226EXPORT_SYMBOL(ioremap_nocache);
225 227
226void iounmap(volatile void __iomem *addr) 228void iounmap(volatile void __iomem *addr)
227{ 229{
@@ -255,6 +257,7 @@ out_unlock:
255 write_unlock(&vmlist_lock); 257 write_unlock(&vmlist_lock);
256 kfree(p); 258 kfree(p);
257} 259}
260EXPORT_SYMBOL(iounmap);
258 261
259void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 262void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
260{ 263{
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dd81479ff88a..270c59f099a4 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -30,13 +30,14 @@ void show_mem(void)
30 struct page *page; 30 struct page *page;
31 pg_data_t *pgdat; 31 pg_data_t *pgdat;
32 unsigned long i; 32 unsigned long i;
33 struct page_state ps;
33 34
34 printk("Mem-info:\n"); 35 printk("Mem-info:\n");
35 show_free_areas(); 36 show_free_areas();
36 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 37 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
37 for_each_pgdat(pgdat) { 38 for_each_pgdat(pgdat) {
38 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 39 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
39 page = pgdat->node_mem_map + i; 40 page = pgdat_page_nr(pgdat, i);
40 total++; 41 total++;
41 if (PageHighMem(page)) 42 if (PageHighMem(page))
42 highmem++; 43 highmem++;
@@ -53,6 +54,13 @@ void show_mem(void)
53 printk("%d reserved pages\n",reserved); 54 printk("%d reserved pages\n",reserved);
54 printk("%d pages shared\n",shared); 55 printk("%d pages shared\n",shared);
55 printk("%d pages swap cached\n",cached); 56 printk("%d pages swap cached\n",cached);
57
58 get_page_state(&ps);
59 printk("%lu pages dirty\n", ps.nr_dirty);
60 printk("%lu pages writeback\n", ps.nr_writeback);
61 printk("%lu pages mapped\n", ps.nr_mapped);
62 printk("%lu pages slab\n", ps.nr_slab);
63 printk("%lu pages pagetables\n", ps.nr_page_table_pages);
56} 64}
57 65
58/* 66/*
diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c
index 52d72e074f7f..65dfd2edb671 100644
--- a/arch/i386/oprofile/backtrace.c
+++ b/arch/i386/oprofile/backtrace.c
@@ -91,7 +91,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
91 head = (struct frame_head *)regs->ebp; 91 head = (struct frame_head *)regs->ebp;
92#endif 92#endif
93 93
94 if (!user_mode(regs)) { 94 if (!user_mode_vm(regs)) {
95 while (depth-- && valid_kernel_stack(head, regs)) 95 while (depth-- && valid_kernel_stack(head, regs))
96 head = dump_backtrace(head); 96 head = dump_backtrace(head);
97 return; 97 return;
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index da21b1d07c15..83458f81e661 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -227,6 +227,24 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
227} 227}
228 228
229/* 229/*
230 * The VIA pirq rules are nibble-based, like ALI,
231 * but without the ugly irq number munging.
232 * However, for 82C586, nibble map is different .
233 */
234static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
235{
236 static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
237 return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
238}
239
240static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
241{
242 static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
243 write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
244 return 1;
245}
246
247/*
230 * ITE 8330G pirq rules are nibble-based 248 * ITE 8330G pirq rules are nibble-based
231 * FIXME: pirqmap may be { 1, 0, 3, 2 }, 249 * FIXME: pirqmap may be { 1, 0, 3, 2 },
232 * 2+3 are both mapped to irq 9 on my system 250 * 2+3 are both mapped to irq 9 on my system
@@ -512,6 +530,10 @@ static __init int via_router_probe(struct irq_router *r, struct pci_dev *router,
512 switch(device) 530 switch(device)
513 { 531 {
514 case PCI_DEVICE_ID_VIA_82C586_0: 532 case PCI_DEVICE_ID_VIA_82C586_0:
533 r->name = "VIA";
534 r->get = pirq_via586_get;
535 r->set = pirq_via586_set;
536 return 1;
515 case PCI_DEVICE_ID_VIA_82C596: 537 case PCI_DEVICE_ID_VIA_82C596:
516 case PCI_DEVICE_ID_VIA_82C686: 538 case PCI_DEVICE_ID_VIA_82C686:
517 case PCI_DEVICE_ID_VIA_8231: 539 case PCI_DEVICE_ID_VIA_8231:
diff --git a/arch/i386/pci/pcbios.c b/arch/i386/pci/pcbios.c
index 141421b673b0..b9d65f0bc2d1 100644
--- a/arch/i386/pci/pcbios.c
+++ b/arch/i386/pci/pcbios.c
@@ -4,6 +4,7 @@
4 4
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/module.h>
7#include "pci.h" 8#include "pci.h"
8#include "pci-functions.h" 9#include "pci-functions.h"
9 10
@@ -456,7 +457,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
456 free_page(page); 457 free_page(page);
457 return rt; 458 return rt;
458} 459}
459 460EXPORT_SYMBOL(pcibios_get_irq_routing_table);
460 461
461int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) 462int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
462{ 463{
@@ -473,6 +474,7 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
473 "S" (&pci_indirect)); 474 "S" (&pci_indirect));
474 return !(ret & 0xff00); 475 return !(ret & 0xff00);
475} 476}
477EXPORT_SYMBOL(pcibios_set_irq_routing);
476 478
477static int __init pci_pcbios_init(void) 479static int __init pci_pcbios_init(void)
478{ 480{
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index cf337c673d92..6f521cf19a13 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -94,13 +94,13 @@ static void fix_processor_context(void)
94 * Now maybe reload the debug registers 94 * Now maybe reload the debug registers
95 */ 95 */
96 if (current->thread.debugreg[7]){ 96 if (current->thread.debugreg[7]){
97 loaddebug(&current->thread, 0); 97 set_debugreg(current->thread.debugreg[0], 0);
98 loaddebug(&current->thread, 1); 98 set_debugreg(current->thread.debugreg[1], 1);
99 loaddebug(&current->thread, 2); 99 set_debugreg(current->thread.debugreg[2], 2);
100 loaddebug(&current->thread, 3); 100 set_debugreg(current->thread.debugreg[3], 3);
101 /* no 4 and 5 */ 101 /* no 4 and 5 */
102 loaddebug(&current->thread, 6); 102 set_debugreg(current->thread.debugreg[6], 6);
103 loaddebug(&current->thread, 7); 103 set_debugreg(current->thread.debugreg[7], 7);
104 } 104 }
105 105
106} 106}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ce4dfa8b834d..01b78e7f992e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -161,6 +161,8 @@ config IA64_PAGE_SIZE_64KB
161 161
162endchoice 162endchoice
163 163
164source kernel/Kconfig.hz
165
164config IA64_BRL_EMU 166config IA64_BRL_EMU
165 bool 167 bool
166 depends on ITANIUM 168 depends on ITANIUM
@@ -197,7 +199,7 @@ config HOLES_IN_ZONE
197 bool 199 bool
198 default y if VIRTUAL_MEM_MAP 200 default y if VIRTUAL_MEM_MAP
199 201
200config DISCONTIGMEM 202config ARCH_DISCONTIGMEM_ENABLE
201 bool "Discontiguous memory support" 203 bool "Discontiguous memory support"
202 depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) && NUMA && VIRTUAL_MEM_MAP 204 depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) && NUMA && VIRTUAL_MEM_MAP
203 default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA 205 default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
@@ -300,6 +302,8 @@ config PREEMPT
300 Say Y here if you are building a kernel for a desktop, embedded 302 Say Y here if you are building a kernel for a desktop, embedded
301 or real-time system. Say N if you are unsure. 303 or real-time system. Say N if you are unsure.
302 304
305source "mm/Kconfig"
306
303config HAVE_DEC_LOCK 307config HAVE_DEC_LOCK
304 bool 308 bool
305 depends on (SMP || PREEMPT) 309 depends on (SMP || PREEMPT)
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index de9d507ba0fd..fda67ac993d7 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -2,6 +2,17 @@ menu "Kernel hacking"
2 2
3source "lib/Kconfig.debug" 3source "lib/Kconfig.debug"
4 4
5config KPROBES
6 bool "Kprobes"
7 depends on DEBUG_KERNEL
8 help
9 Kprobes allows you to trap at almost any kernel address and
10 execute a callback function. register_kprobe() establishes
11 a probepoint and specifies the callback. Kprobes is useful
12 for kernel debugging, non-intrusive instrumentation and testing.
13 If in doubt, say "N".
14
15
5choice 16choice
6 prompt "Physical memory granularity" 17 prompt "Physical memory granularity"
7 default IA64_GRANULE_64MB 18 default IA64_GRANULE_64MB
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index a01bb02d074d..487d2e36b0a6 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -78,7 +78,7 @@ CONFIG_IA64_L1_CACHE_SHIFT=7
78CONFIG_NUMA=y 78CONFIG_NUMA=y
79CONFIG_VIRTUAL_MEM_MAP=y 79CONFIG_VIRTUAL_MEM_MAP=y
80CONFIG_HOLES_IN_ZONE=y 80CONFIG_HOLES_IN_ZONE=y
81CONFIG_DISCONTIGMEM=y 81CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
82# CONFIG_IA64_CYCLONE is not set 82# CONFIG_IA64_CYCLONE is not set
83CONFIG_IOSAPIC=y 83CONFIG_IOSAPIC=y
84CONFIG_IA64_SGI_SN_SIM=y 84CONFIG_IA64_SGI_SN_SIM=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 7be8096e0561..8444add76380 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -84,7 +84,7 @@ CONFIG_IA64_L1_CACHE_SHIFT=7
84CONFIG_NUMA=y 84CONFIG_NUMA=y
85CONFIG_VIRTUAL_MEM_MAP=y 85CONFIG_VIRTUAL_MEM_MAP=y
86CONFIG_HOLES_IN_ZONE=y 86CONFIG_HOLES_IN_ZONE=y
87CONFIG_DISCONTIGMEM=y 87CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
88CONFIG_IA64_CYCLONE=y 88CONFIG_IA64_CYCLONE=y
89CONFIG_IOSAPIC=y 89CONFIG_IOSAPIC=y
90CONFIG_FORCE_MAX_ZONEORDER=18 90CONFIG_FORCE_MAX_ZONEORDER=18
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index b2de948bdaea..e3e9290e3ff2 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -241,7 +241,7 @@ typedef struct compat_siginfo {
241 241
242 /* POSIX.1b timers */ 242 /* POSIX.1b timers */
243 struct { 243 struct {
244 timer_t _tid; /* timer id */ 244 compat_timer_t _tid; /* timer id */
245 int _overrun; /* overrun count */ 245 int _overrun; /* overrun count */
246 char _pad[sizeof(unsigned int) - sizeof(int)]; 246 char _pad[sizeof(unsigned int) - sizeof(int)];
247 compat_sigval_t _sigval; /* same as below */ 247 compat_sigval_t _sigval; /* same as below */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 4c73d8ba2e3d..b2e2f6509eb0 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
20obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o 20obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
21obj-$(CONFIG_IA64_CYCLONE) += cyclone.o 21obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
22obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 22obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
23obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
23obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 24obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
24mca_recovery-y += mca_drv.o mca_drv_asm.o 25mca_recovery-y += mca_drv.o mca_drv_asm.o
25 26
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
new file mode 100644
index 000000000000..b7fa3ccd2b0f
--- /dev/null
+++ b/arch/ia64/kernel/jprobes.S
@@ -0,0 +1,61 @@
1/*
2 * Jprobe specific operations
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) Intel Corporation, 2005
19 *
20 * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
21 * <anil.s.keshavamurthy@intel.com> initial implementation
22 *
23 * Jprobes (a.k.a. "jump probes" which is built on-top of kprobes) allow a
24 * probe to be inserted into the beginning of a function call. The fundamental
25 * difference between a jprobe and a kprobe is the jprobe handler is executed
26 * in the same context as the target function, while the kprobe handlers
27 * are executed in interrupt context.
28 *
29 * For jprobes we initially gain control by placing a break point in the
30 * first instruction of the targeted function. When we catch that specific
31 * break, we:
32 * * set the return address to our jprobe_inst_return() function
33 * * jump to the jprobe handler function
34 *
35 * Since we fixed up the return address, the jprobe handler will return to our
36 * jprobe_inst_return() function, giving us control again. At this point we
37 * are back in the parents frame marker, so we do yet another call to our
38 * jprobe_break() function to fix up the frame marker as it would normally
39 * exist in the target function.
40 *
41 * Our jprobe_return function then transfers control back to kprobes.c by
42 * executing a break instruction using one of our reserved numbers. When we
43 * catch that break in kprobes.c, we continue like we do for a normal kprobe
44 * by single stepping the emulated instruction, and then returning execution
45 * to the correct location.
46 */
47#include <asm/asmmacro.h>
48
49 /*
50 * void jprobe_break(void)
51 */
52ENTRY(jprobe_break)
53 break.m 0x80300
54END(jprobe_break)
55
56 /*
57 * void jprobe_inst_return(void)
58 */
59GLOBAL_ENTRY(jprobe_inst_return)
60 br.call.sptk.many b0=jprobe_break
61END(jprobe_inst_return)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
new file mode 100644
index 000000000000..5978823d5c63
--- /dev/null
+++ b/arch/ia64/kernel/kprobes.c
@@ -0,0 +1,601 @@
1/*
2 * Kernel Probes (KProbes)
3 * arch/ia64/kernel/kprobes.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * Copyright (C) IBM Corporation, 2002, 2004
20 * Copyright (C) Intel Corporation, 2005
21 *
22 * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
23 * <anil.s.keshavamurthy@intel.com> adapted from i386
24 */
25
26#include <linux/config.h>
27#include <linux/kprobes.h>
28#include <linux/ptrace.h>
29#include <linux/spinlock.h>
30#include <linux/string.h>
31#include <linux/slab.h>
32#include <linux/preempt.h>
33#include <linux/moduleloader.h>
34
35#include <asm/pgtable.h>
36#include <asm/kdebug.h>
37
38extern void jprobe_inst_return(void);
39
40/* kprobe_status settings */
41#define KPROBE_HIT_ACTIVE 0x00000001
42#define KPROBE_HIT_SS 0x00000002
43
44static struct kprobe *current_kprobe, *kprobe_prev;
45static unsigned long kprobe_status, kprobe_status_prev;
46static struct pt_regs jprobe_saved_regs;
47
48enum instruction_type {A, I, M, F, B, L, X, u};
49static enum instruction_type bundle_encoding[32][3] = {
50 { M, I, I }, /* 00 */
51 { M, I, I }, /* 01 */
52 { M, I, I }, /* 02 */
53 { M, I, I }, /* 03 */
54 { M, L, X }, /* 04 */
55 { M, L, X }, /* 05 */
56 { u, u, u }, /* 06 */
57 { u, u, u }, /* 07 */
58 { M, M, I }, /* 08 */
59 { M, M, I }, /* 09 */
60 { M, M, I }, /* 0A */
61 { M, M, I }, /* 0B */
62 { M, F, I }, /* 0C */
63 { M, F, I }, /* 0D */
64 { M, M, F }, /* 0E */
65 { M, M, F }, /* 0F */
66 { M, I, B }, /* 10 */
67 { M, I, B }, /* 11 */
68 { M, B, B }, /* 12 */
69 { M, B, B }, /* 13 */
70 { u, u, u }, /* 14 */
71 { u, u, u }, /* 15 */
72 { B, B, B }, /* 16 */
73 { B, B, B }, /* 17 */
74 { M, M, B }, /* 18 */
75 { M, M, B }, /* 19 */
76 { u, u, u }, /* 1A */
77 { u, u, u }, /* 1B */
78 { M, F, B }, /* 1C */
79 { M, F, B }, /* 1D */
80 { u, u, u }, /* 1E */
81 { u, u, u }, /* 1F */
82};
83
84/*
85 * In this function we check to see if the instruction
86 * is IP relative instruction and update the kprobe
87 * inst flag accordingly
88 */
89static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
90 unsigned long kprobe_inst, struct kprobe *p)
91{
92 p->ainsn.inst_flag = 0;
93 p->ainsn.target_br_reg = 0;
94
95 if (bundle_encoding[template][slot] == B) {
96 switch (major_opcode) {
97 case INDIRECT_CALL_OPCODE:
98 p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
99 p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
100 break;
101 case IP_RELATIVE_PREDICT_OPCODE:
102 case IP_RELATIVE_BRANCH_OPCODE:
103 p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
104 break;
105 case IP_RELATIVE_CALL_OPCODE:
106 p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
107 p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
108 p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
109 break;
110 }
111 } else if (bundle_encoding[template][slot] == X) {
112 switch (major_opcode) {
113 case LONG_CALL_OPCODE:
114 p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
115 p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
116 break;
117 }
118 }
119 return;
120}
121
122/*
123 * In this function we check to see if the instruction
124 * on which we are inserting kprobe is supported.
125 * Returns 0 if supported
126 * Returns -EINVAL if unsupported
127 */
128static int unsupported_inst(uint template, uint slot, uint major_opcode,
129 unsigned long kprobe_inst, struct kprobe *p)
130{
131 unsigned long addr = (unsigned long)p->addr;
132
133 if (bundle_encoding[template][slot] == I) {
134 switch (major_opcode) {
135 case 0x0: //I_UNIT_MISC_OPCODE:
136 /*
137 * Check for Integer speculation instruction
138 * - Bit 33-35 to be equal to 0x1
139 */
140 if (((kprobe_inst >> 33) & 0x7) == 1) {
141 printk(KERN_WARNING
142 "Kprobes on speculation inst at <0x%lx> not supported\n",
143 addr);
144 return -EINVAL;
145 }
146
147 /*
148 * IP relative mov instruction
149 * - Bit 27-35 to be equal to 0x30
150 */
151 if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
152 printk(KERN_WARNING
153 "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
154 addr);
155 return -EINVAL;
156
157 }
158 }
159 }
160 return 0;
161}
162
163
164/*
165 * In this function we check to see if the instruction
166 * (qp) cmpx.crel.ctype p1,p2=r2,r3
167 * on which we are inserting kprobe is cmp instruction
168 * with ctype as unc.
169 */
170static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
171unsigned long kprobe_inst)
172{
173 cmp_inst_t cmp_inst;
174 uint ctype_unc = 0;
175
176 if (!((bundle_encoding[template][slot] == I) ||
177 (bundle_encoding[template][slot] == M)))
178 goto out;
179
180 if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
181 (major_opcode == 0xE)))
182 goto out;
183
184 cmp_inst.l = kprobe_inst;
185 if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
186 /* Integere compare - Register Register (A6 type)*/
187 if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
188 &&(cmp_inst.f.c == 1))
189 ctype_unc = 1;
190 } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
191 /* Integere compare - Immediate Register (A8 type)*/
192 if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
193 ctype_unc = 1;
194 }
195out:
196 return ctype_unc;
197}
198
199/*
200 * In this function we override the bundle with
201 * the break instruction at the given slot.
202 */
203static void prepare_break_inst(uint template, uint slot, uint major_opcode,
204 unsigned long kprobe_inst, struct kprobe *p)
205{
206 unsigned long break_inst = BREAK_INST;
207 bundle_t *bundle = &p->ainsn.insn.bundle;
208
209 /*
210 * Copy the original kprobe_inst qualifying predicate(qp)
211 * to the break instruction iff !is_cmp_ctype_unc_inst
212 * because for cmp instruction with ctype equal to unc,
213 * which is a special instruction always needs to be
214 * executed regradless of qp
215 */
216 if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
217 break_inst |= (0x3f & kprobe_inst);
218
219 switch (slot) {
220 case 0:
221 bundle->quad0.slot0 = break_inst;
222 break;
223 case 1:
224 bundle->quad0.slot1_p0 = break_inst;
225 bundle->quad1.slot1_p1 = break_inst >> (64-46);
226 break;
227 case 2:
228 bundle->quad1.slot2 = break_inst;
229 break;
230 }
231
232 /*
233 * Update the instruction flag, so that we can
234 * emulate the instruction properly after we
235 * single step on original instruction
236 */
237 update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
238}
239
240static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
241 unsigned long *kprobe_inst, uint *major_opcode)
242{
243 unsigned long kprobe_inst_p0, kprobe_inst_p1;
244 unsigned int template;
245
246 template = bundle->quad0.template;
247
248 switch (slot) {
249 case 0:
250 *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
251 *kprobe_inst = bundle->quad0.slot0;
252 break;
253 case 1:
254 *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
255 kprobe_inst_p0 = bundle->quad0.slot1_p0;
256 kprobe_inst_p1 = bundle->quad1.slot1_p1;
257 *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
258 break;
259 case 2:
260 *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
261 *kprobe_inst = bundle->quad1.slot2;
262 break;
263 }
264}
265
266static int valid_kprobe_addr(int template, int slot, unsigned long addr)
267{
268 if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
269 printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
270 addr);
271 return -EINVAL;
272 }
273 return 0;
274}
275
276static inline void save_previous_kprobe(void)
277{
278 kprobe_prev = current_kprobe;
279 kprobe_status_prev = kprobe_status;
280}
281
282static inline void restore_previous_kprobe(void)
283{
284 current_kprobe = kprobe_prev;
285 kprobe_status = kprobe_status_prev;
286}
287
/* Record @p as the kprobe currently being handled on this CPU. */
static inline void set_current_kprobe(struct kprobe *p)
{
	current_kprobe = p;
}
292
/*
 * arch_prepare_kprobe - validate the probe address and snapshot the
 * probed bundle.
 *
 * p->addr encodes the 16-byte-aligned bundle address in the upper bits
 * and the slot number in the low 4 bits.  Two copies of the bundle are
 * taken: p->opcode keeps the pristine original (used by
 * arch_disarm_kprobe() and resume_execution()), while p->ainsn is the
 * working copy into which prepare_break_inst() writes the break.
 *
 * Returns 0 on success, -EINVAL if the slot is invalid for this bundle
 * template or the instruction is unsupported.
 */
int arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	/* Mask off the slot bits to get the bundle's base address. */
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst=0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle = &p->ainsn.insn.bundle;

	memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
	memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));

	template = bundle->quad0.template;

	if(valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
		return -EINVAL;

	/* Replace the instruction in the working copy with a break. */
	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);

	return 0;
}
323
324void arch_arm_kprobe(struct kprobe *p)
325{
326 unsigned long addr = (unsigned long)p->addr;
327 unsigned long arm_addr = addr & ~0xFULL;
328
329 memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
330 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
331}
332
333void arch_disarm_kprobe(struct kprobe *p)
334{
335 unsigned long addr = (unsigned long)p->addr;
336 unsigned long arm_addr = addr & ~0xFULL;
337
338 /* p->opcode contains the original unaltered bundle */
339 memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
340 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
341}
342
/*
 * arch_remove_kprobe - nothing to release: all per-probe state set up
 * by arch_prepare_kprobe() lives inside struct kprobe itself.
 */
void arch_remove_kprobe(struct kprobe *p)
{
}
346
/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
 * the ip to point back to the original stack address. To set the IP address
 * to original stack address, handle the case where we need to fixup the
 * relative IP address and/or fixup branch register.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	/* Address of the out-of-line copy that was just single-stepped. */
	unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
	/* Bundle-aligned address of the original probed instruction. */
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->opcode.bundle.quad0.template;

	/* MLX bundles have no slot 1; the probe really sits in slot 2. */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix target branch register, software convention is
			 * to use either b0 or b6 or b7, so just checking
			 * only those registers
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				/* Rebase only if it targets the copy or the
				 * bundle right after it (+0x10). */
				if ((regs->b0 == bundle_addr) ||
					(regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
					(regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
					(regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	/* No special fixup: if execution fell off the copied bundle,
	 * redirect it to the corresponding point after the original. */
	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}
421
/*
 * prepare_ss - arrange for the CPU to single-step the saved copy of
 * the probed bundle (p->opcode.bundle) instead of the original
 * location; resume_execution() later maps the result back.
 */
static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* Update instruction pointer (IIP) and slot number (IPSR.ri) */
	regs->cr_iip = bundle_addr & ~0xFULL;

	/* Clamp out-of-range slot numbers back to slot 0. */
	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}
438
/*
 * pre_kprobes_handler - entry point for break faults routed through
 * the die-notifier chain (DIE_BREAK).
 *
 * Handles three situations:
 *  - reentrant hit of another probe while a probe is already being
 *    handled: save state and single-step without user handlers;
 *  - a jprobe-instrumented function completing (__IA64_BREAK_JPROBE):
 *    invoke the break_handler and go straight to single-stepping;
 *  - a fresh probe hit: run the user pre_handler and set up
 *    single-stepping of the copied bundle.
 *
 * Returns 1 when the fault was consumed (with preemption still
 * disabled until the post handler runs), 0 otherwise.
 *
 * NOTE(review): if the __IA64_BREAK_JPROBE path's break_handler
 * returns 0, control falls through to lock_kprobes() below while a
 * probe is already active — verify that this path cannot deadlock or
 * double-lock.
 */
static int pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);

	/* Re-enabled in post_kprobes_handler() or on the no_kprobe path. */
	preempt_disable();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			/* Hit our own break while single-stepping: bail. */
			if (kprobe_status == KPROBE_HIT_SS) {
				unlock_kprobes();
				goto no_kprobe;
			}
			/* We have reentered the pre_kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe();
			set_current_kprobe(p);
			p->nmissed++;
			prepare_ss(p, regs);
			kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (args->err == __IA64_BREAK_JPROBE) {
			/*
			 * jprobe instrumented function just completed
			 */
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p);

	if (p->pre_handler && p->pre_handler(p, regs))
		/*
		 * Our pre-handler is specifically requesting that we just
		 * do a return. This is handling the case where the
		 * pre-handler is really our special jprobe pre-handler.
		 */
		return 1;

ss_probe:
	prepare_ss(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
509
/*
 * post_kprobes_handler - runs on the single-step trap (DIE_SS) after
 * the copied instruction executed: calls the user post_handler (unless
 * this was a reentrant hit), fixes up the registers via
 * resume_execution(), and restores saved state / drops the lock.
 *
 * Returns 1 if kprobes consumed the trap, 0 if no probe was active.
 */
static int post_kprobes_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;

	/* Reentrant hits skip the user post_handler. */
	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
		kprobe_status = KPROBE_HIT_SSDONE;
		current_kprobe->post_handler(current_kprobe, regs, 0);
	}

	resume_execution(current_kprobe, regs);

	/*Restore back the original saved kprobes variables and continue. */
	if (kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe();
		goto out;
	}

	unlock_kprobes();

out:
	/* Balances the preempt_disable() in pre_kprobes_handler(). */
	preempt_enable_no_resched();
	return 1;
}
534
/*
 * kprobes_fault_handler - called for page faults (DIE_PAGE_FAULT) that
 * occur while a probe is active, e.g. a fault raised by the
 * single-stepped copy.  Gives the probe's fault_handler first refusal,
 * then unwinds the single-step state so the fault can be handled
 * normally.
 *
 * Returns 1 if the probe's fault_handler consumed the fault, 0 to let
 * normal fault processing continue.
 *
 * NOTE(review): the test uses a bitwise AND (kprobe_status &
 * KPROBE_HIT_SS); if the status values are distinct codes rather than
 * flag bits this should arguably be '==' — confirm against the
 * KPROBE_* definitions.
 */
static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (!kprobe_running())
		return 0;

	if (current_kprobe->fault_handler &&
		current_kprobe->fault_handler(current_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(current_kprobe, regs);
		unlock_kprobes();
		preempt_enable_no_resched();
	}

	return 0;
}
552
553int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
554 void *data)
555{
556 struct die_args *args = (struct die_args *)data;
557 switch(val) {
558 case DIE_BREAK:
559 if (pre_kprobes_handler(args))
560 return NOTIFY_STOP;
561 break;
562 case DIE_SS:
563 if (post_kprobes_handler(args->regs))
564 return NOTIFY_STOP;
565 break;
566 case DIE_PAGE_FAULT:
567 if (kprobes_fault_handler(args->regs, args->trapnr))
568 return NOTIFY_STOP;
569 default:
570 break;
571 }
572 return NOTIFY_DONE;
573}
574
/*
 * setjmp_pre_handler - jprobe pre-handler.
 *
 * Saves the complete register frame in jprobe_saved_regs (restored by
 * longjmp_break_handler()), then redirects execution to the jprobe's
 * instrumented entry.  jp->entry is an ia64 function descriptor, so
 * both the instruction address (ip) and the global pointer (gp, loaded
 * into r1) are taken from it.  The return branch register b0 is aimed
 * at jprobe_inst_return() so control comes back into jprobe handling
 * when the instrumented function returns.
 *
 * Always returns 1, telling the caller to resume at the new IP.
 */
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;

	/* save architectural state */
	jprobe_saved_regs = *regs;

	/* after rfi, execute the jprobe instrumented function */
	regs->cr_iip = addr & ~0xFULL;
	ia64_psr(regs)->ri = addr & 0xf;
	regs->r1 = ((struct fnptr *)(jp->entry))->gp;

	/*
	 * fix the return address to our jprobe_inst_return() function
	 * in the jprobes.S file
	 */
	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

	return 1;
}
596
/*
 * longjmp_break_handler - counterpart of setjmp_pre_handler(): restore
 * the register state saved before the jprobe handler ran, so execution
 * resumes as if the instrumented function had been entered normally.
 * Always returns 1 (break consumed).
 */
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	*regs = jprobe_saved_regs;
	return 1;
}
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 1861173bd4f6..e7e520d90f03 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -21,12 +21,26 @@
21#include <asm/intrinsics.h> 21#include <asm/intrinsics.h>
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/kdebug.h>
24 25
25extern spinlock_t timerlist_lock; 26extern spinlock_t timerlist_lock;
26 27
27fpswa_interface_t *fpswa_interface; 28fpswa_interface_t *fpswa_interface;
28EXPORT_SYMBOL(fpswa_interface); 29EXPORT_SYMBOL(fpswa_interface);
29 30
31struct notifier_block *ia64die_chain;
32static DEFINE_SPINLOCK(die_notifier_lock);
33
34int register_die_notifier(struct notifier_block *nb)
35{
36 int err = 0;
37 unsigned long flags;
38 spin_lock_irqsave(&die_notifier_lock, flags);
39 err = notifier_chain_register(&ia64die_chain, nb);
40 spin_unlock_irqrestore(&die_notifier_lock, flags);
41 return err;
42}
43
30void __init 44void __init
31trap_init (void) 45trap_init (void)
32{ 46{
@@ -137,6 +151,10 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
137 151
138 switch (break_num) { 152 switch (break_num) {
139 case 0: /* unknown error (used by GCC for __builtin_abort()) */ 153 case 0: /* unknown error (used by GCC for __builtin_abort()) */
154 if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
155 == NOTIFY_STOP) {
156 return;
157 }
140 die_if_kernel("bugcheck!", regs, break_num); 158 die_if_kernel("bugcheck!", regs, break_num);
141 sig = SIGILL; code = ILL_ILLOPC; 159 sig = SIGILL; code = ILL_ILLOPC;
142 break; 160 break;
@@ -189,6 +207,15 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
189 sig = SIGILL; code = __ILL_BNDMOD; 207 sig = SIGILL; code = __ILL_BNDMOD;
190 break; 208 break;
191 209
210 case 0x80200:
211 case 0x80300:
212 if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP)
213 == NOTIFY_STOP) {
214 return;
215 }
216 sig = SIGTRAP; code = TRAP_BRKPT;
217 break;
218
192 default: 219 default:
193 if (break_num < 0x40000 || break_num > 0x100000) 220 if (break_num < 0x40000 || break_num > 0x100000)
194 die_if_kernel("Bad break", regs, break_num); 221 die_if_kernel("Bad break", regs, break_num);
@@ -548,7 +575,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
548#endif 575#endif
549 break; 576 break;
550 case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break; 577 case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
551 case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break; 578 case 36:
579 if (notify_die(DIE_SS, "ss", &regs, vector,
580 vector, SIGTRAP) == NOTIFY_STOP)
581 return;
582 siginfo.si_code = TRAP_TRACE; ifa = 0; break;
552 } 583 }
553 siginfo.si_signo = SIGTRAP; 584 siginfo.si_signo = SIGTRAP;
554 siginfo.si_errno = 0; 585 siginfo.si_errno = 0;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c00710929390..f3fd528ead3b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -560,14 +560,15 @@ void show_mem(void)
560 int shared = 0, cached = 0, reserved = 0; 560 int shared = 0, cached = 0, reserved = 0;
561 printk("Node ID: %d\n", pgdat->node_id); 561 printk("Node ID: %d\n", pgdat->node_id);
562 for(i = 0; i < pgdat->node_spanned_pages; i++) { 562 for(i = 0; i < pgdat->node_spanned_pages; i++) {
563 struct page *page = pgdat_page_nr(pgdat, i);
563 if (!ia64_pfn_valid(pgdat->node_start_pfn+i)) 564 if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
564 continue; 565 continue;
565 if (PageReserved(pgdat->node_mem_map+i)) 566 if (PageReserved(page))
566 reserved++; 567 reserved++;
567 else if (PageSwapCache(pgdat->node_mem_map+i)) 568 else if (PageSwapCache(page))
568 cached++; 569 cached++;
569 else if (page_count(pgdat->node_mem_map+i)) 570 else if (page_count(page))
570 shared += page_count(pgdat->node_mem_map+i)-1; 571 shared += page_count(page)-1;
571 } 572 }
572 total_present += present; 573 total_present += present;
573 total_reserved += reserved; 574 total_reserved += reserved;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 4174ec999dde..ff62551eb3a1 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -14,6 +14,7 @@
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/system.h> 15#include <asm/system.h>
16#include <asm/uaccess.h> 16#include <asm/uaccess.h>
17#include <asm/kdebug.h>
17 18
18extern void die (char *, struct pt_regs *, long); 19extern void die (char *, struct pt_regs *, long);
19 20
@@ -102,6 +103,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
102 goto bad_area_no_up; 103 goto bad_area_no_up;
103#endif 104#endif
104 105
106 /*
107 * This is to handle the kprobes on user space access instructions
108 */
109 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
110 SIGSEGV) == NOTIFY_STOP)
111 return;
112
105 down_read(&mm->mmap_sem); 113 down_read(&mm->mmap_sem);
106 114
107 vma = find_vma_prev(mm, address, &prev_vma); 115 vma = find_vma_prev(mm, address, &prev_vma);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 64c133344afe..42ca8a39798d 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -172,11 +172,13 @@ config NOHIGHMEM
172 bool 172 bool
173 default y 173 default y
174 174
175config DISCONTIGMEM 175config ARCH_DISCONTIGMEM_ENABLE
176 bool "Internal RAM Support" 176 bool "Internal RAM Support"
177 depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP 177 depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP
178 default y 178 default y
179 179
180source "mm/Kconfig"
181
180config IRAM_START 182config IRAM_START
181 hex "Internal memory start address (hex)" 183 hex "Internal memory start address (hex)"
182 default "00f00000" 184 default "00f00000"
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index bc423d838fb8..d9a40b1fe8ba 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -49,7 +49,7 @@ void show_mem(void)
49 printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); 49 printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
50 for_each_pgdat(pgdat) { 50 for_each_pgdat(pgdat) {
51 for (i = 0; i < pgdat->node_spanned_pages; ++i) { 51 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
52 page = pgdat->node_mem_map + i; 52 page = pgdat_page_nr(pgdat, i);
53 total++; 53 total++;
54 if (PageHighMem(page)) 54 if (PageHighMem(page))
55 highmem++; 55 highmem++;
@@ -152,7 +152,7 @@ int __init reservedpages_count(void)
152 reservedpages = 0; 152 reservedpages = 0;
153 for_each_online_node(nid) 153 for_each_online_node(nid)
154 for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++) 154 for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++)
155 if (PageReserved(NODE_DATA(nid)->node_mem_map + i)) 155 if (PageReserved(nid_page_nr(nid, i)))
156 reservedpages++; 156 reservedpages++;
157 157
158 return reservedpages; 158 return reservedpages;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d0713c7d9f0a..691a2469ff36 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -357,6 +357,8 @@ config 060_WRITETHROUGH
357 is hardwired on. The 53c710 SCSI driver is known to suffer from 357 is hardwired on. The 53c710 SCSI driver is known to suffer from
358 this problem. 358 this problem.
359 359
360source "mm/Kconfig"
361
360endmenu 362endmenu
361 363
362menu "General setup" 364menu "General setup"
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index e729bd280623..dbfcdc8e6087 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -532,6 +532,8 @@ config ROMKERNEL
532 532
533endchoice 533endchoice
534 534
535source "mm/Kconfig"
536
535endmenu 537endmenu
536 538
537config ISA_DMA_API 539config ISA_DMA_API
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ab9944693f1f..94f5a8eb2c22 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -492,7 +492,7 @@ config SGI_SN0_N_MODE
492 which allows for more memory. Your system is most probably 492 which allows for more memory. Your system is most probably
493 running in M-Mode, so you should say N here. 493 running in M-Mode, so you should say N here.
494 494
495config DISCONTIGMEM 495config ARCH_DISCONTIGMEM_ENABLE
496 bool 496 bool
497 default y if SGI_IP27 497 default y if SGI_IP27
498 help 498 help
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 13472292d0ec..b5bab3a42fc4 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -82,7 +82,7 @@ CONFIG_STOP_MACHINE=y
82# CONFIG_SGI_IP22 is not set 82# CONFIG_SGI_IP22 is not set
83CONFIG_SGI_IP27=y 83CONFIG_SGI_IP27=y
84# CONFIG_SGI_SN0_N_MODE is not set 84# CONFIG_SGI_SN0_N_MODE is not set
85CONFIG_DISCONTIGMEM=y 85CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
86CONFIG_NUMA=y 86CONFIG_NUMA=y
87# CONFIG_MAPPED_KERNEL is not set 87# CONFIG_MAPPED_KERNEL is not set
88# CONFIG_REPLICATE_KTEXT is not set 88# CONFIG_REPLICATE_KTEXT is not set
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 0a44a98d7adc..a160d04f7dbe 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -549,9 +549,8 @@ void __init mem_init(void)
549 */ 549 */
550 numslots = node_getlastslot(node); 550 numslots = node_getlastslot(node);
551 for (slot = 1; slot <= numslots; slot++) { 551 for (slot = 1; slot <= numslots; slot++) {
552 p = NODE_DATA(node)->node_mem_map + 552 p = nid_page_nr(node, slot_getbasepfn(node, slot) -
553 (slot_getbasepfn(node, slot) - 553 slot_getbasepfn(node, 0));
554 slot_getbasepfn(node, 0));
555 554
556 /* 555 /*
557 * Free valid memory in current slot. 556 * Free valid memory in current slot.
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index e7e7c56fc212..ce327c799b44 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -148,7 +148,7 @@ config HOTPLUG_CPU
148 default y if SMP 148 default y if SMP
149 select HOTPLUG 149 select HOTPLUG
150 150
151config DISCONTIGMEM 151config ARCH_DISCONTIGMEM_ENABLE
152 bool "Discontiguous memory support (EXPERIMENTAL)" 152 bool "Discontiguous memory support (EXPERIMENTAL)"
153 depends on EXPERIMENTAL 153 depends on EXPERIMENTAL
154 help 154 help
@@ -157,6 +157,8 @@ config DISCONTIGMEM
157 or have huge holes in the physical address space for other reasons. 157 or have huge holes in the physical address space for other reasons.
158 See <file:Documentation/vm/numa> for more. 158 See <file:Documentation/vm/numa> for more.
159 159
160source "mm/Kconfig"
161
160config PREEMPT 162config PREEMPT
161 bool 163 bool
162# bool "Preemptible Kernel" 164# bool "Preemptible Kernel"
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index cac37589e35c..2886ad70db48 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -506,7 +506,7 @@ void show_mem(void)
506 for (j = node_start_pfn(i); j < node_end_pfn(i); j++) { 506 for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
507 struct page *p; 507 struct page *p;
508 508
509 p = node_mem_map(i) + j - node_start_pfn(i); 509 p = nid_page_nr(i, j) - node_start_pfn(i);
510 510
511 total++; 511 total++;
512 if (PageReserved(p)) 512 if (PageReserved(p))
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 10162b187bcf..848f43970a4b 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -905,6 +905,8 @@ config PREEMPT
905config HIGHMEM 905config HIGHMEM
906 bool "High memory support" 906 bool "High memory support"
907 907
908source "mm/Kconfig"
909
908source "fs/Kconfig.binfmt" 910source "fs/Kconfig.binfmt"
909 911
910config PROC_DEVICETREE 912config PROC_DEVICETREE
diff --git a/arch/ppc/boot/simple/misc.c b/arch/ppc/boot/simple/misc.c
index ab0f9902cb67..e02de5b467a4 100644
--- a/arch/ppc/boot/simple/misc.c
+++ b/arch/ppc/boot/simple/misc.c
@@ -222,7 +222,7 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
222 puts("\n"); 222 puts("\n");
223 223
224 puts("Uncompressing Linux..."); 224 puts("Uncompressing Linux...");
225 gunzip(0x0, 0x400000, zimage_start, &zimage_size); 225 gunzip(NULL, 0x400000, zimage_start, &zimage_size);
226 puts("done.\n"); 226 puts("done.\n");
227 227
228 /* get the bi_rec address */ 228 /* get the bi_rec address */
diff --git a/arch/ppc/boot/simple/mpc10x_memory.c b/arch/ppc/boot/simple/mpc10x_memory.c
index 977daedc14c0..20d92a34ceb8 100644
--- a/arch/ppc/boot/simple/mpc10x_memory.c
+++ b/arch/ppc/boot/simple/mpc10x_memory.c
@@ -33,7 +33,7 @@
33 33
34#define MPC10X_PCI_OP(rw, size, type, op, mask) \ 34#define MPC10X_PCI_OP(rw, size, type, op, mask) \
35static void \ 35static void \
36mpc10x_##rw##_config_##size(unsigned int *cfg_addr, \ 36mpc10x_##rw##_config_##size(unsigned int __iomem *cfg_addr, \
37 unsigned int *cfg_data, int devfn, int offset, \ 37 unsigned int *cfg_data, int devfn, int offset, \
38 type val) \ 38 type val) \
39{ \ 39{ \
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 0f1fa289744e..cb27068bfcd4 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -77,6 +77,10 @@ config PPC_PSERIES
77 bool " IBM pSeries & new iSeries" 77 bool " IBM pSeries & new iSeries"
78 default y 78 default y
79 79
80config PPC_BPA
81 bool " Broadband Processor Architecture"
82 depends on PPC_MULTIPLATFORM
83
80config PPC_PMAC 84config PPC_PMAC
81 depends on PPC_MULTIPLATFORM 85 depends on PPC_MULTIPLATFORM
82 bool " Apple G5 based machines" 86 bool " Apple G5 based machines"
@@ -106,6 +110,21 @@ config PPC_OF
106 bool 110 bool
107 default y 111 default y
108 112
113config XICS
114 depends on PPC_PSERIES
115 bool
116 default y
117
118config MPIC
119 depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
120 bool
121 default y
122
123config BPA_IIC
124 depends on PPC_BPA
125 bool
126 default y
127
109# VMX is pSeries only for now until somebody writes the iSeries 128# VMX is pSeries only for now until somebody writes the iSeries
110# exception vectors for it 129# exception vectors for it
111config ALTIVEC 130config ALTIVEC
@@ -198,13 +217,49 @@ config HMT
198 This option enables hardware multithreading on RS64 cpus. 217 This option enables hardware multithreading on RS64 cpus.
199 pSeries systems p620 and p660 have such a cpu type. 218 pSeries systems p620 and p660 have such a cpu type.
200 219
201config DISCONTIGMEM 220config ARCH_SELECT_MEMORY_MODEL
202 bool "Discontiguous Memory Support" 221 def_bool y
222
223config ARCH_FLATMEM_ENABLE
224 def_bool y
225 depends on !NUMA
226
227config ARCH_DISCONTIGMEM_ENABLE
228 def_bool y
203 depends on SMP && PPC_PSERIES 229 depends on SMP && PPC_PSERIES
204 230
231config ARCH_DISCONTIGMEM_DEFAULT
232 def_bool y
233 depends on ARCH_DISCONTIGMEM_ENABLE
234
235config ARCH_FLATMEM_ENABLE
236 def_bool y
237
238config ARCH_SPARSEMEM_ENABLE
239 def_bool y
240 depends on ARCH_DISCONTIGMEM_ENABLE
241
242source "mm/Kconfig"
243
244config HAVE_ARCH_EARLY_PFN_TO_NID
245 def_bool y
246 depends on NEED_MULTIPLE_NODES
247
248# Some NUMA nodes have memory ranges that span
249# other nodes. Even though a pfn is valid and
250# between a node's start and end pfns, it may not
251# reside on that node.
252#
253# This is a relatively temporary hack that should
254# be able to go away when sparsemem is fully in
255# place
256config NODES_SPAN_OTHER_NODES
257 def_bool y
258 depends on NEED_MULTIPLE_NODES
259
205config NUMA 260config NUMA
206 bool "NUMA support" 261 bool "NUMA support"
207 depends on DISCONTIGMEM 262 default y if DISCONTIGMEM || SPARSEMEM
208 263
209config SCHED_SMT 264config SCHED_SMT
210 bool "SMT (Hyperthreading) scheduler support" 265 bool "SMT (Hyperthreading) scheduler support"
@@ -256,7 +311,7 @@ config MSCHUNKS
256 311
257config PPC_RTAS 312config PPC_RTAS
258 bool 313 bool
259 depends on PPC_PSERIES 314 depends on PPC_PSERIES || PPC_BPA
260 default y 315 default y
261 316
262config RTAS_PROC 317config RTAS_PROC
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 33c752ceca4b..731b84758331 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -90,12 +90,14 @@ boot := arch/ppc64/boot
90boottarget-$(CONFIG_PPC_PSERIES) := zImage zImage.initrd 90boottarget-$(CONFIG_PPC_PSERIES) := zImage zImage.initrd
91boottarget-$(CONFIG_PPC_MAPLE) := zImage zImage.initrd 91boottarget-$(CONFIG_PPC_MAPLE) := zImage zImage.initrd
92boottarget-$(CONFIG_PPC_ISERIES) := vmlinux.sminitrd vmlinux.initrd vmlinux.sm 92boottarget-$(CONFIG_PPC_ISERIES) := vmlinux.sminitrd vmlinux.initrd vmlinux.sm
93boottarget-$(CONFIG_PPC_BPA) := zImage zImage.initrd
93$(boottarget-y): vmlinux 94$(boottarget-y): vmlinux
94 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 95 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
95 96
96bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage 97bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
97bootimage-$(CONFIG_PPC_PMAC) := vmlinux 98bootimage-$(CONFIG_PPC_PMAC) := vmlinux
98bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage 99bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage
100bootimage-$(CONFIG_PPC_BPA) := zImage
99bootimage-$(CONFIG_PPC_ISERIES) := vmlinux 101bootimage-$(CONFIG_PPC_ISERIES) := vmlinux
100BOOTIMAGE := $(bootimage-y) 102BOOTIMAGE := $(bootimage-y)
101install: vmlinux 103install: vmlinux
diff --git a/arch/ppc64/boot/install.sh b/arch/ppc64/boot/install.sh
index 955c5681db6c..cb2d6626b555 100644
--- a/arch/ppc64/boot/install.sh
+++ b/arch/ppc64/boot/install.sh
@@ -22,8 +22,8 @@
22 22
23# User may have a custom install script 23# User may have a custom install script
24 24
25if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi 25if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
26if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi 26if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
27 27
28# Default install 28# Default install
29 29
diff --git a/arch/ppc64/configs/pSeries_defconfig b/arch/ppc64/configs/pSeries_defconfig
index 3eb5ef25d3a3..d0db8b5966c0 100644
--- a/arch/ppc64/configs/pSeries_defconfig
+++ b/arch/ppc64/configs/pSeries_defconfig
@@ -88,7 +88,7 @@ CONFIG_IBMVIO=y
88CONFIG_IOMMU_VMERGE=y 88CONFIG_IOMMU_VMERGE=y
89CONFIG_SMP=y 89CONFIG_SMP=y
90CONFIG_NR_CPUS=128 90CONFIG_NR_CPUS=128
91CONFIG_DISCONTIGMEM=y 91CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
92CONFIG_NUMA=y 92CONFIG_NUMA=y
93CONFIG_SCHED_SMT=y 93CONFIG_SCHED_SMT=y
94# CONFIG_PREEMPT is not set 94# CONFIG_PREEMPT is not set
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index 2f31bf3046f9..b8e2066dde77 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -89,7 +89,7 @@ CONFIG_BOOTX_TEXT=y
89CONFIG_IOMMU_VMERGE=y 89CONFIG_IOMMU_VMERGE=y
90CONFIG_SMP=y 90CONFIG_SMP=y
91CONFIG_NR_CPUS=32 91CONFIG_NR_CPUS=32
92CONFIG_DISCONTIGMEM=y 92CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
93# CONFIG_NUMA is not set 93# CONFIG_NUMA is not set
94# CONFIG_SCHED_SMT is not set 94# CONFIG_SCHED_SMT is not set
95# CONFIG_PREEMPT is not set 95# CONFIG_PREEMPT is not set
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index b5e167cf1a05..dffbfb7ac8d5 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -27,17 +27,21 @@ obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \
27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ 27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
28 iSeries_iommu.o 28 iSeries_iommu.o
29 29
30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o mpic.o 30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
31 31
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \ 33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
34 xics.o rtas.o pSeries_setup.o pSeries_iommu.o 34 pSeries_setup.o pSeries_iommu.o
35
36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
37 bpa_iic.o spider-pic.o
35 38
36obj-$(CONFIG_EEH) += eeh.o 39obj-$(CONFIG_EEH) += eeh.o
37obj-$(CONFIG_PROC_FS) += proc_ppc64.o 40obj-$(CONFIG_PROC_FS) += proc_ppc64.o
38obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 41obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
39obj-$(CONFIG_SMP) += smp.o 42obj-$(CONFIG_SMP) += smp.o
40obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o 43obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
44obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o
41obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 45obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
42obj-$(CONFIG_SCANLOG) += scanlog.o 46obj-$(CONFIG_SCANLOG) += scanlog.o
43obj-$(CONFIG_VIOPATH) += viopath.o 47obj-$(CONFIG_VIOPATH) += viopath.o
@@ -46,6 +50,8 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
46obj-$(CONFIG_BOOTX_TEXT) += btext.o 50obj-$(CONFIG_BOOTX_TEXT) += btext.o
47obj-$(CONFIG_HVCS) += hvcserver.o 51obj-$(CONFIG_HVCS) += hvcserver.o
48obj-$(CONFIG_IBMVIO) += vio.o 52obj-$(CONFIG_IBMVIO) += vio.o
53obj-$(CONFIG_XICS) += xics.o
54obj-$(CONFIG_MPIC) += mpic.o
49 55
50obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ 56obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
51 pmac_time.o pmac_nvram.o pmac_low_i2c.o 57 pmac_time.o pmac_nvram.o pmac_low_i2c.o
@@ -58,6 +64,7 @@ ifdef CONFIG_SMP
58obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o 64obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o
59obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o 65obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
60obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o 66obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
67obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
61obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o 68obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
62endif 69endif
63 70
diff --git a/arch/ppc64/kernel/bpa_iic.c b/arch/ppc64/kernel/bpa_iic.c
new file mode 100644
index 000000000000..c8f3dc3fad70
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iic.c
@@ -0,0 +1,270 @@
1/*
2 * BPA Internal Interrupt Controller
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/config.h>
24#include <linux/interrupt.h>
25#include <linux/irq.h>
26#include <linux/percpu.h>
27#include <linux/types.h>
28
29#include <asm/io.h>
30#include <asm/pgtable.h>
31#include <asm/prom.h>
32#include <asm/ptrace.h>
33
34#include "bpa_iic.h"
35
/*
 * Software view of the 64-bit IIC pending-interrupt word, split into
 * fields.  NOTE(review): field order assumes the big-endian register
 * layout matched by the single in_be64() read in iic_get_irq() --
 * confirm against the Broadband Engine register spec.
 */
struct iic_pending_bits {
	u32 data;
	u8 flags;
	u8 class;
	u8 source;
	u8 prio;
};

/* Bits in iic_pending_bits.flags */
enum iic_pending_flags {
	IIC_VALID = 0x80,	/* an interrupt is pending */
	IIC_IPI = 0x40,		/* the pending interrupt is an IPI */
};

/* Memory-mapped register block of one per-thread interrupt controller */
struct iic_regs {
	struct iic_pending_bits pending;
	struct iic_pending_bits pending_destr;	/* presumably a destructive
						 * (auto-acknowledging) read
						 * port -- verify vs. spec */
	u64 generate;	/* written to raise an interrupt (used for IPIs) */
	u64 prio;	/* priority: 0xff written to enable, 0 to mask */
};

struct iic {
	struct iic_regs __iomem *regs;	/* mapped by find_iic() */
};

/* one IIC instance per CPU thread */
static DEFINE_PER_CPU(struct iic, iic);
61
/* Enable interrupt delivery on the calling CPU's IIC (priority 0xff). */
void iic_local_enable(void)
{
	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
}

/* Mask interrupt delivery on the calling CPU's IIC (priority 0). */
void iic_local_disable(void)
{
	out_be64(&__get_cpu_var(iic).regs->prio, 0x0);
}
71
/* hw_interrupt_type startup hook: no per-source setup needed, the IIC is
 * controlled only through the per-CPU priority register. */
static unsigned int iic_startup(unsigned int irq)
{
	return 0;
}

/* Re-enable local delivery; there is no per-source enable on this PIC. */
static void iic_enable(unsigned int irq)
{
	iic_local_enable();
}

/* Intentionally empty: sources cannot be masked individually. */
static void iic_disable(unsigned int irq)
{
}

/* End-of-interrupt: re-open local delivery for the next interrupt. */
static void iic_end(unsigned int irq)
{
	iic_local_enable();
}

/* Controller ops shared by all IRQs routed through the BPA IIC. */
static struct hw_interrupt_type iic_pic = {
	.typename = " BPA-IIC  ",
	.startup = iic_startup,
	.enable = iic_enable,
	.disable = iic_disable,
	.end = iic_end,
};
98
/*
 * Translate the pending-state fields of an external (non-IPI) interrupt
 * into a linux IRQ number; returns -1 (with a warning) for combinations
 * this mapping does not recognize.
 */
static int iic_external_get_irq(struct iic_pending_bits pending)
{
	int irq;
	unsigned char node, unit;

	node = pending.source >> 4;	/* upper nibble: BE node id */
	unit = pending.source & 0xf;	/* lower nibble: unit within node */
	irq = -1;

	/*
	 * This mapping is specific to the Broadband
	 * Engine. We might need to get the numbers
	 * from the device tree to support future CPUs.
	 */
	switch (unit) {
	case 0x00:
	case 0x0b:
		/*
		 * One of these units can be connected
		 * to an external interrupt controller
		 * (the spider south bridge).
		 */
		if (pending.prio > 0x3f ||
		    pending.class != 2)
			break;
		irq = IIC_EXT_OFFSET
			+ spider_get_irq(pending.prio + node * IIC_NODE_STRIDE)
			+ node * IIC_NODE_STRIDE;
		break;
	case 0x01 ... 0x04:
	case 0x07 ... 0x0a:
		/*
		 * These units are connected to the SPEs;
		 * one IRQ line per (node, class, unit).
		 */
		if (pending.class > 2)
			break;
		irq = IIC_SPE_OFFSET
			+ pending.class * IIC_CLASS_STRIDE
			+ node * IIC_NODE_STRIDE
			+ unit;
		break;
	}
	if (irq == -1)
		printk(KERN_WARNING "Unexpected interrupt class %02x, "
			"source %02x, prio %02x, cpu %02x\n", pending.class,
			pending.source, pending.prio, smp_processor_id());
	return irq;
}
146
/* Get an IRQ number from the pending state register of the IIC */
int iic_get_irq(struct pt_regs *regs)
{
	struct iic *iic;
	int irq;
	struct iic_pending_bits pending;

	iic = &__get_cpu_var(iic);
	/*
	 * Single 64-bit read of the pending_destr register, type-punned
	 * into the bitfield struct.  NOTE(review): relies on big-endian
	 * field layout and on the kernel's -fno-strict-aliasing build.
	 */
	*(unsigned long *) &pending =
		in_be64((unsigned long __iomem *) &iic->regs->pending_destr);

	irq = -1;
	if (pending.flags & IIC_VALID) {
		if (pending.flags & IIC_IPI) {
			/* IPI number lives in the upper prio nibble */
			irq = IIC_IPI_OFFSET + (pending.prio >> 4);
/*
			if (irq > 0x80)
				printk(KERN_WARNING "Unexpected IPI prio %02x"
					"on CPU %02x\n", pending.prio,
					smp_processor_id());
*/
		} else {
			irq = iic_external_get_irq(pending);
		}
	}
	return irq;
}
174
175static struct iic_regs __iomem *find_iic(int cpu)
176{
177 struct device_node *np;
178 int nodeid = cpu / 2;
179 unsigned long regs;
180 struct iic_regs __iomem *iic_regs;
181
182 for (np = of_find_node_by_type(NULL, "cpu");
183 np;
184 np = of_find_node_by_type(np, "cpu")) {
185 if (nodeid == *(int *)get_property(np, "node-id", NULL))
186 break;
187 }
188
189 if (!np) {
190 printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
191 iic_regs = NULL;
192 } else {
193 regs = *(long *)get_property(np, "iic", NULL);
194
195 /* hack until we have decided on the devtree info */
196 regs += 0x400;
197 if (cpu & 1)
198 regs += 0x20;
199
200 printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
201 iic_regs = __ioremap(regs, sizeof(struct iic_regs),
202 _PAGE_NO_CACHE);
203 }
204 return iic_regs;
205}
206
#ifdef CONFIG_SMP
/* Per-CPU bringup hook: unmask all priorities on this CPU's IIC. */
void iic_setup_cpu(void)
{
	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
}

/* Raise IPI 'mesg' on 'cpu' by writing its IIC generate register. */
void iic_cause_IPI(int cpu, int mesg)
{
	out_be64(&per_cpu(iic, cpu).regs->generate, mesg);
}

/* Handler for incoming IPIs: the message number is encoded in the IRQ
 * number relative to IIC_IPI_OFFSET. */
static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{

	smp_message_recv(irq - IIC_IPI_OFFSET, regs);
	return IRQ_HANDLED;
}

/* Attach one IPI irq to the IIC controller and register its handler. */
static void iic_request_ipi(int irq, const char *name)
{
	/* IPIs are marked SA_INTERRUPT as they must run with irqs
	 * disabled */
	get_irq_desc(irq)->handler = &iic_pic;
	get_irq_desc(irq)->status |= IRQ_PER_CPU;
	/* NOTE(review): request_irq() return value is ignored; a failure
	 * would leave this IPI silently unwired. */
	request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL);
}

/* Register handlers for every IPI message used on this platform. */
void iic_request_IPIs(void)
{
	iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_RESCHEDULE, "IPI-resched");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}
#endif /* CONFIG_SMP */
243
244static void iic_setup_spe_handlers(void)
245{
246 int be, isrc;
247
248 /* Assume two threads per BE are present */
249 for (be=0; be < num_present_cpus() / 2; be++) {
250 for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
251 int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
252 get_irq_desc(irq)->handler = &iic_pic;
253 }
254 }
255}
256
/*
 * Platform init_IRQ hook: map each CPU's IIC registers, unmask them,
 * and attach the IIC controller to all SPE interrupt descriptors.
 */
void iic_init_IRQ(void)
{
	int cpu, irq_offset;
	struct iic *iic;

	irq_offset = 0;		/* currently unused */
	for_each_cpu(cpu) {
		iic = &per_cpu(iic, cpu);
		iic->regs = find_iic(cpu);
		if (iic->regs)
			out_be64(&iic->regs->prio, 0xff);
	}
	iic_setup_spe_handlers();
}
diff --git a/arch/ppc64/kernel/bpa_iic.h b/arch/ppc64/kernel/bpa_iic.h
new file mode 100644
index 000000000000..6833c3022166
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iic.h
@@ -0,0 +1,62 @@
1#ifndef ASM_BPA_IIC_H
2#define ASM_BPA_IIC_H
3#ifdef __KERNEL__
4/*
5 * Mapping of IIC pending bits into per-node
6 * interrupt numbers.
7 *
8 * IRQ FF CC SS PP FF CC SS PP Description
9 *
10 * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge
11 * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge
12 * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0
13 * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1
14 * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2
15 * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI
16 *
17 * F flags
18 * C class
19 * S source
20 * P Priority
21 * + node number
22 * * don't care
23 *
24 * A node consists of a Broadband Engine and an optional
25 * south bridge device providing a maximum of 64 IRQs.
26 * The south bridge may be connected to either IOIF0
27 * or IOIF1.
28 * Each SPE is represented as three IRQ lines, one per
29 * interrupt class.
30 * 16 IRQ numbers are reserved for inter processor
31 * interruptions, although these are only used in the
32 * range of the first node.
33 *
34 * This scheme needs 128 IRQ numbers per BIF node ID,
35 * which means that with the total of 512 lines
36 * available, we can have a maximum of four nodes.
37 */
38
/* IRQ number layout, per node; see the table above for the encoding. */
enum {
	IIC_EXT_OFFSET   = 0x00, /* Start of south bridge IRQs */
	IIC_NUM_EXT      = 0x40, /* Number of south bridge IRQs */
	IIC_SPE_OFFSET   = 0x40, /* Start of SPE interrupts */
	IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class    */
	IIC_IPI_OFFSET   = 0x70, /* Start of IPI IRQs     */
	IIC_NUM_IPIS     = 0x10, /* IRQs reserved for IPI */
	IIC_NODE_STRIDE  = 0x80, /* Total IRQs per node   */
};

/* implemented in bpa_iic.c */
extern void iic_init_IRQ(void);
extern int  iic_get_irq(struct pt_regs *regs);
extern void iic_cause_IPI(int cpu, int mesg);
extern void iic_request_IPIs(void);
extern void iic_setup_cpu(void);
extern void iic_local_enable(void);
extern void iic_local_disable(void);


/* implemented in spider-pic.c */
extern void spider_init_IRQ(void);
extern int spider_get_irq(unsigned long int_pending);
60
61#endif
62#endif /* ASM_BPA_IIC_H */
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
new file mode 100644
index 000000000000..f33a7bccb0d7
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -0,0 +1,377 @@
1/*
2 * IOMMU implementation for Broadband Processor Architecture
3 * We just establish a linear mapping at boot by setting all the
4 * IOPT cache entries in the CPU.
5 * The mapping functions should be identical to pci_direct_iommu,
6 * except for the handling of the high order bit that is required
7 * by the Spider bridge. These should be split into a separate
8 * file at the point where we get a different bridge chip.
9 *
10 * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
11 * Arnd Bergmann <arndb@de.ibm.com>
12 *
13 * Based on linear mapping
14 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 */
21
22#undef DEBUG
23
24#include <linux/kernel.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/string.h>
28#include <linux/init.h>
29#include <linux/bootmem.h>
30#include <linux/mm.h>
31#include <linux/dma-mapping.h>
32
33#include <asm/sections.h>
34#include <asm/iommu.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <asm/pci-bridge.h>
38#include <asm/machdep.h>
39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h>
41#include <asm/system.h>
42
43#include "pci.h"
44#include "bpa_iommu.h"
45
/*
 * Build a 64-bit IO page-table entry: protection, coherency, strict
 * per-IOID/VC ordering, the real page number and the IO identifier.
 */
static inline unsigned long
get_iopt_entry(unsigned long real_address, unsigned long ioid,
	       unsigned long prot)
{
	return (prot & IOPT_PROT_MASK)
	     | (IOPT_COHERENT)
	     | (IOPT_ORDER_VC)
	     | (real_address & IOPT_RPN_MASK)
	     | (ioid & IOPT_IOID_MASK);
}

/* Wrapper type so IO segment-table entries don't mix with plain longs */
typedef struct {
	unsigned long val;
} ioste;

static inline ioste
mk_ioste(unsigned long val)
{
	ioste ioste = { .val = val, };
	return ioste;
}
67
/*
 * Build the IO segment-table entry covering io_address for the given IO
 * page size.  page_size must be a compile-time constant (16M, 1M, 64K
 * or 4K), enforced by the BUILD_BUG_ON in the default branch.
 */
static inline ioste
get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
{
	unsigned long ps;
	unsigned long iostep;
	unsigned long nnpt;
	unsigned long shift;

	switch (page_size) {
	case 0x1000000:
		ps = IOST_PS_16M;
		nnpt = 0;  /* one page per segment */
		shift = 5; /* segment has 16 iopt entries */
		break;

	case 0x100000:
		ps = IOST_PS_1M;
		nnpt = 0;  /* one page per segment */
		shift = 1; /* segment has 256 iopt entries */
		break;

	case 0x10000:
		ps = IOST_PS_64K;
		nnpt = 0x07; /* 8 pages per io page table */
		shift = 0;   /* all entries are used */
		break;

	case 0x1000:
		ps = IOST_PS_4K;
		nnpt = 0x7f; /* 128 pages per io page table */
		shift = 0;   /* all entries are used */
		break;

	default: /* not a known compile time constant */
		BUILD_BUG_ON(1);
		break;
	}

	iostep = iopt_base +
			/* need 8 bytes per iopte */
			(((io_address / page_size * 8)
			/* align io page tables on 4k page boundaries */
				 << shift)
			/* nnpt+1 pages go into each iopt */
				 & ~(nnpt << 12));

	nnpt++;	/* this seems to work, but the documentation is not clear
		   about whether we put nnpt or nnpt-1 into the ioste bits.
		   In theory, this can't work for 4k pages. */
	return mk_ioste(IOST_VALID_MASK
			| (iostep & IOST_PT_BASE_MASK)
			| ((nnpt << 5) & IOST_NNPT_MASK)
			| (ps & IOST_PS_MASK));
}
122
/* compute the address of an io pte from the segment entry and the
 * IO address; the page-size field doubles as the shift selector */
static inline unsigned long
get_ioptep(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopt_base;
	unsigned long page_size;
	unsigned long page_number;
	unsigned long iopt_offset;

	iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
	page_size = iost_entry.val & IOST_PS_MASK;

	/* decode page size to compute page number */
	page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
	/* page number is an offset into the io page table */
	iopt_offset = (page_number << 3) & 0x7fff8ul;
	return iopt_base + iopt_offset;
}

/* compute the tag field of the iopt cache entry */
static inline unsigned long
get_ioc_tag(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopte = get_ioptep(iost_entry, io_address);

	return IOPT_VALID_MASK
	     | ((iopte & 0x00000000000000ff8ul) >> 3)
	     | ((iopte & 0x0000003fffffc0000ul) >> 9);
}

/* compute the hashed 6 bit index for the 4-way associative pte cache */
static inline unsigned long
get_ioc_hash(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopte = get_ioptep(iost_entry, io_address);

	return ((iopte & 0x000000000000001f8ul) >> 3)
	     ^ ((iopte & 0x00000000000020000ul) >> 17)
	     ^ ((iopte & 0x00000000000010000ul) >> 15)
	     ^ ((iopte & 0x00000000000008000ul) >> 13)
	     ^ ((iopte & 0x00000000000004000ul) >> 11)
	     ^ ((iopte & 0x00000000000002000ul) >> 9)
	     ^ ((iopte & 0x00000000000001000ul) >> 7);
}

/* same as above, but pretend that we have a simpler 1-way associative
   pte cache with an 8 bit index */
static inline unsigned long
get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopte = get_ioptep(iost_entry, io_address);

	return ((iopte & 0x000000000000001f8ul) >> 3)
	     ^ ((iopte & 0x00000000000020000ul) >> 17)
	     ^ ((iopte & 0x00000000000010000ul) >> 15)
	     ^ ((iopte & 0x00000000000008000ul) >> 13)
	     ^ ((iopte & 0x00000000000004000ul) >> 11)
	     ^ ((iopte & 0x00000000000002000ul) >> 9)
	     ^ ((iopte & 0x00000000000001000ul) >> 7)
	     ^ ((iopte & 0x0000000000000c000ul) >> 8);
}
184
/* Read one entry from the segment-table cache directory. */
static inline ioste
get_iost_cache(void __iomem *base, unsigned long index)
{
	unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
	return mk_ioste(in_be64(&p[index]));
}

/* Store one entry into the segment-table cache directory. */
static inline void
set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
{
	unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
	pr_debug("ioste %02lx was %016lx, store %016lx", index,
			get_iost_cache(base, index).val, ste.val);
	out_be64(&p[index], ste.val);
	pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
}

/* Read one pte-cache entry; returns the value, stores the tag in *tag.
 * NOTE(review): dereferences __iomem pointers directly instead of using
 * in_be64() -- presumably intentional here, but worth confirming. */
static inline unsigned long
get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
{
	unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
	unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);

	*tag = tags[index];
	rmb();	/* read the tag before the value register */
	return *p;
}
212
213static inline void
214set_iopt_cache(void __iomem *base, unsigned long index,
215 unsigned long tag, unsigned long val)
216{
217 unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
218 unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
219 pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n",
220 index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag);
221
222 out_be64(p, val);
223 out_be64(&tags[index], tag);
224}
225
/* Point the IO controller at a software-managed segment table by
 * writing the origin register with the enable + SW bits. */
static inline void
set_iost_origin(void __iomem *base)
{
	unsigned long __iomem *p = base + IOC_ST_ORIGIN;
	unsigned long origin = IOSTO_ENABLE | IOSTO_SW;

	pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
	out_be64(p, origin);
}

/* Turn on translation (TE bit) in the IO command configuration reg. */
static inline void
set_iocmd_config(void __iomem *base)
{
	unsigned long __iomem *p = base + 0xc00;	/* IOCMD config reg */
	unsigned long conf;

	conf = in_be64(p);
	pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
	out_be64(p, conf | IOCMD_CONF_TE);
}
246
/* FIXME: get these from the device tree */
#define ioc_base	0x20000511000ull	/* IO controller regs */
#define ioc_mmio_base	0x20000510000ull	/* IO controller MMIO  */
#define ioid		0x48a			/* IO identifier */
#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */
#define io_page_size	0x1000000	/* 16MB IO pages */

/* Build the iopt entry for one IO address; the 0x600xxxxx range is
 * redirected to the spider I/O area, everything else is linear with
 * the fixed physical offset. */
static unsigned long map_iopt_entry(unsigned long address)
{
	switch (address >> 20) {
	case 0x600:
		address = 0x24020000000ull; /* spider i/o */
		break;
	default:
		address += iopt_phys_offset;
		break;
	}

	return get_iopt_entry(address, ioid, IOPT_PROT_RW);
}
267
/* No-op hooks: per-bus/per-device iommu setup is not needed with the
 * static linear mapping established below. */
static void iommu_bus_setup_null(struct pci_bus *b) { }
static void iommu_dev_setup_null(struct pci_dev *d) { }

/* initialize the iommu to support a simple linear mapping
 * for each DMA window used by any device. For now, we
 * happen to know that there is only one DMA window in use,
 * starting at iopt_phys_offset. */
static void bpa_map_iommu(void)
{
	unsigned long address;
	void __iomem *base;
	ioste ioste;
	unsigned long index;

	/* enable translation in the IO command config register */
	base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE);
	pr_debug("%lx mapped to %p\n", ioc_base, base);
	set_iocmd_config(base);
	iounmap(base);

	base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE);
	pr_debug("%lx mapped to %p\n", ioc_mmio_base, base);

	set_iost_origin(base);

	/* populate segment and pte caches for the first 4GB of IO space;
	 * 0x10000000000ul is the (unused) page-table base fed into the
	 * segment entries -- translation is served from the caches */
	for (address = 0; address < 0x100000000ul; address += io_page_size) {
		ioste = get_iost_entry(0x10000000000ul, address, io_page_size);
		if ((address & 0xfffffff) == 0)	/* segment start */
			set_iost_cache(base, address >> 28, ioste);
		index = get_ioc_hash_1way(ioste, address);
		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
					 address, index, ioste.val);
		set_iopt_cache(base,
			get_ioc_hash_1way(ioste, address),
			get_ioc_tag(ioste, address),
			map_iopt_entry(address));
	}
	iounmap(base);
}
306
307
/* Coherent DMA allocation: plain zeroed pages; the bus address is the
 * absolute address with the Spider "valid" high bit set. */
static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, unsigned int __nocast flag)
{
	void *ret;

	ret = (void *)__get_free_pages(flag, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_abs(ret) | BPA_DMA_VALID;
	}
	return ret;
}

/* Free pages obtained from bpa_alloc_coherent(). */
static void bpa_free_coherent(struct device *hwdev, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

/* Direct mapping: no iommu programming needed per transfer. */
static dma_addr_t bpa_map_single(struct device *hwdev, void *ptr,
		size_t size, enum dma_data_direction direction)
{
	return virt_to_abs(ptr) | BPA_DMA_VALID;
}

/* Nothing to undo for a direct mapping. */
static void bpa_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction)
{
}
337
/* Scatter-gather mapping: each entry maps 1:1 to its physical address,
 * with the Spider "valid" bit set; no merging is performed. */
static int bpa_map_sg(struct device *hwdev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (page_to_phys(sg->page) + sg->offset)
					| BPA_DMA_VALID;
		sg->dma_length = sg->length;
	}

	return nents;
}

/* Nothing to undo for direct sg mappings. */
static void bpa_unmap_sg(struct device *hwdev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
}

/* NOTE(review): accepts only masks strictly below 4GB -- the opposite
 * of the usual "device supports at least this mask" convention; this
 * presumably reflects the 31-bit window behind BPA_DMA_VALID, but the
 * comparison direction should be confirmed. */
static int bpa_dma_supported(struct device *dev, u64 mask)
{
	return mask < 0x100000000ull;
}
361
/*
 * Entry point called from platform early init: program the linear
 * IO translation and install the direct-mapping pci_dma_ops.
 */
void bpa_init_iommu(void)
{
	bpa_map_iommu();

	/* Direct I/O, IOMMU off */
	ppc_md.iommu_dev_setup = iommu_dev_setup_null;
	ppc_md.iommu_bus_setup = iommu_bus_setup_null;

	pci_dma_ops.alloc_coherent = bpa_alloc_coherent;
	pci_dma_ops.free_coherent = bpa_free_coherent;
	pci_dma_ops.map_single = bpa_map_single;
	pci_dma_ops.unmap_single = bpa_unmap_single;
	pci_dma_ops.map_sg = bpa_map_sg;
	pci_dma_ops.unmap_sg = bpa_unmap_sg;
	pci_dma_ops.dma_supported = bpa_dma_supported;
}
diff --git a/arch/ppc64/kernel/bpa_iommu.h b/arch/ppc64/kernel/bpa_iommu.h
new file mode 100644
index 000000000000..e547d77dfa04
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iommu.h
@@ -0,0 +1,65 @@
1#ifndef BPA_IOMMU_H
2#define BPA_IOMMU_H
3
4/* some constants */
/* Register bit definitions for the BPA IO translation hardware. */
enum {
	/* segment table entries */
	IOST_VALID_MASK	  = 0x8000000000000000ul,
	IOST_TAG_MASK     = 0x3000000000000000ul,
	IOST_PT_BASE_MASK = 0x000003fffffff000ul,
	IOST_NNPT_MASK	  = 0x0000000000000fe0ul,
	IOST_PS_MASK	  = 0x000000000000000ful,

	IOST_PS_4K	  = 0x1,
	IOST_PS_64K	  = 0x3,
	IOST_PS_1M	  = 0x5,
	IOST_PS_16M	  = 0x7,

	/* iopt tag register */
	IOPT_VALID_MASK   = 0x0000000200000000ul,
	IOPT_TAG_MASK	  = 0x00000001fffffffful,

	/* iopt cache register */
	IOPT_PROT_MASK	  = 0xc000000000000000ul,
	IOPT_PROT_NONE	  = 0x0000000000000000ul,
	IOPT_PROT_READ	  = 0x4000000000000000ul,
	IOPT_PROT_WRITE	  = 0x8000000000000000ul,
	IOPT_PROT_RW	  = 0xc000000000000000ul,
	IOPT_COHERENT	  = 0x2000000000000000ul,

	IOPT_ORDER_MASK	  = 0x1800000000000000ul,
	/* order access to same IOID/VC on same address */
	IOPT_ORDER_ADDR	  = 0x0800000000000000ul,
	/* similar, but only after a write access */
	IOPT_ORDER_WRITES = 0x1000000000000000ul,
	/* Order all accesses to same IOID/VC */
	IOPT_ORDER_VC	  = 0x1800000000000000ul,

	IOPT_RPN_MASK	  = 0x000003fffffff000ul,
	IOPT_HINT_MASK	  = 0x0000000000000800ul,
	IOPT_IOID_MASK	  = 0x00000000000007fful,

	IOSTO_ENABLE	  = 0x8000000000000000ul,
	IOSTO_ORIGIN	  = 0x000003fffffff000ul,
	IOSTO_HW	  = 0x0000000000000800ul,
	IOSTO_SW	  = 0x0000000000000400ul,

	IOCMD_CONF_TE	  = 0x0000800000000000ul,

	/* memory mapped registers (offsets from ioc_mmio_base) */
	IOC_PT_CACHE_DIR  = 0x000,
	IOC_ST_CACHE_DIR  = 0x800,
	IOC_PT_CACHE_REG  = 0x910,
	IOC_ST_ORIGIN     = 0x918,
	IOC_CONF	  = 0x930,

	/* The high bit needs to be set on every DMA address,
	   only 2GB are addressable */
	BPA_DMA_VALID	  = 0x80000000,
	BPA_DMA_MASK	  = 0x7fffffff,
};


void bpa_init_iommu(void);
64
65#endif
diff --git a/arch/ppc64/kernel/bpa_nvram.c b/arch/ppc64/kernel/bpa_nvram.c
new file mode 100644
index 000000000000..06a119cfceb5
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_nvram.c
@@ -0,0 +1,118 @@
1/*
2 * NVRAM for CPBW
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/fs.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/types.h>
28
29#include <asm/machdep.h>
30#include <asm/nvram.h>
31#include <asm/prom.h>
32
static void __iomem *bpa_nvram_start;	/* ioremapped NVRAM window */
static long bpa_nvram_len;		/* window size in bytes */
/* serializes all accesses to the memory-mapped NVRAM */
static spinlock_t bpa_nvram_lock = SPIN_LOCK_UNLOCKED;
36
37static ssize_t bpa_nvram_read(char *buf, size_t count, loff_t *index)
38{
39 unsigned long flags;
40
41 if (*index >= bpa_nvram_len)
42 return 0;
43 if (*index + count > bpa_nvram_len)
44 count = bpa_nvram_len - *index;
45
46 spin_lock_irqsave(&bpa_nvram_lock, flags);
47
48 memcpy_fromio(buf, bpa_nvram_start + *index, count);
49
50 spin_unlock_irqrestore(&bpa_nvram_lock, flags);
51
52 *index += count;
53 return count;
54}
55
56static ssize_t bpa_nvram_write(char *buf, size_t count, loff_t *index)
57{
58 unsigned long flags;
59
60 if (*index >= bpa_nvram_len)
61 return 0;
62 if (*index + count > bpa_nvram_len)
63 count = bpa_nvram_len - *index;
64
65 spin_lock_irqsave(&bpa_nvram_lock, flags);
66
67 memcpy_toio(bpa_nvram_start + *index, buf, count);
68
69 spin_unlock_irqrestore(&bpa_nvram_lock, flags);
70
71 *index += count;
72 return count;
73}
74
/* ppc_md.nvram_size hook: report the mapped NVRAM size in bytes. */
static ssize_t bpa_nvram_get_size(void)
{
	return bpa_nvram_len;
}
79
80int __init bpa_nvram_init(void)
81{
82 struct device_node *nvram_node;
83 unsigned long *buffer;
84 int proplen;
85 unsigned long nvram_addr;
86 int ret;
87
88 ret = -ENODEV;
89 nvram_node = of_find_node_by_type(NULL, "nvram");
90 if (!nvram_node)
91 goto out;
92
93 ret = -EIO;
94 buffer = (unsigned long *)get_property(nvram_node, "reg", &proplen);
95 if (proplen != 2*sizeof(unsigned long))
96 goto out;
97
98 ret = -ENODEV;
99 nvram_addr = buffer[0];
100 bpa_nvram_len = buffer[1];
101 if ( (!bpa_nvram_len) || (!nvram_addr) )
102 goto out;
103
104 bpa_nvram_start = ioremap(nvram_addr, bpa_nvram_len);
105 if (!bpa_nvram_start)
106 goto out;
107
108 printk(KERN_INFO "BPA NVRAM, %luk mapped to %p\n",
109 bpa_nvram_len >> 10, bpa_nvram_start);
110
111 ppc_md.nvram_read = bpa_nvram_read;
112 ppc_md.nvram_write = bpa_nvram_write;
113 ppc_md.nvram_size = bpa_nvram_get_size;
114
115out:
116 of_node_put(nvram_node);
117 return ret;
118}
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
new file mode 100644
index 000000000000..57b3db66f458
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -0,0 +1,140 @@
1/*
2 * linux/arch/ppc/kernel/bpa_setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * Modified by PPC64 Team, IBM Corp
8 * Modified by BPA Team, IBM Deutschland Entwicklung GmbH
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/reboot.h>
26#include <linux/init.h>
27#include <linux/delay.h>
28#include <linux/irq.h>
29#include <linux/seq_file.h>
30#include <linux/root_dev.h>
31#include <linux/console.h>
32
33#include <asm/mmu.h>
34#include <asm/processor.h>
35#include <asm/io.h>
36#include <asm/pgtable.h>
37#include <asm/prom.h>
38#include <asm/rtas.h>
39#include <asm/pci-bridge.h>
40#include <asm/iommu.h>
41#include <asm/dma.h>
42#include <asm/machdep.h>
43#include <asm/time.h>
44#include <asm/nvram.h>
45#include <asm/cputable.h>
46
47#include "pci.h"
48#include "bpa_iic.h"
49#include "bpa_iommu.h"
50
51#ifdef DEBUG
52#define DBG(fmt...) udbg_printf(fmt)
53#else
54#define DBG(fmt...)
55#endif
56
57void bpa_get_cpuinfo(struct seq_file *m)
58{
59 struct device_node *root;
60 const char *model = "";
61
62 root = of_find_node_by_path("/");
63 if (root)
64 model = get_property(root, "model", NULL);
65 seq_printf(m, "machine\t\t: BPA %s\n", model);
66 of_node_put(root);
67}
68
/* ppc_md.progress hook: log a boot-progress code and message. */
static void bpa_progress(char *s, unsigned short hex)
{
	const char *msg = s;

	if (!msg)
		msg = "";
	printk("*** %04x : %s\n", hex, msg);
}
73
/* Main platform setup: interrupt controllers, PCI, console, NVRAM. */
static void __init bpa_setup_arch(void)
{
	/* route interrupt handling through the BPA IIC */
	ppc_md.init_IRQ       = iic_init_IRQ;
	ppc_md.get_irq        = iic_get_irq;

#ifdef CONFIG_SMP
	smp_init_pSeries();	/* reuses the pSeries SMP bringup */
#endif

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	if (ROOT_DEV == 0) {
		printk("No ramdisk, default root is /dev/hda2\n");
		ROOT_DEV = Root_HDA2;
	}

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	spider_init_IRQ();	/* south bridge interrupt controller */
#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	bpa_nvram_init();
}
101
/*
 * Early initialization. Relocation is on but do not reference unbolted
 * pages: set up the native hash table, the IO translation and record
 * the interrupt-controller type.
 */
static void __init bpa_init_early(void)
{
	DBG(" -> bpa_init_early()\n");

	hpte_init_native();

	bpa_init_iommu();

	ppc64_interrupt_controller = IC_BPA_IIC;

	DBG(" <- bpa_init_early()\n");
}
117
118
119static int __init bpa_probe(int platform)
120{
121 if (platform != PLATFORM_BPA)
122 return 0;
123
124 return 1;
125}
126
/* machdep vector for the BPA platform; RTAS provides reboot/halt and
 * the RTC, timekeeping uses the generic decrementer calibration. */
struct machdep_calls __initdata bpa_md = {
	.probe			= bpa_probe,
	.setup_arch		= bpa_setup_arch,
	.init_early		= bpa_init_early,
	.get_cpuinfo		= bpa_get_cpuinfo,
	.restart		= rtas_restart,
	.power_off		= rtas_power_off,
	.halt			= rtas_halt,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= bpa_progress,
};
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/ppc64/kernel/cpu_setup_power4.S
index 3bd951820850..42fc08cf87a0 100644
--- a/arch/ppc64/kernel/cpu_setup_power4.S
+++ b/arch/ppc64/kernel/cpu_setup_power4.S
@@ -73,7 +73,21 @@ _GLOBAL(__970_cpu_preinit)
73 73
74_GLOBAL(__setup_cpu_power4) 74_GLOBAL(__setup_cpu_power4)
75 blr 75 blr
76 76
_GLOBAL(__setup_cpu_be)
	/* Set large page sizes LP=0: 16MB, LP=1: 64KB */
	addi	r3, 0, 0		/* r3 = 0 */
	ori	r3, r3, HID6_LB		/* r3 = LB field bits */
	sldi	r3, r3, 32		/* shift into upper word */
	nor	r3, r3, r3		/* r3 = ~(HID6_LB << 32): clear mask */
	mfspr	r4, SPRN_HID6
	and	r4, r4, r3		/* clear the LB field in HID6 */
	addi	r3, 0, 0x02000		/* new LB value */
	sldi	r3, r3, 32		/* align with the LB field */
	or	r4, r4, r3		/* merge it in */
	mtspr	SPRN_HID6, r4
	blr
90
77_GLOBAL(__setup_cpu_ppc970) 91_GLOBAL(__setup_cpu_ppc970)
78 mfspr r0,SPRN_HID0 92 mfspr r0,SPRN_HID0
79 li r11,5 /* clear DOZE and SLEEP */ 93 li r11,5 /* clear DOZE and SLEEP */
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 8644a8648058..1d162c7c59df 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -34,6 +34,7 @@ EXPORT_SYMBOL(cur_cpu_spec);
34extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec); 34extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
35extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec); 35extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); 36extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
37 38
38 39
39/* We only set the altivec features if the kernel was compiled with altivec 40/* We only set the altivec features if the kernel was compiled with altivec
@@ -162,6 +163,16 @@ struct cpu_spec cpu_specs[] = {
162 __setup_cpu_power4, 163 __setup_cpu_power4,
163 COMMON_PPC64_FW 164 COMMON_PPC64_FW
164 }, 165 },
166 { /* BE DD1.x */
167 0xffff0000, 0x00700000, "Broadband Engine",
168 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
169 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
170 CPU_FTR_SMT,
171 COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP,
172 128, 128,
173 __setup_cpu_be,
174 COMMON_PPC64_FW
175 },
165 { /* default match */ 176 { /* default match */
166 0x00000000, 0x00000000, "POWER4 (compatible)", 177 0x00000000, 0x00000000, "POWER4 (compatible)",
167 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | 178 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index b31962436fe3..86966ce76b58 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -671,9 +671,6 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
671 } 671 }
672} 672}
673 673
674extern unsigned long ppc_proc_freq;
675extern unsigned long ppc_tb_freq;
676
677/* 674/*
678 * Document me. 675 * Document me.
679 */ 676 */
@@ -772,8 +769,6 @@ static void iSeries_halt(void)
772 mf_power_off(); 769 mf_power_off();
773} 770}
774 771
775extern void setup_default_decr(void);
776
777/* 772/*
778 * void __init iSeries_calibrate_decr() 773 * void __init iSeries_calibrate_decr()
779 * 774 *
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index d860467b8f09..3defc8c33adf 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -395,6 +395,9 @@ int virt_irq_create_mapping(unsigned int real_irq)
395 if (ppc64_interrupt_controller == IC_OPEN_PIC) 395 if (ppc64_interrupt_controller == IC_OPEN_PIC)
396 return real_irq; /* no mapping for openpic (for now) */ 396 return real_irq; /* no mapping for openpic (for now) */
397 397
398 if (ppc64_interrupt_controller == IC_BPA_IIC)
399 return real_irq; /* no mapping for iic either */
400
398 /* don't map interrupts < MIN_VIRT_IRQ */ 401 /* don't map interrupts < MIN_VIRT_IRQ */
399 if (real_irq < MIN_VIRT_IRQ) { 402 if (real_irq < MIN_VIRT_IRQ) {
400 virt_irq_to_real_map[real_irq] = real_irq; 403 virt_irq_to_real_map[real_irq] = real_irq;
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index e950a2058a19..782ce3efa2c1 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -32,15 +32,14 @@
32#include <linux/ptrace.h> 32#include <linux/ptrace.h>
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/preempt.h> 34#include <linux/preempt.h>
35#include <asm/cacheflush.h>
35#include <asm/kdebug.h> 36#include <asm/kdebug.h>
36#include <asm/sstep.h> 37#include <asm/sstep.h>
37 38
38/* kprobe_status settings */
39#define KPROBE_HIT_ACTIVE 0x00000001
40#define KPROBE_HIT_SS 0x00000002
41
42static struct kprobe *current_kprobe; 39static struct kprobe *current_kprobe;
43static unsigned long kprobe_status, kprobe_saved_msr; 40static unsigned long kprobe_status, kprobe_saved_msr;
41static struct kprobe *kprobe_prev;
42static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
44static struct pt_regs jprobe_saved_regs; 43static struct pt_regs jprobe_saved_regs;
45 44
46int arch_prepare_kprobe(struct kprobe *p) 45int arch_prepare_kprobe(struct kprobe *p)
@@ -61,16 +60,25 @@ int arch_prepare_kprobe(struct kprobe *p)
61void arch_copy_kprobe(struct kprobe *p) 60void arch_copy_kprobe(struct kprobe *p)
62{ 61{
63 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 62 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
63 p->opcode = *p->addr;
64} 64}
65 65
66void arch_remove_kprobe(struct kprobe *p) 66void arch_arm_kprobe(struct kprobe *p)
67{ 67{
68 *p->addr = BREAKPOINT_INSTRUCTION;
69 flush_icache_range((unsigned long) p->addr,
70 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
68} 71}
69 72
70static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs) 73void arch_disarm_kprobe(struct kprobe *p)
71{ 74{
72 *p->addr = p->opcode; 75 *p->addr = p->opcode;
73 regs->nip = (unsigned long)p->addr; 76 flush_icache_range((unsigned long) p->addr,
77 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
78}
79
80void arch_remove_kprobe(struct kprobe *p)
81{
74} 82}
75 83
76static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 84static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -83,6 +91,20 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
83 regs->nip = (unsigned long)&p->ainsn.insn; 91 regs->nip = (unsigned long)&p->ainsn.insn;
84} 92}
85 93
94static inline void save_previous_kprobe(void)
95{
96 kprobe_prev = current_kprobe;
97 kprobe_status_prev = kprobe_status;
98 kprobe_saved_msr_prev = kprobe_saved_msr;
99}
100
101static inline void restore_previous_kprobe(void)
102{
103 current_kprobe = kprobe_prev;
104 kprobe_status = kprobe_status_prev;
105 kprobe_saved_msr = kprobe_saved_msr_prev;
106}
107
86static inline int kprobe_handler(struct pt_regs *regs) 108static inline int kprobe_handler(struct pt_regs *regs)
87{ 109{
88 struct kprobe *p; 110 struct kprobe *p;
@@ -101,8 +123,19 @@ static inline int kprobe_handler(struct pt_regs *regs)
101 unlock_kprobes(); 123 unlock_kprobes();
102 goto no_kprobe; 124 goto no_kprobe;
103 } 125 }
104 disarm_kprobe(p, regs); 126 /* We have reentered the kprobe_handler(), since
105 ret = 1; 127 * another probe was hit while within the handler.
128 * We here save the original kprobes variables and
129 * just single step on the instruction of the new probe
130 * without calling any user handlers.
131 */
132 save_previous_kprobe();
133 current_kprobe = p;
134 kprobe_saved_msr = regs->msr;
135 p->nmissed++;
136 prepare_singlestep(p, regs);
137 kprobe_status = KPROBE_REENTER;
138 return 1;
106 } else { 139 } else {
107 p = current_kprobe; 140 p = current_kprobe;
108 if (p->break_handler && p->break_handler(p, regs)) { 141 if (p->break_handler && p->break_handler(p, regs)) {
@@ -184,13 +217,21 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
184 if (!kprobe_running()) 217 if (!kprobe_running())
185 return 0; 218 return 0;
186 219
187 if (current_kprobe->post_handler) 220 if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
221 kprobe_status = KPROBE_HIT_SSDONE;
188 current_kprobe->post_handler(current_kprobe, regs, 0); 222 current_kprobe->post_handler(current_kprobe, regs, 0);
223 }
189 224
190 resume_execution(current_kprobe, regs); 225 resume_execution(current_kprobe, regs);
191 regs->msr |= kprobe_saved_msr; 226 regs->msr |= kprobe_saved_msr;
192 227
228 /*Restore back the original saved kprobes variables and continue. */
229 if (kprobe_status == KPROBE_REENTER) {
230 restore_previous_kprobe();
231 goto out;
232 }
193 unlock_kprobes(); 233 unlock_kprobes();
234out:
194 preempt_enable_no_resched(); 235 preempt_enable_no_resched();
195 236
196 /* 237 /*
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index 8cf95a27178e..da8900b51f40 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -78,17 +78,77 @@ extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
78extern void generic_find_legacy_serial_ports(u64 *physport, 78extern void generic_find_legacy_serial_ports(u64 *physport,
79 unsigned int *default_speed); 79 unsigned int *default_speed);
80 80
81
82static void maple_restart(char *cmd) 81static void maple_restart(char *cmd)
83{ 82{
83 unsigned int maple_nvram_base;
84 unsigned int maple_nvram_offset;
85 unsigned int maple_nvram_command;
86 struct device_node *rtcs;
87
88 /* find NVRAM device */
89 rtcs = find_compatible_devices("nvram", "AMD8111");
90 if (rtcs && rtcs->addrs) {
91 maple_nvram_base = rtcs->addrs[0].address;
92 } else {
93 printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
94 printk(KERN_EMERG "Maple: Manual Restart Required\n");
95 return;
96 }
97
98 /* find service processor device */
99 rtcs = find_devices("service-processor");
100 if (!rtcs) {
101 printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
102 printk(KERN_EMERG "Maple: Manual Restart Required\n");
103 return;
104 }
105 maple_nvram_offset = *(unsigned int*) get_property(rtcs,
106 "restart-addr", NULL);
107 maple_nvram_command = *(unsigned int*) get_property(rtcs,
108 "restart-value", NULL);
109
110 /* send command */
111 outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset);
112 for (;;) ;
84} 113}
85 114
86static void maple_power_off(void) 115static void maple_power_off(void)
87{ 116{
117 unsigned int maple_nvram_base;
118 unsigned int maple_nvram_offset;
119 unsigned int maple_nvram_command;
120 struct device_node *rtcs;
121
122 /* find NVRAM device */
123 rtcs = find_compatible_devices("nvram", "AMD8111");
124 if (rtcs && rtcs->addrs) {
125 maple_nvram_base = rtcs->addrs[0].address;
126 } else {
127 printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
128 printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
129 return;
130 }
131
132 /* find service processor device */
133 rtcs = find_devices("service-processor");
134 if (!rtcs) {
135 printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
136 printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
137 return;
138 }
139 maple_nvram_offset = *(unsigned int*) get_property(rtcs,
140 "power-off-addr", NULL);
141 maple_nvram_command = *(unsigned int*) get_property(rtcs,
142 "power-off-value", NULL);
143
144 /* send command */
145 outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset);
146 for (;;) ;
88} 147}
89 148
90static void maple_halt(void) 149static void maple_halt(void)
91{ 150{
151 maple_power_off();
92} 152}
93 153
94#ifdef CONFIG_SMP 154#ifdef CONFIG_SMP
@@ -235,6 +295,6 @@ struct machdep_calls __initdata maple_md = {
235 .get_boot_time = maple_get_boot_time, 295 .get_boot_time = maple_get_boot_time,
236 .set_rtc_time = maple_set_rtc_time, 296 .set_rtc_time = maple_set_rtc_time,
237 .get_rtc_time = maple_get_rtc_time, 297 .get_rtc_time = maple_get_rtc_time,
238 .calibrate_decr = maple_calibrate_decr, 298 .calibrate_decr = generic_calibrate_decr,
239 .progress = maple_progress, 299 .progress = maple_progress,
240}; 300};
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/ppc64/kernel/maple_time.c
index 07ce7895b43d..d65210abcd03 100644
--- a/arch/ppc64/kernel/maple_time.c
+++ b/arch/ppc64/kernel/maple_time.c
@@ -42,11 +42,8 @@
42#define DBG(x...) 42#define DBG(x...)
43#endif 43#endif
44 44
45extern void setup_default_decr(void);
46extern void GregorianDay(struct rtc_time * tm); 45extern void GregorianDay(struct rtc_time * tm);
47 46
48extern unsigned long ppc_tb_freq;
49extern unsigned long ppc_proc_freq;
50static int maple_rtc_addr; 47static int maple_rtc_addr;
51 48
52static int maple_clock_read(int addr) 49static int maple_clock_read(int addr)
@@ -176,51 +173,3 @@ void __init maple_get_boot_time(struct rtc_time *tm)
176 maple_get_rtc_time(tm); 173 maple_get_rtc_time(tm);
177} 174}
178 175
179/* XXX FIXME: Some sane defaults: 125 MHz timebase, 1GHz processor */
180#define DEFAULT_TB_FREQ 125000000UL
181#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
182
183void __init maple_calibrate_decr(void)
184{
185 struct device_node *cpu;
186 struct div_result divres;
187 unsigned int *fp = NULL;
188
189 /*
190 * The cpu node should have a timebase-frequency property
191 * to tell us the rate at which the decrementer counts.
192 */
193 cpu = of_find_node_by_type(NULL, "cpu");
194
195 ppc_tb_freq = DEFAULT_TB_FREQ;
196 if (cpu != 0)
197 fp = (unsigned int *)get_property(cpu, "timebase-frequency", NULL);
198 if (fp != NULL)
199 ppc_tb_freq = *fp;
200 else
201 printk(KERN_ERR "WARNING: Estimating decrementer frequency (not found)\n");
202 fp = NULL;
203 ppc_proc_freq = DEFAULT_PROC_FREQ;
204 if (cpu != 0)
205 fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
206 if (fp != NULL)
207 ppc_proc_freq = *fp;
208 else
209 printk(KERN_ERR "WARNING: Estimating processor frequency (not found)\n");
210
211 of_node_put(cpu);
212
213 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
214 ppc_tb_freq/1000000, ppc_tb_freq%1000000);
215 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
216 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
217
218 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
219 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
220 tb_ticks_per_usec = ppc_tb_freq / 1000000;
221 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
222 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
223 tb_to_xs = divres.result_low;
224
225 setup_default_decr();
226}
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
index 571b3c99e062..63e177143eac 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/arch/ppc64/kernel/mpic.h
@@ -265,3 +265,6 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
265extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); 265extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
266/* This one gets to the primary mpic */ 266/* This one gets to the primary mpic */
267extern int mpic_get_irq(struct pt_regs *regs); 267extern int mpic_get_irq(struct pt_regs *regs);
268
269/* global mpic for pSeries */
270extern struct mpic *pSeries_mpic;
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c
index 0b1cca281408..1f5f141fb7a1 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/ppc64/kernel/pSeries_pci.c
@@ -1,13 +1,11 @@
1/* 1/*
2 * pSeries_pci.c 2 * arch/ppc64/kernel/pSeries_pci.c
3 * 3 *
4 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation 4 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM 5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 * 6 *
7 * pSeries specific routines for PCI. 7 * pSeries specific routines for PCI.
8 * 8 *
9 * Based on code from pci.c and chrp_pci.c
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 11 * the Free Software Foundation; either version 2 of the License, or
@@ -23,430 +21,18 @@
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */ 22 */
25 23
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/threads.h>
28#include <linux/pci.h> 27#include <linux/pci.h>
29#include <linux/string.h> 28#include <linux/string.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32 29
33#include <asm/io.h>
34#include <asm/pgtable.h>
35#include <asm/irq.h>
36#include <asm/prom.h>
37#include <asm/machdep.h>
38#include <asm/pci-bridge.h> 30#include <asm/pci-bridge.h>
39#include <asm/iommu.h> 31#include <asm/prom.h>
40#include <asm/rtas.h>
41 32
42#include "mpic.h"
43#include "pci.h" 33#include "pci.h"
44 34
45/* RTAS tokens */ 35static int __initdata s7a_workaround = -1;
46static int read_pci_config;
47static int write_pci_config;
48static int ibm_read_pci_config;
49static int ibm_write_pci_config;
50
51static int s7a_workaround;
52
53extern struct mpic *pSeries_mpic;
54
55static int config_access_valid(struct device_node *dn, int where)
56{
57 if (where < 256)
58 return 1;
59 if (where < 4096 && dn->pci_ext_config_space)
60 return 1;
61
62 return 0;
63}
64
65static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
66{
67 int returnval = -1;
68 unsigned long buid, addr;
69 int ret;
70
71 if (!dn)
72 return PCIBIOS_DEVICE_NOT_FOUND;
73 if (!config_access_valid(dn, where))
74 return PCIBIOS_BAD_REGISTER_NUMBER;
75
76 addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
77 (dn->devfn << 8) | (where & 0xff);
78 buid = dn->phb->buid;
79 if (buid) {
80 ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
81 addr, buid >> 32, buid & 0xffffffff, size);
82 } else {
83 ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
84 }
85 *val = returnval;
86
87 if (ret)
88 return PCIBIOS_DEVICE_NOT_FOUND;
89
90 if (returnval == EEH_IO_ERROR_VALUE(size)
91 && eeh_dn_check_failure (dn, NULL))
92 return PCIBIOS_DEVICE_NOT_FOUND;
93
94 return PCIBIOS_SUCCESSFUL;
95}
96
97static int rtas_pci_read_config(struct pci_bus *bus,
98 unsigned int devfn,
99 int where, int size, u32 *val)
100{
101 struct device_node *busdn, *dn;
102
103 if (bus->self)
104 busdn = pci_device_to_OF_node(bus->self);
105 else
106 busdn = bus->sysdata; /* must be a phb */
107
108 /* Search only direct children of the bus */
109 for (dn = busdn->child; dn; dn = dn->sibling)
110 if (dn->devfn == devfn)
111 return rtas_read_config(dn, where, size, val);
112 return PCIBIOS_DEVICE_NOT_FOUND;
113}
114
115static int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
116{
117 unsigned long buid, addr;
118 int ret;
119
120 if (!dn)
121 return PCIBIOS_DEVICE_NOT_FOUND;
122 if (!config_access_valid(dn, where))
123 return PCIBIOS_BAD_REGISTER_NUMBER;
124
125 addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
126 (dn->devfn << 8) | (where & 0xff);
127 buid = dn->phb->buid;
128 if (buid) {
129 ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
130 } else {
131 ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
132 }
133
134 if (ret)
135 return PCIBIOS_DEVICE_NOT_FOUND;
136
137 return PCIBIOS_SUCCESSFUL;
138}
139
140static int rtas_pci_write_config(struct pci_bus *bus,
141 unsigned int devfn,
142 int where, int size, u32 val)
143{
144 struct device_node *busdn, *dn;
145
146 if (bus->self)
147 busdn = pci_device_to_OF_node(bus->self);
148 else
149 busdn = bus->sysdata; /* must be a phb */
150
151 /* Search only direct children of the bus */
152 for (dn = busdn->child; dn; dn = dn->sibling)
153 if (dn->devfn == devfn)
154 return rtas_write_config(dn, where, size, val);
155 return PCIBIOS_DEVICE_NOT_FOUND;
156}
157
158struct pci_ops rtas_pci_ops = {
159 rtas_pci_read_config,
160 rtas_pci_write_config
161};
162
163int is_python(struct device_node *dev)
164{
165 char *model = (char *)get_property(dev, "model", NULL);
166
167 if (model && strstr(model, "Python"))
168 return 1;
169
170 return 0;
171}
172
173static int get_phb_reg_prop(struct device_node *dev,
174 unsigned int addr_size_words,
175 struct reg_property64 *reg)
176{
177 unsigned int *ui_ptr = NULL, len;
178
179 /* Found a PHB, now figure out where his registers are mapped. */
180 ui_ptr = (unsigned int *)get_property(dev, "reg", &len);
181 if (ui_ptr == NULL)
182 return 1;
183
184 if (addr_size_words == 1) {
185 reg->address = ((struct reg_property32 *)ui_ptr)->address;
186 reg->size = ((struct reg_property32 *)ui_ptr)->size;
187 } else {
188 *reg = *((struct reg_property64 *)ui_ptr);
189 }
190
191 return 0;
192}
193
194static void python_countermeasures(struct device_node *dev,
195 unsigned int addr_size_words)
196{
197 struct reg_property64 reg_struct;
198 void __iomem *chip_regs;
199 volatile u32 val;
200
201 if (get_phb_reg_prop(dev, addr_size_words, &reg_struct))
202 return;
203
204 /* Python's register file is 1 MB in size. */
205 chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
206
207 /*
208 * Firmware doesn't always clear this bit which is critical
209 * for good performance - Anton
210 */
211
212#define PRG_CL_RESET_VALID 0x00010000
213
214 val = in_be32(chip_regs + 0xf6030);
215 if (val & PRG_CL_RESET_VALID) {
216 printk(KERN_INFO "Python workaround: ");
217 val &= ~PRG_CL_RESET_VALID;
218 out_be32(chip_regs + 0xf6030, val);
219 /*
220 * We must read it back for changes to
221 * take effect
222 */
223 val = in_be32(chip_regs + 0xf6030);
224 printk("reg0: %x\n", val);
225 }
226
227 iounmap(chip_regs);
228}
229
230void __init init_pci_config_tokens (void)
231{
232 read_pci_config = rtas_token("read-pci-config");
233 write_pci_config = rtas_token("write-pci-config");
234 ibm_read_pci_config = rtas_token("ibm,read-pci-config");
235 ibm_write_pci_config = rtas_token("ibm,write-pci-config");
236}
237
238unsigned long __devinit get_phb_buid (struct device_node *phb)
239{
240 int addr_cells;
241 unsigned int *buid_vals;
242 unsigned int len;
243 unsigned long buid;
244
245 if (ibm_read_pci_config == -1) return 0;
246
247 /* PHB's will always be children of the root node,
248 * or so it is promised by the current firmware. */
249 if (phb->parent == NULL)
250 return 0;
251 if (phb->parent->parent)
252 return 0;
253
254 buid_vals = (unsigned int *) get_property(phb, "reg", &len);
255 if (buid_vals == NULL)
256 return 0;
257
258 addr_cells = prom_n_addr_cells(phb);
259 if (addr_cells == 1) {
260 buid = (unsigned long) buid_vals[0];
261 } else {
262 buid = (((unsigned long)buid_vals[0]) << 32UL) |
263 (((unsigned long)buid_vals[1]) & 0xffffffff);
264 }
265 return buid;
266}
267
268static int phb_set_bus_ranges(struct device_node *dev,
269 struct pci_controller *phb)
270{
271 int *bus_range;
272 unsigned int len;
273
274 bus_range = (int *) get_property(dev, "bus-range", &len);
275 if (bus_range == NULL || len < 2 * sizeof(int)) {
276 return 1;
277 }
278
279 phb->first_busno = bus_range[0];
280 phb->last_busno = bus_range[1];
281
282 return 0;
283}
284
285static int __devinit setup_phb(struct device_node *dev,
286 struct pci_controller *phb,
287 unsigned int addr_size_words)
288{
289 pci_setup_pci_controller(phb);
290
291 if (is_python(dev))
292 python_countermeasures(dev, addr_size_words);
293
294 if (phb_set_bus_ranges(dev, phb))
295 return 1;
296
297 phb->arch_data = dev;
298 phb->ops = &rtas_pci_ops;
299 phb->buid = get_phb_buid(dev);
300
301 return 0;
302}
303
304static void __devinit add_linux_pci_domain(struct device_node *dev,
305 struct pci_controller *phb,
306 struct property *of_prop)
307{
308 memset(of_prop, 0, sizeof(struct property));
309 of_prop->name = "linux,pci-domain";
310 of_prop->length = sizeof(phb->global_number);
311 of_prop->value = (unsigned char *)&of_prop[1];
312 memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number));
313 prom_add_property(dev, of_prop);
314}
315
316static struct pci_controller * __init alloc_phb(struct device_node *dev,
317 unsigned int addr_size_words)
318{
319 struct pci_controller *phb;
320 struct property *of_prop;
321
322 phb = alloc_bootmem(sizeof(struct pci_controller));
323 if (phb == NULL)
324 return NULL;
325
326 of_prop = alloc_bootmem(sizeof(struct property) +
327 sizeof(phb->global_number));
328 if (!of_prop)
329 return NULL;
330
331 if (setup_phb(dev, phb, addr_size_words))
332 return NULL;
333
334 add_linux_pci_domain(dev, phb, of_prop);
335
336 return phb;
337}
338
339static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words)
340{
341 struct pci_controller *phb;
342
343 phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
344 GFP_KERNEL);
345 if (phb == NULL)
346 return NULL;
347
348 if (setup_phb(dev, phb, addr_size_words))
349 return NULL;
350
351 phb->is_dynamic = 1;
352
353 /* TODO: linux,pci-domain? */
354
355 return phb;
356}
357
358unsigned long __init find_and_init_phbs(void)
359{
360 struct device_node *node;
361 struct pci_controller *phb;
362 unsigned int root_size_cells = 0;
363 unsigned int index;
364 unsigned int *opprop = NULL;
365 struct device_node *root = of_find_node_by_path("/");
366
367 if (ppc64_interrupt_controller == IC_OPEN_PIC) {
368 opprop = (unsigned int *)get_property(root,
369 "platform-open-pic", NULL);
370 }
371
372 root_size_cells = prom_n_size_cells(root);
373
374 index = 0;
375
376 for (node = of_get_next_child(root, NULL);
377 node != NULL;
378 node = of_get_next_child(root, node)) {
379 if (node->type == NULL || strcmp(node->type, "pci") != 0)
380 continue;
381
382 phb = alloc_phb(node, root_size_cells);
383 if (!phb)
384 continue;
385
386 pci_process_bridge_OF_ranges(phb, node);
387 pci_setup_phb_io(phb, index == 0);
388
389 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
390 int addr = root_size_cells * (index + 2) - 1;
391 mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
392 }
393
394 index++;
395 }
396
397 of_node_put(root);
398 pci_devs_phb_init();
399
400 /*
401 * pci_probe_only and pci_assign_all_buses can be set via properties
402 * in chosen.
403 */
404 if (of_chosen) {
405 int *prop;
406
407 prop = (int *)get_property(of_chosen, "linux,pci-probe-only",
408 NULL);
409 if (prop)
410 pci_probe_only = *prop;
411
412 prop = (int *)get_property(of_chosen,
413 "linux,pci-assign-all-buses", NULL);
414 if (prop)
415 pci_assign_all_buses = *prop;
416 }
417
418 return 0;
419}
420
421struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
422{
423 struct device_node *root = of_find_node_by_path("/");
424 unsigned int root_size_cells = 0;
425 struct pci_controller *phb;
426 struct pci_bus *bus;
427 int primary;
428
429 root_size_cells = prom_n_size_cells(root);
430
431 primary = list_empty(&hose_list);
432 phb = alloc_phb_dynamic(dn, root_size_cells);
433 if (!phb)
434 return NULL;
435
436 pci_process_bridge_OF_ranges(phb, dn);
437
438 pci_setup_phb_io_dynamic(phb, primary);
439 of_node_put(root);
440
441 pci_devs_phb_init_dynamic(phb);
442 phb->last_busno = 0xff;
443 bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data);
444 phb->bus = bus;
445 phb->last_busno = bus->subordinate;
446
447 return phb;
448}
449EXPORT_SYMBOL(init_phb_dynamic);
450 36
451#if 0 37#if 0
452void pcibios_name_device(struct pci_dev *dev) 38void pcibios_name_device(struct pci_dev *dev)
@@ -474,11 +60,12 @@ void pcibios_name_device(struct pci_dev *dev)
474DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); 60DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
475#endif 61#endif
476 62
477static void check_s7a(void) 63static void __init check_s7a(void)
478{ 64{
479 struct device_node *root; 65 struct device_node *root;
480 char *model; 66 char *model;
481 67
68 s7a_workaround = 0;
482 root = of_find_node_by_path("/"); 69 root = of_find_node_by_path("/");
483 if (root) { 70 if (root) {
484 model = get_property(root, "model", NULL); 71 model = get_property(root, "model", NULL);
@@ -488,55 +75,23 @@ static void check_s7a(void)
488 } 75 }
489} 76}
490 77
491/* RPA-specific bits for removing PHBs */ 78void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
492int pcibios_remove_root_bus(struct pci_controller *phb)
493{ 79{
494 struct pci_bus *b = phb->bus; 80 struct pci_dev *dev;
495 struct resource *res;
496 int rc, i;
497
498 res = b->resource[0];
499 if (!res->flags) {
500 printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__,
501 b->name);
502 return 1;
503 }
504
505 rc = unmap_bus_range(b);
506 if (rc) {
507 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
508 __FUNCTION__, b->name);
509 return 1;
510 }
511 81
512 if (release_resource(res)) { 82 if (s7a_workaround < 0)
513 printk(KERN_ERR "%s: failed to release IO on bus %s\n", 83 check_s7a();
514 __FUNCTION__, b->name); 84 list_for_each_entry(dev, &bus->devices, bus_list) {
515 return 1; 85 pci_read_irq_line(dev);
516 } 86 if (s7a_workaround) {
517 87 if (dev->irq > 16) {
518 for (i = 1; i < 3; ++i) { 88 dev->irq -= 3;
519 res = b->resource[i]; 89 pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
520 if (!res->flags && i == 0) { 90 dev->irq);
521 printk(KERN_ERR "%s: no MEM resource for PHB %s\n", 91 }
522 __FUNCTION__, b->name);
523 return 1;
524 }
525 if (res->flags && release_resource(res)) {
526 printk(KERN_ERR
527 "%s: failed to release IO %d on bus %s\n",
528 __FUNCTION__, i, b->name);
529 return 1;
530 } 92 }
531 } 93 }
532
533 list_del(&phb->list_node);
534 if (phb->is_dynamic)
535 kfree(phb);
536
537 return 0;
538} 94}
539EXPORT_SYMBOL(pcibios_remove_root_bus);
540 95
541static void __init pSeries_request_regions(void) 96static void __init pSeries_request_regions(void)
542{ 97{
@@ -553,20 +108,6 @@ static void __init pSeries_request_regions(void)
553 108
554void __init pSeries_final_fixup(void) 109void __init pSeries_final_fixup(void)
555{ 110{
556 struct pci_dev *dev = NULL;
557
558 check_s7a();
559
560 for_each_pci_dev(dev) {
561 pci_read_irq_line(dev);
562 if (s7a_workaround) {
563 if (dev->irq > 16) {
564 dev->irq -= 3;
565 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
566 }
567 }
568 }
569
570 phbs_remap_io(); 111 phbs_remap_io();
571 pSeries_request_regions(); 112 pSeries_request_regions();
572 113
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 6c0d1d58a552..f2b41243342c 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -71,11 +71,6 @@
71#define DBG(fmt...) 71#define DBG(fmt...)
72#endif 72#endif
73 73
74extern void pSeries_final_fixup(void);
75
76extern void pSeries_get_boot_time(struct rtc_time *rtc_time);
77extern void pSeries_get_rtc_time(struct rtc_time *rtc_time);
78extern int pSeries_set_rtc_time(struct rtc_time *rtc_time);
79extern void find_udbg_vterm(void); 74extern void find_udbg_vterm(void);
80extern void system_reset_fwnmi(void); /* from head.S */ 75extern void system_reset_fwnmi(void); /* from head.S */
81extern void machine_check_fwnmi(void); /* from head.S */ 76extern void machine_check_fwnmi(void); /* from head.S */
@@ -84,9 +79,6 @@ extern void generic_find_legacy_serial_ports(u64 *physport,
84 79
85int fwnmi_active; /* TRUE if an FWNMI handler is present */ 80int fwnmi_active; /* TRUE if an FWNMI handler is present */
86 81
87extern unsigned long ppc_proc_freq;
88extern unsigned long ppc_tb_freq;
89
90extern void pSeries_system_reset_exception(struct pt_regs *regs); 82extern void pSeries_system_reset_exception(struct pt_regs *regs);
91extern int pSeries_machine_check_exception(struct pt_regs *regs); 83extern int pSeries_machine_check_exception(struct pt_regs *regs);
92 84
@@ -381,171 +373,6 @@ static void __init pSeries_init_early(void)
381} 373}
382 374
383 375
384static void pSeries_progress(char *s, unsigned short hex)
385{
386 struct device_node *root;
387 int width, *p;
388 char *os;
389 static int display_character, set_indicator;
390 static int max_width;
391 static DEFINE_SPINLOCK(progress_lock);
392 static int pending_newline = 0; /* did last write end with unprinted newline? */
393
394 if (!rtas.base)
395 return;
396
397 if (max_width == 0) {
398 if ((root = find_path_device("/rtas")) &&
399 (p = (unsigned int *)get_property(root,
400 "ibm,display-line-length",
401 NULL)))
402 max_width = *p;
403 else
404 max_width = 0x10;
405 display_character = rtas_token("display-character");
406 set_indicator = rtas_token("set-indicator");
407 }
408
409 if (display_character == RTAS_UNKNOWN_SERVICE) {
410 /* use hex display if available */
411 if (set_indicator != RTAS_UNKNOWN_SERVICE)
412 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
413 return;
414 }
415
416 spin_lock(&progress_lock);
417
418 /*
419 * Last write ended with newline, but we didn't print it since
420 * it would just clear the bottom line of output. Print it now
421 * instead.
422 *
423 * If no newline is pending, print a CR to start output at the
424 * beginning of the line.
425 */
426 if (pending_newline) {
427 rtas_call(display_character, 1, 1, NULL, '\r');
428 rtas_call(display_character, 1, 1, NULL, '\n');
429 pending_newline = 0;
430 } else {
431 rtas_call(display_character, 1, 1, NULL, '\r');
432 }
433
434 width = max_width;
435 os = s;
436 while (*os) {
437 if (*os == '\n' || *os == '\r') {
438 /* Blank to end of line. */
439 while (width-- > 0)
440 rtas_call(display_character, 1, 1, NULL, ' ');
441
442 /* If newline is the last character, save it
443 * until next call to avoid bumping up the
444 * display output.
445 */
446 if (*os == '\n' && !os[1]) {
447 pending_newline = 1;
448 spin_unlock(&progress_lock);
449 return;
450 }
451
452 /* RTAS wants CR-LF, not just LF */
453
454 if (*os == '\n') {
455 rtas_call(display_character, 1, 1, NULL, '\r');
456 rtas_call(display_character, 1, 1, NULL, '\n');
457 } else {
458 /* CR might be used to re-draw a line, so we'll
459 * leave it alone and not add LF.
460 */
461 rtas_call(display_character, 1, 1, NULL, *os);
462 }
463
464 width = max_width;
465 } else {
466 width--;
467 rtas_call(display_character, 1, 1, NULL, *os);
468 }
469
470 os++;
471
472 /* if we overwrite the screen length */
473 if (width <= 0)
474 while ((*os != 0) && (*os != '\n') && (*os != '\r'))
475 os++;
476 }
477
478 /* Blank to end of line. */
479 while (width-- > 0)
480 rtas_call(display_character, 1, 1, NULL, ' ');
481
482 spin_unlock(&progress_lock);
483}
484
485extern void setup_default_decr(void);
486
487/* Some sane defaults: 125 MHz timebase, 1GHz processor */
488#define DEFAULT_TB_FREQ 125000000UL
489#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
490
491static void __init pSeries_calibrate_decr(void)
492{
493 struct device_node *cpu;
494 struct div_result divres;
495 unsigned int *fp;
496 int node_found;
497
498 /*
499 * The cpu node should have a timebase-frequency property
500 * to tell us the rate at which the decrementer counts.
501 */
502 cpu = of_find_node_by_type(NULL, "cpu");
503
504 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
505 node_found = 0;
506 if (cpu != 0) {
507 fp = (unsigned int *)get_property(cpu, "timebase-frequency",
508 NULL);
509 if (fp != 0) {
510 node_found = 1;
511 ppc_tb_freq = *fp;
512 }
513 }
514 if (!node_found)
515 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
516 "(not found)\n");
517
518 ppc_proc_freq = DEFAULT_PROC_FREQ;
519 node_found = 0;
520 if (cpu != 0) {
521 fp = (unsigned int *)get_property(cpu, "clock-frequency",
522 NULL);
523 if (fp != 0) {
524 node_found = 1;
525 ppc_proc_freq = *fp;
526 }
527 }
528 if (!node_found)
529 printk(KERN_ERR "WARNING: Estimating processor frequency "
530 "(not found)\n");
531
532 of_node_put(cpu);
533
534 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
535 ppc_tb_freq/1000000, ppc_tb_freq%1000000);
536 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
537 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
538
539 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
540 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
541 tb_ticks_per_usec = ppc_tb_freq / 1000000;
542 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
543 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
544 tb_to_xs = divres.result_low;
545
546 setup_default_decr();
547}
548
549static int pSeries_check_legacy_ioport(unsigned int baseport) 376static int pSeries_check_legacy_ioport(unsigned int baseport)
550{ 377{
551 struct device_node *np; 378 struct device_node *np;
@@ -596,16 +423,17 @@ struct machdep_calls __initdata pSeries_md = {
596 .get_cpuinfo = pSeries_get_cpuinfo, 423 .get_cpuinfo = pSeries_get_cpuinfo,
597 .log_error = pSeries_log_error, 424 .log_error = pSeries_log_error,
598 .pcibios_fixup = pSeries_final_fixup, 425 .pcibios_fixup = pSeries_final_fixup,
426 .irq_bus_setup = pSeries_irq_bus_setup,
599 .restart = rtas_restart, 427 .restart = rtas_restart,
600 .power_off = rtas_power_off, 428 .power_off = rtas_power_off,
601 .halt = rtas_halt, 429 .halt = rtas_halt,
602 .panic = rtas_os_term, 430 .panic = rtas_os_term,
603 .cpu_die = pSeries_mach_cpu_die, 431 .cpu_die = pSeries_mach_cpu_die,
604 .get_boot_time = pSeries_get_boot_time, 432 .get_boot_time = rtas_get_boot_time,
605 .get_rtc_time = pSeries_get_rtc_time, 433 .get_rtc_time = rtas_get_rtc_time,
606 .set_rtc_time = pSeries_set_rtc_time, 434 .set_rtc_time = rtas_set_rtc_time,
607 .calibrate_decr = pSeries_calibrate_decr, 435 .calibrate_decr = generic_calibrate_decr,
608 .progress = pSeries_progress, 436 .progress = rtas_progress,
609 .check_legacy_ioport = pSeries_check_legacy_ioport, 437 .check_legacy_ioport = pSeries_check_legacy_ioport,
610 .system_reset_exception = pSeries_system_reset_exception, 438 .system_reset_exception = pSeries_system_reset_exception,
611 .machine_check_exception = pSeries_machine_check_exception, 439 .machine_check_exception = pSeries_machine_check_exception,
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c
index 4203bd020c82..30154140f7e2 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/ppc64/kernel/pSeries_smp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * SMP support for pSeries machines. 2 * SMP support for pSeries and BPA machines.
3 * 3 *
4 * Dave Engebretsen, Peter Bergner, and 4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com 5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
@@ -47,6 +47,7 @@
47#include <asm/pSeries_reconfig.h> 47#include <asm/pSeries_reconfig.h>
48 48
49#include "mpic.h" 49#include "mpic.h"
50#include "bpa_iic.h"
50 51
51#ifdef DEBUG 52#ifdef DEBUG
52#define DBG(fmt...) udbg_printf(fmt) 53#define DBG(fmt...) udbg_printf(fmt)
@@ -286,6 +287,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
286 return 1; 287 return 1;
287} 288}
288 289
290#ifdef CONFIG_XICS
289static inline void smp_xics_do_message(int cpu, int msg) 291static inline void smp_xics_do_message(int cpu, int msg)
290{ 292{
291 set_bit(msg, &xics_ipi_message[cpu].value); 293 set_bit(msg, &xics_ipi_message[cpu].value);
@@ -327,6 +329,37 @@ static void __devinit smp_xics_setup_cpu(int cpu)
327 cpu_clear(cpu, of_spin_map); 329 cpu_clear(cpu, of_spin_map);
328 330
329} 331}
332#endif /* CONFIG_XICS */
333#ifdef CONFIG_BPA_IIC
334static void smp_iic_message_pass(int target, int msg)
335{
336 unsigned int i;
337
338 if (target < NR_CPUS) {
339 iic_cause_IPI(target, msg);
340 } else {
341 for_each_online_cpu(i) {
342 if (target == MSG_ALL_BUT_SELF
343 && i == smp_processor_id())
344 continue;
345 iic_cause_IPI(i, msg);
346 }
347 }
348}
349
350static int __init smp_iic_probe(void)
351{
352 iic_request_IPIs();
353
354 return cpus_weight(cpu_possible_map);
355}
356
357static void __devinit smp_iic_setup_cpu(int cpu)
358{
359 if (cpu != boot_cpuid)
360 iic_setup_cpu();
361}
362#endif /* CONFIG_BPA_IIC */
330 363
331static DEFINE_SPINLOCK(timebase_lock); 364static DEFINE_SPINLOCK(timebase_lock);
332static unsigned long timebase = 0; 365static unsigned long timebase = 0;
@@ -381,14 +414,15 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
381 414
382 return 1; 415 return 1;
383} 416}
384 417#ifdef CONFIG_MPIC
385static struct smp_ops_t pSeries_mpic_smp_ops = { 418static struct smp_ops_t pSeries_mpic_smp_ops = {
386 .message_pass = smp_mpic_message_pass, 419 .message_pass = smp_mpic_message_pass,
387 .probe = smp_mpic_probe, 420 .probe = smp_mpic_probe,
388 .kick_cpu = smp_pSeries_kick_cpu, 421 .kick_cpu = smp_pSeries_kick_cpu,
389 .setup_cpu = smp_mpic_setup_cpu, 422 .setup_cpu = smp_mpic_setup_cpu,
390}; 423};
391 424#endif
425#ifdef CONFIG_XICS
392static struct smp_ops_t pSeries_xics_smp_ops = { 426static struct smp_ops_t pSeries_xics_smp_ops = {
393 .message_pass = smp_xics_message_pass, 427 .message_pass = smp_xics_message_pass,
394 .probe = smp_xics_probe, 428 .probe = smp_xics_probe,
@@ -396,6 +430,16 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
396 .setup_cpu = smp_xics_setup_cpu, 430 .setup_cpu = smp_xics_setup_cpu,
397 .cpu_bootable = smp_pSeries_cpu_bootable, 431 .cpu_bootable = smp_pSeries_cpu_bootable,
398}; 432};
433#endif
434#ifdef CONFIG_BPA_IIC
435static struct smp_ops_t bpa_iic_smp_ops = {
436 .message_pass = smp_iic_message_pass,
437 .probe = smp_iic_probe,
438 .kick_cpu = smp_pSeries_kick_cpu,
439 .setup_cpu = smp_iic_setup_cpu,
440 .cpu_bootable = smp_pSeries_cpu_bootable,
441};
442#endif
399 443
400/* This is called very early */ 444/* This is called very early */
401void __init smp_init_pSeries(void) 445void __init smp_init_pSeries(void)
@@ -404,10 +448,25 @@ void __init smp_init_pSeries(void)
404 448
405 DBG(" -> smp_init_pSeries()\n"); 449 DBG(" -> smp_init_pSeries()\n");
406 450
407 if (ppc64_interrupt_controller == IC_OPEN_PIC) 451 switch (ppc64_interrupt_controller) {
452#ifdef CONFIG_MPIC
453 case IC_OPEN_PIC:
408 smp_ops = &pSeries_mpic_smp_ops; 454 smp_ops = &pSeries_mpic_smp_ops;
409 else 455 break;
456#endif
457#ifdef CONFIG_XICS
458 case IC_PPC_XIC:
410 smp_ops = &pSeries_xics_smp_ops; 459 smp_ops = &pSeries_xics_smp_ops;
460 break;
461#endif
462#ifdef CONFIG_BPA_IIC
463 case IC_BPA_IIC:
464 smp_ops = &bpa_iic_smp_ops;
465 break;
466#endif
467 default:
468 panic("Invalid interrupt controller");
469 }
411 470
412#ifdef CONFIG_HOTPLUG_CPU 471#ifdef CONFIG_HOTPLUG_CPU
413 smp_ops->cpu_disable = pSeries_cpu_disable; 472 smp_ops->cpu_disable = pSeries_cpu_disable;
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index 2bf0513f3eca..580676f87d23 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -902,6 +902,9 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
902 list_for_each_entry(dev, &bus->devices, bus_list) 902 list_for_each_entry(dev, &bus->devices, bus_list)
903 ppc_md.iommu_dev_setup(dev); 903 ppc_md.iommu_dev_setup(dev);
904 904
905 if (ppc_md.irq_bus_setup)
906 ppc_md.irq_bus_setup(bus);
907
905 if (!pci_probe_only) 908 if (!pci_probe_only)
906 return; 909 return;
907 910
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h
index 0fd7d849aa77..26be78b13af1 100644
--- a/arch/ppc64/kernel/pci.h
+++ b/arch/ppc64/kernel/pci.h
@@ -40,10 +40,14 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev);
40void pci_addr_cache_insert_device(struct pci_dev *dev); 40void pci_addr_cache_insert_device(struct pci_dev *dev);
41void pci_addr_cache_remove_device(struct pci_dev *dev); 41void pci_addr_cache_remove_device(struct pci_dev *dev);
42 42
43/* From pSeries_pci.h */ 43/* From rtas_pci.h */
44void init_pci_config_tokens (void); 44void init_pci_config_tokens (void);
45unsigned long get_phb_buid (struct device_node *); 45unsigned long get_phb_buid (struct device_node *);
46 46
47/* From pSeries_pci.h */
48extern void pSeries_final_fixup(void);
49extern void pSeries_irq_bus_setup(struct pci_bus *bus);
50
47extern unsigned long pci_probe_only; 51extern unsigned long pci_probe_only;
48extern unsigned long pci_assign_all_buses; 52extern unsigned long pci_assign_all_buses;
49extern int pci_read_irq_line(struct pci_dev *pci_dev); 53extern int pci_read_irq_line(struct pci_dev *pci_dev);
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
index f24827581dd7..3059edb09cc8 100644
--- a/arch/ppc64/kernel/pmac_time.c
+++ b/arch/ppc64/kernel/pmac_time.c
@@ -40,11 +40,6 @@
40#define DBG(x...) 40#define DBG(x...)
41#endif 41#endif
42 42
43extern void setup_default_decr(void);
44
45extern unsigned long ppc_tb_freq;
46extern unsigned long ppc_proc_freq;
47
48/* Apparently the RTC stores seconds since 1 Jan 1904 */ 43/* Apparently the RTC stores seconds since 1 Jan 1904 */
49#define RTC_OFFSET 2082844800 44#define RTC_OFFSET 2082844800
50 45
@@ -161,8 +156,7 @@ void __init pmac_get_boot_time(struct rtc_time *tm)
161 156
162/* 157/*
163 * Query the OF and get the decr frequency. 158 * Query the OF and get the decr frequency.
164 * This was taken from the pmac time_init() when merging the prep/pmac 159 * FIXME: merge this with generic_calibrate_decr
165 * time functions.
166 */ 160 */
167void __init pmac_calibrate_decr(void) 161void __init pmac_calibrate_decr(void)
168{ 162{
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/ppc64/kernel/proc_ppc64.c
index 0914b0669b05..a87c66a9652a 100644
--- a/arch/ppc64/kernel/proc_ppc64.c
+++ b/arch/ppc64/kernel/proc_ppc64.c
@@ -53,7 +53,7 @@ static int __init proc_ppc64_create(void)
53 if (!root) 53 if (!root)
54 return 1; 54 return 1;
55 55
56 if (!(systemcfg->platform & PLATFORM_PSERIES)) 56 if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_BPA)))
57 return 0; 57 return 0;
58 58
59 if (!proc_mkdir("rtas", root)) 59 if (!proc_mkdir("rtas", root))
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index b7683abfbe6a..e248a7950aeb 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1915,9 +1915,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
1915 prom_send_capabilities(); 1915 prom_send_capabilities();
1916 1916
1917 /* 1917 /*
1918 * On pSeries, copy the CPU hold code 1918 * On pSeries and BPA, copy the CPU hold code
1919 */ 1919 */
1920 if (RELOC(of_platform) & PLATFORM_PSERIES) 1920 if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA))
1921 copy_and_flush(0, KERNELBASE - offset, 0x100, 0); 1921 copy_and_flush(0, KERNELBASE - offset, 0x100, 0);
1922 1922
1923 /* 1923 /*
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
index 9f8c6087ae56..2993f108d96d 100644
--- a/arch/ppc64/kernel/ptrace.c
+++ b/arch/ppc64/kernel/ptrace.c
@@ -305,6 +305,8 @@ static void do_syscall_trace(void)
305 305
306void do_syscall_trace_enter(struct pt_regs *regs) 306void do_syscall_trace_enter(struct pt_regs *regs)
307{ 307{
308 secure_computing(regs->gpr[0]);
309
308 if (test_thread_flag(TIF_SYSCALL_TRACE) 310 if (test_thread_flag(TIF_SYSCALL_TRACE)
309 && (current->ptrace & PT_PTRACED)) 311 && (current->ptrace & PT_PTRACED))
310 do_syscall_trace(); 312 do_syscall_trace();
@@ -320,8 +322,6 @@ void do_syscall_trace_enter(struct pt_regs *regs)
320 322
321void do_syscall_trace_leave(struct pt_regs *regs) 323void do_syscall_trace_leave(struct pt_regs *regs)
322{ 324{
323 secure_computing(regs->gpr[0]);
324
325 if (unlikely(current->audit_context)) 325 if (unlikely(current->audit_context))
326 audit_syscall_exit(current, 326 audit_syscall_exit(current,
327 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 327 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c
index 28b1f1521f21..1f3ff860fdf0 100644
--- a/arch/ppc64/kernel/rtas-proc.c
+++ b/arch/ppc64/kernel/rtas-proc.c
@@ -371,11 +371,11 @@ static ssize_t ppc_rtas_progress_write(struct file *file,
371 /* Lets see if the user passed hexdigits */ 371 /* Lets see if the user passed hexdigits */
372 hex = simple_strtoul(progress_led, NULL, 10); 372 hex = simple_strtoul(progress_led, NULL, 10);
373 373
374 ppc_md.progress ((char *)progress_led, hex); 374 rtas_progress ((char *)progress_led, hex);
375 return count; 375 return count;
376 376
377 /* clear the line */ 377 /* clear the line */
378 /* ppc_md.progress(" ", 0xffff);*/ 378 /* rtas_progress(" ", 0xffff);*/
379} 379}
380/* ****************************************************************** */ 380/* ****************************************************************** */
381static int ppc_rtas_progress_show(struct seq_file *m, void *v) 381static int ppc_rtas_progress_show(struct seq_file *m, void *v)
diff --git a/arch/ppc64/kernel/rtas.c b/arch/ppc64/kernel/rtas.c
index 5575603def27..5e8eb33b8e54 100644
--- a/arch/ppc64/kernel/rtas.c
+++ b/arch/ppc64/kernel/rtas.c
@@ -91,6 +91,123 @@ call_rtas_display_status_delay(unsigned char c)
91 } 91 }
92} 92}
93 93
94void
95rtas_progress(char *s, unsigned short hex)
96{
97 struct device_node *root;
98 int width, *p;
99 char *os;
100 static int display_character, set_indicator;
101 static int display_width, display_lines, *row_width, form_feed;
102 static DEFINE_SPINLOCK(progress_lock);
103 static int current_line;
104 static int pending_newline = 0; /* did last write end with unprinted newline? */
105
106 if (!rtas.base)
107 return;
108
109 if (display_width == 0) {
110 display_width = 0x10;
111 if ((root = find_path_device("/rtas"))) {
112 if ((p = (unsigned int *)get_property(root,
113 "ibm,display-line-length", NULL)))
114 display_width = *p;
115 if ((p = (unsigned int *)get_property(root,
116 "ibm,form-feed", NULL)))
117 form_feed = *p;
118 if ((p = (unsigned int *)get_property(root,
119 "ibm,display-number-of-lines", NULL)))
120 display_lines = *p;
121 row_width = (unsigned int *)get_property(root,
122 "ibm,display-truncation-length", NULL);
123 }
124 display_character = rtas_token("display-character");
125 set_indicator = rtas_token("set-indicator");
126 }
127
128 if (display_character == RTAS_UNKNOWN_SERVICE) {
129 /* use hex display if available */
130 if (set_indicator != RTAS_UNKNOWN_SERVICE)
131 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
132 return;
133 }
134
135 spin_lock(&progress_lock);
136
137 /*
138 * Last write ended with newline, but we didn't print it since
139 * it would just clear the bottom line of output. Print it now
140 * instead.
141 *
142 * If no newline is pending and form feed is supported, clear the
143 * display with a form feed; otherwise, print a CR to start output
144 * at the beginning of the line.
145 */
146 if (pending_newline) {
147 rtas_call(display_character, 1, 1, NULL, '\r');
148 rtas_call(display_character, 1, 1, NULL, '\n');
149 pending_newline = 0;
150 } else {
151 current_line = 0;
152 if (form_feed)
153 rtas_call(display_character, 1, 1, NULL,
154 (char)form_feed);
155 else
156 rtas_call(display_character, 1, 1, NULL, '\r');
157 }
158
159 if (row_width)
160 width = row_width[current_line];
161 else
162 width = display_width;
163 os = s;
164 while (*os) {
165 if (*os == '\n' || *os == '\r') {
166 /* If newline is the last character, save it
167 * until next call to avoid bumping up the
168 * display output.
169 */
170 if (*os == '\n' && !os[1]) {
171 pending_newline = 1;
172 current_line++;
173 if (current_line > display_lines-1)
174 current_line = display_lines-1;
175 spin_unlock(&progress_lock);
176 return;
177 }
178
179 /* RTAS wants CR-LF, not just LF */
180
181 if (*os == '\n') {
182 rtas_call(display_character, 1, 1, NULL, '\r');
183 rtas_call(display_character, 1, 1, NULL, '\n');
184 } else {
185 /* CR might be used to re-draw a line, so we'll
186 * leave it alone and not add LF.
187 */
188 rtas_call(display_character, 1, 1, NULL, *os);
189 }
190
191 if (row_width)
192 width = row_width[current_line];
193 else
194 width = display_width;
195 } else {
196 width--;
197 rtas_call(display_character, 1, 1, NULL, *os);
198 }
199
200 os++;
201
202 /* if we overwrite the screen length */
203 if (width <= 0)
204 while ((*os != 0) && (*os != '\n') && (*os != '\r'))
205 os++;
206 }
207
208 spin_unlock(&progress_lock);
209}
210
94int 211int
95rtas_token(const char *service) 212rtas_token(const char *service)
96{ 213{
@@ -425,8 +542,8 @@ rtas_flash_firmware(void)
425 542
426 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); 543 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
427 printk(KERN_ALERT "FLASH: performing flash and reboot\n"); 544 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
428 ppc_md.progress("Flashing \n", 0x0); 545 rtas_progress("Flashing \n", 0x0);
429 ppc_md.progress("Please Wait... ", 0x0); 546 rtas_progress("Please Wait... ", 0x0);
430 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n"); 547 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
431 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list); 548 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
432 switch (status) { /* should only get "bad" status */ 549 switch (status) { /* should only get "bad" status */
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
new file mode 100644
index 000000000000..1048817befb8
--- /dev/null
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -0,0 +1,495 @@
1/*
2 * arch/ppc64/kernel/rtas_pci.c
3 *
4 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 *
7 * RTAS specific routines for PCI.
8 *
9 * Based on code from pci.c, chrp_pci.c and pSeries_pci.c
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/threads.h>
28#include <linux/pci.h>
29#include <linux/string.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32
33#include <asm/io.h>
34#include <asm/pgtable.h>
35#include <asm/irq.h>
36#include <asm/prom.h>
37#include <asm/machdep.h>
38#include <asm/pci-bridge.h>
39#include <asm/iommu.h>
40#include <asm/rtas.h>
41
42#include "mpic.h"
43#include "pci.h"
44
45/* RTAS tokens */
46static int read_pci_config;
47static int write_pci_config;
48static int ibm_read_pci_config;
49static int ibm_write_pci_config;
50
51static int config_access_valid(struct device_node *dn, int where)
52{
53 if (where < 256)
54 return 1;
55 if (where < 4096 && dn->pci_ext_config_space)
56 return 1;
57
58 return 0;
59}
60
61static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
62{
63 int returnval = -1;
64 unsigned long buid, addr;
65 int ret;
66
67 if (!dn)
68 return PCIBIOS_DEVICE_NOT_FOUND;
69 if (!config_access_valid(dn, where))
70 return PCIBIOS_BAD_REGISTER_NUMBER;
71
72 addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
73 (dn->devfn << 8) | (where & 0xff);
74 buid = dn->phb->buid;
75 if (buid) {
76 ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
77 addr, buid >> 32, buid & 0xffffffff, size);
78 } else {
79 ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
80 }
81 *val = returnval;
82
83 if (ret)
84 return PCIBIOS_DEVICE_NOT_FOUND;
85
86 if (returnval == EEH_IO_ERROR_VALUE(size)
87 && eeh_dn_check_failure (dn, NULL))
88 return PCIBIOS_DEVICE_NOT_FOUND;
89
90 return PCIBIOS_SUCCESSFUL;
91}
92
93static int rtas_pci_read_config(struct pci_bus *bus,
94 unsigned int devfn,
95 int where, int size, u32 *val)
96{
97 struct device_node *busdn, *dn;
98
99 if (bus->self)
100 busdn = pci_device_to_OF_node(bus->self);
101 else
102 busdn = bus->sysdata; /* must be a phb */
103
104 /* Search only direct children of the bus */
105 for (dn = busdn->child; dn; dn = dn->sibling)
106 if (dn->devfn == devfn)
107 return rtas_read_config(dn, where, size, val);
108 return PCIBIOS_DEVICE_NOT_FOUND;
109}
110
111static int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
112{
113 unsigned long buid, addr;
114 int ret;
115
116 if (!dn)
117 return PCIBIOS_DEVICE_NOT_FOUND;
118 if (!config_access_valid(dn, where))
119 return PCIBIOS_BAD_REGISTER_NUMBER;
120
121 addr = ((where & 0xf00) << 20) | (dn->busno << 16) |
122 (dn->devfn << 8) | (where & 0xff);
123 buid = dn->phb->buid;
124 if (buid) {
125 ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
126 } else {
127 ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
128 }
129
130 if (ret)
131 return PCIBIOS_DEVICE_NOT_FOUND;
132
133 return PCIBIOS_SUCCESSFUL;
134}
135
136static int rtas_pci_write_config(struct pci_bus *bus,
137 unsigned int devfn,
138 int where, int size, u32 val)
139{
140 struct device_node *busdn, *dn;
141
142 if (bus->self)
143 busdn = pci_device_to_OF_node(bus->self);
144 else
145 busdn = bus->sysdata; /* must be a phb */
146
147 /* Search only direct children of the bus */
148 for (dn = busdn->child; dn; dn = dn->sibling)
149 if (dn->devfn == devfn)
150 return rtas_write_config(dn, where, size, val);
151 return PCIBIOS_DEVICE_NOT_FOUND;
152}
153
154struct pci_ops rtas_pci_ops = {
155 rtas_pci_read_config,
156 rtas_pci_write_config
157};
158
159int is_python(struct device_node *dev)
160{
161 char *model = (char *)get_property(dev, "model", NULL);
162
163 if (model && strstr(model, "Python"))
164 return 1;
165
166 return 0;
167}
168
169static int get_phb_reg_prop(struct device_node *dev,
170 unsigned int addr_size_words,
171 struct reg_property64 *reg)
172{
173 unsigned int *ui_ptr = NULL, len;
174
175 /* Found a PHB, now figure out where his registers are mapped. */
176 ui_ptr = (unsigned int *)get_property(dev, "reg", &len);
177 if (ui_ptr == NULL)
178 return 1;
179
180 if (addr_size_words == 1) {
181 reg->address = ((struct reg_property32 *)ui_ptr)->address;
182 reg->size = ((struct reg_property32 *)ui_ptr)->size;
183 } else {
184 *reg = *((struct reg_property64 *)ui_ptr);
185 }
186
187 return 0;
188}
189
190static void python_countermeasures(struct device_node *dev,
191 unsigned int addr_size_words)
192{
193 struct reg_property64 reg_struct;
194 void __iomem *chip_regs;
195 volatile u32 val;
196
197 if (get_phb_reg_prop(dev, addr_size_words, &reg_struct))
198 return;
199
200 /* Python's register file is 1 MB in size. */
201 chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
202
203 /*
204 * Firmware doesn't always clear this bit which is critical
205 * for good performance - Anton
206 */
207
208#define PRG_CL_RESET_VALID 0x00010000
209
210 val = in_be32(chip_regs + 0xf6030);
211 if (val & PRG_CL_RESET_VALID) {
212 printk(KERN_INFO "Python workaround: ");
213 val &= ~PRG_CL_RESET_VALID;
214 out_be32(chip_regs + 0xf6030, val);
215 /*
216 * We must read it back for changes to
217 * take effect
218 */
219 val = in_be32(chip_regs + 0xf6030);
220 printk("reg0: %x\n", val);
221 }
222
223 iounmap(chip_regs);
224}
225
226void __init init_pci_config_tokens (void)
227{
228 read_pci_config = rtas_token("read-pci-config");
229 write_pci_config = rtas_token("write-pci-config");
230 ibm_read_pci_config = rtas_token("ibm,read-pci-config");
231 ibm_write_pci_config = rtas_token("ibm,write-pci-config");
232}
233
234unsigned long __devinit get_phb_buid (struct device_node *phb)
235{
236 int addr_cells;
237 unsigned int *buid_vals;
238 unsigned int len;
239 unsigned long buid;
240
241 if (ibm_read_pci_config == -1) return 0;
242
243 /* PHB's will always be children of the root node,
244 * or so it is promised by the current firmware. */
245 if (phb->parent == NULL)
246 return 0;
247 if (phb->parent->parent)
248 return 0;
249
250 buid_vals = (unsigned int *) get_property(phb, "reg", &len);
251 if (buid_vals == NULL)
252 return 0;
253
254 addr_cells = prom_n_addr_cells(phb);
255 if (addr_cells == 1) {
256 buid = (unsigned long) buid_vals[0];
257 } else {
258 buid = (((unsigned long)buid_vals[0]) << 32UL) |
259 (((unsigned long)buid_vals[1]) & 0xffffffff);
260 }
261 return buid;
262}
263
264static int phb_set_bus_ranges(struct device_node *dev,
265 struct pci_controller *phb)
266{
267 int *bus_range;
268 unsigned int len;
269
270 bus_range = (int *) get_property(dev, "bus-range", &len);
271 if (bus_range == NULL || len < 2 * sizeof(int)) {
272 return 1;
273 }
274
275 phb->first_busno = bus_range[0];
276 phb->last_busno = bus_range[1];
277
278 return 0;
279}
280
281static int __devinit setup_phb(struct device_node *dev,
282 struct pci_controller *phb,
283 unsigned int addr_size_words)
284{
285 pci_setup_pci_controller(phb);
286
287 if (is_python(dev))
288 python_countermeasures(dev, addr_size_words);
289
290 if (phb_set_bus_ranges(dev, phb))
291 return 1;
292
293 phb->arch_data = dev;
294 phb->ops = &rtas_pci_ops;
295 phb->buid = get_phb_buid(dev);
296
297 return 0;
298}
299
300static void __devinit add_linux_pci_domain(struct device_node *dev,
301 struct pci_controller *phb,
302 struct property *of_prop)
303{
304 memset(of_prop, 0, sizeof(struct property));
305 of_prop->name = "linux,pci-domain";
306 of_prop->length = sizeof(phb->global_number);
307 of_prop->value = (unsigned char *)&of_prop[1];
308 memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number));
309 prom_add_property(dev, of_prop);
310}
311
312static struct pci_controller * __init alloc_phb(struct device_node *dev,
313 unsigned int addr_size_words)
314{
315 struct pci_controller *phb;
316 struct property *of_prop;
317
318 phb = alloc_bootmem(sizeof(struct pci_controller));
319 if (phb == NULL)
320 return NULL;
321
322 of_prop = alloc_bootmem(sizeof(struct property) +
323 sizeof(phb->global_number));
324 if (!of_prop)
325 return NULL;
326
327 if (setup_phb(dev, phb, addr_size_words))
328 return NULL;
329
330 add_linux_pci_domain(dev, phb, of_prop);
331
332 return phb;
333}
334
335static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words)
336{
337 struct pci_controller *phb;
338
339 phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller),
340 GFP_KERNEL);
341 if (phb == NULL)
342 return NULL;
343
344 if (setup_phb(dev, phb, addr_size_words))
345 return NULL;
346
347 phb->is_dynamic = 1;
348
349 /* TODO: linux,pci-domain? */
350
351 return phb;
352}
353
354unsigned long __init find_and_init_phbs(void)
355{
356 struct device_node *node;
357 struct pci_controller *phb;
358 unsigned int root_size_cells = 0;
359 unsigned int index;
360 unsigned int *opprop = NULL;
361 struct device_node *root = of_find_node_by_path("/");
362
363 if (ppc64_interrupt_controller == IC_OPEN_PIC) {
364 opprop = (unsigned int *)get_property(root,
365 "platform-open-pic", NULL);
366 }
367
368 root_size_cells = prom_n_size_cells(root);
369
370 index = 0;
371
372 for (node = of_get_next_child(root, NULL);
373 node != NULL;
374 node = of_get_next_child(root, node)) {
375 if (node->type == NULL || strcmp(node->type, "pci") != 0)
376 continue;
377
378 phb = alloc_phb(node, root_size_cells);
379 if (!phb)
380 continue;
381
382 pci_process_bridge_OF_ranges(phb, node);
383 pci_setup_phb_io(phb, index == 0);
384#ifdef CONFIG_PPC_PSERIES
385 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
386 int addr = root_size_cells * (index + 2) - 1;
387 mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
388 }
389#endif
390 index++;
391 }
392
393 of_node_put(root);
394 pci_devs_phb_init();
395
396 /*
397 * pci_probe_only and pci_assign_all_buses can be set via properties
398 * in chosen.
399 */
400 if (of_chosen) {
401 int *prop;
402
403 prop = (int *)get_property(of_chosen, "linux,pci-probe-only",
404 NULL);
405 if (prop)
406 pci_probe_only = *prop;
407
408 prop = (int *)get_property(of_chosen,
409 "linux,pci-assign-all-buses", NULL);
410 if (prop)
411 pci_assign_all_buses = *prop;
412 }
413
414 return 0;
415}
416
417struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
418{
419 struct device_node *root = of_find_node_by_path("/");
420 unsigned int root_size_cells = 0;
421 struct pci_controller *phb;
422 struct pci_bus *bus;
423 int primary;
424
425 root_size_cells = prom_n_size_cells(root);
426
427 primary = list_empty(&hose_list);
428 phb = alloc_phb_dynamic(dn, root_size_cells);
429 if (!phb)
430 return NULL;
431
432 pci_process_bridge_OF_ranges(phb, dn);
433
434 pci_setup_phb_io_dynamic(phb, primary);
435 of_node_put(root);
436
437 pci_devs_phb_init_dynamic(phb);
438 phb->last_busno = 0xff;
439 bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data);
440 phb->bus = bus;
441 phb->last_busno = bus->subordinate;
442
443 return phb;
444}
445EXPORT_SYMBOL(init_phb_dynamic);
446
447/* RPA-specific bits for removing PHBs */
448int pcibios_remove_root_bus(struct pci_controller *phb)
449{
450 struct pci_bus *b = phb->bus;
451 struct resource *res;
452 int rc, i;
453
454 res = b->resource[0];
455 if (!res->flags) {
456 printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__,
457 b->name);
458 return 1;
459 }
460
461 rc = unmap_bus_range(b);
462 if (rc) {
463 printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
464 __FUNCTION__, b->name);
465 return 1;
466 }
467
468 if (release_resource(res)) {
469 printk(KERN_ERR "%s: failed to release IO on bus %s\n",
470 __FUNCTION__, b->name);
471 return 1;
472 }
473
474 for (i = 1; i < 3; ++i) {
475 res = b->resource[i];
476 if (!res->flags && i == 0) {
477 printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
478 __FUNCTION__, b->name);
479 return 1;
480 }
481 if (res->flags && release_resource(res)) {
482 printk(KERN_ERR
483 "%s: failed to release IO %d on bus %s\n",
484 __FUNCTION__, i, b->name);
485 return 1;
486 }
487 }
488
489 list_del(&phb->list_node);
490 if (phb->is_dynamic)
491 kfree(phb);
492
493 return 0;
494}
495EXPORT_SYMBOL(pcibios_remove_root_bus);
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index de02aedbe080..d729fefa0df5 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -301,7 +301,7 @@ void iSeries_get_boot_time(struct rtc_time *tm)
301#ifdef CONFIG_PPC_RTAS 301#ifdef CONFIG_PPC_RTAS
302#define MAX_RTC_WAIT 5000 /* 5 sec */ 302#define MAX_RTC_WAIT 5000 /* 5 sec */
303#define RTAS_CLOCK_BUSY (-2) 303#define RTAS_CLOCK_BUSY (-2)
304void pSeries_get_boot_time(struct rtc_time *rtc_tm) 304void rtas_get_boot_time(struct rtc_time *rtc_tm)
305{ 305{
306 int ret[8]; 306 int ret[8];
307 int error, wait_time; 307 int error, wait_time;
@@ -336,7 +336,7 @@ void pSeries_get_boot_time(struct rtc_time *rtc_tm)
336 * and if a delay is needed to read the clock. In this case we just 336 * and if a delay is needed to read the clock. In this case we just
337 * silently return without updating rtc_tm. 337 * silently return without updating rtc_tm.
338 */ 338 */
339void pSeries_get_rtc_time(struct rtc_time *rtc_tm) 339void rtas_get_rtc_time(struct rtc_time *rtc_tm)
340{ 340{
341 int ret[8]; 341 int ret[8];
342 int error, wait_time; 342 int error, wait_time;
@@ -371,7 +371,7 @@ void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
371 rtc_tm->tm_year = ret[0] - 1900; 371 rtc_tm->tm_year = ret[0] - 1900;
372} 372}
373 373
374int pSeries_set_rtc_time(struct rtc_time *tm) 374int rtas_set_rtc_time(struct rtc_time *tm)
375{ 375{
376 int error, wait_time; 376 int error, wait_time;
377 unsigned long max_wait_tb; 377 unsigned long max_wait_tb;
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index 8e439a817642..0a47a5ef428d 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -344,6 +344,7 @@ static void __init setup_cpu_maps(void)
344extern struct machdep_calls pSeries_md; 344extern struct machdep_calls pSeries_md;
345extern struct machdep_calls pmac_md; 345extern struct machdep_calls pmac_md;
346extern struct machdep_calls maple_md; 346extern struct machdep_calls maple_md;
347extern struct machdep_calls bpa_md;
347 348
348/* Ultimately, stuff them in an elf section like initcalls... */ 349/* Ultimately, stuff them in an elf section like initcalls... */
349static struct machdep_calls __initdata *machines[] = { 350static struct machdep_calls __initdata *machines[] = {
@@ -356,6 +357,9 @@ static struct machdep_calls __initdata *machines[] = {
356#ifdef CONFIG_PPC_MAPLE 357#ifdef CONFIG_PPC_MAPLE
357 &maple_md, 358 &maple_md,
358#endif /* CONFIG_PPC_MAPLE */ 359#endif /* CONFIG_PPC_MAPLE */
360#ifdef CONFIG_PPC_BPA
361 &bpa_md,
362#endif
359 NULL 363 NULL
360}; 364};
361 365
@@ -679,6 +683,12 @@ void machine_restart(char *cmd)
679 if (ppc_md.nvram_sync) 683 if (ppc_md.nvram_sync)
680 ppc_md.nvram_sync(); 684 ppc_md.nvram_sync();
681 ppc_md.restart(cmd); 685 ppc_md.restart(cmd);
686#ifdef CONFIG_SMP
687 smp_send_stop();
688#endif
689 printk(KERN_EMERG "System Halted, OK to turn off power\n");
690 local_irq_disable();
691 while (1) ;
682} 692}
683 693
684EXPORT_SYMBOL(machine_restart); 694EXPORT_SYMBOL(machine_restart);
@@ -688,6 +698,12 @@ void machine_power_off(void)
688 if (ppc_md.nvram_sync) 698 if (ppc_md.nvram_sync)
689 ppc_md.nvram_sync(); 699 ppc_md.nvram_sync();
690 ppc_md.power_off(); 700 ppc_md.power_off();
701#ifdef CONFIG_SMP
702 smp_send_stop();
703#endif
704 printk(KERN_EMERG "System Halted, OK to turn off power\n");
705 local_irq_disable();
706 while (1) ;
691} 707}
692 708
693EXPORT_SYMBOL(machine_power_off); 709EXPORT_SYMBOL(machine_power_off);
@@ -697,13 +713,16 @@ void machine_halt(void)
697 if (ppc_md.nvram_sync) 713 if (ppc_md.nvram_sync)
698 ppc_md.nvram_sync(); 714 ppc_md.nvram_sync();
699 ppc_md.halt(); 715 ppc_md.halt();
716#ifdef CONFIG_SMP
717 smp_send_stop();
718#endif
719 printk(KERN_EMERG "System Halted, OK to turn off power\n");
720 local_irq_disable();
721 while (1) ;
700} 722}
701 723
702EXPORT_SYMBOL(machine_halt); 724EXPORT_SYMBOL(machine_halt);
703 725
704unsigned long ppc_proc_freq;
705unsigned long ppc_tb_freq;
706
707static int ppc64_panic_event(struct notifier_block *this, 726static int ppc64_panic_event(struct notifier_block *this,
708 unsigned long event, void *ptr) 727 unsigned long event, void *ptr)
709{ 728{
@@ -1055,6 +1074,7 @@ void __init setup_arch(char **cmdline_p)
1055 1074
1056 /* set up the bootmem stuff with available memory */ 1075 /* set up the bootmem stuff with available memory */
1057 do_init_bootmem(); 1076 do_init_bootmem();
1077 sparse_init();
1058 1078
1059 /* initialize the syscall map in systemcfg */ 1079 /* initialize the syscall map in systemcfg */
1060 setup_syscall_map(); 1080 setup_syscall_map();
@@ -1079,11 +1099,11 @@ void __init setup_arch(char **cmdline_p)
1079static void ppc64_do_msg(unsigned int src, const char *msg) 1099static void ppc64_do_msg(unsigned int src, const char *msg)
1080{ 1100{
1081 if (ppc_md.progress) { 1101 if (ppc_md.progress) {
1082 char buf[32]; 1102 char buf[128];
1083 1103
1084 sprintf(buf, "%08x \n", src); 1104 sprintf(buf, "%08X\n", src);
1085 ppc_md.progress(buf, 0); 1105 ppc_md.progress(buf, 0);
1086 sprintf(buf, "%-16s", msg); 1106 snprintf(buf, 128, "%s", msg);
1087 ppc_md.progress(buf, 0); 1107 ppc_md.progress(buf, 0);
1088 } 1108 }
1089} 1109}
@@ -1117,7 +1137,7 @@ void ppc64_dump_msg(unsigned int src, const char *msg)
1117} 1137}
1118 1138
1119/* This should only be called on processor 0 during calibrate decr */ 1139/* This should only be called on processor 0 during calibrate decr */
1120void setup_default_decr(void) 1140void __init setup_default_decr(void)
1121{ 1141{
1122 struct paca_struct *lpaca = get_paca(); 1142 struct paca_struct *lpaca = get_paca();
1123 1143
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 9ef5d36d6b25..2fcddfcb594d 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -71,7 +71,7 @@ void smp_call_function_interrupt(void);
71 71
72int smt_enabled_at_boot = 1; 72int smt_enabled_at_boot = 1;
73 73
74#ifdef CONFIG_PPC_MULTIPLATFORM 74#ifdef CONFIG_MPIC
75void smp_mpic_message_pass(int target, int msg) 75void smp_mpic_message_pass(int target, int msg)
76{ 76{
77 /* make sure we're sending something that translates to an IPI */ 77 /* make sure we're sending something that translates to an IPI */
@@ -128,7 +128,7 @@ void __devinit smp_generic_kick_cpu(int nr)
128 smp_mb(); 128 smp_mb();
129} 129}
130 130
131#endif /* CONFIG_PPC_MULTIPLATFORM */ 131#endif /* CONFIG_MPIC */
132 132
133static void __init smp_space_timers(unsigned int max_cpus) 133static void __init smp_space_timers(unsigned int max_cpus)
134{ 134{
diff --git a/arch/ppc64/kernel/spider-pic.c b/arch/ppc64/kernel/spider-pic.c
new file mode 100644
index 000000000000..d5c9a02fb119
--- /dev/null
+++ b/arch/ppc64/kernel/spider-pic.c
@@ -0,0 +1,191 @@
1/*
2 * External Interrupt Controller on Spider South Bridge
3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/interrupt.h>
24#include <linux/irq.h>
25
26#include <asm/pgtable.h>
27#include <asm/prom.h>
28#include <asm/io.h>
29
30#include "bpa_iic.h"
31
32/* register layout taken from Spider spec, table 7.4-4 */
33enum {
34 TIR_DEN = 0x004, /* Detection Enable Register */
35 TIR_MSK = 0x084, /* Mask Level Register */
36 TIR_EDC = 0x0c0, /* Edge Detection Clear Register */
37 TIR_PNDA = 0x100, /* Pending Register A */
38 TIR_PNDB = 0x104, /* Pending Register B */
39 TIR_CS = 0x144, /* Current Status Register */
40 TIR_LCSA = 0x150, /* Level Current Status Register A */
41 TIR_LCSB = 0x154, /* Level Current Status Register B */
42 TIR_LCSC = 0x158, /* Level Current Status Register C */
43 TIR_LCSD = 0x15c, /* Level Current Status Register D */
44 TIR_CFGA = 0x200, /* Setting Register A0 */
45 TIR_CFGB = 0x204, /* Setting Register B0 */
46 /* 0x208 ... 0x3ff Setting Register An/Bn */
47 TIR_PPNDA = 0x400, /* Packet Pending Register A */
48 TIR_PPNDB = 0x404, /* Packet Pending Register B */
49 TIR_PIERA = 0x408, /* Packet Output Error Register A */
50 TIR_PIERB = 0x40c, /* Packet Output Error Register B */
51 TIR_PIEN = 0x444, /* Packet Output Enable Register */
52 TIR_PIPND = 0x454, /* Packet Output Pending Register */
53 TIRDID = 0x484, /* Spider Device ID Register */
54 REISTIM = 0x500, /* Reissue Command Timeout Time Setting */
55 REISTIMEN = 0x504, /* Reissue Command Timeout Setting */
56 REISWAITEN = 0x508, /* Reissue Wait Control*/
57};
58
59static void __iomem *spider_pics[4];
60
61static void __iomem *spider_get_pic(int irq)
62{
63 int node = irq / IIC_NODE_STRIDE;
64 irq %= IIC_NODE_STRIDE;
65
66 if (irq >= IIC_EXT_OFFSET &&
67 irq < IIC_EXT_OFFSET + IIC_NUM_EXT &&
68 spider_pics)
69 return spider_pics[node];
70 return NULL;
71}
72
73static int spider_get_nr(unsigned int irq)
74{
75 return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET;
76}
77
78static void __iomem *spider_get_irq_config(int irq)
79{
80 void __iomem *pic;
81 pic = spider_get_pic(irq);
82 return pic + TIR_CFGA + 8 * spider_get_nr(irq);
83}
84
85static void spider_enable_irq(unsigned int irq)
86{
87 void __iomem *cfg = spider_get_irq_config(irq);
88 irq = spider_get_nr(irq);
89
90 out_be32(cfg, in_be32(cfg) | 0x3107000eu);
91 out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq);
92}
93
94static void spider_disable_irq(unsigned int irq)
95{
96 void __iomem *cfg = spider_get_irq_config(irq);
97 irq = spider_get_nr(irq);
98
99 out_be32(cfg, in_be32(cfg) & ~0x30000000u);
100}
101
102static unsigned int spider_startup_irq(unsigned int irq)
103{
104 spider_enable_irq(irq);
105 return 0;
106}
107
108static void spider_shutdown_irq(unsigned int irq)
109{
110 spider_disable_irq(irq);
111}
112
113static void spider_end_irq(unsigned int irq)
114{
115 spider_enable_irq(irq);
116}
117
118static void spider_ack_irq(unsigned int irq)
119{
120 spider_disable_irq(irq);
121 iic_local_enable();
122}
123
124static struct hw_interrupt_type spider_pic = {
125 .typename = " SPIDER ",
126 .startup = spider_startup_irq,
127 .shutdown = spider_shutdown_irq,
128 .enable = spider_enable_irq,
129 .disable = spider_disable_irq,
130 .ack = spider_ack_irq,
131 .end = spider_end_irq,
132};
133
134
135int spider_get_irq(unsigned long int_pending)
136{
137 void __iomem *regs = spider_get_pic(int_pending);
138 unsigned long cs;
139 int irq;
140
141 cs = in_be32(regs + TIR_CS);
142
143 irq = cs >> 24;
144 if (irq != 63)
145 return irq;
146
147 return -1;
148}
149
150void spider_init_IRQ(void)
151{
152 int node;
153 struct device_node *dn;
154 unsigned int *property;
155 long spiderpic;
156 int n;
157
158/* FIXME: detect multiple PICs as soon as the device tree has them */
159 for (node = 0; node < 1; node++) {
160 dn = of_find_node_by_path("/");
161 n = prom_n_addr_cells(dn);
162 property = (unsigned int *) get_property(dn,
163 "platform-spider-pic", NULL);
164
165 if (!property)
166 continue;
167 for (spiderpic = 0; n > 0; --n)
168 spiderpic = (spiderpic << 32) + *property++;
169 printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic);
170 spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE);
171 for (n = 0; n < IIC_NUM_EXT; n++) {
172 int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
173 get_irq_desc(irq)->handler = &spider_pic;
174
175 /* do not mask any interrupts because of level */
176 out_be32(spider_pics[node] + TIR_MSK, 0x0);
177
178 /* disable edge detection clear */
179 /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */
180
181 /* enable interrupt packets to be output */
182 out_be32(spider_pics[node] + TIR_PIEN,
183 in_be32(spider_pics[node] + TIR_PIEN) | 0x1);
184
185 /* Enable the interrupt detection enable bit. Do this last! */
186 out_be32(spider_pics[node] + TIR_DEN,
187 in_be32(spider_pics[node] +TIR_DEN) | 0x1);
188
189 }
190 }
191}
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 33364a7d2cd2..2348a75e050d 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -107,6 +107,9 @@ void ppc_adjtimex(void);
107 107
108static unsigned adjusting_time = 0; 108static unsigned adjusting_time = 0;
109 109
110unsigned long ppc_proc_freq;
111unsigned long ppc_tb_freq;
112
110static __inline__ void timer_check_rtc(void) 113static __inline__ void timer_check_rtc(void)
111{ 114{
112 /* 115 /*
@@ -472,6 +475,66 @@ int do_settimeofday(struct timespec *tv)
472 475
473EXPORT_SYMBOL(do_settimeofday); 476EXPORT_SYMBOL(do_settimeofday);
474 477
478#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
479void __init generic_calibrate_decr(void)
480{
481 struct device_node *cpu;
482 struct div_result divres;
483 unsigned int *fp;
484 int node_found;
485
486 /*
487 * The cpu node should have a timebase-frequency property
488 * to tell us the rate at which the decrementer counts.
489 */
490 cpu = of_find_node_by_type(NULL, "cpu");
491
492 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
493 node_found = 0;
494 if (cpu != 0) {
495 fp = (unsigned int *)get_property(cpu, "timebase-frequency",
496 NULL);
497 if (fp != 0) {
498 node_found = 1;
499 ppc_tb_freq = *fp;
500 }
501 }
502 if (!node_found)
503 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
504 "(not found)\n");
505
506 ppc_proc_freq = DEFAULT_PROC_FREQ;
507 node_found = 0;
508 if (cpu != 0) {
509 fp = (unsigned int *)get_property(cpu, "clock-frequency",
510 NULL);
511 if (fp != 0) {
512 node_found = 1;
513 ppc_proc_freq = *fp;
514 }
515 }
516 if (!node_found)
517 printk(KERN_ERR "WARNING: Estimating processor frequency "
518 "(not found)\n");
519
520 of_node_put(cpu);
521
522 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
523 ppc_tb_freq/1000000, ppc_tb_freq%1000000);
524 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
525 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
526
527 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
528 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
529 tb_ticks_per_usec = ppc_tb_freq / 1000000;
530 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
531 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
532 tb_to_xs = divres.result_low;
533
534 setup_default_decr();
535}
536#endif
537
475void __init time_init(void) 538void __init time_init(void)
476{ 539{
477 /* This function is only called on the boot processor */ 540 /* This function is only called on the boot processor */
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index 7e52cb2605e0..a8d5e83ee89f 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -126,6 +126,10 @@ int die(const char *str, struct pt_regs *regs, long err)
126 printk("POWERMAC "); 126 printk("POWERMAC ");
127 nl = 1; 127 nl = 1;
128 break; 128 break;
129 case PLATFORM_BPA:
130 printk("BPA ");
131 nl = 1;
132 break;
129 } 133 }
130 if (nl) 134 if (nl)
131 printk("\n"); 135 printk("\n");
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
index ac522d57b2a7..3695d00d347f 100644
--- a/arch/ppc64/mm/Makefile
+++ b/arch/ppc64/mm/Makefile
@@ -6,6 +6,6 @@ EXTRA_CFLAGS += -mno-minimal-toc
6 6
7obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \ 7obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
8 slb_low.o slb.o stab.o mmap.o 8 slb_low.o slb.o stab.o mmap.o
9obj-$(CONFIG_DISCONTIGMEM) += numa.o 9obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
11obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o 11obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 6fa1e6490b57..b50b3a446dbe 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -98,7 +98,7 @@ void show_mem(void)
98 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 98 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
99 for_each_pgdat(pgdat) { 99 for_each_pgdat(pgdat) {
100 for (i = 0; i < pgdat->node_spanned_pages; i++) { 100 for (i = 0; i < pgdat->node_spanned_pages; i++) {
101 page = pgdat->node_mem_map + i; 101 page = pgdat_page_nr(pgdat, i);
102 total++; 102 total++;
103 if (PageReserved(page)) 103 if (PageReserved(page))
104 reserved++; 104 reserved++;
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(page_is_ram);
531 * Initialize the bootmem system and give it all the memory we 531 * Initialize the bootmem system and give it all the memory we
532 * have available. 532 * have available.
533 */ 533 */
534#ifndef CONFIG_DISCONTIGMEM 534#ifndef CONFIG_NEED_MULTIPLE_NODES
535void __init do_init_bootmem(void) 535void __init do_init_bootmem(void)
536{ 536{
537 unsigned long i; 537 unsigned long i;
@@ -553,12 +553,20 @@ void __init do_init_bootmem(void)
553 553
554 max_pfn = max_low_pfn; 554 max_pfn = max_low_pfn;
555 555
556 /* add all physical memory to the bootmem map. Also find the first */ 556 /* Add all physical memory to the bootmem map, mark each area
557 * present.
558 */
557 for (i=0; i < lmb.memory.cnt; i++) { 559 for (i=0; i < lmb.memory.cnt; i++) {
558 unsigned long physbase, size; 560 unsigned long physbase, size;
561 unsigned long start_pfn, end_pfn;
559 562
560 physbase = lmb.memory.region[i].physbase; 563 physbase = lmb.memory.region[i].physbase;
561 size = lmb.memory.region[i].size; 564 size = lmb.memory.region[i].size;
565
566 start_pfn = physbase >> PAGE_SHIFT;
567 end_pfn = start_pfn + (size >> PAGE_SHIFT);
568 memory_present(0, start_pfn, end_pfn);
569
562 free_bootmem(physbase, size); 570 free_bootmem(physbase, size);
563 } 571 }
564 572
@@ -597,7 +605,7 @@ void __init paging_init(void)
597 free_area_init_node(0, NODE_DATA(0), zones_size, 605 free_area_init_node(0, NODE_DATA(0), zones_size,
598 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); 606 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
599} 607}
600#endif /* CONFIG_DISCONTIGMEM */ 608#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
601 609
602static struct kcore_list kcore_vmem; 610static struct kcore_list kcore_vmem;
603 611
@@ -628,7 +636,7 @@ module_init(setup_kcore);
628 636
629void __init mem_init(void) 637void __init mem_init(void)
630{ 638{
631#ifdef CONFIG_DISCONTIGMEM 639#ifdef CONFIG_NEED_MULTIPLE_NODES
632 int nid; 640 int nid;
633#endif 641#endif
634 pg_data_t *pgdat; 642 pg_data_t *pgdat;
@@ -639,7 +647,7 @@ void __init mem_init(void)
639 num_physpages = max_low_pfn; /* RAM is assumed contiguous */ 647 num_physpages = max_low_pfn; /* RAM is assumed contiguous */
640 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 648 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
641 649
642#ifdef CONFIG_DISCONTIGMEM 650#ifdef CONFIG_NEED_MULTIPLE_NODES
643 for_each_online_node(nid) { 651 for_each_online_node(nid) {
644 if (NODE_DATA(nid)->node_spanned_pages != 0) { 652 if (NODE_DATA(nid)->node_spanned_pages != 0) {
645 printk("freeing bootmem node %x\n", nid); 653 printk("freeing bootmem node %x\n", nid);
@@ -654,7 +662,7 @@ void __init mem_init(void)
654 662
655 for_each_pgdat(pgdat) { 663 for_each_pgdat(pgdat) {
656 for (i = 0; i < pgdat->node_spanned_pages; i++) { 664 for (i = 0; i < pgdat->node_spanned_pages; i++) {
657 page = pgdat->node_mem_map + i; 665 page = pgdat_page_nr(pgdat, i);
658 if (PageReserved(page)) 666 if (PageReserved(page))
659 reservedpages++; 667 reservedpages++;
660 } 668 }
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index ea862ec643d3..cafd91aef289 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -440,6 +440,8 @@ new_range:
440 for (i = start ; i < (start+size); i += MEMORY_INCREMENT) 440 for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
441 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 441 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
442 numa_domain; 442 numa_domain;
443 memory_present(numa_domain, start >> PAGE_SHIFT,
444 (start + size) >> PAGE_SHIFT);
443 445
444 if (--ranges) 446 if (--ranges)
445 goto new_range; 447 goto new_range;
@@ -481,6 +483,7 @@ static void __init setup_nonnuma(void)
481 483
482 for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT) 484 for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
483 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0; 485 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
486 memory_present(0, 0, init_node_data[0].node_end_pfn);
484} 487}
485 488
486static void __init dump_numa_topology(void) 489static void __init dump_numa_topology(void)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ab79af84699a..32696c1d9280 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -226,6 +226,8 @@ config WARN_STACK_SIZE
226 This allows you to specify the maximum frame size a function may 226 This allows you to specify the maximum frame size a function may
227 have without the compiler complaining about it. 227 have without the compiler complaining about it.
228 228
229source "mm/Kconfig"
230
229comment "I/O subsystem configuration" 231comment "I/O subsystem configuration"
230 232
231config MACHCHK_WARNING 233config MACHCHK_WARNING
diff --git a/arch/s390/boot/install.sh b/arch/s390/boot/install.sh
index 278a8139cb18..d4026f62cb06 100644
--- a/arch/s390/boot/install.sh
+++ b/arch/s390/boot/install.sh
@@ -21,8 +21,8 @@
21 21
22# User may have a custom install script 22# User may have a custom install script
23 23
24if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi 24if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
25if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi 25if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
26 26
27# Default install - same as make zlilo 27# Default install - same as make zlilo
28 28
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index bf33dcfec7db..3898f66d0b2f 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -45,7 +45,7 @@ typedef struct compat_siginfo {
45 45
46 /* POSIX.1b timers */ 46 /* POSIX.1b timers */
47 struct { 47 struct {
48 timer_t _tid; /* timer id */ 48 compat_timer_t _tid; /* timer id */
49 int _overrun; /* overrun count */ 49 int _overrun; /* overrun count */
50 compat_sigval_t _sigval; /* same as below */ 50 compat_sigval_t _sigval; /* same as below */
51 int _sys_private; /* not to be passed to user */ 51 int _sys_private; /* not to be passed to user */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 3468d5127223..a7c8bfc11604 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -486,7 +486,7 @@ config CPU_SUBTYPE_ST40
486 depends on CPU_SUBTYPE_ST40STB1 || CPU_SUBTYPE_ST40GX1 486 depends on CPU_SUBTYPE_ST40STB1 || CPU_SUBTYPE_ST40GX1
487 default y 487 default y
488 488
489config DISCONTIGMEM 489config ARCH_DISCONTIGMEM_ENABLE
490 bool 490 bool
491 depends on SH_HP690 491 depends on SH_HP690
492 default y 492 default y
@@ -496,6 +496,8 @@ config DISCONTIGMEM
496 or have huge holes in the physical address space for other reasons. 496 or have huge holes in the physical address space for other reasons.
497 See <file:Documentation/vm/numa> for more. 497 See <file:Documentation/vm/numa> for more.
498 498
499source "mm/Kconfig"
500
499config ZERO_PAGE_OFFSET 501config ZERO_PAGE_OFFSET
500 hex "Zero page offset" 502 hex "Zero page offset"
501 default "0x00001000" if !(SH_MPC1211 || SH_SH03) 503 default "0x00001000" if !(SH_MPC1211 || SH_SH03)
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
index 76eb81fba45e..708e59736a4d 100644
--- a/arch/sh64/Kconfig
+++ b/arch/sh64/Kconfig
@@ -217,6 +217,8 @@ config PREEMPT
217 bool "Preemptible Kernel (EXPERIMENTAL)" 217 bool "Preemptible Kernel (EXPERIMENTAL)"
218 depends on EXPERIMENTAL 218 depends on EXPERIMENTAL
219 219
220source "mm/Kconfig"
221
220endmenu 222endmenu
221 223
222menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" 224menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 237f922520fd..262e13d086fe 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -264,7 +264,11 @@ config SUNOS_EMUL
264 want to run SunOS binaries on an Ultra you must also say Y to 264 want to run SunOS binaries on an Ultra you must also say Y to
265 "Kernel support for 32-bit a.out binaries" above. 265 "Kernel support for 32-bit a.out binaries" above.
266 266
267source "drivers/parport/Kconfig" 267source "mm/Kconfig"
268
269endmenu
270
271source "drivers/Kconfig"
268 272
269config PRINTER 273config PRINTER
270 tristate "Parallel printer support" 274 tristate "Parallel printer support"
@@ -291,6 +295,8 @@ config PRINTER
291 If you have more than 8 printers, you need to increase the LP_NO 295 If you have more than 8 printers, you need to increase the LP_NO
292 macro in lp.c and the PARPORT_MAX macro in parport.h. 296 macro in lp.c and the PARPORT_MAX macro in parport.h.
293 297
298source "mm/Kconfig"
299
294endmenu 300endmenu
295 301
296source "drivers/base/Kconfig" 302source "drivers/base/Kconfig"
@@ -372,18 +378,8 @@ config UNIX98_PTY_COUNT
372 378
373endmenu 379endmenu
374 380
375source "drivers/input/Kconfig"
376
377source "fs/Kconfig" 381source "fs/Kconfig"
378 382
379source "sound/Kconfig"
380
381source "drivers/usb/Kconfig"
382
383source "drivers/infiniband/Kconfig"
384
385source "drivers/char/watchdog/Kconfig"
386
387source "arch/sparc/Kconfig.debug" 383source "arch/sparc/Kconfig.debug"
388 384
389source "security/Kconfig" 385source "security/Kconfig"
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index a72fd15d5ea8..e2b050eb3b96 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -484,6 +484,8 @@ config CMDLINE
484 484
485 NOTE: This option WILL override the PROM bootargs setting! 485 NOTE: This option WILL override the PROM bootargs setting!
486 486
487source "mm/Kconfig"
488
487endmenu 489endmenu
488 490
489source "drivers/base/Kconfig" 491source "drivers/base/Kconfig"
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 7066d7ba667a..bdac631cf011 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -6,7 +6,6 @@
6#include <linux/config.h> 6#include <linux/config.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/kprobes.h> 8#include <linux/kprobes.h>
9
10#include <asm/kdebug.h> 9#include <asm/kdebug.h>
11#include <asm/signal.h> 10#include <asm/signal.h>
12 11
@@ -47,25 +46,59 @@ void arch_copy_kprobe(struct kprobe *p)
47{ 46{
48 p->ainsn.insn[0] = *p->addr; 47 p->ainsn.insn[0] = *p->addr;
49 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2; 48 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
49 p->opcode = *p->addr;
50} 50}
51 51
52void arch_remove_kprobe(struct kprobe *p) 52void arch_arm_kprobe(struct kprobe *p)
53{ 53{
54 *p->addr = BREAKPOINT_INSTRUCTION;
55 flushi(p->addr);
54} 56}
55 57
56/* kprobe_status settings */ 58void arch_disarm_kprobe(struct kprobe *p)
57#define KPROBE_HIT_ACTIVE 0x00000001 59{
58#define KPROBE_HIT_SS 0x00000002 60 *p->addr = p->opcode;
61 flushi(p->addr);
62}
63
64void arch_remove_kprobe(struct kprobe *p)
65{
66}
59 67
60static struct kprobe *current_kprobe; 68static struct kprobe *current_kprobe;
61static unsigned long current_kprobe_orig_tnpc; 69static unsigned long current_kprobe_orig_tnpc;
62static unsigned long current_kprobe_orig_tstate_pil; 70static unsigned long current_kprobe_orig_tstate_pil;
63static unsigned int kprobe_status; 71static unsigned int kprobe_status;
72static struct kprobe *kprobe_prev;
73static unsigned long kprobe_orig_tnpc_prev;
74static unsigned long kprobe_orig_tstate_pil_prev;
75static unsigned int kprobe_status_prev;
64 76
65static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 77static inline void save_previous_kprobe(void)
78{
79 kprobe_status_prev = kprobe_status;
80 kprobe_orig_tnpc_prev = current_kprobe_orig_tnpc;
81 kprobe_orig_tstate_pil_prev = current_kprobe_orig_tstate_pil;
82 kprobe_prev = current_kprobe;
83}
84
85static inline void restore_previous_kprobe(void)
86{
87 kprobe_status = kprobe_status_prev;
88 current_kprobe_orig_tnpc = kprobe_orig_tnpc_prev;
89 current_kprobe_orig_tstate_pil = kprobe_orig_tstate_pil_prev;
90 current_kprobe = kprobe_prev;
91}
92
93static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
66{ 94{
67 current_kprobe_orig_tnpc = regs->tnpc; 95 current_kprobe_orig_tnpc = regs->tnpc;
68 current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL); 96 current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
97 current_kprobe = p;
98}
99
100static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
101{
69 regs->tstate |= TSTATE_PIL; 102 regs->tstate |= TSTATE_PIL;
70 103
71 /*single step inline, if it a breakpoint instruction*/ 104 /*single step inline, if it a breakpoint instruction*/
@@ -78,17 +111,6 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
78 } 111 }
79} 112}
80 113
81static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
82{
83 *p->addr = p->opcode;
84 flushi(p->addr);
85
86 regs->tpc = (unsigned long) p->addr;
87 regs->tnpc = current_kprobe_orig_tnpc;
88 regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
89 current_kprobe_orig_tstate_pil);
90}
91
92static int kprobe_handler(struct pt_regs *regs) 114static int kprobe_handler(struct pt_regs *regs)
93{ 115{
94 struct kprobe *p; 116 struct kprobe *p;
@@ -109,8 +131,18 @@ static int kprobe_handler(struct pt_regs *regs)
109 unlock_kprobes(); 131 unlock_kprobes();
110 goto no_kprobe; 132 goto no_kprobe;
111 } 133 }
112 disarm_kprobe(p, regs); 134 /* We have reentered the kprobe_handler(), since
113 ret = 1; 135 * another probe was hit while within the handler.
136 * We here save the original kprobes variables and
137 * just single step on the instruction of the new probe
138 * without calling any user handlers.
139 */
140 save_previous_kprobe();
141 set_current_kprobe(p, regs);
142 p->nmissed++;
143 kprobe_status = KPROBE_REENTER;
144 prepare_singlestep(p, regs);
145 return 1;
114 } else { 146 } else {
115 p = current_kprobe; 147 p = current_kprobe;
116 if (p->break_handler && p->break_handler(p, regs)) 148 if (p->break_handler && p->break_handler(p, regs))
@@ -138,8 +170,8 @@ static int kprobe_handler(struct pt_regs *regs)
138 goto no_kprobe; 170 goto no_kprobe;
139 } 171 }
140 172
173 set_current_kprobe(p, regs);
141 kprobe_status = KPROBE_HIT_ACTIVE; 174 kprobe_status = KPROBE_HIT_ACTIVE;
142 current_kprobe = p;
143 if (p->pre_handler && p->pre_handler(p, regs)) 175 if (p->pre_handler && p->pre_handler(p, regs))
144 return 1; 176 return 1;
145 177
@@ -245,12 +277,20 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
245 if (!kprobe_running()) 277 if (!kprobe_running())
246 return 0; 278 return 0;
247 279
248 if (current_kprobe->post_handler) 280 if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
281 kprobe_status = KPROBE_HIT_SSDONE;
249 current_kprobe->post_handler(current_kprobe, regs, 0); 282 current_kprobe->post_handler(current_kprobe, regs, 0);
283 }
250 284
251 resume_execution(current_kprobe, regs); 285 resume_execution(current_kprobe, regs);
252 286
287 /*Restore back the original saved kprobes variables and continue. */
288 if (kprobe_status == KPROBE_REENTER) {
289 restore_previous_kprobe();
290 goto out;
291 }
253 unlock_kprobes(); 292 unlock_kprobes();
293out:
254 preempt_enable_no_resched(); 294 preempt_enable_no_resched();
255 295
256 return 1; 296 return 1;
@@ -392,3 +432,4 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
392 } 432 }
393 return 0; 433 return 0;
394} 434}
435
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 9a375e975cff..f28428f4170e 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -102,7 +102,7 @@ typedef struct compat_siginfo{
102 102
103 /* POSIX.1b timers */ 103 /* POSIX.1b timers */
104 struct { 104 struct {
105 timer_t _tid; /* timer id */ 105 compat_timer_t _tid; /* timer id */
106 int _overrun; /* overrun count */ 106 int _overrun; /* overrun count */
107 compat_sigval_t _sigval; /* same as below */ 107 compat_sigval_t _sigval; /* same as below */
108 int _sys_private; /* not to be passed to user */ 108 int _sys_private; /* not to be passed to user */
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index b8e952c88fd1..9469e77303e6 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -74,6 +74,7 @@ config MODE_SKAS
74 option will shrink the UML binary slightly. 74 option will shrink the UML binary slightly.
75 75
76source "arch/um/Kconfig_arch" 76source "arch/um/Kconfig_arch"
77source "mm/Kconfig"
77 78
78config LD_SCRIPT_STATIC 79config LD_SCRIPT_STATIC
79 bool 80 bool
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 804c6bbdf67c..157584ae4792 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -8,6 +8,7 @@
8#include "linux/kernel.h" 8#include "linux/kernel.h"
9#include "linux/sched.h" 9#include "linux/sched.h"
10#include "linux/interrupt.h" 10#include "linux/interrupt.h"
11#include "linux/string.h"
11#include "linux/mm.h" 12#include "linux/mm.h"
12#include "linux/slab.h" 13#include "linux/slab.h"
13#include "linux/utsname.h" 14#include "linux/utsname.h"
@@ -322,12 +323,7 @@ void do_uml_exitcalls(void)
322 323
323char *uml_strdup(char *string) 324char *uml_strdup(char *string)
324{ 325{
325 char *new; 326 return kstrdup(string, GFP_KERNEL);
326
327 new = kmalloc(strlen(string) + 1, GFP_KERNEL);
328 if(new == NULL) return(NULL);
329 strcpy(new, string);
330 return(new);
331} 327}
332 328
333int copy_to_user_proc(void __user *to, void *from, int size) 329int copy_to_user_proc(void __user *to, void *from, int size)
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index 90cd4baa75ee..27febd6ffa80 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -218,6 +218,8 @@ menu "Processor type and features"
218 a lot of RAM, and you need to able to allocate very large 218 a lot of RAM, and you need to able to allocate very large
219 contiguous chunks. If unsure, say N. 219 contiguous chunks. If unsure, say N.
220 220
221source "mm/Kconfig"
222
221endmenu 223endmenu
222 224
223 225
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 289f448ac89c..db259757dc8a 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -265,7 +265,7 @@ config NUMA_EMU
265 into virtual nodes when booted with "numa=fake=N", where N is the 265 into virtual nodes when booted with "numa=fake=N", where N is the
266 number of nodes. This is only useful for debugging. 266 number of nodes. This is only useful for debugging.
267 267
268config DISCONTIGMEM 268config ARCH_DISCONTIGMEM_ENABLE
269 bool 269 bool
270 depends on NUMA 270 depends on NUMA
271 default y 271 default y
@@ -274,6 +274,27 @@ config NUMA
274 bool 274 bool
275 default n 275 default n
276 276
277config ARCH_DISCONTIGMEM_ENABLE
278 def_bool y
279 depends on NUMA
280
281config ARCH_DISCONTIGMEM_DEFAULT
282 def_bool y
283 depends on NUMA
284
285config ARCH_SPARSEMEM_ENABLE
286 def_bool y
287 depends on NUMA
288
289config ARCH_FLATMEM_ENABLE
290 def_bool y
291 depends on !NUMA
292
293source "mm/Kconfig"
294
295config HAVE_ARCH_EARLY_PFN_TO_NID
296 def_bool y
297
277config HAVE_DEC_LOCK 298config HAVE_DEC_LOCK
278 bool 299 bool
279 depends on SMP 300 depends on SMP
@@ -381,6 +402,8 @@ config SECCOMP
381 402
382 If unsure, say Y. Only embedded should say N here. 403 If unsure, say Y. Only embedded should say N here.
383 404
405source kernel/Kconfig.hz
406
384endmenu 407endmenu
385 408
386# 409#
diff --git a/arch/x86_64/boot/install.sh b/arch/x86_64/boot/install.sh
index 90f2452b3b9e..f17b40dfc0f4 100644
--- a/arch/x86_64/boot/install.sh
+++ b/arch/x86_64/boot/install.sh
@@ -21,8 +21,8 @@
21 21
22# User may have a custom install script 22# User may have a custom install script
23 23
24if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi 24if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
25if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi 25if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
26 26
27# Default install - same as make zlilo 27# Default install - same as make zlilo
28 28
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index fbd09b5126ce..66e2821533db 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -428,8 +428,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
428 return (void __user *)((rsp - frame_size) & -8UL); 428 return (void __user *)((rsp - frame_size) & -8UL);
429} 429}
430 430
431void ia32_setup_frame(int sig, struct k_sigaction *ka, 431int ia32_setup_frame(int sig, struct k_sigaction *ka,
432 compat_sigset_t *set, struct pt_regs * regs) 432 compat_sigset_t *set, struct pt_regs * regs)
433{ 433{
434 struct sigframe __user *frame; 434 struct sigframe __user *frame;
435 int err = 0; 435 int err = 0;
@@ -514,14 +514,15 @@ void ia32_setup_frame(int sig, struct k_sigaction *ka,
514 current->comm, current->pid, frame, regs->rip, frame->pretcode); 514 current->comm, current->pid, frame, regs->rip, frame->pretcode);
515#endif 515#endif
516 516
517 return; 517 return 1;
518 518
519give_sigsegv: 519give_sigsegv:
520 force_sigsegv(sig, current); 520 force_sigsegv(sig, current);
521 return 0;
521} 522}
522 523
523void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 524int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
524 compat_sigset_t *set, struct pt_regs * regs) 525 compat_sigset_t *set, struct pt_regs * regs)
525{ 526{
526 struct rt_sigframe __user *frame; 527 struct rt_sigframe __user *frame;
527 int err = 0; 528 int err = 0;
@@ -613,9 +614,9 @@ void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
613 current->comm, current->pid, frame, regs->rip, frame->pretcode); 614 current->comm, current->pid, frame, regs->rip, frame->pretcode);
614#endif 615#endif
615 616
616 return; 617 return 1;
617 618
618give_sigsegv: 619give_sigsegv:
619 force_sigsegv(sig, current); 620 force_sigsegv(sig, current);
621 return 0;
620} 622}
621
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index 504e63474993..c9a6b812e926 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -40,11 +40,7 @@ int fix_aperture __initdata = 1;
40 40
41static u32 __init allocate_aperture(void) 41static u32 __init allocate_aperture(void)
42{ 42{
43#ifdef CONFIG_DISCONTIGMEM
44 pg_data_t *nd0 = NODE_DATA(0); 43 pg_data_t *nd0 = NODE_DATA(0);
45#else
46 pg_data_t *nd0 = &contig_page_data;
47#endif
48 u32 aper_size; 44 u32 aper_size;
49 void *p; 45 void *p;
50 46
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index e3a19e8ebbf8..9631c747c5e3 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -2,20 +2,24 @@
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/tty.h>
5#include <asm/io.h> 6#include <asm/io.h>
6#include <asm/processor.h> 7#include <asm/processor.h>
7 8
8/* Simple VGA output */ 9/* Simple VGA output */
9 10
10#ifdef __i386__ 11#ifdef __i386__
12#include <asm/setup.h>
11#define VGABASE (__ISA_IO_base + 0xb8000) 13#define VGABASE (__ISA_IO_base + 0xb8000)
12#else 14#else
15#include <asm/bootsetup.h>
13#define VGABASE ((void __iomem *)0xffffffff800b8000UL) 16#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
14#endif 17#endif
15 18
16#define MAX_YPOS 25 19#define MAX_YPOS max_ypos
17#define MAX_XPOS 80 20#define MAX_XPOS max_xpos
18 21
22static int max_ypos = 25, max_xpos = 80;
19static int current_ypos = 1, current_xpos = 0; 23static int current_ypos = 1, current_xpos = 0;
20 24
21static void early_vga_write(struct console *con, const char *str, unsigned n) 25static void early_vga_write(struct console *con, const char *str, unsigned n)
@@ -196,7 +200,10 @@ int __init setup_early_printk(char *opt)
196 } else if (!strncmp(buf, "ttyS", 4)) { 200 } else if (!strncmp(buf, "ttyS", 4)) {
197 early_serial_init(buf); 201 early_serial_init(buf);
198 early_console = &early_serial_console; 202 early_console = &early_serial_console;
199 } else if (!strncmp(buf, "vga", 3)) { 203 } else if (!strncmp(buf, "vga", 3)
204 && SCREEN_INFO.orig_video_isVGA == 1) {
205 max_xpos = SCREEN_INFO.orig_video_cols;
206 max_ypos = SCREEN_INFO.orig_video_lines;
200 early_console = &early_vga_console; 207 early_console = &early_vga_console;
201 } 208 }
202 early_console_initialized = 1; 209 early_console_initialized = 1;
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index 0f8c78dcd38c..cf6ab147a2a5 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -94,7 +94,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
94 s = strstr(saved_command_line, "earlyprintk="); 94 s = strstr(saved_command_line, "earlyprintk=");
95 if (s != NULL) 95 if (s != NULL)
96 setup_early_printk(s); 96 setup_early_printk(s);
97#ifdef CONFIG_DISCONTIGMEM 97#ifdef CONFIG_NUMA
98 s = strstr(saved_command_line, "numa="); 98 s = strstr(saved_command_line, "numa=");
99 if (s != NULL) 99 if (s != NULL)
100 numa_setup(s+5); 100 numa_setup(s+5);
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 7873d9ba8814..19eafa0aa95c 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -157,14 +157,13 @@ static unsigned int startup_8259A_irq(unsigned int irq)
157} 157}
158 158
159static struct hw_interrupt_type i8259A_irq_type = { 159static struct hw_interrupt_type i8259A_irq_type = {
160 "XT-PIC", 160 .typename = "XT-PIC",
161 startup_8259A_irq, 161 .startup = startup_8259A_irq,
162 shutdown_8259A_irq, 162 .shutdown = shutdown_8259A_irq,
163 enable_8259A_irq, 163 .enable = enable_8259A_irq,
164 disable_8259A_irq, 164 .disable = disable_8259A_irq,
165 mask_and_ack_8259A, 165 .ack = mask_and_ack_8259A,
166 end_8259A_irq, 166 .end = end_8259A_irq,
167 NULL
168}; 167};
169 168
170/* 169/*
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index f77f8a0ff187..4e680f87a75f 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -27,6 +27,8 @@
27 * <prasanna@in.ibm.com> adapted for x86_64 27 * <prasanna@in.ibm.com> adapted for x86_64
28 * 2005-Mar Roland McGrath <roland@redhat.com> 28 * 2005-Mar Roland McGrath <roland@redhat.com>
29 * Fixed to handle %rip-relative addressing mode correctly. 29 * Fixed to handle %rip-relative addressing mode correctly.
30 * 2005-May Rusty Lynch <rusty.lynch@intel.com>
31 * Added function return probes functionality
30 */ 32 */
31 33
32#include <linux/config.h> 34#include <linux/config.h>
@@ -37,18 +39,16 @@
37#include <linux/slab.h> 39#include <linux/slab.h>
38#include <linux/preempt.h> 40#include <linux/preempt.h>
39#include <linux/moduleloader.h> 41#include <linux/moduleloader.h>
40 42#include <asm/cacheflush.h>
41#include <asm/pgtable.h> 43#include <asm/pgtable.h>
42#include <asm/kdebug.h> 44#include <asm/kdebug.h>
43 45
44static DECLARE_MUTEX(kprobe_mutex); 46static DECLARE_MUTEX(kprobe_mutex);
45 47
46/* kprobe_status settings */
47#define KPROBE_HIT_ACTIVE 0x00000001
48#define KPROBE_HIT_SS 0x00000002
49
50static struct kprobe *current_kprobe; 48static struct kprobe *current_kprobe;
51static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags; 49static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags;
50static struct kprobe *kprobe_prev;
51static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
52static struct pt_regs jprobe_saved_regs; 52static struct pt_regs jprobe_saved_regs;
53static long *jprobe_saved_rsp; 53static long *jprobe_saved_rsp;
54static kprobe_opcode_t *get_insn_slot(void); 54static kprobe_opcode_t *get_insn_slot(void);
@@ -214,6 +214,21 @@ void arch_copy_kprobe(struct kprobe *p)
214 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */ 214 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
215 *ripdisp = disp; 215 *ripdisp = disp;
216 } 216 }
217 p->opcode = *p->addr;
218}
219
220void arch_arm_kprobe(struct kprobe *p)
221{
222 *p->addr = BREAKPOINT_INSTRUCTION;
223 flush_icache_range((unsigned long) p->addr,
224 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
225}
226
227void arch_disarm_kprobe(struct kprobe *p)
228{
229 *p->addr = p->opcode;
230 flush_icache_range((unsigned long) p->addr,
231 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
217} 232}
218 233
219void arch_remove_kprobe(struct kprobe *p) 234void arch_remove_kprobe(struct kprobe *p)
@@ -223,10 +238,29 @@ void arch_remove_kprobe(struct kprobe *p)
223 down(&kprobe_mutex); 238 down(&kprobe_mutex);
224} 239}
225 240
226static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs) 241static inline void save_previous_kprobe(void)
227{ 242{
228 *p->addr = p->opcode; 243 kprobe_prev = current_kprobe;
229 regs->rip = (unsigned long)p->addr; 244 kprobe_status_prev = kprobe_status;
245 kprobe_old_rflags_prev = kprobe_old_rflags;
246 kprobe_saved_rflags_prev = kprobe_saved_rflags;
247}
248
249static inline void restore_previous_kprobe(void)
250{
251 current_kprobe = kprobe_prev;
252 kprobe_status = kprobe_status_prev;
253 kprobe_old_rflags = kprobe_old_rflags_prev;
254 kprobe_saved_rflags = kprobe_saved_rflags_prev;
255}
256
257static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
258{
259 current_kprobe = p;
260 kprobe_saved_rflags = kprobe_old_rflags
261 = (regs->eflags & (TF_MASK | IF_MASK));
262 if (is_IF_modifier(p->ainsn.insn))
263 kprobe_saved_rflags &= ~IF_MASK;
230} 264}
231 265
232static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 266static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -240,6 +274,50 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
240 regs->rip = (unsigned long)p->ainsn.insn; 274 regs->rip = (unsigned long)p->ainsn.insn;
241} 275}
242 276
277struct task_struct *arch_get_kprobe_task(void *ptr)
278{
279 return ((struct thread_info *) (((unsigned long) ptr) &
280 (~(THREAD_SIZE -1))))->task;
281}
282
283void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
284{
285 unsigned long *sara = (unsigned long *)regs->rsp;
286 struct kretprobe_instance *ri;
287 static void *orig_ret_addr;
288
289 /*
290 * Save the return address when the return probe hits
291 * the first time, and use it to populate the (krprobe
292 * instance)->ret_addr for subsequent return probes at
293 * the same addrress since stack address would have
294 * the kretprobe_trampoline by then.
295 */
296 if (((void*) *sara) != kretprobe_trampoline)
297 orig_ret_addr = (void*) *sara;
298
299 if ((ri = get_free_rp_inst(rp)) != NULL) {
300 ri->rp = rp;
301 ri->stack_addr = sara;
302 ri->ret_addr = orig_ret_addr;
303 add_rp_inst(ri);
304 /* Replace the return addr with trampoline addr */
305 *sara = (unsigned long) &kretprobe_trampoline;
306 } else {
307 rp->nmissed++;
308 }
309}
310
311void arch_kprobe_flush_task(struct task_struct *tk)
312{
313 struct kretprobe_instance *ri;
314 while ((ri = get_rp_inst_tsk(tk)) != NULL) {
315 *((unsigned long *)(ri->stack_addr)) =
316 (unsigned long) ri->ret_addr;
317 recycle_rp_inst(ri);
318 }
319}
320
243/* 321/*
244 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 322 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
245 * remain disabled thorough out this function. 323 * remain disabled thorough out this function.
@@ -264,9 +342,30 @@ int kprobe_handler(struct pt_regs *regs)
264 regs->eflags |= kprobe_saved_rflags; 342 regs->eflags |= kprobe_saved_rflags;
265 unlock_kprobes(); 343 unlock_kprobes();
266 goto no_kprobe; 344 goto no_kprobe;
345 } else if (kprobe_status == KPROBE_HIT_SSDONE) {
346 /* TODO: Provide re-entrancy from
347 * post_kprobes_handler() and avoid exception
348 * stack corruption while single-stepping on
349 * the instruction of the new probe.
350 */
351 arch_disarm_kprobe(p);
352 regs->rip = (unsigned long)p->addr;
353 ret = 1;
354 } else {
355 /* We have reentered the kprobe_handler(), since
356 * another probe was hit while within the
357 * handler. We here save the original kprobe
358 * variables and just single step on instruction
359 * of the new probe without calling any user
360 * handlers.
361 */
362 save_previous_kprobe();
363 set_current_kprobe(p, regs);
364 p->nmissed++;
365 prepare_singlestep(p, regs);
366 kprobe_status = KPROBE_REENTER;
367 return 1;
267 } 368 }
268 disarm_kprobe(p, regs);
269 ret = 1;
270 } else { 369 } else {
271 p = current_kprobe; 370 p = current_kprobe;
272 if (p->break_handler && p->break_handler(p, regs)) { 371 if (p->break_handler && p->break_handler(p, regs)) {
@@ -296,11 +395,7 @@ int kprobe_handler(struct pt_regs *regs)
296 } 395 }
297 396
298 kprobe_status = KPROBE_HIT_ACTIVE; 397 kprobe_status = KPROBE_HIT_ACTIVE;
299 current_kprobe = p; 398 set_current_kprobe(p, regs);
300 kprobe_saved_rflags = kprobe_old_rflags
301 = (regs->eflags & (TF_MASK | IF_MASK));
302 if (is_IF_modifier(p->ainsn.insn))
303 kprobe_saved_rflags &= ~IF_MASK;
304 399
305 if (p->pre_handler && p->pre_handler(p, regs)) 400 if (p->pre_handler && p->pre_handler(p, regs))
306 /* handler has already set things up, so skip ss setup */ 401 /* handler has already set things up, so skip ss setup */
@@ -317,6 +412,55 @@ no_kprobe:
317} 412}
318 413
319/* 414/*
415 * For function-return probes, init_kprobes() establishes a probepoint
416 * here. When a retprobed function returns, this probe is hit and
417 * trampoline_probe_handler() runs, calling the kretprobe's handler.
418 */
419 void kretprobe_trampoline_holder(void)
420 {
421 asm volatile ( ".global kretprobe_trampoline\n"
422 "kretprobe_trampoline: \n"
423 "nop\n");
424 }
425
426/*
427 * Called when we hit the probe point at kretprobe_trampoline
428 */
429int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
430{
431 struct task_struct *tsk;
432 struct kretprobe_instance *ri;
433 struct hlist_head *head;
434 struct hlist_node *node;
435 unsigned long *sara = (unsigned long *)regs->rsp - 1;
436
437 tsk = arch_get_kprobe_task(sara);
438 head = kretprobe_inst_table_head(tsk);
439
440 hlist_for_each_entry(ri, node, head, hlist) {
441 if (ri->stack_addr == sara && ri->rp) {
442 if (ri->rp->handler)
443 ri->rp->handler(ri, regs);
444 }
445 }
446 return 0;
447}
448
449void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
450 unsigned long flags)
451{
452 struct kretprobe_instance *ri;
453 /* RA already popped */
454 unsigned long *sara = ((unsigned long *)regs->rsp) - 1;
455
456 while ((ri = get_rp_inst(sara))) {
457 regs->rip = (unsigned long)ri->ret_addr;
458 recycle_rp_inst(ri);
459 }
460 regs->eflags &= ~TF_MASK;
461}
462
463/*
320 * Called after single-stepping. p->addr is the address of the 464 * Called after single-stepping. p->addr is the address of the
321 * instruction whose first byte has been replaced by the "int 3" 465 * instruction whose first byte has been replaced by the "int 3"
322 * instruction. To avoid the SMP problems that can occur when we 466 * instruction. To avoid the SMP problems that can occur when we
@@ -401,13 +545,23 @@ int post_kprobe_handler(struct pt_regs *regs)
401 if (!kprobe_running()) 545 if (!kprobe_running())
402 return 0; 546 return 0;
403 547
404 if (current_kprobe->post_handler) 548 if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
549 kprobe_status = KPROBE_HIT_SSDONE;
405 current_kprobe->post_handler(current_kprobe, regs, 0); 550 current_kprobe->post_handler(current_kprobe, regs, 0);
551 }
406 552
407 resume_execution(current_kprobe, regs); 553 if (current_kprobe->post_handler != trampoline_post_handler)
554 resume_execution(current_kprobe, regs);
408 regs->eflags |= kprobe_saved_rflags; 555 regs->eflags |= kprobe_saved_rflags;
409 556
410 unlock_kprobes(); 557 /* Restore the original saved kprobes variables and continue. */
558 if (kprobe_status == KPROBE_REENTER) {
559 restore_previous_kprobe();
560 goto out;
561 } else {
562 unlock_kprobes();
563 }
564out:
411 preempt_enable_no_resched(); 565 preempt_enable_no_resched();
412 566
413 /* 567 /*
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 61a63be6b294..9c5aa2a790c7 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -23,6 +23,7 @@
23#include <linux/kernel_stat.h> 23#include <linux/kernel_stat.h>
24#include <linux/mc146818rtc.h> 24#include <linux/mc146818rtc.h>
25#include <linux/acpi.h> 25#include <linux/acpi.h>
26#include <linux/module.h>
26 27
27#include <asm/smp.h> 28#include <asm/smp.h>
28#include <asm/mtrr.h> 29#include <asm/mtrr.h>
@@ -45,7 +46,8 @@ int acpi_found_madt;
45int apic_version [MAX_APICS]; 46int apic_version [MAX_APICS];
46unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; 47unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
47int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 }; 48int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
48cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL }; 49unsigned char pci_bus_to_node [256];
50EXPORT_SYMBOL(pci_bus_to_node);
49 51
50static int mp_current_pci_id = 0; 52static int mp_current_pci_id = 0;
51/* I/O APIC entries */ 53/* I/O APIC entries */
@@ -904,11 +906,20 @@ void __init mp_config_acpi_legacy_irqs (void)
904 return; 906 return;
905} 907}
906 908
909#define MAX_GSI_NUM 4096
910
907int mp_register_gsi(u32 gsi, int edge_level, int active_high_low) 911int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
908{ 912{
909 int ioapic = -1; 913 int ioapic = -1;
910 int ioapic_pin = 0; 914 int ioapic_pin = 0;
911 int idx, bit = 0; 915 int idx, bit = 0;
916 static int pci_irq = 16;
917 /*
918 * Mapping between Global System Interrupts, which
919 * represent all possible interrupts, to the IRQs
920 * assigned to actual devices.
921 */
922 static int gsi_to_irq[MAX_GSI_NUM];
912 923
913 if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) 924 if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
914 return gsi; 925 return gsi;
@@ -943,11 +954,21 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
943 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { 954 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
944 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 955 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
945 mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 956 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
946 return gsi; 957 return gsi_to_irq[gsi];
947 } 958 }
948 959
949 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); 960 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
950 961
962 if (edge_level) {
963 /*
964 * For PCI devices assign IRQs in order, avoiding gaps
965 * due to unused I/O APIC pins.
966 */
967 int irq = gsi;
968 gsi = pci_irq++;
969 gsi_to_irq[irq] = gsi;
970 }
971
951 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, 972 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
952 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1, 973 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
953 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1); 974 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index dce8bab4306c..e59d1f9d6163 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -34,6 +34,7 @@
34#include <linux/ptrace.h> 34#include <linux/ptrace.h>
35#include <linux/utsname.h> 35#include <linux/utsname.h>
36#include <linux/random.h> 36#include <linux/random.h>
37#include <linux/kprobes.h>
37 38
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <asm/pgtable.h> 40#include <asm/pgtable.h>
@@ -293,6 +294,14 @@ void exit_thread(void)
293{ 294{
294 struct task_struct *me = current; 295 struct task_struct *me = current;
295 struct thread_struct *t = &me->thread; 296 struct thread_struct *t = &me->thread;
297
298 /*
299 * Remove function-return probe instances associated with this task
300 * and put them back on the free list. Do not insert an exit probe for
301 * this function, it will be disabled by kprobe_flush_task if you do.
302 */
303 kprobe_flush_task(me);
304
296 if (me->thread.io_bitmap_ptr) { 305 if (me->thread.io_bitmap_ptr) {
297 struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); 306 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
298 307
@@ -312,6 +321,13 @@ void flush_thread(void)
312 struct task_struct *tsk = current; 321 struct task_struct *tsk = current;
313 struct thread_info *t = current_thread_info(); 322 struct thread_info *t = current_thread_info();
314 323
324 /*
325 * Remove function-return probe instances associated with this task
326 * and put them back on the free list. Do not insert an exit probe for
327 * this function, it will be disabled by kprobe_flush_task if you do.
328 */
329 kprobe_flush_task(tsk);
330
315 if (t->flags & _TIF_ABI_PENDING) 331 if (t->flags & _TIF_ABI_PENDING)
316 t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32); 332 t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
317 333
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 99f038ede23c..000015dd5a8b 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -40,6 +40,8 @@
40#include <linux/acpi.h> 40#include <linux/acpi.h>
41#include <linux/kallsyms.h> 41#include <linux/kallsyms.h>
42#include <linux/edd.h> 42#include <linux/edd.h>
43#include <linux/mmzone.h>
44
43#include <asm/mtrr.h> 45#include <asm/mtrr.h>
44#include <asm/uaccess.h> 46#include <asm/uaccess.h>
45#include <asm/system.h> 47#include <asm/system.h>
@@ -348,7 +350,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
348 if (!memcmp(from, "mem=", 4)) 350 if (!memcmp(from, "mem=", 4))
349 parse_memopt(from+4, &from); 351 parse_memopt(from+4, &from);
350 352
351#ifdef CONFIG_DISCONTIGMEM 353#ifdef CONFIG_NUMA
352 if (!memcmp(from, "numa=", 5)) 354 if (!memcmp(from, "numa=", 5))
353 numa_setup(from+5); 355 numa_setup(from+5);
354#endif 356#endif
@@ -377,17 +379,20 @@ static __init void parse_cmdline_early (char ** cmdline_p)
377 *cmdline_p = command_line; 379 *cmdline_p = command_line;
378} 380}
379 381
380#ifndef CONFIG_DISCONTIGMEM 382#ifndef CONFIG_NUMA
381static void __init contig_initmem_init(void) 383static void __init
384contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
382{ 385{
383 unsigned long bootmap_size, bootmap; 386 unsigned long bootmap_size, bootmap;
384 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT; 387
385 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size); 388 memory_present(0, start_pfn, end_pfn);
386 if (bootmap == -1L) 389 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
387 panic("Cannot find bootmem map of size %ld\n",bootmap_size); 390 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
388 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn); 391 if (bootmap == -1L)
389 e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT); 392 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
390 reserve_bootmem(bootmap, bootmap_size); 393 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
394 e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
395 reserve_bootmem(bootmap, bootmap_size);
391} 396}
392#endif 397#endif
393 398
@@ -554,10 +559,10 @@ void __init setup_arch(char **cmdline_p)
554 acpi_numa_init(); 559 acpi_numa_init();
555#endif 560#endif
556 561
557#ifdef CONFIG_DISCONTIGMEM 562#ifdef CONFIG_NUMA
558 numa_initmem_init(0, end_pfn); 563 numa_initmem_init(0, end_pfn);
559#else 564#else
560 contig_initmem_init(); 565 contig_initmem_init(0, end_pfn);
561#endif 566#endif
562 567
563 /* Reserve direct mapping */ 568 /* Reserve direct mapping */
@@ -618,6 +623,8 @@ void __init setup_arch(char **cmdline_p)
618 } 623 }
619 } 624 }
620#endif 625#endif
626
627 sparse_init();
621 paging_init(); 628 paging_init();
622 629
623 check_ioapic(); 630 check_ioapic();
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 429c0269dc4e..98b7ba95d581 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -34,9 +34,9 @@
34 34
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
36 36
37void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 37int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
38 sigset_t *set, struct pt_regs * regs); 38 sigset_t *set, struct pt_regs * regs);
39void ia32_setup_frame(int sig, struct k_sigaction *ka, 39int ia32_setup_frame(int sig, struct k_sigaction *ka,
40 sigset_t *set, struct pt_regs * regs); 40 sigset_t *set, struct pt_regs * regs);
41 41
42asmlinkage long 42asmlinkage long
@@ -238,7 +238,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
238 return (void __user *)round_down(rsp - size, 16); 238 return (void __user *)round_down(rsp - size, 16);
239} 239}
240 240
241static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 241static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
242 sigset_t *set, struct pt_regs * regs) 242 sigset_t *set, struct pt_regs * regs)
243{ 243{
244 struct rt_sigframe __user *frame; 244 struct rt_sigframe __user *frame;
@@ -327,20 +327,23 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
327 current->comm, current->pid, frame, regs->rip, frame->pretcode); 327 current->comm, current->pid, frame, regs->rip, frame->pretcode);
328#endif 328#endif
329 329
330 return; 330 return 1;
331 331
332give_sigsegv: 332give_sigsegv:
333 force_sigsegv(sig, current); 333 force_sigsegv(sig, current);
334 return 0;
334} 335}
335 336
336/* 337/*
337 * OK, we're invoking a handler 338 * OK, we're invoking a handler
338 */ 339 */
339 340
340static void 341static int
341handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 342handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
342 sigset_t *oldset, struct pt_regs *regs) 343 sigset_t *oldset, struct pt_regs *regs)
343{ 344{
345 int ret;
346
344#ifdef DEBUG_SIG 347#ifdef DEBUG_SIG
345 printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n", 348 printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
346 current->pid, sig, 349 current->pid, sig,
@@ -384,20 +387,22 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
384#ifdef CONFIG_IA32_EMULATION 387#ifdef CONFIG_IA32_EMULATION
385 if (test_thread_flag(TIF_IA32)) { 388 if (test_thread_flag(TIF_IA32)) {
386 if (ka->sa.sa_flags & SA_SIGINFO) 389 if (ka->sa.sa_flags & SA_SIGINFO)
387 ia32_setup_rt_frame(sig, ka, info, oldset, regs); 390 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
388 else 391 else
389 ia32_setup_frame(sig, ka, oldset, regs); 392 ret = ia32_setup_frame(sig, ka, oldset, regs);
390 } else 393 } else
391#endif 394#endif
392 setup_rt_frame(sig, ka, info, oldset, regs); 395 ret = setup_rt_frame(sig, ka, info, oldset, regs);
393 396
394 if (!(ka->sa.sa_flags & SA_NODEFER)) { 397 if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
395 spin_lock_irq(&current->sighand->siglock); 398 spin_lock_irq(&current->sighand->siglock);
396 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 399 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
397 sigaddset(&current->blocked,sig); 400 sigaddset(&current->blocked,sig);
398 recalc_sigpending(); 401 recalc_sigpending();
399 spin_unlock_irq(&current->sighand->siglock); 402 spin_unlock_irq(&current->sighand->siglock);
400 } 403 }
404
405 return ret;
401} 406}
402 407
403/* 408/*
@@ -417,7 +422,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
417 * kernel mode. Just return without doing anything 422 * kernel mode. Just return without doing anything
418 * if so. 423 * if so.
419 */ 424 */
420 if ((regs->cs & 3) != 3) 425 if (!user_mode(regs))
421 return 1; 426 return 1;
422 427
423 if (try_to_freeze(0)) 428 if (try_to_freeze(0))
@@ -434,11 +439,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
434 * inside the kernel. 439 * inside the kernel.
435 */ 440 */
436 if (current->thread.debugreg7) 441 if (current->thread.debugreg7)
437 asm volatile("movq %0,%%db7" : : "r" (current->thread.debugreg7)); 442 set_debugreg(current->thread.debugreg7, 7);
438 443
439 /* Whee! Actually deliver the signal. */ 444 /* Whee! Actually deliver the signal. */
440 handle_signal(signr, &info, &ka, oldset, regs); 445 return handle_signal(signr, &info, &ka, oldset, regs);
441 return 1;
442 } 446 }
443 447
444 no_signal: 448 no_signal:
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index fb8c809b4cd9..66bf6ddeb0c3 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -64,6 +64,7 @@ static int notsc __initdata = 0;
64unsigned int cpu_khz; /* TSC clocks / usec, not used here */ 64unsigned int cpu_khz; /* TSC clocks / usec, not used here */
65static unsigned long hpet_period; /* fsecs / HPET clock */ 65static unsigned long hpet_period; /* fsecs / HPET clock */
66unsigned long hpet_tick; /* HPET clocks / interrupt */ 66unsigned long hpet_tick; /* HPET clocks / interrupt */
67static int hpet_use_timer;
67unsigned long vxtime_hz = PIT_TICK_RATE; 68unsigned long vxtime_hz = PIT_TICK_RATE;
68int report_lost_ticks; /* command line option */ 69int report_lost_ticks; /* command line option */
69unsigned long long monotonic_base; 70unsigned long long monotonic_base;
@@ -105,7 +106,9 @@ static inline unsigned int do_gettimeoffset_tsc(void)
105 106
106static inline unsigned int do_gettimeoffset_hpet(void) 107static inline unsigned int do_gettimeoffset_hpet(void)
107{ 108{
108 return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32; 109 /* cap counter read to one tick to avoid inconsistencies */
110 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
111 return (min(counter,hpet_tick) * vxtime.quot) >> 32;
109} 112}
110 113
111unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc; 114unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
@@ -301,7 +304,7 @@ unsigned long long monotonic_clock(void)
301 304
302 last_offset = vxtime.last; 305 last_offset = vxtime.last;
303 base = monotonic_base; 306 base = monotonic_base;
304 this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick; 307 this_offset = hpet_readl(HPET_COUNTER);
305 308
306 } while (read_seqretry(&xtime_lock, seq)); 309 } while (read_seqretry(&xtime_lock, seq));
307 offset = (this_offset - last_offset); 310 offset = (this_offset - last_offset);
@@ -377,7 +380,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
377 380
378 write_seqlock(&xtime_lock); 381 write_seqlock(&xtime_lock);
379 382
380 if (vxtime.hpet_address) { 383 if (vxtime.hpet_address)
384 offset = hpet_readl(HPET_COUNTER);
385
386 if (hpet_use_timer) {
387 /* if we're using the hpet timer functionality,
388 * we can more accurately know the counter value
 389 * when the timer interrupt occurred.
390 */
381 offset = hpet_readl(HPET_T0_CMP) - hpet_tick; 391 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
382 delay = hpet_readl(HPET_COUNTER) - offset; 392 delay = hpet_readl(HPET_COUNTER) - offset;
383 } else { 393 } else {
@@ -803,17 +813,18 @@ static int hpet_timer_stop_set_go(unsigned long tick)
803 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick, 813 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
804 * and period also hpet_tick. 814 * and period also hpet_tick.
805 */ 815 */
806 816 if (hpet_use_timer) {
807 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | 817 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
808 HPET_TN_32BIT, HPET_T0_CFG); 818 HPET_TN_32BIT, HPET_T0_CFG);
809 hpet_writel(hpet_tick, HPET_T0_CMP); 819 hpet_writel(hpet_tick, HPET_T0_CMP);
810 hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */ 820 hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
811 821 cfg |= HPET_CFG_LEGACY;
822 }
812/* 823/*
813 * Go! 824 * Go!
814 */ 825 */
815 826
816 cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY; 827 cfg |= HPET_CFG_ENABLE;
817 hpet_writel(cfg, HPET_CFG); 828 hpet_writel(cfg, HPET_CFG);
818 829
819 return 0; 830 return 0;
@@ -834,8 +845,7 @@ static int hpet_init(void)
834 845
835 id = hpet_readl(HPET_ID); 846 id = hpet_readl(HPET_ID);
836 847
837 if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) || 848 if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
838 !(id & HPET_ID_LEGSUP))
839 return -1; 849 return -1;
840 850
841 hpet_period = hpet_readl(HPET_PERIOD); 851 hpet_period = hpet_readl(HPET_PERIOD);
@@ -845,6 +855,8 @@ static int hpet_init(void)
845 hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) / 855 hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
846 hpet_period; 856 hpet_period;
847 857
858 hpet_use_timer = (id & HPET_ID_LEGSUP);
859
848 return hpet_timer_stop_set_go(hpet_tick); 860 return hpet_timer_stop_set_go(hpet_tick);
849} 861}
850 862
@@ -901,9 +913,11 @@ void __init time_init(void)
901 set_normalized_timespec(&wall_to_monotonic, 913 set_normalized_timespec(&wall_to_monotonic,
902 -xtime.tv_sec, -xtime.tv_nsec); 914 -xtime.tv_sec, -xtime.tv_nsec);
903 915
904 if (!hpet_init()) { 916 if (!hpet_init())
905 vxtime_hz = (1000000000000000L + hpet_period / 2) / 917 vxtime_hz = (1000000000000000L + hpet_period / 2) /
906 hpet_period; 918 hpet_period;
919
920 if (hpet_use_timer) {
907 cpu_khz = hpet_calibrate_tsc(); 921 cpu_khz = hpet_calibrate_tsc();
908 timename = "HPET"; 922 timename = "HPET";
909#ifdef CONFIG_X86_PM_TIMER 923#ifdef CONFIG_X86_PM_TIMER
@@ -968,7 +982,7 @@ void __init time_init_gtod(void)
968 if (unsynchronized_tsc()) 982 if (unsynchronized_tsc())
969 notsc = 1; 983 notsc = 1;
970 if (vxtime.hpet_address && notsc) { 984 if (vxtime.hpet_address && notsc) {
971 timetype = "HPET"; 985 timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
972 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick; 986 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
973 vxtime.mode = VXTIME_HPET; 987 vxtime.mode = VXTIME_HPET;
974 do_gettimeoffset = do_gettimeoffset_hpet; 988 do_gettimeoffset = do_gettimeoffset_hpet;
@@ -983,7 +997,7 @@ void __init time_init_gtod(void)
983 printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n"); 997 printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
984#endif 998#endif
985 } else { 999 } else {
986 timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC"; 1000 timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
987 vxtime.mode = VXTIME_TSC; 1001 vxtime.mode = VXTIME_TSC;
988 } 1002 }
989 1003
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 3dfec8fdabcd..121646fc43f6 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -274,7 +274,7 @@ EXPORT_SYMBOL(dump_stack);
274void show_registers(struct pt_regs *regs) 274void show_registers(struct pt_regs *regs)
275{ 275{
276 int i; 276 int i;
277 int in_kernel = (regs->cs & 3) == 0; 277 int in_kernel = !user_mode(regs);
278 unsigned long rsp; 278 unsigned long rsp;
279 const int cpu = safe_smp_processor_id(); 279 const int cpu = safe_smp_processor_id();
280 struct task_struct *cur = cpu_pda[cpu].pcurrent; 280 struct task_struct *cur = cpu_pda[cpu].pcurrent;
@@ -318,7 +318,7 @@ void handle_BUG(struct pt_regs *regs)
318 struct bug_frame f; 318 struct bug_frame f;
319 char tmp; 319 char tmp;
320 320
321 if (regs->cs & 3) 321 if (user_mode(regs))
322 return; 322 return;
323 if (__copy_from_user(&f, (struct bug_frame *) regs->rip, 323 if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
324 sizeof(struct bug_frame))) 324 sizeof(struct bug_frame)))
@@ -437,7 +437,7 @@ static void do_trap(int trapnr, int signr, char *str,
437 } 437 }
438#endif 438#endif
439 439
440 if ((regs->cs & 3) != 0) { 440 if (user_mode(regs)) {
441 struct task_struct *tsk = current; 441 struct task_struct *tsk = current;
442 442
443 if (exception_trace && unhandled_signal(tsk, signr)) 443 if (exception_trace && unhandled_signal(tsk, signr))
@@ -522,7 +522,7 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
522 } 522 }
523#endif 523#endif
524 524
525 if ((regs->cs & 3)!=0) { 525 if (user_mode(regs)) {
526 struct task_struct *tsk = current; 526 struct task_struct *tsk = current;
527 527
528 if (exception_trace && unhandled_signal(tsk, SIGSEGV)) 528 if (exception_trace && unhandled_signal(tsk, SIGSEGV))
@@ -638,7 +638,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
638 if (eregs == (struct pt_regs *)eregs->rsp) 638 if (eregs == (struct pt_regs *)eregs->rsp)
639 ; 639 ;
640 /* Exception from user space */ 640 /* Exception from user space */
641 else if (eregs->cs & 3) 641 else if (user_mode(eregs))
642 regs = ((struct pt_regs *)current->thread.rsp0) - 1; 642 regs = ((struct pt_regs *)current->thread.rsp0) - 1;
643 /* Exception from kernel and interrupts are enabled. Move to 643 /* Exception from kernel and interrupts are enabled. Move to
644 kernel process stack. */ 644 kernel process stack. */
@@ -669,7 +669,7 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
669 } 669 }
670#endif 670#endif
671 671
672 asm("movq %%db6,%0" : "=r" (condition)); 672 get_debugreg(condition, 6);
673 673
674 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, 674 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
675 SIGTRAP) == NOTIFY_STOP) 675 SIGTRAP) == NOTIFY_STOP)
@@ -697,7 +697,7 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
697 * allowing programs to debug themselves without the ptrace() 697 * allowing programs to debug themselves without the ptrace()
698 * interface. 698 * interface.
699 */ 699 */
700 if ((regs->cs & 3) == 0) 700 if (!user_mode(regs))
701 goto clear_TF_reenable; 701 goto clear_TF_reenable;
702 /* 702 /*
703 * Was the TF flag set by a debugger? If so, clear it now, 703 * Was the TF flag set by a debugger? If so, clear it now,
@@ -715,13 +715,13 @@ asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
715 info.si_signo = SIGTRAP; 715 info.si_signo = SIGTRAP;
716 info.si_errno = 0; 716 info.si_errno = 0;
717 info.si_code = TRAP_BRKPT; 717 info.si_code = TRAP_BRKPT;
718 if ((regs->cs & 3) == 0) 718 if (!user_mode(regs))
719 goto clear_dr7; 719 goto clear_dr7;
720 720
721 info.si_addr = (void __user *)regs->rip; 721 info.si_addr = (void __user *)regs->rip;
722 force_sig_info(SIGTRAP, &info, tsk); 722 force_sig_info(SIGTRAP, &info, tsk);
723clear_dr7: 723clear_dr7:
724 asm volatile("movq %0,%%db7"::"r"(0UL)); 724 set_debugreg(0UL, 7);
725 return; 725 return;
726 726
727clear_TF_reenable: 727clear_TF_reenable:
@@ -756,7 +756,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
756 unsigned short cwd, swd; 756 unsigned short cwd, swd;
757 757
758 conditional_sti(regs); 758 conditional_sti(regs);
759 if ((regs->cs & 3) == 0 && 759 if (!user_mode(regs) &&
760 kernel_math_error(regs, "kernel x87 math error")) 760 kernel_math_error(regs, "kernel x87 math error"))
761 return; 761 return;
762 762
@@ -822,7 +822,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
822 unsigned short mxcsr; 822 unsigned short mxcsr;
823 823
824 conditional_sti(regs); 824 conditional_sti(regs);
825 if ((regs->cs & 3) == 0 && 825 if (!user_mode(regs) &&
826 kernel_math_error(regs, "kernel simd math error")) 826 kernel_math_error(regs, "kernel simd math error"))
827 return; 827 return;
828 828
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index aed61a668a1b..33a873a3c223 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -12,6 +12,7 @@
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <asm/delay.h> 14#include <asm/delay.h>
15#include <asm/msr.h>
15 16
16#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
17#include <asm/smp.h> 18#include <asm/smp.h>
@@ -19,6 +20,12 @@
19 20
20int x86_udelay_tsc = 0; /* Delay via TSC */ 21int x86_udelay_tsc = 0; /* Delay via TSC */
21 22
23int read_current_timer(unsigned long *timer_value)
24{
25 rdtscll(*timer_value);
26 return 0;
27}
28
22void __delay(unsigned long loops) 29void __delay(unsigned long loops)
23{ 30{
24 unsigned bclock, now; 31 unsigned bclock, now;
diff --git a/arch/x86_64/mm/Makefile b/arch/x86_64/mm/Makefile
index 66c354ad80ca..1d232a87f113 100644
--- a/arch/x86_64/mm/Makefile
+++ b/arch/x86_64/mm/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y := init.o fault.o ioremap.o extable.o pageattr.o 5obj-y := init.o fault.o ioremap.o extable.o pageattr.o
6obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 6obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
7obj-$(CONFIG_DISCONTIGMEM) += numa.o 7obj-$(CONFIG_NUMA) += numa.o
8obj-$(CONFIG_K8_NUMA) += k8topology.o 8obj-$(CONFIG_K8_NUMA) += k8topology.o
9obj-$(CONFIG_ACPI_NUMA) += srat.o 9obj-$(CONFIG_ACPI_NUMA) += srat.o
10 10
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 57d3ab15a5c7..2f187986f940 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -74,7 +74,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
74 instr = (unsigned char *)convert_rip_to_linear(current, regs); 74 instr = (unsigned char *)convert_rip_to_linear(current, regs);
75 max_instr = instr + 15; 75 max_instr = instr + 15;
76 76
77 if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE) 77 if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
78 return 0; 78 return 0;
79 79
80 while (scan_more && instr < max_instr) { 80 while (scan_more && instr < max_instr) {
@@ -106,7 +106,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
106 /* Could check the LDT for lm, but for now it's good 106 /* Could check the LDT for lm, but for now it's good
107 enough to assume that long mode only uses well known 107 enough to assume that long mode only uses well known
108 segments or kernel. */ 108 segments or kernel. */
109 scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS); 109 scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
110 break; 110 break;
111 111
112 case 0x60: 112 case 0x60:
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index dbe53b4c7e66..72e4b364ed73 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -318,7 +318,7 @@ void zap_low_mappings(void)
318 flush_tlb_all(); 318 flush_tlb_all();
319} 319}
320 320
321#ifndef CONFIG_DISCONTIGMEM 321#ifndef CONFIG_NUMA
322void __init paging_init(void) 322void __init paging_init(void)
323{ 323{
324 { 324 {
@@ -427,13 +427,16 @@ void __init mem_init(void)
427 reservedpages = 0; 427 reservedpages = 0;
428 428
429 /* this will put all low memory onto the freelists */ 429 /* this will put all low memory onto the freelists */
430#ifdef CONFIG_DISCONTIGMEM 430#ifdef CONFIG_NUMA
431 totalram_pages += numa_free_all_bootmem(); 431 totalram_pages += numa_free_all_bootmem();
432 tmp = 0; 432 tmp = 0;
433 /* should count reserved pages here for all nodes */ 433 /* should count reserved pages here for all nodes */
434#else 434#else
435
436#ifdef CONFIG_FLATMEM
435 max_mapnr = end_pfn; 437 max_mapnr = end_pfn;
436 if (!mem_map) BUG(); 438 if (!mem_map) BUG();
439#endif
437 440
438 totalram_pages += free_all_bootmem(); 441 totalram_pages += free_all_bootmem();
439 442
@@ -515,7 +518,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
515void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 518void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
516{ 519{
517 /* Should check here against the e820 map to avoid double free */ 520 /* Should check here against the e820 map to avoid double free */
518#ifdef CONFIG_DISCONTIGMEM 521#ifdef CONFIG_NUMA
519 int nid = phys_to_nid(phys); 522 int nid = phys_to_nid(phys);
520 reserve_bootmem_node(NODE_DATA(nid), phys, len); 523 reserve_bootmem_node(NODE_DATA(nid), phys, len);
521#else 524#else
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 58aac23760ef..6972df480d2b 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -178,7 +178,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
178 if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS) 178 if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
179 return (__force void __iomem *)phys_to_virt(phys_addr); 179 return (__force void __iomem *)phys_to_virt(phys_addr);
180 180
181#ifndef CONFIG_DISCONTIGMEM 181#ifdef CONFIG_FLATMEM
182 /* 182 /*
183 * Don't allow anybody to remap normal RAM that we're using.. 183 * Don't allow anybody to remap normal RAM that we're using..
184 */ 184 */
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index fd9f25d7a6c4..84cde796ecb1 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -66,6 +66,13 @@ int __init compute_hash_shift(struct node *nodes, int numnodes)
66 return -1; 66 return -1;
67} 67}
68 68
69#ifdef CONFIG_SPARSEMEM
70int early_pfn_to_nid(unsigned long pfn)
71{
72 return phys_to_nid(pfn << PAGE_SHIFT);
73}
74#endif
75
69/* Initialize bootmem allocator for a node */ 76/* Initialize bootmem allocator for a node */
70void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) 77void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
71{ 78{
@@ -80,6 +87,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
80 start_pfn = start >> PAGE_SHIFT; 87 start_pfn = start >> PAGE_SHIFT;
81 end_pfn = end >> PAGE_SHIFT; 88 end_pfn = end >> PAGE_SHIFT;
82 89
90 memory_present(nodeid, start_pfn, end_pfn);
83 nodedata_phys = find_e820_area(start, end, pgdat_size); 91 nodedata_phys = find_e820_area(start, end, pgdat_size);
84 if (nodedata_phys == -1L) 92 if (nodedata_phys == -1L)
85 panic("Cannot find memory pgdat in node %d\n", nodeid); 93 panic("Cannot find memory pgdat in node %d\n", nodeid);
diff --git a/arch/x86_64/pci/k8-bus.c b/arch/x86_64/pci/k8-bus.c
index 62349c78db57..7e7d0c2a0025 100644
--- a/arch/x86_64/pci/k8-bus.c
+++ b/arch/x86_64/pci/k8-bus.c
@@ -53,25 +53,11 @@ fill_mp_bus_to_cpumask(void)
53 for (j = SECONDARY_LDT_BUS_NUMBER(ldtbus); 53 for (j = SECONDARY_LDT_BUS_NUMBER(ldtbus);
54 j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus); 54 j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus);
55 j++) 55 j++)
56 pci_bus_to_cpumask[j] = 56 pci_bus_to_node[j] = NODE_ID(nid);
57 node_to_cpumask(NODE_ID(nid));
58 } 57 }
59 } 58 }
60 } 59 }
61 60
62 /* quick sanity check */
63 printed = 0;
64 for (i = 0; i < 256; i++) {
65 if (cpus_empty(pci_bus_to_cpumask[i])) {
66 pci_bus_to_cpumask[i] = CPU_MASK_ALL;
67 if (printed)
68 continue;
69 printk(KERN_ERR
70 "k8-bus.c: some busses have empty cpu mask\n");
71 printed = 1;
72 }
73 }
74
75 return 0; 61 return 0;
76} 62}
77 63
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
new file mode 100644
index 000000000000..3e89767cea72
--- /dev/null
+++ b/arch/xtensa/Kconfig
@@ -0,0 +1,258 @@
1# For a description of the syntax of this configuration file,
2# see Documentation/kbuild/config-language.txt.
3
4mainmenu "Linux/Xtensa Kernel Configuration"
5
6config FRAME_POINTER
7 bool
8 default n
9
10config XTENSA
11 bool
12 default y
13 help
14 Xtensa processors are 32-bit RISC machines designed by Tensilica
15 primarily for embedded systems. These processors are both
16 configurable and extensible. The Linux port to the Xtensa
17 architecture supports all processor configurations and extensions,
18 with reasonable minimum requirements. The Xtensa Linux project has
19 a home page at <http://xtensa.sourceforge.net/>.
20
21config UID16
22 bool
23 default n
24
25config RWSEM_XCHGADD_ALGORITHM
26 bool
27 default y
28
29config HAVE_DEC_LOCK
30 bool
31 default y
32
33config GENERIC_HARDIRQS
34 bool
35 default y
36
37source "init/Kconfig"
38
39menu "Processor type and features"
40
41choice
42 prompt "Xtensa Processor Configuration"
43 default XTENSA_CPU_LINUX_BE
44
45config XTENSA_CPU_LINUX_BE
46 bool "linux_be"
47 ---help---
48 The linux_be processor configuration is the baseline Xtensa
49 configurations included in this kernel and also used by
50 binutils, gcc, and gdb. It contains no TIE, no coprocessors,
51 and the following configuration options:
52
53 Code Density Option 2 Misc Special Registers
54 NSA/NSAU Instructions 128-bit Data Bus Width
55 Processor ID 8K, 2-way I and D Caches
56 Zero-Overhead Loops 2 Inst Address Break Registers
57 Big Endian 2 Data Address Break Registers
58 64 General-Purpose Registers JTAG Interface and Trace Port
59 17 Interrupts MMU w/ TLBs and Autorefill
60 3 Interrupt Levels 8 Autorefill Ways (I/D TLBs)
61 3 Timers Unaligned Exceptions
62endchoice
63
64config MMU
65 bool
66 default y
67
68config XTENSA_UNALIGNED_USER
 69 bool "Unaligned memory access in user space"
70 ---help---
71 The Xtensa architecture currently does not handle unaligned
72 memory accesses in hardware but through an exception handler.
 73 By default, unaligned memory accesses are disabled in user space.
74
75 Say Y here to enable unaligned memory access in user space.
76
77config PREEMPT
78 bool "Preemptible Kernel"
79 ---help---
80 This option reduces the latency of the kernel when reacting to
81 real-time or interactive events by allowing a low priority process to
82 be preempted even if it is in kernel mode executing a system call.
83 Unfortunately the kernel code has some race conditions if both
84 CONFIG_SMP and CONFIG_PREEMPT are enabled, so this option is
85 currently disabled if you are building an SMP kernel.
86
87 Say Y here if you are building a kernel for a desktop, embedded
88 or real-time system. Say N if you are unsure.
89
90config MATH_EMULATION
91 bool "Math emulation"
92 help
 93 Can we use information from the configuration file?
94
95config HIGHMEM
96 bool "High memory support"
97
98endmenu
99
100menu "Platform options"
101
102choice
103 prompt "Xtensa System Type"
104 default XTENSA_PLATFORM_ISS
105
106config XTENSA_PLATFORM_ISS
107 bool "ISS"
108 help
109 ISS is an acronym for Tensilica's Instruction Set Simulator.
110
111config XTENSA_PLATFORM_XT2000
112 bool "XT2000"
113 help
114 XT2000 is the name of Tensilica's feature-rich emulation platform.
115 This hardware is capable of running a full Linux distribution.
116
117endchoice
118
119
120config XTENSA_CALIBRATE_CCOUNT
121 bool "Auto calibration of the CPU clock rate"
122 ---help---
123 On some platforms (XT2000, for example), the CPU clock rate can
124 vary. The frequency can be determined, however, by measuring
125 against a well known, fixed frequency, such as an UART oscillator.
126
127config XTENSA_CPU_CLOCK
128 int "CPU clock rate [MHz]"
129 depends on !XTENSA_CALIBRATE_CCOUNT
130 default "16"
131
132config GENERIC_CALIBRATE_DELAY
133 bool "Auto calibration of the BogoMIPS value"
134 ---help---
 135 The BogoMIPS value can easily be derived from the CPU frequency.
136
137config CMDLINE_BOOL
138 bool "Default bootloader kernel arguments"
139
140config CMDLINE
141 string "Initial kernel command string"
142 depends on CMDLINE_BOOL
143 default "console=ttyS0,38400 root=/dev/ram"
144 help
145 On some architectures (EBSA110 and CATS), there is currently no way
146 for the boot loader to pass arguments to the kernel. For these
147 architectures, you should supply some command-line options at build
148 time by entering them here. As a minimum, you should specify the
149 memory size and the root device (e.g., mem=64M root=/dev/nfs).
150
151config SERIAL_CONSOLE
152 bool
153 depends on XTENSA_PLATFORM_ISS
154 default y
155
156config XTENSA_ISS_NETWORK
157 bool
158 depends on XTENSA_PLATFORM_ISS
159 default y
160
161endmenu
162
163menu "Bus options"
164
165config PCI
166 bool "PCI support" if !XTENSA_PLATFORM_ISS
167 depends on !XTENSA_PLATFORM_ISS
168 default y
169 help
170 Find out whether you have a PCI motherboard. PCI is the name of a
171 bus system, i.e. the way the CPU talks to the other stuff inside
172 your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
173 VESA. If you have PCI, say Y, otherwise N.
174
175 The PCI-HOWTO, available from
176 <http://www.linuxdoc.org/docs.html#howto>, contains valuable
177 information about which PCI hardware does work under Linux and which
178 doesn't
179
180source "drivers/pci/Kconfig"
181
182config HOTPLUG
183
184 bool "Support for hot-pluggable devices"
185 ---help---
186 Say Y here if you want to plug devices into your computer while
187 the system is running, and be able to use them quickly. In many
188 cases, the devices can likewise be unplugged at any time too.
189
190 One well known example of this is PCMCIA- or PC-cards, credit-card
191 size devices such as network cards, modems or hard drives which are
192 plugged into slots found on all modern laptop computers. Another
193 example, used on modern desktops as well as laptops, is USB.
194
195 Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
196 software (at <http://linux-hotplug.sourceforge.net/>) and install it.
197 Then your kernel will automatically call out to a user mode "policy
198 agent" (/sbin/hotplug) to load modules and set up software needed
199 to use devices as you hotplug them.
200
201source "drivers/pcmcia/Kconfig"
202
203source "drivers/pci/hotplug/Kconfig"
204
205endmenu
206
 207menu "Executable file formats"
208
209# only elf supported
210config KCORE_ELF
211 bool
212 depends on PROC_FS
213 default y
214 help
215 If you enabled support for /proc file system then the file
216 /proc/kcore will contain the kernel core image in ELF format. This
217 can be used in gdb:
218
219 $ cd /usr/src/linux ; gdb vmlinux /proc/kcore
220
221 This is especially useful if you have compiled the kernel with the
222 "-g" option to preserve debugging information. It is mainly used
223 for examining kernel data structures on the live kernel.
224
225source "fs/Kconfig.binfmt"
226
227endmenu
228
229source "drivers/Kconfig"
230
231source "fs/Kconfig"
232
233menu "Xtensa initrd options"
234 depends on BLK_DEV_INITRD
235
236 config EMBEDDED_RAMDISK
237 bool "Embed root filesystem ramdisk into the kernel"
238
239config EMBEDDED_RAMDISK_IMAGE
 240 string "Filename of gzipped ramdisk image"
241 depends on EMBEDDED_RAMDISK
242 default "ramdisk.gz"
243 help
244 This is the filename of the ramdisk image to be built into the
245 kernel. Relative pathnames are relative to arch/xtensa/boot/ramdisk/.
246 The ramdisk image is not part of the kernel distribution; you must
247 provide one yourself.
248endmenu
249
250source "arch/xtensa/Kconfig.debug"
251
252source "security/Kconfig"
253
254source "crypto/Kconfig"
255
256source "lib/Kconfig"
257
258
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
new file mode 100644
index 000000000000..11c585295dd7
--- /dev/null
+++ b/arch/xtensa/Kconfig.debug
@@ -0,0 +1,7 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5endmenu
6
7
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
new file mode 100644
index 000000000000..4fa27453b1f9
--- /dev/null
+++ b/arch/xtensa/Makefile
@@ -0,0 +1,102 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2001 - 2005 Tensilica Inc.
7#
8# This file is included by the global makefile so that you can add your own
 9# architecture-specific flags and dependencies. Remember to have actions
10# for "archclean" and "archdep" for cleaning up and making dependencies for
11# this architecture
12
13# Core configuration.
14# (Use CPU=<xtensa_config> to use another default compiler.)
15
16cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be
17cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom
18
19CPU = $(cpu-y)
20export CPU
21
22# Platform configuration
23
24platform-y := common
25platform-$(CONFIG_XTENSA_PLATFORM_XT2000) := xt2000
26platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
27
28PLATFORM = $(platform-y)
29export PLATFORM
30
31#LDFLAGS_vmlinux := -T$(word 1,$(LINKSCRIPT))
32AFLAGS_vmlinux.lds.o := -Uxtensa
33CPPFLAGS += -Iarch/xtensa -Iinclude/asm -mlongcalls -g
34AFLAGS += -Iarch/xtensa -Iinclude/asm
35CPP = $(CC) -E $(CFLAGS)
36
37cflags-y += -Iarch/xtensa -pipe -mlongcalls
38
39
40KBUILD_DEFCONFIG := common_defconfig
41
42# ramdisk/initrd support
43# You need a compressed ramdisk image, named ramdisk.gz in
44# arch/xtensa/boot/ramdisk
45
46core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
47
48# Test for cross compiling
49
50ifneq ($(CPU),)
51 COMPILE_ARCH = $(shell uname -m)
52
53 ifneq ($(COMPILE_ARCH), xtensa)
54 ifndef CROSS_COMPILE
55 CROSS_COMPILE = xtensa_$(CPU)-
56 endif
57 endif
58endif
59
60#
61
62LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
63
64head-y := arch/xtensa/kernel/head.o
65core-y += arch/xtensa/kernel/ \
66 arch/xtensa/mm/ arch/xtensa/platform-$(PLATFORM)/
67libs-y += arch/xtensa/lib/ $(LIBGCC)
68
69boot := arch/xtensa/boot
70
71arch/xtensa/kernel/asm-offsets.s: \
72 arch/xtensa/kernel/asm-offsets.c \
73 include/asm-xtensa/.platform
74
75include/asm-xtensa/offsets.h: arch/xtensa/kernel/asm-offsets.s
76 $(call filechk,gen-asm-offsets)
77
78prepare: include/asm-xtensa/.platform include/asm-xtensa/offsets.h
79
80# Update machine cpu and platform symlinks if something which affects
81# them changed.
82
83include/asm-xtensa/.platform: $(wildcard include/config/arch/*.h)
84 @echo ' Setting up cpu ($(CPU)) and platform ($(PLATFORM)) symlinks'
85 $(Q)rm -f include/asm-xtensa/platform
86 $(Q)rm -f include/asm-xtensa/xtensa/config
87 $(Q)(cd include/asm-xtensa/; ln -sf platform-$(PLATFORM) platform)
88 $(Q)(cd include/asm-xtensa/xtensa; ln -sf config-$(CPU) config)
89
90all: zImage
91
92bzImage : zImage
93
94zImage zImage.initrd: vmlinux
95 $(Q)$(MAKE) $(build)=$(boot) $@
96
97CLEAN_FILES += arch/xtensa/vmlinux.lds include/asm-xtensa/offset.h
98
99define archhelp
100 @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
101endef
102
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
new file mode 100644
index 000000000000..260f456ccf0b
--- /dev/null
+++ b/arch/xtensa/boot/Makefile
@@ -0,0 +1,37 @@
1#
2# arch/xtensa/boot/Makefile
3#
4# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive
6# for more details.
7#
8#
9
10
11CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
12HOSTFLAGS += -Iarch/$(ARCH)/boot/include
13
14BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
15
16
17export CFLAGS
18export AFLAGS
19export BIG_ENDIAN
20
21# Subdirs for the boot loader(s)
22
23bootdir-$(CONFIG_XTENSA_PLATFORM_ISS) += boot-elf
24bootdir-$(CONFIG_XTENSA_PLATFORM_XT2000) += boot-redboot boot-elf
25
26subdir-y := lib/
27
28subdir-y += boot-elf/ boot-redboot/
29
30zImage zImage.initrd Image Image.initrd: $(bootdir-y)
31
32$(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
33 $(addprefix $(obj)/,$(host-progs))
34 $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
35
36
37
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
new file mode 100644
index 000000000000..f6ef6a369667
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -0,0 +1,52 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6
7GZIP = gzip
8GZIP_FLAGS = -v9fc
9
10ifeq ($(BIG_ENDIAN),1)
11OBJCOPY_ARGS := -O elf32-xtensa-be
12else
13OBJCOPY_ARGS := -O elf32-xtensa-le
14endif
15
16export OBJCOPY_ARGS
17
18boot-y := bootstrap.o
19
20OBJS := $(addprefix $(obj)/,$(boot-y))
21
22Image: vmlinux $(OBJS)
23 $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
24 vmlinux vmlinux.tmp
25 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
26 --add-section image=vmlinux.tmp \
27 --set-section-flags image=contents,alloc,load,load,data \
28 $(OBJS) $@.tmp
29 $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
30 -T arch/$(ARCH)/boot/boot-elf/boot.ld \
31 -o arch/$(ARCH)/boot/$@.elf $@.tmp
32 rm -f $@.tmp vmlinux.tmp
33
34Image.initrd: vmlinux $(OBJS)
35 $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
36 --add-section .initrd=arch/$(ARCH)/boot/ramdisk \
37 --set-section-flags .initrd=contents,alloc,load,load,data \
38 vmlinux vmlinux.tmp
39 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
40 --add-section image=vmlinux.tmp \
41 --set-section-flags image=contents,alloc,load,load,data \
42 $(OBJS) $@.tmp
43 $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
44 -T arch/$(ARCH)/boot/boot-elf/boot.ld \
45 -o arch/$(ARCH)/boot/$@.elf $@.tmp
46 rm -f $@.tmp vmlinux.tmp
47
48
49zImage: Image
50
51zImage.initrd: Image.initrd
52
diff --git a/arch/xtensa/boot/boot-elf/boot.ld b/arch/xtensa/boot/boot-elf/boot.ld
new file mode 100644
index 000000000000..4ab06a0a7a6b
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/boot.ld
@@ -0,0 +1,71 @@
1OUTPUT_ARCH(xtensa)
2
3SECTIONS
4{
5 .start 0xD0000000 : { *(.start) }
6
7 .text 0xD0000000:
8 {
9 __reloc_start = . ;
10 _text_start = . ;
11 *(.literal .text.literal .text)
12 _text_end = . ;
13 }
14
15 .rodata ALIGN(0x04):
16 {
17 *(.rodata)
18 *(.rodata1)
19 }
20
21 .data ALIGN(0x04):
22 {
23 *(.data)
24 *(.data1)
25 *(.sdata)
26 *(.sdata2)
27 *(.got.plt)
28 *(.got)
29 *(.dynamic)
30 }
31
32 __reloc_end = . ;
33
34 .initrd ALIGN(0x10) :
35 {
36 boot_initrd_start = . ;
37 *(.initrd)
38 boot_initrd_end = .;
39 }
40
41 . = ALIGN(0x10);
42 __image_load = . ;
43 .image 0xd0001000:
44 {
45 _image_start = .;
46 *(image)
47 . = (. + 3) & ~ 3;
48 _image_end = . ;
49 }
50
51
52 .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
53 {
54 __bss_start = .;
55 *(.sbss)
56 *(.scommon)
57 *(.dynbss)
58 *(.bss)
59 __bss_end = .;
60 }
61 _end = .;
62 _param_start = .;
63
64 .ResetVector.text 0xfe000020 :
65 {
66 *(.ResetVector.text)
67 }
68
69
70 PROVIDE (end = .);
71}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
new file mode 100644
index 000000000000..7cba94abdab8
--- /dev/null
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -0,0 +1,37 @@
1
2#include <xtensa/config/specreg.h>
3#include <xtensa/config/core.h>
4
5#include <linux/config.h>
6#include <asm/bootparam.h>
7
8
9/* ResetVector
10 */
11 .section .ResetVector.text, "ax"
12 .global _ResetVector
13_ResetVector:
14 _j reset
15 .align 4
16RomInitAddr:
17 .word 0xd0001000
18RomBootParam:
19 .word _bootparam
20reset:
21 l32r a0, RomInitAddr
22 l32r a2, RomBootParam
23 movi a3, 0
24 movi a4, 0
25 jx a0
26
27 .align 4
28 .section .bootstrap.data, "aw"
29
30 .globl _bootparam
31_bootparam:
32 .short BP_TAG_FIRST
33 .short 4
34 .long BP_VERSION
35 .short BP_TAG_LAST
36 .short 0
37 .long 0
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
new file mode 100644
index 000000000000..ca8a68bc8472
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -0,0 +1,35 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6
7GZIP = gzip
8GZIP_FLAGS = -v9fc
9ifeq ($(BIG_ENDIAN),1)
10OBJCOPY_ARGS := -O elf32-xtensa-be
11else
12OBJCOPY_ARGS := -O elf32-xtensa-le
13endif
14
15LD_ARGS = -T $(obj)/boot.ld
16
17boot-y := bootstrap.o
18
19OBJS := $(addprefix $(obj)/,$(boot-y))
20LIBS := arch/$(ARCH)/boot/lib/lib.a arch/$(ARCH)/lib/lib.a
21
22LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
23
24zImage: vmlinux $(OBJS) $(LIBS)
25 $(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
26 $(TOPDIR)/vmlinux vmlinux.tmp
27 gzip -vf9 vmlinux.tmp
28 $(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
29 --add-section image=vmlinux.tmp.gz \
30 --set-section-flags image=contents,alloc,load,load,data \
31 $(OBJS) $@.tmp
32 $(LD) $(LD_ARGS) -o $@.elf $@.tmp $(LIBS) -L/xtensa-elf/lib $(LIBGCC)
33 $(OBJCOPY) -S -O binary $@.elf arch/$(ARCH)/boot/images/$@.redboot
34# rm -f $@.tmp $@.elf vmlinux.tmp.gz
35
diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld
new file mode 100644
index 000000000000..65b726410e8a
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/boot.ld
@@ -0,0 +1,66 @@
1OUTPUT_ARCH(xtensa)
2
3SECTIONS
4{
5 .start 0xD0200000 : { *(.start) }
6
7 .text :
8 {
9 __reloc_start = . ;
10 _text_start = . ;
11 *(.literal .text.literal .text)
12 _text_end = . ;
13 }
14
15 .rodata ALIGN(0x04):
16 {
17 *(.rodata)
18 *(.rodata1)
19 }
20
21 .data ALIGN(0x04):
22 {
23 *(.data)
24 *(.data1)
25 *(.sdata)
26 *(.sdata2)
27 *(.got.plt)
28 *(.got)
29 *(.dynamic)
30 }
31
32 __reloc_end = . ;
33
34 .initrd ALIGN(0x10) :
35 {
36 boot_initrd_start = . ;
37 *(.initrd)
38 boot_initrd_end = .;
39 }
40
41 . = ALIGN(0x10);
42 __image_load = . ;
43 .image 0xd0001000: AT(__image_load)
44 {
45 _image_start = .;
46 *(image)
47 . = (. + 3) & ~ 3;
48 _image_end = . ;
49 }
50
51
52 .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
53 {
54 __bss_start = .;
55 *(.sbss)
56 *(.scommon)
57 *(.dynbss)
58 *(.bss)
59 __bss_end = .;
60 }
61 _end = .;
62 _param_start = .;
63
64
65 PROVIDE (end = .);
66}
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
new file mode 100644
index 000000000000..ee636b0da81c
--- /dev/null
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -0,0 +1,246 @@
1
2#define _ASMLANGUAGE
3#include <xtensa/config/specreg.h>
4#include <xtensa/config/core.h>
5#include <xtensa/cacheasm.h>
6
7 /*
8 * RB-Data: RedBoot data/bss
9 * P: Boot-Parameters
10 * L: Kernel-Loader
11 *
12 * The Linux-Kernel image including the loader must be loaded
13 * to a position so that the kernel and the boot parameters
14 * can fit in the space before the load address.
15 * ______________________________________________________
16 * |_RB-Data_|_P_|__________|_L_|___Linux-Kernel___|______|
17 * ^
18 * ^ Load address
19 * ______________________________________________________
20 * |___Linux-Kernel___|_P_|_L_|___________________________|
21 *
22 * The loader copies the parameter to the position that will
23 * be the end of the kernel and itself to the end of the
24 * parameter list.
25 */
26
27/* Make sure we have enough space for the 'uncompressor' */
28
29#define STACK_SIZE 32768
30#define HEAP_SIZE (131072*4)
31
32 # a2: Parameter list
33 # a3: Size of parameter list
34
35 .section .start, "ax"
36
37 .globl __start
38 /* this must be the first byte of the loader! */
39__start:
40 entry sp, 32 # we do not intend to return
41 _call0 _start
42__start_a0:
43 .align 4
44
45 .section .text, "ax"
46 .begin literal_prefix .text
47
48 /* put literals in here! */
49
50 .globl _start
51_start:
52
53 /* 'reset' window registers */
54
55 movi a4, 1
56 wsr a4, PS
57 rsync
58
59 rsr a5, WINDOWBASE
60 ssl a5
61 sll a4, a4
62 wsr a4, WINDOWSTART
63 rsync
64
65 movi a4, 0x00040000
66 wsr a4, PS
67 rsync
68
69 /* copy the loader to its address
70 * Note: The loader itself is a very small piece, so we assume we
71 * don't partially overlap. We also assume (even more important)
72 * that the kernel image is out of the way. Usually, when the
73 * load address of this image is not at an arbitrary address,
74 * but aligned to some 10K's we shouldn't overlap.
75 */
76
77 /* Note: The assembler cannot relax "addi a0, a0, ..." to an
78 l32r, so we load to a4 first. */
79
80 addi a4, a0, __start - __start_a0
81 mov a0, a4
82 movi a4, __start
83 movi a5, __reloc_end
84
85 # a0: address where this code has been loaded
86 # a4: compiled address of __start
87 # a5: compiled end address
88
89 mov.n a7, a0
90 mov.n a8, a4
91
921:
93 l32i a10, a7, 0
94 l32i a11, a7, 4
95 s32i a10, a8, 0
96 s32i a11, a8, 4
97 l32i a10, a7, 8
98 l32i a11, a7, 12
99 s32i a10, a8, 8
100 s32i a11, a8, 12
101 addi a8, a8, 16
102 addi a7, a7, 16
103 blt a8, a5, 1b
104
105
106 /* We have to flush and invalidate the caches here before we jump. */
107
108#if XCHAL_DCACHE_IS_WRITEBACK
109 dcache_writeback_all a5, a6
110#endif
111 icache_invalidate_all a5, a6
112
113 movi a11, _reloc
114 jx a11
115
116 .globl _reloc
117_reloc:
118
119 /* RedBoot is now at the end of the memory, so we don't have
120 * to copy the parameter list. Keep the code around; in case
121 * we need it again. */
122#if 0
123 # a0: load address
124 # a2: start address of parameter list
125 # a3: length of parameter list
126 # a4: __start
127
128 /* copy the parameter list out of the way */
129
130 movi a6, _param_start
131 add a3, a2, a3
1322:
133 l32i a8, a2, 0
134 s32i a8, a6, 0
135 addi a2, a2, 4
136 addi a6, a6, 4
137 blt a2, a3, 2b
138#endif
139
140 /* clear BSS section */
141 movi a6, __bss_start
142 movi a7, __bss_end
143 movi.n a5, 0
1443:
145 s32i a5, a6, 0
146 addi a6, a6, 4
147 blt a6, a7, 3b
148
149 movi a5, -16
150 movi a1, _stack + STACK_SIZE
151 and a1, a1, a5
152
153 /* Uncompress the kernel */
154
155 # a0: load address
156 # a2: boot parameter
157 # a4: __start
158
159 movi a3, __image_load
160 sub a4, a3, a4
161 add a8, a0, a4
162
163 # a1 Stack
164 # a8(a4) Load address of the image
165
166 movi a6, _image_start
167 movi a10, _image_end
168 movi a7, 0x1000000
169 sub a11, a10, a6
170 movi a9, complen
171 s32i a11, a9, 0
172
173 movi a0, 0
174
175 # a6 destination
176 # a7 maximum size of destination
177 # a8 source
178 # a9 ptr to length
179
180 .extern gunzip
181 movi a4, gunzip
182 beqz a4, 1f
183
184 callx4 a4
185
186 j 2f
187
188
189 # a6 destination start
190 # a7 maximum size of destination
191 # a8 source start
192 # a9 ptr to length
193 # a10 destination end
194
1951:
196 l32i a9, a8, 0
197 l32i a11, a8, 4
198 s32i a9, a6, 0
199 s32i a11, a6, 4
200 l32i a9, a8, 8
201 l32i a11, a8, 12
202 s32i a9, a6, 8
203 s32i a11, a6, 12
204 addi a6, a6, 16
205 addi a8, a8, 16
206 blt a6, a10, 1b
207
208
209 /* jump to the kernel */
2102:
211#if XCHAL_DCACHE_IS_WRITEBACK
212 dcache_writeback_all a5, a6
213#endif
214 icache_invalidate_all a5, a6
215
216 movi a5, __start
217 movi a3, boot_initrd_start
218 movi a4, boot_initrd_end
219 sub a3, a3, a5
220 sub a4, a4, a5
221 add a3, a0, a3
222 add a4, a0, a4
223
224 # a2 Boot parameter list
225 # a3 initrd_start (virtual load address)
226 # a4 initrd_end (virtual load address)
227
228 movi a0, _image_start
229 jx a0
230
231 .align 16
232 .data
233 .globl avail_ram
234avail_ram:
235 .long _heap
236 .globl end_avail
237end_avail:
238 .long _heap + HEAP_SIZE
239
240 .comm _stack, STACK_SIZE
241 .comm _heap, HEAP_SIZE
242
243 .globl end_avail
244 .comm complen, 4
245
246 .end literal_prefix
diff --git a/arch/xtensa/boot/include/zlib.h b/arch/xtensa/boot/include/zlib.h
new file mode 100644
index 000000000000..ea29b6237852
--- /dev/null
+++ b/arch/xtensa/boot/include/zlib.h
@@ -0,0 +1,433 @@
1/*
2 * BK Id: SCCS/s.zlib.h 1.8 05/18/01 15:17:23 cort
3 */
4/*
5 * This file is derived from zlib.h and zconf.h from the zlib-0.95
6 * distribution by Jean-loup Gailly and Mark Adler, with some additions
7 * by Paul Mackerras to aid in implementing Deflate compression and
8 * decompression for PPP packets.
9 */
10
11/*
12 * ==FILEVERSION 960122==
13 *
14 * This marker is used by the Linux installation script to determine
15 * whether an up-to-date version of this file is already installed.
16 */
17
18/* zlib.h -- interface of the 'zlib' general purpose compression library
19 version 0.95, Aug 16th, 1995.
20
21 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
22
23 This software is provided 'as-is', without any express or implied
24 warranty. In no event will the authors be held liable for any damages
25 arising from the use of this software.
26
27 Permission is granted to anyone to use this software for any purpose,
28 including commercial applications, and to alter it and redistribute it
29 freely, subject to the following restrictions:
30
31 1. The origin of this software must not be misrepresented; you must not
32 claim that you wrote the original software. If you use this software
33 in a product, an acknowledgment in the product documentation would be
34 appreciated but is not required.
35 2. Altered source versions must be plainly marked as such, and must not be
36 misrepresented as being the original software.
37 3. This notice may not be removed or altered from any source distribution.
38
39 Jean-loup Gailly Mark Adler
40 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
41 */
42
43#ifndef _ZLIB_H
44#define _ZLIB_H
45
46/* #include "zconf.h" */ /* included directly here */
47
48/* zconf.h -- configuration of the zlib compression library
49 * Copyright (C) 1995 Jean-loup Gailly.
50 * For conditions of distribution and use, see copyright notice in zlib.h
51 */
52
53/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
54
55/*
56 The library does not install any signal handler. It is recommended to
57 add at least a handler for SIGSEGV when decompressing; the library checks
58 the consistency of the input data whenever possible but may go nuts
59 for some forms of corrupted input.
60 */
61
62/*
63 * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
64 * than 64k bytes at a time (needed on systems with 16-bit int).
65 * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
66 * at addresses which are not a multiple of their size.
67 * Under DOS, -DFAR=far or -DFAR=__far may be needed.
68 */
69
70#ifndef STDC
71# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
72# define STDC
73# endif
74#endif
75
76#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
77# include <unix.h>
78#endif
79
80/* Maximum value for memLevel in deflateInit2 */
81#ifndef MAX_MEM_LEVEL
82# ifdef MAXSEG_64K
83# define MAX_MEM_LEVEL 8
84# else
85# define MAX_MEM_LEVEL 9
86# endif
87#endif
88
89#ifndef FAR
90# define FAR
91#endif
92
93/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
94#ifndef MAX_WBITS
95# define MAX_WBITS 15 /* 32K LZ77 window */
96#endif
97
98/* The memory requirements for deflate are (in bytes):
99 1 << (windowBits+2) + 1 << (memLevel+9)
100 that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
101 plus a few kilobytes for small objects. For example, if you want to reduce
102 the default memory requirements from 256K to 128K, compile with
103 make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
104 Of course this will generally degrade compression (there's no free lunch).
105
106 The memory requirements for inflate are (in bytes) 1 << windowBits
107 that is, 32K for windowBits=15 (default value) plus a few kilobytes
108 for small objects.
109*/
110
111 /* Type declarations */
112
113#ifndef OF /* function prototypes */
114# ifdef STDC
115# define OF(args) args
116# else
117# define OF(args) ()
118# endif
119#endif
120
121typedef unsigned char Byte; /* 8 bits */
122typedef unsigned int uInt; /* 16 bits or more */
123typedef unsigned long uLong; /* 32 bits or more */
124
125typedef Byte FAR Bytef;
126typedef char FAR charf;
127typedef int FAR intf;
128typedef uInt FAR uIntf;
129typedef uLong FAR uLongf;
130
131#ifdef STDC
132 typedef void FAR *voidpf;
133 typedef void *voidp;
134#else
135 typedef Byte FAR *voidpf;
136 typedef Byte *voidp;
137#endif
138
139/* end of original zconf.h */
140
141#define ZLIB_VERSION "0.95P"
142
143/*
144 The 'zlib' compression library provides in-memory compression and
145 decompression functions, including integrity checks of the uncompressed
146 data. This version of the library supports only one compression method
147 (deflation) but other algorithms may be added later and will have the same
148 stream interface.
149
150 For compression the application must provide the output buffer and
151 may optionally provide the input buffer for optimization. For decompression,
152 the application must provide the input buffer and may optionally provide
153 the output buffer for optimization.
154
155 Compression can be done in a single step if the buffers are large
156 enough (for example if an input file is mmap'ed), or can be done by
157 repeated calls of the compression function. In the latter case, the
158 application must provide more input and/or consume the output
159 (providing more output space) before each call.
160*/
161
162typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
163typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
164
165struct internal_state;
166
167typedef struct z_stream_s {
168 Bytef *next_in; /* next input byte */
169 uInt avail_in; /* number of bytes available at next_in */
170 uLong total_in; /* total nb of input bytes read so far */
171
172 Bytef *next_out; /* next output byte should be put there */
173 uInt avail_out; /* remaining free space at next_out */
174 uLong total_out; /* total nb of bytes output so far */
175
176 char *msg; /* last error message, NULL if no error */
177 struct internal_state FAR *state; /* not visible by applications */
178
179 alloc_func zalloc; /* used to allocate the internal state */
180 free_func zfree; /* used to free the internal state */
181 voidp opaque; /* private data object passed to zalloc and zfree */
182
183 Byte data_type; /* best guess about the data type: ascii or binary */
184
185} z_stream;
186
187/*
188 The application must update next_in and avail_in when avail_in has
189 dropped to zero. It must update next_out and avail_out when avail_out
190 has dropped to zero. The application must initialize zalloc, zfree and
191 opaque before calling the init function. All other fields are set by the
192 compression library and must not be updated by the application.
193
194 The opaque value provided by the application will be passed as the first
195 parameter for calls of zalloc and zfree. This can be useful for custom
196 memory management. The compression library attaches no meaning to the
197 opaque value.
198
199 zalloc must return Z_NULL if there is not enough memory for the object.
200 On 16-bit systems, the functions zalloc and zfree must be able to allocate
201 exactly 65536 bytes, but will not be required to allocate more than this
202 if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
203 pointers returned by zalloc for objects of exactly 65536 bytes *must*
204 have their offset normalized to zero. The default allocation function
205 provided by this library ensures this (see zutil.c). To reduce memory
206 requirements and avoid any allocation of 64K objects, at the expense of
207 compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
208
209 The fields total_in and total_out can be used for statistics or
210 progress reports. After compression, total_in holds the total size of
211 the uncompressed data and may be saved for use in the decompressor
212 (particularly if the decompressor wants to decompress everything in
213 a single step).
214*/
215
216 /* constants */
217
218#define Z_NO_FLUSH 0
219#define Z_PARTIAL_FLUSH 1
220#define Z_FULL_FLUSH 2
221#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
222#define Z_FINISH 4
223#define Z_PACKET_FLUSH 5
224/* See deflate() below for the usage of these constants */
225
226#define Z_OK 0
227#define Z_STREAM_END 1
228#define Z_ERRNO (-1)
229#define Z_STREAM_ERROR (-2)
230#define Z_DATA_ERROR (-3)
231#define Z_MEM_ERROR (-4)
232#define Z_BUF_ERROR (-5)
233/* error codes for the compression/decompression functions */
234
235#define Z_BEST_SPEED 1
236#define Z_BEST_COMPRESSION 9
237#define Z_DEFAULT_COMPRESSION (-1)
238/* compression levels */
239
240#define Z_FILTERED 1
241#define Z_HUFFMAN_ONLY 2
242#define Z_DEFAULT_STRATEGY 0
243
244#define Z_BINARY 0
245#define Z_ASCII 1
246#define Z_UNKNOWN 2
247/* Used to set the data_type field */
248
249#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
250
251extern char *zlib_version;
252/* The application can compare zlib_version and ZLIB_VERSION for consistency.
253 If the first character differs, the library code actually used is
254 not compatible with the zlib.h header file used by the application.
255 */
256
257 /* basic functions */
258
259extern int inflateInit OF((z_stream *strm));
260/*
261 Initializes the internal stream state for decompression. The fields
262 zalloc and zfree must be initialized before by the caller. If zalloc and
263 zfree are set to Z_NULL, inflateInit updates them to use default allocation
264 functions.
265
266 inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
267 enough memory. msg is set to null if there is no error message.
268 inflateInit does not perform any decompression: this will be done by
269 inflate().
270*/
271
272
273extern int inflate OF((z_stream *strm, int flush));
274/*
275 Performs one or both of the following actions:
276
277 - Decompress more input starting at next_in and update next_in and avail_in
278 accordingly. If not all input can be processed (because there is not
279 enough room in the output buffer), next_in is updated and processing
280 will resume at this point for the next call of inflate().
281
282 - Provide more output starting at next_out and update next_out and avail_out
283 accordingly. inflate() always provides as much output as possible
284 (until there is no more input data or no more space in the output buffer).
285
286 Before the call of inflate(), the application should ensure that at least
287 one of the actions is possible, by providing more input and/or consuming
288 more output, and updating the next_* and avail_* values accordingly.
289 The application can consume the uncompressed output when it wants, for
290 example when the output buffer is full (avail_out == 0), or after each
291 call of inflate().
292
293 If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
294 inflate flushes as much output as possible to the output buffer. The
295 flushing behavior of inflate is not specified for values of the flush
296 parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
297 current implementation actually flushes as much output as possible
298 anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
299 has been consumed, it is expecting to see the length field of a stored
300 block; if not, it returns Z_DATA_ERROR.
301
302 inflate() should normally be called until it returns Z_STREAM_END or an
303 error. However if all decompression is to be performed in a single step
304 (a single call of inflate), the parameter flush should be set to
305 Z_FINISH. In this case all pending input is processed and all pending
306 output is flushed; avail_out must be large enough to hold all the
307 uncompressed data. (The size of the uncompressed data may have been saved
308 by the compressor for this purpose.) The next operation on this stream must
309 be inflateEnd to deallocate the decompression state. The use of Z_FINISH
310 is never required, but can be used to inform inflate that a faster routine
311 may be used for the single inflate() call.
312
313 inflate() returns Z_OK if some progress has been made (more input
314 processed or more output produced), Z_STREAM_END if the end of the
315 compressed data has been reached and all uncompressed output has been
316 produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
317 the stream structure was inconsistent (for example if next_in or next_out
318 was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
319 progress is possible or if there was not enough room in the output buffer
320 when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
321 call inflateSync to look for a good compression block. */
322
323
324extern int inflateEnd OF((z_stream *strm));
325/*
326 All dynamically allocated data structures for this stream are freed.
327 This function discards any unprocessed input and does not flush any
328 pending output.
329
330 inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
331 was inconsistent. In the error case, msg may be set but then points to a
332 static string (which must not be deallocated).
333*/
334
335 /* advanced functions */
336
337extern int inflateInit2 OF((z_stream *strm,
338 int windowBits));
339/*
340 This is another version of inflateInit with more compression options. The
341 fields next_out, zalloc and zfree must be initialized before by the caller.
342
343 The windowBits parameter is the base two logarithm of the maximum window
344 size (the size of the history buffer). It should be in the range 8..15 for
345 this version of the library (the value 16 will be allowed soon). The
346 default value is 15 if inflateInit is used instead. If a compressed stream
347 with a larger window size is given as input, inflate() will return with
348 the error code Z_DATA_ERROR instead of trying to allocate a larger window.
349
350 If next_out is not null, the library will use this buffer for the history
351 buffer; the buffer must either be large enough to hold the entire output
352 data, or have at least 1<<windowBits bytes. If next_out is null, the
353 library will allocate its own buffer (and leave next_out null). next_in
354 need not be provided here but must be provided by the application for the
355 next call of inflate().
356
357 If the history buffer is provided by the application, next_out must
358 never be changed by the application since the decompressor maintains
359 history information inside this buffer from call to call; the application
360 can only reset next_out to the beginning of the history buffer when
361 avail_out is zero and all output has been consumed.
362
363 inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
364 not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
365 windowBits < 8). msg is set to null if there is no error message.
366 inflateInit2 does not perform any decompression: this will be done by
367 inflate().
368*/
369
370extern int inflateSync OF((z_stream *strm));
371/*
372 Skips invalid compressed data until the special marker (see deflate()
373 above) can be found, or until all available input is skipped. No output
374 is provided.
375
376 inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
377 if no more input was provided, Z_DATA_ERROR if no marker has been found,
378 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
379 case, the application may save the current value of total_in which
380 indicates where valid compressed data was found. In the error case, the
381 application may repeatedly call inflateSync, providing more input each time,
382 until success or end of the input data.
383*/
384
385extern int inflateReset OF((z_stream *strm));
386/*
387 This function is equivalent to inflateEnd followed by inflateInit,
388 but does not free and reallocate all the internal decompression state.
389 The stream will keep attributes that may have been set by inflateInit2.
390
391 inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
392 stream state was inconsistent (such as zalloc or state being NULL).
393*/
394
395extern int inflateIncomp OF((z_stream *strm));
396/*
397 This function adds the data at next_in (avail_in bytes) to the output
398 history without performing any output. There must be no pending output,
399 and the decompressor must be expecting to see the start of a block.
400 Calling this function is equivalent to decompressing a stored block
401 containing the data at next_in (except that the data is not output).
402*/
403
404 /* checksum functions */
405
406/*
407 This function is not related to compression but is exported
408 anyway because it might be useful in applications using the
409 compression library.
410*/
411
412extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
413
414/*
415 Update a running Adler-32 checksum with the bytes buf[0..len-1] and
416 return the updated checksum. If buf is NULL, this function returns
417 the required initial value for the checksum.
418 An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
419 much faster. Usage example:
420
421 uLong adler = adler32(0L, Z_NULL, 0);
422
423 while (read_buffer(buffer, length) != EOF) {
424 adler = adler32(adler, buffer, length);
425 }
426 if (adler != original_adler) error();
427*/
428
429#ifndef _Z_UTIL_H
430 struct internal_state {int dummy;}; /* hack for buggy compilers */
431#endif
432
433#endif /* _ZLIB_H */
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
new file mode 100644
index 000000000000..c0a74dc3a0df
--- /dev/null
+++ b/arch/xtensa/boot/lib/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for some libs needed by zImage.
3#
4
5
6lib-y := zlib.o zmem.o
diff --git a/arch/xtensa/boot/lib/memcpy.S b/arch/xtensa/boot/lib/memcpy.S
new file mode 100644
index 000000000000..a029f5df2d5c
--- /dev/null
+++ b/arch/xtensa/boot/lib/memcpy.S
@@ -0,0 +1,36 @@
1/*
2 * arch/xtensa/lib/memcpy.S
3 *
4 * ANSI C standard library function memcpy
5 *
6 * This file is subject to the terms and conditions of the GNU General
7 * Public License. See the file "COPYING" in the main directory of
8 * this archive for more details.
9 *
10 * Copyright (C) 2002 Tensilica Inc.
11 */
12
13#define _ASMLANGUAGE
14#include <xtensa/config/core.h>
15
16.text
17.align 4
18.global bcopy
19.type bcopy,@function
20bcopy:
21 movi a14, xthal_bcopy // a14 safe to use regardless of whether caller
22 // used call4 or call8 (can't have used call12)
23 jx a14 // let the Core HAL do the work
24
25.text
26.align 4
27.global memcpy
28.type memcpy,@function
29memcpy:
30.global memmove
31.type memmove,@function
32memmove:
33 movi a14, xthal_memcpy // a14 safe to use regardless of whether caller
34 // used call4 or call8 (can't have used call12)
35 jx a14 // let the Core HAL do the work
36
diff --git a/arch/xtensa/boot/lib/zlib.c b/arch/xtensa/boot/lib/zlib.c
new file mode 100644
index 000000000000..e3859f631077
--- /dev/null
+++ b/arch/xtensa/boot/lib/zlib.c
@@ -0,0 +1,2150 @@
1/*
2 * BK Id: SCCS/s.zlib.c 1.8 05/18/01 15:17:24 cort
3 */
4/*
5 * This file is derived from various .h and .c files from the zlib-0.95
6 * distribution by Jean-loup Gailly and Mark Adler, with some additions
7 * by Paul Mackerras to aid in implementing Deflate compression and
8 * decompression for PPP packets. See zlib.h for conditions of
9 * distribution and use.
10 *
11 * Changes that have been made include:
12 * - changed functions not used outside this file to "local"
13 * - added minCompression parameter to deflateInit2
14 * - added Z_PACKET_FLUSH (see zlib.h for details)
15 * - added inflateIncomp
16 *
17 */
18
19/*+++++*/
20/* zutil.h -- internal interface and configuration of the compression library
21 * Copyright (C) 1995 Jean-loup Gailly.
22 * For conditions of distribution and use, see copyright notice in zlib.h
23 */
24
25/* WARNING: this file should *not* be used by applications. It is
26 part of the implementation of the compression library and is
27 subject to change. Applications should only use zlib.h.
28 */
29
30/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
31
32#define _Z_UTIL_H
33
34#include "zlib.h"
35
36#ifndef local
37# define local static
38#endif
39/* compile with -Dlocal if your debugger can't find static symbols */
40
41#define FAR
42
43typedef unsigned char uch;
44typedef uch FAR uchf;
45typedef unsigned short ush;
46typedef ush FAR ushf;
47typedef unsigned long ulg;
48
49extern char *z_errmsg[]; /* indexed by 1-zlib_error */
50
51#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
52/* To be used only when the state is known to be valid */
53
54#ifndef NULL
55#define NULL ((void *) 0)
56#endif
57
58 /* common constants */
59
60#define DEFLATED 8
61
62#ifndef DEF_WBITS
63# define DEF_WBITS MAX_WBITS
64#endif
65/* default windowBits for decompression. MAX_WBITS is for compression only */
66
67#if MAX_MEM_LEVEL >= 8
68# define DEF_MEM_LEVEL 8
69#else
70# define DEF_MEM_LEVEL MAX_MEM_LEVEL
71#endif
72/* default memLevel */
73
74#define STORED_BLOCK 0
75#define STATIC_TREES 1
76#define DYN_TREES 2
77/* The three kinds of block type */
78
79#define MIN_MATCH 3
80#define MAX_MATCH 258
81/* The minimum and maximum match lengths */
82
83 /* functions */
84
85#include <linux/string.h>
86#define zmemcpy memcpy
87#define zmemzero(dest, len) memset(dest, 0, len)
88
89/* Diagnostic functions */
90#ifdef DEBUG_ZLIB
91# include <stdio.h>
92# ifndef verbose
93# define verbose 0
94# endif
95# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
96# define Trace(x) fprintf x
97# define Tracev(x) {if (verbose) fprintf x ;}
98# define Tracevv(x) {if (verbose>1) fprintf x ;}
99# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
100# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
101#else
102# define Assert(cond,msg)
103# define Trace(x)
104# define Tracev(x)
105# define Tracevv(x)
106# define Tracec(c,x)
107# define Tracecv(c,x)
108#endif
109
110
111typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
112
113/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
114/* void zcfree OF((voidpf opaque, voidpf ptr)); */
115
116#define ZALLOC(strm, items, size) \
117 (*((strm)->zalloc))((strm)->opaque, (items), (size))
118#define ZFREE(strm, addr, size) \
119 (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
120#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
121
122/* deflate.h -- internal compression state
123 * Copyright (C) 1995 Jean-loup Gailly
124 * For conditions of distribution and use, see copyright notice in zlib.h
125 */
126
127/* WARNING: this file should *not* be used by applications. It is
128 part of the implementation of the compression library and is
129 subject to change. Applications should only use zlib.h.
130 */
131
132/*+++++*/
133/* infblock.h -- header to use infblock.c
134 * Copyright (C) 1995 Mark Adler
135 * For conditions of distribution and use, see copyright notice in zlib.h
136 */
137
138/* WARNING: this file should *not* be used by applications. It is
139 part of the implementation of the compression library and is
140 subject to change. Applications should only use zlib.h.
141 */
142
143struct inflate_blocks_state;
144typedef struct inflate_blocks_state FAR inflate_blocks_statef;
145
146local inflate_blocks_statef * inflate_blocks_new OF((
147 z_stream *z,
148 check_func c, /* check function */
149 uInt w)); /* window size */
150
151local int inflate_blocks OF((
152 inflate_blocks_statef *,
153 z_stream *,
154 int)); /* initial return code */
155
156local void inflate_blocks_reset OF((
157 inflate_blocks_statef *,
158 z_stream *,
159 uLongf *)); /* check value on output */
160
161local int inflate_blocks_free OF((
162 inflate_blocks_statef *,
163 z_stream *,
164 uLongf *)); /* check value on output */
165
166local int inflate_addhistory OF((
167 inflate_blocks_statef *,
168 z_stream *));
169
170local int inflate_packet_flush OF((
171 inflate_blocks_statef *));
172
173/*+++++*/
174/* inftrees.h -- header to use inftrees.c
175 * Copyright (C) 1995 Mark Adler
176 * For conditions of distribution and use, see copyright notice in zlib.h
177 */
178
179/* WARNING: this file should *not* be used by applications. It is
180 part of the implementation of the compression library and is
181 subject to change. Applications should only use zlib.h.
182 */
183
184/* Huffman code lookup table entry--this entry is four bytes for machines
185 that have 16-bit pointers (e.g. PC's in the small or medium model). */
186
187typedef struct inflate_huft_s FAR inflate_huft;
188
189struct inflate_huft_s {
190 union {
191 struct {
192 Byte Exop; /* number of extra bits or operation */
193 Byte Bits; /* number of bits in this code or subcode */
194 } what;
195 uInt Nalloc; /* number of these allocated here */
196 Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
197 } word; /* 16-bit, 8 bytes for 32-bit machines) */
198 union {
199 uInt Base; /* literal, length base, or distance base */
200 inflate_huft *Next; /* pointer to next level of table */
201 } more;
202};
203
204#ifdef DEBUG_ZLIB
205 local uInt inflate_hufts;
206#endif
207
208local int inflate_trees_bits OF((
209 uIntf *, /* 19 code lengths */
210 uIntf *, /* bits tree desired/actual depth */
211 inflate_huft * FAR *, /* bits tree result */
212 z_stream *)); /* for zalloc, zfree functions */
213
214local int inflate_trees_dynamic OF((
215 uInt, /* number of literal/length codes */
216 uInt, /* number of distance codes */
217 uIntf *, /* that many (total) code lengths */
218 uIntf *, /* literal desired/actual bit depth */
219 uIntf *, /* distance desired/actual bit depth */
220 inflate_huft * FAR *, /* literal/length tree result */
221 inflate_huft * FAR *, /* distance tree result */
222 z_stream *)); /* for zalloc, zfree functions */
223
224local int inflate_trees_fixed OF((
225 uIntf *, /* literal desired/actual bit depth */
226 uIntf *, /* distance desired/actual bit depth */
227 inflate_huft * FAR *, /* literal/length tree result */
228 inflate_huft * FAR *)); /* distance tree result */
229
230local int inflate_trees_free OF((
231 inflate_huft *, /* tables to free */
232 z_stream *)); /* for zfree function */
233
234
235/*+++++*/
236/* infcodes.h -- header to use infcodes.c
237 * Copyright (C) 1995 Mark Adler
238 * For conditions of distribution and use, see copyright notice in zlib.h
239 */
240
241/* WARNING: this file should *not* be used by applications. It is
242 part of the implementation of the compression library and is
243 subject to change. Applications should only use zlib.h.
244 */
245
246struct inflate_codes_state;
247typedef struct inflate_codes_state FAR inflate_codes_statef;
248
249local inflate_codes_statef *inflate_codes_new OF((
250 uInt, uInt,
251 inflate_huft *, inflate_huft *,
252 z_stream *));
253
254local int inflate_codes OF((
255 inflate_blocks_statef *,
256 z_stream *,
257 int));
258
259local void inflate_codes_free OF((
260 inflate_codes_statef *,
261 z_stream *));
262
263
264/*+++++*/
265/* inflate.c -- zlib interface to inflate modules
266 * Copyright (C) 1995 Mark Adler
267 * For conditions of distribution and use, see copyright notice in zlib.h
268 */
269
270/* inflate private state */
271struct internal_state {
272
273 /* mode */
274 enum {
275 METHOD, /* waiting for method byte */
276 FLAG, /* waiting for flag byte */
277 BLOCKS, /* decompressing blocks */
278 CHECK4, /* four check bytes to go */
279 CHECK3, /* three check bytes to go */
280 CHECK2, /* two check bytes to go */
281 CHECK1, /* one check byte to go */
282 DONE, /* finished check, done */
283 BAD} /* got an error--stay here */
284 mode; /* current inflate mode */
285
286 /* mode dependent information */
287 union {
288 uInt method; /* if FLAGS, method byte */
289 struct {
290 uLong was; /* computed check value */
291 uLong need; /* stream check value */
292 } check; /* if CHECK, check values to compare */
293 uInt marker; /* if BAD, inflateSync's marker bytes count */
294 } sub; /* submode */
295
296 /* mode independent information */
297 int nowrap; /* flag for no wrapper */
298 uInt wbits; /* log2(window size) (8..15, defaults to 15) */
299 inflate_blocks_statef
300 *blocks; /* current inflate_blocks state */
301
302};
303
304
305int inflateReset(z)
306z_stream *z;
307{
308 uLong c;
309
310 if (z == Z_NULL || z->state == Z_NULL)
311 return Z_STREAM_ERROR;
312 z->total_in = z->total_out = 0;
313 z->msg = Z_NULL;
314 z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
315 inflate_blocks_reset(z->state->blocks, z, &c);
316 Trace((stderr, "inflate: reset\n"));
317 return Z_OK;
318}
319
320
321int inflateEnd(z)
322z_stream *z;
323{
324 uLong c;
325
326 if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
327 return Z_STREAM_ERROR;
328 if (z->state->blocks != Z_NULL)
329 inflate_blocks_free(z->state->blocks, z, &c);
330 ZFREE(z, z->state, sizeof(struct internal_state));
331 z->state = Z_NULL;
332 Trace((stderr, "inflate: end\n"));
333 return Z_OK;
334}
335
336
337int inflateInit2(z, w)
338z_stream *z;
339int w;
340{
341 /* initialize state */
342 if (z == Z_NULL)
343 return Z_STREAM_ERROR;
344/* if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
345/* if (z->zfree == Z_NULL) z->zfree = zcfree; */
346 if ((z->state = (struct internal_state FAR *)
347 ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
348 return Z_MEM_ERROR;
349 z->state->blocks = Z_NULL;
350
351 /* handle undocumented nowrap option (no zlib header or check) */
352 z->state->nowrap = 0;
353 if (w < 0)
354 {
355 w = - w;
356 z->state->nowrap = 1;
357 }
358
359 /* set window size */
360 if (w < 8 || w > 15)
361 {
362 inflateEnd(z);
363 return Z_STREAM_ERROR;
364 }
365 z->state->wbits = (uInt)w;
366
367 /* create inflate_blocks state */
368 if ((z->state->blocks =
369 inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
370 == Z_NULL)
371 {
372 inflateEnd(z);
373 return Z_MEM_ERROR;
374 }
375 Trace((stderr, "inflate: allocated\n"));
376
377 /* reset state */
378 inflateReset(z);
379 return Z_OK;
380}
381
382
383int inflateInit(z)
384z_stream *z;
385{
386 return inflateInit2(z, DEF_WBITS);
387}
388
389
390#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
391#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
392
393int inflate(z, f)
394z_stream *z;
395int f;
396{
397 int r;
398 uInt b;
399
400 if (z == Z_NULL || z->next_in == Z_NULL)
401 return Z_STREAM_ERROR;
402 r = Z_BUF_ERROR;
403 while (1) switch (z->state->mode)
404 {
405 case METHOD:
406 NEEDBYTE
407 if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
408 {
409 z->state->mode = BAD;
410 z->msg = "unknown compression method";
411 z->state->sub.marker = 5; /* can't try inflateSync */
412 break;
413 }
414 if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
415 {
416 z->state->mode = BAD;
417 z->msg = "invalid window size";
418 z->state->sub.marker = 5; /* can't try inflateSync */
419 break;
420 }
421 z->state->mode = FLAG;
422 case FLAG:
423 NEEDBYTE
424 if ((b = NEXTBYTE) & 0x20)
425 {
426 z->state->mode = BAD;
427 z->msg = "invalid reserved bit";
428 z->state->sub.marker = 5; /* can't try inflateSync */
429 break;
430 }
431 if (((z->state->sub.method << 8) + b) % 31)
432 {
433 z->state->mode = BAD;
434 z->msg = "incorrect header check";
435 z->state->sub.marker = 5; /* can't try inflateSync */
436 break;
437 }
438 Trace((stderr, "inflate: zlib header ok\n"));
439 z->state->mode = BLOCKS;
440 case BLOCKS:
441 r = inflate_blocks(z->state->blocks, z, r);
442 if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
443 r = inflate_packet_flush(z->state->blocks);
444 if (r == Z_DATA_ERROR)
445 {
446 z->state->mode = BAD;
447 z->state->sub.marker = 0; /* can try inflateSync */
448 break;
449 }
450 if (r != Z_STREAM_END)
451 return r;
452 r = Z_OK;
453 inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
454 if (z->state->nowrap)
455 {
456 z->state->mode = DONE;
457 break;
458 }
459 z->state->mode = CHECK4;
460 case CHECK4:
461 NEEDBYTE
462 z->state->sub.check.need = (uLong)NEXTBYTE << 24;
463 z->state->mode = CHECK3;
464 case CHECK3:
465 NEEDBYTE
466 z->state->sub.check.need += (uLong)NEXTBYTE << 16;
467 z->state->mode = CHECK2;
468 case CHECK2:
469 NEEDBYTE
470 z->state->sub.check.need += (uLong)NEXTBYTE << 8;
471 z->state->mode = CHECK1;
472 case CHECK1:
473 NEEDBYTE
474 z->state->sub.check.need += (uLong)NEXTBYTE;
475
476 if (z->state->sub.check.was != z->state->sub.check.need)
477 {
478 z->state->mode = BAD;
479 z->msg = "incorrect data check";
480 z->state->sub.marker = 5; /* can't try inflateSync */
481 break;
482 }
483 Trace((stderr, "inflate: zlib check ok\n"));
484 z->state->mode = DONE;
485 case DONE:
486 return Z_STREAM_END;
487 case BAD:
488 return Z_DATA_ERROR;
489 default:
490 return Z_STREAM_ERROR;
491 }
492
493 empty:
494 if (f != Z_PACKET_FLUSH)
495 return r;
496 z->state->mode = BAD;
497 z->state->sub.marker = 0; /* can try inflateSync */
498 return Z_DATA_ERROR;
499}
500
501/*
502 * This subroutine adds the data at next_in/avail_in to the output history
503 * without performing any output. The output buffer must be "caught up";
504 * i.e. no pending output (hence s->read equals s->write), and the state must
505 * be BLOCKS (i.e. we should be willing to see the start of a series of
506 * BLOCKS). On exit, the output will also be caught up, and the checksum
507 * will have been updated if need be.
508 */
509
510int inflateIncomp(z)
511z_stream *z;
512{
513 if (z->state->mode != BLOCKS)
514 return Z_DATA_ERROR;
515 return inflate_addhistory(z->state->blocks, z);
516}
517
518
519int inflateSync(z)
520z_stream *z;
521{
522 uInt n; /* number of bytes to look at */
523 Bytef *p; /* pointer to bytes */
524 uInt m; /* number of marker bytes found in a row */
525 uLong r, w; /* temporaries to save total_in and total_out */
526
527 /* set up */
528 if (z == Z_NULL || z->state == Z_NULL)
529 return Z_STREAM_ERROR;
530 if (z->state->mode != BAD)
531 {
532 z->state->mode = BAD;
533 z->state->sub.marker = 0;
534 }
535 if ((n = z->avail_in) == 0)
536 return Z_BUF_ERROR;
537 p = z->next_in;
538 m = z->state->sub.marker;
539
540 /* search */
541 while (n && m < 4)
542 {
543 if (*p == (Byte)(m < 2 ? 0 : 0xff))
544 m++;
545 else if (*p)
546 m = 0;
547 else
548 m = 4 - m;
549 p++, n--;
550 }
551
552 /* restore */
553 z->total_in += p - z->next_in;
554 z->next_in = p;
555 z->avail_in = n;
556 z->state->sub.marker = m;
557
558 /* return no joy or set up to restart on a new block */
559 if (m != 4)
560 return Z_DATA_ERROR;
561 r = z->total_in; w = z->total_out;
562 inflateReset(z);
563 z->total_in = r; z->total_out = w;
564 z->state->mode = BLOCKS;
565 return Z_OK;
566}
567
568#undef NEEDBYTE
569#undef NEXTBYTE
570
571/*+++++*/
572/* infutil.h -- types and macros common to blocks and codes
573 * Copyright (C) 1995 Mark Adler
574 * For conditions of distribution and use, see copyright notice in zlib.h
575 */
576
577/* WARNING: this file should *not* be used by applications. It is
578 part of the implementation of the compression library and is
579 subject to change. Applications should only use zlib.h.
580 */
581
582/* inflate blocks semi-private state */
583struct inflate_blocks_state {
584
585 /* mode */
586 enum {
587 TYPE, /* get type bits (3, including end bit) */
588 LENS, /* get lengths for stored */
589 STORED, /* processing stored block */
590 TABLE, /* get table lengths */
591 BTREE, /* get bit lengths tree for a dynamic block */
592 DTREE, /* get length, distance trees for a dynamic block */
593 CODES, /* processing fixed or dynamic block */
594 DRY, /* output remaining window bytes */
595 DONEB, /* finished last block, done */
596 BADB} /* got a data error--stuck here */
597 mode; /* current inflate_block mode */
598
599 /* mode dependent information */
600 union {
601 uInt left; /* if STORED, bytes left to copy */
602 struct {
603 uInt table; /* table lengths (14 bits) */
604 uInt index; /* index into blens (or border) */
605 uIntf *blens; /* bit lengths of codes */
606 uInt bb; /* bit length tree depth */
607 inflate_huft *tb; /* bit length decoding tree */
608 int nblens; /* # elements allocated at blens */
609 } trees; /* if DTREE, decoding info for trees */
610 struct {
611 inflate_huft *tl, *td; /* trees to free */
612 inflate_codes_statef
613 *codes;
614 } decode; /* if CODES, current state */
615 } sub; /* submode */
616 uInt last; /* true if this block is the last block */
617
618 /* mode independent information */
619 uInt bitk; /* bits in bit buffer */
620 uLong bitb; /* bit buffer */
621 Bytef *window; /* sliding window */
622 Bytef *end; /* one byte after sliding window */
623 Bytef *read; /* window read pointer */
624 Bytef *write; /* window write pointer */
625 check_func checkfn; /* check function */
626 uLong check; /* check on output */
627
628};
629
630
631/* defines for inflate input/output */
632/* update pointers and return */
633#define UPDBITS {s->bitb=b;s->bitk=k;}
634#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
635#define UPDOUT {s->write=q;}
636#define UPDATE {UPDBITS UPDIN UPDOUT}
637#define LEAVE {UPDATE return inflate_flush(s,z,r);}
638/* get bytes and bits */
639#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
640#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
641#define NEXTBYTE (n--,*p++)
642#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
643#define DUMPBITS(j) {b>>=(j);k-=(j);}
644/* output bytes */
645#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
646#define LOADOUT {q=s->write;m=WAVAIL;}
647#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
648#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
649#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
650#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
651/* load local pointers */
652#define LOAD {LOADIN LOADOUT}
653
654/*
655 * The IBM 150 firmware munges the data right after _etext[]. This
656 * protects it. -- Cort
657 */
658local uInt protect_mask[] = {0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0};
659/* And'ing with mask[n] masks the lower n bits */
660local uInt inflate_mask[] = {
661 0x0000,
662 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
663 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
664};
665
666/* copy as much as possible from the sliding window to the output area */
667local int inflate_flush OF((
668 inflate_blocks_statef *,
669 z_stream *,
670 int));
671
672/*+++++*/
673/* inffast.h -- header to use inffast.c
674 * Copyright (C) 1995 Mark Adler
675 * For conditions of distribution and use, see copyright notice in zlib.h
676 */
677
678/* WARNING: this file should *not* be used by applications. It is
679 part of the implementation of the compression library and is
680 subject to change. Applications should only use zlib.h.
681 */
682
683local int inflate_fast OF((
684 uInt,
685 uInt,
686 inflate_huft *,
687 inflate_huft *,
688 inflate_blocks_statef *,
689 z_stream *));
690
691
692/*+++++*/
693/* infblock.c -- interpret and process block types to last block
694 * Copyright (C) 1995 Mark Adler
695 * For conditions of distribution and use, see copyright notice in zlib.h
696 */
697
698/* Table for deflate from PKZIP's appnote.txt. */
699local uInt border[] = { /* Order of the bit length code lengths */
700 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
701
702/*
703 Notes beyond the 1.93a appnote.txt:
704
705 1. Distance pointers never point before the beginning of the output
706 stream.
707 2. Distance pointers can point back across blocks, up to 32k away.
708 3. There is an implied maximum of 7 bits for the bit length table and
709 15 bits for the actual data.
710 4. If only one code exists, then it is encoded using one bit. (Zero
711 would be more efficient, but perhaps a little confusing.) If two
712 codes exist, they are coded using one bit each (0 and 1).
713 5. There is no way of sending zero distance codes--a dummy must be
714 sent if there are none. (History: a pre 2.0 version of PKZIP would
715 store blocks with no distance codes, but this was discovered to be
716 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
717 zero distance codes, which is sent as one code of zero bits in
718 length.
719 6. There are up to 286 literal/length codes. Code 256 represents the
720 end-of-block. Note however that the static length tree defines
721 288 codes just to fill out the Huffman codes. Codes 286 and 287
722 cannot be used though, since there is no length base or extra bits
723 defined for them. Similarily, there are up to 30 distance codes.
724 However, static trees define 32 codes (all 5 bits) to fill out the
725 Huffman codes, but the last two had better not show up in the data.
726 7. Unzip can check dynamic Huffman blocks for complete code sets.
727 The exception is that a single code would not be complete (see #4).
728 8. The five bits following the block type is really the number of
729 literal codes sent minus 257.
730 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
731 (1+6+6). Therefore, to output three times the length, you output
732 three codes (1+1+1), whereas to output four times the same length,
733 you only need two codes (1+3). Hmm.
734 10. In the tree reconstruction algorithm, Code = Code + Increment
735 only if BitLength(i) is not zero. (Pretty obvious.)
736 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
737 12. Note: length code 284 can represent 227-258, but length code 285
738 really is 258. The last length deserves its own, short code
739 since it gets used a lot in very redundant files. The length
740 258 is special since 258 - 3 (the min match length) is 255.
741 13. The literal/length and distance code bit lengths are read as a
742 single stream of lengths. It is possible (and advantageous) for
743 a repeat code (16, 17, or 18) to go across the boundary between
744 the two sets of lengths.
745 */
746
747
748local void inflate_blocks_reset(s, z, c)
749inflate_blocks_statef *s;
750z_stream *z;
751uLongf *c;
752{
753 if (s->checkfn != Z_NULL)
754 *c = s->check;
755 if (s->mode == BTREE || s->mode == DTREE)
756 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
757 if (s->mode == CODES)
758 {
759 inflate_codes_free(s->sub.decode.codes, z);
760 inflate_trees_free(s->sub.decode.td, z);
761 inflate_trees_free(s->sub.decode.tl, z);
762 }
763 s->mode = TYPE;
764 s->bitk = 0;
765 s->bitb = 0;
766 s->read = s->write = s->window;
767 if (s->checkfn != Z_NULL)
768 s->check = (*s->checkfn)(0L, Z_NULL, 0);
769 Trace((stderr, "inflate: blocks reset\n"));
770}
771
772
773local inflate_blocks_statef *inflate_blocks_new(z, c, w)
774z_stream *z;
775check_func c;
776uInt w;
777{
778 inflate_blocks_statef *s;
779
780 if ((s = (inflate_blocks_statef *)ZALLOC
781 (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
782 return s;
783 if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
784 {
785 ZFREE(z, s, sizeof(struct inflate_blocks_state));
786 return Z_NULL;
787 }
788 s->end = s->window + w;
789 s->checkfn = c;
790 s->mode = TYPE;
791 Trace((stderr, "inflate: blocks allocated\n"));
792 inflate_blocks_reset(s, z, &s->check);
793 return s;
794}
795
796
797local int inflate_blocks(s, z, r)
798inflate_blocks_statef *s;
799z_stream *z;
800int r;
801{
802 uInt t; /* temporary storage */
803 uLong b; /* bit buffer */
804 uInt k; /* bits in bit buffer */
805 Bytef *p; /* input data pointer */
806 uInt n; /* bytes available there */
807 Bytef *q; /* output window write pointer */
808 uInt m; /* bytes to end of window or read pointer */
809
810 /* copy input/output information to locals (UPDATE macro restores) */
811 LOAD
812
813 /* process input based on current state */
814 while (1) switch (s->mode)
815 {
816 case TYPE:
817 NEEDBITS(3)
818 t = (uInt)b & 7;
819 s->last = t & 1;
820 switch (t >> 1)
821 {
822 case 0: /* stored */
823 Trace((stderr, "inflate: stored block%s\n",
824 s->last ? " (last)" : ""));
825 DUMPBITS(3)
826 t = k & 7; /* go to byte boundary */
827 DUMPBITS(t)
828 s->mode = LENS; /* get length of stored block */
829 break;
830 case 1: /* fixed */
831 Trace((stderr, "inflate: fixed codes block%s\n",
832 s->last ? " (last)" : ""));
833 {
834 uInt bl, bd;
835 inflate_huft *tl, *td;
836
837 inflate_trees_fixed(&bl, &bd, &tl, &td);
838 s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
839 if (s->sub.decode.codes == Z_NULL)
840 {
841 r = Z_MEM_ERROR;
842 LEAVE
843 }
844 s->sub.decode.tl = Z_NULL; /* don't try to free these */
845 s->sub.decode.td = Z_NULL;
846 }
847 DUMPBITS(3)
848 s->mode = CODES;
849 break;
850 case 2: /* dynamic */
851 Trace((stderr, "inflate: dynamic codes block%s\n",
852 s->last ? " (last)" : ""));
853 DUMPBITS(3)
854 s->mode = TABLE;
855 break;
856 case 3: /* illegal */
857 DUMPBITS(3)
858 s->mode = BADB;
859 z->msg = "invalid block type";
860 r = Z_DATA_ERROR;
861 LEAVE
862 }
863 break;
864 case LENS:
865 NEEDBITS(32)
866 if (((~b) >> 16) != (b & 0xffff))
867 {
868 s->mode = BADB;
869 z->msg = "invalid stored block lengths";
870 r = Z_DATA_ERROR;
871 LEAVE
872 }
873 s->sub.left = (uInt)b & 0xffff;
874 b = k = 0; /* dump bits */
875 Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
876 s->mode = s->sub.left ? STORED : TYPE;
877 break;
878 case STORED:
879 if (n == 0)
880 LEAVE
881 NEEDOUT
882 t = s->sub.left;
883 if (t > n) t = n;
884 if (t > m) t = m;
885 zmemcpy(q, p, t);
886 p += t; n -= t;
887 q += t; m -= t;
888 if ((s->sub.left -= t) != 0)
889 break;
890 Tracev((stderr, "inflate: stored end, %lu total out\n",
891 z->total_out + (q >= s->read ? q - s->read :
892 (s->end - s->read) + (q - s->window))));
893 s->mode = s->last ? DRY : TYPE;
894 break;
895 case TABLE:
896 NEEDBITS(14)
897 s->sub.trees.table = t = (uInt)b & 0x3fff;
898#ifndef PKZIP_BUG_WORKAROUND
899 if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
900 {
901 s->mode = BADB;
902 z->msg = "too many length or distance symbols";
903 r = Z_DATA_ERROR;
904 LEAVE
905 }
906#endif
907 t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
908 if (t < 19)
909 t = 19;
910 if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
911 {
912 r = Z_MEM_ERROR;
913 LEAVE
914 }
915 s->sub.trees.nblens = t;
916 DUMPBITS(14)
917 s->sub.trees.index = 0;
918 Tracev((stderr, "inflate: table sizes ok\n"));
919 s->mode = BTREE;
920 case BTREE:
921 while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
922 {
923 NEEDBITS(3)
924 s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
925 DUMPBITS(3)
926 }
927 while (s->sub.trees.index < 19)
928 s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
929 s->sub.trees.bb = 7;
930 t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
931 &s->sub.trees.tb, z);
932 if (t != Z_OK)
933 {
934 r = t;
935 if (r == Z_DATA_ERROR)
936 s->mode = BADB;
937 LEAVE
938 }
939 s->sub.trees.index = 0;
940 Tracev((stderr, "inflate: bits tree ok\n"));
941 s->mode = DTREE;
942 case DTREE:
943 while (t = s->sub.trees.table,
944 s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
945 {
946 inflate_huft *h;
947 uInt i, j, c;
948
949 t = s->sub.trees.bb;
950 NEEDBITS(t)
951 h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
952 t = h->word.what.Bits;
953 c = h->more.Base;
954 if (c < 16)
955 {
956 DUMPBITS(t)
957 s->sub.trees.blens[s->sub.trees.index++] = c;
958 }
959 else /* c == 16..18 */
960 {
961 i = c == 18 ? 7 : c - 14;
962 j = c == 18 ? 11 : 3;
963 NEEDBITS(t + i)
964 DUMPBITS(t)
965 j += (uInt)b & inflate_mask[i];
966 DUMPBITS(i)
967 i = s->sub.trees.index;
968 t = s->sub.trees.table;
969 if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
970 (c == 16 && i < 1))
971 {
972 s->mode = BADB;
973 z->msg = "invalid bit length repeat";
974 r = Z_DATA_ERROR;
975 LEAVE
976 }
977 c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
978 do {
979 s->sub.trees.blens[i++] = c;
980 } while (--j);
981 s->sub.trees.index = i;
982 }
983 }
984 inflate_trees_free(s->sub.trees.tb, z);
985 s->sub.trees.tb = Z_NULL;
986 {
987 uInt bl, bd;
988 inflate_huft *tl, *td;
989 inflate_codes_statef *c;
990
991 bl = 9; /* must be <= 9 for lookahead assumptions */
992 bd = 6; /* must be <= 9 for lookahead assumptions */
993 t = s->sub.trees.table;
994 t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
995 s->sub.trees.blens, &bl, &bd, &tl, &td, z);
996 if (t != Z_OK)
997 {
998 if (t == (uInt)Z_DATA_ERROR)
999 s->mode = BADB;
1000 r = t;
1001 LEAVE
1002 }
1003 Tracev((stderr, "inflate: trees ok\n"));
1004 if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
1005 {
1006 inflate_trees_free(td, z);
1007 inflate_trees_free(tl, z);
1008 r = Z_MEM_ERROR;
1009 LEAVE
1010 }
1011 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
1012 s->sub.decode.codes = c;
1013 s->sub.decode.tl = tl;
1014 s->sub.decode.td = td;
1015 }
1016 s->mode = CODES;
1017 case CODES:
1018 UPDATE
1019 if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
1020 return inflate_flush(s, z, r);
1021 r = Z_OK;
1022 inflate_codes_free(s->sub.decode.codes, z);
1023 inflate_trees_free(s->sub.decode.td, z);
1024 inflate_trees_free(s->sub.decode.tl, z);
1025 LOAD
1026 Tracev((stderr, "inflate: codes end, %lu total out\n",
1027 z->total_out + (q >= s->read ? q - s->read :
1028 (s->end - s->read) + (q - s->window))));
1029 if (!s->last)
1030 {
1031 s->mode = TYPE;
1032 break;
1033 }
1034 if (k > 7) /* return unused byte, if any */
1035 {
1036 Assert(k < 16, "inflate_codes grabbed too many bytes")
1037 k -= 8;
1038 n++;
1039 p--; /* can always return one */
1040 }
1041 s->mode = DRY;
1042 case DRY:
1043 FLUSH
1044 if (s->read != s->write)
1045 LEAVE
1046 s->mode = DONEB;
1047 case DONEB:
1048 r = Z_STREAM_END;
1049 LEAVE
1050 case BADB:
1051 r = Z_DATA_ERROR;
1052 LEAVE
1053 default:
1054 r = Z_STREAM_ERROR;
1055 LEAVE
1056 }
1057}
1058
1059
1060local int inflate_blocks_free(s, z, c)
1061inflate_blocks_statef *s;
1062z_stream *z;
1063uLongf *c;
1064{
1065 inflate_blocks_reset(s, z, c);
1066 ZFREE(z, s->window, s->end - s->window);
1067 ZFREE(z, s, sizeof(struct inflate_blocks_state));
1068 Trace((stderr, "inflate: blocks freed\n"));
1069 return Z_OK;
1070}
1071
1072/*
1073 * This subroutine adds the data at next_in/avail_in to the output history
1074 * without performing any output. The output buffer must be "caught up";
1075 * i.e. no pending output (hence s->read equals s->write), and the state must
1076 * be BLOCKS (i.e. we should be willing to see the start of a series of
1077 * BLOCKS). On exit, the output will also be caught up, and the checksum
1078 * will have been updated if need be.
1079 */
local int inflate_addhistory(s, z)
inflate_blocks_statef *s;
z_stream *z;
{
    uLong b;              /* bit buffer */  /* NOT USED HERE */
    uInt k;               /* bits in bit buffer */ /* NOT USED HERE */
    uInt t;               /* temporary storage */
    Bytef *p;             /* input data pointer */
    uInt n;               /* bytes available there */
    Bytef *q;             /* output window write pointer */
    uInt m;               /* bytes to end of window or read pointer */

    /* precondition: output fully flushed and decoder between blocks */
    if (s->read != s->write)
        return Z_STREAM_ERROR;
    if (s->mode != TYPE)
        return Z_DATA_ERROR;

    /* we're ready to rock */
    LOAD        /* b and k are loaded too but deliberately unused here */
    /* while there is input ready, copy to output buffer, moving
     * pointers as needed.
     */
    while (n) {
        t = n;  /* how many to do */
        /* is there room until end of buffer? */
        if (t > m) t = m;
        /* update check information */
        if (s->checkfn != Z_NULL)
            s->check = (*s->checkfn)(s->check, q, t);
        zmemcpy(q, p, t);
        q += t;
        p += t;
        n -= t;
        z->total_out += t;
        s->read = q;    /* drag read pointer forward */
/*      WRAP  */        /* expand WRAP macro by hand to handle s->read */
        if (q == s->end) {
            s->read = q = s->window;
            m = WAVAIL;
        }
    }
    UPDATE
    return Z_OK;
}
1124
1125
1126/*
1127 * At the end of a Deflate-compressed PPP packet, we expect to have seen
1128 * a `stored' block type value but not the (zero) length bytes.
1129 */
1130local int inflate_packet_flush(s)
1131 inflate_blocks_statef *s;
1132{
1133 if (s->mode != LENS)
1134 return Z_DATA_ERROR;
1135 s->mode = TYPE;
1136 return Z_OK;
1137}
1138
1139
1140/*+++++*/
1141/* inftrees.c -- generate Huffman trees for efficient decoding
1142 * Copyright (C) 1995 Mark Adler
1143 * For conditions of distribution and use, see copyright notice in zlib.h
1144 */
1145
1146/* simplify the use of the inflate_huft type with some defines */
1147#define base more.Base
1148#define next more.Next
1149#define exop word.what.Exop
1150#define bits word.what.Bits
1151
1152
1153local int huft_build OF((
1154 uIntf *, /* code lengths in bits */
1155 uInt, /* number of codes */
1156 uInt, /* number of "simple" codes */
1157 uIntf *, /* list of base values for non-simple codes */
1158 uIntf *, /* list of extra bits for non-simple codes */
1159 inflate_huft * FAR*,/* result: starting table */
1160 uIntf *, /* maximum lookup bits (returns actual) */
1161 z_stream *)); /* for zalloc function */
1162
1163local voidpf falloc OF((
1164 voidpf, /* opaque pointer (not used) */
1165 uInt, /* number of items */
1166 uInt)); /* size of item */
1167
1168local void ffree OF((
1169 voidpf q, /* opaque pointer (not used) */
1170 voidpf p, /* what to free (not used) */
1171 uInt n)); /* number of bytes (not used) */
1172
1173/* Tables for deflate from PKZIP's appnote.txt. */
local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
        /* actually lengths - 2; also see note #13 above about 258 */
        /* the two trailing 0 entries pad out codes 286-287, which are
           invalid in the DEFLATE format */
local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
        8193, 12289, 16385, 24577};
local uInt cpdext[] = { /* Extra bits for distance codes */
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
        12, 12, 13, 13};
1189
1190/*
1191 Huffman code decoding is performed using a multi-level table lookup.
1192 The fastest way to decode is to simply build a lookup table whose
1193 size is determined by the longest code. However, the time it takes
1194 to build this table can also be a factor if the data being decoded
1195 is not very long. The most common codes are necessarily the
1196 shortest codes, so those codes dominate the decoding time, and hence
1197 the speed. The idea is you can have a shorter table that decodes the
1198 shorter, more probable codes, and then point to subsidiary tables for
1199 the longer codes. The time it costs to decode the longer codes is
1200 then traded against the time it takes to make longer tables.
1201
   The results of this trade are in the variables lbits and dbits
1203 below. lbits is the number of bits the first level table for literal/
1204 length codes can decode in one step, and dbits is the same thing for
1205 the distance codes. Subsequent tables are also less than or equal to
1206 those sizes. These values may be adjusted either when all of the
1207 codes are shorter than that, in which case the longest code length in
1208 bits is used, or when the shortest code is *longer* than the requested
1209 table size, in which case the length of the shortest code in bits is
1210 used.
1211
1212 There are two different values for the two tables, since they code a
1213 different number of possibilities each. The literal/length table
1214 codes 286 possible values, or in a flat code, a little over eight
1215 bits. The distance table codes 30 possible values, or a little less
1216 than five bits, flat. The optimum values for speed end up being
1217 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
1218 The optimum values may differ though from machine to machine, and
1219 possibly even between compilers. Your mileage may vary.
1220 */
1221
1222
1223/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
1224#define BMAX 15 /* maximum bit length of any code */
1225#define N_MAX 288 /* maximum number of codes in any set */
1226
1227#ifdef DEBUG_ZLIB
1228 uInt inflate_hufts;
1229#endif
1230
local int huft_build(b, n, s, d, e, t, m, zs)
uIntf *b;               /* code lengths in bits (all assumed <= BMAX) */
uInt n;                 /* number of codes (assumed <= N_MAX) */
uInt s;                 /* number of simple-valued codes (0..s-1) */
uIntf *d;               /* list of base values for non-simple codes */
uIntf *e;               /* list of extra bits for non-simple codes */
inflate_huft * FAR *t;  /* result: starting table */
uIntf *m;               /* maximum lookup bits, returns actual */
z_stream *zs;           /* for zalloc function */
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return Z_OK on success, Z_BUF_ERROR
   if the given code set is incomplete (the tables are still built in this
   case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
   over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
{

  uInt a;                       /* counter for codes of length k */
  uInt c[BMAX+1];               /* bit length count table */
  uInt f;                       /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register uInt i;              /* counter, current code */
  register uInt j;              /* counter */
  register int k;               /* number of bits in current code */
  int l;                        /* bits per table (returned in m) */
  register uIntf *p;            /* pointer into c[], b[], or v[] */
  inflate_huft *q;              /* points to current table */
  struct inflate_huft_s r;      /* table entry for structure assignment */
  inflate_huft *u[BMAX];        /* table stack */
  uInt v[N_MAX];                /* values in order of bit length */
  register int w;               /* bits before this table == (l * h) */
  uInt x[BMAX+1];               /* bit offsets, then code stack */
  uIntf *xp;                    /* pointer into x */
  int y;                        /* number of dummy codes added */
  uInt z;                       /* number of entries in current table */


  /* Generate counts for each bit length */
  p = c;
#define C0 *p++ = 0;
#define C2 C0 C0 C0 C0
#define C4 C2 C2 C2 C2
  C4                            /* clear c[]--assume BMAX+1 is 16 */
  p = b;  i = n;
  do {
    c[*p++]++;                  /* assume all entries <= BMAX */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (inflate_huft *)Z_NULL;
    *m = 0;
    return Z_OK;
  }


  /* Find minimum and maximum length, bound *m by those */
  l = *m;
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if ((uInt)l < j)
    l = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if ((uInt)l > i)
    l = i;
  *m = l;


  /* Adjust last length count to fill out codes, if needed.
     y tracks the Kraft-inequality slack; going negative means the
     length set is over-subscribed (more codes than the tree can hold). */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return Z_DATA_ERROR;
  if ((y -= c[i]) < 0)
    return Z_DATA_ERROR;
  c[i] += y;


  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }


  /* Make a table of values in order of bit lengths */
  p = b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);


  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = -l;                       /* bits decoded == (l * h) */
  u[0] = (inflate_huft *)Z_NULL;        /* just to keep compilers happy */
  q = (inflate_huft *)Z_NULL;   /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l)
      {
        h++;
        w += l;                 /* previous table always l bits */

        /* compute minimum size table less than or equal to l bits */
        z = (z = g - w) > (uInt)l ? l : z;      /* table size upper limit */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          if (j < z)
            while (++j < z)     /* try smaller tables up to z bits */
            {
              if ((f <<= 1) <= *++xp)
                break;          /* enough codes to use up j bits */
              f -= *xp;         /* else deduct codes from patterns */
            }
        }
        z = 1 << j;             /* table entries for j-bit table */

        /* allocate and link in new table; entry 0 is a dummy used only
           as a link for inflate_trees_free() */
        if ((q = (inflate_huft *)ZALLOC
             (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
        {
          if (h)
            inflate_trees_free(u[0], zs);
          return Z_MEM_ERROR;   /* not enough memory */
        }
        q->word.Nalloc = z + 1;
#ifdef DEBUG_ZLIB
        inflate_hufts += z + 1;
#endif
        *t = q + 1;             /* link to list for huft_free() */
        *(t = &(q->next)) = Z_NULL;
        u[h] = ++q;             /* table starts after link */

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.bits = (Byte)l;     /* bits to dump before this table */
          r.exop = (Byte)j;     /* bits in this table */
          r.next = q;           /* pointer to this table */
          j = i >> (w - l);     /* (get around Turbo C bug) */
          u[h-1][j] = r;        /* connect to last table */
        }
      }

      /* set up table entry in r */
      r.bits = (Byte)(k - w);
      if (p >= v + n)
        r.exop = 128 + 64;      /* out of values--invalid code */
      else if (*p < s)
      {
        r.exop = (Byte)(*p < 256 ? 0 : 32 + 64);    /* 256 is end-of-block */
        r.base = *p++;          /* simple code is just the value */
      }
      else
      {
        r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
        r.base = d[*p++ - s];
      }

      /* fill code-like entries with r */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      while ((i & ((1 << w) - 1)) != x[h])
      {
        h--;                    /* don't need to update q */
        w -= l;
      }
    }
  }


  /* Return Z_BUF_ERROR if we were given an incomplete table */
  return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
}
1432
1433
1434local int inflate_trees_bits(c, bb, tb, z)
1435uIntf *c; /* 19 code lengths */
1436uIntf *bb; /* bits tree desired/actual depth */
1437inflate_huft * FAR *tb; /* bits tree result */
1438z_stream *z; /* for zfree function */
1439{
1440 int r;
1441
1442 r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
1443 if (r == Z_DATA_ERROR)
1444 z->msg = "oversubscribed dynamic bit lengths tree";
1445 else if (r == Z_BUF_ERROR)
1446 {
1447 inflate_trees_free(*tb, z);
1448 z->msg = "incomplete dynamic bit lengths tree";
1449 r = Z_DATA_ERROR;
1450 }
1451 return r;
1452}
1453
1454
1455local int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
1456uInt nl; /* number of literal/length codes */
1457uInt nd; /* number of distance codes */
1458uIntf *c; /* that many (total) code lengths */
1459uIntf *bl; /* literal desired/actual bit depth */
1460uIntf *bd; /* distance desired/actual bit depth */
1461inflate_huft * FAR *tl; /* literal/length tree result */
1462inflate_huft * FAR *td; /* distance tree result */
1463z_stream *z; /* for zfree function */
1464{
1465 int r;
1466
1467 /* build literal/length tree */
1468 if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
1469 {
1470 if (r == Z_DATA_ERROR)
1471 z->msg = "oversubscribed literal/length tree";
1472 else if (r == Z_BUF_ERROR)
1473 {
1474 inflate_trees_free(*tl, z);
1475 z->msg = "incomplete literal/length tree";
1476 r = Z_DATA_ERROR;
1477 }
1478 return r;
1479 }
1480
1481 /* build distance tree */
1482 if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
1483 {
1484 if (r == Z_DATA_ERROR)
1485 z->msg = "oversubscribed literal/length tree";
1486 else if (r == Z_BUF_ERROR) {
1487#ifdef PKZIP_BUG_WORKAROUND
1488 r = Z_OK;
1489 }
1490#else
1491 inflate_trees_free(*td, z);
1492 z->msg = "incomplete literal/length tree";
1493 r = Z_DATA_ERROR;
1494 }
1495 inflate_trees_free(*tl, z);
1496 return r;
1497#endif
1498 }
1499
1500 /* done */
1501 return Z_OK;
1502}
1503
1504
/* build fixed tables only once--keep them here */
local int fixed_lock = 0;       /* crude busy-wait flag; see inflate_trees_fixed() */
local int fixed_built = 0;      /* nonzero once the fixed trees exist */
#define FIXEDH 530 /* number of hufts used by fixed tables */
local uInt fixed_left = FIXEDH; /* entries still free in fixed_mem (falloc carves from the top) */
local inflate_huft fixed_mem[FIXEDH];
local uInt fixed_bl;            /* lookup bits for the fixed literal/length tree */
local uInt fixed_bd;            /* lookup bits for the fixed distance tree */
local inflate_huft *fixed_tl;   /* fixed literal/length tree */
local inflate_huft *fixed_td;   /* fixed distance tree */
1515
1516
1517local voidpf falloc(q, n, s)
1518voidpf q; /* opaque pointer (not used) */
1519uInt n; /* number of items */
1520uInt s; /* size of item */
1521{
1522 Assert(s == sizeof(inflate_huft) && n <= fixed_left,
1523 "inflate_trees falloc overflow");
1524 if (q) s++; /* to make some compilers happy */
1525 fixed_left -= n;
1526 return (voidpf)(fixed_mem + fixed_left);
1527}
1528
1529
1530local void ffree(q, p, n)
1531voidpf q;
1532voidpf p;
1533uInt n;
1534{
1535 Assert(0, "inflate_trees ffree called!");
1536 if (q) q = p; /* to make some compilers happy */
1537}
1538
1539
local int inflate_trees_fixed(bl, bd, tl, td)
uIntf *bl;              /* literal desired/actual bit depth */
uIntf *bd;              /* distance desired/actual bit depth */
inflate_huft * FAR *tl; /* literal/length tree result */
inflate_huft * FAR *td; /* distance tree result */
/* Return the shared, lazily-built fixed-block Huffman trees (RFC 1951
   section 3.2.6 code lengths). */
{
  /* build fixed tables if not built already--lock out other instances */
  /* NOTE(review): this increment/decrement "lock" is not atomic and so
     cannot reliably exclude concurrent callers; presumably acceptable in
     a single-threaded boot/kernel context -- confirm before reuse. */
  while (++fixed_lock > 1)
    fixed_lock--;
  if (!fixed_built)
  {
    int k;              /* temporary variable */
    unsigned c[288];    /* length list for huft_build */
    z_stream z;         /* for falloc function */

    /* set up fake z_stream for memory routines */
    z.zalloc = falloc;
    z.zfree = ffree;
    z.opaque = Z_NULL;

    /* literal table: fixed code lengths 8/9/7/8 for the four ranges */
    for (k = 0; k < 144; k++)
      c[k] = 8;
    for (; k < 256; k++)
      c[k] = 9;
    for (; k < 280; k++)
      c[k] = 7;
    for (; k < 288; k++)
      c[k] = 8;
    fixed_bl = 7;
    huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);

    /* distance table: all 30 codes are 5 bits */
    for (k = 0; k < 30; k++)
      c[k] = 5;
    fixed_bd = 5;
    huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);

    /* done */
    fixed_built = 1;
  }
  fixed_lock--;
  *bl = fixed_bl;
  *bd = fixed_bd;
  *tl = fixed_tl;
  *td = fixed_td;
  return Z_OK;
}
1588
1589
1590local int inflate_trees_free(t, z)
1591inflate_huft *t; /* table to free */
1592z_stream *z; /* for zfree function */
1593/* Free the malloc'ed tables built by huft_build(), which makes a linked
1594 list of the tables it made, with the links in a dummy first entry of
1595 each table. */
1596{
1597 register inflate_huft *p, *q;
1598
1599 /* Go through linked list, freeing from the malloced (t[-1]) address. */
1600 p = t;
1601 while (p != Z_NULL)
1602 {
1603 q = (--p)->next;
1604 ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
1605 p = q;
1606 }
1607 return Z_OK;
1608}
1609
1610/*+++++*/
1611/* infcodes.c -- process literals and length/distance pairs
1612 * Copyright (C) 1995 Mark Adler
1613 * For conditions of distribution and use, see copyright notice in zlib.h
1614 */
1615
1616/* simplify the use of the inflate_huft type with some defines */
1617#define base more.Base
1618#define next more.Next
1619#define exop word.what.Exop
1620#define bits word.what.Bits
1621
1622/* inflate codes private state */
/* Per-stream state for decoding literal/length and distance codes. */
struct inflate_codes_state {

  /* mode */
  enum {        /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
      START,    /* x: set up for LEN */
      LEN,      /* i: get length/literal/eob next */
      LENEXT,   /* i: getting length extra (have base) */
      DIST,     /* i: get distance next */
      DISTEXT,  /* i: getting distance extra */
      COPY,     /* o: copying bytes in window, waiting for space */
      LIT,      /* o: got literal, waiting for output space */
      WASH,     /* o: got eob, possibly still output waiting */
      END,      /* x: got eob and all data flushed */
      BADCODE}  /* x: got error */
    mode;               /* current inflate_codes mode */

  /* mode dependent information */
  uInt len;
  union {
    struct {
      inflate_huft *tree;       /* pointer into tree */
      uInt need;                /* bits needed */
    } code;             /* if LEN or DIST, where in tree */
    uInt lit;           /* if LIT, literal */
    struct {
      uInt get;                 /* bits to get for extra */
      uInt dist;                /* distance back to copy from */
    } copy;             /* if EXT or COPY, where and how much */
  } sub;                /* submode */

  /* mode independent information */
  Byte lbits;           /* ltree bits decoded per branch */
  Byte dbits;           /* dtree bits decoded per branch */
  inflate_huft *ltree;  /* literal/length/eob tree */
  inflate_huft *dtree;  /* distance tree */

};
1660
1661
1662local inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
1663uInt bl, bd;
1664inflate_huft *tl, *td;
1665z_stream *z;
1666{
1667 inflate_codes_statef *c;
1668
1669 if ((c = (inflate_codes_statef *)
1670 ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
1671 {
1672 c->mode = START;
1673 c->lbits = (Byte)bl;
1674 c->dbits = (Byte)bd;
1675 c->ltree = tl;
1676 c->dtree = td;
1677 Tracev((stderr, "inflate: codes new\n"));
1678 }
1679 return c;
1680}
1681
1682
local int inflate_codes(s, z, r)
inflate_blocks_statef *s;
z_stream *z;
int r;
/* Decode literal/length and distance codes for one compressed block,
   driven as a resumable state machine: it consumes input and produces
   window output until it runs out of one or the other (LEAVE/NEEDBITS/
   NEEDOUT suspend it), then picks up where it left off on the next call. */
{
  uInt j;               /* temporary storage */
  inflate_huft *t;      /* temporary pointer */
  uInt e;               /* extra bits or operation */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */
  Bytef *f;             /* pointer to copy strings from */
  inflate_codes_statef *c = s->sub.decode.codes;  /* codes state */

  /* copy input/output information to locals (UPDATE macro restores) */
  LOAD

  /* process input and output based on current state */
  while (1) switch (c->mode)
  {             /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
    case START:         /* x: set up for LEN */
#ifndef SLOW
      /* with ample room and input, take the unchecked fast path */
      if (m >= 258 && n >= 10)
      {
        UPDATE
        r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
        LOAD
        if (r != Z_OK)
        {
          c->mode = r == Z_STREAM_END ? WASH : BADCODE;
          break;
        }
      }
#endif /* !SLOW */
      c->sub.code.need = c->lbits;
      c->sub.code.tree = c->ltree;
      c->mode = LEN;
      /* fall through */
    case LEN:           /* i: get length/literal/eob next */
      j = c->sub.code.need;
      NEEDBITS(j)
      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
      DUMPBITS(t->bits)
      e = (uInt)(t->exop);
      if (e == 0)               /* literal */
      {
        c->sub.lit = t->base;
        Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                 "inflate: literal '%c'\n" :
                 "inflate: literal 0x%02x\n", t->base));
        c->mode = LIT;
        break;
      }
      if (e & 16)               /* length */
      {
        c->sub.copy.get = e & 15;
        c->len = t->base;
        c->mode = LENEXT;
        break;
      }
      if ((e & 64) == 0)        /* next table */
      {
        c->sub.code.need = e;
        c->sub.code.tree = t->next;
        break;
      }
      if (e & 32)               /* end of block */
      {
        Tracevv((stderr, "inflate: end of block\n"));
        c->mode = WASH;
        break;
      }
      c->mode = BADCODE;        /* invalid code */
      z->msg = "invalid literal/length code";
      r = Z_DATA_ERROR;
      LEAVE
    case LENEXT:        /* i: getting length extra (have base) */
      j = c->sub.copy.get;
      NEEDBITS(j)
      c->len += (uInt)b & inflate_mask[j];
      DUMPBITS(j)
      c->sub.code.need = c->dbits;
      c->sub.code.tree = c->dtree;
      Tracevv((stderr, "inflate: length %u\n", c->len));
      c->mode = DIST;
      /* fall through */
    case DIST:          /* i: get distance next */
      j = c->sub.code.need;
      NEEDBITS(j)
      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
      DUMPBITS(t->bits)
      e = (uInt)(t->exop);
      if (e & 16)               /* distance */
      {
        c->sub.copy.get = e & 15;
        c->sub.copy.dist = t->base;
        c->mode = DISTEXT;
        break;
      }
      if ((e & 64) == 0)        /* next table */
      {
        c->sub.code.need = e;
        c->sub.code.tree = t->next;
        break;
      }
      c->mode = BADCODE;        /* invalid code */
      z->msg = "invalid distance code";
      r = Z_DATA_ERROR;
      LEAVE
    case DISTEXT:       /* i: getting distance extra */
      j = c->sub.copy.get;
      NEEDBITS(j)
      c->sub.copy.dist += (uInt)b & inflate_mask[j];
      DUMPBITS(j)
      Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
      c->mode = COPY;
      /* fall through */
    case COPY:          /* o: copying bytes in window, waiting for space */
      /* source pointer wraps backwards around the window if the
         distance reaches behind the current write position */
#ifndef __TURBOC__ /* Turbo C bug for following expression */
      f = (uInt)(q - s->window) < c->sub.copy.dist ?
          s->end - (c->sub.copy.dist - (q - s->window)) :
          q - c->sub.copy.dist;
#else
      f = q - c->sub.copy.dist;
      if ((uInt)(q - s->window) < c->sub.copy.dist)
        f = s->end - (c->sub.copy.dist - (q - s->window));
#endif
      while (c->len)
      {
        NEEDOUT
        OUTBYTE(*f++)
        if (f == s->end)
          f = s->window;
        c->len--;
      }
      c->mode = START;
      break;
    case LIT:           /* o: got literal, waiting for output space */
      NEEDOUT
      OUTBYTE(c->sub.lit)
      c->mode = START;
      break;
    case WASH:          /* o: got eob, possibly more output */
      FLUSH
      if (s->read != s->write)
        LEAVE
      c->mode = END;
      /* fall through */
    case END:
      r = Z_STREAM_END;
      LEAVE
    case BADCODE:       /* x: got error */
      r = Z_DATA_ERROR;
      LEAVE
    default:
      r = Z_STREAM_ERROR;
      LEAVE
  }
}
1841
1842
local void inflate_codes_free(c, z)
inflate_codes_statef *c;
z_stream *z;
/* Release a codes state allocated by inflate_codes_new(); the trees it
   points at are owned elsewhere and freed separately. */
{
  ZFREE(z, c, sizeof(struct inflate_codes_state));
  Tracev((stderr, "inflate: codes free\n"));
}
1850
1851/*+++++*/
1852/* inflate_util.c -- data and routines common to blocks and codes
1853 * Copyright (C) 1995 Mark Adler
1854 * For conditions of distribution and use, see copyright notice in zlib.h
1855 */
1856
1857/* copy as much as possible from the sliding window to the output area */
/* copy as much as possible from the sliding window to the output area */
local int inflate_flush(s, z, r)
inflate_blocks_statef *s;
z_stream *z;
int r;
/* Moves pending window bytes (between s->read and s->write) into
   z->next_out, in up to two segments when the pending region wraps
   past the end of the window.  Returns r, upgraded from Z_BUF_ERROR
   to Z_OK whenever at least one byte was delivered. */
{
  uInt n;
  Bytef *p, *q;

  /* local copies of source and destination pointers */
  p = z->next_out;
  q = s->read;

  /* compute number of bytes to copy as far as end of window */
  n = (uInt)((q <= s->write ? s->write : s->end) - q);
  if (n > z->avail_out) n = z->avail_out;
  if (n && r == Z_BUF_ERROR) r = Z_OK;  /* progress made: not a buffer error */

  /* update counters */
  z->avail_out -= n;
  z->total_out += n;

  /* update check information */
  if (s->checkfn != Z_NULL)
    s->check = (*s->checkfn)(s->check, q, n);

  /* copy as far as end of window */
  zmemcpy(p, q, n);
  p += n;
  q += n;

  /* see if more to copy at beginning of window */
  if (q == s->end)
  {
    /* wrap pointers */
    q = s->window;
    if (s->write == s->end)
      s->write = s->window;

    /* compute bytes to copy */
    n = (uInt)(s->write - q);
    if (n > z->avail_out) n = z->avail_out;
    if (n && r == Z_BUF_ERROR) r = Z_OK;

    /* update counters */
    z->avail_out -= n;
    z->total_out += n;

    /* update check information */
    if (s->checkfn != Z_NULL)
      s->check = (*s->checkfn)(s->check, q, n);

    /* copy */
    zmemcpy(p, q, n);
    p += n;
    q += n;
  }

  /* update pointers */
  z->next_out = p;
  s->read = q;

  /* done */
  return r;
}
1922
1923
1924/*+++++*/
1925/* inffast.c -- process literals and length/distance pairs fast
1926 * Copyright (C) 1995 Mark Adler
1927 * For conditions of distribution and use, see copyright notice in zlib.h
1928 */
1929
1930/* simplify the use of the inflate_huft type with some defines */
1931#define base more.Base
1932#define next more.Next
1933#define exop word.what.Exop
1934#define bits word.what.Bits
1935
1936/* macros for bit input with no checking and for returning unused bytes */
1937#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
1938#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
1939
1940/* Called with number of bytes left to write in window at least 258
1941 (the maximum string length) and number of input bytes available
1942 at least ten. The ten bytes are six bytes for the longest length/
1943 distance pair plus four bytes for overloading the bit buffer. */
1944
local int inflate_fast(bl, bd, tl, td, s, z)
uInt bl, bd;
inflate_huft *tl, *td;
inflate_blocks_statef *s;
z_stream *z;
/* Fast-path decoder: per the contract above, the caller guarantees at
   least 258 bytes of window space and 10 bytes of input, so bit fetches
   (GRABBITS) and window writes need no per-byte bounds checks. */
{
  inflate_huft *t;      /* temporary pointer */
  uInt e;               /* extra bits or operation */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */
  uInt ml;              /* mask for literal/length tree */
  uInt md;              /* mask for distance tree */
  uInt c;               /* bytes to copy */
  uInt d;               /* distance back to copy from */
  Bytef *r;             /* copy source pointer */

  /* load input, output, bit values */
  LOAD

  /* initialize masks */
  ml = inflate_mask[bl];
  md = inflate_mask[bd];

  /* do until not enough input or output space for fast loop */
  do {                          /* assume called with m >= 258 && n >= 10 */
    /* get literal/length code */
    GRABBITS(20)                /* max bits for literal/length code */
    if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
    {
      DUMPBITS(t->bits)
      Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                "inflate: * literal '%c'\n" :
                "inflate: * literal 0x%02x\n", t->base));
      *q++ = (Byte)t->base;
      m--;
      continue;
    }
    do {
      DUMPBITS(t->bits)
      if (e & 16)
      {
        /* get extra bits for length */
        e &= 15;
        c = t->base + ((uInt)b & inflate_mask[e]);
        DUMPBITS(e)
        Tracevv((stderr, "inflate: * length %u\n", c));

        /* decode distance base of block to copy */
        GRABBITS(15);           /* max bits for distance code */
        e = (t = td + ((uInt)b & md))->exop;
        do {
          DUMPBITS(t->bits)
          if (e & 16)
          {
            /* get extra bits to add to distance base */
            e &= 15;
            GRABBITS(e)         /* get extra bits (up to 13) */
            d = t->base + ((uInt)b & inflate_mask[e]);
            DUMPBITS(e)
            Tracevv((stderr, "inflate: * distance %u\n", d));

            /* do the copy */
            m -= c;
            if ((uInt)(q - s->window) >= d)     /* offset before dest */
            {                                   /* just copy */
              r = q - d;
              *q++ = *r++; c--; /* minimum count is three, */
              *q++ = *r++; c--; /* so unroll loop a little */
            }
            else                /* else offset after destination */
            {
              e = d - (q - s->window); /* bytes from offset to end */
              r = s->end - e;   /* pointer to offset */
              if (c > e)        /* if source crosses, */
              {
                c -= e;         /* copy to end of window */
                do {
                  *q++ = *r++;
                } while (--e);
                r = s->window;  /* copy rest from start of window */
              }
            }
            do {                /* copy all or what's left */
              *q++ = *r++;
            } while (--c);
            break;
          }
          else if ((e & 64) == 0)
            /* sub-table: index with the low e bits still in the buffer */
            e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
          else
          {
            z->msg = "invalid distance code";
            UNGRAB
            UPDATE
            return Z_DATA_ERROR;
          }
        } while (1);
        break;
      }
      if ((e & 64) == 0)
      {
        if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
        {
          DUMPBITS(t->bits)
          Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                    "inflate: * literal '%c'\n" :
                    "inflate: * literal 0x%02x\n", t->base));
          *q++ = (Byte)t->base;
          m--;
          break;
        }
      }
      else if (e & 32)
      {
        Tracevv((stderr, "inflate: * end of block\n"));
        UNGRAB
        UPDATE
        return Z_STREAM_END;
      }
      else
      {
        z->msg = "invalid literal/length code";
        UNGRAB
        UPDATE
        return Z_DATA_ERROR;
      }
    } while (1);
  } while (m >= 258 && n >= 10);

  /* not enough input or output--restore pointers and return */
  UNGRAB
  UPDATE
  return Z_OK;
}
2083
2084
2085/*+++++*/
2086/* zutil.c -- target dependent utility functions for the compression library
2087 * Copyright (C) 1995 Jean-loup Gailly.
2088 * For conditions of distribution and use, see copyright notice in zlib.h
2089 */
2090
2091/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
2092
char *zlib_version = ZLIB_VERSION;

/* error message table; apparently indexed as z_errmsg[1 - err] so that
   Z_STREAM_END (1) lands in slot 0 -- verify against the ERR_MSG macro */
char *z_errmsg[] = {
"stream end", /* Z_STREAM_END 1 */
"", /* Z_OK 0 */
"file error", /* Z_ERRNO (-1) */
"stream error", /* Z_STREAM_ERROR (-2) */
"data error", /* Z_DATA_ERROR (-3) */
"insufficient memory", /* Z_MEM_ERROR (-4) */
"buffer error", /* Z_BUF_ERROR (-5) */
""};
2104
2105
2106/*+++++*/
2107/* adler32.c -- compute the Adler-32 checksum of a data stream
2108 * Copyright (C) 1995 Mark Adler
2109 * For conditions of distribution and use, see copyright notice in zlib.h
2110 */
2111
2112/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
2113
2114#define BASE 65521L /* largest prime smaller than 65536 */
2115#define NMAX 5552
2116/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
2117
2118#define DO1(buf) {s1 += *buf++; s2 += s1;}
2119#define DO2(buf) DO1(buf); DO1(buf);
2120#define DO4(buf) DO2(buf); DO2(buf);
2121#define DO8(buf) DO4(buf); DO4(buf);
2122#define DO16(buf) DO8(buf); DO8(buf);
2123
2124/* ========================================================================= */
2125uLong adler32(adler, buf, len)
2126 uLong adler;
2127 Bytef *buf;
2128 uInt len;
2129{
2130 unsigned long s1 = adler & 0xffff;
2131 unsigned long s2 = (adler >> 16) & 0xffff;
2132 int k;
2133
2134 if (buf == Z_NULL) return 1L;
2135
2136 while (len > 0) {
2137 k = len < NMAX ? len : NMAX;
2138 len -= k;
2139 while (k >= 16) {
2140 DO16(buf);
2141 k -= 16;
2142 }
2143 if (k != 0) do {
2144 DO1(buf);
2145 } while (--k);
2146 s1 %= BASE;
2147 s2 %= BASE;
2148 }
2149 return (s2 << 16) | s1;
2150}
diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
new file mode 100644
index 000000000000..7848f126d67d
--- /dev/null
+++ b/arch/xtensa/boot/lib/zmem.c
@@ -0,0 +1,87 @@
1#include "zlib.h"
2
3/* bits taken from ppc */
4
5extern void *avail_ram, *end_avail;
6
/* Fatal-error stop: the boot loader has no caller to return an error
 * to, so on any failure we simply spin forever. */
void exit (void)
{
	while (1)
		;
}
11
12void *zalloc(void *x, unsigned items, unsigned size)
13{
14 void *p = avail_ram;
15
16 size *= items;
17 size = (size + 7) & -8;
18 avail_ram += size;
19 if (avail_ram > end_avail) {
20 //puts("oops... out of memory\n");
21 //pause();
22 exit ();
23 }
24 return p;
25}
26
/* zlib free callback: zalloc() above is a bump allocator, so
 * individual frees are intentionally no-ops. */
void zfree(void *x, void *addr, unsigned nb)
{
}
30
31
/* gzip header FLG byte bits (byte at offset 3; see RFC 1952) */
#define HEAD_CRC 2
#define EXTRA_FIELD 4
#define ORIG_NAME 8
#define COMMENT 0x10
#define RESERVED 0xe0

/* gzip CM byte (offset 2): only the deflate method is supported */
#define DEFLATED 8
39
40void gunzip (void *dst, int dstlen, unsigned char *src, int *lenp)
41{
42 z_stream s;
43 int r, i, flags;
44
45 /* skip header */
46
47 i = 10;
48 flags = src[3];
49 if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
50 //puts("bad gzipped data\n");
51 exit();
52 }
53 if ((flags & EXTRA_FIELD) != 0)
54 i = 12 + src[10] + (src[11] << 8);
55 if ((flags & ORIG_NAME) != 0)
56 while (src[i++] != 0)
57 ;
58 if ((flags & COMMENT) != 0)
59 while (src[i++] != 0)
60 ;
61 if ((flags & HEAD_CRC) != 0)
62 i += 2;
63 if (i >= *lenp) {
64 //puts("gunzip: ran out of data in header\n");
65 exit();
66 }
67
68 s.zalloc = zalloc;
69 s.zfree = zfree;
70 r = inflateInit2(&s, -MAX_WBITS);
71 if (r != Z_OK) {
72 //puts("inflateInit2 returned "); puthex(r); puts("\n");
73 exit();
74 }
75 s.next_in = src + i;
76 s.avail_in = *lenp - i;
77 s.next_out = dst;
78 s.avail_out = dstlen;
79 r = inflate(&s, Z_FINISH);
80 if (r != Z_OK && r != Z_STREAM_END) {
81 //puts("inflate returned "); puthex(r); puts("\n");
82 exit();
83 }
84 *lenp = s.next_out - (unsigned char *) dst;
85 inflateEnd(&s);
86}
87
diff --git a/arch/xtensa/boot/ramdisk/Makefile b/arch/xtensa/boot/ramdisk/Makefile
new file mode 100644
index 000000000000..b12f76352438
--- /dev/null
+++ b/arch/xtensa/boot/ramdisk/Makefile
@@ -0,0 +1,23 @@
1#
2# Makefile for a ramdisk image
3#
4
5BIG_ENDIAN := $(shell echo -e "\#ifdef __XTENSA_EL__\nint little;\n\#else\nint big;\n\#endif" | $(CC) -E -|grep -c big)
6
7ifeq ($(BIG_ENDIAN),1)
8OBJCOPY_ARGS := -O elf32-xtensa-be
9else
10OBJCOPY_ARGS := -O elf32-xtensa-le
11endif
12
13obj-y = ramdisk.o
14
15RAMDISK_IMAGE = arch/$(ARCH)/boot/ramdisk/$(CONFIG_EMBEDDED_RAMDISK_IMAGE)
16
# Wrap the configured ramdisk image into a linkable object by placing it
# in an .initrd section.  The dummy assembler input just gives objcopy an
# ELF object of the right format to add the section to.
# NOTE(review): the flag list previously read "contents,alloc,load,load,data";
# the duplicated "load" was almost certainly meant to be "readonly".
arch/$(ARCH)/boot/ramdisk/ramdisk.o:
	$(Q)echo -e "dummy:" | $(AS) -o $@;
	$(Q)$(OBJCOPY) $(OBJCOPY_ARGS) \
		--add-section .initrd=$(RAMDISK_IMAGE) \
		--set-section-flags .initrd=contents,alloc,load,readonly,data \
		arch/$(ARCH)/boot/ramdisk/ramdisk.o $@
23
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
new file mode 100644
index 000000000000..1d230ee081b4
--- /dev/null
+++ b/arch/xtensa/configs/common_defconfig
@@ -0,0 +1,662 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.11-rc2
4# Tue Mar 1 16:36:53 2005
5#
6# CONFIG_FRAME_POINTER is not set
7CONFIG_XTENSA=y
8# CONFIG_UID16 is not set
9CONFIG_RWSEM_XCHGADD_ALGORITHM=y
10CONFIG_HAVE_DEC_LOCK=y
11CONFIG_GENERIC_HARDIRQS=y
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_CLEAN_COMPILE=y
18CONFIG_BROKEN_ON_SMP=y
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_SWAP=y
25CONFIG_SYSVIPC=y
26# CONFIG_POSIX_MQUEUE is not set
27CONFIG_BSD_PROCESS_ACCT=y
28# CONFIG_BSD_PROCESS_ACCT_V3 is not set
29CONFIG_SYSCTL=y
30# CONFIG_AUDIT is not set
31CONFIG_LOG_BUF_SHIFT=14
32# CONFIG_HOTPLUG is not set
33CONFIG_KOBJECT_UEVENT=y
34# CONFIG_IKCONFIG is not set
35# CONFIG_EMBEDDED is not set
36CONFIG_KALLSYMS=y
37# CONFIG_KALLSYMS_ALL is not set
38# CONFIG_KALLSYMS_EXTRA_PASS is not set
39CONFIG_FUTEX=y
40CONFIG_EPOLL=y
41# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
42CONFIG_SHMEM=y
43CONFIG_CC_ALIGN_FUNCTIONS=0
44CONFIG_CC_ALIGN_LABELS=0
45CONFIG_CC_ALIGN_LOOPS=0
46CONFIG_CC_ALIGN_JUMPS=0
47# CONFIG_TINY_SHMEM is not set
48
49#
50# Loadable module support
51#
52CONFIG_MODULES=y
53# CONFIG_MODULE_UNLOAD is not set
54CONFIG_OBSOLETE_MODPARM=y
55CONFIG_MODVERSIONS=y
56# CONFIG_MODULE_SRCVERSION_ALL is not set
57CONFIG_KMOD=y
58
59#
60# Processor type and features
61#
62CONFIG_XTENSA_ARCH_LINUX_BE=y
63# CONFIG_XTENSA_ARCH_LINUX_LE is not set
64# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
65# CONFIG_XTENSA_ARCH_S5 is not set
66# CONFIG_XTENSA_CUSTOM is not set
67CONFIG_MMU=y
68# CONFIG_XTENSA_UNALIGNED_USER is not set
69# CONFIG_PREEMPT is not set
70# CONFIG_MATH_EMULATION is not set
71# CONFIG_HIGHMEM is not set
72
73#
74# Platform options
75#
76# CONFIG_XTENSA_PLATFORM_ISS is not set
77CONFIG_XTENSA_PLATFORM_XT2000=y
78CONFIG_XTENSA_CALIBRATE_CCOUNT=y
79CONFIG_GENERIC_CALIBRATE_DELAY=y
80CONFIG_CMDLINE_BOOL=y
81CONFIG_CMDLINE="console=ttyS0,38400 ip=bootp root=nfs nfsroot=/opt/montavista/pro/devkit/xtensa/linux_be/target"
82
83#
84# Bus options
85#
86CONFIG_PCI=y
87# CONFIG_PCI_LEGACY_PROC is not set
88# CONFIG_PCI_NAMES is not set
89
90#
91# PCCARD (PCMCIA/CardBus) support
92#
93# CONFIG_PCCARD is not set
94
95#
96# PC-card bridges
97#
98
99#
100# PCI Hotplug Support
101#
102# CONFIG_HOTPLUG_PCI is not set
103
104#
105# Executable file formats
106#
107CONFIG_KCORE_ELF=y
108CONFIG_BINFMT_ELF=y
109CONFIG_BINFMT_MISC=y
110
111#
112# Device Drivers
113#
114
115#
116# Generic Driver Options
117#
118CONFIG_STANDALONE=y
119CONFIG_PREVENT_FIRMWARE_BUILD=y
120# CONFIG_FW_LOADER is not set
121# CONFIG_DEBUG_DRIVER is not set
122
123#
124# Memory Technology Devices (MTD)
125#
126# CONFIG_MTD is not set
127
128#
129# Parallel port support
130#
131# CONFIG_PARPORT is not set
132
133#
134# Plug and Play support
135#
136
137#
138# Block devices
139#
140# CONFIG_BLK_DEV_FD is not set
141# CONFIG_BLK_CPQ_DA is not set
142# CONFIG_BLK_CPQ_CISS_DA is not set
143# CONFIG_BLK_DEV_DAC960 is not set
144# CONFIG_BLK_DEV_UMEM is not set
145# CONFIG_BLK_DEV_COW_COMMON is not set
146# CONFIG_BLK_DEV_LOOP is not set
147# CONFIG_BLK_DEV_NBD is not set
148# CONFIG_BLK_DEV_SX8 is not set
149# CONFIG_BLK_DEV_RAM is not set
150CONFIG_BLK_DEV_RAM_COUNT=16
151CONFIG_INITRAMFS_SOURCE=""
152# CONFIG_CDROM_PKTCDVD is not set
153
154#
155# IO Schedulers
156#
157CONFIG_IOSCHED_NOOP=y
158CONFIG_IOSCHED_AS=y
159CONFIG_IOSCHED_DEADLINE=y
160CONFIG_IOSCHED_CFQ=y
161# CONFIG_ATA_OVER_ETH is not set
162
163#
164# ATA/ATAPI/MFM/RLL support
165#
166# CONFIG_IDE is not set
167
168#
169# SCSI device support
170#
171# CONFIG_SCSI is not set
172
173#
174# Multi-device support (RAID and LVM)
175#
176# CONFIG_MD is not set
177
178#
179# Fusion MPT device support
180#
181
182#
183# IEEE 1394 (FireWire) support
184#
185# CONFIG_IEEE1394 is not set
186
187#
188# I2O device support
189#
190# CONFIG_I2O is not set
191
192#
193# Networking support
194#
195CONFIG_NET=y
196
197#
198# Networking options
199#
200# CONFIG_PACKET is not set
201# CONFIG_NETLINK_DEV is not set
202CONFIG_UNIX=y
203# CONFIG_NET_KEY is not set
204CONFIG_INET=y
205CONFIG_IP_MULTICAST=y
206CONFIG_IP_ADVANCED_ROUTER=y
207CONFIG_IP_MULTIPLE_TABLES=y
208CONFIG_IP_ROUTE_MULTIPATH=y
209CONFIG_IP_ROUTE_VERBOSE=y
210CONFIG_IP_PNP=y
211CONFIG_IP_PNP_DHCP=y
212CONFIG_IP_PNP_BOOTP=y
213CONFIG_IP_PNP_RARP=y
214# CONFIG_NET_IPIP is not set
215# CONFIG_NET_IPGRE is not set
216# CONFIG_IP_MROUTE is not set
217# CONFIG_ARPD is not set
218# CONFIG_SYN_COOKIES is not set
219# CONFIG_INET_AH is not set
220# CONFIG_INET_ESP is not set
221# CONFIG_INET_IPCOMP is not set
222# CONFIG_INET_TUNNEL is not set
223# CONFIG_IP_TCPDIAG is not set
224# CONFIG_IP_TCPDIAG_IPV6 is not set
225# CONFIG_IPV6 is not set
226# CONFIG_NETFILTER is not set
227
228#
229# SCTP Configuration (EXPERIMENTAL)
230#
231# CONFIG_IP_SCTP is not set
232# CONFIG_ATM is not set
233# CONFIG_BRIDGE is not set
234# CONFIG_VLAN_8021Q is not set
235# CONFIG_DECNET is not set
236# CONFIG_LLC2 is not set
237# CONFIG_IPX is not set
238# CONFIG_ATALK is not set
239# CONFIG_X25 is not set
240# CONFIG_LAPB is not set
241# CONFIG_NET_DIVERT is not set
242# CONFIG_ECONET is not set
243# CONFIG_WAN_ROUTER is not set
244
245#
246# QoS and/or fair queueing
247#
248CONFIG_NET_SCHED=y
249CONFIG_NET_SCH_CLK_JIFFIES=y
250# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
251# CONFIG_NET_SCH_CLK_CPU is not set
252CONFIG_NET_SCH_CBQ=m
253CONFIG_NET_SCH_HTB=m
254# CONFIG_NET_SCH_HFSC is not set
255CONFIG_NET_SCH_PRIO=m
256CONFIG_NET_SCH_RED=m
257CONFIG_NET_SCH_SFQ=m
258CONFIG_NET_SCH_TEQL=m
259CONFIG_NET_SCH_TBF=m
260CONFIG_NET_SCH_GRED=m
261CONFIG_NET_SCH_DSMARK=m
262# CONFIG_NET_SCH_NETEM is not set
263CONFIG_NET_SCH_INGRESS=m
264CONFIG_NET_QOS=y
265CONFIG_NET_ESTIMATOR=y
266CONFIG_NET_CLS=y
267CONFIG_NET_CLS_TCINDEX=m
268CONFIG_NET_CLS_ROUTE4=m
269CONFIG_NET_CLS_ROUTE=y
270CONFIG_NET_CLS_FW=m
271CONFIG_NET_CLS_U32=m
272# CONFIG_CLS_U32_PERF is not set
273# CONFIG_NET_CLS_IND is not set
274CONFIG_NET_CLS_RSVP=m
275CONFIG_NET_CLS_RSVP6=m
276# CONFIG_NET_CLS_ACT is not set
277CONFIG_NET_CLS_POLICE=y
278
279#
280# Network testing
281#
282# CONFIG_NET_PKTGEN is not set
283# CONFIG_NETPOLL is not set
284# CONFIG_NET_POLL_CONTROLLER is not set
285# CONFIG_HAMRADIO is not set
286# CONFIG_IRDA is not set
287# CONFIG_BT is not set
288CONFIG_NETDEVICES=y
289CONFIG_DUMMY=y
290# CONFIG_BONDING is not set
291# CONFIG_EQUALIZER is not set
292# CONFIG_TUN is not set
293
294#
295# ARCnet devices
296#
297# CONFIG_ARCNET is not set
298
299#
300# Ethernet (10 or 100Mbit)
301#
302CONFIG_NET_ETHERNET=y
303# CONFIG_MII is not set
304CONFIG_XT2000_SONIC=y
305# CONFIG_HAPPYMEAL is not set
306# CONFIG_SUNGEM is not set
307# CONFIG_NET_VENDOR_3COM is not set
308
309#
310# Tulip family network device support
311#
312# CONFIG_NET_TULIP is not set
313# CONFIG_HP100 is not set
314# CONFIG_NET_PCI is not set
315
316#
317# Ethernet (1000 Mbit)
318#
319# CONFIG_ACENIC is not set
320# CONFIG_DL2K is not set
321# CONFIG_E1000 is not set
322# CONFIG_NS83820 is not set
323# CONFIG_HAMACHI is not set
324# CONFIG_YELLOWFIN is not set
325# CONFIG_R8169 is not set
326# CONFIG_SK98LIN is not set
327# CONFIG_TIGON3 is not set
328
329#
330# Ethernet (10000 Mbit)
331#
332# CONFIG_IXGB is not set
333# CONFIG_S2IO is not set
334
335#
336# Token Ring devices
337#
338# CONFIG_TR is not set
339
340#
341# Wireless LAN (non-hamradio)
342#
343CONFIG_NET_RADIO=y
344
345#
346# Obsolete Wireless cards support (pre-802.11)
347#
348CONFIG_STRIP=m
349
350#
351# Wireless 802.11b ISA/PCI cards support
352#
353CONFIG_HERMES=m
354# CONFIG_PLX_HERMES is not set
355# CONFIG_TMD_HERMES is not set
356# CONFIG_PCI_HERMES is not set
357# CONFIG_ATMEL is not set
358
359#
360# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
361#
362# CONFIG_PRISM54 is not set
363CONFIG_NET_WIRELESS=y
364
365#
366# Wan interfaces
367#
368# CONFIG_WAN is not set
369# CONFIG_FDDI is not set
370# CONFIG_HIPPI is not set
371# CONFIG_PPP is not set
372# CONFIG_SLIP is not set
373# CONFIG_SHAPER is not set
374# CONFIG_NETCONSOLE is not set
375
376#
377# ISDN subsystem
378#
379# CONFIG_ISDN is not set
380
381#
382# Telephony Support
383#
384# CONFIG_PHONE is not set
385
386#
387# Input device support
388#
389CONFIG_INPUT=y
390
391#
392# Userland interfaces
393#
394CONFIG_INPUT_MOUSEDEV=y
395# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
396CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
397CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
398# CONFIG_INPUT_JOYDEV is not set
399# CONFIG_INPUT_TSDEV is not set
400# CONFIG_INPUT_EVDEV is not set
401# CONFIG_INPUT_EVBUG is not set
402
403#
404# Input I/O drivers
405#
406# CONFIG_GAMEPORT is not set
407CONFIG_SOUND_GAMEPORT=y
408CONFIG_SERIO=y
409# CONFIG_SERIO_I8042 is not set
410# CONFIG_SERIO_SERPORT is not set
411# CONFIG_SERIO_CT82C710 is not set
412# CONFIG_SERIO_PCIPS2 is not set
413# CONFIG_SERIO_RAW is not set
414
415#
416# Input Device Drivers
417#
418# CONFIG_INPUT_KEYBOARD is not set
419# CONFIG_INPUT_MOUSE is not set
420# CONFIG_INPUT_JOYSTICK is not set
421# CONFIG_INPUT_TOUCHSCREEN is not set
422# CONFIG_INPUT_MISC is not set
423
424#
425# Character devices
426#
427CONFIG_VT=y
428CONFIG_VT_CONSOLE=y
429CONFIG_HW_CONSOLE=y
430# CONFIG_SERIAL_NONSTANDARD is not set
431
432#
433# Serial drivers
434#
435CONFIG_SERIAL_8250=y
436CONFIG_SERIAL_8250_CONSOLE=y
437CONFIG_SERIAL_8250_NR_UARTS=4
438# CONFIG_SERIAL_8250_EXTENDED is not set
439
440#
441# Non-8250 serial port support
442#
443CONFIG_SERIAL_CORE=y
444CONFIG_SERIAL_CORE_CONSOLE=y
445CONFIG_UNIX98_PTYS=y
446CONFIG_LEGACY_PTYS=y
447CONFIG_LEGACY_PTY_COUNT=256
448
449#
450# IPMI
451#
452# CONFIG_IPMI_HANDLER is not set
453
454#
455# Watchdog Cards
456#
457# CONFIG_WATCHDOG is not set
458# CONFIG_RTC is not set
459# CONFIG_GEN_RTC is not set
460# CONFIG_DTLK is not set
461# CONFIG_R3964 is not set
462# CONFIG_APPLICOM is not set
463
464#
465# Ftape, the floppy tape device driver
466#
467# CONFIG_DRM is not set
468# CONFIG_RAW_DRIVER is not set
469
470#
471# I2C support
472#
473# CONFIG_I2C is not set
474
475#
476# Dallas's 1-wire bus
477#
478# CONFIG_W1 is not set
479
480#
481# Misc devices
482#
483
484#
485# Multimedia devices
486#
487# CONFIG_VIDEO_DEV is not set
488
489#
490# Digital Video Broadcasting Devices
491#
492# CONFIG_DVB is not set
493
494#
495# Graphics support
496#
497# CONFIG_FB is not set
498
499#
500# Console display driver support
501#
502# CONFIG_VGA_CONSOLE is not set
503CONFIG_DUMMY_CONSOLE=y
504# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
505
506#
507# Sound
508#
509# CONFIG_SOUND is not set
510
511#
512# USB support
513#
514# CONFIG_USB is not set
515CONFIG_USB_ARCH_HAS_HCD=y
516CONFIG_USB_ARCH_HAS_OHCI=y
517
518#
519# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
520#
521
522#
523# USB Gadget Support
524#
525# CONFIG_USB_GADGET is not set
526
527#
528# MMC/SD Card support
529#
530# CONFIG_MMC is not set
531
532#
533# InfiniBand support
534#
535# CONFIG_INFINIBAND is not set
536
537#
538# File systems
539#
540# CONFIG_EXT2_FS is not set
541# CONFIG_EXT3_FS is not set
542# CONFIG_JBD is not set
543# CONFIG_REISERFS_FS is not set
544# CONFIG_JFS_FS is not set
545# CONFIG_XFS_FS is not set
546# CONFIG_MINIX_FS is not set
547# CONFIG_ROMFS_FS is not set
548# CONFIG_QUOTA is not set
549CONFIG_DNOTIFY=y
550# CONFIG_AUTOFS_FS is not set
551# CONFIG_AUTOFS4_FS is not set
552
553#
554# CD-ROM/DVD Filesystems
555#
556# CONFIG_ISO9660_FS is not set
557# CONFIG_UDF_FS is not set
558
559#
560# DOS/FAT/NT Filesystems
561#
562# CONFIG_MSDOS_FS is not set
563# CONFIG_VFAT_FS is not set
564# CONFIG_NTFS_FS is not set
565
566#
567# Pseudo filesystems
568#
569CONFIG_PROC_FS=y
570# CONFIG_PROC_KCORE is not set
571CONFIG_SYSFS=y
572CONFIG_DEVFS_FS=y
573# CONFIG_DEVFS_MOUNT is not set
574# CONFIG_DEVFS_DEBUG is not set
575# CONFIG_DEVPTS_FS_XATTR is not set
576# CONFIG_TMPFS is not set
577# CONFIG_HUGETLB_PAGE is not set
578CONFIG_RAMFS=y
579
580#
581# Miscellaneous filesystems
582#
583# CONFIG_ADFS_FS is not set
584# CONFIG_AFFS_FS is not set
585# CONFIG_HFS_FS is not set
586# CONFIG_HFSPLUS_FS is not set
587# CONFIG_BEFS_FS is not set
588# CONFIG_BFS_FS is not set
589# CONFIG_EFS_FS is not set
590# CONFIG_CRAMFS is not set
591# CONFIG_VXFS_FS is not set
592# CONFIG_HPFS_FS is not set
593# CONFIG_QNX4FS_FS is not set
594# CONFIG_SYSV_FS is not set
595# CONFIG_UFS_FS is not set
596
597#
598# Network File Systems
599#
600CONFIG_NFS_FS=y
601CONFIG_NFS_V3=y
602# CONFIG_NFS_V4 is not set
603# CONFIG_NFS_DIRECTIO is not set
604# CONFIG_NFSD is not set
605CONFIG_ROOT_NFS=y
606CONFIG_LOCKD=y
607CONFIG_LOCKD_V4=y
608# CONFIG_EXPORTFS is not set
609CONFIG_SUNRPC=y
610# CONFIG_RPCSEC_GSS_KRB5 is not set
611# CONFIG_RPCSEC_GSS_SPKM3 is not set
612# CONFIG_SMB_FS is not set
613# CONFIG_CIFS is not set
614# CONFIG_NCP_FS is not set
615# CONFIG_CODA_FS is not set
616# CONFIG_AFS_FS is not set
617
618#
619# Partition Types
620#
621# CONFIG_PARTITION_ADVANCED is not set
622CONFIG_MSDOS_PARTITION=y
623
624#
625# Native Language Support
626#
627# CONFIG_NLS is not set
628
629#
630# Kernel hacking
631#
632CONFIG_DEBUG_KERNEL=y
633# CONFIG_DEBUG_STACKOVERFLOW is not set
634# CONFIG_DEBUG_SLAB is not set
635CONFIG_MAGIC_SYSRQ=y
636# CONFIG_DEBUG_SPINLOCK is not set
637# CONFIG_DEBUG_PAGEALLOC is not set
638# CONFIG_DEBUG_INFO is not set
639# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
640# CONFIG_KGDB is not set
641
642#
643# Security options
644#
645# CONFIG_KEYS is not set
646# CONFIG_SECURITY is not set
647
648#
649# Cryptographic options
650#
651# CONFIG_CRYPTO is not set
652
653#
654# Hardware crypto devices
655#
656
657#
658# Library routines
659#
660# CONFIG_CRC_CCITT is not set
661# CONFIG_CRC32 is not set
662# CONFIG_LIBCRC32C is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
new file mode 100644
index 000000000000..802621dd4867
--- /dev/null
+++ b/arch/xtensa/configs/iss_defconfig
@@ -0,0 +1,531 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.11-rc2
4# Fri Feb 25 19:21:24 2005
5#
6CONFIG_FRAME_POINTER=y
7CONFIG_XTENSA=y
8# CONFIG_UID16 is not set
9CONFIG_RWSEM_XCHGADD_ALGORITHM=y
10CONFIG_HAVE_DEC_LOCK=y
11CONFIG_GENERIC_HARDIRQS=y
12
13#
14# Code maturity level options
15#
16CONFIG_EXPERIMENTAL=y
17CONFIG_CLEAN_COMPILE=y
18CONFIG_BROKEN_ON_SMP=y
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_SWAP=y
25CONFIG_SYSVIPC=y
26# CONFIG_POSIX_MQUEUE is not set
27# CONFIG_BSD_PROCESS_ACCT is not set
28CONFIG_SYSCTL=y
29# CONFIG_AUDIT is not set
30CONFIG_LOG_BUF_SHIFT=14
31# CONFIG_HOTPLUG is not set
32# CONFIG_KOBJECT_UEVENT is not set
33# CONFIG_IKCONFIG is not set
34CONFIG_EMBEDDED=y
35CONFIG_KALLSYMS=y
36# CONFIG_KALLSYMS_ALL is not set
37# CONFIG_KALLSYMS_EXTRA_PASS is not set
38CONFIG_FUTEX=y
39CONFIG_EPOLL=y
40# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
41CONFIG_SHMEM=y
42CONFIG_CC_ALIGN_FUNCTIONS=0
43CONFIG_CC_ALIGN_LABELS=0
44CONFIG_CC_ALIGN_LOOPS=0
45CONFIG_CC_ALIGN_JUMPS=0
46# CONFIG_TINY_SHMEM is not set
47
48#
49# Loadable module support
50#
51# CONFIG_MODULES is not set
52
53#
54# Processor type and features
55#
56CONFIG_XTENSA_ARCH_LINUX_BE=y
57# CONFIG_XTENSA_ARCH_LINUX_LE is not set
58# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
59# CONFIG_XTENSA_ARCH_S5 is not set
60# CONFIG_XTENSA_CUSTOM is not set
61CONFIG_MMU=y
62# CONFIG_XTENSA_UNALIGNED_USER is not set
63# CONFIG_PREEMPT is not set
64# CONFIG_MATH_EMULATION is not set
65# CONFIG_HIGHMEM is not set
66
67#
68# Platform options
69#
70CONFIG_XTENSA_PLATFORM_ISS=y
71# CONFIG_XTENSA_PLATFORM_XT2000 is not set
72# CONFIG_XTENSA_PLATFORM_ARUBA is not set
73# CONFIG_XTENSA_CALIBRATE_CCOUNT is not set
74CONFIG_XTENSA_CPU_CLOCK=10
75# CONFIG_GENERIC_CALIBRATE_DELAY is not set
76CONFIG_CMDLINE_BOOL=y
77CONFIG_CMDLINE="console=ttyS0,38400 eth0=tuntap,,tap0 ip=192.168.168.5:192.168.168.1 root=nfs nfsroot=192.168.168.1:/opt/montavista/pro/devkit/xtensa/linux_be/target"
78CONFIG_SERIAL_CONSOLE=y
79CONFIG_XTENSA_ISS_NETWORK=y
80
81#
82# Bus options
83#
84
85#
86# PCCARD (PCMCIA/CardBus) support
87#
88# CONFIG_PCCARD is not set
89
90#
91# PC-card bridges
92#
93
94#
95# PCI Hotplug Support
96#
97
98#
99# Executable file formats
100#
101CONFIG_KCORE_ELF=y
102CONFIG_BINFMT_ELF=y
103# CONFIG_BINFMT_MISC is not set
104
105#
106# Device Drivers
107#
108
109#
110# Generic Driver Options
111#
112# CONFIG_STANDALONE is not set
113CONFIG_PREVENT_FIRMWARE_BUILD=y
114# CONFIG_FW_LOADER is not set
115# CONFIG_DEBUG_DRIVER is not set
116
117#
118# Memory Technology Devices (MTD)
119#
120# CONFIG_MTD is not set
121
122#
123# Parallel port support
124#
125# CONFIG_PARPORT is not set
126
127#
128# Plug and Play support
129#
130
131#
132# Block devices
133#
134# CONFIG_BLK_DEV_FD is not set
135# CONFIG_BLK_DEV_COW_COMMON is not set
136# CONFIG_BLK_DEV_LOOP is not set
137# CONFIG_BLK_DEV_NBD is not set
138# CONFIG_BLK_DEV_RAM is not set
139CONFIG_BLK_DEV_RAM_COUNT=16
140CONFIG_INITRAMFS_SOURCE=""
141# CONFIG_CDROM_PKTCDVD is not set
142
143#
144# IO Schedulers
145#
146CONFIG_IOSCHED_NOOP=y
147# CONFIG_IOSCHED_AS is not set
148# CONFIG_IOSCHED_DEADLINE is not set
149# CONFIG_IOSCHED_CFQ is not set
150# CONFIG_ATA_OVER_ETH is not set
151
152#
153# ATA/ATAPI/MFM/RLL support
154#
155# CONFIG_IDE is not set
156
157#
158# SCSI device support
159#
160# CONFIG_SCSI is not set
161
162#
163# Multi-device support (RAID and LVM)
164#
165# CONFIG_MD is not set
166
167#
168# Fusion MPT device support
169#
170
171#
172# IEEE 1394 (FireWire) support
173#
174
175#
176# I2O device support
177#
178
179#
180# Networking support
181#
182CONFIG_NET=y
183
184#
185# Networking options
186#
187CONFIG_PACKET=y
188# CONFIG_PACKET_MMAP is not set
189# CONFIG_NETLINK_DEV is not set
190CONFIG_UNIX=y
191# CONFIG_NET_KEY is not set
192CONFIG_INET=y
193# CONFIG_IP_MULTICAST is not set
194# CONFIG_IP_ADVANCED_ROUTER is not set
195CONFIG_IP_PNP=y
196CONFIG_IP_PNP_DHCP=y
197CONFIG_IP_PNP_BOOTP=y
198CONFIG_IP_PNP_RARP=y
199# CONFIG_NET_IPIP is not set
200# CONFIG_NET_IPGRE is not set
201# CONFIG_ARPD is not set
202# CONFIG_SYN_COOKIES is not set
203# CONFIG_INET_AH is not set
204# CONFIG_INET_ESP is not set
205# CONFIG_INET_IPCOMP is not set
206# CONFIG_INET_TUNNEL is not set
207# CONFIG_IP_TCPDIAG is not set
208# CONFIG_IP_TCPDIAG_IPV6 is not set
209# CONFIG_IPV6 is not set
210# CONFIG_NETFILTER is not set
211
212#
213# SCTP Configuration (EXPERIMENTAL)
214#
215# CONFIG_IP_SCTP is not set
216# CONFIG_SCTP_HMAC_NONE is not set
217# CONFIG_SCTP_HMAC_SHA1 is not set
218# CONFIG_SCTP_HMAC_MD5 is not set
219# CONFIG_ATM is not set
220# CONFIG_BRIDGE is not set
221# CONFIG_VLAN_8021Q is not set
222# CONFIG_DECNET is not set
223# CONFIG_LLC2 is not set
224# CONFIG_IPX is not set
225# CONFIG_ATALK is not set
226# CONFIG_X25 is not set
227# CONFIG_LAPB is not set
228# CONFIG_NET_DIVERT is not set
229# CONFIG_ECONET is not set
230# CONFIG_WAN_ROUTER is not set
231
232#
233# QoS and/or fair queueing
234#
235# CONFIG_NET_SCHED is not set
236# CONFIG_NET_SCH_CLK_JIFFIES is not set
237# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
238# CONFIG_NET_SCH_CLK_CPU is not set
239# CONFIG_NET_CLS_ROUTE is not set
240
241#
242# Network testing
243#
244# CONFIG_NET_PKTGEN is not set
245# CONFIG_NETPOLL is not set
246# CONFIG_NET_POLL_CONTROLLER is not set
247# CONFIG_HAMRADIO is not set
248# CONFIG_IRDA is not set
249# CONFIG_BT is not set
250# CONFIG_NETDEVICES is not set
251
252#
253# ISDN subsystem
254#
255# CONFIG_ISDN is not set
256
257#
258# Telephony Support
259#
260# CONFIG_PHONE is not set
261
262#
263# Input device support
264#
265CONFIG_INPUT=y
266
267#
268# Userland interfaces
269#
270# CONFIG_INPUT_MOUSEDEV is not set
271# CONFIG_INPUT_JOYDEV is not set
272# CONFIG_INPUT_TSDEV is not set
273# CONFIG_INPUT_EVDEV is not set
274# CONFIG_INPUT_EVBUG is not set
275
276#
277# Input I/O drivers
278#
279# CONFIG_GAMEPORT is not set
280CONFIG_SOUND_GAMEPORT=y
281# CONFIG_SERIO is not set
282# CONFIG_SERIO_I8042 is not set
283
284#
285# Input Device Drivers
286#
287# CONFIG_INPUT_KEYBOARD is not set
288# CONFIG_INPUT_MOUSE is not set
289# CONFIG_INPUT_JOYSTICK is not set
290# CONFIG_INPUT_TOUCHSCREEN is not set
291# CONFIG_INPUT_MISC is not set
292
293#
294# Character devices
295#
296CONFIG_VT=y
297CONFIG_VT_CONSOLE=y
298CONFIG_HW_CONSOLE=y
299# CONFIG_SERIAL_NONSTANDARD is not set
300
301#
302# Serial drivers
303#
304# CONFIG_SERIAL_8250 is not set
305
306#
307# Non-8250 serial port support
308#
309CONFIG_UNIX98_PTYS=y
310CONFIG_LEGACY_PTYS=y
311CONFIG_LEGACY_PTY_COUNT=256
312
313#
314# IPMI
315#
316# CONFIG_IPMI_HANDLER is not set
317
318#
319# Watchdog Cards
320#
321CONFIG_WATCHDOG=y
322CONFIG_WATCHDOG_NOWAYOUT=y
323
324#
325# Watchdog Device Drivers
326#
327CONFIG_SOFT_WATCHDOG=y
328# CONFIG_RTC is not set
329# CONFIG_GEN_RTC is not set
330# CONFIG_DTLK is not set
331# CONFIG_R3964 is not set
332
333#
334# Ftape, the floppy tape device driver
335#
336# CONFIG_DRM is not set
337# CONFIG_RAW_DRIVER is not set
338
339#
340# I2C support
341#
342# CONFIG_I2C is not set
343
344#
345# Dallas's 1-wire bus
346#
347# CONFIG_W1 is not set
348
349#
350# Misc devices
351#
352
353#
354# Multimedia devices
355#
356# CONFIG_VIDEO_DEV is not set
357
358#
359# Digital Video Broadcasting Devices
360#
361# CONFIG_DVB is not set
362
363#
364# Graphics support
365#
366# CONFIG_FB is not set
367
368#
369# Console display driver support
370#
371# CONFIG_VGA_CONSOLE is not set
372CONFIG_DUMMY_CONSOLE=y
373# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
374
375#
376# Sound
377#
378# CONFIG_SOUND is not set
379
380#
381# USB support
382#
383# CONFIG_USB_ARCH_HAS_HCD is not set
384# CONFIG_USB_ARCH_HAS_OHCI is not set
385
386#
387# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
388#
389
390#
391# USB Gadget Support
392#
393# CONFIG_USB_GADGET is not set
394
395#
396# MMC/SD Card support
397#
398# CONFIG_MMC is not set
399
400#
401# InfiniBand support
402#
403# CONFIG_INFINIBAND is not set
404
405#
406# File systems
407#
408# CONFIG_EXT2_FS is not set
409# CONFIG_EXT3_FS is not set
410# CONFIG_JBD is not set
411# CONFIG_REISERFS_FS is not set
412# CONFIG_JFS_FS is not set
413# CONFIG_XFS_FS is not set
414# CONFIG_MINIX_FS is not set
415# CONFIG_ROMFS_FS is not set
416# CONFIG_QUOTA is not set
417# CONFIG_DNOTIFY is not set
418# CONFIG_AUTOFS_FS is not set
419# CONFIG_AUTOFS4_FS is not set
420
421#
422# CD-ROM/DVD Filesystems
423#
424# CONFIG_ISO9660_FS is not set
425# CONFIG_UDF_FS is not set
426
427#
428# DOS/FAT/NT Filesystems
429#
430# CONFIG_MSDOS_FS is not set
431# CONFIG_VFAT_FS is not set
432# CONFIG_NTFS_FS is not set
433
434#
435# Pseudo filesystems
436#
437CONFIG_PROC_FS=y
438CONFIG_PROC_KCORE=y
439CONFIG_SYSFS=y
440CONFIG_DEVFS_FS=y
441CONFIG_DEVFS_MOUNT=y
442# CONFIG_DEVFS_DEBUG is not set
443# CONFIG_DEVPTS_FS_XATTR is not set
444CONFIG_TMPFS=y
445# CONFIG_TMPFS_XATTR is not set
446# CONFIG_HUGETLB_PAGE is not set
447CONFIG_RAMFS=y
448
449#
450# Miscellaneous filesystems
451#
452# CONFIG_ADFS_FS is not set
453# CONFIG_AFFS_FS is not set
454# CONFIG_HFS_FS is not set
455# CONFIG_HFSPLUS_FS is not set
456# CONFIG_BEFS_FS is not set
457# CONFIG_BFS_FS is not set
458# CONFIG_EFS_FS is not set
459# CONFIG_CRAMFS is not set
460# CONFIG_VXFS_FS is not set
461# CONFIG_HPFS_FS is not set
462# CONFIG_QNX4FS_FS is not set
463# CONFIG_SYSV_FS is not set
464# CONFIG_UFS_FS is not set
465
466#
467# Network File Systems
468#
469CONFIG_NFS_FS=y
470CONFIG_NFS_V3=y
471# CONFIG_NFS_V4 is not set
472CONFIG_NFS_DIRECTIO=y
473# CONFIG_NFSD is not set
474CONFIG_ROOT_NFS=y
475CONFIG_LOCKD=y
476CONFIG_LOCKD_V4=y
477# CONFIG_EXPORTFS is not set
478CONFIG_SUNRPC=y
479# CONFIG_RPCSEC_GSS_KRB5 is not set
480# CONFIG_RPCSEC_GSS_SPKM3 is not set
481# CONFIG_SMB_FS is not set
482# CONFIG_CIFS is not set
483# CONFIG_NCP_FS is not set
484# CONFIG_CODA_FS is not set
485# CONFIG_AFS_FS is not set
486
487#
488# Partition Types
489#
490# CONFIG_PARTITION_ADVANCED is not set
491CONFIG_MSDOS_PARTITION=y
492
493#
494# Native Language Support
495#
496# CONFIG_NLS is not set
497
498#
499# Kernel hacking
500#
501CONFIG_DEBUG_KERNEL=y
502# CONFIG_DEBUG_STACKOVERFLOW is not set
503# CONFIG_DEBUG_SLAB is not set
504# CONFIG_MAGIC_SYSRQ is not set
505# CONFIG_DEBUG_SPINLOCK is not set
506# CONFIG_DEBUG_PAGEALLOC is not set
507# CONFIG_DEBUG_INFO is not set
508# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
509# CONFIG_KGDB is not set
510
511#
512# Security options
513#
514# CONFIG_KEYS is not set
515# CONFIG_SECURITY is not set
516
517#
518# Cryptographic options
519#
520# CONFIG_CRYPTO is not set
521
522#
523# Hardware crypto devices
524#
525
526#
527# Library routines
528#
529# CONFIG_CRC_CCITT is not set
530# CONFIG_CRC32 is not set
531# CONFIG_LIBCRC32C is not set
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
new file mode 100644
index 000000000000..d573017a5dde
--- /dev/null
+++ b/arch/xtensa/kernel/Makefile
@@ -0,0 +1,18 @@
1#
2# Makefile for the Linux/Xtensa kernel.
3#
4
5extra-y := head.o vmlinux.lds
6
7
8obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
9 setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
10 pci-dma.o
11
12## windowspill.o
13
14obj-$(CONFIG_KGDB) += xtensa-stub.o
15obj-$(CONFIG_PCI) += pci.o
16obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
17
18
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
new file mode 100644
index 000000000000..74b1e90ef08c
--- /dev/null
+++ b/arch/xtensa/kernel/align.S
@@ -0,0 +1,459 @@
1/*
2 * arch/xtensa/kernel/align.S
3 *
4 * Handle unalignment exceptions in kernel space.
5 *
6 * This file is subject to the terms and conditions of the GNU General
7 * Public License. See the file "COPYING" in the main directory of
8 * this archive for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica, Inc.
11 *
12 * Rewritten by Chris Zankel <chris@zankel.net>
13 *
14 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
16 */
17
18#include <linux/linkage.h>
19#include <asm/ptrace.h>
20#include <asm/ptrace.h>
21#include <asm/current.h>
22#include <asm/offsets.h>
23#include <asm/pgtable.h>
24#include <asm/processor.h>
25#include <asm/page.h>
26#include <asm/thread_info.h>
27
28#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
29
30/* First-level exception handler for unaligned exceptions.
31 *
32 * Note: This handler works only for kernel exceptions. Unaligned user
33 * access should get a seg fault.
34 */
35
36/* Big and little endian 16-bit values are located in
37 * different halves of a register. HWORD_START helps to
38 * abstract the notion of extracting a 16-bit value from a
39 * register.
40 * We also have to define new shifting instructions because
41 * lsb and msb are on 'opposite' ends in a register for
42 * different endian machines.
43 *
44 * Assume a memory region in ascending address:
45 * 0 1 2 3|4 5 6 7
46 *
47 * When loading one word into a register, the content of that register is:
48 * LE 3 2 1 0, 7 6 5 4
49 * BE 0 1 2 3, 4 5 6 7
50 *
51 * Masking the bits of the higher/lower address means:
52 * LE X X 0 0, 0 0 X X
53 * BE 0 0 X X, X X 0 0
54 *
55 * Shifting to higher/lower addresses, means:
56 * LE shift left / shift right
57 * BE shift right / shift left
58 *
59 * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
60 * LE mask 0 0 X X / shift left
61 * BE shift left / mask 0 0 X X
62 */
63
64#define UNALIGNED_USER_EXCEPTION
65
66#if XCHAL_HAVE_BE
67
68#define HWORD_START 16
69#define INSN_OP0 28
70#define INSN_T 24
71#define INSN_OP1 16
72
73.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
74.macro __ssa8 r; ssa8b \r; .endm
75.macro __ssa8r r; ssa8l \r; .endm
76.macro __sh r, s; srl \r, \s; .endm
77.macro __sl r, s; sll \r, \s; .endm
78.macro __exth r, s; extui \r, \s, 0, 16; .endm
79.macro __extl r, s; slli \r, \s, 16; .endm
80
81#else
82
83#define HWORD_START 0
84#define INSN_OP0 0
85#define INSN_T 4
86#define INSN_OP1 12
87
88.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
89.macro __ssa8 r; ssa8l \r; .endm
90.macro __ssa8r r; ssa8b \r; .endm
91.macro __sh r, s; sll \r, \s; .endm
92.macro __sl r, s; srl \r, \s; .endm
93.macro __exth r, s; slli \r, \s, 16; .endm
94.macro __extl r, s; extui \r, \s, 0, 16; .endm
95
96#endif
97
98/*
99 * xxxx xxxx = imm8 field
100 * yyyy = imm4 field
101 * ssss = s field
102 * tttt = t field
103 *
104 * 16 0
105 * -------------------
106 * L32I.N yyyy ssss tttt 1000
107 * S32I.N yyyy ssss tttt 1001
108 *
109 * 23 0
110 * -----------------------------
111 * res 0000 0010
112 * L16UI xxxx xxxx 0001 ssss tttt 0010
113 * L32I xxxx xxxx 0010 ssss tttt 0010
114 * XXX 0011 ssss tttt 0010
115 * XXX 0100 ssss tttt 0010
116 * S16I xxxx xxxx 0101 ssss tttt 0010
117 * S32I xxxx xxxx 0110 ssss tttt 0010
118 * XXX 0111 ssss tttt 0010
119 * XXX 1000 ssss tttt 0010
120 * L16SI xxxx xxxx 1001 ssss tttt 0010
121 * XXX 1010 0010
122 * **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
123 * XXX 1100 0010
124 * XXX 1101 0010
125 * XXX 1110 0010
126 * **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
127 * -----------------------------
128 * ^ ^ ^
129 * sub-opcode (NIBBLE_R) -+ | |
130 * t field (NIBBLE_T) -----------+ |
131 * major opcode (NIBBLE_OP0) --------------+
132 */
133
134#define OP0_L32I_N 0x8 /* load immediate narrow */
135#define OP0_S32I_N 0x9 /* store immediate narrow */
136#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
137#define OP1_SI_BIT 2 /* OP1 bit number for stores */
138
139#define OP1_L32I 0x2
140#define OP1_L16UI 0x1
141#define OP1_L16SI 0x9
142#define OP1_L32AI 0xb
143
144#define OP1_S32I 0x6
145#define OP1_S16I 0x5
146#define OP1_S32RI 0xf
147
148/*
149 * Entry condition:
150 *
151 * a0: trashed, original value saved on stack (PT_AREG0)
152 * a1: a1
153 * a2: new stack pointer, original in DEPC
154 * a3: dispatch table
155 * depc: a2, original value saved on stack (PT_DEPC)
156 * excsave_1: a3
157 *
158 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
159 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
160 */
161
162
163ENTRY(fast_unaligned)
164
165 /* Note: We don't expect the address to be aligned on a word
166 * boundary. After all, the processor generated that exception
167 * and it would be a hardware fault.
168 */
169
170 /* Save some working register */
171
172 s32i a4, a2, PT_AREG4
173 s32i a5, a2, PT_AREG5
174 s32i a6, a2, PT_AREG6
175 s32i a7, a2, PT_AREG7
176 s32i a8, a2, PT_AREG8
177
178 rsr a0, DEPC
179 xsr a3, EXCSAVE_1
180 s32i a0, a2, PT_AREG2
181 s32i a3, a2, PT_AREG3
182
183 /* Keep value of SAR in a0 */
184
185 rsr a0, SAR
186 rsr a8, EXCVADDR # load unaligned memory address
187
188 /* Now, identify one of the following load/store instructions.
189 *
190 * The only possible danger of a double exception on the
191 * following l32i instructions is kernel code in vmalloc
192 * memory. The processor was just executing at the EPC_1
193 * address, and indeed, already fetched the instruction. That
194 * guarantees a TLB mapping, which hasn't been replaced by
195 * this unaligned exception handler that uses only static TLB
196 * mappings. However, high-level interrupt handlers might
197 * modify TLB entries, so for the generic case, we register a
198 * TABLE_FIXUP handler here, too.
199 */
200
201 /* a3...a6 saved on stack, a2 = SP */
202
203 /* Extract the instruction that caused the unaligned access. */
204
205 rsr a7, EPC_1 # load exception address
206 movi a3, ~3
207 and a3, a3, a7 # mask lower bits
208
209 l32i a4, a3, 0 # load 2 words
210 l32i a5, a3, 4
211
212 __ssa8 a7
213 __src_b a4, a4, a5 # a4 has the instruction
214
215 /* Analyze the instruction (load or store?). */
216
217 extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
218
219#if XCHAL_HAVE_NARROW
220 _beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
221 addi a6, a5, -OP0_S32I_N
222 _beqz a6, .Lstore # S32I.N, do a store
223#endif
224 /* 'store indicator bit' not set, jump */
225 _bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
226
227 /* Store: Jump to table entry to get the value in the source register.*/
228
229.Lstore:movi a5, .Lstore_table # table
230 extui a6, a4, INSN_T, 4 # get source register
231 addx8 a5, a6, a5
232 jx a5 # jump into table
233
234 /* Invalid instruction, CRITICAL! */
235.Linvalid_instruction_load:
236 j .Linvalid_instruction
237
238 /* Load: Load memory address. */
239
240.Lload: movi a3, ~3
241 and a3, a3, a8 # align memory address
242
243 __ssa8 a8
244#ifdef UNALIGNED_USER_EXCEPTION
245 addi a3, a3, 8
246 l32e a5, a3, -8
247 l32e a6, a3, -4
248#else
249 l32i a5, a3, 0
250 l32i a6, a3, 4
251#endif
252 __src_b a3, a5, a6 # a3 has the data word
253
254#if XCHAL_HAVE_NARROW
255 addi a7, a7, 2 # increment PC (assume 16-bit insn)
256
257 extui a5, a4, INSN_OP0, 4
258 _beqi a5, OP0_L32I_N, 1f # l32i.n: jump
259
260 addi a7, a7, 1
261#else
262 addi a7, a7, 3
263#endif
264
265 extui a5, a4, INSN_OP1, 4
266 _beqi a5, OP1_L32I, 1f # l32i: jump
267
268 extui a3, a3, 0, 16 # extract lower 16 bits
269 _beqi a5, OP1_L16UI, 1f
270 addi a5, a5, -OP1_L16SI
271 _bnez a5, .Linvalid_instruction_load
272
273 /* sign extend value */
274
275 slli a3, a3, 16
276 srai a3, a3, 16
277
278 /* Set target register. */
279
2801:
281
282#if XCHAL_HAVE_LOOP
283 rsr a3, LEND # check if we reached LEND
284 bne a7, a3, 1f
285 rsr a3, LCOUNT # and LCOUNT != 0
286 beqz a3, 1f
287 addi a3, a3, -1 # decrement LCOUNT and set
288 rsr a7, LBEG # set PC to LBEGIN
289 wsr a3, LCOUNT
290#endif
291
2921: wsr a7, EPC_1 # skip load instruction
293 extui a4, a4, INSN_T, 4 # extract target register
294 movi a5, .Lload_table
295 addx8 a4, a4, a5
296 jx a4 # jump to entry for target register
297
298 .align 8
299.Lload_table:
300 s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
301 mov a1, a3; _j .Lexit; .align 8 # fishy??
302 s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
303 s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
304 s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
305 s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
306 s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
307 s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
308 s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
309 mov a9, a3 ; _j .Lexit; .align 8
310 mov a10, a3 ; _j .Lexit; .align 8
311 mov a11, a3 ; _j .Lexit; .align 8
312 mov a12, a3 ; _j .Lexit; .align 8
313 mov a13, a3 ; _j .Lexit; .align 8
314 mov a14, a3 ; _j .Lexit; .align 8
315 mov a15, a3 ; _j .Lexit; .align 8
316
317.Lstore_table:
318 l32i a3, a2, PT_AREG0; _j 1f; .align 8
319 mov a3, a1; _j 1f; .align 8 # fishy??
320 l32i a3, a2, PT_AREG2; _j 1f; .align 8
321 l32i a3, a2, PT_AREG3; _j 1f; .align 8
322 l32i a3, a2, PT_AREG4; _j 1f; .align 8
323 l32i a3, a2, PT_AREG5; _j 1f; .align 8
324 l32i a3, a2, PT_AREG6; _j 1f; .align 8
325 l32i a3, a2, PT_AREG7; _j 1f; .align 8
326 l32i a3, a2, PT_AREG8; _j 1f; .align 8
327 mov a3, a9 ; _j 1f; .align 8
328 mov a3, a10 ; _j 1f; .align 8
329 mov a3, a11 ; _j 1f; .align 8
330 mov a3, a12 ; _j 1f; .align 8
331 mov a3, a13 ; _j 1f; .align 8
332 mov a3, a14 ; _j 1f; .align 8
333 mov a3, a15 ; _j 1f; .align 8
334
3351: # a7: instruction pointer, a4: instruction, a3: value
336
337 movi a6, 0 # mask: ffffffff:00000000
338
339#if XCHAL_HAVE_NARROW
340 addi a7, a7, 2 # incr. PC,assume 16-bit instruction
341
342 extui a5, a4, INSN_OP0, 4 # extract OP0
343 addi a5, a5, -OP0_S32I_N
344 _beqz a5, 1f # s32i.n: jump
345
346 addi a7, a7, 1 # increment PC, 32-bit instruction
347#else
348 addi a7, a7, 3 # increment PC, 32-bit instruction
349#endif
350
351 extui a5, a4, INSN_OP1, 4 # extract OP1
352 _beqi a5, OP1_S32I, 1f # jump if 32 bit store
353 _bnei a5, OP1_S16I, .Linvalid_instruction_store
354
355 movi a5, -1
356 __extl a3, a3 # get 16-bit value
357 __exth a6, a5 # get 16-bit mask ffffffff:ffff0000
358
359 /* Get memory address */
360
3611:
362#if XCHAL_HAVE_LOOP
363 rsr a3, LEND # check if we reached LEND
364 bne a7, a3, 1f
365 rsr a3, LCOUNT # and LCOUNT != 0
366 beqz a3, 1f
367 addi a3, a3, -1 # decrement LCOUNT and set
368 rsr a7, LBEG # set PC to LBEGIN
369 wsr a3, LCOUNT
370#endif
371
3721: wsr a7, EPC_1 # skip store instruction
373 movi a4, ~3
374 and a4, a4, a8 # align memory address
375
376 /* Insert value into memory */
377
378 movi a5, -1 # mask: ffffffff:XXXX0000
379#ifdef UNALIGNED_USER_EXCEPTION
380 addi a4, a4, 8
381#endif
382
383 __ssa8r a8
384 __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
385 __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
386#ifdef UNALIGNED_USER_EXCEPTION
387 l32e a5, a4, -8
388#else
389 l32i a5, a4, 0 # load lower address word
390#endif
391 and a5, a5, a7 # mask
392 __sh a7, a3 # shift value
393 or a5, a5, a7 # or with original value
394#ifdef UNALIGNED_USER_EXCEPTION
395 s32e a5, a4, -8
396 l32e a7, a4, -4
397#else
398 s32i a5, a4, 0 # store
399 l32i a7, a4, 4 # same for upper address word
400#endif
401 __sl a5, a3
402 and a6, a7, a6
403 or a6, a6, a5
404#ifdef UNALIGNED_USER_EXCEPTION
405 s32e a6, a4, -4
406#else
407 s32i a6, a4, 4
408#endif
409
410 /* Done. restore stack and return */
411
412.Lexit:
413 movi a4, 0
414 rsr a3, EXCSAVE_1
415 s32i a4, a3, EXC_TABLE_FIXUP
416
417 /* Restore working register */
418
419 l32i a7, a2, PT_AREG7
420 l32i a6, a2, PT_AREG6
421 l32i a5, a2, PT_AREG5
422 l32i a4, a2, PT_AREG4
423 l32i a3, a2, PT_AREG3
424
425 /* restore SAR and return */
426
427 wsr a0, SAR
428 l32i a0, a2, PT_AREG0
429 l32i a2, a2, PT_AREG2
430 rfe
431
432 /* We cannot handle this exception. */
433
434 .extern _kernel_exception
435.Linvalid_instruction_store:
436.Linvalid_instruction:
437
438 /* Restore a4...a8 and SAR, set SP, and jump to default exception. */
439
440 l32i a8, a2, PT_AREG8
441 l32i a7, a2, PT_AREG7
442 l32i a6, a2, PT_AREG6
443 l32i a5, a2, PT_AREG5
444 l32i a4, a2, PT_AREG4
445 wsr a0, SAR
446 mov a1, a2
447
448 rsr a0, PS
449 bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
450
451 movi a0, _kernel_exception
452 jx a0
453
4541: movi a0, _user_exception
455 jx a0
456
457
458#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
459
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
new file mode 100644
index 000000000000..840cd9a1d3d2
--- /dev/null
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -0,0 +1,94 @@
1/*
2 * arch/xtensa/kernel/asm-offsets.c
3 *
4 * Generates definitions from c-type structures used by assembly sources.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <asm/processor.h>
16
17#include <linux/types.h>
18#include <linux/sched.h>
19#include <linux/stddef.h>
20#include <linux/thread_info.h>
21#include <linux/ptrace.h>
22#include <asm/ptrace.h>
23#include <asm/processor.h>
24#include <asm/uaccess.h>
25
26#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
27#define BLANK() asm volatile("\n->" : : )
28
29int main(void)
30{
31 /* struct pt_regs */
32 DEFINE(PT_PC, offsetof (struct pt_regs, pc));
33 DEFINE(PT_PS, offsetof (struct pt_regs, ps));
34 DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
35 DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
36 DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
37 DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
38 DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
39 DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
40 DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
41 DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
42 DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
43 DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
44 DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
45 DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
46 DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
47 DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
48 DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
49 DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
50 DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
51 DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
52 DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
53 DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
54 DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
55 DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
56 DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
57 DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
58 DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
59 DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
60 DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
61 DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
62 DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
63 DEFINE(PT_SIZE, sizeof(struct pt_regs));
64 DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
65 DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
66 BLANK();
67
68 /* struct task_struct */
69 DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
70 DEFINE(TASK_MM, offsetof (struct task_struct, mm));
71 DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
72 DEFINE(TASK_PID, offsetof (struct task_struct, pid));
73 DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
74 DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
75 DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
76 BLANK();
77
78 /* struct thread_info (offset from start_struct) */
79 DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
80 DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
81 DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
82 DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
83 BLANK();
84
85 /* struct mm_struct */
86 DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
87 DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
88 DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
89 BLANK();
90 DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
91 return 0;
92}
93
94
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
new file mode 100644
index 000000000000..356192a4d39d
--- /dev/null
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -0,0 +1,201 @@
1/*
2 * arch/xtensa/kernel/coprocessor.S
3 *
4 * Xtensa processor configuration-specific table of coprocessor and
5 * other custom register layout information.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2003 - 2005 Tensilica Inc.
12 *
13 * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
14 */
15
16/*
17 * This module contains a table that describes the layout of the various
18 * custom registers and states associated with each coprocessor, as well
19 * as those not associated with any coprocessor ("extra state").
20 * This table is included with core dumps and is available via the ptrace
21 * interface, allowing the layout of such register/state information to
22 * be modified in the kernel without affecting the debugger. Each
23 * register or state is identified using a 32-bit "libdb target number"
24 * assigned when the Xtensa processor is generated.
25 */
26
27#include <linux/config.h>
28#include <linux/linkage.h>
29#include <asm/processor.h>
30
31#if XCHAL_HAVE_CP
32
33#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
34
35ENTRY(release_coprocessors)
36
37 entry a1, 16
38 # a2: task
39 movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
40 movi a4, coprocessor_info+CP_LAST # a4: owner-table
41 # a5: tmp
42 movi a6, 0 # a6: 0
43 rsil a7, LOCKLEVEL # a7: PS
44
451: /* Check if task is coprocessor owner of coprocessor[i]. */
46
47 l32i a5, a4, COPROCESSOR_INFO_OWNER
48 srli a3, a3, 1
49 beqz a3, 1f
50 addi a4, a4, -8
51 beq a2, a5, 1b
52
53 /* Found an entry: Clear entry CPENABLE bit to disable CP. */
54
55 rsr a5, CPENABLE
56 s32i a6, a4, COPROCESSOR_INFO_OWNER
57 xor a5, a3, a5
58 wsr a5, CPENABLE
59
60 bnez a3, 1b
61
621: wsr a7, PS
63 rsync
64 retw
65
66
67ENTRY(disable_coprocessor)
68 entry sp, 16
69 rsil a7, LOCKLEVEL
70 rsr a3, CPENABLE
71 movi a4, 1
72 ssl a2
73 sll a4, a4
74 and a4, a3, a4
75 xor a3, a3, a4
76 wsr a3, CPENABLE
77 wsr a7, PS
78 rsync
79 retw
80
81ENTRY(enable_coprocessor)
82 entry sp, 16
83 rsil a7, LOCKLEVEL
84 rsr a3, CPENABLE
85 movi a4, 1
86 ssl a2
87 sll a4, a4
88 or a3, a3, a4
89 wsr a3, CPENABLE
90 wsr a7, PS
91 rsync
92 retw
93
94#endif
95
96ENTRY(save_coprocessor_extra)
97 entry sp, 16
98 xchal_extra_store_funcbody
99 retw
100
101ENTRY(restore_coprocessor_extra)
102 entry sp, 16
103 xchal_extra_load_funcbody
104 retw
105
106ENTRY(save_coprocessor_registers)
107 entry sp, 16
108 xchal_cpi_store_funcbody
109 retw
110
111ENTRY(restore_coprocessor_registers)
112 entry sp, 16
113 xchal_cpi_load_funcbody
114 retw
115
116
117/*
118 * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
119 * describe the contents of coprocessor & extra save areas in terms of
120 * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
121 * latter macros here; they expand into a table of the format we want.
122 * The general format is:
123 *
124 * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
125 * bitmask, rsv2, rsv3)
126 * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
127 * bitmask, rsv2, rsv3)
128 * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
129 * numentries, contentsize, regname_base,
130 * regfile_name, rsv2, rsv3)
131 *
132 * For this table, we only care about the <libdbnum>, <offset> and <size>
133 * fields.
134 */
135
136/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
137
138#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
139 bitmask, rsv2, rsv3) \
140 reg_entry libdbnum, offset, size ;
141#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
142 bitmask, rsv2, rsv3) \
143 reg_entry libdbnum, offset, size ;
144#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
145 numentries, contentsize, regname_base, \
146 regfile_name, rsv2, rsv3) \
147 reg_entry libdbnum, offset, size ;
148
149/* A single table entry: */
150 .macro reg_entry libdbnum, offset, size
151 .ifne (__last_offset-(__last_group_offset+\offset))
152 /* padding entry */
153 .word (0xFC000000+__last_offset-(__last_group_offset+\offset))
154 .endif
155 .word \libdbnum /* actual entry */
156 .set __last_offset, __last_group_offset+\offset+\size
157 .endm /* reg_entry */
158
159
160/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
161 .macro reg_group cpnum, num_entries, align
162 .set __last_group_offset, (__last_offset + \align- 1) & -\align
163 .ifne \num_entries
164 .word 0xFD000000+(\cpnum<<16)+\num_entries
165 .endif
166 .endm /* reg_group */
167
168/*
169 * Register info tables.
170 */
171
172 .section .rodata, "a"
173 .globl _xtensa_reginfo_tables
174 .globl _xtensa_reginfo_table_size
175 .align 4
176_xtensa_reginfo_table_size:
177 .word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
178
179_xtensa_reginfo_tables:
180 .set __last_offset, 0
181 reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
182 XCHAL_EXTRA_SA_CONTENTS_LIBDB
183 reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
184 XCHAL_CP0_SA_CONTENTS_LIBDB
185 reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
186 XCHAL_CP1_SA_CONTENTS_LIBDB
187 reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
188 XCHAL_CP2_SA_CONTENTS_LIBDB
189 reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
190 XCHAL_CP3_SA_CONTENTS_LIBDB
191 reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
192 XCHAL_CP4_SA_CONTENTS_LIBDB
193 reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
194 XCHAL_CP5_SA_CONTENTS_LIBDB
195 reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
196 XCHAL_CP6_SA_CONTENTS_LIBDB
197 reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
198 XCHAL_CP7_SA_CONTENTS_LIBDB
199 .word 0xFC000000 /* invalid register number,marks end of table*/
200_xtensa_reginfo_table_end:
201
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
new file mode 100644
index 000000000000..c64a01f71de6
--- /dev/null
+++ b/arch/xtensa/kernel/entry.S
@@ -0,0 +1,1996 @@
1/*
2 * arch/xtensa/kernel/entry.S
3 *
4 * Low-level exception handling
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2004-2005 by Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/linkage.h>
17#include <asm/offsets.h>
18#include <asm/processor.h>
19#include <asm/thread_info.h>
20#include <asm/uaccess.h>
21#include <asm/unistd.h>
22#include <asm/ptrace.h>
23#include <asm/current.h>
24#include <asm/pgtable.h>
25#include <asm/page.h>
26#include <asm/signal.h>
27#include <xtensa/coreasm.h>
28
29/* Unimplemented features. */
30
31#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
32#undef KERNEL_STACK_OVERFLOW_CHECK
33#undef PREEMPTIBLE_KERNEL
34#undef ALLOCA_EXCEPTION_IN_IRAM
35
36/* Not well tested.
37 *
38 * - fast_coprocessor
39 */
40
41/*
42 * Macro to find first bit set in WINDOWBASE from the left + 1
43 *
44 * 100....0 -> 1
45 * 010....0 -> 2
46 * 000....1 -> WSBITS
47 */
48
49 .macro ffs_ws bit mask
50
51#if XCHAL_HAVE_NSA
52 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0)
53 addi \bit, \bit, WSBITS - 32 + 1 # uppest bit set -> return 1
54#else
55 movi \bit, WSBITS
56#if WSBITS > 16
57 _bltui \mask, 0x10000, 99f
58 addi \bit, \bit, -16
59 extui \mask, \mask, 16, 16
60#endif
61#if WSBITS > 8
6299: _bltui \mask, 0x100, 99f
63 addi \bit, \bit, -8
64 srli \mask, \mask, 8
65#endif
6699: _bltui \mask, 0x10, 99f
67 addi \bit, \bit, -4
68 srli \mask, \mask, 4
6999: _bltui \mask, 0x4, 99f
70 addi \bit, \bit, -2
71 srli \mask, \mask, 2
7299: _bltui \mask, 0x2, 99f
73 addi \bit, \bit, -1
7499:
75
76#endif
77 .endm
78
79/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
80
81/*
82 * First-level exception handler for user exceptions.
83 * Save some special registers, extra states and all registers in the AR
84 * register file that were in use in the user task, and jump to the common
85 * exception code.
86 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
87 * save them for kernel exceptions).
88 *
89 * Entry condition for user_exception:
90 *
91 * a0: trashed, original value saved on stack (PT_AREG0)
92 * a1: a1
93 * a2: new stack pointer, original value in depc
94 * a3: dispatch table
95 * depc: a2, original value saved on stack (PT_DEPC)
96 * excsave1: a3
97 *
98 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
99 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
100 *
101 * Entry condition for _user_exception:
102 *
103 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
104 * excsave has been restored, and
105 * stack pointer (a1) has been set.
106 *
107 * Note: _user_exception might be at an odd address. Don't use call0..call12
108 */
109
110ENTRY(user_exception)
111
112 /* Save a2, a3, and depc, restore excsave_1 and set SP. */
113
114 xsr a3, EXCSAVE_1
115 rsr a0, DEPC
116 s32i a1, a2, PT_AREG1
117 s32i a0, a2, PT_AREG2
118 s32i a3, a2, PT_AREG3
119 mov a1, a2
120
121 .globl _user_exception
122_user_exception:
123
124 /* Save SAR and turn off single stepping */
125
126 movi a2, 0
127 rsr a3, SAR
128 wsr a2, ICOUNTLEVEL
129 s32i a3, a1, PT_SAR
130
131 /* Rotate ws so that the current windowbase is at bit0. */
132 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
133
134 rsr a2, WINDOWBASE
135 rsr a3, WINDOWSTART
136 ssr a2
137 s32i a2, a1, PT_WINDOWBASE
138 s32i a3, a1, PT_WINDOWSTART
139 slli a2, a3, 32-WSBITS
140 src a2, a3, a2
141 srli a2, a2, 32-WSBITS
142 s32i a2, a1, PT_WMASK # needed for restoring registers
143
144 /* Save only live registers. */
145
146 _bbsi.l a2, 1, 1f
147 s32i a4, a1, PT_AREG4
148 s32i a5, a1, PT_AREG5
149 s32i a6, a1, PT_AREG6
150 s32i a7, a1, PT_AREG7
151 _bbsi.l a2, 2, 1f
152 s32i a8, a1, PT_AREG8
153 s32i a9, a1, PT_AREG9
154 s32i a10, a1, PT_AREG10
155 s32i a11, a1, PT_AREG11
156 _bbsi.l a2, 3, 1f
157 s32i a12, a1, PT_AREG12
158 s32i a13, a1, PT_AREG13
159 s32i a14, a1, PT_AREG14
160 s32i a15, a1, PT_AREG15
161 _bnei a2, 1, 1f # only one valid frame?
162
163 /* Only one valid frame, skip saving regs. */
164
165 j 2f
166
167 /* Save the remaining registers.
168 * We have to save all registers up to the first '1' from
169 * the right, except the current frame (bit 0).
170 * Assume a2 is: 001001000110001
171 * All regiser frames starting from the top fiel to the marked '1'
172 * must be saved.
173 */
174
1751: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0
176 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1
177 and a3, a3, a2 # max. only one bit is set
178
179 /* Find number of frames to save */
180
181 ffs_ws a0, a3 # number of frames to the '1' from left
182
183 /* Store information into WMASK:
184 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
185 * bits 4...: number of valid 4-register frames
186 */
187
188 slli a3, a0, 4 # number of frames to save in bits 8..4
189 extui a2, a2, 0, 4 # mask for the first 16 registers
190 or a2, a3, a2
191 s32i a2, a1, PT_WMASK # needed when we restore the reg-file
192
193 /* Save 4 registers at a time */
194
1951: rotw -1
196 s32i a0, a5, PT_AREG_END - 16
197 s32i a1, a5, PT_AREG_END - 12
198 s32i a2, a5, PT_AREG_END - 8
199 s32i a3, a5, PT_AREG_END - 4
200 addi a0, a4, -1
201 addi a1, a5, -16
202 _bnez a0, 1b
203
204 /* WINDOWBASE still in SAR! */
205
206 rsr a2, SAR # original WINDOWBASE
207 movi a3, 1
208 ssl a2
209 sll a3, a3
210 wsr a3, WINDOWSTART # set corresponding WINDOWSTART bit
211 wsr a2, WINDOWBASE # and WINDOWSTART
212 rsync
213
214 /* We are back to the original stack pointer (a1) */
215
2162:
217#if XCHAL_EXTRA_SA_SIZE
218
219 /* For user exceptions, save the extra state into the user's TCB.
220 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
221 */
222
223 GET_CURRENT(a2,a1)
224 addi a2, a2, THREAD_CP_SAVE
225 xchal_extra_store_funcbody
226#endif
227
228 /* Now, jump to the common exception handler. */
229
230 j common_exception
231
232
233/*
234 * First-level exit handler for kernel exceptions
235 * Save special registers and the live window frame.
236 * Note: Even though we change the stack pointer, we don't have to do a
237 * MOVSP here, as we do that when we return from the exception.
238 * (See comment in the kernel exception exit code)
239 *
240 * Entry condition for kernel_exception:
241 *
242 * a0: trashed, original value saved on stack (PT_AREG0)
243 * a1: a1
244 * a2: new stack pointer, original in DEPC
245 * a3: dispatch table
246 * depc: a2, original value saved on stack (PT_DEPC)
247 * excsave_1: a3
248 *
249 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
250 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
251 *
252 * Entry condition for _kernel_exception:
253 *
254 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
255 * excsave has been restored, and
256 * stack pointer (a1) has been set.
257 *
258 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
259 */
260
261ENTRY(kernel_exception)
262
263 /* Save a0, a2, a3, DEPC and set SP. */
264
265 xsr a3, EXCSAVE_1 # restore a3, excsave_1
266 rsr a0, DEPC # get a2
267 s32i a1, a2, PT_AREG1
268 s32i a0, a2, PT_AREG2
269 s32i a3, a2, PT_AREG3
270 mov a1, a2
271
272 .globl _kernel_exception
273_kernel_exception:
274
275 /* Save SAR and turn off single stepping */
276
277 movi a2, 0
278 rsr a3, SAR
279 wsr a2, ICOUNTLEVEL
280 s32i a3, a1, PT_SAR
281
282 /* Rotate ws so that the current windowbase is at bit0. */
283 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
284
285 rsr a2, WINDOWBASE # don't need to save these, we only
286 rsr a3, WINDOWSTART # need shifted windowstart: windowmask
287 ssr a2
288 slli a2, a3, 32-WSBITS
289 src a2, a3, a2
290 srli a2, a2, 32-WSBITS
291 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit
292
293 /* Save only the live window-frame */
294
295 _bbsi.l a2, 1, 1f
296 s32i a4, a1, PT_AREG4
297 s32i a5, a1, PT_AREG5
298 s32i a6, a1, PT_AREG6
299 s32i a7, a1, PT_AREG7
300 _bbsi.l a2, 2, 1f
301 s32i a8, a1, PT_AREG8
302 s32i a9, a1, PT_AREG9
303 s32i a10, a1, PT_AREG10
304 s32i a11, a1, PT_AREG11
305 _bbsi.l a2, 3, 1f
306 s32i a12, a1, PT_AREG12
307 s32i a13, a1, PT_AREG13
308 s32i a14, a1, PT_AREG14
309 s32i a15, a1, PT_AREG15
310
3111:
312
313#ifdef KERNEL_STACK_OVERFLOW_CHECK
314
315 /* Stack overflow check, for debugging */
316 extui a2, a1, TASK_SIZE_BITS,XX
317 movi a3, SIZE??
318 _bge a2, a3, out_of_stack_panic
319
320#endif
321
/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */

	rsr	a2, DEBUGCAUSE
	rsr	a3, EPC_1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	rsr	a3, EXCVADDR
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
	xsr	a2, LCOUNT		# clear LCOUNT, save old value
	s32i	a2, a1, PT_LCOUNT

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a0, EXCCAUSE
	movi	a3, 0
	rsr	a2, EXCSAVE_1		# EXCSAVE_1 holds the exc_table pointer
	s32i	a0, a1, PT_EXCCAUSE
	s32i	a3, a2, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid,
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
	 *     (interrupts disabled) and if this exception is not an interrupt.
	 */

	rsr	a3, PS
	addi	a0, a0, -4		# a0 == 0 iff EXCCAUSE was 4 (interrupt)
	movi	a2, 1
	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
	movi	a2, PS_WOE_MASK
	or	a3, a3, a2
	rsr	a0, EXCCAUSE		# reload EXCCAUSE (a0 was consumed above)
	xsr	a3, PS

	s32i	a3, a1, PT_PS		# save ps

	/* Save LBEG, LEND */

	rsr	a2, LBEG
	rsr	a3, LEND
	s32i	a2, a1, PT_LBEG
	s32i	a3, a1, PT_LEND

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	movi	a4, exc_table
	mov	a6, a1			# pass stack frame
	mov	a7, a0			# pass EXCCAUSE
	addx4	a4, a0, a4		# index table by exception cause
	l32i	a4, a4, EXC_TABLE_DEFAULT	# load handler

	/* Call the second-level handler */

	callx4	a4
394
	/* Jump here for exception exit */

common_exception_return:

	/* Jump if we are returning from kernel exceptions. */

1:	l32i	a3, a1, PT_PS
	_bbsi.l	a3, PS_UM_SHIFT, 2f	# PS.UM set -> came from user mode
	j	kernel_exception_exit

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 */

2:	wsr	a3, PS		/* disable interrupts */

	/* Check for signals (keep interrupts disabled while we read TI_FLAGS)
	 * Note: PS.INTLEVEL = 0, PS.EXCM = 1
	 */

	GET_THREAD_INFO(a2,a1)
	l32i	a4, a2, TI_FLAGS

	/* Enable interrupts again.
	 * Note: When we get here, we certainly have handled any interrupts.
	 * (Hint: There is only one user exception frame on stack)
	 */

	movi	a3, PS_WOE_MASK		# PS value used to reenable interrupts below

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbci.l	a4, TIF_SIGPENDING, 4f

#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f	# skip signals on double exc.
#endif

	/* Reenable interrupts and call do_signal() */

	wsr	a3, PS
	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
	mov	a6, a1
	movi	a7, 0
	callx4	a4
	j	1b		# re-check flags after handling signals

3:	/* Reenable interrupts and reschedule */

	wsr	a3, PS
	movi	a4, schedule	# void schedule (void)
	callx4	a4
	j	1b		# re-check flags after rescheduling

	/* Restore the state of the task and return from the exception. */


	/* If we are returning from a user exception, and the process
	 * to run next has PT_SINGLESTEP set, we want to setup
	 * ICOUNT and ICOUNTLEVEL to step one instruction.
	 * PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
	 */

4:	/* a2 holds GET_THREAD_INFO(a2,a1) from above */

	l32i	a3, a2, TI_TASK
	l32i	a3, a3, TASK_PTRACE
	bbci.l	a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set

	movi	a3, -2			# PT_SINGLESTEP flag is set,
	movi	a4, 1			# icountlevel of 1 means it won't
	wsr	a3, ICOUNT		# start counting until after rfe
	wsr	a4, ICOUNTLEVEL		# so setup icount & icountlevel.
	isync

1:

#if XCHAL_EXTRA_SA_SIZE

	/* For user exceptions, restore the extra state from the user's TCB. */

	/* Note: a2 still contains the thread_info pointer */
	addi	a2, a2, THREAD_CP_SAVE
	xchal_extra_load_funcbody

	/* We must assume that xchal_extra_store_funcbody destroys
	 * registers a2..a15. FIXME, this list can eventually be
	 * reduced once real register requirements of the macro are
	 * finalized. */

#endif /* XCHAL_EXTRA_SA_SIZE */


	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, DEPC		# use DEPC as temp storage
	wsr	a3, WINDOWSTART		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, WINDOWBASE		# switch to user's saved WB
	rsync
	rsr	a1, DEPC		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3. We are restoring to
	 * a4..a7. Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6			# a2/a3 were a6/a5 before the rotw -1
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, WINDOWBASE
	rsr	a3, SAR			# SAR still holds the user's WB (see ssr above)
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1		# a3 was a7 before the rotw; count down frames
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 * frame where we had loaded a2), or at least the lower 4 bits
	 * (if we have restored WSBITS-1 frames).
	 */

2:	j	common_exception_exit
546
	/* This is the kernel exception exit.
	 * We avoided to do a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

	/* Disable interrupts (a3 holds PT_PS) */

	wsr	a3, PS

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 addt'l regs.
	 */

	/* Check current_thread_info->preempt_count */
	/* NOTE(review): this preemption path is incomplete — TI_FLAGS is
	 * loaded but never acted upon. */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception,jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, WINDOWSTART
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit	# more than one bit set: not spilled

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */
	/* Copy the 4-word save area from below the old SP to below the
	 * pt_regs frame (open-coded MOVSP semantics). */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12
626
	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore the saved window groups; mirror image of the save
	 * sequence in kernel_exception/user_exception. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, EPC_1
	wsr	a3, SAR

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, LBEG
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, LEND
	wsr	a2, LCOUNT

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

	/* Double exception: return via DEPC and rfde instead. */

1:	wsr	a0, DEPC
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde
685
/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user application can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_SHIFT, 1f	# exception mode

	/* Set EPC_1 and EXCCAUSE */

	wsr	a2, DEPC		# save a2 temporarily
	rsr	a2, EPC + XCHAL_DEBUGLEVEL
	wsr	a2, EPC_1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, EXCCAUSE

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_SHIFT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a0, reinstall debug jump vector
	wsr	a2, PS
	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_SHIFT, 2f	# jump if user mode

	/* Kernel mode: carve a pt_regs frame out of the current stack. */

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

	/* User mode: use the task's kernel stack from the exc_table. */

2:	rsr	a2, EXCSAVE_1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!
745
746
/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to setup a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	/* Reset the register window to a single frame at WB 0. */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a1, PS_WOE_MASK | 1	# window overflow enabled, INTLEVEL 1
	wsr	a1, PS
	rsync

	/* Run panic() on the init_task stack; a0=0 terminates backtraces. */

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b	# panic should not return; spin forever if it does
795
796
/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

/* Extract the MOVSP source/destination register field; the nibble
 * order within the instruction byte depends on endianness. */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, DEPC		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, EXCSAVE_1		# make sure excsave_1 is valid for dbl.
	rsr	a4, EPC_1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3		# each table entry is 8 bytes (.align 8)
	jx	a3

.Lunhandled_double:
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

	/* Jump table: fetch the MOVSP source value into a3. Registers
	 * a0, a2, a3, a4 were saved to the frame, so they are read back
	 * from there rather than from the (clobbered) register file. */

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, EPC_1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/*  MOVSP <at>,<as>  was invoked with <at> != a1.
	 *  Because the stack pointer is not being modified,
	 *  we should be able to just modify the pointer
	 *  without moving any save area.
	 *  The processor only traps these occurrences if the
	 *  caller window isn't live, so unfortunately we can't
	 *  use this as an alternate trap mechanism.
	 *  So we just do the move.  This requires that we
	 *  resolve the destination register, not just the source,
	 *  so there's some extra work.
	 *  (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	/* Jump table: store the source value (a3) into the destination
	 * register, writing through the saved frame for a0..a4. */

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe
973
974
/*
 * fast system calls.
 *
 * WARNING: The kernel doesn't save the entire user context before
 * handling a fast system call. These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3		# syscall instruction is 3 bytes
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC		# get syscall-nr (original a2)
	_beqz	a0, fast_syscall_spill_registers

	addi	a0, a0, -__NR_sysxtensa
	_beqz	a0, fast_syscall_sysxtensa

	j	kernel_exception	# not a fast syscall: full path
1013
ENTRY(fast_syscall_user)

	/* User-mode counterpart of fast_syscall_kernel; same dispatch,
	 * but unknown syscalls fall through to user_exception. */

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3		# syscall instruction is 3 bytes
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC		# get syscall-nr (original a2)
	_beqz	a0, fast_syscall_spill_registers

	addi	a0, a0, -__NR_sysxtensa
	_beqz	a0, fast_syscall_sysxtensa

	j	user_exception		# not a fast syscall: full path
ENTRY(fast_syscall_unrecoverable)

	/* A fast syscall arrived as a double exception: restore what we
	 * can and panic via unrecoverable_exception. */

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, DEPC		# restore a2, depc
	rsr	a3, EXCSAVE_1

	wsr	a0, EXCSAVE_1		# pass caller address in excsave_1
	movi	a0, unrecoverable_exception
	callx0	a0
1045
1046
/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6              a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *	 statements and continues from there
 *
 * Usage	TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */

#define TRY	 \
	.section __ex_table, "a";	\
	.word	66f, 67f;		\
	.text;				\
66:

#define CATCH				\
67:

ENTRY(fast_syscall_sysxtensa)

	/* Validate the operation code (must be 1..SYSXTENSA_COUNT-1). */

	_beqz	a6, 1f
	_blti	a6, SYSXTENSA_COUNT, 2f

1:	j	user_exception

2:	xsr	a3, EXCSAVE_1		# restore a3, excsave1
	s32i	a7, a2, PT_AREG7

	movi	a7, 4			# sizeof(unsigned int)
	verify_area a3, a7, a0, a2, .Leac

	_beqi	a6, SYSXTENSA_ATOMIC_SET, .Lset
	_beqi	a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
	_beqi	a6, SYSXTENSA_ATOMIC_ADD, .Ladd

	/* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a7, a3, 0		# read old value
	bne	a7, a4, 1f		# different from expected? jump
	s32i	a5, a3, 0		# equal: store new value
	movi	a7, 1			# and return 1
	j	.Lret

1:	movi	a7, 0			# mismatch: return 0
	j	.Lret

.Ladd:	/* Atomic add */
.Lexg:	/* Atomic (exchange) add */

TRY	l32i	a7, a3, 0
	add	a4, a4, a7
	s32i	a4, a3, 0
	j	.Lret

.Lset:	/* Atomic set */

TRY	l32i	a7, a3, 0		# read old value as return value
	s32i	a4, a3, 0		# write new value

.Lret:	mov	a0, a2			# a0 = pt_regs frame
	mov	a2, a7			# a2 = return value
	l32i	a7, a0, PT_AREG7
	l32i	a3, a0, PT_AREG3
	l32i	a0, a0, PT_AREG0
	rfe

CATCH
.Leac:	movi	a7, -EFAULT		# faulted on user pointer
	j	.Lret
1147
1148
1149
/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 * Note: We don't need to save a2 in depc (return value)
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, WINDOWBASE
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, SAR
	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
	s32i	a3, a2, PT_AREG3

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7

	call0	_spill_registers	# destroys a3, DEPC, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, SAR
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7

	movi	a2, 0			# syscall return value
	rfe
1204
/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
	xsr	a0, DEPC	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, EXCSAVE_1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, WINDOWSTART	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in excsave_1)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	a3
	 */

	wsr	a3, WINDOWBASE
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Restore the registers we saved as a precaution.
	 * We have the value of the 'right' a3
	 */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7
#endif

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, DEPC		# exception address

	/* Restore fixup handler. */

	xsr	a3, EXCSAVE_1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, WINDOWBASE
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Save registers again that might be clobbered. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7
#endif

	/* Load WB at the time the exception occurred. */

	rsr	a3, SAR			# WB is still in SAR
	neg	a3, a3
	wsr	a3, WINDOWBASE
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, EXCSAVE_1

	rfde
1323
1324
/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses DEPC, a3 and SAR.
 *  - the last 'valid' register of each frame are clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a2 = yyxxxwww1.
	 */

	wsr	a2, DEPC		# preserve a2
	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2			# holds WB
	slli	a2, a3, WSBITS
	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-2	# a3 = 0yyxxxwww
	movi	a2, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a2		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, WINDOWSTART		# save shifted windowstart
	neg	a2, a3
	and	a3, a2, a3		# first bit set from right: 000010000

	ffs_ws	a2, a3			# a2: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
	ssr	a2			# save in SAR for later.

	rsr	a3, WINDOWBASE
	add	a3, a3, a2
	rsr	a2, DEPC		# restore a2
	wsr	a3, WINDOWBASE
	rsync

	rsr	a3, WINDOWSTART
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending what call was used (call4, call8, call12),
	 * we have to save 4, 8, or 12 registers.
	 */

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2	# shift windowstart mask; a11 becomes a3
	rotw	2		# after this rotw 2 (call8 frame done)
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, WINDOWBASE
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, WINDOWSTART

.Lnospill:
	jx	a0

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1	# shift windowstart mask; a7 becomes a3
	rotw	1		# after this rotw 1 (call4 frame done)
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3	# shift windowstart mask; a15 becomes a3
				# after the rotw 3 below

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application,
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, PS
	_bbci.l	a0, PS_UM_SHIFT, 1f

	/* User space: Setup a dummy frame and kill application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, EXCSAVE_1

	movi	a4, PS_WOE_MASK | 1
	wsr	a4, PS
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b
1527
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

	/* Double kernel-mode 2nd-level TLB miss: unrecoverable by design. */

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b
1537
1538/* First-level entry handler for user, kernel, and double 2nd-level
1539 * TLB miss exceptions. Note that for now, user and kernel miss
1540 * exceptions share the same entry point and are handled identically.
1541 *
1542 * An old, less-efficient C version of this function used to exist.
1543 * We include it below, interleaved as comments, for reference.
1544 *
1545 * Entry condition:
1546 *
1547 * a0: trashed, original value saved on stack (PT_AREG0)
1548 * a1: a1
1549 * a2: new stack pointer, original in DEPC
1550 * a3: dispatch table
1551 * depc: a2, original value saved on stack (PT_DEPC)
1552 * excsave_1: a3
1553 *
1554 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1555 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1556 */
1557
1558ENTRY(fast_second_level_miss)
1559
	/* Fast path: refill the DTLB "page-table way" so the PTE page for
	 * the faulting address becomes addressable, then return straight to
	 * the faulting instruction.  Falls back to the generic C exception
	 * path (label 2) when the PGD entry is empty.
	 */
1560	/* Save a1. Note: we don't expect a double exception. */
1561
1562	s32i	a1, a2, PT_AREG1
1563
1564	/* We need to map the page of PTEs for the user task.  Find
1565	 * the pointer to that page.  Also, it's possible for tsk->mm
1566	 * to be NULL while tsk->active_mm is nonzero if we faulted on
1567	 * a vmalloc address.  In that rare case, we must use
1568	 * active_mm instead to avoid a fault in this handler.  See
1569	 *
1570	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
1571	 *   (or search Internet on "mm vs. active_mm")
1572	 *
1573	 *	if (!mm)
1574	 *		mm = tsk->active_mm;
1575	 *	pgd = pgd_offset (mm, regs->excvaddr);
1576	 *	pmd = pmd_offset (pgd, regs->excvaddr);
1577	 *	pmdval = *pmd;
1578	 */
1579
1580	GET_CURRENT(a1,a2)
1581	l32i	a0, a1, TASK_MM		# tsk->mm
1582	beqz	a0, 9f
1583
15848:	rsr	a1, EXCVADDR		# fault address
1585	_PGD_OFFSET(a0, a1, a1)
1586	l32i	a0, a0, 0		# read pmdval
1587	//beqi	a0, _PAGE_USER, 2f
1588	beqz	a0, 2f
1589
1590	/* Read ptevaddr and convert to top of page-table page.
1591	 *
1592	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
1593	 * 	vpnval += DTLB_WAY_PGTABLE;
1594	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1595	 *	write_dtlb_entry (pteval, vpnval);
1596	 *
1597	 * The messy computation for 'pteval' above really simplifies
1598	 * into the following:
1599	 *
1600	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
1601	 */
1602
1603	movi	a1, -PAGE_OFFSET
1604	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	/* extui+xor clears the low PAGE_SHIFT bits: a0 &= PAGE_MASK */
1605	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
1606	xor	a0, a0, a1
1607
1608
1609	movi	a1, PAGE_DIRECTORY
1610	or	a0, a0, a1		# ... | PAGE_DIRECTORY
1611
1612	rsr	a1, PTEVADDR
1613	srli	a1, a1, PAGE_SHIFT
1614	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
1615	addi	a1, a1, DTLB_WAY_PGTABLE	# ... + way_number
1616
1617	wdtlb	a0, a1
1618	dsync
1619
1620	/* Exit critical section. */
1621
1622	movi	a0, 0
1623	s32i	a0, a3, EXC_TABLE_FIXUP
1624
1625	/* Restore the working registers, and return. */
1626
1627	l32i	a0, a2, PT_AREG0
1628	l32i	a1, a2, PT_AREG1
1629	l32i	a2, a2, PT_DEPC
1630	xsr	a3, EXCSAVE_1
1631
	/* a2 now holds the saved DEPC: values >= VALID_DOUBLE_EXCEPTION_ADDRESS
	 * mean we entered via a double exception and must return with rfde. */
1632	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1633
1634	/* Restore excsave1 and return. */
1635
1636	rsr	a2, DEPC
1637	rfe
1638
1639	/* Return from double exception. */
1640
16411:	xsr	a2, DEPC
1642	esync
1643	rfde
1644
16459:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
1646	j	8b
1647
16482:	/* Invalid PGD, default exception handling */
	/* Build a minimal pt_regs frame and dispatch to the generic kernel
	 * or user exception path depending on PS.UM. */
1649
1650	rsr	a1, DEPC
1651	xsr	a3, EXCSAVE_1
1652	s32i	a1, a2, PT_AREG2
1653	s32i	a3, a2, PT_AREG3
1654	mov	a1, a2
1655
1656	rsr	a2, PS
1657	bbsi.l	a2, PS_UM_SHIFT, 1f
1658	j	_kernel_exception
16591:	j	_user_exception
1660
1661
1662/*
1663 * StoreProhibitedException
1664 *
1665 * Update the pte and invalidate the itlb mapping for this pte.
1666 *
1667 * Entry condition:
1668 *
1669 * a0: trashed, original value saved on stack (PT_AREG0)
1670 * a1: a1
1671 * a2: new stack pointer, original in DEPC
1672 * a3: dispatch table
1673 * depc: a2, original value saved on stack (PT_DEPC)
1674 * excsave_1: a3
1675 *
1676 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1677 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1678 */
1679
1680ENTRY(fast_store_prohibited)
1681
	/* Fast-path write-fault handler: if the PTE is valid and writable,
	 * set ACCESSED/DIRTY/WRENABLE in place and refresh the DTLB entry;
	 * otherwise fall back to the generic C fault path (label 2). */
1682	/* Save a1 and a4. */
1683
1684	s32i	a1, a2, PT_AREG1
1685	s32i	a4, a2, PT_AREG4
1686
1687	GET_CURRENT(a1,a2)
1688	l32i	a0, a1, TASK_MM		# tsk->mm
1689	beqz	a0, 9f
1690
16918:	rsr	a1, EXCVADDR		# fault address
1692	_PGD_OFFSET(a0, a1, a4)
1693	l32i	a0, a0, 0
1694	//beqi	a0, _PAGE_USER, 2f	# FIXME use _PAGE_INVALID
1695	beqz	a0, 2f
1696
1697	_PTE_OFFSET(a0, a1, a4)
1698	l32i	a4, a0, 0		# read pteval
	/* PTE must have both VALID and RW set, else hand off to C. */
1699	movi	a1, _PAGE_VALID | _PAGE_RW
1700	bnall	a4, a1, 2f
1701
	/* Mark the page accessed/dirty and hardware-writable, then write
	 * the updated PTE back to the page table. */
1702	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
1703	or	a4, a4, a1
1704	rsr	a1, EXCVADDR
1705	s32i	a4, a0, 0
1706
1707	/* We need to flush the cache if we have page coloring. */
1708#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1709	dhwb	a0, 0
1710#endif
	/* Probe for an existing DTLB entry for the fault address; when the
	 * probe hits (nonzero result), invalidate and rewrite that entry
	 * with the updated PTE. */
1711	pdtlb	a0, a1
1712	beqz	a0, 1f
1713	idtlb	a0		// FIXME do we need this?
1714	wdtlb	a4, a0
17151:
1716
1717	/* Exit critical section. */
1718
1719	movi	a0, 0
1720	s32i	a0, a3, EXC_TABLE_FIXUP
1721
1722	/* Restore the working registers, and return. */
1723
1724	l32i	a4, a2, PT_AREG4
1725	l32i	a1, a2, PT_AREG1
1726	l32i	a0, a2, PT_AREG0
1727	l32i	a2, a2, PT_DEPC
1728
1729	/* Restore excsave1 and a3. */
1730
1731	xsr	a3, EXCSAVE_1
1732	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1733
1734	rsr	a2, DEPC
1735	rfe
1736
1737	/* Double exception. Restore FIXUP handler and return. */
1738
17391:	xsr	a2, DEPC
1740	esync
1741	rfde
1742
17439:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
1744	j	8b
1745
17462:	/* If there was a problem, handle fault in C */
1747
1748	rsr	a4, DEPC	# still holds a2
1749	xsr	a3, EXCSAVE_1
1750	s32i	a4, a2, PT_AREG2
1751	s32i	a3, a2, PT_AREG3
1752	l32i	a4, a2, PT_AREG4
1753	mov	a1, a2
1754
1755	rsr	a2, PS
1756	bbsi.l	a2, PS_UM_SHIFT, 1f
1757	j	_kernel_exception
17581:	j	_user_exception
1759
1760
1761#if XCHAL_EXTRA_SA_SIZE
1762
1763#warning fast_coprocessor untested
1764
1765/*
1766 * Entry condition:
1767 *
1768 * a0: trashed, original value saved on stack (PT_AREG0)
1769 * a1: a1
1770 * a2: new stack pointer, original in DEPC
1771 * a3: dispatch table
1772 * depc: a2, original value saved on stack (PT_DEPC)
1773 * excsave_1: a3
1774 *
1775 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1776 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1777 */
1778
1779ENTRY(fast_coprocessor_double)
	/* A coprocessor exception nested inside a double exception is
	 * unrecoverable: stash a0 and panic. */
1780	wsr	a0, EXCSAVE_1
1781	movi	a0, unrecoverable_exception
	/* NOTE(review): no spin loop after the call; if
	 * unrecoverable_exception ever returned, execution would fall
	 * through into fast_coprocessor below — confirm this is intended. */
1782	callx0	a0
1783
1784ENTRY(fast_coprocessor)
1785
	/* Lazy coprocessor-context switch: enable the faulting coprocessor,
	 * and if the current task is not its registered owner, save the old
	 * owner's CP state and load the current task's.  The surrounding
	 * "#warning fast_coprocessor untested" applies to everything below. */
1786	/* Fatal if we are in a double exception. */
1787
1788	l32i	a0, a2, PT_DEPC
1789	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
1790
1791	/* Save some registers a1, a3, a4, SAR */
1792
1793	xsr	a3, EXCSAVE_1
1794	s32i	a3, a2, PT_AREG3
1795	rsr	a3, SAR
1796	s32i	a4, a2, PT_AREG4
1797	s32i	a1, a2, PT_AREG1
	/* NOTE(review): base register is a1 here while every neighbouring
	 * save uses a2, and "mov a1, a2" only happens two lines below — this
	 * likely should be "s32i a5, a2, PT_AREG5"; confirm. */
1798	s32i	a5, a1, PT_AREG5
1799	s32i	a3, a2, PT_SAR
1800	mov	a1, a2
1801
1802	/* Currently, the HAL macros only guarantee saving a0 and a1.
1803	 * These can and will be refined in the future, but for now,
1804	 * just save the remaining registers of a2...a15.
1805	 */
1806	s32i	a6, a1, PT_AREG6
1807	s32i	a7, a1, PT_AREG7
1808	s32i	a8, a1, PT_AREG8
1809	s32i	a9, a1, PT_AREG9
1810	s32i	a10, a1, PT_AREG10
1811	s32i	a11, a1, PT_AREG11
1812	s32i	a12, a1, PT_AREG12
1813	s32i	a13, a1, PT_AREG13
1814	s32i	a14, a1, PT_AREG14
1815	s32i	a15, a1, PT_AREG15
1816
1817	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
1818
1819	rsr	a0, EXCCAUSE
1820	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
1821
1822	/* Set corresponding CPENABLE bit */
1823
1824	movi	a4, 1
1825	ssl	a3			# SAR: 32 - coprocessor_number
1826	rsr	a5, CPENABLE
1827	sll	a4, a4
1828	or	a4, a5, a4
1829	wsr	a4, CPENABLE
1830	rsync
1831	movi	a5, coprocessor_info	# list of owner and offset into cp_save
	/* NOTE(review): the table index used here is a4 (the updated
	 * CPENABLE mask) rather than the coprocessor number a3, and the
	 * branch below compares that mask against the table address (a5 was
	 * just overwritten).  Both look wrong; verify against a corrected
	 * version of this handler before relying on it. */
1832	addx8	a0, a4, a5		# entry for CP
1833
1834	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use
1835
1836	/* Now compare the current task with the owner of the coprocessor.
1837	 * If they are the same, there is no reason to save or restore any
1838	 * coprocessor state. Having already enabled the coprocessor,
1839	 * branch ahead to return.
1840	 */
1841	GET_CURRENT(a5,a1)
1842	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
1843	beq	a4, a5, .Ldone
1844
1845	/* Find location to dump current coprocessor state:
1846	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
1847	 *
1848	 * Note: a0 pointer to the entry in the coprocessor owner table,
1849	 *	 a3 coprocessor number,
1850	 *	 a4 current owner of coprocessor.
1851	 */
1852	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
1853	addi	a2, a4, THREAD_CP_SAVE
1854	add	a2, a2, a5
1855
1856	/* Store current coprocessor states. (a5 still has CP number) */
1857
1858	xchal_cpi_store_funcbody
1859
1860	/* The macro might have destroyed a3 (coprocessor number), but
1861	 * SAR still has 32 - coprocessor_number!
1862	 */
1863	movi	a3, 32
1864	rsr	a4, SAR
1865	sub	a3, a3, a4
1866
1867.Lload:	/* A new task now owns the coprocessors. Save its TCB pointer into
1868	 * the coprocessor owner table.
1869	 *
1870	 * Note: a0 pointer to the entry in the coprocessor owner table,
1871	 *	 a3 coprocessor number.
1872	 */
1873	GET_CURRENT(a4,a1)
1874	s32i	a4, a0, 0
1875
1876	/* Find location from where to restore the current coprocessor state.*/
1877
1878	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
1879	addi	a2, a4, THREAD_CP_SAVE
	/* NOTE(review): adds the task pointer a4 a second time instead of
	 * the offset a5 loaded just above — compare the store path, which
	 * adds a5.  Looks like a copy/paste bug; confirm. */
1880	add	a2, a2, a4
1881
1882	xchal_cpi_load_funcbody
1883
1884	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
1885	 * registers a2..a15.
1886	 */
1887
1888.Ldone:	l32i	a15, a1, PT_AREG15
1889	l32i	a14, a1, PT_AREG14
1890	l32i	a13, a1, PT_AREG13
1891	l32i	a12, a1, PT_AREG12
1892	l32i	a11, a1, PT_AREG11
1893	l32i	a10, a1, PT_AREG10
1894	l32i	a9, a1, PT_AREG9
1895	l32i	a8, a1, PT_AREG8
1896	l32i	a7, a1, PT_AREG7
1897	l32i	a6, a1, PT_AREG6
1898	l32i	a5, a1, PT_AREG5
1899	l32i	a4, a1, PT_AREG4
1900	l32i	a3, a1, PT_AREG3
1901	l32i	a2, a1, PT_AREG2
1902	l32i	a0, a1, PT_AREG0
1903	l32i	a1, a1, PT_AREG1
1904
1905	rfe
1906
1907#endif /* XCHAL_EXTRA_SA_SIZE */
1908
1909/*
1910 * Task switch.
1911 *
1912 * struct task* _switch_to (struct task* prev, struct task* next)
1913 * a2 a2 a3
1914 */
1915
1916ENTRY(_switch_to)
1917
	/* Context switch: a2 = prev task, a3 = next task (see comment above).
	 * Saves prev's return address and stack pointer, spills all live
	 * register windows, then installs next's kernel stack and context. */
1918	entry	a1, 16
1919
1920	mov	a4, a3			# preserve a3
1921
1922	s32i	a0, a2, THREAD_RA	# save return address
1923	s32i	a1, a2, THREAD_SP	# save stack pointer
1924
1925	/* Disable ints while we manipulate the stack pointer; spill regs. */
1926
1927	movi	a5, PS_EXCM_MASK | LOCKLEVEL
	/* xsr leaves the previous PS in a5; it is written back below. */
1928	xsr	a5, PS
1929	rsr	a3, EXCSAVE_1
1930	rsync
	/* Storing a non-zero value (the exc_table address itself) into the
	 * fixup slot marks the critical section for the fixup machinery. */
1931	s32i	a3, a3, EXC_TABLE_FIXUP /* enter critical section */
1932
1933	call0	_spill_registers
1934
1935	/* Set kernel stack (and leave critical section)
1936	 * Note: It's safe to set it here. The stack will not be overwritten
1937	 *	 because the kernel stack will only be loaded again after
1938	 *	 we return from kernel space.
1939	 */
1940
1941	l32i	a0, a4, TASK_THREAD_INFO
1942	rsr	a3, EXCSAVE_1		# exc_table
1943	movi	a1, 0
1944	addi	a0, a0, PT_REGS_OFFSET
1945	s32i	a1, a3, EXC_TABLE_FIXUP
1946	s32i	a0, a3, EXC_TABLE_KSTK
1947
1948	/* restore context of the task that 'next' addresses */
1949
1950	l32i	a0, a4, THREAD_RA	/* restore return address */
1951	l32i	a1, a4, THREAD_SP	/* restore stack pointer */
1952
1953	wsr	a5, PS
1954	rsync
1955
1956	retw
1957
1958
1959ENTRY(ret_from_fork)
1960
	/* First return of a newly forked task: finish the scheduler's
	 * bookkeeping, then join the common exception-return path. */
1961	/* void schedule_tail (struct task_struct *prev)
1962	 * Note: prev is still in a6 (return value from fake call4 frame)
1963	 */
1964	movi	a4, schedule_tail
1965	callx4	a4
1966
	/* NOTE(review): syscall tracing is invoked unconditionally on every
	 * new task's first return — confirm this is intended rather than
	 * being gated on the trace flag. */
1967	movi	a4, do_syscall_trace
1968	callx4	a4
1969
1970	j	common_exception_return
1971
1972
1973
1974/*
1975 * Table of syscalls
1976 */
1977
1978.data
1979.align  4
1980.global sys_call_table
1981sys_call_table:
1982
	/* syscalls.h is included twice with SYSCALL() redefined between the
	 * two passes: first pass emits handler pointers, second pass emits
	 * the per-syscall argument counts. The two tables are index-parallel. */
1983#define SYSCALL(call, narg) .word call
1984#include "syscalls.h"
1985
1986/*
1987 * Number of arguments of each syscall
1988 */
1989
1990.global sys_narg_table
1991sys_narg_table:
1992
1993#undef SYSCALL
1994#define SYSCALL(call, narg) .byte narg
1995#include "syscalls.h"
1996
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
new file mode 100644
index 000000000000..6e9b5225b8f6
--- /dev/null
+++ b/arch/xtensa/kernel/head.S
@@ -0,0 +1,237 @@
1/*
2 * arch/xtensa/kernel/head.S
3 *
4 * Xtensa Processor startup code.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 * Kevin Chea
16 */
17
18#include <xtensa/cacheasm.h>
19#include <linux/config.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22
23/*
24 * This module contains the entry code for kernel images. It performs the
25 * minimal setup needed to call the generic C routines.
26 *
27 * Prerequisites:
28 *
29 * - The kernel image has been loaded to the actual address where it was
30 * compiled to.
31 * - a2 contains either 0 or a pointer to a list of boot parameters.
32 * (see setup.c for more details)
33 *
34 */
35
36	.macro	iterate	from, to , cmd
	/* Recursively expand "\cmd N" for N = \from .. \to.  The recursion
	 * stops once \from exceeds \to: the difference goes negative, its
	 * sign bits make the masked expression nonzero, and .ifeq no longer
	 * assembles the body.  NOTE(review): this also silently limits the
	 * range to fewer than 0x1000 iterations — confirm that is acceptable
	 * for all uses. */
37	.ifeq	((\to - \from) & ~0xfff)
38		\cmd	\from
39		iterate	"(\from+1)", \to, \cmd
40	.endif
41	.endm
42
43/*
44 * _start
45 *
46 * The bootloader passes a pointer to a list of boot parameters in a2.
47 */
48
49 /* The first bytes of the kernel image must be an instruction, so we
50 * manually allocate and define the literal constant we need for a jx
51 * instruction.
52 */
53
54	.section .head.text, "ax"
55	.globl _start
	/* The very first word of the image must be an instruction, so jump
	 * over the literal holding _startup's address and reach it with
	 * l32r + jx. */
56_start:	_j	2f
57	.align	4
581:	.word	_startup
592:	l32r	a0, 1b
60	jx	a0
61
62	.text
63	.align 4
64_startup:
	/* Bring the CPU from its boot state to the point where C code can
	 * run: mask exceptions, reset the register window, clear debug /
	 * timer / interrupt state, initialize the caches, unpack data
	 * sections, clear BSS, set up the stack, then call init_arch() and
	 * start_kernel(). */
65
66	/* Disable interrupts and exceptions. */
67
68	movi	a0, XCHAL_PS_EXCM_MASK
69	wsr	a0, PS
70
71	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
72
73	wsr	a2, EXCSAVE_1
74
75	/* Start with a fresh windowbase and windowstart. */
76
77	movi	a1, 1
78	movi	a0, 0
79	wsr	a1, WINDOWSTART
80	wsr	a0, WINDOWBASE
81	rsync
82
83	/* Set a0 to 0 for the remaining initialization. */
84
85	movi	a0, 0
86
87	/* Clear debugging registers. */
88
89#if XCHAL_HAVE_DEBUG
90	wsr	a0, IBREAKENABLE
91	wsr	a0, ICOUNT
92	movi	a1, 15
	/* NOTE(review): a1 (15) is loaded but never used; ICOUNTLEVEL is
	 * written with a0 (0).  One of the two is probably unintended. */
93	wsr	a0, ICOUNTLEVEL
94
95	.macro reset_dbreak num
96	wsr	a0, DBREAKC + \num
97	.endm
98
99	iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
100#endif
101
102	/* Clear CCOUNT (not really necessary, but nice) */
103
104	wsr	a0, CCOUNT	# not really necessary, but nice
105
106	/* Disable zero-loops. */
107
108#if XCHAL_HAVE_LOOPS
109	wsr	a0, LCOUNT
110#endif
111
112	/* Disable all timers. */
113
114	.macro	reset_timer	num
115	wsr	a0, CCOMPARE_0 + \num
116	.endm
117	iterate	0, XCHAL_NUM_TIMERS-1, reset_timer
118
119	/* Interrupt initialization. */
120
121	movi	a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
122	wsr	a0, INTENABLE
123	wsr	a2, INTCLEAR
124
125	/* Disable coprocessors. */
126
127#if XCHAL_CP_NUM > 0
128	wsr	a0, CPENABLE
129#endif
130
131	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
132	 *
133	 * Note: PS.EXCM must be cleared before using any loop
134	 *	 instructions; otherwise, they are silently disabled, and
135	 *	 at most one iteration of the loop is executed.
136	 */
137
138	movi	a1, 1
139	wsr	a1, PS
140	rsync
141
142	/* Initialize the caches.
143	 * Does not include flushing writeback d-cache.
144	 * a2, a3 are just working registers (clobbered).
145	 */
146
147	icache_reset	a2, a3
148	dcache_reset	a2, a3
149
150	/* Unpack data sections
151	 *
152	 * The linker script used to build the Linux kernel image
153	 * creates a table located at __boot_reloc_table_start
154	 * that contains the information about what data needs to be unpacked.
155	 *
156	 * Uses a2-a7.
157	 */
158
159	movi	a2, __boot_reloc_table_start
160	movi	a3, __boot_reloc_table_end
161
1621:	beq	a2, a3, 3f	# no more entries?
163	l32i	a4, a2, 0	# start destination (in RAM)
164	l32i	a5, a2, 4	# end destination (in RAM)
165	l32i	a6, a2, 8	# start source (in ROM)
166	addi	a2, a2, 12	# next entry
167	beq	a4, a5, 1b	# skip, empty entry
168	beq	a4, a6, 1b	# skip, source and dest. are the same
169
1702:	l32i	a7, a6, 0	# load word
171	addi	a6, a6, 4
172	s32i	a7, a4, 0	# store word
173	addi	a4, a4, 4
174	bltu	a4, a5, 2b
175	j	1b
176
1773:
178	/* All code and initialized data segments have been copied.
179	 * Now clear the BSS segment.
180	 */
181
182	movi	a2, _bss_start	# start of BSS
183	movi	a3, _bss_end	# end of BSS
184
	/* NOTE(review): a2 is advanced BEFORE the store, so the word at
	 * _bss_start itself is never cleared and the final store can land
	 * at or past _bss_end.  The canonical form stores first, then
	 * increments — confirm and fix. */
1851:	addi	a2, a2, 4
186	s32i	a0, a2, 0
187	blt	a2, a3, 1b
188
189#if XCHAL_DCACHE_IS_WRITEBACK
190
191	/* After unpacking, flush the writeback cache to memory so the
192	 * instructions/data are available.
193	 */
194
195	dcache_writeback_all	a2, a3
196#endif
197
198	/* Setup stack and enable window exceptions (keep irqs disabled) */
199
200	movi	a1, init_thread_union
201	addi	a1, a1, KERNEL_STACK_SIZE
202
203	movi	a2, 0x00040001	# WOE=1, INTLEVEL=1, UM=0
204	wsr	a2, PS		# (enable reg-windows; progmode stack)
205	rsync
206
207	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
208
209	movi	a2, debug_exception
210	wsr	a2, EXCSAVE + XCHAL_DEBUGLEVEL
211
212	/* Set up EXCSAVE[1] to point to the exc_table. */
213
214	movi	a6, exc_table
215	xsr	a6, EXCSAVE_1
216
217	/* init_arch kick-starts the linux kernel */
218
219	movi	a4, init_arch
220	callx4	a4
221
222	movi	a4, start_kernel
223	callx4	a4
224
225should_never_return:
226	j	should_never_return
227
228	/* Define some common data structures here.  We define them
229	 * here in this assembly file due to their unusual alignment
230	 * requirements.
231	 */
232
233	.comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
234	.comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
235	.comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
236	.comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
237
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
new file mode 100644
index 000000000000..4cbf6d91571f
--- /dev/null
+++ b/arch/xtensa/kernel/irq.c
@@ -0,0 +1,192 @@
1/*
2 * linux/arch/xtensa/kernel/irq.c
3 *
4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386.
6 *
7 * Copyright (C) 2002 - 2005 Tensilica, Inc.
8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 *
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Kevin Chea
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/seq_file.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/kernel_stat.h>
21
22#include <asm/uaccess.h>
23#include <asm/platform.h>
24
25static void enable_xtensa_irq(unsigned int irq);
26static void disable_xtensa_irq(unsigned int irq);
27static void mask_and_ack_xtensa(unsigned int irq);
28static void end_xtensa_irq(unsigned int irq);
29
30static unsigned int cached_irq_mask;
31
32atomic_t irq_err_count;
33
34/*
35 * 'what should we do if we get a hw irq event on an illegal vector'.
36 * each architecture has to answer this themselves.
37 */
38void ack_bad_irq(unsigned int irq)
39{
	/* Complain about an interrupt that arrived on a vector no handler
	 * claims; nothing further to acknowledge on this controller. */
40	printk("unexpected IRQ trap at vector %02x\n", irq);
41}
42
43/*
44 * do_IRQ handles all normal device IRQ's (the special
45 * SMP cross-CPU interrupts have their own specific
46 * handlers).
47 */
48
49unsigned int do_IRQ(int irq, struct pt_regs *regs)
50{
	/* Architecture entry point for a device interrupt: bracket the
	 * generic __do_IRQ() dispatch with irq_enter()/irq_exit(). */
51	irq_enter();
52
53#ifdef CONFIG_DEBUG_STACKOVERFLOW
54	/* Debugging check for stack overflow: is there less than 1KB free? */
55	{
56		unsigned long sp;
57
58		__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
59		sp &= THREAD_SIZE - 1;
60
		/* NOTE(review): "sizeof(thread_info)" is missing the "struct"
		 * keyword (compare the line below) — this likely fails to
		 * compile with the debug option enabled. */
61		if (unlikely(sp < (sizeof(thread_info) + 1024)))
62			printk("Stack overflow in do_IRQ: %ld\n",
63			       sp - sizeof(struct thread_info));
64	}
65#endif
66
67	__do_IRQ(irq, regs);
68
69	irq_exit();
70
	/* Always reports the interrupt as handled. */
71	return 1;
72}
73
74/*
75 * Generic, controller-independent functions:
76 */
77
78int show_interrupts(struct seq_file *p, void *v)
79{
	/* seq_file callback for /proc/interrupts: emit the CPU header for
	 * the first row, one line per registered IRQ (per-CPU counts,
	 * controller name, action names), and the NMI/ERR summary after the
	 * last IRQ. */
80	int i = *(loff_t *) v, j;
81	struct irqaction * action;
82	unsigned long flags;
83
84	if (i == 0) {
85		seq_printf(p, " ");
86		for (j=0; j<NR_CPUS; j++)
87			if (cpu_online(j))
88				seq_printf(p, "CPU%d ",j);
89		seq_putc(p, '\n');
90	}
91
92	if (i < NR_IRQS) {
		/* Hold the descriptor lock so the action list is stable
		 * while we walk it. */
93		spin_lock_irqsave(&irq_desc[i].lock, flags);
94		action = irq_desc[i].action;
95		if (!action)
96			goto skip;
97		seq_printf(p, "%3d: ",i);
98#ifndef CONFIG_SMP
99		seq_printf(p, "%10u ", kstat_irqs(i));
100#else
101		for (j = 0; j < NR_CPUS; j++)
102			if (cpu_online(j))
103				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
104#endif
105		seq_printf(p, " %14s", irq_desc[i].handler->typename);
106		seq_printf(p, " %s", action->name);
107
108		for (action=action->next; action; action = action->next)
109			seq_printf(p, ", %s", action->name);
110
111		seq_putc(p, '\n');
112skip:
113		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
114	} else if (i == NR_IRQS) {
115		seq_printf(p, "NMI: ");
116		for (j = 0; j < NR_CPUS; j++)
117			if (cpu_online(j))
118				seq_printf(p, "%10u ", nmi_count(j));
119		seq_putc(p, '\n');
120		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
121	}
122	return 0;
123}
124/* shutdown is same as "disable" */
125#define shutdown_xtensa_irq disable_xtensa_irq
126
127static unsigned int startup_xtensa_irq(unsigned int irq)
128{
	/* Unmask the line.  Returns 0: no interrupt can have been pending
	 * before the line was first enabled. */
129	enable_xtensa_irq(irq);
130	return 0; /* never anything pending */
131}
132
133static struct hw_interrupt_type xtensa_irq_type = {
134	"Xtensa-IRQ",		/* typename */
135	startup_xtensa_irq,	/* startup */
136	shutdown_xtensa_irq,	/* shutdown */
137	enable_xtensa_irq,	/* enable */
138	disable_xtensa_irq,	/* disable */
139	mask_and_ack_xtensa,	/* ack */
140	end_xtensa_irq		/* end */
141};
142
143static inline void mask_irq(unsigned int irq)
144{
	/* Clear the line's bit in the soft copy and push it to the INTENABLE
	 * special register.  The read-modify-write of cached_irq_mask is
	 * unprotected here; the caller is responsible for exclusion. */
145	cached_irq_mask &= ~(1 << irq);
146	set_sr (cached_irq_mask, INTENABLE);
147}
148
149static inline void unmask_irq(unsigned int irq)
150{
	/* Set the line's bit in the soft copy and push it to the INTENABLE
	 * special register.  The read-modify-write of cached_irq_mask is
	 * unprotected here; the caller is responsible for exclusion. */
151	cached_irq_mask |= 1 << irq;
152	set_sr (cached_irq_mask, INTENABLE);
153}
154
155static void disable_xtensa_irq(unsigned int irq)
156{
157	unsigned long flags;
	/* NOTE(review): local_save_flags() only reads the flags word — it
	 * does not disable interrupts — so mask_irq()'s read-modify-write of
	 * cached_irq_mask is not actually protected.  local_irq_save() was
	 * most likely intended. */
158	local_save_flags(flags);
159	mask_irq(irq);
160	local_irq_restore(flags);
161}
162
163static void enable_xtensa_irq(unsigned int irq)
164{
165	unsigned long flags;
	/* NOTE(review): local_save_flags() does not disable interrupts, so
	 * unmask_irq()'s update of cached_irq_mask is unprotected —
	 * local_irq_save() was most likely intended. */
166	local_save_flags(flags);
167	unmask_irq(irq);
168	local_irq_restore(flags);
169}
170
171static void mask_and_ack_xtensa(unsigned int irq)
172{
	/* No explicit ack on this controller; masking the line suffices. */
173	disable_xtensa_irq(irq);
174}
175
176static void end_xtensa_irq(unsigned int irq)
177{
	/* Re-enable the line after the handler has run (pairs with the
	 * mask done in mask_and_ack_xtensa). */
178	enable_xtensa_irq(irq);
179}
180
181
182void __init init_IRQ(void)
183{
184	int i;
185
	/* Route all core xtensa interrupt lines through xtensa_irq_type,
	 * start with every source masked, then let the platform hook
	 * register any additional interrupt controllers. */
186	for (i=0; i < XTENSA_NR_IRQS; i++)
187		irq_desc[i].handler = &xtensa_irq_type;
188
189	cached_irq_mask = 0;
190
191	platform_init_irq();
192}
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
new file mode 100644
index 000000000000..d1683cfa19a2
--- /dev/null
+++ b/arch/xtensa/kernel/module.c
@@ -0,0 +1,78 @@
1/*
2 * arch/xtensa/kernel/module.c
3 *
4 * Module support.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/moduleloader.h>
18#include <linux/elf.h>
19#include <linux/vmalloc.h>
20#include <linux/fs.h>
21#include <linux/string.h>
22#include <linux/kernel.h>
23#include <linux/cache.h>
24
25LIST_HEAD(module_buf_list);
26
/* Loadable-module support is not implemented on xtensa yet: every
 * module-loader hook below is a stub that panics if it is ever reached. */
27void *module_alloc(unsigned long size)
28{
29	panic("module_alloc not implemented");
30}
31
32void module_free(struct module *mod, void *module_region)
33{
34	panic("module_free not implemented");
35}
36
37int module_frob_arch_sections(Elf32_Ehdr *hdr,
38			      Elf32_Shdr *sechdrs,
39			      char *secstrings,
40			      struct module *me)
41{
42	panic("module_frob_arch_sections not implemented");
43}
44
45int apply_relocate(Elf32_Shdr *sechdrs,
46		   const char *strtab,
47		   unsigned int symindex,
48		   unsigned int relsec,
49		   struct module *module)
50{
51	panic ("apply_relocate not implemented");
52}
53
54int apply_relocate_add(Elf32_Shdr *sechdrs,
55		       const char *strtab,
56		       unsigned int symindex,
57		       unsigned int relsec,
58		       struct module *module)
59{
60	panic("apply_relocate_add not implemented");
61}
62
63int module_finalize(const Elf_Ehdr *hdr,
64		    const Elf_Shdr *sechdrs,
65		    struct module *me)
66{
67	panic ("module_finalize not implemented");
68}
69
70void module_arch_cleanup(struct module *mod)
71{
72	panic("module_arch_cleanup not implemented");
73}
74
75struct bug_entry *module_find_bug(unsigned long bugaddr)
76{
77	panic("module_find_bug not implemented");
78}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
new file mode 100644
index 000000000000..84fde258cf85
--- /dev/null
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -0,0 +1,73 @@
1/*
2 * arch/xtensa/pci-dma.c
3 *
4 * DMA coherent memory allocation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2002 - 2005 Tensilica Inc.
12 *
13 * Based on version for i386.
14 *
15 * Chris Zankel <chris@zankel.net>
16 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17 */
18
19#include <linux/types.h>
20#include <linux/mm.h>
21#include <linux/string.h>
22#include <linux/pci.h>
23#include <asm/io.h>
24#include <asm/cacheflush.h>
25
26/*
27 * Note: We assume that the full memory space is always mapped to 'kseg'
28 * Otherwise we have to use page attributes (not implemented).
29 */
30
31void *
32dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
33{
	/* Allocate zeroed, DMA-coherent memory: get cached kseg pages, store
	 * the bus address in *handle, and hand back the uncached BYPASS
	 * alias of the buffer (relies on the full-memory kseg mapping noted
	 * above). */
34	void *ret;
35
36	/* ignore region specifiers */
37	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
38
	/* Unknown devices, or devices that cannot address all 32 bits, get
	 * memory from the DMA zone. */
39	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
40		gfp |= GFP_DMA;
41	ret = (void *)__get_free_pages(gfp, get_order(size));
42
43	if (ret != NULL) {
44		memset(ret, 0, size);
45		*handle = virt_to_bus(ret);
46	}
	/* NOTE(review): on allocation failure (ret == NULL) this still
	 * returns BYPASS_ADDR(0), which is presumably not NULL — callers
	 * cannot detect the failure.  Confirm and return NULL instead. */
47	return (void*) BYPASS_ADDR((unsigned long)ret);
48}
49
50void dma_free_coherent(struct device *hwdev, size_t size,
51			 void *vaddr, dma_addr_t dma_handle)
52{
	/* vaddr is the uncached BYPASS alias handed out by
	 * dma_alloc_coherent; convert it back to the cached address that
	 * __get_free_pages returned before freeing. */
53	free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
54}
55
56
57void consistent_sync(void *vaddr, size_t size, int direction)
58{
	/* Make the CPU's view of [vaddr, vaddr+size) consistent with a
	 * device transfer in the given PCI_DMA_* direction: invalidate for
	 * device-to-memory, writeback(+invalidate) for memory-to-device and
	 * bidirectional.  PCI_DMA_NONE is a caller bug. */
59	switch (direction) {
60	case PCI_DMA_NONE:
61		BUG();
62	case PCI_DMA_FROMDEVICE: /* invalidate only */
63		__invalidate_dcache_range((unsigned long)vaddr,
64				          (unsigned long)size);
65		break;
66
67	case PCI_DMA_TODEVICE: /* writeback only */
68	case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
69		__flush_invalidate_dcache_range((unsigned long)vaddr,
70				          (unsigned long)size);
71		break;
72	}
73}
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
new file mode 100644
index 000000000000..d29a81648637
--- /dev/null
+++ b/arch/xtensa/kernel/pci.c
@@ -0,0 +1,563 @@
1/*
2 * arch/xtensa/pcibios.c
3 *
4 * PCI bios-type initialisation for PCI machines
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2001-2005 Tensilica Inc.
12 *
13 * Based largely on work from Cort (ppc/kernel/pci.c)
14 * IO functions copied from sparc.
15 *
16 * Chris Zankel <chris@zankel.net>
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/pci.h>
23#include <linux/delay.h>
24#include <linux/string.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/bootmem.h>
29
30#include <asm/pci-bridge.h>
31#include <asm/platform.h>
32
33#undef DEBUG
34
35#ifdef DEBUG
36#define DBG(x...) printk(x)
37#else
38#define DBG(x...)
39#endif
40
41/* PCI Controller */
42
43
44/*
45 * pcibios_alloc_controller
46 * pcibios_enable_device
47 * pcibios_fixups
48 * pcibios_align_resource
49 * pcibios_fixup_bus
50 * pcibios_setup
51 * pci_bus_add_device
52 * pci_mmap_page_range
53 */
54
/* Singly-linked list of all registered PCI controllers.  New
 * controllers are appended through pci_ctrl_tail by
 * pcibios_alloc_controller().
 */
struct pci_controller* pci_ctrl_head;
struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;

/* Total number of PCI bus numbers consumed, set by pcibios_init(). */
static int pci_bus_count;

static void pcibios_fixup_resources(struct pci_dev* dev);

/* Header fixup table -- disabled pending conversion to the current
 * fixup registration mechanism.
 */
#if 0 // FIXME
struct pci_fixup pcibios_fixups[] = {
 { DECLARE_PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
 { 0 }
};
#endif
68
/*
 * Write an assigned resource back into the device's configuration
 * space.
 *
 * @resource indexes dev->resource[]: 0..5 select the standard BARs,
 * PCI_ROM_RESOURCE selects the expansion-ROM register (and sets its
 * enable bit); any other index is silently ignored as non-standard.
 * I/O addresses are translated from the CPU view back into PCI bus
 * space by subtracting the controller's io_space base.  The register
 * is read back and compared under the BAR-type address mask to catch
 * devices that did not accept the new value.
 */
void
pcibios_update_resource(struct pci_dev *dev, struct resource *root,
			struct resource *res, int resource)
{
	u32 new, check, mask;
	int reg;
	struct pci_controller* pci_ctrl = dev->sysdata;

	new = res->start;
	if (pci_ctrl && res->flags & IORESOURCE_IO) {
		/* CPU address -> PCI bus address for I/O windows. */
		new -= pci_ctrl->io_space.base;
	}
	/* Preserve the read-only flag bits in the low BAR bits. */
	new |= (res->flags & PCI_REGION_FLAG_MASK);
	if (resource < 6) {
		reg = PCI_BASE_ADDRESS_0 + 4*resource;
	} else if (resource == PCI_ROM_RESOURCE) {
		res->flags |= PCI_ROM_ADDRESS_ENABLE;
		reg = dev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	pci_write_config_dword(dev, reg, new);
	pci_read_config_dword(dev, reg, &check);
	mask = (new & PCI_BASE_ADDRESS_SPACE_IO) ?
		PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;

	if ((new ^ check) & mask) {
		printk(KERN_ERR "PCI: Error while updating region "
		       "%s/%d (%08x != %08x)\n", dev->slot_name, resource,
		       new, check);
	}
}
103
104/*
105 * We need to avoid collisions with `mirrored' VGA ports
106 * and other strange ISA hardware, so we always want the
107 * addresses to be allocated in the 0x000-0x0ff region
108 * modulo 0x400.
109 *
110 * Why? Because some silly external IO cards only decode
111 * the low 10 bits of the IO address. The 0x00-0xff region
112 * is reserved for motherboard devices that decode all 16
113 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
114 * but we want to try to avoid allocating at 0x2900-0x2bff
115 * which might have be mirrored at 0x0100-0x03ff..
116 */
117void
118pcibios_align_resource(void *data, struct resource *res, unsigned long size,
119 unsigned long align)
120{
121 struct pci_dev *dev = data;
122
123 if (res->flags & IORESOURCE_IO) {
124 unsigned long start = res->start;
125
126 if (size > 0x100) {
127 printk(KERN_ERR "PCI: I/O Region %s/%d too large"
128 " (%ld bytes)\n", dev->slot_name,
129 dev->resource - res, size);
130 }
131
132 if (start & 0x300) {
133 start = (start + 0x3ff) & ~0x3ff;
134 res->start = start;
135 }
136 }
137}
138
139int
140pcibios_enable_resources(struct pci_dev *dev, int mask)
141{
142 u16 cmd, old_cmd;
143 int idx;
144 struct resource *r;
145
146 pci_read_config_word(dev, PCI_COMMAND, &cmd);
147 old_cmd = cmd;
148 for(idx=0; idx<6; idx++) {
149 r = &dev->resource[idx];
150 if (!r->start && r->end) {
151 printk (KERN_ERR "PCI: Device %s not available because "
152 "of resource collisions\n", dev->slot_name);
153 return -EINVAL;
154 }
155 if (r->flags & IORESOURCE_IO)
156 cmd |= PCI_COMMAND_IO;
157 if (r->flags & IORESOURCE_MEM)
158 cmd |= PCI_COMMAND_MEMORY;
159 }
160 if (dev->resource[PCI_ROM_RESOURCE].start)
161 cmd |= PCI_COMMAND_MEMORY;
162 if (cmd != old_cmd) {
163 printk("PCI: Enabling device %s (%04x -> %04x)\n",
164 dev->slot_name, old_cmd, cmd);
165 pci_write_config_word(dev, PCI_COMMAND, cmd);
166 }
167 return 0;
168}
169
/*
 * Allocate a zeroed pci_controller from bootmem and append it to the
 * global controller list (pci_ctrl_head/pci_ctrl_tail), preserving
 * registration order for pcibios_init().  Boot-time only; the memory
 * is never freed.
 */
struct pci_controller * __init pcibios_alloc_controller(void)
{
	struct pci_controller *pci_ctrl;

	pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
	memset(pci_ctrl, 0, sizeof(struct pci_controller));

	*pci_ctrl_tail = pci_ctrl;
	pci_ctrl_tail = &pci_ctrl->next;

	return pci_ctrl;
}
182
/*
 * Late-boot PCI initialization: scan every registered controller,
 * attach its I/O and memory windows as resources of the resulting
 * root bus, and hand out consecutive bus numbers across controllers.
 * Finishes with the platform-specific fixup hook.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *pci_ctrl;
	struct pci_bus *bus;
	int next_busno = 0, i;

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
		pci_ctrl->last_busno = 0xff;
		bus = pci_scan_bus(pci_ctrl->first_busno, pci_ctrl->ops,
				   pci_ctrl);
		if (pci_ctrl->io_resource.flags) {
			unsigned long offs;

			/* Shift the I/O window from bus-relative to
			 * CPU-relative addresses before exposing it.
			 */
			offs = (unsigned long)pci_ctrl->io_space.base;
			pci_ctrl->io_resource.start += offs;
			pci_ctrl->io_resource.end += offs;
			bus->resource[0] = &pci_ctrl->io_resource;
		}
		/* Up to three memory windows per controller. */
		for (i = 0; i < 3; ++i)
			if (pci_ctrl->mem_resources[i].flags)
				bus->resource[i+1] =&pci_ctrl->mem_resources[i];
		pci_ctrl->bus = bus;
		pci_ctrl->last_busno = bus->subordinate;
		if (next_busno <= pci_ctrl->last_busno)
			next_busno = pci_ctrl->last_busno+1;
	}
	pci_bus_count = next_busno;

	return platform_pcibios_fixup();
}

subsys_initcall(pcibios_init);
218
/*
 * Per-bus fixup called by the PCI core after scanning @bus.
 *
 * For a host bridge (no parent), install the controller's I/O and
 * memory windows as the bus resources, supplying wide defaults (and a
 * warning) when a window was never configured; I/O ranges are shifted
 * by the controller's io_space base into CPU address space.  For a
 * subordinate bridge, read the bridge base/limit registers and apply
 * the same I/O offset to any I/O windows found there.
 */
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *pci_ctrl = bus->sysdata;
	struct resource *res;
	unsigned long io_offset;
	int i;

	io_offset = (unsigned long)pci_ctrl->io_space.base;
	if (bus->parent == NULL) {
		/* this is a host bridge - fill in its resources */
		pci_ctrl->bus = bus;

		bus->resource[0] = res = &pci_ctrl->io_resource;
		if (!res->flags) {
			/* Fall back to the whole I/O space. */
			if (io_offset)
				printk (KERN_ERR "I/O resource not set for host"
					" bridge %d\n", pci_ctrl->index);
			res->start = 0;
			res->end = IO_SPACE_LIMIT;
			res->flags = IORESOURCE_IO;
		}
		res->start += io_offset;
		res->end += io_offset;

		for (i = 0; i < 3; i++) {
			res = &pci_ctrl->mem_resources[i];
			if (!res->flags) {
				/* Only the first memory window must
				 * exist; silently skip the others.
				 */
				if (i > 0)
					continue;
				printk(KERN_ERR "Memory resource not set for "
				       "host bridge %d\n", pci_ctrl->index);
				res->start = 0;
				res->end = ~0U;
				res->flags = IORESOURCE_MEM;
			}
			bus->resource[i+1] = res;
		}
	} else {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);

		for (i = 0; i < 4; i++) {
			if ((res = bus->resource[i]) == NULL || !res->flags)
				continue;
			if (io_offset && (res->flags & IORESOURCE_IO)) {
				res->start += io_offset;
				res->end += io_offset;
			}
		}
	}
}
270
/*
 * Handle "pci=" kernel command-line options.  No options are
 * recognized on this architecture, so the string is returned to the
 * generic code unconsumed.
 */
char __init *pcibios_setup(char *str)
{
	return str;
}
275
276/* the next one is stolen from the alpha port... */
277
/*
 * Record the routed interrupt line in the device's config space so
 * drivers (and the device's own firmware view) see the IRQ assigned
 * during bus fixup.
 */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
283
284int pcibios_enable_device(struct pci_dev *dev, int mask)
285{
286 u16 cmd, old_cmd;
287 int idx;
288 struct resource *r;
289
290 pci_read_config_word(dev, PCI_COMMAND, &cmd);
291 old_cmd = cmd;
292 for (idx=0; idx<6; idx++) {
293 r = &dev->resource[idx];
294 if (!r->start && r->end) {
295 printk(KERN_ERR "PCI: Device %s not available because "
296 "of resource collisions\n", dev->slot_name);
297 return -EINVAL;
298 }
299 if (r->flags & IORESOURCE_IO)
300 cmd |= PCI_COMMAND_IO;
301 if (r->flags & IORESOURCE_MEM)
302 cmd |= PCI_COMMAND_MEMORY;
303 }
304 if (cmd != old_cmd) {
305 printk("PCI: Enabling device %s (%04x -> %04x)\n",
306 dev->slot_name, old_cmd, cmd);
307 pci_write_config_word(dev, PCI_COMMAND, cmd);
308 }
309
310 return 0;
311}
312
313#ifdef CONFIG_PROC_FS
314
315/*
316 * Return the index of the PCI controller for device pdev.
317 */
318
/*
 * Return the index of the controller that @dev hangs off, as stored
 * in the device's sysdata by the bus scan.  Used by /proc support.
 */
int
pci_controller_num(struct pci_dev *dev)
{
	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
	return pci_ctrl->index;
}
325
326#endif /* CONFIG_PROC_FS */
327
328
/*
 * Per-device resource fixup: translate each assigned BAR from PCI bus
 * addresses to CPU addresses by adding the owning controller's I/O or
 * memory window base.  A resource ending at 0xffffffff is treated as
 * unassigned and reset to a zero-based range.
 */
static void
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller* pci_ctrl = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!pci_ctrl) {
		printk(KERN_ERR "No pci_ctrl for PCI dev %s!\n",dev->slot_name);
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		/* Skip empty or never-assigned resources. */
		if (!res->start || !res->flags)
			continue;
		if (res->end == 0xffffffff) {
			DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
			    dev->slot_name, i, res->start, res->end);
			/* Keep the size but rebase to 0 so the core can
			 * reassign it later.
			 */
			res->end -= res->start;
			res->start = 0;
			continue;
		}
		offset = 0;
		if (res->flags & IORESOURCE_IO)
			offset = (unsigned long) pci_ctrl->io_space.base;
		else if (res->flags & IORESOURCE_MEM)
			offset = (unsigned long) pci_ctrl->mem_space.base;

		if (offset != 0) {
			res->start += offset;
			res->end += offset;
#ifdef DEBUG
			printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
			       i, res->flags, dev->slot_name,
			       res->start - offset, res->start);
#endif
		}
	}
}
368
369/*
370 * Platform support for /proc/bus/pci/X/Y mmap()s,
371 * modelled on the sparc64 implementation by Dave Miller.
372 * -- paulus.
373 */
374
375/*
376 * Adjust vm_pgoff of VMA such that it is the physical page offset
377 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
378 *
379 * Basically, the user finds the base address for his device which he wishes
380 * to mmap. They read the 32-bit value from the config space base register,
381 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
382 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
383 *
384 * Returns negative error code on failure, zero on success.
385 */
/*
 * Validate the user-requested mmap offset against @dev's resources
 * and rewrite vma->vm_pgoff to the physical page offset to map.
 *
 * For I/O mappings the requested offset is first shifted into CPU
 * address space by the controller's io_space base so it can be
 * compared against the (already fixed-up) resources; the final
 * physical address is then rebased onto io_space.start.  Returns 0 on
 * success, -EINVAL if the offset matches no resource of the requested
 * type.
 */
static __inline__ int
__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
		       enum pci_mmap_state mmap_state)
{
	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long io_offset = 0;
	int i, res_bit;

	if (pci_ctrl == 0)
		return -EINVAL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)pci_ctrl->io_space.base;
		offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			offset += pci_ctrl->io_space.start - io_offset;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
		return 0;
	}

	return -EINVAL;
}
436
437/*
438 * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
439 * mapping.
440 */
/*
 * Mark the VMA as a shared, pinned I/O mapping so the VM never swaps
 * it or treats it as ordinary anonymous memory.  @dev and @mmap_state
 * are unused on this architecture.
 */
static __inline__ void
__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state)
{
	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
}
447
448/*
449 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
450 * device mapping.
451 */
/*
 * Choose the page protection for a PCI mmap: clear the no-cache bit
 * so the mapping is cacheable/write-through.  The write-combine
 * distinction is not implemented (see the disabled block below);
 * @write_combine is currently ignored.
 */
static __inline__ void
__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
		      enum pci_mmap_state mmap_state, int write_combine)
{
	int prot = pgprot_val(vma->vm_page_prot);

	/* Set to write-through */
	prot &= ~_PAGE_NO_CACHE;
#if 0
	if (!write_combine)
		prot |= _PAGE_WRITETHRU;
#endif
	vma->vm_page_prot = __pgprot(prot);
}
466
467/*
468 * Perform the actual remap of the pages for a PCI device mapping, as
469 * appropriate for this architecture. The region in the process to map
470 * is described by vm_start and vm_end members of VMA, the base physical
471 * address is found in vm_pgoff.
472 * The pci device structure is provided so that architectures may make mapping
473 * decisions on a per-device or per-bus basis.
474 *
475 * Returns a negative error code on failure, zero on success.
476 */
/*
 * mmap() a PCI device's I/O or memory resource into user space (used
 * by /proc/bus/pci): validate and translate the requested offset,
 * set the VMA flags and page protection, then remap the physical
 * range.  Returns 0 on success or a negative error code.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_flags(dev, vma, mmap_state);
	__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);

	ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT,
			          vma->vm_end - vma->vm_start,vma->vm_page_prot);

	return ret;
}
495
496/*
497 * This probably belongs here rather than ioport.c because
498 * we do not want this crud linked into SBus kernels.
499 * Also, think for a moment about likes of floppy.c that
500 * include architecture specific parts. They may want to redefine ins/outs.
501 *
 502 * We do not use horrible macros here because we want to
503 * advance pointer by sizeof(size).
504 */
/* Stream @count bytes from @src to MMIO, one byte per access.  Note
 * that @addr advances along with the source, so this writes to a
 * range of addresses rather than a single port.
 */
void outsb(unsigned long addr, const void *src, unsigned long count) {
	const unsigned char *from = src;

	for (; count != 0; count -= 1, from += 1, addr += 1)
		writeb(*(const char *)from, addr);
}
513
/* Stream @count bytes from @src as 16-bit MMIO writes to ascending
 * addresses.  @count is presumably an even byte count -- an odd value
 * would wrap past zero; TODO confirm against callers.
 */
void outsw(unsigned long addr, const void *src, unsigned long count) {
	const unsigned char *from = src;

	for (; count != 0; count -= 2, from += 2, addr += 2)
		writew(*(const short *)from, addr);
}
522
/* Stream @count bytes from @src as 32-bit MMIO writes to ascending
 * addresses.  @count is presumably a multiple of four -- TODO confirm.
 */
void outsl(unsigned long addr, const void *src, unsigned long count) {
	const unsigned char *from = src;

	for (; count != 0; count -= 4, from += 4, addr += 4)
		writel(*(const long *)from, addr);
}
531
/* Read @count bytes from ascending MMIO addresses starting at @addr
 * into @dst, one byte per access.
 */
void insb(unsigned long addr, void *dst, unsigned long count) {
	unsigned char *to = dst;

	for (; count != 0; count -= 1, to += 1, addr += 1)
		*to = readb(addr);
}
540
/* Read @count bytes as 16-bit MMIO accesses from ascending addresses
 * into @dst.  @count is presumably even -- TODO confirm.
 */
void insw(unsigned long addr, void *dst, unsigned long count) {
	unsigned char *to = dst;

	for (; count != 0; count -= 2, to += 2, addr += 2)
		*(unsigned short *)to = readw(addr);
}
549
/* Read @count bytes as 32-bit MMIO accesses from ascending addresses
 * into @dst.  @count is presumably a multiple of four -- TODO confirm.
 *
 * XXX An unaligned @dst will trap here, as in the original.
 */
void insl(unsigned long addr, void *dst, unsigned long count) {
	unsigned char *to = dst;

	for (; count != 0; count -= 4, to += 4, addr += 4)
		*(unsigned long *)to = readl(addr);
}
561
562
563
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
new file mode 100644
index 000000000000..cf1362784443
--- /dev/null
+++ b/arch/xtensa/kernel/platform.c
@@ -0,0 +1,49 @@
1/*
2 * arch/xtensa/kernel/platform.c
3 *
4 * Default platform functions.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <linux/config.h>
16#include <linux/types.h>
17#include <linux/pci.h>
18#include <linux/time.h>
19#include <asm/platform.h>
20#include <asm/timex.h>
21
22#define _F(r,f,a,b) \
23 r __platform_##f a b; \
24 r platform_##f a __attribute__((weak, alias("__platform_"#f)))
25
26/*
27 * Default functions that are used if no platform specific function is defined.
28 * (Please, refer to include/asm-xtensa/platform.h for more information)
29 */
30
31_F(void, setup, (char** cmd), { });
32_F(void, init_irq, (void), { });
33_F(void, restart, (void), { while(1); });
34_F(void, halt, (void), { while(1); });
35_F(void, power_off, (void), { while(1); });
36_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
37_F(void, heartbeat, (void), { });
38_F(int, pcibios_fixup, (void), { return 0; });
39_F(int, get_rtc_time, (time_t* t), { return 0; });
40_F(int, set_rtc_time, (time_t t), { return 0; });
41
42#if CONFIG_XTENSA_CALIBRATE_CCOUNT
43_F(void, calibrate_ccount, (void),
44{
45 printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
46 ccount_per_jiffy = 100 * (1000000UL/HZ);
47});
48#endif
49
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
new file mode 100644
index 000000000000..4099703b14be
--- /dev/null
+++ b/arch/xtensa/kernel/process.c
@@ -0,0 +1,482 @@
1// TODO verify coprocessor handling
2/*
3 * arch/xtensa/kernel/process.c
4 *
5 * Xtensa Processor version.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/stddef.h>
27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h>
30#include <linux/elf.h>
31#include <linux/init.h>
32#include <linux/prctl.h>
33#include <linux/init_task.h>
34#include <linux/module.h>
35#include <linux/mqueue.h>
36
37#include <asm/pgtable.h>
38#include <asm/uaccess.h>
39#include <asm/system.h>
40#include <asm/io.h>
41#include <asm/processor.h>
42#include <asm/platform.h>
43#include <asm/mmu.h>
44#include <asm/irq.h>
45#include <asm/atomic.h>
46#include <asm/offsets.h>
47#include <asm/coprocessor.h>
48
/* Entry point a newly created thread returns through (see entry.S). */
extern void ret_from_fork(void);

/* Statically initialized state for the initial (PID 0) task. */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* The init task's stack/thread_info lives in a dedicated section so
 * the linker can place it with the alignment THREAD_SIZE requires.
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* Per-CPU current-task pointers; the boot CPU starts as init_task. */
struct task_struct *current_set[NR_CPUS] = {&init_task, };
66
67
#if XCHAL_CP_NUM > 0

/*
 * Coprocessor ownership.
 *
 * One entry per possible coprocessor: the task that currently owns
 * the coprocessor's live register state (0 = none) and the offset of
 * its save area within thread.cp_save.
 */

coprocessor_info_t coprocessor_info[] = {
	{ 0, XTENSA_CPE_CP0_OFFSET },
	{ 0, XTENSA_CPE_CP1_OFFSET },
	{ 0, XTENSA_CPE_CP2_OFFSET },
	{ 0, XTENSA_CPE_CP3_OFFSET },
	{ 0, XTENSA_CPE_CP4_OFFSET },
	{ 0, XTENSA_CPE_CP5_OFFSET },
	{ 0, XTENSA_CPE_CP6_OFFSET },
	{ 0, XTENSA_CPE_CP7_OFFSET },
};

#endif
86
87/*
88 * Powermanagement idle function, if any is provided by the platform.
89 */
90
/*
 * Idle loop: run the platform idle hook (typically a "waiti") until a
 * reschedule is needed, then yield.
 *
 * NOTE(review): every loop iteration calls preempt_enable() without a
 * matching preempt_disable() after schedule() returns -- verify the
 * preempt-count balance against how this arch enters cpu_idle().
 */
void cpu_idle(void)
{
	local_irq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			platform_idle();
		preempt_enable();
		schedule();
	}
}
103
104/*
105 * Free current thread data structures etc..
106 */
107
/* Called on task exit: drop any coprocessors this task owns so stale
 * ownership records do not outlive it.
 */
void exit_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
112
/* Called on exec: reset per-thread coprocessor state for the fresh
 * program image.
 */
void flush_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
117
118/*
119 * Copy thread.
120 *
121 * The stack layout for the new thread looks like this:
122 *
123 * +------------------------+ <- sp in childregs (= tos)
124 * | childregs |
125 * +------------------------+ <- thread.sp = sp in dummy-frame
126 * | dummy-frame | (saved in dummy-frame spill-area)
127 * +------------------------+
128 *
129 * We create a dummy frame to return to ret_from_fork:
130 * a0 points to ret_from_fork (simulating a call4)
131 * sp points to itself (thread.sp)
132 * a2, a3 are unused.
133 *
134 * Note: This is a pristine frame, so we don't need any spill region on top of
135 * childregs.
136 */
137
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs *childregs;
	unsigned long tos;
	/* Snapshot whether the parent was in user mode; the local name
	 * shadows the macro, but the macro still expands where called
	 * with arguments below.
	 */
	int user_mode = user_mode(regs);

	/* Set up new TSS. */
	tos = (unsigned long)p->thread_info + THREAD_SIZE;
	if (user_mode)
		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs*)tos - 1;

	/* Child starts with a copy of the parent's register frame. */
	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int*)childregs - 3) = (unsigned long)childregs;
	*((int*)childregs - 4) = 0;

	childregs->areg[1] = tos;
	childregs->areg[2] = 0;		/* child's fork() return value */
	p->set_child_tid = p->clear_child_tid = NULL;
	/* Resume point: ret_from_fork, encoded as a call4 return. */
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;
	if (user_mode(regs)) {

		/* Copy the caller frames still live in the register
		 * file (wmask counts them in its upper bits).
		 */
		int len = childregs->wmask & ~0xf;
		childregs->areg[1] = usp;
		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);

		/* TLS pointer arrives in the syscall's 5th argument. */
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];

	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
	}
	return 0;
}
180
181
182/*
183 * Create a kernel thread
184 */
185
/*
 * Create a kernel thread: issue the clone syscall directly, then have
 * the child call @fn(@arg) and exit with its return value.  The
 * parent/child split is detected by comparing a3 (the pre-syscall sp)
 * with the post-syscall sp: only the child runs on a new stack.
 * CLONE_VM is forced so the thread shares the kernel address space.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;
	__asm__ __volatile__
		("mov           a5, %4\n\t" /* preserve fn in a5 */
		 "mov           a6, %3\n\t" /* preserve and setup arg in a6 */
		 "movi		a2, %1\n\t" /* load __NR_clone for syscall*/
		 "mov		a3, sp\n\t" /* sp check and sys_clone */
		 "mov		a4, %5\n\t" /* load flags for syscall */
		 "syscall\n\t"
		 "beq		a3, sp, 1f\n\t" /* branch if parent */
		 "callx4	a5\n\t"         /* call fn */
		 "movi		a2, %2\n\t"     /* load __NR_exit for syscall */
		 "mov		a3, a6\n\t"     /* load fn return value */
		 "syscall\n"
		 "1:\n\t"
		 "mov		%0, a2\n\t"	/* parent returns zero */
		 :"=r" (retval)
		 :"i" (__NR_clone), "i" (__NR_exit),
		 "r" (arg), "r" (fn),
		 "r" (flags | CLONE_VM)
		 : "a2", "a3", "a4", "a5", "a6" );
	return retval;
}
210
211
212/*
213 * These bracket the sleeping functions..
214 */
215
216unsigned long get_wchan(struct task_struct *p)
217{
218 unsigned long sp, pc;
219 unsigned long stack_page = (unsigned long) p->thread_info;
220 int count = 0;
221
222 if (!p || p == current || p->state == TASK_RUNNING)
223 return 0;
224
225 sp = p->thread.sp;
226 pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
227
228 do {
229 if (sp < stack_page + sizeof(struct task_struct) ||
230 sp >= (stack_page + THREAD_SIZE) ||
231 pc == 0)
232 return 0;
233 if (!in_sched_functions(pc))
234 return pc;
235
236 /* Stack layout: sp-4: ra, sp-3: sp' */
237
238 pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
239 sp = *(unsigned long *)sp - 3;
240 } while (count++ < 16);
241 return 0;
242}
243
244/*
245 * do_copy_regs() gathers information from 'struct pt_regs' and
246 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
247 * structure.
248 *
249 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
250 * of processor registers. Besides different ordering,
251 * xtensa_gregset_t contains non-live register information that
252 * 'struct pt_regs' does not. Exception handling (primarily) uses
253 * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
254 *
255 */
256
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		   struct task_struct *tsk)
{
	int i, n, wb_offset;

	elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
	elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;

	/* Read special registers 176 and 208 directly (raw rsr by
	 * number; meaning is configuration-specific).
	 */
	__asm__ __volatile__ ("rsr  %0, 176\n" : "=a" (i));
	elfregs->cpux = i;
	__asm__ __volatile__ ("rsr  %0, 208\n" : "=a" (i));
	elfregs->cpuy = i;

	/* Note:  PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */

	elfregs->pc		= regs->pc;
	elfregs->ps		= (regs->ps & ~XCHAL_PS_EXCM_MASK);
	elfregs->exccause	= regs->exccause;
	elfregs->excvaddr	= regs->excvaddr;
	elfregs->windowbase	= regs->windowbase;
	elfregs->windowstart	= regs->windowstart;
	elfregs->lbeg		= regs->lbeg;
	elfregs->lend		= regs->lend;
	elfregs->lcount		= regs->lcount;
	elfregs->sar		= regs->sar;
	elfregs->syscall	= regs->syscall;

	/* Copy register file.
	 * The layout looks like this:
	 *
	 * |  a0 ... a15  | Z ... Z |  arX ... arY  |
	 *  current window  unused    saved frames
	 */

	memset (elfregs->ar, 0, sizeof(elfregs->ar));

	/* wmask's low bits encode how much of the live window is
	 * valid: 4, 8, 12, or 16 registers.
	 */
	wb_offset = regs->windowbase * 4;
	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];

	/* Upper wmask bits count caller frames spilled at the top of
	 * the areg array.
	 */
	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}
306
307void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
308{
309 do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
310}
311
312
313/* The inverse of do_copy_regs(). No error or sanity checking. */
314
/* The inverse of do_copy_regs().  No error or sanity checking. */

/*
 * NOTE(review): the areg/ar index expressions below mirror
 * do_copy_regs() without swapping sides -- i.e. the windowbase offset
 * is applied to the DESTINATION here but to the SOURCE in the copy
 * direction.  That does not look like a true inverse; also, wmask is
 * taken from @regs rather than restored from @elfregs.  Verify
 * against the window-spill layout before relying on this path.
 */
void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		      struct task_struct *tsk)
{
	int i, n, wb_offset;

	/* Note:  PS.EXCM is not set while user task is running; it
	 * needs to be set in regs->ps is for exception handling convenience.
	 */

	regs->pc		= elfregs->pc;
	regs->ps		= (elfregs->ps | XCHAL_PS_EXCM_MASK);
	regs->exccause		= elfregs->exccause;
	regs->excvaddr		= elfregs->excvaddr;
	regs->windowbase	= elfregs->windowbase;
	regs->windowstart	= elfregs->windowstart;
	regs->lbeg		= elfregs->lbeg;
	regs->lend		= elfregs->lend;
	regs->lcount		= elfregs->lcount;
	regs->sar		= elfregs->sar;
	regs->syscall	= elfregs->syscall;

	/* Clear everything. */

	memset (regs->areg, 0, sizeof(regs->areg));

	/* Copy regs from live window frame. */

	wb_offset = regs->windowbase * 4;
	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];

	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
}
353
354/*
355 * do_save_fpregs() gathers information from 'struct pt_regs' and
356 * 'current->thread' to fill in the elf_fpregset_t structure.
357 *
358 * Core files and ptrace use elf_fpregset_t.
359 */
360
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		     struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

	extern unsigned char	_xtensa_reginfo_tables[];
	extern unsigned		_xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Before dumping coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first saved to memory:
	 */
	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			/* This task owns the live registers of CP i;
			 * spill them into its thread save area.
			 */
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now dump coprocessor & extra state: */
	/* The dump is the reginfo layout table followed by the raw
	 * save-area contents.
	 */
	memcpy((unsigned char*)fpregs,
	       _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
	memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}
395
396/*
397 * The inverse of do_save_fpregs().
398 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
399 * Returns 0 on success, non-zero if layout doesn't match.
400 */
401
int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		       struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

	extern unsigned char	_xtensa_reginfo_tables[];
	extern unsigned		_xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Make sure save area layouts match.
	 * FIXME:  in the future we could allow restoring from
	 * a different layout of the same registers, by comparing
	 * fpregs' table with _xtensa_reginfo_tables and matching
	 * entries and copying registers one at a time.
	 * Not too sure yet whether that's very useful.
	 */

	if( memcmp((unsigned char*)fpregs,
		   _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
		return -1;
	}

	/* Before restoring coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first invalidated.
	 */
	/* NOTE(review): the loop below *saves* the live registers into
	 * cp_save before clearing ownership, even though cp_save is
	 * overwritten from @fpregs right afterwards -- presumably only
	 * the ownership clear + disable matter here; confirm.
	 */

	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			coprocessor_info[i].owner = 0;
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now restore coprocessor & extra state: */

	memcpy(tsk->thread.cp_save,
	       (unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       XTENSA_CP_EXTRA_SIZE);
#endif
	return 0;
}
452/*
453 * Fill in the CP structure for a core dump for a particular task.
454 */
455
/*
 * Dump @task's coprocessor/extra state into @r for a core dump.
 * Returns a mask describing what was dumped (bit 16 flags "extra
 * state present" plus the per-coprocessor mask), or 0 when this
 * configuration has no coprocessor state worth dumping.
 */
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
/* see asm/coprocessor.h for this magic number 16 */
#if TOTAL_CPEXTRA_SIZE > 16
	do_save_fpregs (r, regs, task);

	/*  For now, bit 16 means some extra state may be present:  */
// FIXME!! need to track to return more accurate mask
	return 0x10000 | XCHAL_CP_MASK;
#else
	return 0;	/* no coprocessors active on this processor */
#endif
}
470
471/*
472 * Fill in the CP structure for a core dump.
473 * This includes any FPU coprocessor.
474 * Here, we dump all coprocessors, and other ("extra") custom state.
475 *
476 * This function is called by elf_core_dump() in fs/binfmt_elf.c
477 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
478 */
/* Core-dump entry point for the current task; see comment above. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	return dump_task_fpu(regs, current, r);
}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
new file mode 100644
index 000000000000..9ef07a4dd2a2
--- /dev/null
+++ b/arch/xtensa/kernel/ptrace.c
@@ -0,0 +1,407 @@
1// TODO some minor issues
2/*
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (C) 2001 - 2005 Tensilica Inc.
8 *
9 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
10 * Chris Zankel <chris@zankel.net>
11 * Scott Foehner<sfoehner@yahoo.com>,
12 * Kevin Chea
13 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
14 */
15
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/errno.h>
21#include <linux/ptrace.h>
22#include <linux/smp.h>
23#include <linux/smp_lock.h>
24#include <linux/security.h>
25
26#include <asm/pgtable.h>
27#include <asm/page.h>
28#include <asm/system.h>
29#include <asm/uaccess.h>
30#include <asm/ptrace.h>
31#include <asm/elf.h>
32
33#define TEST_KERNEL // verify kernel operations FIXME: remove
34
35
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 * On xtensa no hardware single-step state lives in the child here,
 * so there is nothing to clear.
 */

void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}
46
47int sys_ptrace(long request, long pid, long addr, long data)
48{
49 struct task_struct *child;
50 int ret = -EPERM;
51
52 lock_kernel();
53
54#if 0
55 if ((int)request != 1)
56 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
57 (int) request, (int) pid, (unsigned long) addr,
58 (unsigned long) data);
59#endif
60
61 if (request == PTRACE_TRACEME) {
62
63 /* Are we already being traced? */
64
65 if (current->ptrace & PT_PTRACED)
66 goto out;
67
68 if ((ret = security_ptrace(current->parent, current)))
69 goto out;
70
71 /* Set the ptrace bit in the process flags. */
72
73 current->ptrace |= PT_PTRACED;
74 ret = 0;
75 goto out;
76 }
77
78 ret = -ESRCH;
79 read_lock(&tasklist_lock);
80 child = find_task_by_pid(pid);
81 if (child)
82 get_task_struct(child);
83 read_unlock(&tasklist_lock);
84 if (!child)
85 goto out;
86
87 ret = -EPERM;
88 if (pid == 1) /* you may not mess with init */
89 goto out;
90
91 if (request == PTRACE_ATTACH) {
92 ret = ptrace_attach(child);
93 goto out_tsk;
94 }
95
96 if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
97 goto out_tsk;
98
99 switch (request) {
100 case PTRACE_PEEKTEXT: /* read word at location addr. */
101 case PTRACE_PEEKDATA:
102 {
103 unsigned long tmp;
104 int copied;
105
106 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
107 ret = -EIO;
108 if (copied != sizeof(tmp))
109 break;
110 ret = put_user(tmp,(unsigned long *) data);
111
112 goto out;
113 }
114
115 /* Read the word at location addr in the USER area. */
116
117 case PTRACE_PEEKUSR:
118 {
119 struct pt_regs *regs;
120 unsigned long tmp;
121
122 regs = xtensa_pt_regs(child);
123 tmp = 0; /* Default return value. */
124
125 switch(addr) {
126
127 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
128 {
129 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
130 ar &= (XCHAL_NUM_AREGS - 1);
131 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
132 tmp = regs->areg[ar];
133 else
134 ret = -EIO;
135 break;
136 }
137 case REG_A_BASE ... REG_A_BASE + 15:
138 tmp = regs->areg[addr - REG_A_BASE];
139 break;
140 case REG_PC:
141 tmp = regs->pc;
142 break;
143 case REG_PS:
144 /* Note: PS.EXCM is not set while user task is running;
145 * its being set in regs is for exception handling
146 * convenience. */
147 tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
148 break;
149 case REG_WB:
150 tmp = regs->windowbase;
151 break;
152 case REG_WS:
153 tmp = regs->windowstart;
154 break;
155 case REG_LBEG:
156 tmp = regs->lbeg;
157 break;
158 case REG_LEND:
159 tmp = regs->lend;
160 break;
161 case REG_LCOUNT:
162 tmp = regs->lcount;
163 break;
164 case REG_SAR:
165 tmp = regs->sar;
166 break;
167 case REG_DEPC:
168 tmp = regs->depc;
169 break;
170 case REG_EXCCAUSE:
171 tmp = regs->exccause;
172 break;
173 case REG_EXCVADDR:
174 tmp = regs->excvaddr;
175 break;
176 case SYSCALL_NR:
177 tmp = regs->syscall;
178 break;
179 default:
180 tmp = 0;
181 ret = -EIO;
182 goto out;
183 }
184 ret = put_user(tmp, (unsigned long *) data);
185 goto out;
186 }
187
188 case PTRACE_POKETEXT: /* write the word at location addr. */
189 case PTRACE_POKEDATA:
190 if (access_process_vm(child, addr, &data, sizeof(data), 1)
191 == sizeof(data))
192 break;
193 ret = -EIO;
194 goto out;
195
196 case PTRACE_POKEUSR:
197 {
198 struct pt_regs *regs;
199 regs = xtensa_pt_regs(child);
200
201 switch (addr) {
202 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
203 {
204 int ar = addr - REG_AR_BASE - regs->windowbase * 4;
205 if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
206 regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
207 else
208 ret = -EIO;
209 break;
210 }
211 case REG_A_BASE ... REG_A_BASE + 15:
212 regs->areg[addr - REG_A_BASE] = data;
213 break;
214 case REG_PC:
215 regs->pc = data;
216 break;
217 case SYSCALL_NR:
218 regs->syscall = data;
219 break;
220#ifdef TEST_KERNEL
221 case REG_WB:
222 regs->windowbase = data;
223 break;
224 case REG_WS:
225 regs->windowstart = data;
226 break;
227#endif
228
229 default:
230 /* The rest are not allowed. */
231 ret = -EIO;
232 break;
233 }
234 break;
235 }
236
237 /* continue and stop at next (return from) syscall */
238 case PTRACE_SYSCALL:
239 case PTRACE_CONT: /* restart after signal. */
240 {
241 ret = -EIO;
242 if ((unsigned long) data > _NSIG)
243 break;
244 if (request == PTRACE_SYSCALL)
245 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
246 else
247 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
248 child->exit_code = data;
249 /* Make sure the single step bit is not set. */
250 child->ptrace &= ~PT_SINGLESTEP;
251 wake_up_process(child);
252 ret = 0;
253 break;
254 }
255
256 /*
257 * make the child exit. Best I can do is send it a sigkill.
258 * perhaps it should be put in the status that it wants to
259 * exit.
260 */
261 case PTRACE_KILL:
262 ret = 0;
263 if (child->state == EXIT_ZOMBIE) /* already dead */
264 break;
265 child->exit_code = SIGKILL;
266 child->ptrace &= ~PT_SINGLESTEP;
267 wake_up_process(child);
268 break;
269
270 case PTRACE_SINGLESTEP:
271 ret = -EIO;
272 if ((unsigned long) data > _NSIG)
273 break;
274 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
275 child->ptrace |= PT_SINGLESTEP;
276 child->exit_code = data;
277 wake_up_process(child);
278 ret = 0;
279 break;
280
281 case PTRACE_GETREGS:
282 {
283 /* 'data' points to user memory in which to write.
284 * Mainly due to the non-live register values, we
285 * reformat the register values into something more
286 * standard. For convenience, we use the handy
287 * elf_gregset_t format. */
288
289 xtensa_gregset_t format;
290 struct pt_regs *regs = xtensa_pt_regs(child);
291
292 do_copy_regs (&format, regs, child);
293
294 /* Now, copy to user space nice and easy... */
295 ret = 0;
296 if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
297 ret = -EFAULT;
298 break;
299 }
300
301 case PTRACE_SETREGS:
302 {
303 /* 'data' points to user memory that contains the new
304 * values in the elf_gregset_t format. */
305
306 xtensa_gregset_t format;
307 struct pt_regs *regs = xtensa_pt_regs(child);
308
309 if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
310 ret = -EFAULT;
311 break;
312 }
313
314 /* FIXME: Perhaps we want some sanity checks on
315 * these user-space values? See ARM version. Are
316 * debuggers a security concern? */
317
318 do_restore_regs (&format, regs, child);
319
320 ret = 0;
321 break;
322 }
323
324 case PTRACE_GETFPREGS:
325 {
326 /* 'data' points to user memory in which to write.
327 * For convenience, we use the handy
328 * elf_fpregset_t format. */
329
330 elf_fpregset_t fpregs;
331 struct pt_regs *regs = xtensa_pt_regs(child);
332
333 do_save_fpregs (&fpregs, regs, child);
334
335 /* Now, copy to user space nice and easy... */
336 ret = 0;
337 if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
338 ret = -EFAULT;
339
340 break;
341 }
342
343 case PTRACE_SETFPREGS:
344 {
345 /* 'data' points to user memory that contains the new
346 * values in the elf_fpregset_t format.
347 */
348 elf_fpregset_t fpregs;
349 struct pt_regs *regs = xtensa_pt_regs(child);
350
351 ret = 0;
352 if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
353 ret = -EFAULT;
354 break;
355 }
356
357 if (do_restore_fpregs (&fpregs, regs, child))
358 ret = -EIO;
359 break;
360 }
361
362 case PTRACE_GETFPREGSIZE:
363 /* 'data' points to 'unsigned long' set to the size
364 * of elf_fpregset_t
365 */
366 ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
367 break;
368
369 case PTRACE_DETACH: /* detach a process that was attached. */
370 ret = ptrace_detach(child, data);
371 break;
372
373 default:
374 ret = ptrace_request(child, request, addr, data);
375 goto out;
376 }
377out_tsk:
378 put_task_struct(child);
379out:
380 unlock_kernel();
381 return ret;
382}
383
/*
 * Called on syscall entry/exit when TIF_SYSCALL_TRACE may be set.
 * Notifies the tracing parent (if any) and forwards any signal the
 * tracer requested via the child's exit_code.
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * The 0x80 provides a way for the tracing parent to distinguish
	 * between a syscall stop and SIGTRAP delivery
	 */
	ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
new file mode 100644
index 000000000000..d40f4b1b75ac
--- /dev/null
+++ b/arch/xtensa/kernel/semaphore.c
@@ -0,0 +1,226 @@
1/*
2 * arch/xtensa/kernel/semaphore.c
3 *
4 * Generic semaphore code. Buyer beware. Do your own specific changes
5 * in <asm/semaphore-helper.h>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/init.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
/*
 * These two _must_ execute atomically wrt each other.
 *
 * wake_one_more() records one pending wakeup on the semaphore; it is
 * consumed later by one of the waking_non_zero*() helpers under
 * semaphore_wake_lock.
 */

static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}
33
34static __inline__ int waking_non_zero(struct semaphore *sem)
35{
36 unsigned long flags;
37 int ret = 0;
38
39 spin_lock_irqsave(&semaphore_wake_lock, flags);
40 if (sem->sleepers > 0) {
41 sem->sleepers--;
42 ret = 1;
43 }
44 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
45 return ret;
46}
47
48/*
49 * waking_non_zero_interruptible:
50 * 1 got the lock
51 * 0 go to sleep
52 * -EINTR interrupted
53 *
54 * We must undo the sem->count down_interruptible() increment while we are
55 * protected by the spinlock in order to make atomic this atomic_inc() with the
56 * atomic_read() in wake_one_more(), otherwise we can race. -arca
57 */
58
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		/* A wakeup was pending: consume it, we hold the semaphore. */
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* Interrupted: undo the caller's count decrement
		 * (see comment above) before bailing out. */
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
76
77/*
78 * waking_non_zero_trylock:
79 * 1 failed to lock
80 * 0 got the lock
81 *
82 * We must undo the sem->count down_trylock() increment while we are
83 * protected by the spinlock in order to make atomic this atomic_inc() with the
84 * atomic_read() in wake_one_more(), otherwise we can race. -arca
85 */
86
87static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
88{
89 unsigned long flags;
90 int ret = 1;
91
92 spin_lock_irqsave(&semaphore_wake_lock, flags);
93 if (sem->sleepers <= 0)
94 atomic_inc(&sem->count);
95 else {
96 sem->sleepers--;
97 ret = 0;
98 }
99 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
100 return ret;
101}
102
103spinlock_t semaphore_wake_lock;
104
105/*
106 * Semaphores are implemented using a two-way counter:
107 * The "count" variable is decremented for each process
108 * that tries to sleep, while the "waking" variable is
109 * incremented when the "up()" code goes to wake up waiting
110 * processes.
111 *
112 * Notably, the inline "up()" and "down()" functions can
113 * efficiently test if they need to do any extra work (up
114 * needs to do something only if count was negative before
115 * the increment operation.
116 *
117 * waking_non_zero() (from asm/semaphore.h) must execute
118 * atomically.
119 *
120 * When __up() is called, the count was negative before
121 * incrementing it, and we need to wake up somebody.
122 *
123 * This routine adds one to the count of processes that need to
124 * wake up and exit. ALL waiting processes actually wake up but
125 * only the one that gets to the "waking" field first will gate
126 * through and acquire the semaphore. The others will go back
127 * to sleep.
128 *
129 * Note that these functions are only called when there is
130 * contention on the lock, and as such all this is the
131 * "non-critical" part of the whole semaphore business. The
132 * critical part is the inline stuff in <asm/semaphore.h>
133 * where we want to avoid any extra jumps and calls.
134 */
135
void __up(struct semaphore *sem)
{
	/* Record one pending wakeup, then rouse all waiters; the first
	 * to consume the wakeup in waking_non_zero() wins the semaphore. */
	wake_one_more(sem);
	wake_up(&sem->wait);
}
141
142/*
143 * Perform the "down" function. Return zero for semaphore acquired,
144 * return negative for signalled out of the function.
145 *
146 * If called from __down, the return is ignored and the wait loop is
147 * not interruptible. This means that a task waiting on a semaphore
148 * using "down()" cannot be killed until someone does an "up()" on
149 * the semaphore.
150 *
151 * If called from __down_interruptible, the return value gets checked
152 * upon return. If the return value is negative then the task continues
153 * with the negative value in the return register (it can be tested by
154 * the caller).
155 *
156 * Either form may be used in conjunction with "up()".
157 *
158 */
159
/* Common prologue for __down() and __down_interruptible(): the
 * current task plus a wait-queue entry linking it to the semaphore. */
#define DOWN_VAR \
	struct task_struct *tsk = current; \
	wait_queue_t wait; \
	init_waitqueue_entry(&wait, tsk);

/* Queue the task on sem->wait in the given state and open the retry
 * loop; the caller-supplied loop body decides when to break out. */
#define DOWN_HEAD(task_state) \
 \
 \
	tsk->state = (task_state); \
	add_wait_queue(&sem->wait, &wait); \
 \
	/* \
	 * Ok, we're set up.  sem->count is known to be less than zero \
	 * so we must wait. \
	 * \
	 * We can let go the lock for purposes of waiting. \
	 * We re-acquire it after awaking so as to protect \
	 * all semaphore operations. \
	 * \
	 * If "up()" is called before we call waking_non_zero() then \
	 * we will catch it right away.  If it is called later then \
	 * we will have to go through a wakeup cycle to catch it. \
	 * \
	 * Multiple waiters contend for the semaphore lock to see \
	 * who gets to gate through and who has to wait some more. \
	 */ \
	for (;;) {

/* Close the retry loop and dequeue the (now runnable) task. */
#define DOWN_TAIL(task_state) \
	tsk->state = (task_state);	\
	} \
	tsk->state = TASK_RUNNING; \
	remove_wait_queue(&sem->wait, &wait);
193
/* Uninterruptible acquire: sleep until a wakeup can be consumed. */
void __sched __down(struct semaphore * sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
203
/* Interruptible acquire: returns 0 on success, -EINTR if a signal
 * arrived before a wakeup could be consumed. */
int __sched __down_interruptible(struct semaphore * sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}
222
/* Non-blocking acquire: 0 on success, 1 if the semaphore is busy. */
int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
new file mode 100644
index 000000000000..1f5bf5d624e4
--- /dev/null
+++ b/arch/xtensa/kernel/setup.c
@@ -0,0 +1,520 @@
1/*
2 * arch/xtensa/setup.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995 Linus Torvalds
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Kevin Chea
14 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/proc_fs.h>
21#include <linux/tty.h>
22#include <linux/bootmem.h>
23#include <linux/kernel.h>
24
25#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
26# include <linux/console.h>
27#endif
28
29#ifdef CONFIG_RTC
30# include <linux/timex.h>
31#endif
32
33#ifdef CONFIG_PROC_FS
34# include <linux/seq_file.h>
35#endif
36
37#include <asm/system.h>
38#include <asm/bootparam.h>
39#include <asm/pgtable.h>
40#include <asm/processor.h>
41#include <asm/timex.h>
42#include <asm/platform.h>
43#include <asm/page.h>
44#include <asm/setup.h>
45
46#include <xtensa/config/system.h>
47
48#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
49struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
50#endif
51
52#ifdef CONFIG_BLK_DEV_FD
53extern struct fd_ops no_fd_ops;
54struct fd_ops *fd_ops;
55#endif
56
57#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
58extern struct ide_ops no_ide_ops;
59struct ide_ops *ide_ops;
60#endif
61
62extern struct rtc_ops no_rtc_ops;
63struct rtc_ops *rtc_ops;
64
65#ifdef CONFIG_PC_KEYB
66extern struct kbd_ops no_kbd_ops;
67struct kbd_ops *kbd_ops;
68#endif
69
70#ifdef CONFIG_BLK_DEV_INITRD
71extern void *initrd_start;
72extern void *initrd_end;
73extern void *__initrd_start;
74extern void *__initrd_end;
75int initrd_is_mapped = 0;
76extern int initrd_below_start_ok;
77#endif
78
79unsigned char aux_device_present;
80extern unsigned long loops_per_jiffy;
81
82/* Command line specified as configuration option. */
83
84static char command_line[COMMAND_LINE_SIZE];
85
86#ifdef CONFIG_CMDLINE_BOOL
87static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
88#endif
89
90sysmem_info_t __initdata sysmem;
91
92#ifdef CONFIG_BLK_DEV_INITRD
93int initrd_is_mapped;
94#endif
95
96extern void init_mmu(void);
97
98/*
99 * Boot parameter parsing.
100 *
101 * The Xtensa port uses a list of variable-sized tags to pass data to
102 * the kernel. The first tag must be a BP_TAG_FIRST tag for the list
103 * to be recognised. The list is terminated with a zero-sized
104 * BP_TAG_LAST tag.
105 */
106
107typedef struct tagtable {
108 u32 tag;
109 int (*parse)(const bp_tag_t*);
110} tagtable_t;
111
112#define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \
113 __attribute__((unused, __section__(".taglist"))) = { tag, fn }
114
115/* parse current tag */
116
117static int __init parse_tag_mem(const bp_tag_t *tag)
118{
119 meminfo_t *mi = (meminfo_t*)(tag->data);
120
121 if (mi->type != MEMORY_TYPE_CONVENTIONAL)
122 return -1;
123
124 if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
125 printk(KERN_WARNING
126 "Ignoring memory bank 0x%08lx size %ldKB\n",
127 (unsigned long)mi->start,
128 (unsigned long)mi->end - (unsigned long)mi->start);
129 return -EINVAL;
130 }
131 sysmem.bank[sysmem.nr_banks].type = mi->type;
132 sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start);
133 sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_SIZE;
134 sysmem.nr_banks++;
135
136 return 0;
137}
138
139__tagtable(BP_TAG_MEMORY, parse_tag_mem);
140
141#ifdef CONFIG_BLK_DEV_INITRD
142
143static int __init parse_tag_initrd(const bp_tag_t* tag)
144{
145 meminfo_t* mi;
146 mi = (meminfo_t*)(tag->data);
147 initrd_start = (void*)(mi->start);
148 initrd_end = (void*)(mi->end);
149
150 return 0;
151}
152
153__tagtable(BP_TAG_INITRD, parse_tag_initrd);
154
155#endif /* CONFIG_BLK_DEV_INITRD */
156
/* Copy the boot-loader command line from a BP_TAG_COMMAND_LINE tag,
 * forcing NUL termination (strncpy does not terminate on truncation). */
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
	strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE);
	command_line[COMMAND_LINE_SIZE - 1] = '\0';
	return 0;
}
163
164__tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline);
165
/*
 * Walk the boot-parameter tag list, dispatching each tag to the
 * handler registered for it in the .taglist section.  The list must
 * begin with BP_TAG_FIRST and is terminated by BP_TAG_LAST.
 * Always returns 0; unknown tags are warned about and skipped.
 */
static int __init parse_bootparam(const bp_tag_t* tag)
{
	extern tagtable_t __tagtable_begin, __tagtable_end;
	tagtable_t *t;

	/* Boot parameters must start with a BP_TAG_FIRST tag. */

	if (tag->id != BP_TAG_FIRST) {
		printk(KERN_WARNING "Invalid boot parameters!\n");
		return 0;
	}

	/* Skip the BP_TAG_FIRST tag's header and payload. */
	tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size);

	/* Parse all tags. */

	while (tag != NULL && tag->id != BP_TAG_LAST) {
		for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
			if (tag->id == t->tag) {
				t->parse(tag);
				break;
			}
		}
		/* t only reaches the end marker when no handler matched. */
		if (t == &__tagtable_end)
			printk(KERN_WARNING "Ignoring tag "
			       "0x%08x\n", tag->id);
		/* Advance past this tag's header plus variable payload. */
		tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
	}

	return 0;
}
197
198/*
199 * Initialize architecture. (Early stage)
200 */
201
void __init init_arch(bp_tag_t *bp_start)
{

#ifdef CONFIG_BLK_DEV_INITRD
	/* Default to the initrd linked into the kernel image; a
	 * BP_TAG_INITRD tag may override this below. */
	initrd_start = &__initrd_start;
	initrd_end = &__initrd_end;
#endif

	sysmem.nr_banks = 0;

#ifdef CONFIG_CMDLINE_BOOL
	/* Config-supplied default; a BP_TAG_COMMAND_LINE tag may override. */
	strcpy(command_line, default_command_line);
#endif

	/* Parse boot parameters */

	if (bp_start)
		parse_bootparam(bp_start);

	/* No memory tags seen: fall back to the platform defaults. */
	if (sysmem.nr_banks == 0) {
		sysmem.nr_banks = 1;
		sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
		sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
				     + PLATFORM_DEFAULT_MEM_SIZE;
	}

	/* Early hook for platforms */

	platform_init(bp_start);

	/* Initialize MMU. */

	init_mmu();
}
236
237/*
238 * Initialize system. Setup memory and reserve regions.
239 */
240
241extern char _end;
242extern char _stext;
243extern char _WindowVectors_text_start;
244extern char _WindowVectors_text_end;
245extern char _DebugInterruptVector_literal_start;
246extern char _DebugInterruptVector_text_end;
247extern char _KernelExceptionVector_literal_start;
248extern char _KernelExceptionVector_text_end;
249extern char _UserExceptionVector_literal_start;
250extern char _UserExceptionVector_text_end;
251extern char _DoubleExceptionVector_literal_start;
252extern char _DoubleExceptionVector_text_end;
253
254void __init setup_arch(char **cmdline_p)
255{
256 extern int mem_reserve(unsigned long, unsigned long, int);
257 extern void bootmem_init(void);
258
259 memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
260 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
261 *cmdline_p = command_line;
262
263 /* Reserve some memory regions */
264
265#ifdef CONFIG_BLK_DEV_INITRD
266 if (initrd_start < initrd_end) {
267 initrd_is_mapped = mem_reserve(__pa(initrd_start),
268 __pa(initrd_end), 0);
269 initrd_below_start_ok = 1;
270 } else {
271 initrd_start = 0;
272 }
273#endif
274
275 mem_reserve(__pa(&_stext),__pa(&_end), 1);
276
277 mem_reserve(__pa(&_WindowVectors_text_start),
278 __pa(&_WindowVectors_text_end), 0);
279
280 mem_reserve(__pa(&_DebugInterruptVector_literal_start),
281 __pa(&_DebugInterruptVector_text_end), 0);
282
283 mem_reserve(__pa(&_KernelExceptionVector_literal_start),
284 __pa(&_KernelExceptionVector_text_end), 0);
285
286 mem_reserve(__pa(&_UserExceptionVector_literal_start),
287 __pa(&_UserExceptionVector_text_end), 0);
288
289 mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
290 __pa(&_DoubleExceptionVector_text_end), 0);
291
292 bootmem_init();
293
294 platform_setup(cmdline_p);
295
296
297 paging_init();
298
299#ifdef CONFIG_VT
300# if defined(CONFIG_VGA_CONSOLE)
301 conswitchp = &vga_con;
302# elif defined(CONFIG_DUMMY_CONSOLE)
303 conswitchp = &dummy_con;
304# endif
305#endif
306
307#if CONFIG_PCI
308 platform_pcibios_init();
309#endif
310}
311
/* Reboot the machine; 'cmd' is currently ignored by the platform hook. */
void machine_restart(char * cmd)
{
	platform_restart();
}
316
/* Halt: let the platform stop the hardware, then spin forever in
 * case the hook returns. */
void machine_halt(void)
{
	platform_halt();
	for (;;)
		;
}
322
/* Power off via the platform hook; spin forever if it returns. */
void machine_power_off(void)
{
	platform_power_off();
	for (;;)
		;
}
328#ifdef CONFIG_PROC_FS
329
330/*
331 * Display some core information through /proc/cpuinfo.
332 */
333
/* seq_file 'show' hook: emit all of /proc/cpuinfo for the single CPU.
 * The feature/cache/MMU figures come from the XCHAL_* build-time
 * configuration constants, so most lines are assembled with #if. */
static int
c_show(struct seq_file *f, void *slot)
{
	/* high-level stuff */
	seq_printf(f,"processor\t: 0\n"
		     "vendor_id\t: Tensilica\n"
		     "model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
		     "core ID\t\t: " XCHAL_CORE_ID "\n"
		     "build ID\t: 0x%x\n"
		     "byte order\t: %s\n"
 		     "cpu MHz\t\t: %lu.%02lu\n"
		     "bogomips\t: %lu.%02lu\n",
		     XCHAL_BUILD_UNIQUE_ID,
		     XCHAL_HAVE_BE ?  "big" : "little",
		     CCOUNT_PER_JIFFY/(1000000/HZ),
		     (CCOUNT_PER_JIFFY/(10000/HZ)) % 100,
		     loops_per_jiffy/(500000/HZ),
		     (loops_per_jiffy/(5000/HZ)) % 100);

	/* Optional ISA features, one token per configured option. */
	seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI
		     "nmi "
#endif
#if XCHAL_HAVE_DEBUG
		     "debug "
# if XCHAL_HAVE_OCD
		     "ocd "
# endif
#endif
#if XCHAL_HAVE_DENSITY
	    	     "density "
#endif
#if XCHAL_HAVE_BOOLEANS
		     "boolean "
#endif
#if XCHAL_HAVE_LOOPS
		     "loop "
#endif
#if XCHAL_HAVE_NSA
		     "nsa "
#endif
#if XCHAL_HAVE_MINMAX
		     "minmax "
#endif
#if XCHAL_HAVE_SEXT
		     "sext "
#endif
#if XCHAL_HAVE_CLAMPS
		     "clamps "
#endif
#if XCHAL_HAVE_MAC16
		     "mac16 "
#endif
#if XCHAL_HAVE_MUL16
		     "mul16 "
#endif
#if XCHAL_HAVE_MUL32
		     "mul32 "
#endif
#if XCHAL_HAVE_MUL32_HIGH
		     "mul32h "
#endif
#if XCHAL_HAVE_FP
		     "fpu "
#endif
		     "\n");

	/* Registers. */
	seq_printf(f,"physical aregs\t: %d\n"
		     "misc regs\t: %d\n"
		     "ibreak\t\t: %d\n"
		     "dbreak\t\t: %d\n",
		     XCHAL_NUM_AREGS,
		     XCHAL_NUM_MISC_REGS,
		     XCHAL_NUM_IBREAK,
		     XCHAL_NUM_DBREAK);


	/* Interrupt. */
	seq_printf(f,"num ints\t: %d\n"
		     "ext ints\t: %d\n"
		     "int levels\t: %d\n"
		     "timers\t\t: %d\n"
		     "debug level\t: %d\n",
		     XCHAL_NUM_INTERRUPTS,
		     XCHAL_NUM_EXTINTERRUPTS,
		     XCHAL_NUM_INTLEVELS,
		     XCHAL_NUM_TIMERS,
		     XCHAL_DEBUGLEVEL);

	/* Coprocessors */
#if XCHAL_HAVE_CP
	seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
#else
	seq_printf(f, "coprocessors\t: none\n");
#endif

	/* {I,D}{RAM,ROM} and XLMI */
	seq_printf(f,"inst ROMs\t: %d\n"
		     "inst RAMs\t: %d\n"
		     "data ROMs\t: %d\n"
		     "data RAMs\t: %d\n"
		     "XLMI ports\t: %d\n",
		     XCHAL_NUM_IROM,
		     XCHAL_NUM_IRAM,
		     XCHAL_NUM_DROM,
		     XCHAL_NUM_DRAM,
		     XCHAL_NUM_XLMI);

	/* Cache */
	seq_printf(f,"icache line size: %d\n"
		     "icache ways\t: %d\n"
		     "icache size\t: %d\n"
		     "icache flags\t: "
#if XCHAL_ICACHE_LINE_LOCKABLE
		     "lock"
#endif
		     "\n"
		     "dcache line size: %d\n"
		     "dcache ways\t: %d\n"
		     "dcache size\t: %d\n"
		     "dcache flags\t: "
#if XCHAL_DCACHE_IS_WRITEBACK
		     "writeback"
#endif
#if XCHAL_DCACHE_LINE_LOCKABLE
		     "lock"
#endif
		     "\n",
		     XCHAL_ICACHE_LINESIZE,
		     XCHAL_ICACHE_WAYS,
		     XCHAL_ICACHE_SIZE,
		     XCHAL_DCACHE_LINESIZE,
		     XCHAL_DCACHE_WAYS,
		     XCHAL_DCACHE_SIZE);

	/* MMU */
	seq_printf(f,"ASID bits\t: %d\n"
		     "ASID invalid\t: %d\n"
		     "ASID kernel\t: %d\n"
		     "rings\t\t: %d\n"
		     "itlb ways\t: %d\n"
		     "itlb AR ways\t: %d\n"
		     "dtlb ways\t: %d\n"
		     "dtlb AR ways\t: %d\n",
		     XCHAL_MMU_ASID_BITS,
		     XCHAL_MMU_ASID_INVALID,
		     XCHAL_MMU_ASID_KERNEL,
		     XCHAL_MMU_RINGS,
		     XCHAL_ITLB_WAYS,
		     XCHAL_ITLB_ARF_WAYS,
		     XCHAL_DTLB_WAYS,
		     XCHAL_DTLB_ARF_WAYS);

	return 0;
}
490
491/*
492 * We show only CPU #0 info.
493 */
494static void *
495c_start(struct seq_file *f, loff_t *pos)
496{
497 return (void *) ((*pos == 0) ? (void *)1 : NULL);
498}
499
/* seq_file 'next' hook: single-CPU, so there is never a next entry. */
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	return NULL;
}
505
/* seq_file 'stop' hook: nothing was allocated in c_start(). */
static void
c_stop(struct seq_file *f, void *v)
{
}
510
511struct seq_operations cpuinfo_op =
512{
513 start: c_start,
514 next: c_next,
515 stop: c_stop,
516 show: c_show
517};
518
519#endif /* CONFIG_PROC_FS */
520
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
new file mode 100644
index 000000000000..df6e1e17b096
--- /dev/null
+++ b/arch/xtensa/kernel/signal.c
@@ -0,0 +1,713 @@
1// TODO coprocessor stuff
2/*
3 * linux/arch/xtensa/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * Joe Taylor <joe@tensilica.com>
9 * Chris Zankel <chris@zankel.net>
10 *
11 *
12 *
13 */
14
15#include <xtensa/config/core.h>
16#include <xtensa/hal.h>
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/errno.h>
24#include <linux/wait.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/personality.h>
29#include <asm/ucontext.h>
30#include <asm/uaccess.h>
31#include <asm/pgtable.h>
32#include <asm/cacheflush.h>
33
34#define DEBUG_SIG 0
35
36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
37
38asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options,
39 struct rusage * ru);
40asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
41
42extern struct task_struct *coproc_owners[];
43
44
45/*
46 * Atomically swap in the new signal mask, and wait for a signal.
47 */
48
/*
 * Classic sigsuspend(2): atomically install the mask passed in a3 and
 * sleep until a handled signal arrives.  Always "fails" with -EINTR
 * (placed in a2, the syscall return register) once a handler has run;
 * do_signal() is given the saved mask so it is restored on return.
 */
int sys_sigsuspend(struct pt_regs *regs)
{
	/* Xtensa passes syscall arguments in registers; the mask is in a3. */
	old_sigset_t mask = (old_sigset_t) regs->areg[3];
	sigset_t saveset;

	mask &= _BLOCKABLE;	/* SIGKILL/SIGSTOP may never be blocked */
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;	/* remembered for do_signal() below */
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->areg[2] = -EINTR;
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(regs, &saveset))
			return -EINTR;
	}
}
69
/*
 * rt_sigsuspend(2): like sys_sigsuspend() above but takes a full
 * sigset_t from userspace (pointer in a4, size in a3) rather than the
 * legacy single-word mask.
 */
asmlinkage int
sys_rt_sigsuspend(struct pt_regs *regs)
{
	sigset_t *unewset = (sigset_t *) regs->areg[4];
	size_t sigsetsize = (size_t) regs->areg[3];
	sigset_t saveset, newset;
	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);	/* keep SIGKILL/SIGSTOP deliverable */
	spin_lock_irq(&current->sighand->siglock);
	saveset = current->blocked;	/* restored by do_signal() on delivery */
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	regs->areg[2] = -EINTR;		/* userspace-visible return value */
	while (1) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
		if (do_signal(regs, &saveset))
			return -EINTR;
	}
}
97
/*
 * Old-style sigaction(2): convert the userspace old_sigaction into a
 * k_sigaction, let do_sigaction() do the real work, then copy the
 * previous action back out if the caller asked for it.
 */
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction *act,
	      struct old_sigaction *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* A single verify_area() covers the whole struct; the
		 * unchecked __get_user()s below rely on that check. */
		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		/* old_sigaction only carries the first word of the mask */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}
129
/*
 * sigaltstack(2): new stack descriptor in a4, old descriptor out via
 * a3; the current user SP (a1) lets do_sigaltstack() tell whether we
 * are already running on the alternate stack.
 */
asmlinkage int
sys_sigaltstack(struct pt_regs *regs)
{
	const stack_t *uss = (stack_t *) regs->areg[4];
	stack_t *uoss = (stack_t *) regs->areg[3];

	/* Must not arrive here while still resolving a double exception. */
	if (regs->depc > 64)
		panic ("Double exception sys_sigreturn\n");


	return do_sigaltstack(uss, uoss, regs->areg[1]);
}
142
143
144/*
145 * Do a signal return; undo the signal stack.
146 */
147
/*
 * Old-style signal frame pushed onto the user stack by setup_frame()
 * and read back by sys_sigreturn(); the layout is therefore part of
 * the userspace ABI.
 */
struct sigframe
{
	struct sigcontext sc;		/* saved user register context */
	struct _cpstate cpstate;	/* coprocessor/extra state save area */
	unsigned long extramask[_NSIG_WORDS-1];	/* mask words beyond sc.oldmask */
	unsigned char retcode[6];	/* MOVI+SYSCALL sigreturn trampoline */
	unsigned int reserved[4]; /* Reserved area for chaining */
	unsigned int window[4]; /* Window of 4 registers for initial context */
};
157
/*
 * Realtime signal frame pushed by setup_rt_frame() and consumed by
 * sys_rt_sigreturn(); layout is part of the userspace ABI.
 */
struct rt_sigframe
{
	struct siginfo info;		/* full siginfo for SA_SIGINFO handlers */
	struct ucontext uc;		/* sigmask, altstack and machine context */
	struct _cpstate cpstate;	/* coprocessor/extra state save area */
	unsigned char retcode[6];	/* MOVI+SYSCALL rt_sigreturn trampoline */
	unsigned int reserved[4]; /* Reserved area for chaining */
	unsigned int window[4]; /* Window of 4 registers for initial context */
};
167
168extern void release_all_cp (struct task_struct *);
169
170
/*
 * FIXME: restore coprocessor/extra state from the signal frame.
 * Currently a no-op (returns 0); the intended implementation is kept
 * below under #if 0.
 */
static inline int
restore_cpextra (struct _cpstate *buf)
{
#if 0
	/* The signal handler may have used coprocessors in which
	 * case they are still enabled.  We disable them to force a
	 * reloading of the original task's CP state by the lazy
	 * context-switching mechanisms of CP exception handling.
	 * Also, we essentially discard any coprocessor state that the
	 * signal handler created. */

	struct task_struct *tsk = current;
	release_all_cp(tsk);
	return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE);
#endif
	return 0;
}
189
/* Note: We don't copy the double-exception 'tregs'; we have to finish
 * the double exception first before we return to the signal handler.
 * That double-exception handler might cause another double exception,
 * but I think we are fine, as the situation is the same as if we had
 * returned to the signal handler and got an interrupt immediately...
 */
192
193
/*
 * Rebuild the interrupted user context from a sigcontext written by
 * setup_sigcontext().  Returns non-zero if anything faulted or the
 * frame looks corrupt; the caller then sends SIGSEGV.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
	struct thread_struct *thread;
	unsigned int err = 0;
	unsigned long ps;
	struct _cpstate *buf;

#define COPY(x)	err |= __get_user(regs->x, &sc->sc_##x)
	COPY(pc);
	COPY(depc);
	COPY(wmask);
	COPY(lbeg);
	COPY(lend);
	COPY(lcount);
	COPY(sar);
	COPY(windowbase);
	COPY(windowstart);
#undef COPY

	/* For PS, restore only PS.CALLINC.
	 * Assume that all other bits are either the same as for the signal
	 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
	 */
	err |= __get_user(ps, &sc->sc_ps);
	regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
		| (ps & XCHAL_PS_CALLINC_MASK);

	/* Additional corruption checks */

	/* Window base/start must describe registers that exist ... */
	if ((regs->windowbase >= (XCHAL_NUM_AREGS/4))
	    || ((regs->windowstart & ~((1<<(XCHAL_NUM_AREGS/4)) - 1)) != 0) )
		err = 1;
	/* ... and active zero-overhead-loop bounds must stay in user space. */
	if ((regs->lcount > 0)
	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
		err = 1;

	/* Restore extended register state.
	 * See struct thread_struct in processor.h.
	 */
	thread = &current->thread;

	err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
	err |= __get_user(buf, &sc->sc_cpstate);
	if (buf) {
		/* Coprocessor state pointer came from userspace - validate. */
		if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
			goto badframe;
		err |= restore_cpextra(buf);
	}

	regs->syscall = -1;		/* disable syscall checks */
	return err;

badframe:
	return 1;
}
250
/*
 * Flush any live coprocessor state owned by 'tsk' into its
 * task_struct save area.  Currently a no-op: the implementation is
 * stubbed out under "#if 0" pending the coprocessor rework (see the
 * FIXME), so only the IRQ bracketing remains.
 */
static inline void
flush_my_cpstate(struct task_struct *tsk)
{
	unsigned long flags;
	local_irq_save(flags);

#if 0	// FIXME
	for (i = 0; i < XCHAL_CP_NUM; i++) {
		if (tsk == coproc_owners[i]) {
			xthal_validate_cp(i);
			xthal_save_cpregs(tsk->thread.cpregs_ptr[i], i);

			/* Invalidate and "disown" the cp to allow
			 * callers the chance to reset cp state in the
			 * task_struct. */

			xthal_invalidate_cp(i);
			coproc_owners[i] = 0;
		}
	}
#endif
	local_irq_restore(flags);
}
274
/* Return codes:
	0: nothing saved
	1: stuff to save, successful
  -1: stuff to save, error happened
*/
/* Save the task's coprocessor/extra state into the signal-frame area
 * 'buf', then reset it in the task_struct so the handler starts from
 * clean CP state.  Compiled away entirely when the core has neither
 * extra state nor coprocessors. */
static int
save_cpextra (struct _cpstate *buf)
{
#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
	return 0;
#else

	/* FIXME: If a task has never used a coprocessor, there is
	 * no need to save and restore anything.  Tracking this
	 * information would allow us to optimize this section.
	 * Perhaps we can use current->used_math or (current->flags &
	 * PF_USEDFPU) or define a new field in the thread
	 * structure. */

	/* We flush any live, task-owned cp state to the task_struct,
	 * then copy it all to the sigframe.  Then we clear all
	 * cp/extra state in the task_struct, effectively
	 * clearing/resetting all cp/extra state for the signal
	 * handler (cp-exception handling will load these new values
	 * into the cp/extra registers.)  This step is important for
	 * things like a floating-point cp, where the OS must reset
	 * the FCR to the default rounding mode. */

	int err = 0;
	struct task_struct *tsk = current;

	flush_my_cpstate(tsk);
	/* Note that we just copy everything: 'extra' and 'cp' state together.*/
	err |= __copy_to_user(buf, tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
	memset(tsk->thread.cp_save, 0, XTENSA_CP_EXTRA_SIZE);

#if (XTENSA_CP_EXTRA_SIZE == 0)
#error Sanity check on memset above, cpextra_size should not be zero.
#endif

	return err ? -1 : 1;
#endif
}
318
/*
 * Write the interrupted user context (plus coprocessor state and the
 * old signal mask) into the userspace sigcontext 'sc'.  Returns
 * non-zero if any userspace store faulted.
 */
static int
setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
		 struct pt_regs *regs, unsigned long mask)
{
	struct thread_struct *thread;
	int err = 0;

#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
	COPY(pc);
	COPY(ps);
	COPY(depc);
	COPY(wmask);
	COPY(lbeg);
	COPY(lend);
	COPY(lcount);
	COPY(sar);
	COPY(windowbase);
	COPY(windowstart);
#undef COPY

	/* Save extended register state.
	 * See struct thread_struct in processor.h.
	 */
	thread = &current->thread;
	err |= __copy_to_user (sc->sc_areg, regs->areg, XCHAL_NUM_AREGS * 4);
	err |= save_cpextra(cpstate);
	/* Record NULL if the CP save failed so sigreturn won't restore
	 * from a half-written area. */
	err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
	/* non-iBCS2 extensions.. */
	err |= __put_user(mask, &sc->oldmask);

	return err;
}
352
/*
 * Return from an old-style signal handler: the frame built by
 * setup_frame() sits at the user SP (a1).  Restore the blocked mask
 * and the saved context, then return the interrupted computation's a2
 * so the syscall return path does not clobber it.
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe *frame = (struct sigframe *)regs->areg[1];
	sigset_t set;
	/* Must not arrive here while still resolving a double exception. */
	if (regs->depc > 64)
		panic ("Double exception sys_sigreturn\n");

	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	/* Reassemble the mask from sc.oldmask plus the extramask words. */
	if (__get_user(set.sig[0], &frame->sc.oldmask)
	    || (_NSIG_WORDS > 1
		&& __copy_from_user(&set.sig[1], &frame->extramask,
				    sizeof(frame->extramask))))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->sc))
		goto badframe;
	return regs->areg[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
384
/*
 * Return from a realtime signal handler: restore the sigmask, the
 * machine context and the alternate-stack settings from the
 * rt_sigframe built by setup_rt_frame() at the user SP (a1).
 */
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
	sigset_t set;
	stack_t st;
	int ret;
	/* Bail out if we are still mid-double-exception. */
	if (regs->depc > 64)
	{
		printk("!!!!!!! DEPC !!!!!!!\n");
		return 0;
	}

	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
		goto badframe;
	/* Latch the interrupted a2 before anything can overwrite it. */
	ret = regs->areg[2];

	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack(&st, NULL, regs->areg[1]);

	return ret;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
425
426/*
427 * Set up a signal frame.
428 */
429
430/*
431 * Determine which stack to use..
432 */
433static inline void *
434get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
435{
436 if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
437 sp = current->sas_ss_sp + current->sas_ss_size;
438
439 return (void *)((sp - frame_size) & -16ul);
440}
441
442#define USE_SIGRETURN 0
443#define USE_RT_SIGRETURN 1
444
/*
 * Write a 6-byte trampoline -- "MOVI a2, __NR_(rt_)sigreturn; SYSCALL"
 * -- into 'codemem' (inside the user signal frame) and flush it from
 * the data cache so it can be fetched as code.  Returns non-zero if
 * any userspace store faulted.
 */
static int
gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
{
	unsigned int retcall;
	int err = 0;

#if 0
	/* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
	 * and the xtensa glibc doesn't use it.
	 */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->pr = (unsigned long) ka->sa.sa_restorer;
	} else
#endif /* 0 */
	{

#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)

/* The 12-bit immediate is really split up within the 24-bit MOVI
 * instruction.  As long as the above system call numbers fit within
 * 8-bits, the following code works fine.  See the Xtensa ISA for
 * details.
 */

#error Generating the MOVI instruction below breaks!
#endif

		retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;

#ifdef __XTENSA_EB__   /* Big Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0x0a, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x05, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);

#elif defined __XTENSA_EL__   /* Little Endian version */
		/* Generate instruction:  MOVI a2, retcall */
		err |= __put_user(0x22, &codemem[0]);
		err |= __put_user(0xa0, &codemem[1]);
		err |= __put_user(retcall, &codemem[2]);
		/* Generate instruction:  SYSCALL */
		err |= __put_user(0x00, &codemem[3]);
		err |= __put_user(0x50, &codemem[4]);
		err |= __put_user(0x00, &codemem[5]);
#else
#error Must use compiler for Xtensa processors.
#endif
	}

	/* Flush generated code out of the data cache */

	if (err == 0)
		__flush_invalidate_cache_range((unsigned long)codemem, 6UL);

	return err;
}
505
/*
 * Point the thread at the signal handler: next return to user mode
 * enters 'handler' on 'stack', called as if by CALL4 from 'retaddr'
 * (the sigreturn trampoline), with up to three arguments.
 */
static void
set_thread_state(struct pt_regs *regs, void *stack, unsigned char *retaddr,
	void *handler, unsigned long arg1, void *arg2, void *arg3)
{
	/* Set up registers for signal handler */
	start_thread(regs, (unsigned long) handler, (unsigned long) stack);

	/* Set up a stack frame for a call4
	 * Note: PS.CALLINC is set to one by start_thread
	 */
	/* Windowed return address: top two bits encode the caller's
	 * window increment (0x4... => call4). */
	regs->areg[4] = (((unsigned long) retaddr) & 0x3fffffff) | 0x40000000;
	regs->areg[6] = arg1;
	regs->areg[7] = (unsigned long) arg2;
	regs->areg[8] = (unsigned long) arg3;
}
521
/*
 * Build an old-style signal frame on the user stack and redirect the
 * thread so it runs ka->sa.sa_handler(signal, &frame->sc).  Any fault
 * while writing the frame forces SIGSEGV instead.
 */
static void setup_frame(int sig, struct k_sigaction *ka,
			sigset_t *set, struct pt_regs *regs)
{
	struct sigframe *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
	/* Refuse to deliver while a double exception is being resolved. */
	if (regs->depc > 64)
	{
		printk("!!!!!!! DEPC !!!!!!!\n");
		return;
	}


	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= setup_sigcontext(&frame->sc, &frame->cpstate, regs, set->sig[0]);

	if (_NSIG_WORDS > 1) {
		err |= __copy_to_user(frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
	}

	/* Create sys_sigreturn syscall in stack frame */
	err |= gen_return_code(frame->retcode, USE_SIGRETURN);

	if (err)
		goto give_sigsegv;

	/* Create signal handler execution context.
	 * Return context not modified until this point.
	 */
	set_thread_state(regs, frame, frame->retcode,
		ka->sa.sa_handler, signal, &frame->sc, NULL);

	/* Set access mode to USER_DS.  Nomenclature is outdated, but
	 * functionality is used in uaccess.h
	 */
	set_fs(USER_DS);


#if DEBUG_SIG
	printk("SIG deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
		current->comm, current->pid, signal, frame, regs->pc);
#endif

	return;

give_sigsegv:
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;	/* avoid a SIGSEGV loop */
	force_sig(SIGSEGV, current);
}
583
/*
 * Build a realtime (SA_SIGINFO) signal frame on the user stack and
 * redirect the thread so it runs
 * ka->sa.sa_handler(signal, &frame->info, &frame->uc).  Any fault
 * while writing the frame forces SIGSEGV instead.
 */
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe *frame;
	int err = 0;
	int signal;

	frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
	/* Must not deliver while a double exception is being resolved. */
	if (regs->depc > 64)
		panic ("Double exception sys_sigreturn\n");

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user((void *)current->sas_ss_sp,
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->areg[1]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Create sys_rt_sigreturn syscall in stack frame */
	err |= gen_return_code(frame->retcode, USE_RT_SIGRETURN);

	if (err)
		goto give_sigsegv;

	/* Create signal handler execution context.
	 * Return context not modified until this point.
	 */
	set_thread_state(regs, frame, frame->retcode,
		ka->sa.sa_handler, signal, &frame->info, &frame->uc);

	/* Set access mode to USER_DS.  Nomenclature is outdated, but
	 * functionality is used in uaccess.h
	 */
	set_fs(USER_DS);

#if DEBUG_SIG
	printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
		current->comm, current->pid, signal, frame, regs->pc);
#endif

	return;

give_sigsegv:
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;	/* avoid a SIGSEGV loop */
	force_sig(SIGSEGV, current);
}
647
648
649
650/*
651 * Note that 'init' is a special process: it doesn't get signals it doesn't
652 * want to handle. Thus you cannot kill init even with a SIGKILL even by
653 * mistake.
654 *
655 * Note that we go through the signals twice: once to check the signals that
656 * the kernel can handle, and then we build all the user-level signal handling
657 * stack-frames in one go after that.
658 */
659int do_signal(struct pt_regs *regs, sigset_t *oldset)
660{
661 siginfo_t info;
662 int signr;
663 struct k_sigaction ka;
664
665 if (!oldset)
666 oldset = &current->blocked;
667
668 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
669
670 /* Are we from a system call? */
671 if (regs->syscall >= 0) {
672 /* If so, check system call restarting.. */
673 switch (regs->areg[2]) {
674 case ERESTARTNOHAND:
675 case ERESTART_RESTARTBLOCK:
676 regs->areg[2] = -EINTR;
677 break;
678
679 case ERESTARTSYS:
680 if (!(ka.sa.sa_flags & SA_RESTART)) {
681 regs->areg[2] = -EINTR;
682 break;
683 }
684 /* fallthrough */
685 case ERESTARTNOINTR:
686 regs->areg[2] = regs->syscall;
687 regs->pc -= 3;
688 }
689 }
690
691 if (signr == 0)
692 return 0; /* no signals delivered */
693
694 /* Whee! Actually deliver the signal. */
695
696 /* Set up the stack frame */
697 if (ka.sa.sa_flags & SA_SIGINFO)
698 setup_rt_frame(signr, &ka, &info, oldset, regs);
699 else
700 setup_frame(signr, &ka, oldset, regs);
701
702 if (ka.sa.sa_flags & SA_ONESHOT)
703 ka.sa.sa_handler = SIG_DFL;
704
705 if (!(ka.sa.sa_flags & SA_NODEFER)) {
706 spin_lock_irq(&current->sighand->siglock);
707 sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
708 sigaddset(&current->blocked, signr);
709 recalc_sigpending();
710 spin_unlock_irq(&current->sighand->siglock);
711 }
712 return 1;
713}
diff --git a/arch/xtensa/kernel/syscalls.c b/arch/xtensa/kernel/syscalls.c
new file mode 100644
index 000000000000..abc8ed6c7026
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.c
@@ -0,0 +1,418 @@
1/*
2 * arch/xtensa/kernel/syscall.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 * Copyright (C) 2000 Silicon Graphics, Inc.
10 * Copyright (C) 1995 - 2000 by Ralf Baechle
11 *
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Chris Zankel <chris@zankel.net>
15 * Kevin Chea
16 *
17 */
18
19#define DEBUG 0
20
21#include <linux/config.h>
22#include <linux/linkage.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/mman.h>
27#include <linux/sched.h>
28#include <linux/file.h>
29#include <linux/slab.h>
30#include <linux/utsname.h>
31#include <linux/unistd.h>
32#include <linux/stringify.h>
33#include <linux/syscalls.h>
34#include <linux/sem.h>
35#include <linux/msg.h>
36#include <linux/shm.h>
37#include <linux/errno.h>
38#include <asm/ptrace.h>
39#include <asm/signal.h>
40#include <asm/uaccess.h>
41#include <asm/hardirq.h>
42#include <asm/mman.h>
43#include <asm/shmparam.h>
44#include <asm/page.h>
45#include <asm/ipc.h>
46
47extern void do_syscall_trace(void);
48typedef int (*syscall_t)(void *a0,...);
49extern int (*do_syscalls)(struct pt_regs *regs, syscall_t fun,
50 int narg);
51extern syscall_t sys_call_table[];
52extern unsigned char sys_narg_table[];
53
54/*
55 * sys_pipe() is the normal C calling standard for creating a pipe. It's not
56 * the way unix traditional does this, though.
57 */
58
59int sys_pipe(int __user *userfds)
60{
61 int fd[2];
62 int error;
63
64 error = do_pipe(fd);
65 if (!error) {
66 if (copy_to_user(userfds, fd, 2 * sizeof(int)))
67 error = -EFAULT;
68 }
69 return error;
70}
71
72/*
73 * Common code for old and new mmaps.
74 */
75
/* Shared backend for old_mmap() and sys_mmap2(): resolve the file
 * descriptor (unless MAP_ANONYMOUS), take mmap_sem and hand off to
 * do_mmap_pgoff().  'pgoff' is in pages. */
static inline long do_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	/* These two flags are decided by the kernel, not the caller. */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
99
100unsigned long old_mmap(unsigned long addr, size_t len, int prot,
101 int flags, int fd, off_t offset)
102{
103 return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
104}
105
/* mmap2(2): identical to do_mmap2(); 'pgoff' is already in pages. */
long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	unsigned long flags, unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
111
/* fork(2): full copy; the child reuses the parent's SP (a1). */
int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->areg[1], regs, 0, NULL, NULL);
}
116
/* vfork(2): share the address space and suspend the parent until the
 * child execs or exits (CLONE_VFORK|CLONE_VM). */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK|CLONE_VM|SIGCHLD, regs->areg[1],
		       regs, 0, NULL, NULL);
}
122
123int sys_clone(struct pt_regs *regs)
124{
125 unsigned long clone_flags;
126 unsigned long newsp;
127 int __user *parent_tidptr, *child_tidptr;
128 clone_flags = regs->areg[4];
129 newsp = regs->areg[3];
130 parent_tidptr = (int __user *)regs->areg[5];
131 child_tidptr = (int __user *)regs->areg[6];
132 if (!newsp)
133 newsp = regs->areg[1];
134 return do_fork(clone_flags,newsp,regs,0,parent_tidptr,child_tidptr);
135}
136
137/*
138 * sys_execve() executes a new program.
139 */
140
/* execve(2): path in a5, argv in a3, envp in a4. */
int sys_execve(struct pt_regs *regs)
{
	int error;
	char * filename;

	filename = getname((char *) (long)regs->areg[5]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char **) (long)regs->areg[3],
			  (char **) (long)regs->areg[4], regs);
	putname(filename);

out:
	return error;
}
157
158int sys_uname(struct old_utsname * name)
159{
160 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
161 return 0;
162 return -EFAULT;
163}
164
/*
 * olduname(2): the oldold_utsname fields are fixed-width unterminated
 * slots, so each field is copied and then explicitly NUL-terminated.
 * __copy_to_user() returns >= 0 and __put_user() returns 0 or -EFAULT,
 * so 'error' stays 0 only if every call succeeded.
 */
int sys_olduname(struct oldold_utsname * name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
		return -EFAULT;

	error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
	error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
	error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
	error -= __put_user(0,name->release+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
	error -= __put_user(0,name->version+__OLD_UTS_LEN);
	error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
	error -= __put_user(0,name->machine+__OLD_UTS_LEN);

	return error ? -EFAULT : 0;
}
187
188
189/*
190 * Build the string table for the builtin "poor man's strace".
191 */
192
#if DEBUG
/* Expand syscalls.h into an array of syscall name strings. */
#define SYSCALL(fun, narg) #fun,
static char *sfnames[] = {
#include "syscalls.h"
};
/* Fix: this previously said '#undef SYS', leaving SYSCALL defined. */
#undef SYSCALL
#endif
200
/*
 * C-level syscall dispatcher, called from the assembly syscall entry.
 * The syscall number arrives in a2; the result (a negative errno on
 * failure) is written back to a2 for the return path.
 */
void system_call (struct pt_regs *regs)
{
	syscall_t syscall;
	unsigned long parm0, parm1, parm2, parm3, parm4, parm5;
	int nargs, res;
	unsigned int syscallnr;
	int ps;

#if DEBUG
	int i;
	unsigned long parms[6];
	char *sysname;
#endif

	regs->syscall = regs->areg[2];

	do_syscall_trace();

	/* Have to load after syscall_trace because strace
	 * sometimes changes regs->syscall.
	 */
	syscallnr = regs->syscall;

	parm0 = parm1 = parm2 = parm3 = parm4 = parm5 = 0;

	/* Restore interrupt level to syscall invoker's.
	 * If this were in assembly, we wouldn't disable
	 * interrupts in the first place:
	 */
	local_save_flags (ps);
	local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
			  (regs->ps & XCHAL_PS_INTLEVEL_MASK) );

	if (syscallnr > __NR_Linux_syscalls) {
		regs->areg[2] = -ENOSYS;
		return;
	}

	syscall = sys_call_table[syscallnr];
	nargs = sys_narg_table[syscallnr];

	if (syscall == NULL) {
		regs->areg[2] = -ENOSYS;
		return;
	}

	/* There shouldn't be more than six arguments in the table! */

	if (nargs > 6)
		panic("Internal error - too many syscall arguments (%d)!\n",
		      nargs);

	/* Linux takes system-call arguments in registers.  The ABI
	 * and Xtensa software conventions require the system-call
	 * number in a2.  If an argument exists in a2, we move it to
	 * the next available register.  Note that for improved
	 * efficiency, we do NOT shift all parameters down one
	 * register to maintain the original order.
	 *
	 * At best case (zero arguments), we just write the syscall
	 * number to a2.  At worst case (1 to 6 arguments), we move
	 * the argument in a2 to the next available register, then
	 * write the syscall number to a2.
	 *
	 * For clarity, the following truth table enumerates all
	 * possibilities.
	 *
	 * arguments	syscall number	arg0, arg1, arg2, arg3, arg4, arg5
	 * ---------	--------------	----------------------------------
	 *	0	      a2
	 *	1	      a2	a3
	 *	2	      a2	a4,   a3
	 *	3	      a2	a5,   a3,   a4
	 *	4	      a2	a6,   a3,   a4,   a5
	 *	5	      a2	a7,   a3,   a4,   a5,   a6
	 *	6	      a2	a8,   a3,   a4,   a5,   a6,   a7
	 */
	if (nargs) {
		parm0 = regs->areg[nargs+2];	/* displaced a2 argument */
		parm1 = regs->areg[3];
		parm2 = regs->areg[4];
		parm3 = regs->areg[5];
		parm4 = regs->areg[6];
		parm5 = regs->areg[7];
	} else /* nargs == 0 */
		/* Zero-arg entries take the register frame instead
		 * (e.g. sys_fork, sys_clone, sys_execve above). */
		parm0 = (unsigned long) regs;

#if DEBUG
	parms[0] = parm0;
	parms[1] = parm1;
	parms[2] = parm2;
	parms[3] = parm3;
	parms[4] = parm4;
	parms[5] = parm5;

	sysname = sfnames[syscallnr];
	if (strncmp(sysname, "sys_", 4) == 0)
		sysname = sysname + 4;

	printk("\017SYSCALL:I:%x:%d:%s  %s(", regs->pc, current->pid,
	       current->comm, sysname);
	for (i = 0; i < nargs; i++)
		printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
	printk(")\n");
#endif

	res = syscall((void *)parm0, parm1, parm2, parm3, parm4, parm5);

#if DEBUG
	printk("\017SYSCALL:O:%d:%s  %s(",current->pid, current->comm, sysname);
	for (i = 0; i < nargs; i++)
		printk((i>0) ? ", %#lx" : "%#lx", parms[i]);
	if (res < 4096)
		printk(") = %d\n", res);
	else
		printk(") = %#x\n", res);
#endif /* DEBUG */

	regs->areg[2] = res;
	do_syscall_trace();
}
322
323/*
324 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
325 *
326 * This is really horribly ugly.
327 */
328
/*
 * ipc(2) de-multiplexer for the SysV IPC calls (semaphores, message
 * queues, shared memory).  The high 16 bits of 'call' carry a version
 * for backward compatibility; unknown calls return -ENOSYS.
 */
int sys_ipc (uint call, int first, int second,
	     int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;
	ret = -ENOSYS;

	switch (call) {
	case SEMOP:
		/* SEMOP is SEMTIMEDOP without a timeout. */
		ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
				      second, NULL);
		break;

	case SEMTIMEDOP:
		ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
				      second, (const struct timespec *) fifth);
		break;

	case SEMGET:
		ret = sys_semget (first, second, third);
		break;

	case SEMCTL: {
		/* The semun argument is passed indirectly through 'ptr'. */
		union semun fourth;

		if (ptr && !get_user(fourth.__pad, (void *__user *) ptr))
			ret = sys_semctl (first, second, third, fourth);
		break;
	}

	case MSGSND:
		ret = sys_msgsnd (first, (struct msgbuf __user*) ptr,
				  second, third);
		break;

	case MSGRCV:
		switch (version) {
		case 0: {
			/* Old ABI: msgp and msgtyp arrive packed in an
			 * ipc_kludge pointed to by 'ptr'. */
			struct ipc_kludge tmp;

			if (ptr && !copy_from_user(&tmp,
					   (struct ipc_kludge *) ptr,
					   sizeof (tmp)))
				ret = sys_msgrcv (first, tmp.msgp, second,
						  tmp.msgtyp, third);
			break;
			}

		default:
			ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
					  second, 0, third);
			break;
		}
		break;

	case MSGGET:
		ret = sys_msgget ((key_t) first, second);
		break;

	case MSGCTL:
		ret = sys_msgctl (first, second, (struct msqid_ds __user*) ptr);
		break;

	case SHMAT: {
		/* The attach address is returned through the userspace
		 * pointer passed as 'third'. */
		ulong raddr;
		ret = do_shmat (first, (char __user *) ptr, second, &raddr);

		if (!ret)
			ret = put_user (raddr, (ulong __user *) third);

		break;
	}

	case SHMDT:
		ret = sys_shmdt ((char __user *)ptr);
		break;

	case SHMGET:
		ret = sys_shmget (first, second, third);
		break;

	case SHMCTL:
		ret = sys_shmctl (first, second, (struct shmid_ds __user*) ptr);
		break;
	}
	return ret;
}
418
diff --git a/arch/xtensa/kernel/syscalls.h b/arch/xtensa/kernel/syscalls.h
new file mode 100644
index 000000000000..5b3f75f50feb
--- /dev/null
+++ b/arch/xtensa/kernel/syscalls.h
@@ -0,0 +1,248 @@
1/*
2 * arch/xtensa/kernel/syscalls.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Changes by Joe Taylor <joe@tensilica.com>
12 */
13
14/*
15 * This file is being included twice - once to build a list of all
16 * syscalls and once to build a table of how many arguments each syscall
17 * accepts. Syscalls that receive a pointer to the saved registers are
18 * marked as having zero arguments.
19 *
20 * The binary compatibility calls are in a separate list.
21 *
22 * Entry '0' used to be system_call. It's removed to disable indirect
23 * system calls for now so user tasks can't recurse. See mips'
24 * sys_syscall for a comparable example.
25 */
26
/* Syscall table proper.  Each SYSCALL(handler, nargs) entry is expanded by
 * the includer (see header comment above): once into the syscall jump table
 * and once into the per-syscall argument-count table.  Entries marked
 * sys_ni_syscall are unimplemented slots kept for ABI numbering. */
SYSCALL(0, 0)		                /* 00 */

SYSCALL(sys_exit, 1)
SYSCALL(sys_fork, 0)
SYSCALL(sys_read, 3)
SYSCALL(sys_write, 3)
SYSCALL(sys_open, 3)			/* 05 */
SYSCALL(sys_close, 1)
SYSCALL(sys_waitpid, 3)
SYSCALL(sys_creat, 2)
SYSCALL(sys_link, 2)
SYSCALL(sys_unlink, 1)			/* 10 */
SYSCALL(sys_execve, 0)
SYSCALL(sys_chdir, 1)
SYSCALL(sys_time, 1)
SYSCALL(sys_mknod, 3)
SYSCALL(sys_chmod, 2)			/* 15 */
SYSCALL(sys_lchown, 3)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_stat, 2)
SYSCALL(sys_lseek, 3)
SYSCALL(sys_getpid, 0)			/* 20 */
SYSCALL(sys_mount, 5)
SYSCALL(sys_oldumount, 1)
SYSCALL(sys_setuid, 1)
SYSCALL(sys_getuid, 0)
SYSCALL(sys_stime, 1)			/* 25 */
SYSCALL(sys_ptrace, 4)
SYSCALL(sys_alarm, 1)
SYSCALL(sys_fstat, 2)
SYSCALL(sys_pause, 0)
SYSCALL(sys_utime, 2)			/* 30 */
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_access, 2)
SYSCALL(sys_nice, 1)
SYSCALL(sys_ni_syscall, 0)		/* 35 */
SYSCALL(sys_sync, 0)
SYSCALL(sys_kill, 2)
SYSCALL(sys_rename, 2)
SYSCALL(sys_mkdir, 2)
SYSCALL(sys_rmdir, 1)			/* 40 */
SYSCALL(sys_dup, 1)
SYSCALL(sys_pipe, 1)
SYSCALL(sys_times, 1)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_brk, 1)			/* 45 */
SYSCALL(sys_setgid, 1)
SYSCALL(sys_getgid, 0)
SYSCALL(sys_ni_syscall, 0)		/* was signal(2) */
SYSCALL(sys_geteuid, 0)
SYSCALL(sys_getegid, 0)			/* 50 */
SYSCALL(sys_acct, 1)
SYSCALL(sys_umount, 2)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_ioctl, 3)
SYSCALL(sys_fcntl, 3)			/* 55 */
SYSCALL(sys_ni_syscall, 2)
SYSCALL(sys_setpgid, 2)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_olduname, 1)
SYSCALL(sys_umask, 1)			/* 60 */
SYSCALL(sys_chroot, 1)
SYSCALL(sys_ustat, 2)
SYSCALL(sys_dup2, 2)
SYSCALL(sys_getppid, 0)
SYSCALL(sys_getpgrp, 0)			/* 65 */
SYSCALL(sys_setsid, 0)
SYSCALL(sys_sigaction, 3)
SYSCALL(sys_sgetmask, 0)
SYSCALL(sys_ssetmask, 1)
SYSCALL(sys_setreuid, 2)		/* 70 */
SYSCALL(sys_setregid, 2)
SYSCALL(sys_sigsuspend, 0)
SYSCALL(sys_sigpending, 1)
SYSCALL(sys_sethostname, 2)
SYSCALL(sys_setrlimit, 2)		/* 75 */
SYSCALL(sys_getrlimit, 2)
SYSCALL(sys_getrusage, 2)
SYSCALL(sys_gettimeofday, 2)
SYSCALL(sys_settimeofday, 2)
SYSCALL(sys_getgroups, 2)		/* 80 */
SYSCALL(sys_setgroups, 2)
SYSCALL(sys_ni_syscall, 0)		/* old_select */
SYSCALL(sys_symlink, 2)
SYSCALL(sys_lstat, 2)
SYSCALL(sys_readlink, 3)		/* 85 */
SYSCALL(sys_uselib, 1)
SYSCALL(sys_swapon, 2)
SYSCALL(sys_reboot, 3)
SYSCALL(old_readdir, 3)
SYSCALL(old_mmap, 6)			/* 90 */
SYSCALL(sys_munmap, 2)
SYSCALL(sys_truncate, 2)
SYSCALL(sys_ftruncate, 2)
SYSCALL(sys_fchmod, 2)
SYSCALL(sys_fchown, 3)			/* 95 */
SYSCALL(sys_getpriority, 2)
SYSCALL(sys_setpriority, 3)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_statfs, 2)
SYSCALL(sys_fstatfs, 2)			/* 100 */
SYSCALL(sys_ni_syscall, 3)
SYSCALL(sys_socketcall, 2)
SYSCALL(sys_syslog, 3)
SYSCALL(sys_setitimer, 3)
SYSCALL(sys_getitimer, 2)		/* 105 */
SYSCALL(sys_newstat, 2)
SYSCALL(sys_newlstat, 2)
SYSCALL(sys_newfstat, 2)
SYSCALL(sys_uname, 1)
SYSCALL(sys_ni_syscall, 0)		/* 110 */
SYSCALL(sys_vhangup, 0)
SYSCALL(sys_ni_syscall, 0)		/* was sys_idle() */
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_wait4, 4)
SYSCALL(sys_swapoff, 1)			/* 115 */
SYSCALL(sys_sysinfo, 1)
SYSCALL(sys_ipc, 5)			/* 6 really, but glibc uses only 5 */
SYSCALL(sys_fsync, 1)
SYSCALL(sys_sigreturn, 0)
SYSCALL(sys_clone, 0)			/* 120 */
SYSCALL(sys_setdomainname, 2)
SYSCALL(sys_newuname, 1)
SYSCALL(sys_ni_syscall, 0)		/* sys_modify_ldt */
SYSCALL(sys_adjtimex, 1)
SYSCALL(sys_mprotect, 3)		/* 125 */
SYSCALL(sys_sigprocmask, 3)
SYSCALL(sys_ni_syscall, 2)		/* old sys_create_module */
SYSCALL(sys_init_module, 2)
SYSCALL(sys_delete_module, 1)
SYSCALL(sys_ni_syscall, 1)		/* old sys_get_kernel_sysm */ /* 130 */
SYSCALL(sys_quotactl, 0)
SYSCALL(sys_getpgid, 1)
SYSCALL(sys_fchdir, 1)
SYSCALL(sys_bdflush, 2)
SYSCALL(sys_sysfs, 3)			/* 135 */
SYSCALL(sys_personality, 1)
SYSCALL(sys_ni_syscall, 0)		/* for afs_syscall */
SYSCALL(sys_setfsuid, 1)
SYSCALL(sys_setfsgid, 1)
SYSCALL(sys_llseek, 5)			/* 140 */
SYSCALL(sys_getdents, 3)
SYSCALL(sys_select, 5)
SYSCALL(sys_flock, 2)
SYSCALL(sys_msync, 3)
SYSCALL(sys_readv, 3)			/* 145 */
SYSCALL(sys_writev, 3)
SYSCALL(sys_ni_syscall, 3)
SYSCALL(sys_ni_syscall, 3)
SYSCALL(sys_ni_syscall, 4)		/* handled in fast syscall handler. */
SYSCALL(sys_ni_syscall, 0)		/* 150 */
SYSCALL(sys_getsid, 1)
SYSCALL(sys_fdatasync, 1)
SYSCALL(sys_sysctl, 1)
SYSCALL(sys_mlock, 2)
SYSCALL(sys_munlock, 2)			/* 155 */
SYSCALL(sys_mlockall, 1)
SYSCALL(sys_munlockall, 0)
SYSCALL(sys_sched_setparam,2)
SYSCALL(sys_sched_getparam,2)
SYSCALL(sys_sched_setscheduler,3)	/* 160 */
SYSCALL(sys_sched_getscheduler,1)
SYSCALL(sys_sched_yield,0)
SYSCALL(sys_sched_get_priority_max,1)
SYSCALL(sys_sched_get_priority_min,1)
SYSCALL(sys_sched_rr_get_interval,2)	/* 165 */
SYSCALL(sys_nanosleep,2)
SYSCALL(sys_mremap,4)
SYSCALL(sys_accept, 3)
SYSCALL(sys_bind, 3)
SYSCALL(sys_connect, 3)			/* 170 */
SYSCALL(sys_getpeername, 3)
SYSCALL(sys_getsockname, 3)
SYSCALL(sys_getsockopt, 5)
SYSCALL(sys_listen, 2)
SYSCALL(sys_recv, 4)			/* 175 */
SYSCALL(sys_recvfrom, 6)
SYSCALL(sys_recvmsg, 3)
SYSCALL(sys_send, 4)
SYSCALL(sys_sendmsg, 3)
SYSCALL(sys_sendto, 6)			/* 180 */
SYSCALL(sys_setsockopt, 5)
SYSCALL(sys_shutdown, 2)
SYSCALL(sys_socket, 3)
SYSCALL(sys_socketpair, 4)
SYSCALL(sys_setresuid, 3)		/* 185 */
SYSCALL(sys_getresuid, 3)
SYSCALL(sys_ni_syscall, 5)		/* old sys_query_module */
SYSCALL(sys_poll, 3)
SYSCALL(sys_nfsservctl, 3)
SYSCALL(sys_setresgid, 3)		/* 190 */
SYSCALL(sys_getresgid, 3)
SYSCALL(sys_prctl, 5)
SYSCALL(sys_rt_sigreturn, 0)
SYSCALL(sys_rt_sigaction, 4)
SYSCALL(sys_rt_sigprocmask, 4)		/* 195 */
SYSCALL(sys_rt_sigpending, 2)
SYSCALL(sys_rt_sigtimedwait, 4)
SYSCALL(sys_rt_sigqueueinfo, 3)
SYSCALL(sys_rt_sigsuspend, 0)
SYSCALL(sys_pread64, 5)			/* 200 */
SYSCALL(sys_pwrite64, 5)
SYSCALL(sys_chown, 3)
SYSCALL(sys_getcwd, 2)
SYSCALL(sys_capget, 2)
SYSCALL(sys_capset, 2)			/* 205 */
SYSCALL(sys_sigaltstack, 0)
SYSCALL(sys_sendfile, 4)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_ni_syscall, 0)
SYSCALL(sys_mmap2, 6)			/* 210 */
SYSCALL(sys_truncate64, 2)
SYSCALL(sys_ftruncate64, 2)
SYSCALL(sys_stat64, 2)
SYSCALL(sys_lstat64, 2)
SYSCALL(sys_fstat64, 2)			/* 215 */
SYSCALL(sys_pivot_root, 2)
SYSCALL(sys_mincore, 3)
SYSCALL(sys_madvise, 3)
SYSCALL(sys_getdents64, 3)
SYSCALL(sys_vfork, 0)			/* 220 */
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
new file mode 100644
index 000000000000..e07287db5a40
--- /dev/null
+++ b/arch/xtensa/kernel/time.c
@@ -0,0 +1,227 @@
1/*
2 * arch/xtensa/kernel/time.c
3 *
4 * Timer and clock support.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15#include <linux/config.h>
16#include <linux/errno.h>
17#include <linux/time.h>
18#include <linux/timex.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/irq.h>
23#include <linux/profile.h>
24#include <linux/delay.h>
25
26#include <asm/timex.h>
27#include <asm/platform.h>
28
29
30extern volatile unsigned long wall_jiffies;
31
32u64 jiffies_64 = INITIAL_JIFFIES;
33EXPORT_SYMBOL(jiffies_64);
34
35spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
36EXPORT_SYMBOL(rtc_lock);
37
38
39#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
40unsigned long ccount_per_jiffy; /* per 1/HZ */
41unsigned long ccount_nsec; /* nsec per ccount increment */
42#endif
43
44unsigned int last_ccount_stamp;
45static long last_rtc_update = 0;
46
47/*
48 * Scheduler clock - returns current tim in nanosec units.
49 */
50
51unsigned long long sched_clock(void)
52{
53 return (unsigned long long)jiffies * (1000000000 / HZ);
54}
55
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);

/* Timer interrupt action; registered on LINUX_TIMER_INT in time_init().
 * SA_INTERRUPT runs the handler with interrupts disabled. */
static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	SA_INTERRUPT,
	.name =		"timer",
};
62
63void __init time_init(void)
64{
65 time_t sec_o, sec_n = 0;
66
67 /* The platform must provide a function to calibrate the processor
68 * speed for the CALIBRATE.
69 */
70
71#if CONFIG_XTENSA_CALIBRATE_CCOUNT
72 printk("Calibrating CPU frequency ");
73 platform_calibrate_ccount();
74 printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
75 (int)(ccount_per_jiffy/(10000/HZ))%100);
76#endif
77
78 /* Set time from RTC (if provided) */
79
80 if (platform_get_rtc_time(&sec_o) == 0)
81 while (platform_get_rtc_time(&sec_n))
82 if (sec_o != sec_n)
83 break;
84
85 xtime.tv_nsec = 0;
86 last_rtc_update = xtime.tv_sec = sec_n;
87 last_ccount_stamp = get_ccount();
88
89 set_normalized_timespec(&wall_to_monotonic,
90 -xtime.tv_sec, -xtime.tv_nsec);
91
92 /* Initialize the linux timer interrupt. */
93
94 setup_irq(LINUX_TIMER_INT, &timer_irqaction);
95 set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
96}
97
98
/*
 * Set the current wall-clock time.  Returns 0 on success, -EINVAL if the
 * nanosecond value is out of range.  Holds xtime_lock for the update.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long ccount;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	ccount = get_ccount();
	nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
	nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;

	/* Adjust wall_to_monotonic by the opposite amount so that monotonic
	 * time is unaffected by the wall-clock jump. */
	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;	/* clock is no longer NTP-synchronized */
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
134
135
/*
 * Read the current wall-clock time into *tv (seconds/microseconds).
 * Takes a consistent snapshot under the xtime seqlock, then adds the
 * ccount cycles elapsed since the last recorded tick.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long sec, usec, delta, lost, seq;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		delta = get_ccount() - last_ccount_stamp;
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / NSEC_PER_USEC);

		/* Ticks not yet folded into xtime. */
		lost = jiffies - wall_jiffies;

	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
	/* Normalize: carry whole seconds out of the microsecond field. */
	for (; usec >= 1000000; sec++, usec -= 1000000)
		;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
161
162/*
163 * The timer interrupt is called HZ times per second.
164 */
165
irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{

	unsigned long next;

	next = get_linux_timer();

again:
	/* Account one jiffy for every CCOUNT_PER_JIFFY interval the cycle
	 * counter has passed since the previously programmed compare value. */
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING, regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif

		write_seqlock(&xtime_lock);

		last_ccount_stamp = next;
		next += CCOUNT_PER_JIFFY;
		do_timer (regs); /* Linux handler in kernel/timer.c */

		/* Sync the RTC from system time: only when NTP-synchronized
		 * (STA_UNSYNC clear), at most every ~11 minutes (659 s), and
		 * close enough to a second boundary. */
		if ((time_status & STA_UNSYNC) == 0 &&
		    xtime.tv_sec - last_rtc_update >= 659 &&
		    abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
		    jiffies - wall_jiffies == 1) {

			if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Do it again in 60 s */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}

	/* NOTE: writing CCOMPAREn clears the interrupt. */

	set_linux_timer (next);

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	/* Allow platform to do something useful (Wdog). */

	platform_heartbeat();

	return IRQ_HANDLED;
}
216
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
/* Skip the generic BogoMIPS calibration loop: the cycle-counter rate is
 * already known, so preset loops_per_jiffy from it. */
void __devinit calibrate_delay(void)
{
	loops_per_jiffy = CCOUNT_PER_JIFFY;
	printk("Calibrating delay loop (skipped)... "
	       "%lu.%02lu BogoMIPS preset\n",
	       loops_per_jiffy/(1000000/HZ),
	       (loops_per_jiffy/(10000/HZ)) % 100);
}
#endif
227
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
new file mode 100644
index 000000000000..804246e743b1
--- /dev/null
+++ b/arch/xtensa/kernel/traps.c
@@ -0,0 +1,498 @@
1/*
2 * arch/xtensa/kernel/traps.c
3 *
4 * Exception handling.
5 *
6 * Derived from code with the following copyrights:
7 * Copyright (C) 1994 - 1999 by Ralf Baechle
8 * Modified for R3000 by Paul M. Antoine, 1995, 1996
9 * Complete output from die() by Ulf Carlsson, 1998
10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 *
12 * Essentially rewritten for the Xtensa architecture port.
13 *
14 * Copyright (C) 2001 - 2005 Tensilica Inc.
15 *
16 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
17 * Chris Zankel <chris@zankel.net>
18 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
19 * Kevin Chea
20 *
21 * This file is subject to the terms and conditions of the GNU General Public
22 * License. See the file "COPYING" in the main directory of this archive
23 * for more details.
24 */
25
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/stringify.h>
31#include <linux/kallsyms.h>
32
33#include <asm/ptrace.h>
34#include <asm/timex.h>
35#include <asm/uaccess.h>
36#include <asm/pgtable.h>
37#include <asm/processor.h>
38
39#ifdef CONFIG_KGDB
40extern int gdb_enter;
41extern int return_from_debug_flag;
42#endif
43
44/*
45 * Machine specific interrupt handlers
46 */
47
48extern void kernel_exception(void);
49extern void user_exception(void);
50
51extern void fast_syscall_kernel(void);
52extern void fast_syscall_user(void);
53extern void fast_alloca(void);
54extern void fast_unaligned(void);
55extern void fast_second_level_miss(void);
56extern void fast_store_prohibited(void);
57extern void fast_coprocessor(void);
58
59extern void do_illegal_instruction (struct pt_regs*);
60extern void do_interrupt (struct pt_regs*);
61extern void do_unaligned_user (struct pt_regs*);
62extern void do_multihit (struct pt_regs*, unsigned long);
63extern void do_page_fault (struct pt_regs*, unsigned long);
64extern void do_debug (struct pt_regs*);
65extern void system_call (struct pt_regs*);
66
67/*
68 * The vector table must be preceded by a save area (which
69 * implies it must be in RAM, unless one places RAM immediately
70 * before a ROM and puts the vector at the start of the ROM (!))
71 */
72
73#define KRNL 0x01
74#define USER 0x02
75
76#define COPROCESSOR(x) \
77{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
78
79typedef struct {
80 int cause;
81 int fast;
82 void* handler;
83} dispatch_init_table_t;
84
/* Compressed exception-dispatch table, consumed once by trap_init().
 * Each entry maps an EXCCAUSE code to a handler: fast == 0 registers a
 * default C handler; USER and/or KRNL register a fast (assembly) handler
 * for that mode.  Terminated by the { -1, -1, 0 } sentinel. */
dispatch_init_table_t __init dispatch_init_table[] = {

{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction},
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		KRNL,	   fast_syscall_kernel },
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ XCHAL_EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
{ XCHAL_EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER
{ XCHAL_EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#else
{ XCHAL_EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
#endif
{ XCHAL_EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
{ XCHAL_EXCCAUSE_ITLB_MISS,		0,	   do_page_fault },
{ XCHAL_EXCCAUSE_ITLB_MISS,		USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE,	0,	   do_page_fault },
/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MISS,		USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_DTLB_MISS,		0,	   do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE,	0,	   do_page_fault },
/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1)
COPROCESSOR(0),
#endif
#if (XCHAL_CP_MASK & 2)
COPROCESSOR(1),
#endif
#if (XCHAL_CP_MASK & 4)
COPROCESSOR(2),
#endif
#if (XCHAL_CP_MASK & 8)
COPROCESSOR(3),
#endif
#if (XCHAL_CP_MASK & 16)
COPROCESSOR(4),
#endif
#if (XCHAL_CP_MASK & 32)
COPROCESSOR(5),
#endif
#if (XCHAL_CP_MASK & 64)
COPROCESSOR(6),
#endif
#if (XCHAL_CP_MASK & 128)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,	   do_debug },
{ -1, -1, 0 }

};
148
149/* The exception table <exc_table> serves two functions:
150 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
151 * 2. it is a temporary memory buffer for the exception handlers.
152 */
153
154unsigned long exc_table[EXC_TABLE_SIZE/4];
155
156void die(const char*, struct pt_regs*, long);
157
/* Trigger a fatal die() for <str> unless the trap came from user mode. */
static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
164
165/*
166 * Unhandled Exceptions. Kill user task or panic if in kernel space.
167 */
168
/* Default handler for exceptions with no registered handler: fatal in
 * kernel mode, SIGILL for a user task. */
void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
	    regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	printk("Caught unhandled exception in '%s' "
	       "(pid = %d, pc = %#010lx) - should not happen\n"
	       "\tEXCCAUSE is %ld\n",
	       current->comm, current->pid, regs->pc, exccause);
	force_sig(SIGILL, current);
}
181
/*
 * Multi-hit TLB exception (registered for ITLB/DTLB multihit causes in
 * dispatch_init_table). This is fatal!
 */

void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
	/* die() does not return; exccause is intentionally unused. */
	die("Caught multihit exception", regs, SIGKILL);
}
190
191/*
192 * Level-1 interrupt.
193 * We currently have no priority encoding.
194 */
195
196unsigned long ignored_level1_interrupts;
197extern void do_IRQ(int, struct pt_regs *);
198
199void do_interrupt (struct pt_regs *regs)
200{
201 unsigned long intread = get_sr (INTREAD);
202 unsigned long intenable = get_sr (INTENABLE);
203 int i, mask;
204
205 /* Handle all interrupts (no priorities).
206 * (Clear the interrupt before processing, in case it's
207 * edge-triggered or software-generated)
208 */
209
210 for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
211 if (mask & (intread & intenable)) {
212 set_sr (mask, INTCLEAR);
213 do_IRQ (i,regs);
214 }
215 }
216}
217
218/*
219 * Illegal instruction. Fatal if in kernel space.
220 */
221
/* Illegal instruction: fatal in kernel mode, SIGILL for a user task. */
void
do_illegal_instruction(struct pt_regs *regs)
{
	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
	    current->comm, current->pid, regs->pc);
	force_sig(SIGILL, current);
}
233
234
/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
 * accesses from user space.
 */

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifndef CONFIG_UNALIGNED_USER
void
do_unaligned_user (struct pt_regs *regs)
{
	siginfo_t info;

	__die_if_kernel("Unhandled unaligned exception in kernel",
	    regs, SIGKILL);

	/* Record the faulting address in the thread for debuggers. */
	current->thread.bad_vaddr = regs->excvaddr;
	current->thread.error_code = -3;
	printk("Unaligned memory access to %08lx in '%s' "
	       "(pid = %d, pc = %#010lx)\n",
	       regs->excvaddr, current->comm, current->pid, regs->pc);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void *) regs->excvaddr;
	/* Was force_sig_info(SIGSEGV, ...): the delivered signal number
	 * must match info.si_signo (SIGBUS/BUS_ADRALN set up above). */
	force_sig_info(SIGBUS, &info, current);

}
#endif
#endif
266
/* Debug exception.  Hands control to kgdb when remote debugging is
 * enabled; otherwise fatal in kernel mode, SIGTRAP for a user task
 * (e.g. for a native debugger). */
void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_KGDB
	/* If remote debugging is configured AND enabled, we give control to
	 * kgdb.  Otherwise, we fall through, perhaps giving control to the
	 * native debugger.
	 */

	if (gdb_enter) {
		extern void gdb_handle_exception(struct pt_regs *);
		gdb_handle_exception(regs);
		return_from_debug_flag = 1;
		return;
	}
#endif

	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP, current);
}
290
291
292/*
293 * Initialize dispatch tables.
294 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
297 * from that compressed table:
298 * - fast user first dispatch table for user exceptions
299 * - fast kernel first dispatch table for kernel exceptions
300 * - default C-handler C-handler called by the default fast handler.
301 *
302 * See vectors.S for more details.
303 */
304
305#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
306
307void trap_init(void)
308{
309 int i;
310
311 /* Setup default vectors. */
312
313 for(i = 0; i < 64; i++) {
314 set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
315 set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
316 set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
317 }
318
319 /* Setup specific handlers. */
320
321 for(i = 0; dispatch_init_table[i].cause >= 0; i++) {
322
323 int fast = dispatch_init_table[i].fast;
324 int cause = dispatch_init_table[i].cause;
325 void *handler = dispatch_init_table[i].handler;
326
327 if (fast == 0)
328 set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
329 if (fast && fast & USER)
330 set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
331 if (fast && fast & KRNL)
332 set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
333 }
334
335 /* Initialize EXCSAVE_1 to hold the address of the exception table. */
336
337 i = (unsigned long)exc_table;
338 __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
339}
340
341/*
342 * This function dumps the current valid window frame and other base registers.
343 */
344
/* Dump the current valid window frame and other base registers. */
void show_regs(struct pt_regs * regs)
{
	int i, wmask;

	/* Bit 0 of wmask is ignored here. */
	wmask = regs->wmask & ~1;

	for (i = 0; i < 32; i++) {
		/* NOTE(review): stops at the first 4-register group whose
		 * wmask bit is set -- confirm this matches the wmask
		 * encoding (valid vs. invalid frames). */
		if (wmask & (1 << (i / 4)))
			break;
		if ((i % 8) == 0)
			printk ("\n" KERN_INFO "a%02d: ", i);
		printk("%08lx ", regs->areg[i]);
	}
	printk("\n");

	printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
	       regs->pc, regs->ps, regs->depc, regs->excvaddr);
	printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
	       regs->lbeg, regs->lend, regs->lcount, regs->sar);
	if (user_mode(regs))
		printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
		       regs->windowbase, regs->windowstart, regs->wmask,
		       regs->syscall);
}
369
/* Walk the call frames on the kernel stack and print every return address
 * that falls within kernel text.  sp == NULL means "start from the current
 * stack pointer (a1)". */
void show_trace(struct task_struct *task, unsigned long *sp)
{
	unsigned long a0, a1, pc;
	unsigned long sp_start, sp_end;

	a1 = (unsigned long)sp;

	if (a1 == 0)
		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));


	/* Kernel stacks are THREAD_SIZE-aligned; derive this stack's bounds. */
	sp_start = a1 & ~(THREAD_SIZE-1);
	sp_end = sp_start + THREAD_SIZE;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	/* Force live register windows out to memory so the saved a0/a1 of
	 * each frame can be read from the stack. */
	spill_registers();

	while (a1 > sp_start && a1 < sp_end) {
		sp = (unsigned long*)a1;

		/* Saved return address (a0) and caller's stack pointer (a1)
		 * are read from fixed slots below the frame pointer. */
		a0 = *(sp - 4);
		a1 = *(sp - 3);

		/* Frames must move strictly upward; stop on corruption. */
		if (a1 <= (unsigned long) sp)
			break;

		pc = MAKE_PC_FROM_RA(a0, a1);

		if (kernel_text_address(pc)) {
			printk(" [<%08lx>] ", pc);
			print_symbol("%s\n", pc);
		}
	}
	printk("\n");
}
408
409/*
410 * This routine abuses get_user()/put_user() to reference pointers
411 * with at least a bit of error checking ...
412 */
413
static int kstack_depth_to_print = 24;	/* max stack words dumped below */

/* Dump raw stack words starting at sp (NULL: current a1), then print the
 * call trace for the same stack. */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	int i = 0;
	unsigned long *stack;

	if (sp == 0)
		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));

	stack = sp;	/* remember the start for show_trace() below */

	printk("\nStack: ");

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(sp))
			break;
		if (i && ((i % 8) == 0))
			printk("\n ");
		printk("%08lx ", *sp++);
	}
	printk("\n");
	show_trace(task, stack);
}
438
/* Back-trace the current task starting from the current stack pointer. */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
445
446
/* Dump the instruction words around pc (three before through five after),
 * bracketing the word at pc itself with <...>. */
void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for(i = -3 ; i < 6 ; i++) {
		unsigned long insn;
		/* __get_user guards against a bad/unmapped pc. */
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in pc)\n");
			break;
		}
		printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
	}
}
462
spinlock_t die_lock = SPIN_LOCK_UNLOCKED;	/* serializes oops output */

/* Print an oops (message, registers, kernel stack) and terminate:
 * panic when in interrupt context or when panic_on_oops is set,
 * otherwise exit the current task with <err>. */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;		/* distinguishes repeated oopses */
	int nl = 0;

	console_verbose();
	spin_lock_irq(&die_lock);

	printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long*)regs->areg[1]);

	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		/* Leave time for the oops to reach the console/log. */
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(5 * HZ);
		panic("Fatal exception");
	}
	do_exit(err);
}
497
498
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
new file mode 100644
index 000000000000..81808f0c6742
--- /dev/null
+++ b/arch/xtensa/kernel/vectors.S
@@ -0,0 +1,464 @@
1/*
2 * arch/xtensa/kernel/vectors.S
3 *
4 * This file contains all exception vectors (user, kernel, and double),
5 * as well as the window vectors (overflow and underflow), and the debug
6 * vector. These are the primary vectors executed by the processor if an
7 * exception occurs.
8 *
9 * This file is subject to the terms and conditions of the GNU General
10 * Public License. See the file "COPYING" in the main directory of
11 * this archive for more details.
12 *
13 * Copyright (C) 2005 Tensilica, Inc.
14 *
15 * Chris Zankel <chris@zankel.net>
16 *
17 */
18
19/*
20 * We use a two-level table approach. The user and kernel exception vectors
21 * use a first-level dispatch table to dispatch the exception to a registered
22 * fast handler or the default handler, if no fast handler was registered.
23 * The default handler sets up a C-stack and dispatches the exception to a
 24 * registered C handler in the second-level dispatch table.
25 *
26 * Fast handler entry condition:
27 *
28 * a0: trashed, original value saved on stack (PT_AREG0)
29 * a1: a1
30 * a2: new stack pointer, original value in depc
31 * a3: dispatch table
32 * depc: a2, original value saved on stack (PT_DEPC)
33 * excsave_1: a3
34 *
35 * The value for PT_DEPC saved to stack also functions as a boolean to
36 * indicate that the exception is either a double or a regular exception:
37 *
38 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception
39 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
40 *
41 * Note: Neither the kernel nor the user exception handler generate literals.
42 *
43 */
44
/* Duplicate includes of <asm/ptrace.h> and <asm/processor.h> removed. */
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
56
57/*
58 * User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
59 *
60 * We get here when an exception occurred while we were in userland.
61 * We switch to the kernel stack and jump to the first level handler
62 * associated to the exception cause.
63 *
64 * Note: the saved kernel stack pointer (EXC_TABLE_KSTK) is already
65 * decremented by PT_USER_SIZE.
66 */
67
	.section .UserExceptionVector.text, "ax"

ENTRY(_UserExceptionVector)

	xsr	a3, EXCSAVE_1		# save a3 and get dispatch table
	wsr	a2, DEPC		# save a2
	l32i	a2, a3, EXC_TABLE_KSTK	# load kernel stack to a2
	s32i	a0, a2, PT_AREG0	# save a0 to ESF
	rsr	a0, EXCCAUSE		# retrieve exception cause
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
					# (EXCCAUSE < VALID_DOUBLE_EXCEPTION_ADDRESS,
					#  see the file-header comment)
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0
81
82/*
83 * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
84 *
85 * We get this exception when we were already in kernel space.
86 * We decrement the current stack pointer (kernel) by PT_SIZE and
87 * jump to the first-level handler associated with the exception cause.
88 *
89 * Note: we need to preserve space for the spill region.
90 */
91
	.section .KernelExceptionVector.text, "ax"

ENTRY(_KernelExceptionVector)

	xsr	a3, EXCSAVE_1		# save a3, and get dispatch table
	wsr	a2, DEPC		# save a2
	addi	a2, a1, -16-PT_SIZE	# adjust stack pointer; the extra 16
					# bytes preserve the spill region
					# (see comment above this vector)
	s32i	a0, a2, PT_AREG0	# save a0 to ESF
	rsr	a0, EXCCAUSE		# retrieve exception cause
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
	jx	a0
105
106
107/*
108 * Double exception vector (Exceptions with PS.EXCM == 1)
 109 * We get this exception when another exception occurs while we are
110 * already in an exception, such as window overflow/underflow exception,
111 * or 'expected' exceptions, for example memory exception when we were trying
112 * to read data from an invalid address in user space.
113 *
114 * Note that this vector is never invoked for level-1 interrupts, because such
115 * interrupts are disabled (masked) when PS.EXCM is set.
116 *
117 * We decode the exception and take the appropriate action. However, the
118 * double exception vector is much more careful, because a lot more error
119 * cases go through the double exception vector than through the user and
120 * kernel exception vectors.
121 *
122 * Occasionally, the kernel expects a double exception to occur. This usually
123 * happens when accessing user-space memory with the user's permissions
124 * (l32e/s32e instructions). The kernel state, though, is not always suitable
125 * for immediate transfer of control to handle_double, where "normal" exception
126 * processing occurs. Also in kernel mode, TLB misses can occur if accessing
127 * vmalloc memory, possibly requiring repair in a double exception handler.
128 *
129 * The variable at TABLE_FIXUP offset from the pointer in EXCSAVE_1 doubles as
130 * a boolean variable and a pointer to a fixup routine. If the variable
131 * EXC_TABLE_FIXUP is non-zero, this handler jumps to that address. A value of
132 * zero indicates to use the default kernel/user exception handler.
133 * There is only one exception, when the value is identical to the exc_table
134 * label, the kernel is in trouble. This mechanism is used to protect critical
135 * sections, mainly when the handler writes to the stack to assert the stack
136 * pointer is valid. Once the fixup/default handler leaves that area, the
137 * EXC_TABLE_FIXUP variable is reset to the fixup handler or zero.
138 *
139 * Procedures wishing to use this mechanism should set EXC_TABLE_FIXUP to the
140 * nonzero address of a fixup routine before it could cause a double exception
141 * and reset it before it returns.
142 *
143 * Some other things to take care of when a fast exception handler doesn't
144 * specify a particular fixup handler but wants to use the default handlers:
145 *
146 * - The original stack pointer (in a1) must not be modified. The fast
147 * exception handler should only use a2 as the stack pointer.
148 *
149 * - If the fast handler manipulates the stack pointer (in a2), it has to
150 * register a valid fixup handler and cannot use the default handlers.
151 *
152 * - The handler can use any other generic register from a3 to a15, but it
153 * must save the content of these registers to stack (PT_AREG3...PT_AREGx)
154 *
155 * - These registers must be saved before a double exception can occur.
156 *
157 * - If we ever implement handling signals while in double exceptions, the
158 * number of registers a fast handler has saved (excluding a0 and a1) must
159 * be written to PT_AREG1. (1 if only a3 is used, 2 for a3 and a4, etc. )
160 *
161 * The fixup handlers are special handlers:
162 *
163 * - Fixup entry conditions differ from regular exceptions:
164 *
165 * a0: DEPC
166 * a1: a1
167 * a2: trashed, original value in EXC_TABLE_DOUBLE_A2
168 * a3: exctable
169 * depc: a0
170 * excsave_1: a3
171 *
172 * - When the kernel enters the fixup handler, it still assumes it is in a
173 * critical section, so EXC_TABLE_FIXUP variable is set to exc_table.
174 * The fixup handler, therefore, has to re-register itself as the fixup
175 * handler before it returns from the double exception.
176 *
177 * - Fixup handler can share the same exception frame with the fast handler.
178 * The kernel stack pointer is not changed when entering the fixup handler.
179 *
180 * - Fixup handlers can jump to the default kernel and user exception
181 * handlers. Before it jumps, though, it has to setup a exception frame
182 * on stack. Because the default handler resets the register fixup handler
183 * the fixup handler must make sure that the default handler returns to
184 * it instead of the exception address, so it can re-register itself as
185 * the fixup handler.
186 *
187 * In case of a critical condition where the kernel cannot recover, we jump
188 * to unrecoverable_exception with the following entry conditions.
189 * All registers a0...a15 are unchanged from the last exception, except:
190 *
191 * a0: last address before we jumped to the unrecoverable_exception.
192 * excsave_1: a0
193 *
194 *
195 * See the handle_alloca_user and spill_registers routines for example clients.
196 *
197 * FIXME: Note: we currently don't allow signal handling coming from a double
 198 * exception, so the item marked with (*) is not required.
199 */
200
	.section .DoubleExceptionVector.text, "ax"
	.begin literal_prefix .DoubleExceptionVector

ENTRY(_DoubleExceptionVector)

	/* Deliberately destroy excsave (don't assume its value was valid). */

	wsr	a3, EXCSAVE_1			# save a3

	/* Check for kernel double exception (usually fatal). */

	rsr	a3, PS
	_bbci.l	a3, PS_UM_SHIFT, .Lksp		# PS.UM clear => kernel mode

	/* Check if we are currently handling a window exception. */
	/* Note: We don't need to indicate that we enter a critical section. */

	xsr	a0, DEPC			# get DEPC, save a0

	/* Take the fixup path unless DEPC lies inside the window vectors. */

	movi	a3, XCHAL_WINDOW_VECTORS_VADDR
	_bltu	a0, a3, .Lfixup
	addi	a3, a3, XSHAL_WINDOW_VECTORS_SIZE
	_bgeu	a0, a3, .Lfixup

	/* Window overflow/underflow exception. Get stack pointer. */

	mov	a3, a2
	movi	a2, exc_table
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Check for overflow/underflow exception, jump if overflow. */

	_bbci.l	a0, 6, .Lovfl			# bit 6 of the vector offset
						# clear => overflow range

	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3  */

	/* Restart window underflow exception.
	 * We return to the instruction in user space that caused the window
	 * underflow exception. Therefore, we change window base to the value
	 * before we entered the window underflow exception and prepare the
	 * registers to return as if we were coming from a regular exception
	 * by changing depc (in a0).
	 * Note: We can trash the current window frame (a0...a3) and depc!
	 */

	wsr	a2, DEPC			# save stack pointer temporarily
	rsr	a0, PS
	extui	a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
	wsr	a0, WINDOWBASE			# roll back to the owner's window
	rsync

	/* We are now in the previous window frame. Save registers again. */

	xsr	a2, DEPC			# save a2 and get stack pointer
	s32i	a0, a2, PT_AREG0

	wsr	a3, EXCSAVE_1			# save a3
	movi	a3, exc_table

	rsr	a0, EXCCAUSE
	s32i	a0, a2, PT_DEPC			# mark it as a regular exception
	addx4	a0, a0, a3
	l32i	a0, a0, EXC_TABLE_FAST_USER
	jx	a0

.Lfixup:/* Check for a fixup handler or if we were in a critical section. */

	/* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */

	movi	a3, exc_table
	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# temporary variable

	/* Enter critical section: FIXUP == exc_table marks "in critical". */

	l32i	a2, a3, EXC_TABLE_FIXUP
	s32i	a3, a3, EXC_TABLE_FIXUP
	beq	a2, a3, .Lunrecoverable_fixup	# critical!
	beqz	a2, .Ldflt			# no handler was registered

	/* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */

	jx	a2				# jump to registered fixup handler

.Ldflt:	/* Get stack pointer. */

	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	addi	a2, a3, -PT_USER_SIZE		# carve an exception frame

.Lovfl:	/* Jump to default handlers. */

	/* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */

	xsr	a3, DEPC
	s32i	a0, a2, PT_DEPC			# DEPC value doubles as the
						# "this was a double exception"
						# flag (see file header)
	s32i	a3, a2, PT_AREG0

	/* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3
	l32i	a0, a0, EXC_TABLE_FAST_USER
	jx	a0

	/*
	 * We only allow the ITLB miss exception if we are in kernel space.
	 * All other exceptions are unexpected and thus unrecoverable!
	 */

	.extern fast_second_level_miss_double_kernel

.Lksp:	/* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */

	rsr	a3, EXCCAUSE
	beqi	a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
	addi	a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
	bnez	a3, .Lunrecoverable		# neither ITLB nor DTLB miss
1:	movi	a3, fast_second_level_miss_double_kernel
	jx	a3

	/* Critical! We can't handle this situation. PANIC! */

	.extern unrecoverable_exception

.Lunrecoverable_fixup:
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2 saved at .Lfixup
	xsr	a0, DEPC

.Lunrecoverable:
	rsr	a3, EXCSAVE_1
	wsr	a0, EXCSAVE_1			# entry condition: excsave_1 = a0
	movi	a0, unrecoverable_exception
	callx0	a0				# does not return

	.end literal_prefix
336
337
338/*
339 * Debug interrupt vector
340 *
341 * There is not much space here, so simply jump to another handler.
342 * EXCSAVE[DEBUGLEVEL] has been set to that handler.
343 */
344
	.section .DebugInterruptVector.text, "ax"

ENTRY(_DebugInterruptVector)
	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL	# swap: save a0, fetch the
						# handler address stashed in
						# EXCSAVE[DEBUGLEVEL]
	jx	a0				# tail-jump to the real handler
350
351
352
353/* Window overflow and underflow handlers.
354 * The handlers must be 64 bytes apart, first starting with the underflow
355 * handlers underflow-4 to underflow-12, then the overflow handlers
356 * overflow-4 to overflow-12.
357 *
358 * Note: We rerun the underflow handlers if we hit an exception, so
359 * we try to access any page that would cause a page fault early.
360 */
361
	.section .WindowVectors.text, "ax"


/* 4-Register Window Overflow Vector (Handler)
 * Spill a0..a3 into the four words below the caller's stack pointer (a5). */

	.align 64
.global _WindowOverflow4
_WindowOverflow4:
	s32e	a0, a5, -16
	s32e	a1, a5, -12
	s32e	a2, a5, -8
	s32e	a3, a5, -4
	rfwo


/* 4-Register Window Underflow Vector (Handler)
 * Reload a0..a3 from the save area below a5. */

	.align 64
.global _WindowUnderflow4
_WindowUnderflow4:
	l32e	a0, a5, -16
	l32e	a1, a5, -12
	l32e	a2, a5, -8
	l32e	a3, a5, -4
	rfwu


/* 8-Register Window Overflow Vector (Handler) */

	.align 64
.global _WindowOverflow8
_WindowOverflow8:
	s32e	a0, a9, -16
	l32e	a0, a1, -12		# NOTE(review): reloads a0 with the sp
					# saved at a1-12 — presumably the older
					# frame's save area per the windowed
					# ABI; confirm against the ISA manual
	s32e	a2, a9, -8
	s32e	a1, a9, -12
	s32e	a3, a9, -4
	s32e	a4, a0, -32		# spill a4..a7 relative to that sp
	s32e	a5, a0, -28
	s32e	a6, a0, -24
	s32e	a7, a0, -20
	rfwo

/* 8-Register Window Underflow Vector (Handler)
 * Loads are ordered to touch both save areas early, so a page fault
 * happens before state is modified (see section comment above). */

	.align 64
.global _WindowUnderflow8
_WindowUnderflow8:
	l32e	a1, a9, -12
	l32e	a0, a9, -16
	l32e	a7, a1, -12		# early access to the second save area
	l32e	a2, a9, -8
	l32e	a4, a7, -32
	l32e	a3, a9, -4
	l32e	a5, a7, -28
	l32e	a6, a7, -24
	l32e	a7, a7, -20		# a7 reloaded last: it held the base
	rfwu


/* 12-Register Window Overflow Vector (Handler) */

	.align 64
.global _WindowOverflow12
_WindowOverflow12:
	s32e	a0, a13, -16
	l32e	a0, a1, -12		# fetch older frame's sp (as in overflow8)
	s32e	a1, a13, -12
	s32e	a2, a13, -8
	s32e	a3, a13, -4
	s32e	a4, a0, -48		# spill a4..a11 relative to that sp
	s32e	a5, a0, -44
	s32e	a6, a0, -40
	s32e	a7, a0, -36
	s32e	a8, a0, -32
	s32e	a9, a0, -28
	s32e	a10, a0, -24
	s32e	a11, a0, -20
	rfwo

/* 12-Register Window Underflow Vector (Handler)
 * Same early-fault ordering as underflow8. */

	.align 64
.global _WindowUnderflow12
_WindowUnderflow12:
	l32e	a1, a13, -12
	l32e	a0, a13, -16
	l32e	a11, a1, -12		# early access to the second save area
	l32e	a2, a13, -8
	l32e	a4, a11, -48
	l32e	a8, a11, -32
	l32e	a3, a13, -4
	l32e	a5, a11, -44
	l32e	a6, a11, -40
	l32e	a7, a11, -36
	l32e	a9, a11, -28
	l32e	a10, a11, -24
	l32e	a11, a11, -20		# a11 reloaded last: it held the base
	rfwu
461
462 .text
463
464
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..476b2b53cd01
--- /dev/null
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -0,0 +1,341 @@
1/*
2 * arch/xtensa/kernel/vmlinux.lds.S
3 *
4 * Xtensa linker script
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
15 */
16
17#include <asm-generic/vmlinux.lds.h>
18
19#include <linux/config.h>
20#define _NOCLANGUAGE
21#include <xtensa/config/core.h>
22#include <xtensa/config/system.h>
23OUTPUT_ARCH(xtensa)
24ENTRY(_start)
25
26#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
27jiffies = jiffies_64 + 4;
28#else
29jiffies = jiffies_64;
30#endif
31
32#define KERNELOFFSET 0x1000
33
34/* Note: In the following macros, it would be nice to specify only the
35 vector name and section kind and construct "sym" and "section" using
36 CPP concatenation, but that does not work reliably. Concatenating a
37 string with "." produces an invalid token. CPP will not print a
38 warning because it thinks this is an assembly file, but it leaves
39 them as multiple tokens and there may or may not be whitespace
40 between them. */
41
/* Macro for a relocation entry: emits a (start, end, load-address)
 * triple into the __relocate table; the triples are consumed at boot
 * when the vectors are copied to their real addresses (see the
 * "relocated ... at startup time" comment in SECTIONS below). */

#define RELOCATE_ENTRY(sym, section) \
	LONG(sym ## _start); \
	LONG(sym ## _end); \
	LONG(LOADADDR(section))

/* Macro to define a section for a vector.
 *
 * Use of the MIN function catches the types of errors illustrated in
 * the following example:
 *
 * Assume the section .DoubleExceptionVector.literal is completely
 * full.  Then a programmer adds code to .DoubleExceptionVector.text
 * that produces another literal.  The final literal position will
 * overlay onto the first word of the adjacent code section
 * .DoubleExceptionVector.text.  (In practice, the literals will
 * overwrite the code, and the first few instructions will be
 * garbage.)
 */

/* Defines a vector section placed at virtual address 'addr' whose load
 * address is packed immediately after 'prevsec' (word-aligned), capped
 * by max_prevsec_size.  Also emits sym_start/sym_end for RELOCATE_ENTRY. */
#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec)	      \
  section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size,		      \
		         LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3)     \
  {									      \
    . = ALIGN(4);							      \
    sym ## _start = ABSOLUTE(.);		 			      \
    *(section)								      \
    sym ## _end = ABSOLUTE(.);						      \
  }
72
73/*
74 * Mapping of input sections to output sections when linking.
75 */
76
SECTIONS
{
	/* Kernel image starts KERNELOFFSET bytes into cached KSEG. */
	. = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;

	/* .text section */

	_text = .;
	_stext = .;
	_ftext = .;

	.text :
	{
		/* The .head.text section must be the first section! */
		*(.head.text)
		*(.literal .text)
		*(.srom.text)
		VMLINUX_SYMBOL(__sched_text_start) = .;
		*(.sched.text.literal .sched.text)
		VMLINUX_SYMBOL(__sched_text_end) = .;
		VMLINUX_SYMBOL(__lock_text_start) = .;
		*(.spinlock.text.literal .spinlock.text)
		VMLINUX_SYMBOL(__lock_text_end) = .;

	}
	_etext = .;

	. = ALIGN(16);

	RODATA

	/* Relocation table: (start, end, load address) triples read at
	 * boot to copy the vectors to their final positions. */

	. = ALIGN(16);
	__boot_reloc_table_start = ABSOLUTE(.);

	__relocate : {

		RELOCATE_ENTRY(_WindowVectors_text,
			       .WindowVectors.text);
#if 0
		/* vector literal sections currently not relocated */
		RELOCATE_ENTRY(_KernelExceptionVector_literal,
			       .KernelExceptionVector.literal);
#endif
		RELOCATE_ENTRY(_KernelExceptionVector_text,
			       .KernelExceptionVector.text);
#if 0
		RELOCATE_ENTRY(_UserExceptionVector_literal,
			       .UserExceptionVector.literal);
#endif
		RELOCATE_ENTRY(_UserExceptionVector_text,
			       .UserExceptionVector.text);
		RELOCATE_ENTRY(_DoubleExceptionVector_literal,
			       .DoubleExceptionVector.literal);
		RELOCATE_ENTRY(_DoubleExceptionVector_text,
			       .DoubleExceptionVector.text);
	}
	__boot_reloc_table_end = ABSOLUTE(.) ;

	.fixup : { *(.fixup) }

	. = ALIGN(16);

	/* Exception table used by __get_user()/__put_user() fault fixup. */
	__ex_table : {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}

	/* Data section */

	. = ALIGN(XCHAL_ICACHE_LINESIZE);
	_fdata = .;
	.data :
	{
		*(.data) CONSTRUCTORS
		. = ALIGN(XCHAL_ICACHE_LINESIZE);
		*(.data.cacheline_aligned)
	}

	_edata = .;

	/* The initial task */
	. = ALIGN(8192);
	.data.init_task : { *(.data.init_task) }

	/* Initialization code and data (freed after boot): */

	. = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
	__init_begin = .;
	.init.text : {
		_sinittext = .;
		*(.init.text.literal) *(.init.text)
		_einittext = .;
	}

	.init.data :
	{
		*(.init.data)
		. = ALIGN(0x4);
		__tagtable_begin = .;
		*(.taglist)
		__tagtable_end = .;
	}

	. = ALIGN(XCHAL_ICACHE_LINESIZE);

	__setup_start = .;
	.init.setup : { *(.init.setup) }
	__setup_end = .;

	/* initcalls, run in level order 1..7 at boot */
	__initcall_start = .;
	.initcall.init : {
		*(.initcall1.init)
		*(.initcall2.init)
		*(.initcall3.init)
		*(.initcall4.init)
		*(.initcall5.init)
		*(.initcall6.init)
		*(.initcall7.init)
	}
	__initcall_end = .;

	__con_initcall_start = .;
	.con_initcall.init : { *(.con_initcall.init) }
	__con_initcall_end = .;

	SECURITY_INIT

	. = ALIGN(4);

	/* NOTE(review): __ftr_fixup looks carried over from another arch's
	 * script — confirm anything on xtensa actually emits __ftr_fixup. */
	__start___ftr_fixup = .;
	__ftr_fixup : { *(__ftr_fixup) }
	__stop___ftr_fixup = .;

	. = ALIGN(32);
	__per_cpu_start = .;
	.data.percpu : { *(.data.percpu) }
	__per_cpu_end = .;

	. = ALIGN(4096);
	__initramfs_start =.;
	.init.ramfs : { *(.init.ramfs) }
	__initramfs_end = .;

	/* We need this dummy segment here: it anchors the load-address
	 * chain of the SECTION_VECTOR entries below. */

	. = ALIGN(4);
	.dummy : { LONG(0) }

	/* The vectors are relocated to the real position at startup time.
	 * Each entry packs its load address right after the previous
	 * section's load image (see SECTION_VECTOR above). */

	SECTION_VECTOR (_WindowVectors_text,
			.WindowVectors.text,
			XCHAL_WINDOW_VECTORS_VADDR, 4,
			.dummy)
	SECTION_VECTOR (_DebugInterruptVector_literal,
			.DebugInterruptVector.literal,
			XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
			SIZEOF(.WindowVectors.text),
			.WindowVectors.text)
	SECTION_VECTOR (_DebugInterruptVector_text,
			.DebugInterruptVector.text,
			XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
			4,
			.DebugInterruptVector.literal)
	SECTION_VECTOR (_KernelExceptionVector_literal,
			.KernelExceptionVector.literal,
			XCHAL_KERNELEXC_VECTOR_VADDR - 4,
			SIZEOF(.DebugInterruptVector.text),
			.DebugInterruptVector.text)
	SECTION_VECTOR (_KernelExceptionVector_text,
			.KernelExceptionVector.text,
			XCHAL_KERNELEXC_VECTOR_VADDR,
			4,
			.KernelExceptionVector.literal)
	SECTION_VECTOR (_UserExceptionVector_literal,
			.UserExceptionVector.literal,
			XCHAL_USEREXC_VECTOR_VADDR - 4,
			SIZEOF(.KernelExceptionVector.text),
			.KernelExceptionVector.text)
	SECTION_VECTOR (_UserExceptionVector_text,
			.UserExceptionVector.text,
			XCHAL_USEREXC_VECTOR_VADDR,
			4,
			.UserExceptionVector.literal)
	SECTION_VECTOR (_DoubleExceptionVector_literal,
			.DoubleExceptionVector.literal,
			XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
			SIZEOF(.UserExceptionVector.text),
			.UserExceptionVector.text)
	SECTION_VECTOR (_DoubleExceptionVector_text,
			.DoubleExceptionVector.text,
			XCHAL_DOUBLEEXC_VECTOR_VADDR,
			32,
			.DoubleExceptionVector.literal)

	/* Advance past the last vector's load image, word-aligned. */
	. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
	. = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);

	__init_end = .;

	. = ALIGN(8192);

	/* BSS section */
	_bss_start = .;
	.sbss : { *(.sbss) *(.scommon) }
	.bss : { *(COMMON) *(.bss) }
	_bss_end = .;
	_end = .;

	/* only used by the boot loader */

	. = ALIGN(0x10);
	.bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }

	. = ALIGN(0x1000);
	__initrd_start = .;
	.initrd : { *(.initrd) }
	__initrd_end = .;

	.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
	{
		*(.ResetVector.text)
	}


	/* Sections to be discarded */
	/DISCARD/ :
	{
		*(.text.exit)
		*(.text.exit.literal)
		*(.data.exit)
		*(.exitcall.exit)
	}


	/* Debug information (not loaded into memory at runtime). */
	.debug 0 : { *(.debug) }
	.line 0 : { *(.line) }
	.debug_srcinfo 0 : { *(.debug_srcinfo) }
	.debug_sfnames 0 : { *(.debug_sfnames) }
	.debug_aranges 0 : { *(.debug_aranges) }
	.debug_pubnames 0 : { *(.debug_pubnames) }
	.debug_info 0 : { *(.debug_info) }
	.debug_abbrev 0 : { *(.debug_abbrev) }
	.debug_line 0 : { *(.debug_line) }
	.debug_frame 0 : { *(.debug_frame) }
	.debug_str 0 : { *(.debug_str) }
	.debug_loc 0 : { *(.debug_loc) }
	.debug_macinfo 0 : { *(.debug_macinfo) }
	.debug_weaknames 0 : { *(.debug_weaknames) }
	.debug_funcnames 0 : { *(.debug_funcnames) }
	.debug_typenames 0 : { *(.debug_typenames) }
	.debug_varnames 0 : { *(.debug_varnames) }

	/* Xtensa-specific property/literal sections. */
	.xt.insn 0 :
	{
		*(.xt.insn)
		*(.gnu.linkonce.x*)
	}

	.xt.lit 0 :
	{
		*(.xt.lit)
		*(.gnu.linkonce.p*)
	}
}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
new file mode 100644
index 000000000000..efae56a51475
--- /dev/null
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -0,0 +1,123 @@
1/*
2 * arch/xtensa/kernel/xtensa_ksyms.c
3 *
4 * Export Xtensa-specific functions for loadable modules.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Joe Taylor <joe@tensilica.com>
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/interrupt.h>
20#include <asm/irq.h>
21#include <linux/in6.h>
22#include <linux/pci.h>
23#include <linux/ide.h>
24
25#include <asm/uaccess.h>
26#include <asm/checksum.h>
27#include <asm/dma.h>
28#include <asm/io.h>
29#include <asm/page.h>
30#include <asm/pgalloc.h>
31#include <asm/semaphore.h>
32#ifdef CONFIG_BLK_DEV_FD
33#include <asm/floppy.h>
34#endif
35#ifdef CONFIG_NET
36#include <net/checksum.h>
37#endif /* CONFIG_NET */
38
39
/*
 * String functions (arch-optimized or generic implementations linked
 * into the kernel; exported for modules).
 */
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);

/* IRQ control and kernel-thread creation for drivers. */
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);

/*
 * gcc internal math functions: 64-bit shift, multiply, divide and
 * modulo helpers that the compiler emits calls to.
 */
extern long long __ashrdi3(long long, int);
extern long long __ashldi3(long long, int);
extern long long __lshrdi3(long long, int);
extern int __divsi3(int, int);
extern int __modsi3(int, int);
extern long long __muldi3(long long, long long);
extern int __mulsi3(int, int);
extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
extern unsigned long long __udivdi3(unsigned long long, unsigned long long);

EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);

/*
 * Semaphore operations
 */
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);

#ifdef CONFIG_NET
/*
 * Networking support
 */
EXPORT_SYMBOL(csum_partial_copy_generic);
#endif /* CONFIG_NET */

/*
 * Architecture-specific symbols
 */
EXPORT_SYMBOL(__xtensa_copy_user);

/*
 * Kernel hacking ...
 */

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
// FIXME EXPORT_SYMBOL(screen_info);
#endif

EXPORT_SYMBOL(get_wchan);

/* Port-style string I/O accessors (see asm/io.h). */
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile
new file mode 100644
index 000000000000..ed935b58e8a4
--- /dev/null
+++ b/arch/xtensa/lib/Makefile
@@ -0,0 +1,7 @@
#
# Makefile for Xtensa-specific library files.
#

# Helper routines built into lib.a for every configuration.
lib-y	+= memcopy.o memset.o checksum.o strcasecmp.o \
	   usercopy.o strncpy_user.o strnlen_user.o
# PCI auto-configuration helper, built only when PCI is enabled.
lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
new file mode 100644
index 000000000000..e2d64dfd530c
--- /dev/null
+++ b/arch/xtensa/lib/checksum.S
@@ -0,0 +1,410 @@
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Xtensa version:  Copyright (C) 2001 Tensilica, Inc. by Kevin Chea
 *		    Optimized by Joe Taylor
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <asm/errno.h>
#include <linux/linkage.h>
#define _ASMLANGUAGE
#include <xtensa/config/core.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

/*
 * unsigned int csum_partial(const unsigned char *buf, int len,
 *			     unsigned int sum);
 *    a2 = buf
 *    a3 = len
 *    a4 = sum
 *
 * This function assumes 2- or 4-byte alignment.  Other alignments will fail!
 */

/* ONES_ADD converts twos-complement math to ones-complement: add the
 * value, and if the 32-bit addition wrapped around (sum < val), add the
 * carry back in at the bottom (end-around carry).  Label 99 is reused at
 * every expansion site; each branch targets its own nearest 99:.
 */
#define ONES_ADD(sum, val)	  \
	add	sum, sum, val	; \
	bgeu	sum, val, 99f	; \
	addi	sum, sum, 1	; \
99:				;

.text
ENTRY(csum_partial)
	/*
	 * Experiments with Ethernet and SLIP connections show that buf
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	entry	sp, 32
	extui	a5, a2, 0, 2
	bnez	a5, 8f		/* branch if 2-byte aligned */
	/* Fall-through on common case, 4-byte alignment */
1:
	srli	a5, a3, 5	/* 32-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a5, 2f
#else
	beqz	a5, 2f
	slli	a5, a5, 5
	add	a5, a5, a2	/* a5 = end of last 32-byte chunk */
.Loop1:
#endif
	l32i	a6, a2, 0
	l32i	a7, a2, 4
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	l32i	a6, a2, 8
	l32i	a7, a2, 12
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	l32i	a6, a2, 16
	l32i	a7, a2, 20
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	l32i	a6, a2, 24
	l32i	a7, a2, 28
	ONES_ADD(a4, a6)
	ONES_ADD(a4, a7)
	addi	a2, a2, 4*8
#if !XCHAL_HAVE_LOOPS
	blt	a2, a5, .Loop1
#endif
2:
	extui	a5, a3, 2, 3	/* remaining 4-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a5, 3f
#else
	beqz	a5, 3f
	slli	a5, a5, 2
	add	a5, a5, a2	/* a5 = end of last 4-byte chunk */
.Loop2:
#endif
	l32i	a6, a2, 0
	ONES_ADD(a4, a6)
	addi	a2, a2, 4
#if !XCHAL_HAVE_LOOPS
	blt	a2, a5, .Loop2
#endif
3:
	_bbci.l	a3, 1, 5f	/* remaining 2-byte chunk */
	l16ui	a6, a2, 0
	ONES_ADD(a4, a6)
	addi	a2, a2, 2
5:
	_bbci.l	a3, 0, 7f	/* remaining 1-byte chunk */
6:	l8ui	a6, a2, 0
#ifdef __XTENSA_EB__
	slli	a6, a6, 8	/* load byte into bits 8..15 */
#endif
	ONES_ADD(a4, a6)
7:
	mov	a2, a4		/* checksum is returned in a2 */
	retw

	/* uncommon case, buf is 2-byte aligned */
8:
	beqz	a3, 7b		/* branch if len == 0 */
	beqi	a3, 1, 6b	/* branch if len == 1 */

	extui	a5, a2, 0, 1
	bnez	a5, 8f		/* branch if 1-byte aligned */

	l16ui	a6, a2, 0	/* common case, len >= 2 */
	ONES_ADD(a4, a6)
	addi	a2, a2, 2	/* adjust buf */
	addi	a3, a3, -2	/* adjust len */
	j	1b		/* now buf is 4-byte aligned */

	/* case: odd-byte aligned, len > 1
	 * This case is dog slow, so don't give us an odd address.
	 * (I don't think this ever happens, but just in case.)
	 */
8:
	srli	a5, a3, 2	/* 4-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a5, 2f
#else
	beqz	a5, 2f
	slli	a5, a5, 2
	add	a5, a5, a2	/* a5 = end of last 4-byte chunk */
.Loop3:
#endif
	/* reassemble one misaligned word from three accesses */
	l8ui	a6, a2, 0	/* bits 24..31 */
	l16ui	a7, a2, 1	/* bits  8..23 */
	l8ui	a8, a2, 3	/* bits  0.. 8 */
#ifdef __XTENSA_EB__
	slli	a6, a6, 24
#else
	slli	a8, a8, 24
#endif
	slli	a7, a7, 8
	or	a7, a7, a6
	or	a7, a7, a8
	ONES_ADD(a4, a7)
	addi	a2, a2, 4
#if !XCHAL_HAVE_LOOPS
	blt	a2, a5, .Loop3
#endif
2:
	_bbci.l	a3, 1, 3f	/* remaining 2-byte chunk, still odd addr */
	l8ui	a6, a2, 0
	l8ui	a7, a2, 1
#ifdef __XTENSA_EB__
	slli	a6, a6, 8
#else
	slli	a7, a7, 8
#endif
	or	a7, a7, a6
	ONES_ADD(a4, a7)
	addi	a2, a2, 2
3:
	j	5b		/* branch to handle the remaining byte */
173
174
175
/*
 * Copy from src while checksumming, otherwise like csum_partial.
 *
 * The macros SRC and DST specify the type of access for the instruction,
 * thus we can call a custom exception handler for each access type:
 * a faulting SRC access lands in fixup label 6001, a faulting DST access
 * in 6002 (see the .fixup section at the end of this file).
 */

#define SRC(y...)	\
	9999: y;	\
	.section __ex_table, "a"; \
	.long 9999b, 6001f	; \
	.previous

#define DST(y...)	\
	9999: y;	\
	.section __ex_table, "a"; \
	.long 9999b, 6002f	; \
	.previous

/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
					int sum, int *src_err_ptr, int *dst_err_ptr)
	a2  = src
	a3  = dst
	a4  = len
	a5  = sum
	a6  = src_err_ptr
	a7  = dst_err_ptr
	a8  = temp
	a9  = temp
	a10 = temp
	a11 = original len for exception handling
	a12 = original dst for exception handling

	This function is optimized for 4-byte aligned addresses.  Other
	alignments work, but not nearly as efficiently.
 */

ENTRY(csum_partial_copy_generic)
	entry	sp, 32
	mov	a12, a3		/* save dst/len for the fixup code */
	mov	a11, a4
	or	a10, a2, a3

	/* We optimize the following alignment tests for the 4-byte
	aligned case.  Two bbsi.l instructions might seem more optimal
	(commented out below).  However, both labels 5: and 3: are out
	of the imm8 range, so the assembler relaxes them into
	equivalent bbci.l, j combinations, which is actually
	slower. */

	extui	a9, a10, 0, 2
	beqz	a9, 1f		/* branch if both are 4-byte aligned */
	bbsi.l	a10, 0, 5f	/* branch if one address is odd */
	j	3f		/* one address is 2-byte aligned */

/*	_bbsi.l	a10, 0, 5f */	/* branch if odd address */
/*	_bbsi.l	a10, 1, 3f */	/* branch if 2-byte-aligned address */

1:
	/* src and dst are both 4-byte aligned */
	srli	a10, a4, 5	/* 32-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 2f
#else
	beqz	a10, 2f
	slli	a10, a10, 5
	add	a10, a10, a2	/* a10 = end of last 32-byte src chunk */
.Loop5:
#endif
SRC(	l32i	a9, a2, 0	)
SRC(	l32i	a8, a2, 4	)
DST(	s32i	a9, a3, 0	)
DST(	s32i	a8, a3, 4	)
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
SRC(	l32i	a9, a2, 8	)
SRC(	l32i	a8, a2, 12	)
DST(	s32i	a9, a3, 8	)
DST(	s32i	a8, a3, 12	)
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
SRC(	l32i	a9, a2, 16	)
SRC(	l32i	a8, a2, 20	)
DST(	s32i	a9, a3, 16	)
DST(	s32i	a8, a3, 20	)
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
SRC(	l32i	a9, a2, 24	)
SRC(	l32i	a8, a2, 28	)
DST(	s32i	a9, a3, 24	)
DST(	s32i	a8, a3, 28	)
	ONES_ADD(a5, a9)
	ONES_ADD(a5, a8)
	addi	a2, a2, 32
	addi	a3, a3, 32
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop5
#endif
2:
	extui	a10, a4, 2, 3	/* remaining 4-byte chunks */
	extui	a4, a4, 0, 2	/* reset len for general-case, 2-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 3f
#else
	beqz	a10, 3f
	slli	a10, a10, 2
	add	a10, a10, a2	/* a10 = end of last 4-byte src chunk */
.Loop6:
#endif
SRC(	l32i	a9, a2, 0	)
DST(	s32i	a9, a3, 0	)
	ONES_ADD(a5, a9)
	addi	a2, a2, 4
	addi	a3, a3, 4
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop6
#endif
3:
	/*
	Control comes to here in two cases: (1) It may fall through
	to here from the 4-byte alignment case to process, at most,
	one 2-byte chunk.  (2) It branches to here from above if
	either src or dst is 2-byte aligned, and we process all bytes
	here, except for perhaps a trailing odd byte.  It's
	inefficient, so align your addresses to 4-byte boundaries.

	a2 = src
	a3 = dst
	a4 = len
	a5 = sum
	*/
	srli	a10, a4, 1	/* 2-byte chunks */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 4f
#else
	beqz	a10, 4f
	slli	a10, a10, 1
	add	a10, a10, a2	/* a10 = end of last 2-byte src chunk */
.Loop7:
#endif
SRC(	l16ui	a9, a2, 0	)
DST(	s16i	a9, a3, 0	)
	ONES_ADD(a5, a9)
	addi	a2, a2, 2
	addi	a3, a3, 2
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop7
#endif
4:
	/* This section processes a possible trailing odd byte. */
	_bbci.l	a4, 0, 8f	/* 1-byte chunk */
SRC(	l8ui	a9, a2, 0	)
DST(	s8i	a9, a3, 0	)
#ifdef __XTENSA_EB__
	slli	a9, a9, 8	/* shift byte to bits 8..15 */
#endif
	ONES_ADD(a5, a9)
8:
	mov	a2, a5		/* checksum is returned in a2 */
	retw

5:
	/* Control branch to here when either src or dst is odd.  We
	process all bytes using 8-bit accesses.  Grossly inefficient,
	so don't feed us an odd address. */

	srli	a10, a4, 1	/* handle in pairs for 16-bit csum */
#if XCHAL_HAVE_LOOPS
	loopgtz	a10, 6f
#else
	beqz	a10, 6f
	slli	a10, a10, 1
	add	a10, a10, a2	/* a10 = end of last odd-aligned, 2-byte src chunk */
.Loop8:
#endif
SRC(	l8ui	a9, a2, 0	)
SRC(	l8ui	a8, a2, 1	)
DST(	s8i	a9, a3, 0	)
DST(	s8i	a8, a3, 1	)
#ifdef __XTENSA_EB__
	slli	a9, a9, 8	/* combine into a single 16-bit value */
#else				/* for checksum computation */
	slli	a8, a8, 8
#endif
	or	a9, a9, a8
	ONES_ADD(a5, a9)
	addi	a2, a2, 2
	addi	a3, a3, 2
#if !XCHAL_HAVE_LOOPS
	blt	a2, a10, .Loop8
#endif
6:
	j	4b		/* process the possible trailing odd byte */


# Exception handler:
.section .fixup, "ax"
/*
	a6  = src_err_ptr
	a7  = dst_err_ptr
	a11 = original len for exception handling
	a12 = original dst for exception handling
*/

6001:	/* faulting SRC access: report -EFAULT and zero the destination */
	_movi	a2, -EFAULT
	s32i	a2, a6, 0	/* src_err_ptr */

	# clear the complete destination - computing the rest
	# is too much work
	movi	a2, 0
#if XCHAL_HAVE_LOOPS
	loopgtz	a11, 2f
#else
	beqz	a11, 2f
	add	a11, a11, a12	/* a11 = ending address */
.Leloop:
#endif
	s8i	a2, a12, 0
	addi	a12, a12, 1
#if !XCHAL_HAVE_LOOPS
	blt	a12, a11, .Leloop
#endif
2:
	retw

6002:	/* faulting DST access: report -EFAULT, return 0 */
	movi	a2, -EFAULT
	s32i	a2, a7, 0	/* dst_err_ptr */
	movi	a2, 0
	retw

.previous
410
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
new file mode 100644
index 000000000000..e8f6d7eb7222
--- /dev/null
+++ b/arch/xtensa/lib/memcopy.S
@@ -0,0 +1,315 @@
/*
 * arch/xtensa/lib/hal/memcopy.S -- Core HAL library functions
 * xthal_memcpy and xthal_bcopy
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 */

#include <xtensa/coreasm.h>

	/*
	 * Funnel-shift two adjacent source words into one aligned
	 * destination word; operand order depends on endianness.
	 */
	.macro	src_b	r, w0, w1
#ifdef __XTENSA_EB__
	src	\r, \w0, \w1
#else
	src	\r, \w1, \w0
#endif
	.endm

	/* Set the shift amount register from a byte address, per endianness. */
	.macro	ssa8	r
#ifdef __XTENSA_EB__
	ssa8b	\r
#else
	ssa8l	\r
#endif
	.endm


/*
 * void *memcpy(void *dst, const void *src, size_t len);
 * void *memmove(void *dst, const void *src, size_t len);
 * void *bcopy(const void *src, void *dst, size_t len);
 *
 * This function is intended to do the same thing as the standard
 * library function memcpy() (or bcopy()) for most cases.
 * However, where the source and/or destination references
 * an instruction RAM or ROM or a data RAM or ROM, that
 * source and/or destination will always be accessed with
 * 32-bit load and store instructions (as required for these
 * types of devices).
 *
 * !!!!!!!  XTFIXME:
 * !!!!!!!  Handling of IRAM/IROM has not yet
 * !!!!!!!  been implemented.
 *
 * The bcopy version is provided here to avoid the overhead
 * of an extra call, for callers that require this convention.
 *
 * The (general case) algorithm is as follows:
 *   If destination is unaligned, align it by conditionally
 *     copying 1 and 2 bytes.
 *   If source is aligned,
 *     do 16 bytes with a loop, and then finish up with
 *     8, 4, 2, and 1 byte copies conditional on the length;
 *   else (if source is unaligned),
 *     do the same, but use SRC to align the source data.
 *   This code tries to use fall-through branches for the common
 *     case of aligned source and destination and multiple
 *     of 4 (or 8) length.
 *
 * Register use:
 *	a0/ return address
 *	a1/ stack pointer
 *	a2/ return value
 *	a3/ src
 *	a4/ length
 *	a5/ dst
 *	a6/ tmp
 *	a7/ tmp
 *	a8/ tmp
 *	a9/ tmp
 *	a10/ tmp
 *	a11/ tmp
 */

	.text
	.align	4
	.global	bcopy
	.type	bcopy,@function
bcopy:
	entry	sp, 16		# minimal stack frame
	# a2=src, a3=dst, a4=len
	mov	a5, a3		# copy dst so that a2 is return value
	mov	a3, a2
	mov	a2, a5
	j	.Lcommon	# go to common code for memcpy+bcopy


/*
 * Byte by byte copy
 */
	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lbytecopy:
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lbytecopydone
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a4, .Lbytecopydone
	add	a7, a3, a4	# a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
	l8ui	a6, a3, 0
	addi	a3, a3, 1
	s8i	a6, a5, 0
	addi	a5, a5, 1
#if !XCHAL_HAVE_LOOPS
	blt	a3, a7, .Lnextbyte
#endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone:
	retw

/*
 * Destination is unaligned
 */

	.align	4
.Ldst1mod2:	# dst is only byte aligned
	_bltui	a4, 7, .Lbytecopy	# do short copies byte by byte

	# copy 1 byte
	l8ui	a6, a3, 0
	addi	a3, a3, 1
	addi	a4, a4, -1
	s8i	a6, a5, 0
	addi	a5, a5, 1
	_bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
					# return to main algorithm
.Ldst2mod4:	# dst 16-bit aligned
	# copy 2 bytes
	_bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
	l8ui	a6, a3, 0
	l8ui	a7, a3, 1
	addi	a3, a3, 2
	addi	a4, a4, -2
	s8i	a6, a5, 0
	s8i	a7, a5, 1
	addi	a5, a5, 2
	j	.Ldstaligned	# dst is now aligned, return to main algorithm

	.align	4
	.global	memcpy
	.type	memcpy,@function
memcpy:
	.global	memmove
	.type	memmove,@function
memmove:

	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
	mov	a5, a2		# copy dst so that a2 is return value
.Lcommon:
	_bbsi.l	a2, 0, .Ldst1mod2	# if dst is 1 mod 2
	_bbsi.l	a2, 1, .Ldst2mod4	# if dst is 2 mod 4
.Ldstaligned:	# return here from .Ldst?mod? once dst is aligned
	srli	a7, a4, 4	# number of loop iterations with 16B
				# per iteration
	movi	a8, 3		# if source is not aligned,
	_bany	a3, a8, .Lsrcunaligned	# then use shifting copy
	/*
	 * Destination and source are word-aligned, use word copy.
	 */
	# copy 16 bytes per iteration for word-aligned dst and word-aligned src
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop1done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop1done
	slli	a8, a7, 4
	add	a8, a8, a3	# a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
	# loads and stores are interleaved to hide load latency
	l32i	a6, a3, 0
	l32i	a7, a3, 4
	s32i	a6, a5, 0
	l32i	a6, a3, 8
	s32i	a7, a5, 4
	l32i	a7, a3, 12
	s32i	a6, a5, 8
	addi	a3, a3, 16
	s32i	a7, a5, 12
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a3, a8, .Loop1
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
	bbci.l	a4, 3, .L2
	# copy 8 bytes
	l32i	a6, a3, 0
	l32i	a7, a3, 4
	addi	a3, a3, 8
	s32i	a6, a5, 0
	s32i	a7, a5, 4
	addi	a5, a5, 8
.L2:
	bbsi.l	a4, 2, .L3
	bbsi.l	a4, 1, .L4
	bbsi.l	a4, 0, .L5
	retw
.L3:
	# copy 4 bytes
	l32i	a6, a3, 0
	addi	a3, a3, 4
	s32i	a6, a5, 0
	addi	a5, a5, 4
	bbsi.l	a4, 1, .L4
	bbsi.l	a4, 0, .L5
	retw
.L4:
	# copy 2 bytes
	l16ui	a6, a3, 0
	addi	a3, a3, 2
	s16i	a6, a5, 0
	addi	a5, a5, 2
	bbsi.l	a4, 0, .L5
	retw
.L5:
	# copy 1 byte
	l8ui	a6, a3, 0
	s8i	a6, a5, 0
	retw

/*
 * Destination is aligned, Source is unaligned
 */

	.align	4
.Lsrcunaligned:
	_beqz	a4, .Ldone	# avoid loading anything for zero-length copies
	# copy 16 bytes per iteration for word-aligned dst and unaligned src
	ssa8	a3		# set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT	1	/* set to 1 when running on ISS (simulator) with the
					   lint or ferret client, or 0 to save a few cycles */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
	and	a11, a3, a8	# save unalignment offset for below
	sub	a3, a3, a11	# align a3
#endif
	l32i	a6, a3, 0	# load first word
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop2done
	slli	a10, a7, 4
	add	a10, a10, a3	# a10 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
	l32i	a7, a3, 4
	l32i	a8, a3, 8
	src_b	a6, a6, a7
	s32i	a6, a5, 0
	l32i	a9, a3, 12
	src_b	a7, a7, a8
	s32i	a7, a5, 4
	l32i	a6, a3, 16
	src_b	a8, a8, a9
	s32i	a8, a5, 8
	addi	a3, a3, 16
	src_b	a9, a9, a6
	s32i	a9, a5, 12
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a3, a10, .Loop2
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2done:
	bbci.l	a4, 3, .L12
	# copy 8 bytes
	l32i	a7, a3, 4
	l32i	a8, a3, 8
	src_b	a6, a6, a7
	s32i	a6, a5, 0
	addi	a3, a3, 8
	src_b	a7, a7, a8
	s32i	a7, a5, 4
	addi	a5, a5, 8
	mov	a6, a8
.L12:
	bbci.l	a4, 2, .L13
	# copy 4 bytes
	l32i	a7, a3, 4
	addi	a3, a3, 4
	src_b	a6, a6, a7
	s32i	a6, a5, 0
	addi	a5, a5, 4
	mov	a6, a7
.L13:
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
	add	a3, a3, a11	# readjust a3 with correct misalignment
#endif
	bbsi.l	a4, 1, .L14
	bbsi.l	a4, 0, .L15
.Ldone:	retw
.L14:
	# copy 2 bytes
	l8ui	a6, a3, 0
	l8ui	a7, a3, 1
	addi	a3, a3, 2
	s8i	a6, a5, 0
	s8i	a7, a5, 1
	addi	a5, a5, 2
	bbsi.l	a4, 0, .L15
	retw
.L15:
	# copy 1 byte
	l8ui	a6, a3, 0
	s8i	a6, a5, 0
	retw

/*
 * Local Variables:
 * mode:fundamental
 * comment-start: "# "
 * comment-start-skip: "# *"
 * End:
 */
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
new file mode 100644
index 000000000000..4de25134bc62
--- /dev/null
+++ b/arch/xtensa/lib/memset.S
@@ -0,0 +1,160 @@
/*
 *  arch/xtensa/lib/memset.S
 *
 *  ANSI C standard library function memset
 *  (Well, almost.  .fixup code might return zero.)
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file "COPYING" in the main directory of
 *  this archive for more details.
 *
 *  Copyright (C) 2002 Tensilica Inc.
 */

#include <xtensa/coreasm.h>

/*
 * void *memset(void *dst, int c, size_t length)
 *
 * The algorithm is as follows:
 *   Create a word with c in all byte positions
 *   If the destination is aligned,
 *     do 16B chucks with a loop, and then finish up with
 *     8B, 4B, 2B, and 1B stores conditional on the length.
 *   If destination is unaligned, align it by conditionally
 *     setting 1B and 2B and then go to aligned case.
 *   This code tries to use fall-through branches for the common
 *     case of an aligned destination (except for the branches to
 *     the alignment labels).
 */

/* Load or store instructions that may cause exceptions use the EX macro. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous

.text
.align	4
.global	memset
.type	memset,@function
memset:
	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ c, a4/ length
	extui	a3, a3, 0, 8	# mask to just 8 bits
	slli	a7, a3, 8	# duplicate character in all bytes of word
	or	a3, a3, a7	# ...
	slli	a7, a3, 16	# ...
	or	a3, a3, a7	# ...
	mov	a5, a2		# copy dst so that a2 is return value
	movi	a6, 3		# for alignment tests
	bany	a2, a6, .Ldstunaligned	# if dst is unaligned
.L0:	# return here from .Ldstunaligned when dst is aligned
	srli	a7, a4, 4	# number of loop iterations with 16B
				# per iteration
	bnez	a4, .Laligned
	retw			# nothing to do for length == 0

/*
 * Destination is word-aligned.
 */
	# set 16 bytes per iteration for word-aligned dst
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop1done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop1done
	slli	a6, a7, 4
	add	a6, a6, a5	# a6 = end of last 16B chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
	EX(s32i, a3, a5, 0, memset_fixup)
	EX(s32i, a3, a5, 4, memset_fixup)
	EX(s32i, a3, a5, 8, memset_fixup)
	EX(s32i, a3, a5, 12, memset_fixup)
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a5, a6, .Loop1
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
	bbci.l	a4, 3, .L2
	# set 8 bytes
	EX(s32i, a3, a5, 0, memset_fixup)
	EX(s32i, a3, a5, 4, memset_fixup)
	addi	a5, a5, 8
.L2:
	bbci.l	a4, 2, .L3
	# set 4 bytes
	EX(s32i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 4
.L3:
	bbci.l	a4, 1, .L4
	# set 2 bytes
	EX(s16i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 2
.L4:
	bbci.l	a4, 0, .L5
	# set 1 byte
	EX(s8i, a3, a5, 0, memset_fixup)
.L5:
.Lret1:
	retw

/*
 * Destination is unaligned
 */

.Ldstunaligned:
	bltui	a4, 8, .Lbyteset	# do short copies byte by byte
	bbci.l	a5, 0, .L20		# branch if dst alignment half-aligned
	# dst is only byte aligned
	# set 1 byte
	EX(s8i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 1
	addi	a4, a4, -1
	# now retest if dst aligned
	bbci.l	a5, 1, .L0	# if now aligned, return to main algorithm
.L20:
	# dst half-aligned
	# set 2 bytes
	EX(s16i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 2
	addi	a4, a4, -2
	j	.L0		# dst is now aligned, return to main algorithm

/*
 * Byte by byte set
 */
	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lbyteset:
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lbytesetdone
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a4, .Lbytesetdone
	add	a6, a5, a4	# a6 = ending address
#endif /* !XCHAL_HAVE_LOOPS */
.Lbyteloop:
	EX(s8i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 1
#if !XCHAL_HAVE_LOOPS
	blt	a5, a6, .Lbyteloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lbytesetdone:
	retw


	.section .fixup, "ax"
	.align	4

/* We return zero if a failure occurred. */

memset_fixup:
	movi	a2, 0
	retw
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
new file mode 100644
index 000000000000..90c790f6123b
--- /dev/null
+++ b/arch/xtensa/lib/pci-auto.c
@@ -0,0 +1,352 @@
1/*
2 * arch/xtensa/kernel/pci-auto.c
3 *
4 * PCI autoconfiguration library
5 *
6 * Copyright (C) 2001 - 2005 Tensilica Inc.
7 *
8 * Chris Zankel <zankel@tensilica.com, cez@zankel.net>
9 *
10 * Based on work from Matt Porter <mporter@mvista.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/pci.h>
21
22#include <asm/pci-bridge.h>
23
24
25/*
26 *
27 * Setting up a PCI
28 *
29 * pci_ctrl->first_busno = <first bus number (0)>
30 * pci_ctrl->last_busno = <last bus number (0xff)>
31 * pci_ctrl->ops = <PCI config operations>
32 * pci_ctrl->map_irq = <function to return the interrupt number for a device>
33 *
34 * pci_ctrl->io_space.start = <IO space start address (PCI view)>
35 * pci_ctrl->io_space.end = <IO space end address (PCI view)>
36 * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
37 * pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
38 * pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
39 * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
40 *
41 * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
42 * <IO space end>, IORESOURCE_IO, "PCI host bridge");
43 * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
44 * <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
45 *
46 * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
47 *
48 * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
49 *
50 */
51
52
/* Define DEBUG to print some debugging messages. */

#undef DEBUG

#ifdef DEBUG
# define DBG(x...) printk(x)
#else
# define DBG(x...)
#endif

/* Tops of the downward-growing I/O and memory address allocators. */
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;

/* Scratch device/bus used to issue config cycles while probing. */
static struct pci_dev pciauto_dev;
static struct pci_bus pciauto_bus;
68
69/*
70 * Helper functions
71 */
72
73/* Initialize the bars of a PCI device. */
74
75static void __init
76pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
77{
78 int bar_size;
79 int bar, bar_nr;
80 int *upper_limit;
81 int found_mem64 = 0;
82
83 for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
84 bar <= bar_limit;
85 bar+=4, bar_nr++)
86 {
87 /* Tickle the BAR and get the size */
88 pci_write_config_dword(dev, bar, 0xffffffff);
89 pci_read_config_dword(dev, bar, &bar_size);
90
91 /* If BAR is not implemented go to the next BAR */
92 if (!bar_size)
93 continue;
94
95 /* Check the BAR type and set our address mask */
96 if (bar_size & PCI_BASE_ADDRESS_SPACE_IO)
97 {
98 bar_size &= PCI_BASE_ADDRESS_IO_MASK;
99 upper_limit = &pciauto_upper_iospc;
100 DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
101 }
102 else
103 {
104 if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
105 PCI_BASE_ADDRESS_MEM_TYPE_64)
106 found_mem64 = 1;
107
108 bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
109 upper_limit = &pciauto_upper_memspc;
110 DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
111 }
112
113 /* Allocate a base address (bar_size is negative!) */
114 *upper_limit = (*upper_limit + bar_size) & bar_size;
115
116 /* Write it out and update our limit */
117 pci_write_config_dword(dev, bar, *upper_limit);
118
119 /*
120 * If we are a 64-bit decoder then increment to the
121 * upper 32 bits of the bar and force it to locate
122 * in the lower 4GB of memory.
123 */
124
125 if (found_mem64)
126 pci_write_config_dword(dev, (bar+=4), 0x00000000);
127
128 DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
129 }
130}
131
132/* Initialize the interrupt number. */
133
134static void __init
135pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
136{
137 u8 pin;
138 int irq = 0;
139
140 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
141
142 /* Fix illegal pin numbers. */
143
144 if (pin == 0 || pin > 4)
145 pin = 1;
146
147 if (pci_ctrl->map_irq)
148 irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin);
149
150 if (irq == -1)
151 irq = 0;
152
153 DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
154
155 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
156}
157
158
/*
 * Prepare a PCI-PCI bridge before scanning the bus behind it.
 *
 * Programs primary/secondary bus numbers (subordinate is temporarily
 * 0xff so config cycles can reach any bus below), rounds the
 * downward-growing memory allocator to a 1MB boundary and the I/O
 * allocator to a 4KB boundary, saves both marks in *memsave / *iosave
 * for the postscan step, and programs the bridge limit registers from
 * the current allocator tops.
 */
static void __init
pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
		int sub_bus, int *iosave, int *memsave)
{
	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
	pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);

	/* Round memory allocator to 1MB boundary */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	*memsave = pciauto_upper_memspc;

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	*iosave = pciauto_upper_iospc;

	/* Set up memory and I/O filter limits, assume 32-bit I/O space */
	pci_write_config_word(dev, PCI_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pci_write_config_byte(dev, PCI_IO_LIMIT,
			      ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
			      ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
}
184
/*
 * Finish configuring a PCI-PCI bridge after the bus behind it has been
 * scanned: program the final subordinate bus number and the base
 * registers of the memory, prefetchable-memory and I/O windows (their
 * limits were written by pciauto_prescan_setup_bridge), then enable
 * I/O, memory and bus-master on the bridge.  If a window consumed no
 * space, a minimum-sized window is still allocated.
 */
static void __init
pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
		int *iosave, int *memsave)
{
	int cmdstat;

	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);

	/*
	 * Round memory allocator to 1MB boundary.
	 * If no space used, allocate minimum.
	 */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	if (*memsave == pciauto_upper_memspc)
		pciauto_upper_memspc -= 0x00100000;

	pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);

	/* Allocate 1MB for prefetchable memory */
	pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);

	pciauto_upper_memspc -= 0x100000;

	pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
			      pciauto_upper_memspc >> 16);

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	if (*iosave == pciauto_upper_iospc)
		pciauto_upper_iospc -= 0x1000;

	pci_write_config_byte(dev, PCI_IO_BASE,
			      (pciauto_upper_iospc & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
			      pciauto_upper_iospc >> 16);

	/* Enable memory and I/O accesses, enable bus master */
	pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
	pci_write_config_dword(dev, PCI_COMMAND,
			       cmdstat |
			       PCI_COMMAND_IO |
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER);
}
231
/*
 * Scan the current PCI bus.
 *
 * Probes every devfn on @current_bus via the scratch pciauto_dev/bus,
 * enables devices that respond, assigns their BARs and interrupt line,
 * and recursively configures any PCI-PCI bridges found.  Returns the
 * highest (subordinate) bus number discovered at or below this bus.
 */


int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
{
	int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0;
	unsigned short vid;
	unsigned char header_type;
	struct pci_dev *dev = &pciauto_dev;

	pciauto_dev.bus = &pciauto_bus;
	pciauto_dev.sysdata = pci_ctrl;
	pciauto_bus.ops = pci_ctrl->ops;

	/*
	 * Fetch our I/O and memory space upper boundaries used
	 * to allocated base addresses on this pci_controller.
	 * Only done once, on the root bus of the controller.
	 */

	if (current_bus == pci_ctrl->first_busno)
	{
		pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
		pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
	}

	sub_bus = current_bus;

	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++)
	{
		/* Skip our host bridge */
		if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
			continue;

		/* Probe functions >0 only on multi-function devices. */
		if (PCI_FUNC(pci_devfn) && !found_multi)
			continue;

		pciauto_bus.number = current_bus;
		pciauto_dev.devfn = pci_devfn;

		/* If config space read fails from this device, move on */
		if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
			continue;

		if (!PCI_FUNC(pci_devfn))
			found_multi = header_type & 0x80;
		pci_read_config_word(dev, PCI_VENDOR_ID, &vid);

		/* All-ones or all-zeros vendor ID: nothing at this devfn. */
		if (vid == 0xffff || vid == 0x0000) {
			found_multi = 0;
			continue;
		}

		pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);

		if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {

			int iosave, memsave;

			DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
			    PCI_SLOT(pci_devfn));

			/* Allocate PCI I/O and/or memory space */
			pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);

			pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
					&iosave, &memsave);
			/* Recurse into the bus behind the bridge. */
			sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus+1);
			pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
					&iosave, &memsave);
			pciauto_bus.number = current_bus;

			continue;

		}


#if 0
		/* Skip legacy mode IDE controller */

		if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {

			unsigned char prg_iface;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);

			if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
				DBG("PCI Autoconfig: Skipping legacy mode "
				    "IDE controller\n");
				continue;
			}
		}
#endif

		/*
		 * Found a peripheral, enable some standard
		 * settings
		 */

		pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
		pci_write_config_dword(dev, PCI_COMMAND,
				       cmdstat |
				       PCI_COMMAND_IO |
				       PCI_COMMAND_MEMORY |
				       PCI_COMMAND_MASTER);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);

		/* Allocate PCI I/O and/or memory space */
		DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
		    current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );

		pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
		pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
	}
	return sub_bus;
}
348
349
350
351
352
diff --git a/arch/xtensa/lib/strcasecmp.c b/arch/xtensa/lib/strcasecmp.c
new file mode 100644
index 000000000000..165b2d6effa5
--- /dev/null
+++ b/arch/xtensa/lib/strcasecmp.c
@@ -0,0 +1,32 @@
1/*
2 * linux/arch/xtensa/lib/strcasecmp.c
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Copyright (C) 2002 Tensilica Inc.
9 */
10
11#include <linux/string.h>
12
13
14/* We handle nothing here except the C locale. Since this is used in
15 only one place, on strings known to contain only 7 bit ASCII, this
16 is ok. */
17
/* C-locale-only case-insensitive comparison.  This is used in only one
   place, on strings known to contain only 7-bit ASCII, so ignoring other
   locales is acceptable. */

/* Fold an ASCII uppercase letter to lowercase; leave all other bytes alone. */
static int lower_ascii(int c)
{
	return (c >= 'A' && c <= 'Z') ? c + ('a' - 'A') : c;
}

int strcasecmp(const char *a, const char *b)
{
	for (;;) {
		int ca = lower_ascii(*a++ & 0xff);
		int cb = lower_ascii(*b++ & 0xff);

		/* Stop on the first difference or at the terminator. */
		if (ca != cb || ca == '\0')
			return ca - cb;
	}
}
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
new file mode 100644
index 000000000000..71d55df43893
--- /dev/null
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -0,0 +1,224 @@
/*
 * arch/xtensa/lib/strncpy_user.S
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 *
 * Copyright (C) 2002 Tensilica Inc.
 */

#include <xtensa/coreasm.h>
#include <linux/errno.h>

/* Load or store instructions that may cause exceptions use the EX macro:
 * the access is tagged with a local label, and a __ex_table entry maps
 * that address to the given fixup handler. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous

/*
 * char *__strncpy_user(char *dst, const char *src, size_t len)
 */
.text
.begin literal
.align	4
.Lmask0:				# selects byte 0 of a loaded word
	.byte	0xff, 0x00, 0x00, 0x00
.Lmask1:				# selects byte 1
	.byte	0x00, 0xff, 0x00, 0x00
.Lmask2:				# selects byte 2
	.byte	0x00, 0x00, 0xff, 0x00
.Lmask3:				# selects byte 3
	.byte	0x00, 0x00, 0x00, 0xff
.end literal

# Register use
#   a0/ return address
#   a1/ stack pointer
#   a2/ return value
#   a3/ src
#   a4/ len
#   a5/ mask0
#   a6/ mask1
#   a7/ mask2
#   a8/ mask3
#   a9/ tmp
#   a10/ tmp
#   a11/ dst
#   a12/ tmp

.align	4
.global	__strncpy_user
.type	__strncpy_user,@function
__strncpy_user:
	entry	sp, 16			# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
	mov	a11, a2			# leave dst in return value register
	beqz	a4, .Lret		# if len is zero
	l32r	a5, .Lmask0		# mask for byte 0
	l32r	a6, .Lmask1		# mask for byte 1
	l32r	a7, .Lmask2		# mask for byte 2
	l32r	a8, .Lmask3		# mask for byte 3
	bbsi.l	a3, 0, .Lsrc1mod2	# if only  8-bit aligned
	bbsi.l	a3, 1, .Lsrc2mod4	# if only 16-bit aligned
.Lsrcaligned:	# return here when src is word-aligned
	srli	a12, a4, 2		# number of loop iterations with 4B per loop
	movi	a9, 3
	bnone	a11, a9, .Laligned	# fast path if dst is word-aligned too
	j	.Ldstunaligned

.Lsrc1mod2:	# src address is odd
	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
	addi	a3, a3, 1		# advance src pointer
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned

.Lsrc2mod4:	# src address is 2 mod 4
	EX(l8ui, a9, a3, 0, fixup_l)	# get first remaining byte
	/* 1-cycle interlock */
	EX(s8i, a9, a11, 0, fixup_s)	# store it
	beqz	a9, .Lret		# if that byte is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	EX(l8ui, a9, a3, 1, fixup_l)	# get second byte (src offset 1)
	addi	a3, a3, 2		# advance src pointer
	EX(s8i, a9, a11, 0, fixup_s)	# store second byte
	beqz	a9, .Lret		# if second byte is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	bnez	a4, .Lsrcaligned	# if len is nonzero
.Lret:
	sub	a2, a11, a2		# compute strlen (bytes stored before NUL)
	retw

/*
 * dst is word-aligned, src is word-aligned
 */
	.align	4			# 1 mod 4 alignment for LOOPNEZ
	.byte	0			# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a12, .Loop1done
#else
	beqz	a12, .Loop1done
	slli	a12, a12, 2
	add	a12, a12, a11		# a12 = end of last 4B chunk
#endif
.Loop1:
	EX(l32i, a9, a3, 0, fixup_l)	# get word from src
	addi	a3, a3, 4		# advance src pointer
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
	bnone	a9, a7, .Lz2		# if byte 2 is zero
	EX(s32i, a9, a11, 0, fixup_s)	# store word to dst
	bnone	a9, a8, .Lz3		# if byte 3 is zero
	addi	a11, a11, 4		# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Loop1
#endif

.Loop1done:
	bbci.l	a4, 1, .L100
	# copy 2 bytes
	EX(l16ui, a9, a3, 0, fixup_l)
	addi	a3, a3, 2		# advance src pointer
#ifdef __XTENSA_EB__
	bnone	a9, a7, .Lz0		# if byte 2 is zero
	bnone	a9, a8, .Lz1		# if byte 3 is zero
#else
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
#endif
	EX(s16i, a9, a11, 0, fixup_s)
	addi	a11, a11, 2		# advance dst pointer
.L100:
	bbci.l	a4, 0, .Lret
	EX(l8ui, a9, a3, 0, fixup_l)
	/* slot */
	EX(s8i, a9, a11, 0, fixup_s)
	beqz	a9, .Lret		# if byte is zero
	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
					# the effect of adding 3 in .Lz3 code
	/* fall thru to .Lz3 and "retw" */

.Lz3:	# byte 3 is zero
	addi	a11, a11, 3		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw
.Lz0:	# byte 0 is zero
#ifdef __XTENSA_EB__
	movi	a9, 0			# EB: low byte of a9 is not byte 0; store an explicit NUL
#endif /* __XTENSA_EB__ */
	EX(s8i, a9, a11, 0, fixup_s)
	sub	a2, a11, a2		# compute strlen
	retw
.Lz1:	# byte 1 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16		# EB: move bytes 0-1 into the low halfword for s16i
#endif /* __XTENSA_EB__ */
	EX(s16i, a9, a11, 0, fixup_s)
	addi	a11, a11, 1		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw
.Lz2:	# byte 2 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16		# EB: move bytes 0-1 into the low halfword for s16i
#endif /* __XTENSA_EB__ */
	EX(s16i, a9, a11, 0, fixup_s)
	movi	a9, 0
	EX(s8i, a9, a11, 2, fixup_s)	# store the terminating NUL (byte 2)
	addi	a11, a11, 2		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw

	.align	4			# 1 mod 4 alignment for LOOPNEZ
	.byte	0			# (0 mod 4 alignment for LBEG)
.Ldstunaligned:
/*
 * for now just use byte copy loop
 */
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lunalignedend
#else
	beqz	a4, .Lunalignedend
	add	a12, a11, a4		# a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
	EX(l8ui, a9, a3, 0, fixup_l)
	addi	a3, a3, 1
	EX(s8i, a9, a11, 0, fixup_s)
	beqz	a9, .Lunalignedend
	addi	a11, a11, 1
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Lnextbyte
#endif

.Lunalignedend:
	sub	a2, a11, a2		# compute strlen
	retw


	.section .fixup, "ax"
	.align	4

	/* For now, just return -EFAULT.  Future implementations might
	 * like to clear remaining kernel space, like the fixup
	 * implementation in memset().  Thus, we differentiate between
	 * load/store fixups. */

fixup_s:
fixup_l:
	movi	a2, -EFAULT
	retw
224
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
new file mode 100644
index 000000000000..cdff4d670f3b
--- /dev/null
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -0,0 +1,147 @@
/*
 * arch/xtensa/lib/strnlen_user.S
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Returns strnlen, including trailing zero terminator.
 * Zero indicates error.
 *
 * Copyright (C) 2002 Tensilica Inc.
 */

#include <xtensa/coreasm.h>

/* Load or store instructions that may cause exceptions use the EX macro:
 * the access is tagged, and a __ex_table entry maps its address to the
 * fixup handler. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous

/*
 * size_t __strnlen_user(const char *s, size_t len)
 */
.text
.begin literal
.align	4
.Lmask0:				# selects byte 0 of a loaded word
	.byte	0xff, 0x00, 0x00, 0x00
.Lmask1:				# selects byte 1
	.byte	0x00, 0xff, 0x00, 0x00
.Lmask2:				# selects byte 2
	.byte	0x00, 0x00, 0xff, 0x00
.Lmask3:				# selects byte 3
	.byte	0x00, 0x00, 0x00, 0xff
.end literal

# Register use:
#   a2/ src
#   a3/ len
#   a4/ tmp
#   a5/ mask0
#   a6/ mask1
#   a7/ mask2
#   a8/ mask3
#   a9/ tmp
#   a10/ tmp

.align	4
.global	__strnlen_user
.type	__strnlen_user,@function
__strnlen_user:
	entry	sp, 16		# minimal stack frame
	# a2/ s, a3/ len
	addi	a4, a2, -4	# because we overincrement at the end;
				# we compensate with load offsets of 4
	l32r	a5, .Lmask0	# mask for byte 0
	l32r	a6, .Lmask1	# mask for byte 1
	l32r	a7, .Lmask2	# mask for byte 2
	l32r	a8, .Lmask3	# mask for byte 3
	bbsi.l	a2, 0, .L1mod2	# if only  8-bit aligned
	bbsi.l	a2, 1, .L2mod4	# if only 16-bit aligned

/*
 * String is word-aligned.
 */
.Laligned:
	srli	a10, a3, 2	# number of loop iterations with 4B per loop
#if XCHAL_HAVE_LOOPS
	loopnez	a10, .Ldone
#else
	beqz	a10, .Ldone
	slli	a10, a10, 2
	add	a10, a10, a4	# a10 = end of last 4B chunk
#endif /* XCHAL_HAVE_LOOPS */
.Loop:
	EX(l32i, a9, a4, 4, lenfixup)	# get next word of string
	addi	a4, a4, 4		# advance string pointer
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
	bnone	a9, a7, .Lz2		# if byte 2 is zero
	bnone	a9, a8, .Lz3		# if byte 3 is zero
#if !XCHAL_HAVE_LOOPS
	blt	a4, a10, .Loop
#endif

.Ldone:
	EX(l32i, a9, a4, 4, lenfixup)	# load 4 bytes for remaining checks

	bbci.l	a3, 1, .L100
	# check two more bytes (bytes 0, 1 of word)
	addi	a4, a4, 2	# advance string pointer
	bnone	a9, a5, .Lz0	# if byte 0 is zero
	bnone	a9, a6, .Lz1	# if byte 1 is zero
.L100:
	bbci.l	a3, 0, .L101
	# check one more byte (byte 2 of word)
	# Actually, we don't need to check.  Zero or nonzero, we'll add one.
	# Do not add an extra one for the NULL terminator since we have
	# exhausted the original len parameter.
	addi	a4, a4, 1	# advance string pointer
.L101:
	sub	a2, a4, a2	# compute length
	retw

# NOTE that in several places below, we point to the byte just after
# the zero byte in order to include the NULL terminator in the count.

.Lz3:	# byte 3 is zero
	addi	a4, a4, 3	# point to zero byte
.Lz0:	# byte 0 is zero (also tail of .Lz3: net +4)
	addi	a4, a4, 1	# point just beyond zero byte
	sub	a2, a4, a2	# subtract to get length
	retw
.Lz1:	# byte 1 is zero
	addi	a4, a4, 1+1	# point just beyond zero byte
	sub	a2, a4, a2	# subtract to get length
	retw
.Lz2:	# byte 2 is zero
	addi	a4, a4, 2+1	# point just beyond zero byte
	sub	a2, a4, a2	# subtract to get length
	retw

.L1mod2:	# address is odd
	EX(l8ui, a9, a4, 4, lenfixup)	# get byte 0
	addi	a4, a4, 1		# advance string pointer
	beqz	a9, .Lz3		# if byte 0 is zero (falls through .Lz0: net +4)
	bbci.l	a4, 1, .Laligned	# if string pointer is now word-aligned

.L2mod4:	# address is 2 mod 4
	addi	a4, a4, 2	# advance ptr for aligned access
	EX(l32i, a9, a4, 0, lenfixup)	# get word with first two bytes of string
	bnone	a9, a7, .Lz2	# if byte 2 (of word, not string) is zero
	bany	a9, a8, .Laligned	# if byte 3 (of word, not string) is nonzero
	# byte 3 is zero
	addi	a4, a4, 3+1	# point just beyond zero byte
	sub	a2, a4, a2	# subtract to get length
	retw

	.section .fixup, "ax"
	.align	4
lenfixup:
	movi	a2, 0		# fault while scanning: length 0 signals error
	retw
147
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
new file mode 100644
index 000000000000..265db2693cbd
--- /dev/null
+++ b/arch/xtensa/lib/usercopy.S
@@ -0,0 +1,321 @@
/*
 * arch/xtensa/lib/usercopy.S
 *
 * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S)
 *
 * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>.
 * It needs to remain separate and distinct.  The hal files are part
 * of the Xtensa link-time HAL, and those files may differ per
 * processor configuration.  Patching the kernel for another
 * processor configuration includes replacing the hal files, and we
 * could lose the special functionality for accessing user-space
 * memory during such a patch.  We sacrifice a little code space here
 * in favor to simplify code maintenance.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2002 Tensilica Inc.
 */


/*
 * size_t __xtensa_copy_user (void *dst, const void *src, size_t len);
 *
 * The returned value is the number of bytes not copied.  Implies zero
 * is success.
 *
 * The general case algorithm is as follows:
 *   If the destination and source are both aligned,
 *     do 16B chunks with a loop, and then finish up with
 *     8B, 4B, 2B, and 1B copies conditional on the length.
 *   If destination is aligned and source unaligned,
 *     do the same, but use SRC to align the source data.
 *   If destination is unaligned, align it by conditionally
 *     copying 1B and 2B and then retest.
 *   This code tries to use fall-through branches for the common
 *     case of aligned destinations (except for the branches to
 *     the alignment label).
 *
 * Register use:
 *	a0/ return address
 *	a1/ stack pointer
 *	a2/ return value
 *	a3/ src
 *	a4/ length
 *	a5/ dst
 *	a6/ tmp
 *	a7/ tmp
 *	a8/ tmp
 *	a9/ tmp
 *	a10/ tmp
 *	a11/ original length
 */

#include <xtensa/coreasm.h>

/* ALIGN/SSA8 hide the endianness difference in the funnel-shift setup. */
#ifdef __XTENSA_EB__
#define ALIGN(R, W0, W1) src	R, W0, W1
#define SSA8(R)	ssa8b R
#else
#define ALIGN(R, W0, W1) src	R, W1, W0
#define SSA8(R)	ssa8l R
#endif

/* Load or store instructions that may cause exceptions use the EX macro:
 * the access is tagged, and a __ex_table entry maps its address to the
 * fixup handler. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous


	.text
	.align	4
	.global	__xtensa_copy_user
	.type	__xtensa_copy_user,@function
__xtensa_copy_user:
	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
	mov	a5, a2		# copy dst so that a2 is return value
	mov	a11, a4		# preserve original len for error case
.Lcommon:
	bbsi.l	a2, 0, .Ldst1mod2	# if dst is 1 mod 2
	bbsi.l	a2, 1, .Ldst2mod4	# if dst is 2 mod 4
.Ldstaligned:	# return here from .Ldstunaligned when dst is aligned
	srli	a7, a4, 4	# number of loop iterations with 16B
				# per iteration
	movi	a8, 3		# if source is also aligned,
	bnone	a3, a8, .Laligned	# then use word copy
	SSA8(	a3)		# set shift amount from byte offset
	bnez	a4, .Lsrcunaligned
	movi	a2, 0		# return success for len==0
	retw

/*
 * Destination is unaligned
 */

.Ldst1mod2:	# dst is only byte aligned
	bltui	a4, 7, .Lbytecopy	# do short copies byte by byte

	# copy 1 byte
	EX(l8ui, a6, a3, 0, l_fixup)
	addi	a3, a3, 1
	EX(s8i, a6, a5, 0, s_fixup)
	addi	a5, a5, 1
	addi	a4, a4, -1
	bbci.l	a5, 1, .Ldstaligned	# if dst is now aligned, then
					# return to main algorithm
.Ldst2mod4:	# dst 16-bit aligned
	# copy 2 bytes
	bltui	a4, 6, .Lbytecopy	# do short copies byte by byte
	EX(l8ui, a6, a3, 0, l_fixup)
	EX(l8ui, a7, a3, 1, l_fixup)
	addi	a3, a3, 2
	EX(s8i, a6, a5, 0, s_fixup)
	EX(s8i, a7, a5, 1, s_fixup)
	addi	a5, a5, 2
	addi	a4, a4, -2
	j	.Ldstaligned	# dst is now aligned, return to main algorithm

/*
 * Byte by byte copy
 */
	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lbytecopy:
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lbytecopydone
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a4, .Lbytecopydone
	add	a7, a3, a4	# a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
	EX(l8ui, a6, a3, 0, l_fixup)
	addi	a3, a3, 1
	EX(s8i, a6, a5, 0, s_fixup)
	addi	a5, a5, 1
#if !XCHAL_HAVE_LOOPS
	blt	a3, a7, .Lnextbyte
#endif /* !XCHAL_HAVE_LOOPS */
.Lbytecopydone:
	movi	a2, 0		# return success for len bytes copied
	retw

/*
 * Destination and source are word-aligned.
 */
	# copy 16 bytes per iteration for word-aligned dst and word-aligned src
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop1done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop1done
	slli	a8, a7, 4
	add	a8, a8, a3	# a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
	EX(l32i, a6, a3, 0, l_fixup)
	EX(l32i, a7, a3, 4, l_fixup)
	EX(s32i, a6, a5, 0, s_fixup)
	EX(l32i, a6, a3, 8, l_fixup)
	EX(s32i, a7, a5, 4, s_fixup)
	EX(l32i, a7, a3, 12, l_fixup)
	EX(s32i, a6, a5, 8, s_fixup)
	addi	a3, a3, 16
	EX(s32i, a7, a5, 12, s_fixup)
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a3, a8, .Loop1
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
	bbci.l	a4, 3, .L2
	# copy 8 bytes
	EX(l32i, a6, a3, 0, l_fixup)
	EX(l32i, a7, a3, 4, l_fixup)
	addi	a3, a3, 8
	EX(s32i, a6, a5, 0, s_fixup)
	EX(s32i, a7, a5, 4, s_fixup)
	addi	a5, a5, 8
.L2:
	bbci.l	a4, 2, .L3
	# copy 4 bytes
	EX(l32i, a6, a3, 0, l_fixup)
	addi	a3, a3, 4
	EX(s32i, a6, a5, 0, s_fixup)
	addi	a5, a5, 4
.L3:
	bbci.l	a4, 1, .L4
	# copy 2 bytes
	EX(l16ui, a6, a3, 0, l_fixup)
	addi	a3, a3, 2
	EX(s16i, a6, a5, 0, s_fixup)
	addi	a5, a5, 2
.L4:
	bbci.l	a4, 0, .L5
	# copy 1 byte
	EX(l8ui, a6, a3, 0, l_fixup)
	EX(s8i, a6, a5, 0, s_fixup)
.L5:
	movi	a2, 0		# return success for len bytes copied
	retw

/*
 * Destination is aligned, Source is unaligned
 */

	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lsrcunaligned:
	# copy 16 bytes per iteration for word-aligned dst and unaligned src
	and	a10, a3, a8	# save unalignment offset for below
	sub	a3, a3, a10	# align a3 (to avoid sim warnings only; not needed for hardware)
	EX(l32i, a6, a3, 0, l_fixup)	# load first word
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop2done
	slli	a10, a7, 4
	add	a10, a10, a3	# a10 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
	EX(l32i, a7, a3, 4, l_fixup)
	EX(l32i, a8, a3, 8, l_fixup)
	ALIGN(	a6, a6, a7)
	EX(s32i, a6, a5, 0, s_fixup)
	EX(l32i, a9, a3, 12, l_fixup)
	ALIGN(	a7, a7, a8)
	EX(s32i, a7, a5, 4, s_fixup)
	EX(l32i, a6, a3, 16, l_fixup)
	ALIGN(	a8, a8, a9)
	EX(s32i, a8, a5, 8, s_fixup)
	addi	a3, a3, 16
	ALIGN(	a9, a9, a6)
	EX(s32i, a9, a5, 12, s_fixup)
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a3, a10, .Loop2
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2done:
	bbci.l	a4, 3, .L12
	# copy 8 bytes
	EX(l32i, a7, a3, 4, l_fixup)
	EX(l32i, a8, a3, 8, l_fixup)
	ALIGN(	a6, a6, a7)
	EX(s32i, a6, a5, 0, s_fixup)
	addi	a3, a3, 8
	ALIGN(	a7, a7, a8)
	EX(s32i, a7, a5, 4, s_fixup)
	addi	a5, a5, 8
	mov	a6, a8
.L12:
	bbci.l	a4, 2, .L13
	# copy 4 bytes
	EX(l32i, a7, a3, 4, l_fixup)
	addi	a3, a3, 4
	ALIGN(	a6, a6, a7)
	EX(s32i, a6, a5, 0, s_fixup)
	addi	a5, a5, 4
	mov	a6, a7
.L13:
	add	a3, a3, a10	# readjust a3 with correct misalignment
	bbci.l	a4, 1, .L14
	# copy 2 bytes
	EX(l8ui, a6, a3, 0, l_fixup)
	EX(l8ui, a7, a3, 1, l_fixup)
	addi	a3, a3, 2
	EX(s8i, a6, a5, 0, s_fixup)
	EX(s8i, a7, a5, 1, s_fixup)
	addi	a5, a5, 2
.L14:
	bbci.l	a4, 0, .L15
	# copy 1 byte
	EX(l8ui, a6, a3, 0, l_fixup)
	EX(s8i, a6, a5, 0, s_fixup)
.L15:
	movi	a2, 0		# return success for len bytes copied
	retw


	.section .fixup, "ax"
	.align	4

/* a2 = original dst; a5 = current dst; a11= original len
 * bytes_copied = a5 - a2
 * retval = bytes_not_copied = original len - bytes_copied
 * retval = a11 - (a5 - a2)
 *
 * Clearing the remaining pieces of kernel memory plugs security
 * holes.  This functionality is the equivalent of the *_zeroing
 * functions that some architectures provide.
 */

.Lmemset:
	.word	memset

s_fixup:
	sub	a2, a5, a2	/* a2 <-- bytes copied */
	sub	a2, a11, a2	/* a2 <-- bytes not copied */
	retw

l_fixup:
	sub	a2, a5, a2	/* a2 <-- bytes copied */
	sub	a2, a11, a2	/* a2 <-- bytes not copied == return value */

	/* void *memset(void *s, int c, size_t n); */
	mov	a6, a5		/* s */
	movi	a7, 0		/* c */
	mov	a8, a2		/* n */
	l32r	a4, .Lmemset
	callx4	a4
	/* Ignore memset return value in a6. */
	/* a2 still contains bytes not copied. */
	retw
321
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
new file mode 100644
index 000000000000..a5aed5932d7b
--- /dev/null
+++ b/arch/xtensa/mm/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the Linux/Xtensa-specific parts of the memory manager.
3#
4# Note! Dependencies are done automagically by 'make dep', which also
5# removes any old dependencies. DON'T put your own dependencies here
6# unless it's something special (ie not a .c file).
7#
8# Note 2! The CFLAGS definition is now in the main makefile...
9
10obj-y := init.o fault.o tlb.o misc.o
11obj-m :=
12obj-n :=
13obj- :=
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
new file mode 100644
index 000000000000..a945a33e85a1
--- /dev/null
+++ b/arch/xtensa/mm/fault.c
@@ -0,0 +1,241 @@
1// TODO VM_EXEC flag work-around, cache aliasing
2/*
3 * arch/xtensa/mm/fault.c
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (C) 2001 - 2005 Tensilica Inc.
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
13 */
14
15#include <linux/mm.h>
16#include <linux/module.h>
17#include <asm/mmu_context.h>
18#include <asm/cacheflush.h>
19#include <asm/hardirq.h>
20#include <asm/uaccess.h>
21#include <asm/system.h>
22#include <asm/pgalloc.h>
23
24unsigned long asid_cache = ASID_FIRST_VERSION;
25void bad_page_fault(struct pt_regs*, unsigned long, int);
26
27/*
28 * This routine handles page faults. It determines the address,
29 * and the problem, and then passes it off to one of the appropriate
30 * routines.
31 *
32 * Note: does not handle Miss and MultiHit.
33 */
34
35void do_page_fault(struct pt_regs *regs)
36{
37 struct vm_area_struct * vma;
38 struct mm_struct *mm = current->mm;
39 unsigned int exccause = regs->exccause;
40 unsigned int address = regs->excvaddr;
41 siginfo_t info;
42
43 int is_write, is_exec;
44
45 info.si_code = SEGV_MAPERR;
46
47 /* We fault-in kernel-space virtual memory on-demand. The
48 * 'reference' page table is init_mm.pgd.
49 */
50 if (address >= TASK_SIZE && !user_mode(regs))
51 goto vmalloc_fault;
52
53 /* If we're in an interrupt or have no user
54 * context, we must not take the fault..
55 */
56 if (in_atomic() || !mm) {
57 bad_page_fault(regs, address, SIGSEGV);
58 return;
59 }
60
61 is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
62 is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
63 exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
64 exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
65
66#if 0
67 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
68 address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
69#endif
70
71 down_read(&mm->mmap_sem);
72 vma = find_vma(mm, address);
73
74 if (!vma)
75 goto bad_area;
76 if (vma->vm_start <= address)
77 goto good_area;
78 if (!(vma->vm_flags & VM_GROWSDOWN))
79 goto bad_area;
80 if (expand_stack(vma, address))
81 goto bad_area;
82
83 /* Ok, we have a good vm_area for this memory access, so
84 * we can handle it..
85 */
86
87good_area:
88 info.si_code = SEGV_ACCERR;
89
90 if (is_write) {
91 if (!(vma->vm_flags & VM_WRITE))
92 goto bad_area;
93 } else if (is_exec) {
94 if (!(vma->vm_flags & VM_EXEC))
95 goto bad_area;
96 } else /* Allow read even from write-only pages. */
97 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
98 goto bad_area;
99
100 /* If for any reason at all we couldn't handle the fault,
101 * make sure we exit gracefully rather than endlessly redo
102 * the fault.
103 */
104survive:
105 switch (handle_mm_fault(mm, vma, address, is_write)) {
106 case VM_FAULT_MINOR:
107 current->min_flt++;
108 break;
109 case VM_FAULT_MAJOR:
110 current->maj_flt++;
111 break;
112 case VM_FAULT_SIGBUS:
113 goto do_sigbus;
114 case VM_FAULT_OOM:
115 goto out_of_memory;
116 default:
117 BUG();
118 }
119
120 up_read(&mm->mmap_sem);
121 return;
122
123 /* Something tried to access memory that isn't in our memory map..
124 * Fix it, but check if it's kernel or user first..
125 */
126bad_area:
127 up_read(&mm->mmap_sem);
128 if (user_mode(regs)) {
129 current->thread.bad_vaddr = address;
130 current->thread.error_code = is_write;
131 info.si_signo = SIGSEGV;
132 info.si_errno = 0;
133 /* info.si_code has been set above */
134 info.si_addr = (void *) address;
135 force_sig_info(SIGSEGV, &info, current);
136 return;
137 }
138 bad_page_fault(regs, address, SIGSEGV);
139 return;
140
141
142 /* We ran out of memory, or some other thing happened to us that made
143 * us unable to handle the page fault gracefully.
144 */
145out_of_memory:
146 up_read(&mm->mmap_sem);
147 if (current->pid == 1) {
148 yield();
149 down_read(&mm->mmap_sem);
150 goto survive;
151 }
152 printk("VM: killing process %s\n", current->comm);
153 if (user_mode(regs))
154 do_exit(SIGKILL);
155 bad_page_fault(regs, address, SIGKILL);
156 return;
157
158do_sigbus:
159 up_read(&mm->mmap_sem);
160
161 /* Send a sigbus, regardless of whether we were in kernel
162 * or user mode.
163 */
164 current->thread.bad_vaddr = address;
165 info.si_code = SIGBUS;
166 info.si_errno = 0;
167 info.si_code = BUS_ADRERR;
168 info.si_addr = (void *) address;
169 force_sig_info(SIGBUS, &info, current);
170
171 /* Kernel mode? Handle exceptions or die */
172 if (!user_mode(regs))
173 bad_page_fault(regs, address, SIGBUS);
174
175vmalloc_fault:
176 {
177 /* Synchronize this task's top level page-table
178 * with the 'reference' page table.
179 */
180 struct mm_struct *act_mm = current->active_mm;
181 int index = pgd_index(address);
182 pgd_t *pgd, *pgd_k;
183 pmd_t *pmd, *pmd_k;
184 pte_t *pte_k;
185
186 if (act_mm == NULL)
187 goto bad_page_fault;
188
189 pgd = act_mm->pgd + index;
190 pgd_k = init_mm.pgd + index;
191
192 if (!pgd_present(*pgd_k))
193 goto bad_page_fault;
194
195 pgd_val(*pgd) = pgd_val(*pgd_k);
196
197 pmd = pmd_offset(pgd, address);
198 pmd_k = pmd_offset(pgd_k, address);
199 if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
200 goto bad_page_fault;
201
202 pmd_val(*pmd) = pmd_val(*pmd_k);
203 pte_k = pte_offset_kernel(pmd_k, address);
204
205 if (!pte_present(*pte_k))
206 goto bad_page_fault;
207 return;
208 }
209bad_page_fault:
210 bad_page_fault(regs, address, SIGKILL);
211 return;
212}
213
214
215void
216bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
217{
218 extern void die(const char*, struct pt_regs*, long);
219 const struct exception_table_entry *entry;
220
221 /* Are we prepared to handle this kernel fault? */
222 if ((entry = search_exception_tables(regs->pc)) != NULL) {
223#if 1
224 printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
225 current->comm, regs->pc, entry->fixup);
226#endif
227 current->thread.bad_uaddr = address;
228 regs->pc = entry->fixup;
229 return;
230 }
231
232 /* Oops. The kernel tried to access some bad page. We'll have to
233 * terminate things with extreme prejudice.
234 */
235 printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
236 "address %08lx\n pc = %08lx, ra = %08lx\n",
237 address, regs->pc, regs->areg[0]);
238 die("Oops", regs, sig);
239 do_exit(sig);
240}
241
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
new file mode 100644
index 000000000000..56aace84aaeb
--- /dev/null
+++ b/arch/xtensa/mm/init.c
@@ -0,0 +1,551 @@
1/*
2 * arch/xtensa/mm/init.c
3 *
4 * Derived from MIPS, PPC.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Marc Gauthier
15 * Kevin Chea
16 */
17
18#include <linux/config.h>
19#include <linux/init.h>
20#include <linux/signal.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/errno.h>
24#include <linux/string.h>
25#include <linux/types.h>
26#include <linux/ptrace.h>
27#include <linux/bootmem.h>
28#include <linux/swap.h>
29
30#include <asm/pgtable.h>
31#include <asm/bootparam.h>
32#include <asm/mmu_context.h>
33#include <asm/tlb.h>
34#include <asm/tlbflush.h>
35#include <asm/page.h>
36#include <asm/pgalloc.h>
37#include <asm/pgtable.h>
38
39
40#define DEBUG 0
41
42DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
43//static DEFINE_SPINLOCK(tlb_lock);
44
45/*
46 * This flag is used to indicate that the page was mapped and modified in
47 * kernel space, so the cache is probably dirty at that address.
48 * If cache aliasing is enabled and the page color mismatches, update_mmu_cache
49 * synchronizes the caches if this bit is set.
50 */
51
52#define PG_cache_clean PG_arch_1
53
54/* References to section boundaries */
55
56extern char _ftext, _etext, _fdata, _edata, _rodata_end;
57extern char __init_begin, __init_end;
58
59/*
60 * mem_reserve(start, end, must_exist)
61 *
62 * Reserve some memory from the memory pool.
63 *
64 * Parameters:
65 * start Start of region,
66 * end End of region,
67 * must_exist Must exist in memory pool.
68 *
69 * Returns:
70 * 0 (memory area couldn't be mapped)
71 * -1 (success)
72 */
73
74int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
75{
76 int i;
77
78 if (start == end)
79 return 0;
80
81 start = start & PAGE_MASK;
82 end = PAGE_ALIGN(end);
83
84 for (i = 0; i < sysmem.nr_banks; i++)
85 if (start < sysmem.bank[i].end
86 && end >= sysmem.bank[i].start)
87 break;
88
89 if (i == sysmem.nr_banks) {
90 if (must_exist)
91 printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
92 "not in any region!\n", start, end);
93 return 0;
94 }
95
96 if (start > sysmem.bank[i].start) {
97 if (end < sysmem.bank[i].end) {
98 /* split entry */
99 if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
100 panic("meminfo overflow\n");
101 sysmem.bank[sysmem.nr_banks].start = end;
102 sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
103 sysmem.nr_banks++;
104 }
105 sysmem.bank[i].end = start;
106 } else {
107 if (end < sysmem.bank[i].end)
108 sysmem.bank[i].start = end;
109 else {
110 /* remove entry */
111 sysmem.nr_banks--;
112 sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
113 sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
114 }
115 }
116 return -1;
117}
118
119
/*
 * Initialize the bootmem system and give it all the memory we have available.
 */

void __init bootmem_init(void)
{
	unsigned long pfn;
	unsigned long bootmap_start, bootmap_size;
	int i;

	max_low_pfn = max_pfn = 0;
	min_low_pfn = ~0;

	/* Scan the banks for the lowest and highest page-frame numbers. */

	for (i=0; i < sysmem.nr_banks; i++) {
		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}

	if (min_low_pfn > max_pfn)
		panic("No memory found!\n");

	/* Low memory is capped at what the kernel can map directly. */

	max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
		max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;

	/* Find an area to use for the bootmem bitmap. */

	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
	bootmap_start = ~0;

	/* First bank large enough to hold the bitmap wins. */

	for (i=0; i<sysmem.nr_banks; i++)
		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
			bootmap_start = sysmem.bank[i].start;
			break;
		}

	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	/* Reserve the bootmem bitmap area */

	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
	bootmap_size = init_bootmem_node(NODE_DATA(0), min_low_pfn,
					 bootmap_start >> PAGE_SHIFT,
					 max_low_pfn);

	/* Add all remaining memory pieces into the bootmem map */

	for (i=0; i<sysmem.nr_banks; i++)
		free_bootmem(sysmem.bank[i].start,
			     sysmem.bank[i].end - sysmem.bank[i].start);

}
176
177
178void __init paging_init(void)
179{
180 unsigned long zones_size[MAX_NR_ZONES];
181 int i;
182
183 /* All pages are DMA-able, so we put them all in the DMA zone. */
184
185 zones_size[ZONE_DMA] = max_low_pfn;
186 for (i = 1; i < MAX_NR_ZONES; i++)
187 zones_size[i] = 0;
188
189#ifdef CONFIG_HIGHMEM
190 zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
191#endif
192
193 /* Initialize the kernel's page tables. */
194
195 memset(swapper_pg_dir, 0, PAGE_SIZE);
196
197 free_area_init(zones_size);
198}
199
/*
 * Flush the mmu and reset associated register to default values.
 * Called once during early boot; the order of register writes matters.
 */

void __init init_mmu (void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensure
	 * that valid values exist in the register.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register (0);
	set_dtlbcfg_register (0);
	flush_tlb_all ();

	/* Set rasid register to a known value. */

	set_rasid_register (ASID_ALL_RESERVED);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register (PGTABLE_START);
}
228
229/*
230 * Initialize memory pages.
231 */
232
233void __init mem_init(void)
234{
235 unsigned long codesize, reservedpages, datasize, initsize;
236 unsigned long highmemsize, tmp, ram;
237
238 max_mapnr = num_physpages = max_low_pfn;
239 high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
240 highmemsize = 0;
241
242#if CONFIG_HIGHMEM
243#error HIGHGMEM not implemented in init.c
244#endif
245
246 totalram_pages += free_all_bootmem();
247
248 reservedpages = ram = 0;
249 for (tmp = 0; tmp < max_low_pfn; tmp++) {
250 ram++;
251 if (PageReserved(mem_map+tmp))
252 reservedpages++;
253 }
254
255 codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
256 datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
257 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
258
259 printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
260 "%ldk data, %ldk init %ldk highmem)\n",
261 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
262 ram << (PAGE_SHIFT-10),
263 codesize >> 10,
264 reservedpages << (PAGE_SHIFT-10),
265 datasize >> 10,
266 initsize >> 10,
267 highmemsize >> 10);
268}
269
270void
271free_reserved_mem(void *start, void *end)
272{
273 for (; start < end; start += PAGE_SIZE) {
274 ClearPageReserved(virt_to_page(start));
275 set_page_count(virt_to_page(start), 1);
276 free_page((unsigned long)start);
277 totalram_pages++;
278 }
279}
280
281#ifdef CONFIG_BLK_DEV_INITRD
282extern int initrd_is_mapped;
283
284void free_initrd_mem(unsigned long start, unsigned long end)
285{
286 if (initrd_is_mapped) {
287 free_reserved_mem((void*)start, (void*)end);
288 printk ("Freeing initrd memory: %ldk freed\n",(end-start)>>10);
289 }
290}
291#endif
292
293void free_initmem(void)
294{
295 free_reserved_mem(&__init_begin, &__init_end);
296 printk("Freeing unused kernel memory: %dk freed\n",
297 (&__init_end - &__init_begin) >> 10);
298}
299
300void show_mem(void)
301{
302 int i, free = 0, total = 0, reserved = 0;
303 int shared = 0, cached = 0;
304
305 printk("Mem-info:\n");
306 show_free_areas();
307 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
308 i = max_mapnr;
309 while (i-- > 0) {
310 total++;
311 if (PageReserved(mem_map+i))
312 reserved++;
313 else if (PageSwapCache(mem_map+i))
314 cached++;
315 else if (!page_count(mem_map + i))
316 free++;
317 else
318 shared += page_count(mem_map + i) - 1;
319 }
320 printk("%d pages of RAM\n", total);
321 printk("%d reserved pages\n", reserved);
322 printk("%d pages shared\n", shared);
323 printk("%d pages swap cached\n",cached);
324 printk("%d free pages\n", free);
325}
326
327/* ------------------------------------------------------------------------- */
328
329#if (DCACHE_WAY_SIZE > PAGE_SIZE)
330
331/*
332 * With cache aliasing, the page color of the page in kernel space and user
333 * space might mismatch. We temporarily map the page to a different virtual
334 * address with the same color and clear the page there.
335 */
336
void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
{

	/* There shouldn't be any entries for this page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(page)));

	if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
		unsigned long v, p;

		/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */

		/* NOTE(review): the definition of tlb_lock near the top of
		 * this file is commented out; this branch cannot compile
		 * while that stays disabled -- confirm before enabling a
		 * DCACHE_WAY_SIZE > PAGE_SIZE configuration. */
		spin_lock(&tlb_lock);

		/* Install the page's PTE at a color-matching alias address
		 * and clear the page through that mapping. */
		p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
		kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__("wdtlb  %0,%1; dsync" : :"a" (p), "a" (v));

		clear_page(kaddr);

		spin_unlock(&tlb_lock);
	} else {
		/* Colors match: clear through the kernel mapping directly. */
		clear_page(kaddr);
	}

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &page->flags);
}
367
368/*
369 * With cache aliasing, we have to make sure that the page color of the page
370 * in kernel space matches that of the virtual user address before we read
371 * the page. If the page color differ, we create a temporary DTLB entry with
372 * the corrent page color and use this 'temporary' address as the source.
373 * We then use the same approach as in clear_user_page and copy the data
374 * to the kernel space and clear the PG_cache_clean bit to synchronize caches
375 * later.
376 *
377 * Note:
378 * Instead of using another 'way' for the temporary DTLB entry, we could
379 * probably use the same entry that points to the kernel address (after
380 * saving the original value and restoring it when we are done).
381 */
382
void copy_user_page(void* to, void* from, unsigned long vaddr,
		    struct page* to_page)
{
	/* There shouldn't be any entries for the new page. */

	__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));

	spin_lock(&tlb_lock);

	if (!PAGE_COLOR_EQ(vaddr, from)) {
		unsigned long v, p, t;

		/* Probe the existing DTLB entry for 'from' and re-install
		 * its PTE at a color-matching alias address. */
		__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
				      : "=a"(p), "=a"(t) : "a"(from));
		from = (void*)PAGE_COLOR_MAP0(vaddr);
		v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
	}

	if (!PAGE_COLOR_EQ(vaddr, to)) {
		unsigned long v, p;

		/* Map the destination page at a color-matching alias
		 * (a second alias way, distinct from the source's). */
		p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
		to = (void*)PAGE_COLOR_MAP1(vaddr);
		v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
		__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
	}
	copy_page(to, from);

	spin_unlock(&tlb_lock);

	/* We need to make sure that i$ and d$ are coherent. */

	clear_bit(PG_cache_clean, &to_page->flags);
}
418
419
420
421/*
422 * Any time the kernel writes to a user page cache page, or it is about to
423 * read from a page cache page this routine is called.
424 *
425 * Note:
426 * The kernel currently only provides one architecture bit in the page
427 * flags that we use for I$/D$ coherency. Maybe, in future, we can
428 * use a sepearte bit for deferred dcache aliasing:
429 * If the page is not mapped yet, we only need to set a flag,
430 * if mapped, we need to invalidate the page.
431 */
432// FIXME: we probably need this for WB caches not only for Page Coloring..
433
void flush_dcache_page(struct page *page)
{
	unsigned long addr = __pa(page_address(page));
	struct address_space *mapping = page_mapping(page); /* only used by the disabled branch below */

	/* Always write back and invalidate the page in the D-cache. */
	__flush_invalidate_dcache_page_phys(addr);

	/* PG_cache_clean already cleared: I$/D$ synchronization is
	 * deferred (see update_mmu_cache), nothing more to do here. */
	if (!test_bit(PG_cache_clean, &page->flags))
		return;

	/* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
#if 0
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_cache_clean, &page->flags);
	else
#endif
	__invalidate_icache_page_phys(addr);
}
452
/* Conservative implementation: write back and invalidate the entire
 * cache hierarchy regardless of the requested range. */
void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
		       unsigned long e)
{
	__flush_invalidate_cache_all();
}
458
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
		      unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	/* Remove any entry for the old mapping. */

	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = __pa(page_address(page));
		__flush_invalidate_dcache_page_phys(addr);
		/* Executable mappings may also be cached in the I-cache. */
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_phys(addr);
	} else {
		/* Only the currently active mm is expected here. */
		BUG();
	}
}
475
476#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
477
478
479pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
480{
481 pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
482 if (likely(pte)) {
483 pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
484 int i;
485 for (i = 0; i < 1024; i++, ptep++)
486 pte_clear(mm, addr, ptep);
487 }
488 return pte;
489}
490
491struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
492{
493 struct page *page;
494
495 page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
496
497 if (likely(page)) {
498 pte_t* ptep = kmap_atomic(page, KM_USER0);
499 int i;
500
501 for (i = 0; i < 1024; i++, ptep++)
502 pte_clear(mm, addr, ptep);
503
504 kunmap_atomic(ptep, KM_USER0);
505 }
506 return page;
507}
508
509
510/*
511 * Handle D$/I$ coherency.
512 *
513 * Note:
514 * We only have one architecture bit for the page flags, so we cannot handle
515 * cache aliasing, yet.
516 */
517
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	unsigned long vaddr = addr & PAGE_MASK;

	/* Mappings without a backing page frame need no cache work. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Drop any stale TLB entries for this address first. */
	invalidate_itlb_mapping(addr);
	invalidate_dtlb_mapping(addr);

	/* We have a new mapping. Use it. */

	write_dtlb_entry(pte, dtlb_probe(addr));

	/* If the processor can execute from this page, synchronize D$/I$. */

	if ((vma->vm_flags & VM_EXEC) != 0) {

		write_itlb_entry(pte, itlb_probe(addr));

		/* Synchronize caches, if not clean.  Setting PG_cache_clean
		 * here makes the writeback/invalidate happen at most once
		 * per page until the bit is cleared again (e.g. by
		 * flush_dcache_page or clear/copy_user_page). */

		if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
			__flush_dcache_page(vaddr);
			__invalidate_icache_page(vaddr);
		}
	}
}
551
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
new file mode 100644
index 000000000000..327c0f17187c
--- /dev/null
+++ b/arch/xtensa/mm/misc.S
@@ -0,0 +1,374 @@
1/*
2 * arch/xtensa/mm/misc.S
3 *
4 * Miscellaneous assembly functions.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc.
11 *
12 * Chris Zankel <chris@zankel.net>
13 */
14
15/* Note: we might want to implement some of the loops as zero-overhead-loops,
16 * where applicable and if supported by the processor.
17 */
18
19#include <linux/linkage.h>
20#include <asm/page.h>
21#include <asm/pgtable.h>
22
23#include <xtensa/cacheasm.h>
24#include <xtensa/cacheattrasm.h>
25
/* clear_page (page)
 *
 * Zero one PAGE_SIZE page.  a2 = page start, a3 = zero constant,
 * a4 = end pointer; 32 bytes (8 words) are stored per iteration.
 */

ENTRY(clear_page)
	entry	a1, 16
	addi	a4, a2, PAGE_SIZE
	movi	a3, 0

1:	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	addi	a2, a2, 32
	blt	a2, a4, 1b

	retw
45
/*
 * copy_page (void *to, void *from)
 *           a2        a3
 *
 * Copy one PAGE_SIZE page, 32 bytes per iteration, with loads and
 * stores interleaved in groups of three to overlap memory latency.
 * a4 = destination end pointer, a5-a7 = data scratch.
 */

ENTRY(copy_page)
	entry	a1, 16
	addi	a4, a2, PAGE_SIZE

1:	l32i	a5, a3, 0
	l32i	a6, a3, 4
	l32i	a7, a3, 8
	s32i	a5, a2, 0
	s32i	a6, a2, 4
	s32i	a7, a2, 8
	l32i	a5, a3, 12
	l32i	a6, a3, 16
	l32i	a7, a3, 20
	s32i	a5, a2, 12
	s32i	a6, a2, 16
	s32i	a7, a2, 20
	l32i	a5, a3, 24
	l32i	a6, a3, 28
	s32i	a5, a2, 24
	s32i	a6, a2, 28
	addi	a2, a2, 32
	addi	a3, a3, 32
	blt	a2, a4, 1b

	retw
76
77
/*
 * Cache maintenance primitives.  All of these are thin wrappers around
 * the cacheasm.h macros; the extra register operands (a2..a6) are
 * scratch registers consumed by the macros.
 */

/*
 * void __flush_invalidate_cache_all(void)
 *
 * Write back + invalidate the whole D-cache, then invalidate the I-cache.
 */

ENTRY(__flush_invalidate_cache_all)
	entry	sp, 16
	dcache_writeback_inv_all a2, a3
	icache_invalidate_all a2, a3
	retw

/*
 * void __invalidate_icache_all(void)
 */

ENTRY(__invalidate_icache_all)
	entry	sp, 16
	icache_invalidate_all a2, a3
	retw

/*
 * void __flush_invalidate_dcache_all(void)
 */

ENTRY(__flush_invalidate_dcache_all)
	entry	sp, 16
	dcache_writeback_inv_all a2, a3
	retw


/*
 * void __flush_invalidate_cache_range(ulong start, ulong size)
 *
 * The arguments are copied to a4/a5 for the D-cache pass so the
 * originals in a2/a3 survive for the I-cache pass.
 */

ENTRY(__flush_invalidate_cache_range)
	entry	sp, 16
	mov	a4, a2
	mov	a5, a3
	dcache_writeback_inv_region a4, a5, a6
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_icache_page(ulong start)
 */

ENTRY(__invalidate_icache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_page(ulong start)
 */

ENTRY(__invalidate_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_icache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_icache_range)
	entry	sp, 16
	icache_invalidate_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__invalidate_dcache_range)
	entry	sp, 16
	dcache_invalidate_region a2, a3, a4
	retw

/*
 * void __flush_dcache_page(ulong start)
 *
 * Write back (without invalidating) one page.
 */

ENTRY(__flush_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_writeback_region a2, a3, a4
	retw

/*
 * void __flush_invalidate_dcache_page(ulong start)
 */

ENTRY(__flush_invalidate_dcache_page)
	entry	sp, 16
	movi	a3, PAGE_SIZE
	dcache_writeback_inv_region a2, a3, a4
	retw

/*
 * void __flush_invalidate_dcache_range(ulong start, ulong size)
 */

ENTRY(__flush_invalidate_dcache_range)
	entry	sp, 16
	dcache_writeback_inv_region a2, a3, a4
	retw

/*
 * void __invalidate_dcache_all(void)
 */

ENTRY(__invalidate_dcache_all)
	entry	sp, 16
	dcache_invalidate_all a2, a3
	retw
194
/*
 * void __flush_invalidate_dcache_page_phys(ulong start)
 *
 * Write back and invalidate every D-cache line whose tag matches the
 * given physical page.  Walks the cache from the top index down,
 * reading each line's tag with 'ldct' and issuing 'diwbi' (write back
 * + invalidate by index) on a match.  a4 holds the tag compare mask
 * (page bits plus the low valid bit, which is also set in a2).
 */

ENTRY(__flush_invalidate_dcache_page_phys)
	entry	sp, 16

	movi	a3, XCHAL_DCACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	diwbi	a3, 0
	bgeui	a3, 2, 1b
	retw
218
/*
 * check_dcache_{low,high}{0,1}(paddr)
 *
 * Debug helpers: scan one quarter of the D-cache for a line whose tag
 * matches the given physical page.  On a match they deliberately hang
 * in an infinite loop (2: j 2b) so the condition can be observed with
 * a debugger; otherwise they return normally.  a3 is the cache index
 * being probed, a5 (where present) counts lines left in the quarter.
 */

ENTRY(check_dcache_low0)
	entry	sp, 16

	/* First quarter: indices just below DCACHE_SIZE/4. */
	movi	a3, XCHAL_DCACHE_SIZE / 4
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	j	2b	/* match found: spin forever */

ENTRY(check_dcache_high0)
	entry	sp, 16

	/* Second quarter: indices below DCACHE_SIZE/2. */
	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE / 2
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j	2b	/* match found: spin forever */

ENTRY(check_dcache_low1)
	entry	sp, 16

	/* Third quarter: indices below 3*DCACHE_SIZE/4. */
	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE * 3 / 4
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j	2b	/* match found: spin forever */

ENTRY(check_dcache_high1)
	entry	sp, 16

	/* Fourth quarter: indices below DCACHE_SIZE. */
	movi	a5, XCHAL_DCACHE_SIZE / 4
	movi	a3, XCHAL_DCACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_DCACHE_LINESIZE
	addi	a5, a5, -XCHAL_DCACHE_LINESIZE

	ldct	a6, a3
	dsync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a5, 2, 1b
	retw

2:	j	2b	/* match found: spin forever */
296
297
/*
 * void __invalidate_icache_page_phys(ulong start)
 *
 * I-cache counterpart of __flush_invalidate_dcache_page_phys: scan all
 * I-cache lines from the top, read each tag with 'lict', and issue
 * 'iii' (invalidate by index) on lines belonging to the given physical
 * page.
 */

ENTRY(__invalidate_icache_page_phys)
	entry	sp, 16

	movi	a3, XCHAL_ICACHE_SIZE
	movi	a4, PAGE_MASK | 1
	addi	a2, a2, 1

1:	addi	a3, a3, -XCHAL_ICACHE_LINESIZE

	lict	a6, a3
	isync
	and	a6, a6, a4
	beq	a6, a2, 2f
	bgeui	a3, 2, 1b
	retw

2:	iii	a3, 0
	bgeui	a3, 2, 1b
	retw


/* Disabled experimental index-based variants, kept for reference only. */
#if 0

	movi	a3, XCHAL_DCACHE_WAYS - 1
	movi	a4, PAGE_SIZE

1:	mov	a5, a2
	add	a6, a2, a4

2:	diwbi	a5, 0
	diwbi	a5, XCHAL_DCACHE_LINESIZE
	diwbi	a5, XCHAL_DCACHE_LINESIZE * 2
	diwbi	a5, XCHAL_DCACHE_LINESIZE * 3

	addi	a5, a5, XCHAL_DCACHE_LINESIZE * 4
	blt	a5, a6, 2b

	addi	a3, a3, -1
	addi	a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
	bgez	a3, 1b

	retw

ENTRY(__invalidate_icache_page_index)
	entry	sp, 16

	movi	a3, XCHAL_ICACHE_WAYS - 1
	movi	a4, PAGE_SIZE

1:	mov	a5, a2
	add	a6, a2, a4

2:	iii	a5, 0
	iii	a5, XCHAL_ICACHE_LINESIZE
	iii	a5, XCHAL_ICACHE_LINESIZE * 2
	iii	a5, XCHAL_ICACHE_LINESIZE * 3

	addi	a5, a5, XCHAL_ICACHE_LINESIZE * 4
	blt	a5, a6, 2b

	addi	a3, a3, -1
	addi	a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
	bgez	a3, 2b

	retw

#endif
369
370
371
372
373
374
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
new file mode 100644
index 000000000000..e5e119c820e4
--- /dev/null
+++ b/arch/xtensa/mm/pgtable.c
@@ -0,0 +1,76 @@
1/*
 * arch/xtensa/mm/pgtable.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 *
10 * Chris Zankel <chris@zankel.net>
11 */
12
13#if (DCACHE_SIZE > PAGE_SIZE)
14
15pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
16{
17 pte_t *pte, p;
18 int color = ADDR_COLOR(address);
19 int i;
20
21 p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
22
23 if (likely(p)) {
24 struct page *page;
25
26 for (i = 0; i < COLOR_SIZE; i++, p++) {
27 page = virt_to_page(pte);
28
29 set_page_count(page, 1);
30 ClearPageCompound(page);
31
32 if (ADDR_COLOR(p) == color)
33 pte = p;
34 else
35 free_page(p);
36 }
37 clear_page(pte);
38 }
39 return pte;
40}
41
42#ifdef PROFILING
43
44int mask;
45int hit;
46int flush;
47
48#endif
49
50struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
51{
52 struct page *page, p;
53 int color = ADDR_COLOR(address);
54
55 p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
56
57 if (likely(p)) {
58 for (i = 0; i < PAGE_ORDER; i++) {
59 set_page_count(p, 1);
60 ClearPageCompound(p);
61
62 if (PADDR_COLOR(page_address(pg)) == color)
63 page = p;
64 else
65 free_page(p);
66 }
67 clear_highpage(page);
68 }
69
70 return page;
71}
72
73#endif
74
75
76
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
new file mode 100644
index 000000000000..d3bd3bfc3b3b
--- /dev/null
+++ b/arch/xtensa/mm/tlb.c
@@ -0,0 +1,545 @@
1/*
 * arch/xtensa/mm/tlb.c
3 *
4 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 2001 - 2003 Tensilica Inc.
11 *
12 * Joe Taylor
13 * Chris Zankel <chris@zankel.net>
14 * Marc Gauthier
15 */
16
17#include <linux/mm.h>
18#include <asm/processor.h>
19#include <asm/mmu_context.h>
20#include <asm/tlbflush.h>
21#include <asm/system.h>
22#include <asm/cacheflush.h>
23
24
/* Invalidate every auto-refill I-TLB entry, way by way and index by
 * index, deferring the isync until all entries have been written. */
static inline void __flush_itlb_all (void)
{
	int way, index;

	for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
		for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
			/* Entry encoding: index in the VPN bits, way in
			 * the low bits. */
			int entry = way + (index << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync (entry);
		}
	}
	asm volatile ("isync\n");
}

/* D-TLB counterpart of __flush_itlb_all. */
static inline void __flush_dtlb_all (void)
{
	int way, index;

	for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
		for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
			int entry = way + (index << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync (entry);
		}
	}
	asm volatile ("isync\n");
}


/* Drop every auto-refill entry from both TLBs. */
void flush_tlb_all (void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}
57
58/* If mm is current, we simply assign the current task a new ASID, thus,
59 * invalidating all previous tlb entries. If mm is someone else's user mapping,
60 * wie invalidate the context, thus, when that user mapping is swapped in,
61 * a new context will be assigned to it.
62 */
63
64void flush_tlb_mm(struct mm_struct *mm)
65{
66#if 0
67 printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
68#endif
69
70 if (mm == current->active_mm) {
71 int flags;
72 local_save_flags(flags);
73 get_new_mmu_context(mm, asid_cache);
74 set_rasid_register(ASID_INSERT(mm->context));
75 local_irq_restore(flags);
76 }
77 else
78 mm->context = 0;
79}
80
void flush_tlb_range (struct vm_area_struct *vma,
    		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/* No context assigned yet -> no TLB entries to remove. */
	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context, start, end);
#endif
	/* NOTE(review): local_save_flags() does not disable interrupts;
	 * confirm whether local_irq_save() was intended here. */
	local_save_flags(flags);

	/* For a small range, invalidate page by page under the target
	 * mm's ASID; for a large one it is cheaper to give the mm a
	 * whole new context. */
	if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();
		set_rasid_register (ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while(start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while(start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		get_new_mmu_context(mm, asid_cache);
		if (mm == current->active_mm)
			set_rasid_register(ASID_INSERT(mm->context));
	}
	local_irq_restore(flags);
}
120
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct* mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;
#if 0
	printk("[tlbpage<%02lx,%08lx>]\n",
			(unsigned long)mm->context, page);
#endif

	/* No context assigned yet -> no TLB entries to remove. */
	if(mm->context == NO_CONTEXT)
		return;

	local_save_flags(flags);

	/* NOTE(review): unlike flush_tlb_range, the target mm's ASID is
	 * never installed before probing, so this save/restore of the
	 * rasid register has no visible effect -- confirm intent. */
	oldpid = get_rasid_register();

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);

#if 0
	flush_tlb_all();
	return;
#endif
}
151
152
153#ifdef DEBUG_TLB
154
155#define USE_ITLB 0
156#define USE_DTLB 1
157
158struct way_config_t {
159 int indicies;
160 int indicies_log2;
161 int pgsz_log2;
162 int arf;
163};
164
165static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
166{
167 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
168 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
169 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
170 XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
171 },
172 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
173 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
174 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
175 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
176 },
177 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
178 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
179 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
180 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
181 },
182 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
183 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
184 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
185 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
186 },
187 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
188 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
189 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
190 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
191 },
192 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
193 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
194 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
195 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
196 },
197 { XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
198 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
199 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
200 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
201 }
202};
203
204static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
205{
206 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
207 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
208 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
209 XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
210 },
211 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
212 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
213 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
214 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
215 },
216 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
217 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
218 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
219 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
220 },
221 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
222 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
223 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
224 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
225 },
226 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
227 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
228 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
229 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
230 },
231 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
232 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
233 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
234 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
235 },
236 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
237 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
238 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
239 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
240 },
241 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
242 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
243 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
244 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
245 },
246 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
247 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
248 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
249 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
250 },
251 { XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
252 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
253 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
254 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
255 }
256};
257
/* Total number of TLB entries (sum of the per-way entry counts).
 * The whole expansion is parenthesized so the macros stay correct
 * inside larger expressions (e.g. ITLB_TOTAL_ENTRIES * 2). */
#define ITLB_TOTAL_ENTRIES \
	(XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
	 XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES))
#define DTLB_TOTAL_ENTRIES \
	(XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
	 XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES))
278
279
/* One decoded snapshot of a single TLB entry, gathered by dump_tlb(). */
typedef struct {
	unsigned va;			/* effective virtual address (index bits folded in) */
	unsigned pa;			/* physical address (page-aligned) */
	unsigned char asid;		/* address-space id; 0 means invalid entry */
	unsigned char ca;		/* cache attribute bits */
	unsigned char way;		/* TLB way this entry came from */
	unsigned char index;		/* index within the way */
	unsigned char pgsz_log2;	/* 0 .. 32 */
	unsigned char type;		/* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;
290
291/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
292int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
293{
294 if (a->asid < b->asid) return -1;
295 if (a->asid > b->asid) return 1;
296 if (a->va < b->va) return -1;
297 if (a->va > b->va) return 1;
298 if (a->pa < b->pa) return -1;
299 if (a->pa > b->pa) return 1;
300 if (a->ca < b->ca) return -1;
301 if (a->ca > b->ca) return 1;
302 if (a->way < b->way) return -1;
303 if (a->way > b->way) return 1;
304 if (a->index < b->index) return -1;
305 if (a->index > b->index) return 1;
306 return 0;
307}
308
309void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
310{
311 int i, j;
312 /* Simple O(n*n) sort: */
313 for (i = 0; i < n-1; i++)
314 for (j = i+1; j < n; j++)
315 if (cmp_tlb_dump_info(t+i, t+j) > 0) {
316 tlb_dump_entry_t tmp = t[i];
317 t[i] = t[j];
318 t[j] = tmp;
319 }
320}
321
322
/* Scratch buffers used by dump_tlb()/dump_tlbs2() to gather and sort one
 * snapshot of every ITLB/DTLB entry. */
static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
325
326
/* Human-readable label for a way's auto-refill flag. */
static inline char *way_type (int type)
{
	if (type)
		return "autorefill";
	return "non-autorefill";
}
331
/* Decode and printk one TLB entry from its raw "virtual" and "translation"
 * register images. An entry is shown as (V)alid when its ASID is nonzero,
 * (I)nvalid otherwise. */
void print_entry (struct way_config_t *way_info,
		unsigned int way,
		unsigned int index,
		unsigned int virtual,
		unsigned int translation)
{
	char valid_chr;
	unsigned int va, pa, asid, ca;

	/* Strip page-offset and index bits to get the tag portion of the VA. */
	va = virtual &
		~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
	asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
	pa = translation & ~((1 << way_info->pgsz_log2) - 1);
	ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
	valid_chr = asid ? 'V' : 'I';

	/* Compute and incorporate the effect of the index bits on the
	 * va. It's more useful for kernel debugging, since we always
	 * want to know the effective va anyway. */

	va += index << way_info->pgsz_log2;

	printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
		way, index, valid_chr, va, pa, asid, ca);
}
357
/* Read and print one ITLB entry; the ITLB entry address encodes the way in
 * the low bits and the index shifted by the way's page-size log2 (assumes
 * this matches the hardware's entry-addressing scheme -- TODO confirm
 * against the xtensa ISA TLB instruction encoding). */
void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
			read_itlb_virtual (way + (index << way_info->pgsz_log2)),
			read_itlb_translation (way + (index << way_info->pgsz_log2)));
}
364
/* Read and print one DTLB entry; same entry-address encoding as
 * print_itlb_entry() above. */
void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
	print_entry (way_info, way, index,
			read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
			read_dtlb_translation (way + (index << way_info->pgsz_log2)));
}
371
372void dump_itlb (void)
373{
374 int way, index;
375
376 printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
377
378 for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
379 printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
380 way, itlb[way].indicies,
381 itlb[way].pgsz_log2, way_type(itlb[way].arf));
382 for (index = 0; index < itlb[way].indicies; index++) {
383 print_itlb_entry(&itlb[way], way, index);
384 }
385 }
386}
387
388void dump_dtlb (void)
389{
390 int way, index;
391
392 printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
393
394 for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
395 printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
396 way, dtlb[way].indicies,
397 dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
398 for (index = 0; index < dtlb[way].indicies; index++) {
399 print_dtlb_entry(&dtlb[way], way, index);
400 }
401 }
402}
403
404void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
405 int entries, int ways, int type, int show_invalid)
406{
407 tlb_dump_entry_t *e = tinfo;
408 int way, i;
409
410 /* Gather all info: */
411 for (way = 0; way < ways; way++) {
412 struct way_config_t *cfg = config + way;
413 for (i = 0; i < cfg->indicies; i++) {
414 unsigned wayindex = way + (i << cfg->pgsz_log2);
415 unsigned vv = (type ? read_dtlb_virtual (wayindex)
416 : read_itlb_virtual (wayindex));
417 unsigned pp = (type ? read_dtlb_translation (wayindex)
418 : read_itlb_translation (wayindex));
419
420 /* Compute and incorporate the effect of the index bits on the
421 * va. It's more useful for kernel debugging, since we always
422 * want to know the effective va anyway. */
423
424 e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
425 e->va += (i << cfg->pgsz_log2);
426 e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
427 e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
428 e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
429 e->way = way;
430 e->index = i;
431 e->pgsz_log2 = cfg->pgsz_log2;
432 e->type = type;
433 e++;
434 }
435 }
436#if 1
437 /* Sort by ASID and VADDR: */
438 sort_tlb_dump_info (tinfo, entries);
439#endif
440
441 /* Display all sorted info: */
442 printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
443 for (e = tinfo, i = 0; i < entries; i++, e++) {
444#if 0
445 if (e->asid == 0 && !show_invalid)
446 continue;
447#endif
448 printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
449 (e->type ? 'D' : 'I'), e->way, e->index,
450 e->asid, e->va, e->pa, e->ca,
451 (1 << (e->pgsz_log2 % 10)),
452 " kMG"[e->pgsz_log2 / 10]
453 );
454 }
455}
456
/* Dump both TLBs sorted; showinv selects whether invalid (ASID 0)
 * entries are requested from dump_tlb(). */
void dump_tlbs2 (int showinv)
{
	dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
	dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}
462
/* Sorted dump of both TLBs, including invalid entries. */
void dump_all_tlbs (void)
{
	dump_tlbs2 (1);
}
467
/* Sorted dump of both TLBs, passing show_invalid=0 to dump_tlb(). */
void dump_valid_tlbs (void)
{
	dump_tlbs2 (0);
}
472
473
/* Unsorted, per-way dump of both TLBs (hardware order, no filtering). */
void dump_tlbs (void)
{
	dump_itlb();
	dump_dtlb();
}
479
/* Print cache tags for one line (idx >= 0, byte index within a way) or for
 * every line (idx < 0) of the I- or D-cache, one column per way.
 * Uses the xtensa ldct/lict cache-tag load instructions. */
void dump_cache_tag(int dcache, int idx)
{
	int w, i, s, e;
	unsigned long tag, index;
	unsigned long num_lines, num_ways, cache_size, line_size;

	num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
	cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
	line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;

	/* NOTE: despite the name, num_lines is the way size in BYTES
	 * (cache_size / num_ways); i below steps by line_size through it. */
	num_lines = cache_size / num_ways;

	s = 0; e = num_lines;

	/* Single-line mode: start at that line's byte offset, loop once. */
	if (idx >= 0)
		e = (s = idx * line_size) + 1;

	for (i = s; i < e; i+= line_size) {
		printk("\nline %#08x:", i);
		for (w = 0; w < num_ways; w++) {
			index = w * num_lines + i;
			if (dcache)
				__asm__ __volatile__("ldct %0, %1\n\t"
						: "=a"(tag) : "a"(index));
			else
				__asm__ __volatile__("lict %0, %1\n\t"
						: "=a"(tag) : "a"(index));

			printk(" %#010lx", tag);
		}
	}
	printk ("\n");
}
513
/* Print the data words of one instruction-cache line (given by line index)
 * for every way, using the xtensa licw cache-word load instruction. */
void dump_icache(int index)
{
	unsigned long data, addr;
	int w, i;

	const unsigned long num_ways = XCHAL_ICACHE_WAYS;
	const unsigned long cache_size = XCHAL_ICACHE_SIZE;
	const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
	const unsigned long num_lines = cache_size / num_ways / line_size;

	for (w = 0; w < num_ways; w++) {
		printk ("\nWay %d", w);

		for (i = 0; i < line_size; i+= 4) {
			/* NOTE(review): the way stride here is num_lines (a line
			 * COUNT), while dump_cache_tag() strides by way size in
			 * bytes -- one of the two looks wrong; verify against the
			 * licw address encoding. */
			addr = w * num_lines + index * line_size + i;
			__asm__ __volatile__("licw %0, %1\n\t"
					: "=a"(data) : "a"(addr));
			printk(" %#010lx", data);
		}
	}
	printk ("\n");
}
536
/* Dump the full tag arrays of both caches (all lines, all ways). */
void dump_cache_tags(void)
{
	printk("Instruction cache\n");
	dump_cache_tag(0, -1);
	printk("Data cache\n");
	dump_cache_tag(1, -1);
}
544
545#endif
diff --git a/arch/xtensa/platform-iss/Makefile b/arch/xtensa/platform-iss/Makefile
new file mode 100644
index 000000000000..5b394e9620e5
--- /dev/null
+++ b/arch/xtensa/platform-iss/Makefile
@@ -0,0 +1,13 @@
1# $Id: Makefile,v 1.1.1.1 2002/08/28 16:10:14 aroll Exp $
2#
3# Makefile for the Xtensa Instruction Set Simulator (ISS)
4# "prom monitor" library routines under Linux.
5#
6# Note! Dependencies are done automagically by 'make dep', which also
7# removes any old dependencies. DON'T put your own dependencies here
8# unless it's something special (ie not a .c file).
9#
10# Note 2! The CFLAGS definitions are in the main makefile...
11
12obj-y = io.o console.o setup.o network.o
13
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
new file mode 100644
index 000000000000..9e2b53f6a907
--- /dev/null
+++ b/arch/xtensa/platform-iss/console.c
@@ -0,0 +1,303 @@
1/*
2 * arch/xtensa/platform-iss/console.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001-2005 Tensilica Inc.
9 * Authors Christian Zankel, Joe Taylor
10 */
11
12#include <linux/module.h>
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/sched.h>
16#include <linux/console.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/mm.h>
20#include <linux/major.h>
21#include <linux/param.h>
22#include <linux/serial.h>
23#include <linux/serialP.h>
24#include <linux/console.h>
25
26#include <asm/uaccess.h>
27#include <asm/irq.h>
28
29#include <xtensa/simcall.h>
30
31#include <linux/tty.h>
32#include <linux/tty_flip.h>
33
34#ifdef SERIAL_INLINE
35#define _INLINE_ inline
36#endif
37
38#define SERIAL_MAX_NUM_LINES 1
39#define SERIAL_TIMER_VALUE (20 * HZ)
40
41static struct tty_driver *serial_driver;
42static struct timer_list serial_timer;
43
44static DEFINE_SPINLOCK(timer_lock);
45
46int errno;
47
/* Issue a simulator system call and return its result; the second result
 * register (a3) is stored into the file-scope `errno`.
 * NOTE(review): the six parameters are never referenced in the asm and are
 * not listed as input operands -- this relies on the xtensa call ABI having
 * already placed them in a2..a7 when `simcall` executes. Fragile under
 * optimization/inlining; verify against the simcall specification. */
static int __simc (int a, int b, int c, int d, int e, int f)
{
	int ret;
	__asm__ __volatile__ ("simcall\n"
			"mov %0, a2\n"
			"mov %1, a3\n" : "=a" (ret), "=a" (errno)
			: : "a2", "a3");
	return ret;
}
57
58static char *serial_version = "0.1";
59static char *serial_name = "ISS serial driver";
60
61/*
62 * This routine is called whenever a serial port is opened. It
63 * enables interrupts for a serial port, linking in its async structure into
64 * the IRQ chain. It also performs the serial-specific
65 * initialization for the tty structure.
66 */
67
68static void rs_poll(unsigned long);
69
/* tty open hook. The ISS has no receive interrupt, so on the first open of
 * the line we start a periodic timer (rs_poll) that polls the simulator for
 * input. Returns -ENODEV for a line outside [0, SERIAL_MAX_NUM_LINES). */
static int rs_open(struct tty_struct *tty, struct file * filp)
{
	int line = tty->index;

	if ((line < 0) || (line >= SERIAL_MAX_NUM_LINES))
		return -ENODEV;

	spin_lock(&timer_lock);

	/* First open of this tty: arm the input-polling timer. */
	if (tty->count == 1) {
		init_timer(&serial_timer);
		serial_timer.data = (unsigned long) tty;
		serial_timer.function = rs_poll;
		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
	}
	spin_unlock(&timer_lock);

	return 0;
}
89
90
91/*
92 * ------------------------------------------------------------
93 * iss_serial_close()
94 *
95 * This routine is called when the serial port gets closed. First, we
96 * wait for the last remaining data to be sent. Then, we unlink its
97 * async structure from the interrupt chain if necessary, and we free
98 * that IRQ if nothing is left in the chain.
99 * ------------------------------------------------------------
100 */
101static void rs_close(struct tty_struct *tty, struct file * filp)
102{
103 spin_lock(&timer_lock);
104 if (tty->count == 1)
105 del_timer_sync(&serial_timer);
106 spin_unlock(&timer_lock);
107}
108
109
/* tty write hook: hand the whole buffer to the simulator's stdout (host
 * fd 1) via SYS_write. The simulator consumes everything immediately, so
 * the full count is always reported as written. */
static int rs_write(struct tty_struct * tty,
		const unsigned char *buf, int count)
{
	/* see drivers/char/serialX.c to reference original version */

	__simc (SYS_write, 1, (unsigned long)buf, count, 0, 0);
	return count;
}
118
119static void rs_poll(unsigned long priv)
120{
121 struct tty_struct* tty = (struct tty_struct*) priv;
122
123 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
124 int i = 0;
125 unsigned char c;
126
127 spin_lock(&timer_lock);
128
129 while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
130 __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
131 tty->flip.count++;
132 *tty->flip.char_buf_ptr++ = c;
133 *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
134 i++;
135 }
136
137 if (i)
138 tty_flip_buffer_push(tty);
139
140
141 mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
142 spin_unlock(&timer_lock);
143}
144
145
146static void rs_put_char(struct tty_struct *tty, unsigned char ch)
147{
148 char buf[2];
149
150 if (!tty)
151 return;
152
153 buf[0] = ch;
154 buf[1] = '\0'; /* Is this NULL necessary? */
155 __simc (SYS_write, 1, (unsigned long) buf, 1, 0, 0);
156}
157
/* No-op: writes go straight to the simulator, nothing to flush. */
static void rs_flush_chars(struct tty_struct *tty)
{
}
161
/* The simulator swallows writes immediately; advertise a fixed 2 KiB. */
static int rs_write_room(struct tty_struct *tty)
{
	return 2048;
}
167
/* Nothing is ever buffered on the ISS side, so always report zero. */
static int rs_chars_in_buffer(struct tty_struct *tty)
{
	return 0;
}
173
/* Stub: hangup has no meaning for the simulator console. */
static void rs_hangup(struct tty_struct *tty)
{
	/* Stub, once again.. */
}
178
/* Stub: writes complete synchronously, so there is never anything to wait for. */
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
	/* Stub, once again.. */
}
183
/* /proc read hook: report only the driver version string. Implements the
 * usual single-shot read_proc contract (sets *eof, honors off/count). */
static int rs_read_proc(char *page, char **start, off_t off, int count,
		int *eof, void *data)
{
	int len = 0;
	off_t begin = 0;

	len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version);
	*eof = 1;

	if (off >= len + begin)
		return 0;

	*start = page + (off - begin);
	return ((count < begin + len - off) ? count : begin + len - off);
}
199
200
/* NOTE(review): these two declarations are unused in this file. */
int register_serial(struct serial_struct*);
void unregister_serial(int);

/* tty entry points for the ISS serial driver, registered by rs_init(). */
static struct tty_operations serial_ops = {
	.open = rs_open,
	.close = rs_close,
	.write = rs_write,
	.put_char = rs_put_char,
	.flush_chars = rs_flush_chars,
	.write_room = rs_write_room,
	.chars_in_buffer = rs_chars_in_buffer,
	.hangup = rs_hangup,
	.wait_until_sent = rs_wait_until_sent,
	.read_proc = rs_read_proc
};
216
217int __init rs_init(void)
218{
219 serial_driver = alloc_tty_driver(1);
220
221 printk ("%s %s\n", serial_name, serial_version);
222
223 /* Initialize the tty_driver structure */
224
225 serial_driver->owner = THIS_MODULE;
226 serial_driver->driver_name = "iss_serial";
227 serial_driver->name = "ttyS";
228 serial_driver->major = TTY_MAJOR;
229 serial_driver->minor_start = 64;
230 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
231 serial_driver->subtype = SERIAL_TYPE_NORMAL;
232 serial_driver->init_termios = tty_std_termios;
233 serial_driver->init_termios.c_cflag =
234 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
235 serial_driver->flags = TTY_DRIVER_REAL_RAW;
236
237 tty_set_operations(serial_driver, &serial_ops);
238
239 if (tty_register_driver(serial_driver))
240 panic("Couldn't register serial driver\n");
241 return 0;
242}
243
244
/* Module teardown: unregister the tty driver (logging any failure) and
 * release the driver structure. */
static __exit void rs_exit(void)
{
	int error;

	if ((error = tty_unregister_driver(serial_driver)))
		printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
		       error);
	put_tty_driver(serial_driver);
}
254
255
256/* We use `late_initcall' instead of just `__initcall' as a workaround for
257 * the fact that (1) simcons_tty_init can't be called before tty_init,
258 * (2) tty_init is called via `module_init', (3) if statically linked,
259 * module_init == device_init, and (4) there's no ordering of init lists.
260 * We can do this easily because simcons is always statically linked, but
261 * other tty drivers that depend on tty_init and which must use
262 * `module_init' to declare their init routines are likely to be broken.
263 */
264
265late_initcall(rs_init);
266
267
268#ifdef CONFIG_SERIAL_CONSOLE
269
270static void iss_console_write(struct console *co, const char *s, unsigned count)
271{
272 int len = strlen(s);
273
274 if (s != 0 && *s != 0)
275 __simc (SYS_write, 1, (unsigned long)s,
276 count < len ? count : len,0,0);
277}
278
/* Console device hook: map the console to the ISS serial tty driver. */
static struct tty_driver* iss_console_device(struct console *c, int *index)
{
	*index = c->index;
	return serial_driver;
}
284
285
/* Console descriptor for ttyS; index -1 means "any line", and
 * CON_PRINTBUFFER replays the kernel log buffer on registration. */
static struct console sercons = {
	.name = "ttyS",
	.write = iss_console_write,
	.device = iss_console_device,
	.flags = CON_PRINTBUFFER,
	.index = -1
};
293
/* Register the simulator console early (console_initcall). */
static int __init iss_console_init(void)
{
	register_console(&sercons);
	return 0;
}
299
300console_initcall(iss_console_init);
301
302#endif /* CONFIG_SERIAL_CONSOLE */
303
diff --git a/arch/xtensa/platform-iss/io.c b/arch/xtensa/platform-iss/io.c
new file mode 100644
index 000000000000..5b161a5cb65f
--- /dev/null
+++ b/arch/xtensa/platform-iss/io.c
@@ -0,0 +1,32 @@
/* Polled ISS serial I/O helpers -- currently compiled out (this file
 * isn't really needed right now). */

#if 0

#include <asm/io.h>
#include <xtensa/simcall.h>

extern int __simc ();


/* Read one character from the simulator's stdin (host fd 0). */
char iss_serial_getc()
{
	char c;
	__simc( SYS_read, 0, &c, 1 );
	return c;
}

/* Write one character to the simulator's stdout (host fd 1). */
void iss_serial_putc( char c )
{
	__simc( SYS_write, 1, &c, 1 );
}

/* Write a NUL-terminated string to the simulator's stdout. */
void iss_serial_puts( char *s )
{
	if( s != 0 && *s != 0 )
		__simc( SYS_write, 1, s, strlen(s) );
}

/*#error Need I/O ports to specific hardware!*/

#endif
32
diff --git a/arch/xtensa/platform-iss/network.c b/arch/xtensa/platform-iss/network.c
new file mode 100644
index 000000000000..498d7dced1f4
--- /dev/null
+++ b/arch/xtensa/platform-iss/network.c
@@ -0,0 +1,855 @@
1/*
2 *
3 * arch/xtensa/platform-iss/network.c
4 *
5 * Platform specific initialization.
6 *
7 * Authors: Chris Zankel <chris@zankel.net>
8 * Based on work form the UML team.
9 *
10 * Copyright 2005 Tensilica Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/list.h>
21#include <linux/irq.h>
22#include <linux/spinlock.h>
23#include <linux/slab.h>
24#include <linux/timer.h>
25#include <linux/if_ether.h>
26#include <linux/inetdevice.h>
27#include <linux/init.h>
28#include <linux/if_tun.h>
29#include <linux/etherdevice.h>
30#include <linux/interrupt.h>
31#include <linux/ioctl.h>
32#include <linux/bootmem.h>
33#include <linux/ethtool.h>
34#include <linux/rtnetlink.h>
35#include <linux/timer.h>
36
37#include <xtensa/simcall.h>
38
39#define DRIVER_NAME "iss-netdev"
40#define ETH_MAX_PACKET 1500
41#define ETH_HEADER_OTHER 14
42#define ISS_NET_TIMER_VALUE (2 * HZ)
43
44
45static DEFINE_SPINLOCK(opened_lock);
46static LIST_HEAD(opened);
47
48static DEFINE_SPINLOCK(devices_lock);
49static LIST_HEAD(devices);
50
51/* ------------------------------------------------------------------------- */
52
53/* We currently only support the TUNTAP transport protocol. */
54
55#define TRANSPORT_TUNTAP_NAME "tuntap"
56#define TRANSPORT_TUNTAP_MTU ETH_MAX_PACKET
57
/* Per-device state for the TUNTAP transport. */
struct tuntap_info {
	char dev_name[IFNAMSIZ];	/* host-side tap device name */
	int fixed_config;		/* nonzero when a device name was given on the cmdline */
	unsigned char gw[ETH_ALEN];	/* gateway MAC (currently unused here) */
	int fd;				/* host fd for /dev/net/tun, -1 when closed */
};
64
65/* ------------------------------------------------------------------------- */
66
67
/* This structure contains our private information for the driver. */

struct iss_net_private {

	struct list_head device_list;	/* link on the global `devices` list */
	struct list_head opened_list;	/* link on the global `opened` list */

	spinlock_t lock;
	struct net_device *dev;
	struct platform_device pdev;
	struct timer_list tl;
	struct net_device_stats stats;

	struct timer_list timer;	/* rx polling timer (iss_net_timer) */
	unsigned int timer_val;		/* polling period in jiffies */

	int index;
	int mtu;

	unsigned char mac[ETH_ALEN];
	int have_mac;			/* nonzero once `mac` is valid */

	/* Transport dispatch: function pointers and per-transport state,
	 * filled in by the transport's probe (currently tuntap only). */
	struct {
		union {
			struct tuntap_info tuntap;
		} info;

		int (*open)(struct iss_net_private *lp);
		void (*close)(struct iss_net_private *lp);
		int (*read)(struct iss_net_private *lp, struct sk_buff **skb);
		int (*write)(struct iss_net_private *lp, struct sk_buff **skb);
		unsigned short (*protocol)(struct sk_buff *skb);
		int (*poll)(struct iss_net_private *lp);
	} tp;

};
104
105/* ======================= ISS SIMCALL INTERFACE =========================== */
106
/* Note: __simc must _not_ be declared inline! */

static int errno;

/* Issue a simulator system call; result comes back in a2 (return value)
 * and a3 (stored into `errno`).
 * NOTE(review): the six parameters are never referenced in the asm and are
 * not listed as input operands -- this relies on the call ABI having placed
 * them in a2..a7 when `simcall` runs (hence the no-inline requirement
 * above). Verify against the simcall specification. */
static int __simc (int a, int b, int c, int d, int e, int f)
{
	int ret;
	__asm__ __volatile__ ("simcall\n"
			"mov %0, a2\n"
			"mov %1, a3\n" : "=a" (ret), "=a" (errno)
			: : "a2", "a3");
	return ret;
}
120
121static int inline simc_open(char *file, int flags, int mode)
122{
123 return __simc(SYS_open, (int) file, flags, mode, 0, 0);
124}
125
126static int inline simc_close(int fd)
127{
128 return __simc(SYS_close, fd, 0, 0, 0, 0);
129}
130
131static int inline simc_ioctl(int fd, int request, void *arg)
132{
133 return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
134}
135
136static int inline simc_read(int fd, void *buf, size_t count)
137{
138 return __simc(SYS_read, fd, (int) buf, count, 0, 0);
139}
140
141static int inline simc_write(int fd, void *buf, size_t count)
142{
143 return __simc(SYS_write, fd, (int) buf, count, 0, 0);
144}
145
146static int inline simc_poll(int fd)
147{
148 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
149
150 return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,0,0);
151}
152
153/* ================================ HELPERS ================================ */
154
155
/* Split a comma-separated interface spec in place, storing a pointer to
 * each field into the char** varargs (terminated by a NULL vararg). Empty
 * fields leave the corresponding *arg untouched. Returns NULL on success
 * (spec consumed exactly), or a pointer to the leftover text when the spec
 * has more fields than requested (caller treats that as garbage). */
static char *split_if_spec(char *str, ...)
{
	char **arg, *end;
	va_list ap;

	va_start(ap, str);
	while ((arg = va_arg(ap, char**)) != NULL) {
		if (*str == '\0')
			return NULL;
		end = strchr(str, ',');
		if (end != str)
			*arg = str;
		if (end == NULL)
			return NULL;	/* last field: no trailing text */
		*end ++ = '\0';
		str = end;
	}
	va_end(ap);
	return str;
}
176
177
#if 0
/* Adjust SKB: ensure `extra` bytes of tailroom, reallocating if needed
 * (compiled out; kept from the UML original). */

struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
{
	if ((skb != NULL) && (skb_tailroom(skb) < extra)) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
		dev_kfree_skb(skb);
		skb = skb2;
	}
	if (skb != NULL)
		skb_put(skb, extra);

	return skb;
}
#endif
196
197/* Return the IP address as a string for a given device. */
198
199static void dev_ip_addr(void *d, char *buf, char *bin_buf)
200{
201 struct net_device *dev = d;
202 struct in_device *ip = dev->ip_ptr;
203 struct in_ifaddr *in;
204 u32 addr;
205
206 if ((ip == NULL) || ((in = ip->ifa_list) == NULL)) {
207 printk(KERN_WARNING "Device not assigned an IP address!\n");
208 return;
209 }
210
211 addr = in->ifa_address;
212 sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff,
213 (addr >> 16) & 0xff, addr >> 24);
214
215 if (bin_buf) {
216 bin_buf[0] = addr & 0xff;
217 bin_buf[1] = (addr >> 8) & 0xff;
218 bin_buf[2] = (addr >> 16) & 0xff;
219 bin_buf[3] = addr >> 24;
220 }
221}
222
223/* Set Ethernet address of the specified device. */
224
225static void inline set_ether_mac(void *d, unsigned char *addr)
226{
227 struct net_device *dev = d;
228 memcpy(dev->dev_addr, addr, ETH_ALEN);
229}
230
231
232/* ======================= TUNTAP TRANSPORT INTERFACE ====================== */
233
234static int tuntap_open(struct iss_net_private *lp)
235{
236 struct ifreq ifr;
237 char *dev_name = lp->tp.info.tuntap.dev_name;
238 int err = -EINVAL;
239 int fd;
240
241 /* We currently only support a fixed configuration. */
242
243 if (!lp->tp.info.tuntap.fixed_config)
244 return -EINVAL;
245
246 if ((fd = simc_open("/dev/net/tun", 02, 0)) < 0) { /* O_RDWR */
247 printk("Failed to open /dev/net/tun, returned %d "
248 "(errno = %d)\n", fd, errno);
249 return fd;
250 }
251
252 memset(&ifr, 0, sizeof ifr);
253 ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
254 strlcpy(ifr.ifr_name, dev_name, sizeof ifr.ifr_name - 1);
255
256 if ((err = simc_ioctl(fd, TUNSETIFF, (void*) &ifr)) < 0) {
257 printk("Failed to set interface, returned %d "
258 "(errno = %d)\n", err, errno);
259 simc_close(fd);
260 return err;
261 }
262
263 lp->tp.info.tuntap.fd = fd;
264 return err;
265}
266
/* Close the host-side tap fd and mark it invalid. */
static void tuntap_close(struct iss_net_private *lp)
{
#if 0
	if (lp->tp.info.tuntap.fixed_config)
		iter_addresses(lp->tp.info.tuntap.dev, close_addr, lp->host.dev_name);
#endif
	simc_close(lp->tp.info.tuntap.fd);
	lp->tp.info.tuntap.fd = -1;
}
276
/* Read one frame from the tap fd into the skb's data area (up to the
 * device MTU plus Ethernet header). Returns the byte count from the host
 * read, which may be negative on error. */
static int tuntap_read (struct iss_net_private *lp, struct sk_buff **skb)
{
#if 0
	*skb = ether_adjust_skb(*skb, ETH_HEADER_OTHER);
	if (*skb == NULL)
		return -ENOMEM;
#endif

	return simc_read(lp->tp.info.tuntap.fd,
			(*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER);
}
288
/* Write the skb's frame to the tap fd; returns the host write result. */
static int tuntap_write (struct iss_net_private *lp, struct sk_buff **skb)
{
	return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
}
293
/* Classify the received frame's protocol via the standard Ethernet helper.
 * NOTE(review): not static, unlike its siblings -- only used via lp->tp;
 * consider making it static for consistency. */
unsigned short tuntap_protocol(struct sk_buff *skb)
{
	return eth_type_trans(skb, skb->dev);
}
298
/* Nonzero when the tap fd has data pending (non-blocking check). */
static int tuntap_poll(struct iss_net_private *lp)
{
	return simc_poll(lp->tp.info.tuntap.fd);
}
303
304/*
305 * Currently only a device name is supported.
306 * ethX=tuntap[,[mac address][,[device name]]]
307 */
308
309static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
310{
311 const int len = strlen(TRANSPORT_TUNTAP_NAME);
312 char *dev_name = NULL, *mac_str = NULL, *rem = NULL;
313
314 /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */
315
316 if (strncmp(init, TRANSPORT_TUNTAP_NAME, len))
317 return 0;
318
319 if (*(init += strlen(TRANSPORT_TUNTAP_NAME)) == ',') {
320 if ((rem=split_if_spec(init+1, &mac_str, &dev_name)) != NULL) {
321 printk("Extra garbage on specification : '%s'\n", rem);
322 return 0;
323 }
324 } else if (*init != '\0') {
325 printk("Invalid argument: %s. Skipping device!\n", init);
326 return 0;
327 }
328
329 if (dev_name) {
330 strncpy(lp->tp.info.tuntap.dev_name, dev_name,
331 sizeof lp->tp.info.tuntap.dev_name);
332 lp->tp.info.tuntap.fixed_config = 1;
333 } else
334 strcpy(lp->tp.info.tuntap.dev_name, TRANSPORT_TUNTAP_NAME);
335
336
337#if 0
338 if (setup_etheraddr(mac_str, lp->mac))
339 lp->have_mac = 1;
340#endif
341 lp->mtu = TRANSPORT_TUNTAP_MTU;
342
343 //lp->info.tuntap.gate_addr = gate_addr;
344
345 lp->tp.info.tuntap.fd = -1;
346
347 lp->tp.open = tuntap_open;
348 lp->tp.close = tuntap_close;
349 lp->tp.read = tuntap_read;
350 lp->tp.write = tuntap_write;
351 lp->tp.protocol = tuntap_protocol;
352 lp->tp.poll = tuntap_poll;
353
354 printk("TUN/TAP backend - ");
355#if 0
356 if (lp->host.gate_addr != NULL)
357 printk("IP = %s", lp->host.gate_addr);
358#endif
359 printk("\n");
360
361 return 1;
362}
363
364/* ================================ ISS NET ================================ */
365
366static int iss_net_rx(struct net_device *dev)
367{
368 struct iss_net_private *lp = dev->priv;
369 int pkt_len;
370 struct sk_buff *skb;
371
372 /* Check if there is any new data. */
373
374 if (lp->tp.poll(lp) == 0)
375 return 0;
376
377 /* Try to allocate memory, if it fails, try again next round. */
378
379 if ((skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER)) == NULL) {
380 lp->stats.rx_dropped++;
381 return 0;
382 }
383
384 skb_reserve(skb, 2);
385
386 /* Setup skb */
387
388 skb->dev = dev;
389 skb->mac.raw = skb->data;
390 pkt_len = lp->tp.read(lp, &skb);
391 skb_put(skb, pkt_len);
392
393 if (pkt_len > 0) {
394 skb_trim(skb, pkt_len);
395 skb->protocol = lp->tp.protocol(skb);
396 // netif_rx(skb);
397 netif_rx_ni(skb);
398
399 lp->stats.rx_bytes += skb->len;
400 lp->stats.rx_packets++;
401 return pkt_len;
402 }
403 kfree_skb(skb);
404 return pkt_len;
405}
406
/* Drain pending frames on every opened device. Returns the total number of
 * frames received. A device whose read fails is shut down. */
static int iss_net_poll(void)
{
	struct list_head *ele;
	int err, ret = 0;

	spin_lock(&opened_lock);

	list_for_each(ele, &opened) {
		struct iss_net_private *lp;

		lp = list_entry(ele, struct iss_net_private, opened_list);

		/* NOTE(review): this stops at the FIRST non-running device,
		 * skipping the rest of the list -- `continue` may have been
		 * intended. */
		if (!netif_running(lp->dev))
			break;

		spin_lock(&lp->lock);

		while ((err = iss_net_rx(lp->dev)) > 0)
			ret++;

		spin_unlock(&lp->lock);

		if (err < 0) {
			printk(KERN_ERR "Device '%s' read returned %d, "
			       "shutting it down\n", lp->dev->name, err);
			dev_close(lp->dev);
		} else {
			// FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ);
		}
	}

	spin_unlock(&opened_lock);
	return ret;
}
441
442
443static void iss_net_timer(unsigned long priv)
444{
445 struct iss_net_private* lp = (struct iss_net_private*) priv;
446
447 spin_lock(&lp->lock);
448
449 iss_net_poll();
450
451 mod_timer(&lp->timer, jiffies + lp->timer_val);
452
453 spin_unlock(&lp->lock);
454}
455
456
/* netdev open hook: open the transport, derive a MAC from the IP address
 * when none was configured (bytes 2..5 of the fe:fd:... default), drain any
 * backlog, register on the `opened` list, and start the rx polling timer.
 * Returns 0 on success or the transport's negative error. */
static int iss_net_open(struct net_device *dev)
{
	struct iss_net_private *lp = dev->priv;
	char addr[sizeof "255.255.255.255\0"];
	int err;

	spin_lock(&lp->lock);

	if ((err = lp->tp.open(lp)) < 0)
		goto out;

	if (!lp->have_mac) {
		/* Fills lp->mac[2..5] with the device's IPv4 address bytes. */
		dev_ip_addr(dev, addr, &lp->mac[2]);
		set_ether_mac(dev, lp->mac);
	}

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while ((err = iss_net_rx(dev)) > 0)
		;

	spin_lock(&opened_lock);
	list_add(&lp->opened_list, &opened);
	spin_unlock(&opened_lock);

	init_timer(&lp->timer);
	lp->timer_val = ISS_NET_TIMER_VALUE;
	lp->timer.data = (unsigned long) lp;
	lp->timer.function = iss_net_timer;
	mod_timer(&lp->timer, jiffies + lp->timer_val);

out:
	spin_unlock(&lp->lock);
	return err;
}
496
497static int iss_net_close(struct net_device *dev)
498{
499 struct iss_net_private *lp = dev->priv;
500printk("iss_net_close!\n");
501 netif_stop_queue(dev);
502 spin_lock(&lp->lock);
503
504 spin_lock(&opened_lock);
505 list_del(&opened);
506 spin_unlock(&opened_lock);
507
508 del_timer_sync(&lp->timer);
509
510 lp->tp.close(lp);
511
512 spin_unlock(&lp->lock);
513 return 0;
514}
515
/* netdev transmit hook: write the frame synchronously through the
 * transport, update stats, and always consume (free) the skb. Returns 0 in
 * all cases; a short or failed write only bumps counters / logs. */
static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct iss_net_private *lp = dev->priv;
	unsigned long flags;
	int len;

	netif_stop_queue(dev);
	spin_lock_irqsave(&lp->lock, flags);

	len = lp->tp.write(lp, &skb);

	if (len == skb->len) {
		lp->stats.tx_packets++;
		lp->stats.tx_bytes += skb->len;
		dev->trans_start = jiffies;
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);

	} else if (len == 0) {
		netif_start_queue(dev);
		lp->stats.tx_dropped++;

	} else {
		netif_start_queue(dev);
		printk(KERN_ERR "iss_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);
	return 0;
}
550
551
/* netdev stats hook: hand back the counters kept in our private struct. */
static struct net_device_stats *iss_net_get_stats(struct net_device *dev)
{
	struct iss_net_private *lp = dev->priv;
	return &lp->stats;
}
557
/* Stub: multicast filtering is not implemented (disabled body kept for
 * reference). */
static void iss_net_set_multicast_list(struct net_device *dev)
{
#if 0
	if (dev->flags & IFF_PROMISC)
		return;
	else if (dev->mc_count)
		dev->flags |= IFF_ALLMULTI;
	else
		dev->flags &= ~IFF_ALLMULTI;
#endif
}
569
/* Stub: tx timeout recovery is not implemented (disabled body kept for
 * reference). */
static void iss_net_tx_timeout(struct net_device *dev)
{
#if 0
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
#endif
}
577
/* Stub: setting the MAC is compiled out; always reports success. */
static int iss_net_set_mac(struct net_device *dev, void *addr)
{
#if 0
	struct iss_net_private *lp = dev->priv;
	struct sockaddr *hwaddr = addr;

	spin_lock(&lp->lock);
	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
	spin_unlock(&lp->lock);
#endif

	return 0;
}
591
/* MTU changes are not supported; always returns -EINVAL (the disabled body
 * sketches a future implementation). */
static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
{
#if 0
	struct iss_net_private *lp = dev->priv;
	int err = 0;

	spin_lock(&lp->lock);

	// FIXME not needed new_mtu = transport_set_mtu(new_mtu, &lp->user);

	if (new_mtu < 0)
		err = new_mtu;
	else
		dev->mtu = new_mtu;

	spin_unlock(&lp->lock);
	return err;
#endif
	return -EINVAL;
}
612
/*
 * do_ioctl hook: all ioctls are currently rejected with -EINVAL.  The
 * #if 0 block sketches minimal SIOCETHTOOL/ETHTOOL_GDRVINFO support.
 */
static int iss_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
#if 0
	static const struct ethtool_drvinfo info = {
		.cmd     = ETHTOOL_GDRVINFO,
		.driver  = DRIVER_NAME,
		.version = "42",
	};
	void *useraddr;
	u32 ethcmd;

	switch (cmd) {
	case SIOCETHTOOL:
		useraddr = ifr->ifr_data;
		if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
			return -EFAULT;

		switch (ethcmd) {
		case ETHTOOL_GDRVINFO:
			if (copy_to_user(useraddr, &info, sizeof(info)))
				return -EFAULT;
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	default:
		return -EINVAL;
	}
#endif
	return -EINVAL;
}
644
/* Timer callback installed on lp->tl; intentionally does nothing. */
void iss_net_user_timer_expire(unsigned long _conn)
{
}
648
649
/* Stub platform driver so the emulated devices appear in sysfs. */
static struct device_driver iss_net_driver = {
	.name  = DRIVER_NAME,
	.bus   = &platform_bus_type,
};

/* Nonzero once iss_net_driver has been registered (done lazily). */
static int driver_registered;
656
657static int iss_net_configure(int index, char *init)
658{
659 struct net_device *dev;
660 struct iss_net_private *lp;
661 int err;
662
663 if ((dev = alloc_etherdev(sizeof *lp)) == NULL) {
664 printk(KERN_ERR "eth_configure: failed to allocate device\n");
665 return 1;
666 }
667
668 /* Initialize private element. */
669
670 lp = dev->priv;
671 *lp = ((struct iss_net_private) {
672 .device_list = LIST_HEAD_INIT(lp->device_list),
673 .opened_list = LIST_HEAD_INIT(lp->opened_list),
674 .lock = SPIN_LOCK_UNLOCKED,
675 .dev = dev,
676 .index = index,
677 //.fd = -1,
678 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 },
679 .have_mac = 0,
680 });
681
682 /*
683 * Try all transport protocols.
684 * Note: more protocols can be added by adding '&& !X_init(lp, eth)'.
685 */
686
687 if (!tuntap_probe(lp, index, init)) {
688 printk("Invalid arguments. Skipping device!\n");
689 goto errout;
690 }
691
692 printk(KERN_INFO "Netdevice %d ", index);
693 if (lp->have_mac)
694 printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
695 lp->mac[0], lp->mac[1],
696 lp->mac[2], lp->mac[3],
697 lp->mac[4], lp->mac[5]);
698 printk(": ");
699
700 /* sysfs register */
701
702 if (!driver_registered) {
703 driver_register(&iss_net_driver);
704 driver_registered = 1;
705 }
706
707 spin_lock(&devices_lock);
708 list_add(&lp->device_list, &devices);
709 spin_unlock(&devices_lock);
710
711 lp->pdev.id = index;
712 lp->pdev.name = DRIVER_NAME;
713 platform_device_register(&lp->pdev);
714 SET_NETDEV_DEV(dev,&lp->pdev.dev);
715
716 /*
717 * If this name ends up conflicting with an existing registered
718 * netdevice, that is OK, register_netdev{,ice}() will notice this
719 * and fail.
720 */
721 snprintf(dev->name, sizeof dev->name, "eth%d", index);
722
723 dev->mtu = lp->mtu;
724 dev->open = iss_net_open;
725 dev->hard_start_xmit = iss_net_start_xmit;
726 dev->stop = iss_net_close;
727 dev->get_stats = iss_net_get_stats;
728 dev->set_multicast_list = iss_net_set_multicast_list;
729 dev->tx_timeout = iss_net_tx_timeout;
730 dev->set_mac_address = iss_net_set_mac;
731 dev->change_mtu = iss_net_change_mtu;
732 dev->do_ioctl = iss_net_ioctl;
733 dev->watchdog_timeo = (HZ >> 1);
734 dev->irq = -1;
735
736 rtnl_lock();
737 err = register_netdevice(dev);
738 rtnl_unlock();
739
740 if (err) {
741 printk("Error registering net device!\n");
742 /* XXX: should we call ->remove() here? */
743 free_netdev(dev);
744 return 1;
745 }
746
747 init_timer(&lp->tl);
748 lp->tl.function = iss_net_user_timer_expire;
749
750#if 0
751 if (lp->have_mac)
752 set_ether_mac(dev, lp->mac);
753#endif
754 return 0;
755
756errout:
757 // FIXME: unregister; free, etc..
758 return -EIO;
759
760}
761
762/* ------------------------------------------------------------------------- */
763
764/* Filled in during early boot */
765
/* Devices named on the boot command line; drained by iss_net_init(). */
struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);

/* One pending "ethX=..." boot argument recorded by iss_net_setup(). */
struct iss_net_init {
	struct list_head list;
	char *init;		/* init string */
	int index;
};
773
774/*
775 * Parse the command line and look for 'ethX=...' fields, and register all
776 * those fields. They will be later initialized in iss_net_init.
777 */
778
779#define ERR KERN_ERR "iss_net_setup: "
780
781static int iss_net_setup(char *str)
782{
783 struct iss_net_private *device = NULL;
784 struct iss_net_init *new;
785 struct list_head *ele;
786 char *end;
787 int n;
788
789 n = simple_strtoul(str, &end, 0);
790 if (end == str) {
791 printk(ERR "Failed to parse '%s'\n", str);
792 return 1;
793 }
794 if (n < 0) {
795 printk(ERR "Device %d is negative\n", n);
796 return 1;
797 }
798 if (*(str = end) != '=') {
799 printk(ERR "Expected '=' after device number\n");
800 return 1;
801 }
802
803 spin_lock(&devices_lock);
804
805 list_for_each(ele, &devices) {
806 device = list_entry(ele, struct iss_net_private, device_list);
807 if (device->index == n)
808 break;
809 }
810
811 spin_unlock(&devices_lock);
812
813 if (device && device->index == n) {
814 printk(ERR "Device %d already configured\n", n);
815 return 1;
816 }
817
818 if ((new = alloc_bootmem(sizeof new)) == NULL) {
819 printk("Alloc_bootmem failed\n");
820 return 1;
821 }
822
823 INIT_LIST_HEAD(&new->list);
824 new->index = n;
825 new->init = str + 1;
826
827 list_add_tail(&new->list, &eth_cmd_line);
828 return 1;
829}
830
831#undef ERR
832
833__setup("eth", iss_net_setup);
834
835/*
836 * Initialize all ISS Ethernet devices previously registered in iss_net_setup.
837 */
838
839static int iss_net_init(void)
840{
841 struct list_head *ele, *next;
842
843 /* Walk through all Ethernet devices specified in the command line. */
844
845 list_for_each_safe(ele, next, &eth_cmd_line) {
846 struct iss_net_init *eth;
847 eth = list_entry(ele, struct iss_net_init, list);
848 iss_net_configure(eth->index, eth->init);
849 }
850
851 return 1;
852}
853
854module_init(iss_net_init);
855
diff --git a/arch/xtensa/platform-iss/setup.c b/arch/xtensa/platform-iss/setup.c
new file mode 100644
index 000000000000..2e6dcbf0cc04
--- /dev/null
+++ b/arch/xtensa/platform-iss/setup.c
@@ -0,0 +1,112 @@
1/*
2 *
3 * arch/xtensa/platform-iss/setup.c
4 *
5 * Platform specific initialization.
6 *
7 * Authors: Chris Zankel <chris@zankel.net>
8 * Joe Taylor <joe@tensilica.com>
9 *
10 * Copyright 2001 - 2005 Tensilica Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#include <linux/config.h>
19#include <linux/stddef.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/errno.h>
23#include <linux/reboot.h>
24#include <linux/pci.h>
25#include <linux/kdev_t.h>
26#include <linux/types.h>
27#include <linux/major.h>
28#include <linux/blkdev.h>
29#include <linux/console.h>
30#include <linux/delay.h>
31#include <linux/stringify.h>
32#include <linux/notifier.h>
33
34#include <asm/platform.h>
35#include <asm/bootparam.h>
36
37
/* Early platform hook for boot parameters; nothing to do on the ISS. */
void __init platform_init(bp_tag_t* bootparam)
{

}
42
/* Announce the halt, then spin forever -- the simulator has nowhere
 * else to go. */
void platform_halt(void)
{
	printk (" ** Called platform_halt(), looping forever! **\n");
	for (;;)
		;
}
48
/* Announce power-off, then spin forever -- there is no power to cut
 * inside the simulator. */
void platform_power_off(void)
{
	printk (" ** Called platform_power_off(), looping forever! **\n");
	for (;;)
		;
}
/*
 * Simulate a processor reset: clear the debug/loop special registers,
 * force PS to a known value, and jump to the reset vector.  The exact
 * register sequence is order-sensitive; left byte-identical.
 */
void platform_restart(void)
{
	/* Flush and reset the mmu, simulate a processor reset, and
	 * jump to the reset vector. */

	__asm__ __volatile__("movi	a2, 15\n\t"
			     "wsr	a2, " __stringify(ICOUNTLEVEL) "\n\t"
			     "movi	a2, 0\n\t"
			     "wsr	a2, " __stringify(ICOUNT) "\n\t"
			     "wsr	a2, " __stringify(IBREAKENABLE) "\n\t"
			     "wsr	a2, " __stringify(LCOUNT) "\n\t"
			     "movi	a2, 0x1f\n\t"
			     "wsr	a2, " __stringify(PS) "\n\t"
			     "isync\n\t"
			     "jx	%0\n\t"
			     :
			     : "a" (XCHAL_RESET_VECTOR_VADDR)
			     : "a2");

	/* control never gets here */
}
75
extern void iss_net_poll(void);

/* Spinner glyphs for the (currently disabled) heartbeat display. */
const char twirl[]="|/-\\|/-\\";
79
/*
 * Periodic heartbeat hook.  The console spinner below (rotating through
 * twirl[] every 100 calls) is currently disabled.
 */
void platform_heartbeat(void)
{
#if 0
	static int i = 0, j = 0;

	if (--i < 0) {
		i = 99;
		printk("\r%c\r", twirl[j++]);
		if (j == 8)
			j = 0;
	}
#endif
}
93
94
95
/*
 * Panic notifier callback: issues a simcall with a2 = -1, which
 * presumably terminates the simulation session -- TODO confirm against
 * the ISS simcall ABI.
 */
static int
iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	__asm__ __volatile__("movi a2, -1; simcall\n");
	return NOTIFY_DONE;
}
102
103static struct notifier_block iss_panic_block = {
104 iss_panic_event,
105 NULL,
106 0
107};
108
/* Register iss_panic_block on the panic notifier chain so
 * iss_panic_event() runs when the kernel panics.  The command line is
 * accepted but not modified. */
void __init platform_setup(char **p_cmdline)
{
	notifier_chain_register(&panic_notifier_list, &iss_panic_block);
}