-rw-r--r--Documentation/feature-removal-schedule.txt10
-rw-r--r--Documentation/s390/Debugging390.txt2
-rw-r--r--Documentation/video-output.txt34
-rw-r--r--MAINTAINERS16
-rw-r--r--Makefile17
-rw-r--r--arch/i386/defconfig3
-rw-r--r--arch/i386/kernel/acpi/boot.c235
-rw-r--r--arch/i386/kernel/acpi/earlyquirk.c4
-rw-r--r--arch/i386/kernel/cpu/cpufreq/longhaul.c15
-rw-r--r--arch/i386/kernel/mpparse.c4
-rw-r--r--arch/i386/kernel/srat.c84
-rw-r--r--arch/i386/mach-es7000/es7000.h9
-rw-r--r--arch/i386/mach-es7000/es7000plat.c53
-rw-r--r--arch/i386/pci/mmconfig.c24
-rw-r--r--arch/ia64/Kconfig9
-rw-r--r--arch/ia64/hp/common/hwsw_iommu.c4
-rw-r--r--arch/ia64/kernel/acpi.c200
-rw-r--r--arch/ia64/kernel/crash.c16
-rw-r--r--arch/ia64/kernel/crash_dump.c3
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/iosapic.c5
-rw-r--r--arch/ia64/kernel/machine_kexec.c15
-rw-r--r--arch/ia64/kernel/process.c16
-rw-r--r--arch/ia64/kernel/ptrace.c14
-rw-r--r--arch/ia64/kernel/setup.c31
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/mm/contig.c76
-rw-r--r--arch/ia64/mm/discontig.c46
-rw-r--r--arch/ia64/mm/init.c38
-rw-r--r--arch/ia64/sn/kernel/huberror.c16
-rw-r--r--arch/ia64/sn/kernel/io_acpi_init.c314
-rw-r--r--arch/ia64/sn/kernel/io_common.c90
-rw-r--r--arch/ia64/sn/kernel/io_init.c54
-rw-r--r--arch/ia64/sn/kernel/iomv.c5
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c6
-rw-r--r--arch/mips/Kconfig103
-rw-r--r--arch/mips/Kconfig.debug8
-rw-r--r--arch/mips/arc/identify.c2
-rw-r--r--arch/mips/arc/memory.c18
-rw-r--r--arch/mips/au1000/common/irq.c8
-rw-r--r--arch/mips/au1000/common/pci.c18
-rw-r--r--arch/mips/au1000/common/prom.c3
-rw-r--r--arch/mips/au1000/common/setup.c19
-rw-r--r--arch/mips/au1000/pb1100/board_setup.c93
-rw-r--r--arch/mips/au1000/pb1200/irqmap.c32
-rw-r--r--arch/mips/basler/excite/excite_irq.c6
-rw-r--r--arch/mips/cobalt/irq.c2
-rw-r--r--arch/mips/cobalt/setup.c3
-rw-r--r--arch/mips/ddb5xxx/common/prom.c3
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq.c9
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq_5477.c2
-rw-r--r--arch/mips/dec/ioasic-irq.c4
-rw-r--r--arch/mips/dec/kn02-irq.c2
-rw-r--r--arch/mips/dec/prom/memory.c17
-rw-r--r--arch/mips/dec/setup.c12
-rw-r--r--arch/mips/emma2rh/common/irq_emma2rh.c2
-rw-r--r--arch/mips/emma2rh/markeins/irq.c2
-rw-r--r--arch/mips/emma2rh/markeins/irq_markeins.c4
-rw-r--r--arch/mips/gt64120/ev64120/irq.c2
-rw-r--r--arch/mips/gt64120/ev64120/setup.c3
-rw-r--r--arch/mips/gt64120/momenco_ocelot/dbg_io.c4
-rw-r--r--arch/mips/gt64120/momenco_ocelot/irq.c4
-rw-r--r--arch/mips/gt64120/momenco_ocelot/prom.c3
-rw-r--r--arch/mips/gt64120/wrppmc/irq.c2
-rw-r--r--arch/mips/gt64120/wrppmc/setup.c3
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/jmr3927/common/prom.c3
-rw-r--r--arch/mips/jmr3927/rbhma3100/irq.c2
-rw-r--r--arch/mips/jmr3927/rbhma3100/setup.c2
-rw-r--r--arch/mips/kernel/asm-offsets.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/gdb-stub.c6
-rw-r--r--arch/mips/kernel/head.S25
-rw-r--r--arch/mips/kernel/i8259.c24
-rw-r--r--arch/mips/kernel/irixelf.c331
-rw-r--r--arch/mips/kernel/irq-msc01.c4
-rw-r--r--arch/mips/kernel/irq-mv6434x.c14
-rw-r--r--arch/mips/kernel/irq-rm7000.c13
-rw-r--r--arch/mips/kernel/irq-rm9000.c24
-rw-r--r--arch/mips/kernel/irq_cpu.c21
-rw-r--r--arch/mips/kernel/linux32.c28
-rw-r--r--arch/mips/kernel/mips-mt.c9
-rw-r--r--arch/mips/kernel/proc.c8
-rw-r--r--arch/mips/kernel/process.c6
-rw-r--r--arch/mips/kernel/r4k_fpu.S19
-rw-r--r--arch/mips/kernel/rtlx.c4
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c47
-rw-r--r--arch/mips/kernel/signal.c6
-rw-r--r--arch/mips/kernel/signal_n32.c4
-rw-r--r--arch/mips/kernel/smp-mt.c9
-rw-r--r--arch/mips/kernel/smtc.c54
-rw-r--r--arch/mips/kernel/sysirix.c4
-rw-r--r--arch/mips/kernel/vpe.c36
-rw-r--r--arch/mips/lasat/interrupt.c2
-rw-r--r--arch/mips/lasat/prom.c3
-rw-r--r--arch/mips/lib-32/Makefile2
-rw-r--r--arch/mips/lib-64/Makefile2
-rw-r--r--arch/mips/lib-64/memset.S142
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/memset.S (renamed from arch/mips/lib-32/memset.S)35
-rw-r--r--arch/mips/lib/uncached.c4
-rw-r--r--arch/mips/mips-boards/atlas/atlas_int.c9
-rw-r--r--arch/mips/mips-boards/generic/memory.c18
-rw-r--r--arch/mips/mips-boards/malta/malta_int.c7
-rw-r--r--arch/mips/mips-boards/sead/sead_int.c2
-rw-r--r--arch/mips/mips-boards/sim/sim_int.c6
-rw-r--r--arch/mips/mips-boards/sim/sim_mem.c16
-rw-r--r--arch/mips/mm/init.c50
-rw-r--r--arch/mips/momentum/jaguar_atx/Makefile2
-rw-r--r--arch/mips/momentum/jaguar_atx/irq.c4
-rw-r--r--arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h6
-rw-r--r--arch/mips/momentum/jaguar_atx/platform.c235
-rw-r--r--arch/mips/momentum/jaguar_atx/prom.c58
-rw-r--r--arch/mips/momentum/ocelot_3/irq.c2
-rw-r--r--arch/mips/momentum/ocelot_3/prom.c3
-rw-r--r--arch/mips/momentum/ocelot_c/cpci-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/dbg_io.c4
-rw-r--r--arch/mips/momentum/ocelot_c/irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/prom.c3
-rw-r--r--arch/mips/momentum/ocelot_c/uart-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_g/dbg_io.c4
-rw-r--r--arch/mips/momentum/ocelot_g/irq.c4
-rw-r--r--arch/mips/momentum/ocelot_g/prom.c3
-rw-r--r--arch/mips/oprofile/Kconfig2
-rw-r--r--arch/mips/pci/fixup-vr4133.c16
-rw-r--r--arch/mips/philips/pnx8550/common/int.c2
-rw-r--r--arch/mips/philips/pnx8550/common/prom.c3
-rw-r--r--arch/mips/pmc-sierra/yosemite/dbg_io.c2
-rw-r--r--arch/mips/pmc-sierra/yosemite/irq.c6
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c3
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c2
-rw-r--r--arch/mips/qemu/q-mem.c3
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c13
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c10
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c3
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c2
-rw-r--r--arch/mips/sibyte/cfe/setup.c3
-rw-r--r--arch/mips/sibyte/sb1250/irq.c2
-rw-r--r--arch/mips/sibyte/sb1250/prom.c3
-rw-r--r--arch/mips/sni/irq.c2
-rw-r--r--arch/mips/sni/sniprom.c3
-rw-r--r--arch/mips/tx4927/common/tx4927_irq.c4
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c12
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c3
-rw-r--r--arch/mips/tx4938/common/irq.c4
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/irq.c2
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/prom.c3
-rw-r--r--arch/mips/vr41xx/common/icu.c31
-rw-r--r--arch/mips/vr41xx/common/init.c3
-rw-r--r--arch/mips/vr41xx/common/irq.c18
-rw-r--r--arch/mips/vr41xx/nec-cmbvr4133/irq.c53
-rw-r--r--arch/s390/Kconfig29
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/appldata/appldata_net_sum.c2
-rw-r--r--arch/s390/crypto/Kconfig60
-rw-r--r--arch/s390/crypto/Makefile3
-rw-r--r--arch/s390/crypto/aes_s390.c47
-rw-r--r--arch/s390/crypto/crypt_s390.h281
-rw-r--r--arch/s390/crypto/crypt_s390_query.c129
-rw-r--r--arch/s390/crypto/des_check_key.c6
-rw-r--r--arch/s390/crypto/des_s390.c8
-rw-r--r--arch/s390/crypto/prng.c213
-rw-r--r--arch/s390/crypto/sha1_s390.c83
-rw-r--r--arch/s390/crypto/sha256_s390.c11
-rw-r--r--arch/s390/defconfig12
-rw-r--r--arch/s390/hypfs/Makefile2
-rw-r--r--arch/s390/hypfs/hypfs.h9
-rw-r--r--arch/s390/hypfs/hypfs_diag.h16
-rw-r--r--arch/s390/hypfs/hypfs_vm.c231
-rw-r--r--arch/s390/hypfs/inode.c31
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/base.S150
-rw-r--r--arch/s390/kernel/binfmt_elf32.c2
-rw-r--r--arch/s390/kernel/compat_exec_domain.c5
-rw-r--r--arch/s390/kernel/compat_linux.c24
-rw-r--r--arch/s390/kernel/compat_linux.h31
-rw-r--r--arch/s390/kernel/compat_signal.c8
-rw-r--r--arch/s390/kernel/cpcmd.c14
-rw-r--r--arch/s390/kernel/crash.c1
-rw-r--r--arch/s390/kernel/debug.c18
-rw-r--r--arch/s390/kernel/early.c306
-rw-r--r--arch/s390/kernel/ebcdic.c1
-rw-r--r--arch/s390/kernel/head31.S194
-rw-r--r--arch/s390/kernel/head64.S193
-rw-r--r--arch/s390/kernel/ipl.c106
-rw-r--r--arch/s390/kernel/irq.c15
-rw-r--r--arch/s390/kernel/kprobes.c32
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/module.c5
-rw-r--r--arch/s390/kernel/process.c4
-rw-r--r--arch/s390/kernel/profile.c20
-rw-r--r--arch/s390/kernel/ptrace.c46
-rw-r--r--arch/s390/kernel/reset.S90
-rw-r--r--arch/s390/kernel/s390_ext.c8
-rw-r--r--arch/s390/kernel/setup.c155
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c32
-rw-r--r--arch/s390/kernel/stacktrace.c10
-rw-r--r--arch/s390/kernel/time.c1185
-rw-r--r--arch/s390/kernel/traps.c24
-rw-r--r--arch/s390/kernel/vmlinux.lds.S13
-rw-r--r--arch/s390/kernel/vtime.c10
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/delay.c48
-rw-r--r--arch/s390/lib/qrnnd.S77
-rw-r--r--arch/s390/lib/uaccess.h23
-rw-r--r--arch/s390/lib/uaccess_mvcos.c78
-rw-r--r--arch/s390/lib/uaccess_pt.c329
-rw-r--r--arch/s390/lib/uaccess_std.c23
-rw-r--r--arch/s390/math-emu/Makefile2
-rw-r--r--arch/s390/math-emu/math.c2
-rw-r--r--arch/s390/math-emu/qrnnd.S77
-rw-r--r--arch/s390/mm/cmm.c4
-rw-r--r--arch/s390/mm/extmem.c66
-rw-r--r--arch/s390/mm/fault.c93
-rw-r--r--arch/s390/mm/init.c20
-rw-r--r--arch/s390/mm/vmem.c14
-rw-r--r--arch/x86_64/kernel/early-quirks.c4
-rw-r--r--arch/x86_64/kernel/genapic.c4
-rw-r--r--arch/x86_64/kernel/mpparse.c2
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86_64/kernel/time.c18
-rw-r--r--arch/x86_64/mm/srat.c48
-rw-r--r--arch/x86_64/pci/mmconfig.c29
-rw-r--r--crypto/Kconfig49
-rw-r--r--drivers/acpi/Kconfig37
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/asus_acpi.c9
-rw-r--r--drivers/acpi/battery.c4
-rw-r--r--drivers/acpi/bay.c490
-rw-r--r--drivers/acpi/blacklist.c29
-rw-r--r--drivers/acpi/bus.c44
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/container.c6
-rw-r--r--drivers/acpi/debug.c62
-rw-r--r--drivers/acpi/dispatcher/dsfield.c32
-rw-r--r--drivers/acpi/dispatcher/dsinit.c25
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c55
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c2
-rw-r--r--drivers/acpi/dispatcher/dsobject.c78
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c6
-rw-r--r--drivers/acpi/dispatcher/dsutils.c2
-rw-r--r--drivers/acpi/dispatcher/dswexec.c12
-rw-r--r--drivers/acpi/dispatcher/dswload.c19
-rw-r--r--drivers/acpi/dispatcher/dswscope.c2
-rw-r--r--drivers/acpi/dispatcher/dswstate.c2
-rw-r--r--drivers/acpi/dock.c16
-rw-r--r--drivers/acpi/ec.c13
-rw-r--r--drivers/acpi/events/evevent.c17
-rw-r--r--drivers/acpi/events/evgpe.c91
-rw-r--r--drivers/acpi/events/evgpeblk.c64
-rw-r--r--drivers/acpi/events/evmisc.c201
-rw-r--r--drivers/acpi/events/evregion.c17
-rw-r--r--drivers/acpi/events/evrgnini.c168
-rw-r--r--drivers/acpi/events/evsci.c14
-rw-r--r--drivers/acpi/events/evxface.c8
-rw-r--r--drivers/acpi/events/evxfevnt.c27
-rw-r--r--drivers/acpi/events/evxfregn.c2
-rw-r--r--drivers/acpi/executer/exconfig.c235
-rw-r--r--drivers/acpi/executer/exconvrt.c2
-rw-r--r--drivers/acpi/executer/excreate.c21
-rw-r--r--drivers/acpi/executer/exdump.c29
-rw-r--r--drivers/acpi/executer/exfield.c2
-rw-r--r--drivers/acpi/executer/exfldio.c7
-rw-r--r--drivers/acpi/executer/exmisc.c2
-rw-r--r--drivers/acpi/executer/exmutex.c86
-rw-r--r--drivers/acpi/executer/exnames.c2
-rw-r--r--drivers/acpi/executer/exoparg1.c4
-rw-r--r--drivers/acpi/executer/exoparg2.c2
-rw-r--r--drivers/acpi/executer/exoparg3.c2
-rw-r--r--drivers/acpi/executer/exoparg6.c2
-rw-r--r--drivers/acpi/executer/exprep.c2
-rw-r--r--drivers/acpi/executer/exregion.c16
-rw-r--r--drivers/acpi/executer/exresnte.c2
-rw-r--r--drivers/acpi/executer/exresolv.c10
-rw-r--r--drivers/acpi/executer/exresop.c12
-rw-r--r--drivers/acpi/executer/exstore.c2
-rw-r--r--drivers/acpi/executer/exstoren.c2
-rw-r--r--drivers/acpi/executer/exstorob.c2
-rw-r--r--drivers/acpi/executer/exsystem.c110
-rw-r--r--drivers/acpi/executer/exutils.c106
-rw-r--r--drivers/acpi/fan.c8
-rw-r--r--drivers/acpi/glue.c123
-rw-r--r--drivers/acpi/hardware/hwacpi.c56
-rw-r--r--drivers/acpi/hardware/hwgpe.c15
-rw-r--r--drivers/acpi/hardware/hwregs.c98
-rw-r--r--drivers/acpi/hardware/hwsleep.c81
-rw-r--r--drivers/acpi/hardware/hwtimer.c9
-rw-r--r--drivers/acpi/motherboard.c191
-rw-r--r--drivers/acpi/namespace/nsaccess.c36
-rw-r--r--drivers/acpi/namespace/nsalloc.c14
-rw-r--r--drivers/acpi/namespace/nsdump.c13
-rw-r--r--drivers/acpi/namespace/nsdumpdv.c2
-rw-r--r--drivers/acpi/namespace/nseval.c13
-rw-r--r--drivers/acpi/namespace/nsinit.c9
-rw-r--r--drivers/acpi/namespace/nsload.c160
-rw-r--r--drivers/acpi/namespace/nsnames.c2
-rw-r--r--drivers/acpi/namespace/nsobject.c2
-rw-r--r--drivers/acpi/namespace/nsparse.c52
-rw-r--r--drivers/acpi/namespace/nssearch.c9
-rw-r--r--drivers/acpi/namespace/nsutils.c9
-rw-r--r--drivers/acpi/namespace/nswalk.c65
-rw-r--r--drivers/acpi/namespace/nsxfeval.c13
-rw-r--r--drivers/acpi/namespace/nsxfname.c47
-rw-r--r--drivers/acpi/namespace/nsxfobj.c2
-rw-r--r--drivers/acpi/numa.c77
-rw-r--r--drivers/acpi/osl.c97
-rw-r--r--drivers/acpi/parser/psargs.c2
-rw-r--r--drivers/acpi/parser/psloop.c1408
-rw-r--r--drivers/acpi/parser/psopcode.c2
-rw-r--r--drivers/acpi/parser/psparse.c7
-rw-r--r--drivers/acpi/parser/psscope.c2
-rw-r--r--drivers/acpi/parser/pstree.c2
-rw-r--r--drivers/acpi/parser/psutils.c2
-rw-r--r--drivers/acpi/parser/pswalk.c2
-rw-r--r--drivers/acpi/parser/psxface.c116
-rw-r--r--drivers/acpi/pci_link.c4
-rw-r--r--drivers/acpi/pci_root.c38
-rw-r--r--drivers/acpi/processor_core.c189
-rw-r--r--drivers/acpi/processor_idle.c52
-rw-r--r--drivers/acpi/processor_perflib.c27
-rw-r--r--drivers/acpi/processor_throttling.c4
-rw-r--r--drivers/acpi/resources/rsaddr.c2
-rw-r--r--drivers/acpi/resources/rscalc.c2
-rw-r--r--drivers/acpi/resources/rscreate.c2
-rw-r--r--drivers/acpi/resources/rsdump.c2
-rw-r--r--drivers/acpi/resources/rsinfo.c2
-rw-r--r--drivers/acpi/resources/rsio.c2
-rw-r--r--drivers/acpi/resources/rsirq.c2
-rw-r--r--drivers/acpi/resources/rslist.c2
-rw-r--r--drivers/acpi/resources/rsmemory.c2
-rw-r--r--drivers/acpi/resources/rsmisc.c2
-rw-r--r--drivers/acpi/resources/rsutils.c2
-rw-r--r--drivers/acpi/resources/rsxface.c2
-rw-r--r--drivers/acpi/scan.c1265
-rw-r--r--drivers/acpi/sleep/proc.c36
-rw-r--r--drivers/acpi/system.c39
-rw-r--r--drivers/acpi/tables.c508
-rw-r--r--drivers/acpi/tables/Makefile3
-rw-r--r--drivers/acpi/tables/tbconvrt.c622
-rw-r--r--drivers/acpi/tables/tbfadt.c434
-rw-r--r--drivers/acpi/tables/tbfind.c126
-rw-r--r--drivers/acpi/tables/tbget.c471
-rw-r--r--drivers/acpi/tables/tbgetall.c311
-rw-r--r--drivers/acpi/tables/tbinstal.c664
-rw-r--r--drivers/acpi/tables/tbrsdt.c307
-rw-r--r--drivers/acpi/tables/tbutils.c513
-rw-r--r--drivers/acpi/tables/tbxface.c671
-rw-r--r--drivers/acpi/tables/tbxfroot.c552
-rw-r--r--drivers/acpi/thermal.c4
-rw-r--r--drivers/acpi/utilities/utalloc.c11
-rw-r--r--drivers/acpi/utilities/utcache.c10
-rw-r--r--drivers/acpi/utilities/utcopy.c11
-rw-r--r--drivers/acpi/utilities/utdebug.c7
-rw-r--r--drivers/acpi/utilities/utdelete.c16
-rw-r--r--drivers/acpi/utilities/uteval.c2
-rw-r--r--drivers/acpi/utilities/utglobal.c199
-rw-r--r--drivers/acpi/utilities/utinit.c114
-rw-r--r--drivers/acpi/utilities/utmath.c2
-rw-r--r--drivers/acpi/utilities/utmisc.c102
-rw-r--r--drivers/acpi/utilities/utmutex.c2
-rw-r--r--drivers/acpi/utilities/utobject.c2
-rw-r--r--drivers/acpi/utilities/utresrc.c2
-rw-r--r--drivers/acpi/utilities/utstate.c2
-rw-r--r--drivers/acpi/utilities/utxface.c29
-rw-r--r--drivers/acpi/video.c166
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c18
-rw-r--r--drivers/char/tpm/tpm_bios.c8
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/firmware/pcdp.c2
-rw-r--r--drivers/hid/Kconfig14
-rw-r--r--drivers/hid/Makefile11
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-debug.c764
-rw-r--r--drivers/hid/hid-input.c35
-rw-r--r--drivers/infiniband/core/addr.c3
-rw-r--r--drivers/infiniband/core/mad.c11
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h29
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c65
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c78
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c395
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c7
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/misc/Kconfig19
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/asus-laptop.c1165
-rw-r--r--drivers/misc/tifm_7xx1.c402
-rw-r--r--drivers/misc/tifm_core.c65
-rw-r--r--drivers/mmc/at91_mci.c3
-rw-r--r--drivers/mmc/au1xmmc.c13
-rw-r--r--drivers/mmc/imxmmc.c4
-rw-r--r--drivers/mmc/mmc.c182
-rw-r--r--drivers/mmc/mmc_block.c15
-rw-r--r--drivers/mmc/mmc_queue.c2
-rw-r--r--drivers/mmc/mmc_sysfs.c2
-rw-r--r--drivers/mmc/mmci.c15
-rw-r--r--drivers/mmc/omap.c6
-rw-r--r--drivers/mmc/pxamci.c10
-rw-r--r--drivers/mmc/sdhci.c91
-rw-r--r--drivers/mmc/sdhci.h2
-rw-r--r--drivers/mmc/tifm_sd.c487
-rw-r--r--drivers/mmc/wbsd.c102
-rw-r--r--drivers/mmc/wbsd.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c10
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c155
-rw-r--r--drivers/pnp/pnpacpi/Kconfig4
-rw-r--r--drivers/pnp/system.c52
-rw-r--r--drivers/s390/Kconfig8
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c33
-rw-r--r--drivers/s390/block/dasd_3990_erp.c5
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_diag.c8
-rw-r--r--drivers/s390/block/dasd_eckd.c95
-rw-r--r--drivers/s390/block/dasd_eer.c24
-rw-r--r--drivers/s390/block/dasd_erp.c80
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/dasd_proc.c8
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/char/Makefile4
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/con3270.c3
-rw-r--r--drivers/s390/char/defkeymap.c2
-rw-r--r--drivers/s390/char/fs3270.c4
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/monwriter.c4
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp.c93
-rw-r--r--drivers/s390/char/sclp.h18
-rw-r--r--drivers/s390/char/sclp_con.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/char/sclp_info.c57
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/sclp_tty.c2
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape.h22
-rw-r--r--drivers/s390/char/tape_3590.c479
-rw-r--r--drivers/s390/char/tape_3590.h53
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/s390/char/tape_char.c27
-rw-r--r--drivers/s390/char/tape_core.c69
-rw-r--r--drivers/s390/char/tty3270.c13
-rw-r--r--drivers/s390/char/vmlogrdr.c5
-rw-r--r--drivers/s390/cio/blacklist.c10
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/chsc.c270
-rw-r--r--drivers/s390/cio/chsc.h11
-rw-r--r--drivers/s390/cio/cio.c37
-rw-r--r--drivers/s390/cio/cmf.c4
-rw-r--r--drivers/s390/cio/css.c13
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c12
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/device_status.c8
-rw-r--r--drivers/s390/cio/qdio.c77
-rw-r--r--drivers/s390/crypto/ap_bus.c8
-rw-r--r--drivers/s390/crypto/zcrypt_api.c20
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c8
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c3
-rw-r--r--drivers/s390/net/claw.c16
-rw-r--r--drivers/s390/net/ctcmain.c8
-rw-r--r--drivers/s390/net/cu3088.c2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_eddp.c28
-rw-r--r--drivers/s390/net/qeth_main.c92
-rw-r--r--drivers/s390/net/qeth_sys.c30
-rw-r--r--drivers/s390/s390mach.c37
-rw-r--r--drivers/s390/s390mach.h3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c25
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c44
-rw-r--r--drivers/s390/scsi/zfcp_erp.c7
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c38
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c18
-rw-r--r--drivers/s390/sysinfo.c63
-rw-r--r--drivers/usb/input/Kconfig8
-rw-r--r--drivers/usb/input/Makefile3
-rw-r--r--drivers/usb/input/hid-core.c81
-rw-r--r--drivers/usb/input/hid-ff.c3
-rw-r--r--drivers/usb/input/hid-plff.c129
-rw-r--r--drivers/video/output.c129
-rw-r--r--fs/cifs/CHANGES2
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/cifs/file.c12
-rw-r--r--fs/cifs/readdir.c6
-rw-r--r--fs/cifs/smbdes.c10
-rw-r--r--fs/dlm/Kconfig18
-rw-r--r--fs/dlm/config.c154
-rw-r--r--fs/dlm/config.h17
-rw-r--r--fs/dlm/dlm_internal.h20
-rw-r--r--fs/dlm/lock.c87
-rw-r--r--fs/dlm/lockspace.c10
-rw-r--r--fs/dlm/lowcomms-sctp.c151
-rw-r--r--fs/dlm/lowcomms-tcp.c361
-rw-r--r--fs/dlm/midcomms.c4
-rw-r--r--fs/dlm/rcom.c85
-rw-r--r--fs/dlm/recover.c8
-rw-r--r--fs/dlm/recoverd.c22
-rw-r--r--fs/dlm/user.c9
-rw-r--r--fs/dlm/util.c4
-rw-r--r--fs/gfs2/Kconfig47
-rw-r--r--fs/gfs2/bmap.c10
-rw-r--r--fs/gfs2/dir.c25
-rw-r--r--fs/gfs2/dir.h21
-rw-r--r--fs/gfs2/eattr.c8
-rw-r--r--fs/gfs2/glock.c316
-rw-r--r--fs/gfs2/glock.h11
-rw-r--r--fs/gfs2/glops.c136
-rw-r--r--fs/gfs2/incore.h18
-rw-r--r--fs/gfs2/inode.c61
-rw-r--r--fs/gfs2/lm.c8
-rw-r--r--fs/gfs2/locking/dlm/lock_dlm.h2
-rw-r--r--fs/gfs2/locking/dlm/main.c6
-rw-r--r--fs/gfs2/locking/dlm/mount.c6
-rw-r--r--fs/gfs2/locking/dlm/sysfs.c13
-rw-r--r--fs/gfs2/lops.c14
-rw-r--r--fs/gfs2/ops_address.c134
-rw-r--r--fs/gfs2/ops_dentry.c16
-rw-r--r--fs/gfs2/ops_export.c15
-rw-r--r--fs/gfs2/ops_file.c52
-rw-r--r--fs/gfs2/ops_inode.c55
-rw-r--r--fs/gfs2/ops_super.c11
-rw-r--r--fs/gfs2/ops_vm.c24
-rw-r--r--fs/gfs2/super.c16
-rw-r--r--fs/gfs2/sys.c10
-rw-r--r--fs/jfs/inode.c6
-rw-r--r--fs/jfs/jfs_debug.h5
-rw-r--r--fs/jfs/jfs_dmap.c16
-rw-r--r--fs/jfs/jfs_imap.c16
-rw-r--r--fs/jfs/jfs_incore.h29
-rw-r--r--fs/jfs/jfs_lock.h2
-rw-r--r--fs/jfs/jfs_metapage.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/jfs_xtree.c15
-rw-r--r--fs/jfs/namei.c48
-rw-r--r--fs/ocfs2/journal.h4
-rw-r--r--include/acpi/acconfig.h13
-rw-r--r--include/acpi/acdebug.h8
-rw-r--r--include/acpi/acdisasm.h22
-rw-r--r--include/acpi/acdispat.h4
-rw-r--r--include/acpi/acevents.h2
-rw-r--r--include/acpi/acexcep.h10
-rw-r--r--include/acpi/acglobal.h117
-rw-r--r--include/acpi/achware.h6
-rw-r--r--include/acpi/acinterp.h14
-rw-r--r--include/acpi/aclocal.h77
-rw-r--r--include/acpi/acmacros.h71
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acnamesp.h21
-rw-r--r--include/acpi/acobject.h19
-rw-r--r--include/acpi/acopcode.h4
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acparser.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h23
-rw-r--r--include/acpi/acpi_drivers.h15
-rw-r--r--include/acpi/acpiosxf.h8
-rw-r--r--include/acpi/acpixf.h34
-rw-r--r--include/acpi/acresrc.h2
-rw-r--r--include/acpi/acstruct.h5
-rw-r--r--include/acpi/actables.h106
-rw-r--r--include/acpi/actbl.h333
-rw-r--r--include/acpi/actbl1.h568
-rw-r--r--include/acpi/actbl2.h49
-rw-r--r--include/acpi/actbl71.h134
-rw-r--r--include/acpi/actypes.h106
-rw-r--r--include/acpi/acutils.h8
-rw-r--r--include/acpi/amlcode.h4
-rw-r--r--include/acpi/amlresrc.h2
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/asm-i386/acpi.h24
-rw-r--r--include/asm-i386/mach-es7000/mach_mpparse.h17
-rw-r--r--include/asm-ia64/acpi.h10
-rw-r--r--include/asm-ia64/dma.h2
-rw-r--r--include/asm-ia64/esi.h1
-rw-r--r--include/asm-ia64/meminit.h3
-rw-r--r--include/asm-ia64/pgalloc.h3
-rw-r--r--include/asm-ia64/sn/acpi.h3
-rw-r--r--include/asm-ia64/sn/pcibr_provider.h2
-rw-r--r--include/asm-ia64/sn/pcidev.h8
-rw-r--r--include/asm-ia64/swiotlb.h9
-rw-r--r--include/asm-ia64/thread_info.h4
-rw-r--r--include/asm-ia64/unistd.h4
-rw-r--r--include/asm-mips/bootinfo.h4
-rw-r--r--include/asm-mips/ddb5xxx/ddb5477.h41
-rw-r--r--include/asm-mips/dec/interrupts.h3
-rw-r--r--include/asm-mips/dma.h1
-rw-r--r--include/asm-mips/emma2rh/emma2rh.h5
-rw-r--r--include/asm-mips/emma2rh/markeins.h1
-rw-r--r--include/asm-mips/i8259.h3
-rw-r--r--include/asm-mips/io.h4
-rw-r--r--include/asm-mips/irq.h2
-rw-r--r--include/asm-mips/irq_cpu.h6
-rw-r--r--include/asm-mips/mach-au1x00/au1000.h1
-rw-r--r--include/asm-mips/mach-cobalt/cobalt.h4
-rw-r--r--include/asm-mips/mach-emma2rh/irq.h2
-rw-r--r--include/asm-mips/mach-generic/irq.h32
-rw-r--r--include/asm-mips/mach-mips/irq.h2
-rw-r--r--include/asm-mips/mach-vr41xx/irq.h11
-rw-r--r--include/asm-mips/mips-boards/atlasint.h4
-rw-r--r--include/asm-mips/mips-boards/maltaint.h4
-rw-r--r--include/asm-mips/mips-boards/prom.h1
-rw-r--r--include/asm-mips/mips-boards/seadint.h4
-rw-r--r--include/asm-mips/mips-boards/simint.h3
-rw-r--r--include/asm-mips/mipsmtregs.h2
-rw-r--r--include/asm-mips/page.h25
-rw-r--r--include/asm-mips/rtlx.h3
-rw-r--r--include/asm-mips/sections.h2
-rw-r--r--include/asm-mips/sgi/ip22.h13
-rw-r--r--include/asm-mips/smtc_ipi.h3
-rw-r--r--include/asm-mips/uaccess.h3
-rw-r--r--include/asm-mips/vr41xx/cmbvr4133.h5
-rw-r--r--include/asm-s390/compat.h28
-rw-r--r--include/asm-s390/etr.h219
-rw-r--r--include/asm-s390/hardirq.h2
-rw-r--r--include/asm-s390/io.h4
-rw-r--r--include/asm-s390/kdebug.h3
-rw-r--r--include/asm-s390/lowcore.h6
-rw-r--r--include/asm-s390/mmu_context.h50
-rw-r--r--include/asm-s390/pgalloc.h85
-rw-r--r--include/asm-s390/pgtable.h147
-rw-r--r--include/asm-s390/processor.h27
-rw-r--r--include/asm-s390/ptrace.h11
-rw-r--r--include/asm-s390/reset.h3
-rw-r--r--include/asm-s390/sclp.h39
-rw-r--r--include/asm-s390/sections.h2
-rw-r--r--include/asm-s390/setup.h23
-rw-r--r--include/asm-s390/sfp-util.h (renamed from arch/s390/math-emu/sfp-util.h)6
-rw-r--r--include/asm-s390/smp.h6
-rw-r--r--include/asm-s390/system.h4
-rw-r--r--include/asm-s390/tape390.h72
-rw-r--r--include/asm-s390/timer.h3
-rw-r--r--include/asm-s390/timex.h50
-rw-r--r--include/asm-s390/tlbflush.h9
-rw-r--r--include/asm-s390/uaccess.h2
-rw-r--r--include/asm-x86_64/acpi.h24
-rw-r--r--include/asm-x86_64/swiotlb.h8
-rw-r--r--include/linux/acpi.h339
-rw-r--r--include/linux/hid-debug.h749
-rw-r--r--include/linux/hid.h17
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/host.h10
-rw-r--r--include/linux/mmc/mmc.h1
-rw-r--r--include/linux/mmc/protocol.h13
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/tifm.h35
-rw-r--r--include/linux/video_output.h42
-rw-r--r--include/rdma/ib_user_mad.h2
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--lib/swiotlb.c290
-rw-r--r--scripts/Kbuild.include104
-rw-r--r--scripts/gen_initramfs_list.sh43
-rwxr-xr-xscripts/makelst34
-rw-r--r--security/keys/key.c33
683 files changed, 18978 insertions, 15433 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 171daec4f64c..2dc5e5da8f88 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -274,6 +274,7 @@ Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 
 ---------------------------
 
+<<<<<<< test:Documentation/feature-removal-schedule.txt
 What:	ACPI hotkey driver (CONFIG_ACPI_HOTKEY)
 When:	2.6.21
 Why:	hotkey.c was an attempt to consolidate multiple drivers that use
@@ -306,11 +307,18 @@ Why: The ACPI namespace is effectively the symbol list for
 	the BIOS can be extracted and disassembled with acpidump
 	and iasl as documented in the pmtools package here:
 	http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils
-
 Who:	Len Brown <len.brown@intel.com>
 
 ---------------------------
 
+What:	ACPI procfs interface
+When:	July 2007
+Why:	After ACPI sysfs conversion, ACPI attributes will be duplicated
+	in sysfs and the ACPI procfs interface should be removed.
+Who:	Zhang Rui <rui.zhang@intel.com>
+
+---------------------------
+
 What:	/proc/acpi/button
 When:	August 2007
 Why:	/proc/acpi/button has been replaced by events to the input layer
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 3f9ddbc23b27..0993969609cf 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -480,7 +480,7 @@ r2 argument 0 / return value 0 call-clobbered
 r3       argument 1 / return value 1 (if long long)    call-clobbered
 r4       argument 2                                     call-clobbered
 r5       argument 3                                     call-clobbered
-r6       argument 5                                     saved
+r6       argument 4                                     saved
 r7       pointer-to arguments 5 to ...                  saved
 r8       this & that                                    saved
 r9       this & that                                    saved
diff --git a/Documentation/video-output.txt b/Documentation/video-output.txt
new file mode 100644
index 000000000000..e517011be4f9
--- /dev/null
+++ b/Documentation/video-output.txt
@@ -0,0 +1,34 @@
+
+		Video Output Switcher Control
+		~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+	2006 luming.yu@intel.com
+
+The output sysfs class driver provides an abstract video output layer that
+can be used to hook platform specific methods to enable/disable video output
+device through common sysfs interface. For example, on my IBM ThinkPad T42
+laptop, The ACPI video driver registered its output devices and read/write
+method for 'state' with output sysfs class. The user interface under sysfs is:
+
+linux:/sys/class/video_output # tree .
+.
+|-- CRT0
+|   |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+|   |-- state
+|   |-- subsystem -> ../../../class/video_output
+|   `-- uevent
+|-- DVI0
+|   |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+|   |-- state
+|   |-- subsystem -> ../../../class/video_output
+|   `-- uevent
+|-- LCD0
+|   |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+|   |-- state
+|   |-- subsystem -> ../../../class/video_output
+|   `-- uevent
+`-- TV0
+    |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+    |-- state
+    |-- subsystem -> ../../../class/video_output
+    `-- uevent
+
diff --git a/MAINTAINERS b/MAINTAINERS
index f9343398a41f..fe35f3ac4cd3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -584,6 +584,14 @@ W: http://sourceforge.net/projects/acpi4asus
 W:	http://xf.iksaif.net/acpi4asus
 S:	Maintained
 
+ASUS LAPTOP EXTRAS DRIVER
+P:	Corentin Chary
+M:	corentincj@iksaif.net
+L:	acpi4asus-user@lists.sourceforge.net
+W:	http://sourceforge.net/projects/acpi4asus
+W:	http://xf.iksaif.net/acpi4asus
+S:	Maintained
+
 ATA OVER ETHERNET DRIVER
 P:	Ed L. Cashin
 M:	ecashin@coraid.com
@@ -2807,7 +2815,7 @@ M: schwidefsky@de.ibm.com
 P:	Heiko Carstens
 M:	heiko.carstens@de.ibm.com
 M:	linux390@de.ibm.com
-L:	linux-390@vm.marist.edu
+L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
@@ -2815,7 +2823,7 @@ S390 NETWORK DRIVERS
 P:	Frank Pavlic
 M:	fpavlic@de.ibm.com
 M:	linux390@de.ibm.com
-L:	linux-390@vm.marist.edu
+L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
@@ -2823,7 +2831,7 @@ S390 ZFCP DRIVER
 P:	Swen Schillig
 M:	swen@vnet.ibm.com
 M:	linux390@de.ibm.com
-L:	linux-390@vm.marist.edu
+L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
@@ -3663,7 +3671,7 @@ S: Maintained
 W83L51xD SD/MMC CARD INTERFACE DRIVER
 P:	Pierre Ossman
 M:	drzeus-wbsd@drzeus.cx
-L:	wbsd-devel@list.drzeus.cx
+L:	linux-kernel@vger.kernel.org
 W:	http://projects.drzeus.cx/wbsd
 S:	Maintained
 
diff --git a/Makefile b/Makefile
index 7e2750f4ca70..cdeda68cf2aa 100644
--- a/Makefile
+++ b/Makefile
@@ -776,7 +776,7 @@ $(vmlinux-dirs): prepare scripts
 # $(EXTRAVERSION)		eg, -rc6
 # $(localver-full)
 # $(localver)
-#   localversion*		(all localversion* files)
+#   localversion*		(files without backups, containing '~')
 # $(CONFIG_LOCALVERSION)	(from kernel config setting)
 # $(localver-auto)		(only if CONFIG_LOCALVERSION_AUTO is set)
 # ./scripts/setlocalversion	(SCM tag, if one exists)
@@ -787,17 +787,12 @@ $(vmlinux-dirs): prepare scripts
 # moment, only git is supported but other SCMs can edit the script
 # scripts/setlocalversion and add the appropriate checks as needed.
 
-nullstring :=
-space := $(nullstring) # end of line
+pattern = ".*/localversion[^~]*"
+string  = $(shell cat /dev/null \
+	  `find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort`)
 
-___localver = $(objtree)/localversion* $(srctree)/localversion*
-__localver  = $(sort $(wildcard $(___localver)))
-# skip backup files (containing '~')
-_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
-
-localver = $(subst $(space),, \
-	   $(shell cat /dev/null $(_localver)) \
-	   $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
+localver = $(subst $(space),, $(string) \
+	   $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
 
 # If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
 # and if the SCM is know a tag from the SCM is appended.
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 5d80edfc61b7..bb0c376b62b3 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -466,7 +466,8 @@ CONFIG_FW_LOADER=y
 #
 # Plug and Play support
 #
-# CONFIG_PNP is not set
+CONFIG_PNP=y
+CONFIG_PNPACPI=y
 
 #
 # Block devices
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index cbcb2c27f48b..e94aff6888ca 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -66,7 +66,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
 
 #define BAD_MADT_ENTRY(entry, end) ( \
 	(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
-	((acpi_table_entry_header *)entry)->length < sizeof(*entry))
+	((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
 
 #define PREFIX "ACPI: "
 
@@ -79,7 +79,7 @@ int acpi_ioapic;
 int acpi_strict;
 EXPORT_SYMBOL(acpi_strict);
 
-acpi_interrupt_flags acpi_sci_flags __initdata;
+u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
 int acpi_use_timer_override __initdata;
@@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #warning ACPI uses CMPXCHG, i486 and later hardware
 #endif
 
-#define MAX_MADT_ENTRIES 256
-u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
-	{[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
-EXPORT_SYMBOL(x86_acpiid_to_apicid);
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -166,30 +161,26 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
 
 #ifdef CONFIG_PCI_MMCONFIG
 /* The physical address of the MMCONFIG aperture.  Set from ACPI tables. */
-struct acpi_table_mcfg_config *pci_mmcfg_config;
+struct acpi_mcfg_allocation *pci_mmcfg_config;
 int pci_mmcfg_config_num;
 
-int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
+int __init acpi_parse_mcfg(struct acpi_table_header *header)
 {
 	struct acpi_table_mcfg *mcfg;
 	unsigned long i;
 	int config_size;
 
-	if (!phys_addr || !size)
+	if (!header)
 		return -EINVAL;
 
-	mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
-	if (!mcfg) {
-		printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
-		return -ENODEV;
-	}
+	mcfg = (struct acpi_table_mcfg *)header;
 
 	/* how many config structures do we have */
 	pci_mmcfg_config_num = 0;
-	i = size - sizeof(struct acpi_table_mcfg);
-	while (i >= sizeof(struct acpi_table_mcfg_config)) {
+	i = header->length - sizeof(struct acpi_table_mcfg);
+	while (i >= sizeof(struct acpi_mcfg_allocation)) {
 		++pci_mmcfg_config_num;
-		i -= sizeof(struct acpi_table_mcfg_config);
+		i -= sizeof(struct acpi_mcfg_allocation);
 	};
 	if (pci_mmcfg_config_num == 0) {
 		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@@ -204,9 +195,9 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
 		return -ENOMEM;
 	}
 
-	memcpy(pci_mmcfg_config, &mcfg->config, config_size);
+	memcpy(pci_mmcfg_config, &mcfg[1], config_size);
 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		if (mcfg->config[i].base_reserved) {
+		if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
 			printk(KERN_ERR PREFIX
 			       "MMCONFIG not in low 4GB of memory\n");
 			kfree(pci_mmcfg_config);
@@ -220,24 +211,24 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
 #endif	/* CONFIG_PCI_MMCONFIG */
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(struct acpi_table_header *table)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!cpu_has_apic)
 		return -EINVAL;
 
-	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
+	madt = (struct acpi_table_madt *)table;
 	if (!madt) {
 		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
 		return -ENODEV;
 	}
 
-	if (madt->lapic_address) {
-		acpi_lapic_addr = (u64) madt->lapic_address;
+	if (madt->address) {
+		acpi_lapic_addr = (u64) madt->address;
 
 		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-		       madt->lapic_address);
+		       madt->address);
 	}
 
 	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
@@ -246,21 +237,17 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 }
 
 static int __init
-acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_lapic *processor = NULL;
+	struct acpi_madt_local_apic *processor = NULL;
 
-	processor = (struct acpi_table_lapic *)header;
+	processor = (struct acpi_madt_local_apic *)header;
 
 	if (BAD_MADT_ENTRY(processor, end))
 		return -EINVAL;
 
 	acpi_table_print_madt_entry(header);
 
-	/* Record local apic id only when enabled */
-	if (processor->flags.enabled)
-		x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
-
 	/*
 	 * We need to register disabled CPU as well to permit
 	 * counting disabled CPUs. This allows us to size
@@ -269,18 +256,18 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
 	 * when we use CPU hotplug.
 	 */
 	mp_register_lapic(processor->id,	/* APIC ID */
-			  processor->flags.enabled);	/* Enabled? */
+			  processor->lapic_flags & ACPI_MADT_ENABLED);	/* Enabled? */
 
 	return 0;
 }
 
 static int __init
-acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 			  const unsigned long end)
 {
-	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
+	struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
 
-	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
+	lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
 
 	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
 		return -EINVAL;
@@ -291,11 +278,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
 }
 
 static int __init
-acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
+	struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
 
-	lapic_nmi = (struct acpi_table_lapic_nmi *)header;
+	lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
 
 	if (BAD_MADT_ENTRY(lapic_nmi, end))
 		return -EINVAL;
@@ -313,11 +300,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
 #ifdef CONFIG_X86_IO_APIC
 
 static int __init
-acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_ioapic *ioapic = NULL;
+	struct acpi_madt_io_apic *ioapic = NULL;
 
-	ioapic = (struct acpi_table_ioapic *)header;
+	ioapic = (struct acpi_madt_io_apic *)header;
 
 	if (BAD_MADT_ENTRY(ioapic, end))
 		return -EINVAL;
@@ -342,11 +329,11 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 		polarity = 3;
 
 	/* Command-line over-ride via acpi_sci= */
-	if (acpi_sci_flags.trigger)
-		trigger = acpi_sci_flags.trigger;
+	if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
+		trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 
-	if (acpi_sci_flags.polarity)
-		polarity = acpi_sci_flags.polarity;
+	if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
+		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
 	/*
 	 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
@@ -357,51 +344,52 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 
 	/*
 	 * stash over-ride to indicate we've been here
-	 * and for later update of acpi_fadt
+	 * and for later update of acpi_gbl_FADT
 	 */
 	acpi_sci_override_gsi = gsi;
 	return;
 }
 
 static int __init
-acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
 		       const unsigned long end)
 {
-	struct acpi_table_int_src_ovr *intsrc = NULL;
+	struct acpi_madt_interrupt_override *intsrc = NULL;
 
-	intsrc = (struct acpi_table_int_src_ovr *)header;
+	intsrc = (struct acpi_madt_interrupt_override *)header;
 
 	if (BAD_MADT_ENTRY(intsrc, end))
 		return -EINVAL;
 
 	acpi_table_print_madt_entry(header);
 
-	if (intsrc->bus_irq == acpi_fadt.sci_int) {
+	if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
 		acpi_sci_ioapic_setup(intsrc->global_irq,
-				      intsrc->flags.polarity,
-				      intsrc->flags.trigger);
+				      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
+				      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
 		return 0;
 	}
 
 	if (acpi_skip_timer_override &&
-	    intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
+	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
 		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
 		return 0;
 	}
 
-	mp_override_legacy_irq(intsrc->bus_irq,
-			       intsrc->flags.polarity,
-			       intsrc->flags.trigger, intsrc->global_irq);
+	mp_override_legacy_irq(intsrc->source_irq,
+			       intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
+			       (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
+			       intsrc->global_irq);
 
 	return 0;
 }
 
 static int __init
-acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_nmi_src *nmi_src = NULL;
+	struct acpi_madt_nmi_source *nmi_src = NULL;
 
-	nmi_src = (struct acpi_table_nmi_src *)header;
+	nmi_src = (struct acpi_madt_nmi_source *)header;
 
 	if (BAD_MADT_ENTRY(nmi_src, end))
 		return -EINVAL;
@@ -417,7 +405,7 @@ acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
 
 /*
  * acpi_pic_sci_set_trigger()
- * 
+ *
  * use ELCR to set PIC-mode trigger type for SCI
  *
  * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
@@ -511,7 +499,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
-	struct acpi_table_lapic *lapic;
+	struct acpi_madt_local_apic *lapic;
 	cpumask_t tmp_map, new_map;
 	u8 physid;
 	int cpu;
@@ -529,10 +517,10 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		return -EINVAL;
 	}
 
-	lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
+	lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
 
-	if ((lapic->header.type != ACPI_MADT_LAPIC) ||
-	    (!lapic->flags.enabled)) {
+	if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
+	    !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
 		kfree(buffer.pointer);
 		return -EINVAL;
 	}
@@ -544,7 +532,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.pointer = NULL;
 
 	tmp_map = cpu_present_map;
-	mp_register_lapic(physid, lapic->flags.enabled);
+	mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
@@ -566,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
 {
-	int i;
-
-	for_each_possible_cpu(i) {
-		if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
-			x86_acpiid_to_apicid[i] = -1;
-			break;
-		}
-	}
 	x86_cpu_to_apicid[cpu] = -1;
 	cpu_clear(cpu, cpu_present_map);
 	num_processors--;
@@ -619,42 +599,36 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
 	return 0;
 }
 
-static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_sbf(struct acpi_table_header *table)
 {
-	struct acpi_table_sbf *sb;
-
-	if (!phys_addr || !size)
-		return -EINVAL;
+	struct acpi_table_boot *sb;
 
-	sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
+	sb = (struct acpi_table_boot *)table;
 	if (!sb) {
 		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
 		return -ENODEV;
 	}
 
-	sbf_port = sb->sbf_cmos;	/* Save CMOS port */
+	sbf_port = sb->cmos_index;	/* Save CMOS port */
 
 	return 0;
 }
 
 #ifdef CONFIG_HPET_TIMER
 
-static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
+static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
 	struct acpi_table_hpet *hpet_tbl;
 	struct resource *hpet_res;
 	resource_size_t res_start;
 
-	if (!phys || !size)
-		return -EINVAL;
-
-	hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
+	hpet_tbl = (struct acpi_table_hpet *)table;
 	if (!hpet_tbl) {
 		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
 		return -ENODEV;
 	}
 
-	if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
+	if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
 		printk(KERN_WARNING PREFIX "HPET timers must be located in "
 		       "memory.\n");
 		return -1;
@@ -667,29 +641,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
 		hpet_res->name = (void *)&hpet_res[1];
 		hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
-			 "HPET %u", hpet_tbl->number);
+			 "HPET %u", hpet_tbl->sequence);
 		hpet_res->end = (1 * 1024) - 1;
 	}
 
 #ifdef CONFIG_X86_64
-	vxtime.hpet_address = hpet_tbl->addr.addrl |
-	    ((long)hpet_tbl->addr.addrh << 32);
+	vxtime.hpet_address = hpet_tbl->address.address;
 
 	printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 	       hpet_tbl->id, vxtime.hpet_address);
 
 	res_start = vxtime.hpet_address;
-#else /* X86 */
+#else	/* X86 */
 	{
 		extern unsigned long hpet_address;
 
-		hpet_address = hpet_tbl->addr.addrl;
+		hpet_address = hpet_tbl->address.address;
 		printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 		       hpet_tbl->id, hpet_address);
 
 		res_start = hpet_address;
 	}
-#endif /* X86 */
+#endif	/* X86 */
 
 	if (hpet_res) {
 		hpet_res->start = res_start;
@@ -707,42 +680,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
707extern u32 pmtmr_ioport; 680extern u32 pmtmr_ioport;
708#endif 681#endif
709 682
710static int __init acpi_parse_fadt(unsigned long phys, unsigned long size) 683static int __init acpi_parse_fadt(struct acpi_table_header *table)
711{ 684{
712 struct fadt_descriptor *fadt = NULL;
713
714 fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
715 if (!fadt) {
716 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
717 return 0;
718 }
719 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
720 acpi_fadt.sci_int = fadt->sci_int;
721
722 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
723 acpi_fadt.revision = fadt->revision;
724 acpi_fadt.force_apic_physical_destination_mode =
725 fadt->force_apic_physical_destination_mode;
726 685
727#ifdef CONFIG_X86_PM_TIMER 686#ifdef CONFIG_X86_PM_TIMER
728 /* detect the location of the ACPI PM Timer */ 687 /* detect the location of the ACPI PM Timer */
729 if (fadt->revision >= FADT2_REVISION_ID) { 688 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
730 /* FADT rev. 2 */ 689 /* FADT rev. 2 */
731 if (fadt->xpm_tmr_blk.address_space_id != 690 if (acpi_gbl_FADT.xpm_timer_block.space_id !=
732 ACPI_ADR_SPACE_SYSTEM_IO) 691 ACPI_ADR_SPACE_SYSTEM_IO)
733 return 0; 692 return 0;
734 693
735 pmtmr_ioport = fadt->xpm_tmr_blk.address; 694 pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
736 /* 695 /*
737 * "X" fields are optional extensions to the original V1.0 696 * "X" fields are optional extensions to the original V1.0
738 * fields, so we must selectively expand V1.0 fields if the 697 * fields, so we must selectively expand V1.0 fields if the
739 * corresponding X field is zero. 698 * corresponding X field is zero.
740 */ 699 */
741 if (!pmtmr_ioport) 700 if (!pmtmr_ioport)
742 pmtmr_ioport = fadt->V1_pm_tmr_blk; 701 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
743 } else { 702 } else {
744 /* FADT rev. 1 */ 703 /* FADT rev. 1 */
745 pmtmr_ioport = fadt->V1_pm_tmr_blk; 704 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
746 } 705 }
747 if (pmtmr_ioport) 706 if (pmtmr_ioport)
748 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", 707 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
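
The FADT hunk also shows the new handler calling convention: table handlers no longer receive a physical address and size that they must map themselves; the core passes an already-mapped struct acpi_table_header, and FADT contents are read from the ACPICA global acpi_gbl_FADT. A sketch of the two prototypes with placeholder bodies, using only names that appear in this patch:

/* old style: the handler mapped its own table */
static int __init old_style_parse(unsigned long phys, unsigned long size)
{
	struct acpi_table_header *t = __acpi_map_table(phys, size);

	return t ? 0 : 1;	/* placeholder return */
}

/* new style: the table arrives already mapped; FADT fields come from acpi_gbl_FADT */
static int __init new_style_parse(struct acpi_table_header *table)
{
	return 0;		/* placeholder body */
}
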
@@ -784,13 +743,13 @@ static int __init acpi_parse_madt_lapic_entries(void)
784 if (!cpu_has_apic) 743 if (!cpu_has_apic)
785 return -ENODEV; 744 return -ENODEV;
786 745
787 /* 746 /*
788 * Note that the LAPIC address is obtained from the MADT (32-bit value) 747 * Note that the LAPIC address is obtained from the MADT (32-bit value)
789	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value). 748	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
790 */ 749 */
791 750
792 count = 751 count =
793 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, 752 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
794 acpi_parse_lapic_addr_ovr, 0); 753 acpi_parse_lapic_addr_ovr, 0);
795 if (count < 0) { 754 if (count < 0) {
796 printk(KERN_ERR PREFIX 755 printk(KERN_ERR PREFIX
@@ -800,7 +759,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
800 759
801 mp_register_lapic_address(acpi_lapic_addr); 760 mp_register_lapic_address(acpi_lapic_addr);
802 761
803 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, 762 count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
804 MAX_APICS); 763 MAX_APICS);
805 if (!count) { 764 if (!count) {
806 printk(KERN_ERR PREFIX "No LAPIC entries present\n"); 765 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@@ -813,7 +772,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
813 } 772 }
814 773
815 count = 774 count =
816 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0); 775 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
817 if (count < 0) { 776 if (count < 0) {
818 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 777 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
819 /* TBD: Cleanup to allow fallback to MPS */ 778 /* TBD: Cleanup to allow fallback to MPS */
@@ -842,7 +801,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
842 return -ENODEV; 801 return -ENODEV;
843 } 802 }
844 803
845 if (!cpu_has_apic) 804 if (!cpu_has_apic)
846 return -ENODEV; 805 return -ENODEV;
847 806
848 /* 807 /*
@@ -855,7 +814,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
855 } 814 }
856 815
857 count = 816 count =
858 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, 817 acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
859 MAX_IO_APICS); 818 MAX_IO_APICS);
860 if (!count) { 819 if (!count) {
861 printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); 820 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
@@ -866,7 +825,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
866 } 825 }
867 826
868 count = 827 count =
869 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 828 acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
870 NR_IRQ_VECTORS); 829 NR_IRQ_VECTORS);
871 if (count < 0) { 830 if (count < 0) {
872 printk(KERN_ERR PREFIX 831 printk(KERN_ERR PREFIX
@@ -880,13 +839,13 @@ static int __init acpi_parse_madt_ioapic_entries(void)
880 * pretend we got one so we can set the SCI flags. 839 * pretend we got one so we can set the SCI flags.
881 */ 840 */
882 if (!acpi_sci_override_gsi) 841 if (!acpi_sci_override_gsi)
883 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); 842 acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
884 843
885	/* Fill in identity legacy mappings where no override */ 844	/* Fill in identity legacy mappings where no override */
886 mp_config_acpi_legacy_irqs(); 845 mp_config_acpi_legacy_irqs();
887 846
888 count = 847 count =
889 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 848 acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
890 NR_IRQ_VECTORS); 849 NR_IRQ_VECTORS);
891 if (count < 0) { 850 if (count < 0) {
892 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 851 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
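
The remaining MADT hunks in these functions are constant renames: the ACPI_MADT_* subtable IDs become the ACPI_MADT_TYPE_* values from the ACPICA headers, while acpi_table_parse_madt() keeps its shape. A sketch of a call with the new constant, using the subtable-handler prototype this patch introduces elsewhere (the handler name and body are placeholders):

static int __init sketch_parse_ioapic(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	return 0;	/* a real handler validates the entry and registers it */
}

	/* e.g.: acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC,
	 *			      sketch_parse_ioapic, MAX_IO_APICS); */
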
@@ -908,7 +867,7 @@ static void __init acpi_process_madt(void)
908#ifdef CONFIG_X86_LOCAL_APIC 867#ifdef CONFIG_X86_LOCAL_APIC
909 int count, error; 868 int count, error;
910 869
911 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt); 870 count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt);
912 if (count >= 1) { 871 if (count >= 1) {
913 872
914 /* 873 /*
@@ -1195,7 +1154,7 @@ int __init acpi_boot_table_init(void)
1195 if (acpi_disabled && !acpi_ht) 1154 if (acpi_disabled && !acpi_ht)
1196 return 1; 1155 return 1;
1197 1156
1198 /* 1157 /*
1199 * Initialize the ACPI boot-time table parser. 1158 * Initialize the ACPI boot-time table parser.
1200 */ 1159 */
1201 error = acpi_table_init(); 1160 error = acpi_table_init();
@@ -1204,7 +1163,7 @@ int __init acpi_boot_table_init(void)
1204 return error; 1163 return error;
1205 } 1164 }
1206 1165
1207 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 1166 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1208 1167
1209 /* 1168 /*
1210 * blacklist may disable ACPI entirely 1169 * blacklist may disable ACPI entirely
@@ -1232,19 +1191,19 @@ int __init acpi_boot_init(void)
1232 if (acpi_disabled && !acpi_ht) 1191 if (acpi_disabled && !acpi_ht)
1233 return 1; 1192 return 1;
1234 1193
1235 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 1194 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1236 1195
1237 /* 1196 /*
1238 * set sci_int and PM timer address 1197 * set sci_int and PM timer address
1239 */ 1198 */
1240 acpi_table_parse(ACPI_FADT, acpi_parse_fadt); 1199 acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1241 1200
1242 /* 1201 /*
1243 * Process the Multiple APIC Description Table (MADT), if present 1202 * Process the Multiple APIC Description Table (MADT), if present
1244 */ 1203 */
1245 acpi_process_madt(); 1204 acpi_process_madt();
1246 1205
1247 acpi_table_parse(ACPI_HPET, acpi_parse_hpet); 1206 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1248 1207
1249 return 0; 1208 return 0;
1250} 1209}
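
With the new table manager, acpi_boot_init() looks tables up by their four-character signatures (ACPI_SIG_BOOT, ACPI_SIG_FADT, ACPI_SIG_MADT, ACPI_SIG_HPET) instead of enum table IDs. A sketch of the call shape; the return-value check mirrors the "Can't find MADT/FADT" checks in the ia64 code later in this patch:

	/* a return value below 1 means the table was not found */
	if (acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet) < 1)
		printk(KERN_ERR PREFIX "Can't find HPET\n");
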
@@ -1315,13 +1274,17 @@ static int __init setup_acpi_sci(char *s)
1315 if (!s) 1274 if (!s)
1316 return -EINVAL; 1275 return -EINVAL;
1317 if (!strcmp(s, "edge")) 1276 if (!strcmp(s, "edge"))
1318 acpi_sci_flags.trigger = 1; 1277 acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
1278 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1319 else if (!strcmp(s, "level")) 1279 else if (!strcmp(s, "level"))
1320 acpi_sci_flags.trigger = 3; 1280 acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1281 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1321 else if (!strcmp(s, "high")) 1282 else if (!strcmp(s, "high"))
1322 acpi_sci_flags.polarity = 1; 1283 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1284 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1323 else if (!strcmp(s, "low")) 1285 else if (!strcmp(s, "low"))
1324 acpi_sci_flags.polarity = 3; 1286 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1287 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1325 else 1288 else
1326 return -EINVAL; 1289 return -EINVAL;
1327 return 0; 1290 return 0;
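
setup_acpi_sci() now treats acpi_sci_flags as a plain flags byte rather than a struct with .trigger/.polarity bitfields, so each option must mask out the bits it owns before or-ing in the new value. A sketch of a helper capturing that pattern (the helper name and the u8 type for acpi_sci_flags are assumptions here):

static void sketch_set_sci_trigger(u8 *flags, u8 trigger)
{
	/* replace the trigger bits, leave the polarity bits untouched */
	*flags = trigger | (*flags & ~ACPI_MADT_TRIGGER_MASK);
}

	/* e.g. sketch_set_sci_trigger(&acpi_sci_flags, ACPI_MADT_TRIGGER_LEVEL); */
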
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 4b60af7f91dd..bf86f7662d8b 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -16,7 +16,7 @@
16 16
17static int nvidia_hpet_detected __initdata; 17static int nvidia_hpet_detected __initdata;
18 18
19static int __init nvidia_hpet_check(unsigned long phys, unsigned long size) 19static int __init nvidia_hpet_check(struct acpi_table_header *header)
20{ 20{
21 nvidia_hpet_detected = 1; 21 nvidia_hpet_detected = 1;
22 return 0; 22 return 0;
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
30 is enabled. */ 30 is enabled. */
31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { 31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
32 nvidia_hpet_detected = 0; 32 nvidia_hpet_detected = 0;
33 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 33 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
34 if (nvidia_hpet_detected == 0) { 34 if (nvidia_hpet_detected == 0) {
35 acpi_skip_timer_override = 1; 35 acpi_skip_timer_override = 1;
36 printk(KERN_INFO "Nvidia board " 36 printk(KERN_INFO "Nvidia board "
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index e940e00b96c9..a3db9332d652 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
190 /* Invoke C3 */ 190 /* Invoke C3 */
191 inb(cx_address); 191 inb(cx_address);
192 /* Dummy op - must do something useless after P_LVL3 read */ 192 /* Dummy op - must do something useless after P_LVL3 read */
193 t = inl(acpi_fadt.xpm_tmr_blk.address); 193 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
194 } 194 }
195 /* Disable bus ratio bit */ 195 /* Disable bus ratio bit */
196 local_irq_disable(); 196 local_irq_disable();
@@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
250 outb(3, 0x22); 250 outb(3, 0x22);
251 } else if ((pr != NULL) && pr->flags.bm_control) { 251 } else if ((pr != NULL) && pr->flags.bm_control) {
252 /* Disable bus master arbitration */ 252 /* Disable bus master arbitration */
253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
254 ACPI_MTX_DO_NOT_LOCK);
255 } 254 }
256 switch (longhaul_version) { 255 switch (longhaul_version) {
257 256
@@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
281 case TYPE_POWERSAVER: 280 case TYPE_POWERSAVER:
282 if (longhaul_flags & USE_ACPI_C3) { 281 if (longhaul_flags & USE_ACPI_C3) {
283 /* Don't allow wakeup */ 282 /* Don't allow wakeup */
284 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 283 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
285 ACPI_MTX_DO_NOT_LOCK);
286 do_powersaver(cx->address, clock_ratio_index); 284 do_powersaver(cx->address, clock_ratio_index);
287 } else { 285 } else {
288 do_powersaver(0, clock_ratio_index); 286 do_powersaver(0, clock_ratio_index);
@@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
295 outb(0, 0x22); 293 outb(0, 0x22);
296 } else if ((pr != NULL) && pr->flags.bm_control) { 294 } else if ((pr != NULL) && pr->flags.bm_control) {
297 /* Enable bus master arbitration */ 295 /* Enable bus master arbitration */
298 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 296 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
299 ACPI_MTX_DO_NOT_LOCK);
300 } 297 }
301 outb(pic2_mask,0xA1); /* restore mask */ 298 outb(pic2_mask,0xA1); /* restore mask */
302 outb(pic1_mask,0x21); 299 outb(pic1_mask,0x21);
@@ -414,7 +411,7 @@ static int __init longhaul_get_ranges(void)
414 highest_speed = calc_speed(maxmult); 411 highest_speed = calc_speed(maxmult);
415 lowest_speed = calc_speed(minmult); 412 lowest_speed = calc_speed(minmult);
416 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, 413 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
417 print_speed(lowest_speed/1000), 414 print_speed(lowest_speed/1000),
418 print_speed(highest_speed/1000)); 415 print_speed(highest_speed/1000));
419 416
420 if (lowest_speed == highest_speed) { 417 if (lowest_speed == highest_speed) {
@@ -498,7 +495,7 @@ static void __init longhaul_setup_voltagescaling(void)
498 maxvid.mV/1000, maxvid.mV%1000, 495 maxvid.mV/1000, maxvid.mV%1000,
499 minvid.mV/1000, minvid.mV%1000, 496 minvid.mV/1000, minvid.mV%1000,
500 numvscales); 497 numvscales);
501 498
502 j = 0; 499 j = 0;
503 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { 500 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
504 speed = longhaul_table[j].frequency; 501 speed = longhaul_table[j].frequency;
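
The longhaul changes track an ACPICA interface change rather than a driver change: acpi_set_register() drops its ACPI_MTX_DO_NOT_LOCK argument and now takes just the bit-register ID and the value. Sketch of the new call shape as used above:

	/* disable bus-master arbitration before entering C3, re-enable after */
	acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
	/* ... enter C3 ... */
	acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
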
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 49bff3596bff..4f5983c98669 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1057 static int gsi_to_irq[MAX_GSI_NUM]; 1057 static int gsi_to_irq[MAX_GSI_NUM];
1058 1058
1059 /* Don't set up the ACPI SCI because it's already set up */ 1059 /* Don't set up the ACPI SCI because it's already set up */
1060 if (acpi_fadt.sci_int == gsi) 1060 if (acpi_gbl_FADT.sci_interrupt == gsi)
1061 return gsi; 1061 return gsi;
1062 1062
1063 ioapic = mp_find_ioapic(gsi); 1063 ioapic = mp_find_ioapic(gsi);
@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1114 /* 1114 /*
1115 * Don't assign IRQ used by ACPI SCI 1115 * Don't assign IRQ used by ACPI SCI
1116 */ 1116 */
1117 if (gsi == acpi_fadt.sci_int) 1117 if (gsi == acpi_gbl_FADT.sci_interrupt)
1118 gsi = pci_irq++; 1118 gsi = pci_irq++;
1119 gsi_to_irq[irq] = gsi; 1119 gsi_to_irq[irq] = gsi;
1120 } else { 1120 } else {
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index f7e735c077c3..2a8713ec0f9a 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
62/* Identify CPU proximity domains */ 62/* Identify CPU proximity domains */
63static void __init parse_cpu_affinity_structure(char *p) 63static void __init parse_cpu_affinity_structure(char *p)
64{ 64{
65 struct acpi_table_processor_affinity *cpu_affinity = 65 struct acpi_srat_cpu_affinity *cpu_affinity =
66 (struct acpi_table_processor_affinity *) p; 66 (struct acpi_srat_cpu_affinity *) p;
67 67
68 if (!cpu_affinity->flags.enabled) 68 if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
69 return; /* empty entry */ 69 return; /* empty entry */
70 70
71 /* mark this node as "seen" in node bitmap */ 71 /* mark this node as "seen" in node bitmap */
72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain); 72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
73 73
74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain; 74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
75 75
76 printk("CPU 0x%02X in proximity domain 0x%02X\n", 76 printk("CPU 0x%02X in proximity domain 0x%02X\n",
77 cpu_affinity->apic_id, cpu_affinity->proximity_domain); 77 cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
78} 78}
79 79
80/* 80/*
@@ -84,28 +84,27 @@ static void __init parse_cpu_affinity_structure(char *p)
84static void __init parse_memory_affinity_structure (char *sratp) 84static void __init parse_memory_affinity_structure (char *sratp)
85{ 85{
86 unsigned long long paddr, size; 86 unsigned long long paddr, size;
87 unsigned long start_pfn, end_pfn; 87 unsigned long start_pfn, end_pfn;
88 u8 pxm; 88 u8 pxm;
89 struct node_memory_chunk_s *p, *q, *pend; 89 struct node_memory_chunk_s *p, *q, *pend;
90 struct acpi_table_memory_affinity *memory_affinity = 90 struct acpi_srat_mem_affinity *memory_affinity =
91 (struct acpi_table_memory_affinity *) sratp; 91 (struct acpi_srat_mem_affinity *) sratp;
92 92
93 if (!memory_affinity->flags.enabled) 93 if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
94 return; /* empty entry */ 94 return; /* empty entry */
95 95
96 pxm = memory_affinity->proximity_domain & 0xff;
97
96 /* mark this node as "seen" in node bitmap */ 98 /* mark this node as "seen" in node bitmap */
97 BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain); 99 BMAP_SET(pxm_bitmap, pxm);
98 100
99 /* calculate info for memory chunk structure */ 101 /* calculate info for memory chunk structure */
100 paddr = memory_affinity->base_addr_hi; 102 paddr = memory_affinity->base_address;
101 paddr = (paddr << 32) | memory_affinity->base_addr_lo; 103 size = memory_affinity->length;
102 size = memory_affinity->length_hi; 104
103 size = (size << 32) | memory_affinity->length_lo;
104
105 start_pfn = paddr >> PAGE_SHIFT; 105 start_pfn = paddr >> PAGE_SHIFT;
106 end_pfn = (paddr + size) >> PAGE_SHIFT; 106 end_pfn = (paddr + size) >> PAGE_SHIFT;
107 107
108 pxm = memory_affinity->proximity_domain;
109 108
110 if (num_memory_chunks >= MAXCHUNKS) { 109 if (num_memory_chunks >= MAXCHUNKS) {
111 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n", 110 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
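
struct acpi_srat_mem_affinity replaces the old hi/lo field pairs with single 64-bit base_address and length members, and its proximity_domain is a 32-bit value of which this code keeps only the low byte. A before/after sketch using the field names visible in this hunk (paddr and pxm as declared in the function above):

	/* old layout: two 32-bit halves per value */
	paddr = memory_affinity->base_addr_hi;
	paddr = (paddr << 32) | memory_affinity->base_addr_lo;

	/* new layout: one 64-bit field, plus a masked proximity domain */
	paddr = memory_affinity->base_address;
	pxm   = memory_affinity->proximity_domain & 0xff;
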
@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
132 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n", 131 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
133 start_pfn, end_pfn, 132 start_pfn, end_pfn,
134 memory_affinity->memory_type, 133 memory_affinity->memory_type,
135 memory_affinity->proximity_domain, 134 pxm,
136 (memory_affinity->flags.hot_pluggable ? 135 ((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
137 "enabled and removable" : "enabled" ) ); 136 "enabled and removable" : "enabled" ) );
138} 137}
139 138
@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
185 num_memory_chunks = 0; 184 num_memory_chunks = 0;
186 while (p < end) { 185 while (p < end) {
187 switch (*p) { 186 switch (*p) {
188 case ACPI_SRAT_PROCESSOR_AFFINITY: 187 case ACPI_SRAT_TYPE_CPU_AFFINITY:
189 parse_cpu_affinity_structure(p); 188 parse_cpu_affinity_structure(p);
190 break; 189 break;
191 case ACPI_SRAT_MEMORY_AFFINITY: 190 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
192 parse_memory_affinity_structure(p); 191 parse_memory_affinity_structure(p);
193 break; 192 break;
194 default: 193 default:
@@ -262,31 +261,30 @@ out_fail:
262 return 0; 261 return 0;
263} 262}
264 263
264struct acpi_static_rsdt {
265 struct acpi_table_rsdt table;
266 u32 padding[7]; /* Allow for 7 more table entries */
267};
268
265int __init get_memcfg_from_srat(void) 269int __init get_memcfg_from_srat(void)
266{ 270{
267 struct acpi_table_header *header = NULL; 271 struct acpi_table_header *header = NULL;
268 struct acpi_table_rsdp *rsdp = NULL; 272 struct acpi_table_rsdp *rsdp = NULL;
269 struct acpi_table_rsdt *rsdt = NULL; 273 struct acpi_table_rsdt *rsdt = NULL;
270 struct acpi_pointer *rsdp_address = NULL; 274 acpi_native_uint rsdp_address = 0;
271 struct acpi_table_rsdt saved_rsdt; 275 struct acpi_static_rsdt saved_rsdt;
272 int tables = 0; 276 int tables = 0;
273 int i = 0; 277 int i = 0;
274 278
275 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, 279 rsdp_address = acpi_find_rsdp();
276 rsdp_address))) { 280 if (!rsdp_address) {
277 printk("%s: System description tables not found\n", 281 printk("%s: System description tables not found\n",
278 __FUNCTION__); 282 __FUNCTION__);
279 goto out_err; 283 goto out_err;
280 } 284 }
281 285
282 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 286 printk("%s: assigning address to rsdp\n", __FUNCTION__);
283 printk("%s: assigning address to rsdp\n", __FUNCTION__); 287 rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
284 rsdp = (struct acpi_table_rsdp *)
285 (u32)rsdp_address->pointer.physical;
286 } else {
287 printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
288 goto out_err;
289 }
290 if (!rsdp) { 288 if (!rsdp) {
291 printk("%s: Didn't find ACPI root!\n", __FUNCTION__); 289 printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
292 goto out_err; 290 goto out_err;
@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
295 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision, 293 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
296 rsdp->oem_id); 294 rsdp->oem_id);
297 295
298 if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) { 296 if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
299 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__); 297 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
300 goto out_err; 298 goto out_err;
301 } 299 }
302 300
303 rsdt = (struct acpi_table_rsdt *) 301 rsdt = (struct acpi_table_rsdt *)
304 boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt)); 302 boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
305 303
306 if (!rsdt) { 304 if (!rsdt) {
307 printk(KERN_WARNING 305 printk(KERN_WARNING
@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
310 goto out_err; 308 goto out_err;
311 } 309 }
312 310
313 header = & rsdt->header; 311 header = &rsdt->header;
314 312
315 if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) { 313 if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
316 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n"); 314 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
317 goto out_err; 315 goto out_err;
318 } 316 }
@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
330 328
331 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt)); 329 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
332 330
333 if (saved_rsdt.header.length > sizeof(saved_rsdt)) { 331 if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
334 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n", 332 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
335 saved_rsdt.header.length); 333 saved_rsdt.table.header.length);
336 goto out_err; 334 goto out_err;
337 } 335 }
338 336
@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
341 for (i = 0; i < tables; i++) { 339 for (i = 0; i < tables; i++) {
342 /* Map in header, then map in full table length. */ 340 /* Map in header, then map in full table length. */
343 header = (struct acpi_table_header *) 341 header = (struct acpi_table_header *)
344 boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header)); 342 boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
345 if (!header) 343 if (!header)
346 break; 344 break;
347 header = (struct acpi_table_header *) 345 header = (struct acpi_table_header *)
348 boot_ioremap(saved_rsdt.entry[i], header->length); 346 boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
349 if (!header) 347 if (!header)
350 break; 348 break;
351 349
352 if (strncmp((char *) &header->signature, "SRAT", 4)) 350 if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
353 continue; 351 continue;
354 352
355 /* we've found the srat table. don't need to look at any more tables */ 353 /* we've found the srat table. don't need to look at any more tables */
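
The new acpi_static_rsdt wrapper exists because the ACPICA struct acpi_table_rsdt declares only a single table_offset_entry slot; padding the local copy leaves room for a handful of additional 32-bit entries so the RSDT can be cached and walked safely. A sketch of the copy-and-walk, reusing the names from the function above:

	struct acpi_static_rsdt saved_rsdt;

	memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
	/* the length is checked against sizeof(saved_rsdt) before the walk, as above */
	for (i = 0; i < tables; i++)
		header = boot_ioremap(saved_rsdt.table.table_offset_entry[i],
				      sizeof(struct acpi_table_header));
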
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index 80566ca4a80a..c8d5aa132fa0 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -84,15 +84,6 @@ struct es7000_oem_table {
84}; 84};
85 85
86#ifdef CONFIG_ACPI 86#ifdef CONFIG_ACPI
87struct acpi_table_sdt {
88 unsigned long pa;
89 unsigned long count;
90 struct {
91 unsigned long pa;
92 enum acpi_table_id id;
93 unsigned long size;
94 } entry[50];
95};
96 87
97struct oem_table { 88struct oem_table {
98 struct acpi_table_header Header; 89 struct acpi_table_header Header;
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index 3d0fc853516d..9be6ceabf042 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -160,51 +160,14 @@ parse_unisys_oem (char *oemptr)
160int __init 160int __init
161find_unisys_acpi_oem_table(unsigned long *oem_addr) 161find_unisys_acpi_oem_table(unsigned long *oem_addr)
162{ 162{
163 struct acpi_table_rsdp *rsdp = NULL; 163 struct acpi_table_header *header = NULL;
164 unsigned long rsdp_phys = 0; 164 int i = 0;
165 struct acpi_table_header *header = NULL; 165 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
166 int i; 166 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
167 struct acpi_table_sdt sdt; 167 struct oem_table *t = (struct oem_table *)header;
168 168 *oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
169 rsdp_phys = acpi_find_rsdp(); 169 t->OEMTableSize);
170 rsdp = __va(rsdp_phys); 170 return 0;
171 if (rsdp->rsdt_address) {
172 struct acpi_table_rsdt *mapped_rsdt = NULL;
173 sdt.pa = rsdp->rsdt_address;
174
175 header = (struct acpi_table_header *)
176 __acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
177 if (!header)
178 return -ENODEV;
179
180 sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
181 mapped_rsdt = (struct acpi_table_rsdt *)
182 __acpi_map_table(sdt.pa, header->length);
183 if (!mapped_rsdt)
184 return -ENODEV;
185
186 header = &mapped_rsdt->header;
187
188 for (i = 0; i < sdt.count; i++)
189 sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
190 };
191 for (i = 0; i < sdt.count; i++) {
192
193 header = (struct acpi_table_header *)
194 __acpi_map_table(sdt.entry[i].pa,
195 sizeof(struct acpi_table_header));
196 if (!header)
197 continue;
198 if (!strncmp((char *) &header->signature, "OEM1", 4)) {
199 if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
200 void *addr;
201 struct oem_table *t;
202 acpi_table_print(header, sdt.entry[i].pa);
203 t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
204 addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
205 *oem_addr = (unsigned long) addr;
206 return 0;
207 }
208 } 171 }
209 } 172 }
210 return -1; 173 return -1;
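
find_unisys_acpi_oem_table() no longer walks the RSDT by hand; acpi_get_table() can be asked directly for successive instances of a given signature. A condensed sketch of the new lookup, mirroring the loop above:

	struct acpi_table_header *header = NULL;
	int i = 0;

	/* iterate over every table carrying the "OEM1" signature */
	while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
		if (!memcmp(header->oem_id, "UNISYS", 6))
			break;	/* found the Unisys OEM table */
	}
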
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index e2616a266e13..5700220dcf5f 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
36static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) 36static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
37{ 37{
38 int cfg_num = -1; 38 int cfg_num = -1;
39 struct acpi_table_mcfg_config *cfg; 39 struct acpi_mcfg_allocation *cfg;
40 40
41 if (seg == 0 && bus < MAX_CHECK_BUS && 41 if (seg == 0 && bus < MAX_CHECK_BUS &&
42 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots)) 42 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
48 break; 48 break;
49 } 49 }
50 cfg = &pci_mmcfg_config[cfg_num]; 50 cfg = &pci_mmcfg_config[cfg_num];
51 if (cfg->pci_segment_group_number != seg) 51 if (cfg->pci_segment != seg)
52 continue; 52 continue;
53 if ((cfg->start_bus_number <= bus) && 53 if ((cfg->start_bus_number <= bus) &&
54 (cfg->end_bus_number >= bus)) 54 (cfg->end_bus_number >= bus))
55 return cfg->base_address; 55 return cfg->address;
56 } 56 }
57 57
58 /* Handle more broken MCFG tables on Asus etc. 58 /* Handle more broken MCFG tables on Asus etc.
@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
60 this applies to all busses. */ 60 this applies to all busses. */
61 cfg = &pci_mmcfg_config[0]; 61 cfg = &pci_mmcfg_config[0];
62 if (pci_mmcfg_config_num == 1 && 62 if (pci_mmcfg_config_num == 1 &&
63 cfg->pci_segment_group_number == 0 && 63 cfg->pci_segment == 0 &&
64 (cfg->start_bus_number | cfg->end_bus_number) == 0) 64 (cfg->start_bus_number | cfg->end_bus_number) == 0)
65 return cfg->base_address; 65 return cfg->address;
66 66
67 /* Fall back to type 0 */ 67 /* Fall back to type 0 */
68 return 0; 68 return 0;
@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
125 unsigned long flags; 125 unsigned long flags;
126 u32 base; 126 u32 base;
127 127
128 if ((bus > 255) || (devfn > 255) || (reg > 4095)) 128 if ((bus > 255) || (devfn > 255) || (reg > 4095))
129 return -EINVAL; 129 return -EINVAL;
130 130
131 base = get_base_addr(seg, bus, devfn); 131 base = get_base_addr(seg, bus, devfn);
@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
199 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 199 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
200 return; 200 return;
201 201
202 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 202 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
203 if ((pci_mmcfg_config_num == 0) || 203 if ((pci_mmcfg_config_num == 0) ||
204 (pci_mmcfg_config == NULL) || 204 (pci_mmcfg_config == NULL) ||
205 (pci_mmcfg_config[0].base_address == 0)) 205 (pci_mmcfg_config[0].address == 0))
206 return; 206 return;
207 207
208 /* Only do this check when type 1 works. If it doesn't work 208 /* Only do this check when type 1 works. If it doesn't work
209 assume we run on a Mac and always use MCFG */ 209 assume we run on a Mac and always use MCFG */
210 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address, 210 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
211 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, 211 pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
212 E820_RESERVED)) { 212 E820_RESERVED)) {
213 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", 213 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
214 pci_mmcfg_config[0].base_address); 214 (unsigned long)pci_mmcfg_config[0].address);
215 printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); 215 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
216 return; 216 return;
217 } 217 }
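
The MMCONFIG entries are now struct acpi_mcfg_allocation, with pci_segment, start_bus_number, end_bus_number and a 64-bit address field. A sketch of the segment/bus-range match that get_base_addr() performs, using only those fields (the helper name is illustrative):

static u32 sketch_mcfg_base(struct acpi_mcfg_allocation *cfg,
			    unsigned int seg, int bus)
{
	if (cfg->pci_segment == seg &&
	    cfg->start_bus_number <= bus && bus <= cfg->end_bus_number)
		return cfg->address;	/* truncated to 32 bits on i386, as above */
	return 0;
}
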
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe291b9b..f1d2899e9a62 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,8 @@ menu "Processor type and features"
11 11
12config IA64 12config IA64
13 bool 13 bool
14 select PCI if (!IA64_HP_SIM)
15 select ACPI if (!IA64_HP_SIM)
14 default y 16 default y
15 help 17 help
16 The Itanium Processor Family is Intel's 64-bit successor to 18 The Itanium Processor Family is Intel's 64-bit successor to
@@ -28,7 +30,6 @@ config MMU
28 30
29config SWIOTLB 31config SWIOTLB
30 bool 32 bool
31 default y
32 33
33config RWSEM_XCHGADD_ALGORITHM 34config RWSEM_XCHGADD_ALGORITHM
34 bool 35 bool
@@ -84,10 +85,9 @@ choice
84 85
85config IA64_GENERIC 86config IA64_GENERIC
86 bool "generic" 87 bool "generic"
87 select ACPI
88 select PCI
89 select NUMA 88 select NUMA
90 select ACPI_NUMA 89 select ACPI_NUMA
90 select SWIOTLB
91 help 91 help
92 This selects the system type of your hardware. A "generic" kernel 92 This selects the system type of your hardware. A "generic" kernel
93 will run on any supported IA-64 system. However, if you configure 93 will run on any supported IA-64 system. However, if you configure
@@ -104,6 +104,7 @@ config IA64_GENERIC
104 104
105config IA64_DIG 105config IA64_DIG
106 bool "DIG-compliant" 106 bool "DIG-compliant"
107 select SWIOTLB
107 108
108config IA64_HP_ZX1 109config IA64_HP_ZX1
109 bool "HP-zx1/sx1000" 110 bool "HP-zx1/sx1000"
@@ -113,6 +114,7 @@ config IA64_HP_ZX1
113 114
114config IA64_HP_ZX1_SWIOTLB 115config IA64_HP_ZX1_SWIOTLB
115 bool "HP-zx1/sx1000 with software I/O TLB" 116 bool "HP-zx1/sx1000 with software I/O TLB"
117 select SWIOTLB
116 help 118 help
117 Build a kernel that runs on HP zx1 and sx1000 systems even when they 119 Build a kernel that runs on HP zx1 and sx1000 systems even when they
118 have broken PCI devices which cannot DMA to full 32 bits. Apart 120 have broken PCI devices which cannot DMA to full 32 bits. Apart
@@ -131,6 +133,7 @@ config IA64_SGI_SN2
131 133
132config IA64_HP_SIM 134config IA64_HP_SIM
133 bool "Ski-simulator" 135 bool "Ski-simulator"
136 select SWIOTLB
134 137
135endchoice 138endchoice
136 139
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index a5a5637507be..2153bcacbe6c 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
192EXPORT_SYMBOL(hwsw_dma_supported); 192EXPORT_SYMBOL(hwsw_dma_supported);
193EXPORT_SYMBOL(hwsw_alloc_coherent); 193EXPORT_SYMBOL(hwsw_alloc_coherent);
194EXPORT_SYMBOL(hwsw_free_coherent); 194EXPORT_SYMBOL(hwsw_free_coherent);
195EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
196EXPORT_SYMBOL(hwsw_sync_single_for_device);
197EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
198EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 29f05d4b68cd..9197d7b361b3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
55 55
56#define BAD_MADT_ENTRY(entry, end) ( \ 56#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
58 ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) 58 ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
59 59
60#define PREFIX "ACPI: " 60#define PREFIX "ACPI: "
61 61
@@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
67unsigned int acpi_cpei_override; 67unsigned int acpi_cpei_override;
68unsigned int acpi_cpei_phys_cpuid; 68unsigned int acpi_cpei_phys_cpuid;
69 69
70#define MAX_SAPICS 256
71u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
72
73EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
74
75const char *acpi_get_sysname(void) 70const char *acpi_get_sysname(void)
76{ 71{
77#ifdef CONFIG_IA64_GENERIC 72#ifdef CONFIG_IA64_GENERIC
78 unsigned long rsdp_phys; 73 unsigned long rsdp_phys;
79 struct acpi20_table_rsdp *rsdp; 74 struct acpi_table_rsdp *rsdp;
80 struct acpi_table_xsdt *xsdt; 75 struct acpi_table_xsdt *xsdt;
81 struct acpi_table_header *hdr; 76 struct acpi_table_header *hdr;
82 77
@@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
87 return "dig"; 82 return "dig";
88 } 83 }
89 84
90 rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys); 85 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
91 if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) { 86 if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
92 printk(KERN_ERR 87 printk(KERN_ERR
93 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); 88 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
94 return "dig"; 89 return "dig";
95 } 90 }
96 91
97 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address); 92 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
98 hdr = &xsdt->header; 93 hdr = &xsdt->header;
99 if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) { 94 if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
100 printk(KERN_ERR 95 printk(KERN_ERR
101 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); 96 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
102 return "dig"; 97 return "dig";
@@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
169static u8 has_8259; 164static u8 has_8259;
170 165
171static int __init 166static int __init
172acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header, 167acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
173 const unsigned long end) 168 const unsigned long end)
174{ 169{
175 struct acpi_table_lapic_addr_ovr *lapic; 170 struct acpi_madt_local_apic_override *lapic;
176 171
177 lapic = (struct acpi_table_lapic_addr_ovr *)header; 172 lapic = (struct acpi_madt_local_apic_override *)header;
178 173
179 if (BAD_MADT_ENTRY(lapic, end)) 174 if (BAD_MADT_ENTRY(lapic, end))
180 return -EINVAL; 175 return -EINVAL;
@@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
187} 182}
188 183
189static int __init 184static int __init
190acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end) 185acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
191{ 186{
192 struct acpi_table_lsapic *lsapic; 187 struct acpi_madt_local_sapic *lsapic;
193 188
194 lsapic = (struct acpi_table_lsapic *)header; 189 lsapic = (struct acpi_madt_local_sapic *)header;
195 190
196 if (BAD_MADT_ENTRY(lsapic, end)) 191 /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
197 return -EINVAL;
198 192
199 if (lsapic->flags.enabled) { 193 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
200#ifdef CONFIG_SMP 194#ifdef CONFIG_SMP
201 smp_boot_data.cpu_phys_id[available_cpus] = 195 smp_boot_data.cpu_phys_id[available_cpus] =
202 (lsapic->id << 8) | lsapic->eid; 196 (lsapic->id << 8) | lsapic->eid;
203#endif 197#endif
204 ia64_acpiid_to_sapicid[lsapic->acpi_id] =
205 (lsapic->id << 8) | lsapic->eid;
206 ++available_cpus; 198 ++available_cpus;
207 } 199 }
208 200
@@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
211} 203}
212 204
213static int __init 205static int __init
214acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end) 206acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
215{ 207{
216 struct acpi_table_lapic_nmi *lacpi_nmi; 208 struct acpi_madt_local_apic_nmi *lacpi_nmi;
217 209
218 lacpi_nmi = (struct acpi_table_lapic_nmi *)header; 210 lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
219 211
220 if (BAD_MADT_ENTRY(lacpi_nmi, end)) 212 if (BAD_MADT_ENTRY(lacpi_nmi, end))
221 return -EINVAL; 213 return -EINVAL;
@@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
225} 217}
226 218
227static int __init 219static int __init
228acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end) 220acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
229{ 221{
230 struct acpi_table_iosapic *iosapic; 222 struct acpi_madt_io_sapic *iosapic;
231 223
232 iosapic = (struct acpi_table_iosapic *)header; 224 iosapic = (struct acpi_madt_io_sapic *)header;
233 225
234 if (BAD_MADT_ENTRY(iosapic, end)) 226 if (BAD_MADT_ENTRY(iosapic, end))
235 return -EINVAL; 227 return -EINVAL;
@@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
240static unsigned int __initdata acpi_madt_rev; 232static unsigned int __initdata acpi_madt_rev;
241 233
242static int __init 234static int __init
243acpi_parse_plat_int_src(acpi_table_entry_header * header, 235acpi_parse_plat_int_src(struct acpi_subtable_header * header,
244 const unsigned long end) 236 const unsigned long end)
245{ 237{
246 struct acpi_table_plat_int_src *plintsrc; 238 struct acpi_madt_interrupt_source *plintsrc;
247 int vector; 239 int vector;
248 240
249 plintsrc = (struct acpi_table_plat_int_src *)header; 241 plintsrc = (struct acpi_madt_interrupt_source *)header;
250 242
251 if (BAD_MADT_ENTRY(plintsrc, end)) 243 if (BAD_MADT_ENTRY(plintsrc, end))
252 return -EINVAL; 244 return -EINVAL;
@@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
257 */ 249 */
258 vector = iosapic_register_platform_intr(plintsrc->type, 250 vector = iosapic_register_platform_intr(plintsrc->type,
259 plintsrc->global_irq, 251 plintsrc->global_irq,
260 plintsrc->iosapic_vector, 252 plintsrc->io_sapic_vector,
261 plintsrc->eid, 253 plintsrc->eid,
262 plintsrc->id, 254 plintsrc->id,
263 (plintsrc->flags.polarity == 255 ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
264 1) ? IOSAPIC_POL_HIGH : 256 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
265 IOSAPIC_POL_LOW, 257 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
266 (plintsrc->flags.trigger == 258 ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
267 1) ? IOSAPIC_EDGE : 259 ACPI_MADT_TRIGGER_EDGE) ?
268 IOSAPIC_LEVEL); 260 IOSAPIC_EDGE : IOSAPIC_LEVEL);
269 261
270 platform_intr_list[plintsrc->type] = vector; 262 platform_intr_list[plintsrc->type] = vector;
271 if (acpi_madt_rev > 1) { 263 if (acpi_madt_rev > 1) {
272 acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag; 264 acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
273 } 265 }
274 266
275 /* 267 /*
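
The old MADT subtable structs exposed polarity and trigger as C bitfields (flags.polarity, flags.trigger); the ACPICA structs pack both into an inti_flags word that is decoded with the ACPI_MADT_*_MASK constants. A sketch of the decode this file now repeats for platform interrupt sources and interrupt overrides:

	int polarity = ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
			ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
			IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	int trigger  = ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
			ACPI_MADT_TRIGGER_EDGE) ?
			IOSAPIC_EDGE : IOSAPIC_LEVEL;
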
@@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
324} 316}
325 317
326static int __init 318static int __init
327acpi_parse_int_src_ovr(acpi_table_entry_header * header, 319acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
328 const unsigned long end) 320 const unsigned long end)
329{ 321{
330 struct acpi_table_int_src_ovr *p; 322 struct acpi_madt_interrupt_override *p;
331 323
332 p = (struct acpi_table_int_src_ovr *)header; 324 p = (struct acpi_madt_interrupt_override *)header;
333 325
334 if (BAD_MADT_ENTRY(p, end)) 326 if (BAD_MADT_ENTRY(p, end))
335 return -EINVAL; 327 return -EINVAL;
336 328
337 iosapic_override_isa_irq(p->bus_irq, p->global_irq, 329 iosapic_override_isa_irq(p->source_irq, p->global_irq,
338 (p->flags.polarity == 330 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
339 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, 331 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
340 (p->flags.trigger == 332 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
341 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); 333 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
334 ACPI_MADT_TRIGGER_EDGE) ?
335 IOSAPIC_EDGE : IOSAPIC_LEVEL);
342 return 0; 336 return 0;
343} 337}
344 338
345static int __init 339static int __init
346acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end) 340acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
347{ 341{
348 struct acpi_table_nmi_src *nmi_src; 342 struct acpi_madt_nmi_source *nmi_src;
349 343
350 nmi_src = (struct acpi_table_nmi_src *)header; 344 nmi_src = (struct acpi_madt_nmi_source *)header;
351 345
352 if (BAD_MADT_ENTRY(nmi_src, end)) 346 if (BAD_MADT_ENTRY(nmi_src, end))
353 return -EINVAL; 347 return -EINVAL;
@@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
371 } 365 }
372} 366}
373 367
374static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size) 368static int __init acpi_parse_madt(struct acpi_table_header *table)
375{ 369{
376 if (!phys_addr || !size) 370 if (!table)
377 return -EINVAL; 371 return -EINVAL;
378 372
379 acpi_madt = (struct acpi_table_madt *)__va(phys_addr); 373 acpi_madt = (struct acpi_table_madt *)table;
380 374
381 acpi_madt_rev = acpi_madt->header.revision; 375 acpi_madt_rev = acpi_madt->header.revision;
382 376
@@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
384#ifdef CONFIG_ITANIUM 378#ifdef CONFIG_ITANIUM
385 has_8259 = 1; /* Firmware on old Itanium systems is broken */ 379 has_8259 = 1; /* Firmware on old Itanium systems is broken */
386#else 380#else
387 has_8259 = acpi_madt->flags.pcat_compat; 381 has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
388#endif 382#endif
389 iosapic_system_init(has_8259); 383 iosapic_system_init(has_8259);
390 384
391 /* Get base address of IPI Message Block */ 385 /* Get base address of IPI Message Block */
392 386
393 if (acpi_madt->lapic_address) 387 if (acpi_madt->address)
394 ipi_base_addr = ioremap(acpi_madt->lapic_address, 0); 388 ipi_base_addr = ioremap(acpi_madt->address, 0);
395 389
396 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); 390 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
397 391
@@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
413#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) 407#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
414static struct acpi_table_slit __initdata *slit_table; 408static struct acpi_table_slit __initdata *slit_table;
415 409
416static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa) 410static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
417{ 411{
418 int pxm; 412 int pxm;
419 413
420 pxm = pa->proximity_domain; 414 pxm = pa->proximity_domain_lo;
421 if (ia64_platform_is("sn2")) 415 if (ia64_platform_is("sn2"))
422 pxm += pa->reserved[0] << 8; 416 pxm += pa->proximity_domain_hi[0] << 8;
423 return pxm; 417 return pxm;
424} 418}
425 419
426static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma) 420static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
427{ 421{
428 int pxm; 422 int pxm;
429 423
430 pxm = ma->proximity_domain; 424 pxm = ma->proximity_domain;
431 if (ia64_platform_is("sn2")) 425 if (!ia64_platform_is("sn2"))
432 pxm += ma->reserved1[0] << 8; 426 pxm &= 0xff;
427
433 return pxm; 428 return pxm;
434} 429}
435 430
@@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
442 u32 len; 437 u32 len;
443 438
444 len = sizeof(struct acpi_table_header) + 8 439 len = sizeof(struct acpi_table_header) + 8
445 + slit->localities * slit->localities; 440 + slit->locality_count * slit->locality_count;
446 if (slit->header.length != len) { 441 if (slit->header.length != len) {
447 printk(KERN_ERR 442 printk(KERN_ERR
448 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", 443 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
454} 449}
455 450
456void __init 451void __init
457acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 452acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
458{ 453{
459 int pxm; 454 int pxm;
460 455
461 if (!pa->flags.enabled) 456 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
462 return; 457 return;
463 458
464 pxm = get_processor_proximity_domain(pa); 459 pxm = get_processor_proximity_domain(pa);
@@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
467 pxm_bit_set(pxm); 462 pxm_bit_set(pxm);
468 463
469 node_cpuid[srat_num_cpus].phys_id = 464 node_cpuid[srat_num_cpus].phys_id =
470 (pa->apic_id << 8) | (pa->lsapic_eid); 465 (pa->apic_id << 8) | (pa->local_sapic_eid);
471 /* nid should be overridden as logical node id later */ 466 /* nid should be overridden as logical node id later */
472 node_cpuid[srat_num_cpus].nid = pxm; 467 node_cpuid[srat_num_cpus].nid = pxm;
473 srat_num_cpus++; 468 srat_num_cpus++;
474} 469}
475 470
476void __init 471void __init
477acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) 472acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
478{ 473{
479 unsigned long paddr, size; 474 unsigned long paddr, size;
480 int pxm; 475 int pxm;
@@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
483 pxm = get_memory_proximity_domain(ma); 478 pxm = get_memory_proximity_domain(ma);
484 479
485 /* fill node memory chunk structure */ 480 /* fill node memory chunk structure */
486 paddr = ma->base_addr_hi; 481 paddr = ma->base_address;
487 paddr = (paddr << 32) | ma->base_addr_lo; 482 size = ma->length;
488 size = ma->length_hi;
489 size = (size << 32) | ma->length_lo;
490 483
491 /* Ignore disabled entries */ 484 /* Ignore disabled entries */
492 if (!ma->flags.enabled) 485 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
493 return; 486 return;
494 487
495 /* record this node in proximity bitmap */ 488 /* record this node in proximity bitmap */
@@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
560 if (!slit_table) 553 if (!slit_table)
561 return; 554 return;
562 memset(numa_slit, -1, sizeof(numa_slit)); 555 memset(numa_slit, -1, sizeof(numa_slit));
563 for (i = 0; i < slit_table->localities; i++) { 556 for (i = 0; i < slit_table->locality_count; i++) {
564 if (!pxm_bit_test(i)) 557 if (!pxm_bit_test(i))
565 continue; 558 continue;
566 node_from = pxm_to_node(i); 559 node_from = pxm_to_node(i);
567 for (j = 0; j < slit_table->localities; j++) { 560 for (j = 0; j < slit_table->locality_count; j++) {
568 if (!pxm_bit_test(j)) 561 if (!pxm_bit_test(j))
569 continue; 562 continue;
570 node_to = pxm_to_node(j); 563 node_to = pxm_to_node(j);
571 node_distance(node_from, node_to) = 564 node_distance(node_from, node_to) =
572 slit_table->entry[i * slit_table->localities + j]; 565 slit_table->entry[i * slit_table->locality_count + j];
573 } 566 }
574 } 567 }
575 568
@@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)
617 610
618EXPORT_SYMBOL(acpi_unregister_gsi); 611EXPORT_SYMBOL(acpi_unregister_gsi);
619 612
620static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size) 613static int __init acpi_parse_fadt(struct acpi_table_header *table)
621{ 614{
622 struct acpi_table_header *fadt_header; 615 struct acpi_table_header *fadt_header;
623 struct fadt_descriptor *fadt; 616 struct acpi_table_fadt *fadt;
624 617
625 if (!phys_addr || !size) 618 if (!table)
626 return -EINVAL; 619 return -EINVAL;
627 620
628 fadt_header = (struct acpi_table_header *)__va(phys_addr); 621 fadt_header = (struct acpi_table_header *)table;
629 if (fadt_header->revision != 3) 622 if (fadt_header->revision != 3)
630 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 623 return -ENODEV; /* Only deal with ACPI 2.0 FADT */
631 624
632 fadt = (struct fadt_descriptor *)fadt_header; 625 fadt = (struct acpi_table_fadt *)fadt_header;
633 626
634 acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); 627 acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
635 return 0; 628 return 0;
636} 629}
637 630
@@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
658 * information -- the successor to MPS tables. 651 * information -- the successor to MPS tables.
659 */ 652 */
660 653
661 if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) { 654 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) < 1) {
662 printk(KERN_ERR PREFIX "Can't find MADT\n"); 655 printk(KERN_ERR PREFIX "Can't find MADT\n");
663 goto skip_madt; 656 goto skip_madt;
664 } 657 }
@@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
666 /* Local APIC */ 659 /* Local APIC */
667 660
668 if (acpi_table_parse_madt 661 if (acpi_table_parse_madt
669 (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0) 662 (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
670 printk(KERN_ERR PREFIX 663 printk(KERN_ERR PREFIX
671 "Error parsing LAPIC address override entry\n"); 664 "Error parsing LAPIC address override entry\n");
672 665
673 if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) 666 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
674 < 1) 667 < 1)
675 printk(KERN_ERR PREFIX 668 printk(KERN_ERR PREFIX
676 "Error parsing MADT - no LAPIC entries\n"); 669 "Error parsing MADT - no LAPIC entries\n");
677 670
678 if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) 671 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
679 < 0) 672 < 0)
680 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 673 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
681 674
682 /* I/O APIC */ 675 /* I/O APIC */
683 676
684 if (acpi_table_parse_madt 677 if (acpi_table_parse_madt
685 (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) 678 (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
686 printk(KERN_ERR PREFIX 679 printk(KERN_ERR PREFIX
687 "Error parsing MADT - no IOSAPIC entries\n"); 680 "Error parsing MADT - no IOSAPIC entries\n");
688 681
689 /* System-Level Interrupt Routing */ 682 /* System-Level Interrupt Routing */
690 683
691 if (acpi_table_parse_madt 684 if (acpi_table_parse_madt
692 (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, 685 (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
693 ACPI_MAX_PLATFORM_INTERRUPTS) < 0) 686 ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
694 printk(KERN_ERR PREFIX 687 printk(KERN_ERR PREFIX
695 "Error parsing platform interrupt source entry\n"); 688 "Error parsing platform interrupt source entry\n");
696 689
697 if (acpi_table_parse_madt 690 if (acpi_table_parse_madt
698 (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0) 691 (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
699 printk(KERN_ERR PREFIX 692 printk(KERN_ERR PREFIX
700 "Error parsing interrupt source overrides entry\n"); 693 "Error parsing interrupt source overrides entry\n");
701 694
702 if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0) 695 if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
703 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 696 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
704 skip_madt: 697 skip_madt:
705 698
@@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
709 * gets interrupts such as power and sleep buttons. If it's not 702 * gets interrupts such as power and sleep buttons. If it's not
710 * on a Legacy interrupt, it needs to be setup. 703 * on a Legacy interrupt, it needs to be setup.
711 */ 704 */
712 if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1) 705 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt) < 1)
713 printk(KERN_ERR PREFIX "Can't find FADT\n"); 706 printk(KERN_ERR PREFIX "Can't find FADT\n");
714 707
715#ifdef CONFIG_SMP 708#ifdef CONFIG_SMP
@@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
842{ 835{
843 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 836 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
844 union acpi_object *obj; 837 union acpi_object *obj;
845 struct acpi_table_lsapic *lsapic; 838 struct acpi_madt_local_sapic *lsapic;
846 cpumask_t tmp_map; 839 cpumask_t tmp_map;
847 long physid; 840 long physid;
848 int cpu; 841 int cpu;
@@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
854 return -EINVAL; 847 return -EINVAL;
855 848
856 obj = buffer.pointer; 849 obj = buffer.pointer;
857 if (obj->type != ACPI_TYPE_BUFFER || 850 if (obj->type != ACPI_TYPE_BUFFER)
858 obj->buffer.length < sizeof(*lsapic)) { 851 {
859 kfree(buffer.pointer); 852 kfree(buffer.pointer);
860 return -EINVAL; 853 return -EINVAL;
861 } 854 }
862 855
863 lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer; 856 lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
864 857
865 if ((lsapic->header.type != ACPI_MADT_LSAPIC) || 858 if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
866 (!lsapic->flags.enabled)) { 859 (!lsapic->lapic_flags & ACPI_MADT_ENABLED)) {
867 kfree(buffer.pointer); 860 kfree(buffer.pointer);
868 return -EINVAL; 861 return -EINVAL;
869 } 862 }
@@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
883 876
884 cpu_set(cpu, cpu_present_map); 877 cpu_set(cpu, cpu_present_map);
885 ia64_cpu_to_sapicid[cpu] = physid; 878 ia64_cpu_to_sapicid[cpu] = physid;
886 ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
887 879
888 *pcpu = cpu; 880 *pcpu = cpu;
889 return (0); 881 return (0);
@@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
893 885
894int acpi_unmap_lsapic(int cpu) 886int acpi_unmap_lsapic(int cpu)
895{ 887{
896 int i;
897
898 for (i = 0; i < MAX_SAPICS; i++) {
899 if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
900 ia64_acpiid_to_sapicid[i] = -1;
901 break;
902 }
903 }
904 ia64_cpu_to_sapicid[cpu] = -1; 888 ia64_cpu_to_sapicid[cpu] = -1;
905 cpu_clear(cpu, cpu_present_map); 889 cpu_clear(cpu, cpu_present_map);
906 890
@@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
920{ 904{
921 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 905 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
922 union acpi_object *obj; 906 union acpi_object *obj;
923 struct acpi_table_iosapic *iosapic; 907 struct acpi_madt_io_sapic *iosapic;
924 unsigned int gsi_base; 908 unsigned int gsi_base;
925 int pxm, node; 909 int pxm, node;
926 910
@@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
938 return AE_OK; 922 return AE_OK;
939 } 923 }
940 924
941 iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer; 925 iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
942 926
943 if (iosapic->header.type != ACPI_MADT_IOSAPIC) { 927 if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
944 kfree(buffer.pointer); 928 kfree(buffer.pointer);
945 return AE_OK; 929 return AE_OK;
946 } 930 }
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d72244..9d92097ce96d 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -79,6 +79,7 @@ crash_save_this_cpu()
79 final_note(buf); 79 final_note(buf);
80} 80}
81 81
82#ifdef CONFIG_SMP
82static int 83static int
83kdump_wait_cpu_freeze(void) 84kdump_wait_cpu_freeze(void)
84{ 85{
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
91 } 92 }
92 return 1; 93 return 1;
93} 94}
95#endif
94 96
95void 97void
96machine_crash_shutdown(struct pt_regs *pt) 98machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
116static void 118static void
117machine_kdump_on_init(void) 119machine_kdump_on_init(void)
118{ 120{
121 if (!ia64_kimage) {
122 printk(KERN_NOTICE "machine_kdump_on_init(): "
123 "kdump not configured\n");
124 return;
125 }
119 local_irq_disable(); 126 local_irq_disable();
120 kexec_disable_iosapic(); 127 kexec_disable_iosapic();
121 machine_kexec(ia64_kimage); 128 machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
132 atomic_inc(&kdump_cpu_freezed); 139 atomic_inc(&kdump_cpu_freezed);
133 kdump_status[cpuid] = 1; 140 kdump_status[cpuid] = 1;
134 mb(); 141 mb();
135 if (cpuid == 0) { 142#ifdef CONFIG_HOTPLUG_CPU
136 for (;;) 143 if (cpuid != 0)
137 cpu_relax();
138 } else
139 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]); 144 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
145#endif
146 for (;;)
147 cpu_relax();
140} 148}
141 149
142static int 150static int
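The kdump path above parks every secondary CPU and, where CPU hotplug is available, sends it back to SAL so the capture kernel can bring it up again. The body of kdump_wait_cpu_freeze() is not shown in this hunk; the rendezvous it implements looks roughly like the sketch below (an illustration with an assumed ~1s timeout, not the exact kernel routine; assumes <asm/atomic.h>, <asm/processor.h> and <linux/delay.h>):

    /* Illustrative rendezvous between the crashing CPU and the others. */
    static atomic_t frozen = ATOMIC_INIT(0);

    static void secondary_freeze(void)          /* runs on each other CPU */
    {
        atomic_inc(&frozen);
        mb();                                   /* publish the increment */
        for (;;)
            cpu_relax();
    }

    static int wait_for_freeze(int other_cpus)  /* runs on the crashing CPU */
    {
        int ms = 1000;                          /* assumed ~1s timeout */

        while (atomic_read(&frozen) != other_cpus && ms--)
            udelay(1000);
        return atomic_read(&frozen) == other_cpus;
    }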
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91c1408..da60e90eeeb1 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/types.h> 10#include <linux/types.h>
11 11
12#include <linux/uaccess.h> 12#include <asm/page.h>
13#include <asm/uaccess.h>
13 14
14/** 15/**
15 * copy_oldmem_page - copy one page from "oldmem" 16 * copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d4e1e4..6c03928544c2 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
380#endif 380#endif
381 return __va(md->phys_addr); 381 return __va(md->phys_addr);
382 } 382 }
383 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found", 383 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
384 __FUNCTION__); 384 __FUNCTION__);
385 return NULL; 385 return NULL;
386} 386}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 15234ed3a341..e7873eeae448 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1610,5 +1610,7 @@ sys_call_table:
1610 data8 sys_sync_file_range // 1300 1610 data8 sys_sync_file_range // 1300
1611 data8 sys_tee 1611 data8 sys_tee
1612 data8 sys_vmsplice 1612 data8 sys_vmsplice
1613 data8 sys_ni_syscall // reserved for move_pages
1614 data8 sys_getcpu
1613 1615
1614 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1616 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
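With the newly added table slot wired to sys_getcpu, userspace can query its current CPU and node directly. A small sketch, assuming the toolchain's <sys/syscall.h>/<asm/unistd.h> define __NR_getcpu (glibc had no wrapper at the time):

    /* Userspace sketch: invoke getcpu(2) directly via syscall(2). */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        unsigned cpu = 0, node = 0;

        /* third argument is an optional getcpu_cache; NULL is fine */
        if (syscall(__NR_getcpu, &cpu, &node, NULL) == 0)
            printf("cpu %u, node %u\n", cpu, node);
        return 0;
    }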
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7865cf..d6aab40c6416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
925 /* Clear the interrupt controller descriptor */ 925 /* Clear the interrupt controller descriptor */
926 idesc->chip = &no_irq_type; 926 idesc->chip = &no_irq_type;
927 927
928#ifdef CONFIG_SMP
929 /* Clear affinity */
930 cpus_setall(idesc->affinity);
931#endif
932
928 /* Clear the interrupt information */ 933 /* Clear the interrupt information */
929 memset(&iosapic_intr_info[vector], 0, 934 memset(&iosapic_intr_info[vector], 0,
930 sizeof(struct iosapic_intr_info)); 935 sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f660c5..4f0f3b8c1ee2 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
14#include <linux/kexec.h> 14#include <linux/kexec.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/efi.h>
17#include <asm/mmu_context.h> 18#include <asm/mmu_context.h>
18#include <asm/setup.h> 19#include <asm/setup.h>
19#include <asm/delay.h> 20#include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
68{ 69{
69} 70}
70 71
71void machine_shutdown(void)
72{
73 int cpu;
74
75 for_each_online_cpu(cpu) {
76 if (cpu != smp_processor_id())
77 cpu_down(cpu);
78 }
79 kexec_disable_iosapic();
80}
81
82/* 72/*
83 * Do not allocate memory (or fail in any way) in machine_kexec(). 73 * Do not allocate memory (or fail in any way) in machine_kexec().
84 * We are past the point of no return, committed to rebooting now. 74 * We are past the point of no return, committed to rebooting now.
85 */ 75 */
86extern void *efi_get_pal_addr(void);
87static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) 76static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
88{ 77{
89 struct kimage *image = arg; 78 struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
93 unsigned long vector; 82 unsigned long vector;
94 int ii; 83 int ii;
95 84
85 BUG_ON(!image);
96 if (image->type == KEXEC_TYPE_CRASH) { 86 if (image->type == KEXEC_TYPE_CRASH) {
97 crash_save_this_cpu(); 87 crash_save_this_cpu();
98 current->thread.ksp = (__u64)info->sw - 16; 88 current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
131 121
132void machine_kexec(struct kimage *image) 122void machine_kexec(struct kimage *image)
133{ 123{
124 BUG_ON(!image);
134 unw_init_running(ia64_machine_kexec, image); 125 unw_init_running(ia64_machine_kexec, image);
135 for(;;); 126 for(;;);
136} 127}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685abaf496..ae96d4176995 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
34#include <asm/ia32.h> 34#include <asm/ia32.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/kdebug.h> 36#include <asm/kdebug.h>
37#include <asm/kexec.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
38#include <asm/processor.h> 39#include <asm/processor.h>
39#include <asm/sal.h> 40#include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
803 ia64_pal_halt(min_power_state); 804 ia64_pal_halt(min_power_state);
804} 805}
805 806
807void machine_shutdown(void)
808{
809#ifdef CONFIG_HOTPLUG_CPU
810 int cpu;
811
812 for_each_online_cpu(cpu) {
813 if (cpu != smp_processor_id())
814 cpu_down(cpu);
815 }
816#endif
817#ifdef CONFIG_KEXEC
818 kexec_disable_iosapic();
819#endif
820}
821
806void 822void
807machine_restart (char *restart_cmd) 823machine_restart (char *restart_cmd)
808{ 824{
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e46b974..3f8918782e0c 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
607 */ 607 */
608 list_for_each_safe(this, next, &current->children) { 608 list_for_each_safe(this, next, &current->children) {
609 p = list_entry(this, struct task_struct, sibling); 609 p = list_entry(this, struct task_struct, sibling);
610 if (p->mm != mm) 610 if (p->tgid != child->tgid)
611 continue; 611 continue;
612 if (thread_matches(p, addr)) { 612 if (thread_matches(p, addr)) {
613 child = p; 613 child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
1405 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); 1405 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1406 1406
1407 /* make sure the single step/taken-branch trap bits are not set: */ 1407 /* make sure the single step/taken-branch trap bits are not set: */
1408 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1408 child_psr->ss = 0; 1409 child_psr->ss = 0;
1409 child_psr->tb = 0; 1410 child_psr->tb = 0;
1410} 1411}
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1525 * Make sure the single step/taken-branch trap bits 1526 * Make sure the single step/taken-branch trap bits
1526 * are not set: 1527 * are not set:
1527 */ 1528 */
1529 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1528 ia64_psr(pt)->ss = 0; 1530 ia64_psr(pt)->ss = 0;
1529 ia64_psr(pt)->tb = 0; 1531 ia64_psr(pt)->tb = 0;
1530 1532
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1556 goto out_tsk; 1558 goto out_tsk;
1557 1559
1558 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 1560 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1561 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1559 if (request == PTRACE_SINGLESTEP) { 1562 if (request == PTRACE_SINGLESTEP) {
1560 ia64_psr(pt)->ss = 1; 1563 ia64_psr(pt)->ss = 1;
1561 } else { 1564 } else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1595} 1598}
1596 1599
1597 1600
1598void 1601static void
1599syscall_trace (void) 1602syscall_trace (void)
1600{ 1603{
1601 if (!test_thread_flag(TIF_SYSCALL_TRACE))
1602 return;
1603 if (!(current->ptrace & PT_PTRACED))
1604 return;
1605 /* 1604 /*
1606 * The 0x80 provides a way for the tracing parent to 1605 * The 0x80 provides a way for the tracing parent to
1607 * distinguish between a syscall stop and SIGTRAP delivery. 1606 * distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1664 audit_syscall_exit(success, result); 1663 audit_syscall_exit(success, result);
1665 } 1664 }
1666 1665
1667 if (test_thread_flag(TIF_SYSCALL_TRACE) 1666 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1667 || test_thread_flag(TIF_SINGLESTEP))
1668 && (current->ptrace & PT_PTRACED)) 1668 && (current->ptrace & PT_PTRACED))
1669 syscall_trace(); 1669 syscall_trace();
1670} 1670}
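TIF_SINGLESTEP lets syscall_trace_leave() notify the tracer when only single-stepping, not full syscall tracing, is active. From userspace the feature is driven with PTRACE_SINGLESTEP; a minimal example of that usage:

    /* Userspace sketch: single-step a traced child a few times. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        int i, status;
        pid_t pid = fork();

        if (pid == 0) {                         /* child: ask to be traced */
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);
            execl("/bin/true", "true", (char *)NULL);
            _exit(1);
        }
        waitpid(pid, &status, 0);               /* child stops after exec */
        for (i = 0; i < 5 && !WIFEXITED(status); i++) {
            ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
            waitpid(pid, &status, 0);
        }
        if (!WIFEXITED(status))
            ptrace(PTRACE_CONT, pid, NULL, NULL);
        return 0;
    }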
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad567b8d432e..83c2629e1c4c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
569 { 1UL << 1, "spontaneous deferral"}, 569 { 1UL << 1, "spontaneous deferral"},
570 { 1UL << 2, "16-byte atomic ops" } 570 { 1UL << 2, "16-byte atomic ops" }
571 }; 571 };
572 char features[128], *cp, sep; 572 char features[128], *cp, *sep;
573 struct cpuinfo_ia64 *c = v; 573 struct cpuinfo_ia64 *c = v;
574 unsigned long mask; 574 unsigned long mask;
575 unsigned long proc_freq; 575 unsigned long proc_freq;
576 int i; 576 int i, size;
577 577
578 mask = c->features; 578 mask = c->features;
579 579
580 /* build the feature string: */ 580 /* build the feature string: */
581 memcpy(features, " standard", 10); 581 memcpy(features, "standard", 9);
582 cp = features; 582 cp = features;
583 sep = 0; 583 size = sizeof(features);
584 for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) { 584 sep = "";
585 for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
585 if (mask & feature_bits[i].mask) { 586 if (mask & feature_bits[i].mask) {
586 if (sep) 587 cp += snprintf(cp, size, "%s%s", sep,
587 *cp++ = sep; 588 feature_bits[i].feature_name),
588 sep = ','; 589 sep = ", ";
589 *cp++ = ' ';
590 strcpy(cp, feature_bits[i].feature_name);
591 cp += strlen(feature_bits[i].feature_name);
592 mask &= ~feature_bits[i].mask; 590 mask &= ~feature_bits[i].mask;
591 size = sizeof(features) - (cp - features);
593 } 592 }
594 } 593 }
595 if (mask) { 594 if (mask && size > 1) {
596 /* print unknown features as a hex value: */ 595 /* print unknown features as a hex value */
597 if (sep) 596 snprintf(cp, size, "%s0x%lx", sep, mask);
598 *cp++ = sep;
599 sprintf(cp, " 0x%lx", mask);
600 } 597 }
601 598
602 proc_freq = cpufreq_quick_get(cpunum); 599 proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
612 "model name : %s\n" 609 "model name : %s\n"
613 "revision : %u\n" 610 "revision : %u\n"
614 "archrev : %u\n" 611 "archrev : %u\n"
615 "features :%s\n" /* don't change this---it _is_ right! */ 612 "features : %s\n"
616 "cpu number : %lu\n" 613 "cpu number : %lu\n"
617 "cpu regs : %u\n" 614 "cpu regs : %u\n"
618 "cpu MHz : %lu.%06lu\n" 615 "cpu MHz : %lu.%06lu\n"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d6083a0936f4..8f3d0066f446 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -157,6 +157,7 @@ SECTIONS
157 } 157 }
158#endif 158#endif
159 159
160 . = ALIGN(8);
160 __con_initcall_start = .; 161 __con_initcall_start = .;
161 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) 162 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
162 { *(.con_initcall.init) } 163 { *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b9..63e6d49c5813 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
30#endif 30#endif
31 31
32/** 32/**
33 * show_mem - display a memory statistics summary 33 * show_mem - give short summary of memory stats
34 * 34 *
35 * Just walks the pages in the system and describes where they're allocated. 35 * Shows a simple page count of reserved and used pages in the system.
36 * For discontig machines, it does this on a per-pgdat basis.
36 */ 37 */
37void 38void show_mem(void)
38show_mem (void)
39{ 39{
40 int i, total = 0, reserved = 0; 40 int i, total_reserved = 0;
41 int shared = 0, cached = 0; 41 int total_shared = 0, total_cached = 0;
42 unsigned long total_present = 0;
43 pg_data_t *pgdat;
42 44
43 printk(KERN_INFO "Mem-info:\n"); 45 printk(KERN_INFO "Mem-info:\n");
44 show_free_areas(); 46 show_free_areas();
45
46 printk(KERN_INFO "Free swap: %6ldkB\n", 47 printk(KERN_INFO "Free swap: %6ldkB\n",
47 nr_swap_pages<<(PAGE_SHIFT-10)); 48 nr_swap_pages<<(PAGE_SHIFT-10));
48 i = max_mapnr; 49 printk(KERN_INFO "Node memory in pages:\n");
49 for (i = 0; i < max_mapnr; i++) { 50 for_each_online_pgdat(pgdat) {
50 if (!pfn_valid(i)) { 51 unsigned long present;
52 unsigned long flags;
53 int shared = 0, cached = 0, reserved = 0;
54
55 pgdat_resize_lock(pgdat, &flags);
56 present = pgdat->node_present_pages;
57 for(i = 0; i < pgdat->node_spanned_pages; i++) {
58 struct page *page;
59 if (pfn_valid(pgdat->node_start_pfn + i))
60 page = pfn_to_page(pgdat->node_start_pfn + i);
61 else {
51#ifdef CONFIG_VIRTUAL_MEM_MAP 62#ifdef CONFIG_VIRTUAL_MEM_MAP
52 if (max_gap < LARGE_GAP) 63 if (max_gap < LARGE_GAP)
53 continue; 64 continue;
54 i = vmemmap_find_next_valid_pfn(0, i) - 1;
55#endif 65#endif
56 continue; 66 i = vmemmap_find_next_valid_pfn(pgdat->node_id,
67 i) - 1;
68 continue;
69 }
70 if (PageReserved(page))
71 reserved++;
72 else if (PageSwapCache(page))
73 cached++;
74 else if (page_count(page))
75 shared += page_count(page)-1;
57 } 76 }
58 total++; 77 pgdat_resize_unlock(pgdat, &flags);
59 if (PageReserved(mem_map+i)) 78 total_present += present;
60 reserved++; 79 total_reserved += reserved;
61 else if (PageSwapCache(mem_map+i)) 80 total_cached += cached;
62 cached++; 81 total_shared += shared;
63 else if (page_count(mem_map + i)) 82 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
64 shared += page_count(mem_map + i) - 1; 83 "shrd: %10d, swpd: %10d\n", pgdat->node_id,
84 present, reserved, shared, cached);
65 } 85 }
66 printk(KERN_INFO "%d pages of RAM\n", total); 86 printk(KERN_INFO "%ld pages of RAM\n", total_present);
67 printk(KERN_INFO "%d reserved pages\n", reserved); 87 printk(KERN_INFO "%d reserved pages\n", total_reserved);
68 printk(KERN_INFO "%d pages shared\n", shared); 88 printk(KERN_INFO "%d pages shared\n", total_shared);
69 printk(KERN_INFO "%d pages swap cached\n", cached); 89 printk(KERN_INFO "%d pages swap cached\n", total_cached);
70 printk(KERN_INFO "%ld pages in page table cache\n", 90 printk(KERN_INFO "Total of %ld pages in page table cache\n",
71 pgtable_quicklist_total_size()); 91 pgtable_quicklist_total_size());
92 printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
72} 93}
73 94
95
74/* physical address where the bootmem map is located */ 96/* physical address where the bootmem map is located */
75unsigned long bootmap_start; 97unsigned long bootmap_start;
76 98
@@ -177,7 +199,7 @@ find_memory (void)
177 199
178#ifdef CONFIG_CRASH_DUMP 200#ifdef CONFIG_CRASH_DUMP
179 /* If we are doing a crash dump, we still need to know the real mem 201 /* If we are doing a crash dump, we still need to know the real mem
180 * size before original memory map is * reset. */ 202 * size before original memory map is reset. */
181 saved_max_pfn = max_pfn; 203 saved_max_pfn = max_pfn;
182#endif 204#endif
183} 205}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..6eae596c509d 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
412 return; 412 return;
413} 413}
414 414
415#ifdef CONFIG_SPARSEMEM
416/**
417 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
418 * @start: physical start of range
419 * @end: physical end of range
420 * @arg: unused
421 *
422 * Simply calls SPARSEMEM to register memory section(s).
423 */
424static int __init register_sparse_mem(unsigned long start, unsigned long end,
425 void *arg)
426{
427 int nid;
428
429 start = __pa(start) >> PAGE_SHIFT;
430 end = __pa(end) >> PAGE_SHIFT;
431 nid = early_pfn_to_nid(start);
432 memory_present(nid, start, end);
433
434 return 0;
435}
436
437static void __init arch_sparse_init(void)
438{
439 efi_memmap_walk(register_sparse_mem, NULL);
440 sparse_init();
441}
442#else
443#define arch_sparse_init() do {} while (0)
444#endif
445
446/** 415/**
447 * find_memory - walk the EFI memory map and setup the bootmem allocator 416 * find_memory - walk the EFI memory map and setup the bootmem allocator
448 * 417 *
@@ -473,6 +442,9 @@ void __init find_memory(void)
473 node_clear(node, memory_less_mask); 442 node_clear(node, memory_less_mask);
474 mem_data[node].min_pfn = ~0UL; 443 mem_data[node].min_pfn = ~0UL;
475 } 444 }
445
446 efi_memmap_walk(register_active_ranges, NULL);
447
476 /* 448 /*
477 * Initialize the boot memory maps in reverse order since that's 449 * Initialize the boot memory maps in reverse order since that's
478 * what the bootmem allocator expects 450 * what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
506 max_pfn = max_low_pfn; 478 max_pfn = max_low_pfn;
507 479
508 find_initrd(); 480 find_initrd();
481
482#ifdef CONFIG_CRASH_DUMP
483 /* If we are doing a crash dump, we still need to know the real mem
484 * size before original memory map is reset. */
485 saved_max_pfn = max_pfn;
486#endif
509} 487}
510 488
511#ifdef CONFIG_SMP 489#ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
654{ 632{
655 unsigned long end = start + len; 633 unsigned long end = start + len;
656 634
657 add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
658 mem_data[node].num_physpages += len >> PAGE_SHIFT; 635 mem_data[node].num_physpages += len >> PAGE_SHIFT;
659 if (start <= __pa(MAX_DMA_ADDRESS)) 636 if (start <= __pa(MAX_DMA_ADDRESS))
660 mem_data[node].num_dma_physpages += 637 mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
686 663
687 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; 664 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
688 665
689 arch_sparse_init();
690
691 efi_memmap_walk(filter_rsvd_memory, count_node_pages); 666 efi_memmap_walk(filter_rsvd_memory, count_node_pages);
692 667
668 sparse_memory_present_with_active_regions(MAX_NUMNODES);
669 sparse_init();
670
693#ifdef CONFIG_VIRTUAL_MEM_MAP 671#ifdef CONFIG_VIRTUAL_MEM_MAP
694 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * 672 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
695 sizeof(struct page)); 673 sizeof(struct page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/kexec.h>
22 23
23#include <asm/a.out.h> 24#include <asm/a.out.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
128 set_bit(PG_arch_1, &page->flags); /* mark page as clean */ 129 set_bit(PG_arch_1, &page->flags); /* mark page as clean */
129} 130}
130 131
132/*
133 * Since DMA is i-cache coherent, any (complete) pages that were written via
134 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
135 * flush them when they get mapped into an executable vm-area.
136 */
137void
138dma_mark_clean(void *addr, size_t size)
139{
140 unsigned long pg_addr, end;
141
142 pg_addr = PAGE_ALIGN((unsigned long) addr);
143 end = (unsigned long) addr + size;
144 while (pg_addr + PAGE_SIZE <= end) {
145 struct page *page = virt_to_page(pg_addr);
146 set_bit(PG_arch_1, &page->flags);
147 pg_addr += PAGE_SIZE;
148 }
149}
150
131inline void 151inline void
132ia64_set_rbs_bot (void) 152ia64_set_rbs_bot (void)
133{ 153{
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
595 return 0; 615 return 0;
596} 616}
597 617
618#endif /* CONFIG_VIRTUAL_MEM_MAP */
619
598int __init 620int __init
599register_active_ranges(u64 start, u64 end, void *arg) 621register_active_ranges(u64 start, u64 end, void *arg)
600{ 622{
601 add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT); 623 int nid = paddr_to_nid(__pa(start));
624
625 if (nid < 0)
626 nid = 0;
627#ifdef CONFIG_KEXEC
628 if (start > crashk_res.start && start < crashk_res.end)
629 start = crashk_res.end;
630 if (end > crashk_res.start && end < crashk_res.end)
631 end = crashk_res.start;
632#endif
633
634 if (start < end)
635 add_active_range(nid, __pa(start) >> PAGE_SHIFT,
636 __pa(end) >> PAGE_SHIFT);
602 return 0; 637 return 0;
603} 638}
604#endif /* CONFIG_VIRTUAL_MEM_MAP */
605 639
606static int __init 640static int __init
607count_reserved_pages (u64 start, u64 end, void *arg) 641count_reserved_pages (u64 start, u64 end, void *arg)
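dma_mark_clean() above flags only pages that lie entirely inside the DMA'd buffer: the start is rounded up with PAGE_ALIGN() and the loop stops as soon as a full page no longer fits before the end. A small worked sketch of that arithmetic, with assumed addresses and a 16KB page size:

    /* Worked example of the complete-page arithmetic (assumed values). */
    #include <stdio.h>

    #define PAGE_SIZE     0x4000UL               /* assume 16KB pages */
    #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x10003000UL, size = 0x9000UL;
        unsigned long pg = PAGE_ALIGN(addr), end = addr + size;

        /* only 0x10004000 and 0x10008000 are fully covered; the partial
         * first and last pages are left alone */
        while (pg + PAGE_SIZE <= end) {
            printf("mark page 0x%lx clean\n", pg);
            pg += PAGE_SIZE;
        }
        return 0;
    }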
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index abca6bd7962f..fcf7f93c4b61 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -38,12 +38,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
38 (u64) nasid, 0, 0, 0, 0, 0, 0); 38 (u64) nasid, 0, 0, 0, 0, 0, 0);
39 39
40 if ((int)ret_stuff.v0) 40 if ((int)ret_stuff.v0)
41 panic("hubii_eint_handler(): Fatal TIO Error"); 41 panic("%s: Fatal %s Error", __FUNCTION__,
42 ((nasid & 1) ? "TIO" : "HUBII"));
42 43
43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 44 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
44 (void)hubiio_crb_error_handler(hubdev_info); 45 (void)hubiio_crb_error_handler(hubdev_info);
45 } else 46 } else
46 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 47 if (nasid & 1) { /* TIO errors */
48 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
49 (u64) nasid, 0, 0, 0, 0, 0, 0);
50
51 if ((int)ret_stuff.v0)
52 panic("%s: Fatal TIO Error", __FUNCTION__);
53 } else
54 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
47 55
48 return IRQ_HANDLED; 56 return IRQ_HANDLED;
49} 57}
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index cb96b4ea7df6..8c331ca6e5c9 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,6 +13,7 @@
13#include <asm/sn/sn_sal.h> 13#include <asm/sn/sn_sal.h>
14#include "xtalk/hubdev.h" 14#include "xtalk/hubdev.h"
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <acpi/acnamesp.h>
16 17
17 18
18/* 19/*
@@ -31,6 +32,12 @@ struct acpi_vendor_uuid sn_uuid = {
31 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 }, 32 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
32}; 33};
33 34
35struct sn_pcidev_match {
36 u8 bus;
37 unsigned int devfn;
38 acpi_handle handle;
39};
40
34/* 41/*
35 * Perform the early IO init in PROM. 42 * Perform the early IO init in PROM.
36 */ 43 */
@@ -119,9 +126,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
119 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, 126 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
120 &sn_uuid, &buffer); 127 &sn_uuid, &buffer);
121 if (ACPI_FAILURE(status)) { 128 if (ACPI_FAILURE(status)) {
122 printk(KERN_ERR "get_acpi_pcibus_ptr: " 129 printk(KERN_ERR "%s: "
123 "get_acpi_bussoft_info() failed: %d\n", 130 "acpi_get_vendor_resource() failed (0x%x) for: ",
124 status); 131 __FUNCTION__, status);
132 acpi_ns_print_node_pathname(handle, NULL);
133 printk("\n");
125 return NULL; 134 return NULL;
126 } 135 }
127 resource = buffer.pointer; 136 resource = buffer.pointer;
@@ -130,8 +139,8 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
130 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != 139 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
131 sizeof(struct pcibus_bussoft *)) { 140 sizeof(struct pcibus_bussoft *)) {
132 printk(KERN_ERR 141 printk(KERN_ERR
133 "get_acpi_bussoft_ptr: Invalid vendor data " 142 "%s: Invalid vendor data length %d\n",
134 "length %d\n", vendor->byte_length); 143 __FUNCTION__, vendor->byte_length);
135 kfree(buffer.pointer); 144 kfree(buffer.pointer);
136 return NULL; 145 return NULL;
137 } 146 }
@@ -143,34 +152,254 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
143} 152}
144 153
145/* 154/*
146 * sn_acpi_bus_fixup 155 * sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
156 * pointers from the vendor resource using the
157 * provided acpi handle, and copy the structures
158 * into the argument buffers.
147 */ 159 */
148void 160static int
149sn_acpi_bus_fixup(struct pci_bus *bus) 161sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
162 struct sn_irq_info **sn_irq_info)
150{ 163{
151 struct pci_dev *pci_dev = NULL; 164 u64 addr;
152 struct pcibus_bussoft *prom_bussoft_ptr; 165 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
153 extern void sn_common_bus_fixup(struct pci_bus *, 166 struct sn_irq_info *irq_info, *irq_info_prom;
154 struct pcibus_bussoft *); 167 struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
168 struct acpi_resource *resource;
169 int ret = 0;
170 acpi_status status;
171 struct acpi_resource_vendor_typed *vendor;
155 172
156 if (!bus->parent) { /* If root bus */ 173 /*
157 prom_bussoft_ptr = sn_get_bussoft_ptr(bus); 174 * The pointer to this device's pcidev_info structure in
158 if (prom_bussoft_ptr == NULL) { 175 * the PROM, is in the vendor resource.
176 */
177 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
178 &sn_uuid, &buffer);
179 if (ACPI_FAILURE(status)) {
180 printk(KERN_ERR
181 "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
182 __FUNCTION__, status);
183 acpi_ns_print_node_pathname(handle, NULL);
184 printk("\n");
185 return 1;
186 }
187
188 resource = buffer.pointer;
189 vendor = &resource->data.vendor_typed;
190 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
191 sizeof(struct pci_devdev_info *)) {
192 printk(KERN_ERR
193 "%s: Invalid vendor data length: %d for: ",
194 __FUNCTION__, vendor->byte_length);
195 acpi_ns_print_node_pathname(handle, NULL);
196 printk("\n");
197 ret = 1;
198 goto exit;
199 }
200
201 pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
202 if (!pcidev_ptr)
203 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
204
205 memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
206 pcidev_prom_ptr = __va(addr);
207 memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
208
209 /* Get the IRQ info */
210 irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
211 if (!irq_info)
212 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
213
214 if (pcidev_ptr->pdi_sn_irq_info) {
215 irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
216 memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
217 }
218
219 *pcidev_info = pcidev_ptr;
220 *sn_irq_info = irq_info;
221
222exit:
223 kfree(buffer.pointer);
224 return ret;
225}
226
227static unsigned int
228get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
229{
230 unsigned long adr;
231 acpi_handle child;
232 unsigned int devfn;
233 int function;
234 acpi_handle parent;
235 int slot;
236 acpi_status status;
237
238 /*
239 * Do an upward search to find the root bus device, and
240 * obtain the host devfn from the previous child device.
241 */
242 child = device_handle;
243 while (child) {
244 status = acpi_get_parent(child, &parent);
245 if (ACPI_FAILURE(status)) {
246 printk(KERN_ERR "%s: acpi_get_parent() failed "
247 "(0x%x) for: ", __FUNCTION__, status);
248 acpi_ns_print_node_pathname(child, NULL);
249 printk("\n");
250 panic("%s: Unable to find host devfn\n", __FUNCTION__);
251 }
252 if (parent == rootbus_handle)
253 break;
254 child = parent;
255 }
256 if (!child) {
257 printk(KERN_ERR "%s: Unable to find root bus for: ",
258 __FUNCTION__);
259 acpi_ns_print_node_pathname(device_handle, NULL);
260 printk("\n");
261 BUG();
262 }
263
264 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
265 if (ACPI_FAILURE(status)) {
266 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
267 __FUNCTION__, status);
268 acpi_ns_print_node_pathname(child, NULL);
269 printk("\n");
270 panic("%s: Unable to find host devfn\n", __FUNCTION__);
271 }
272
273 slot = (adr >> 16) & 0xffff;
274 function = adr & 0xffff;
275 devfn = PCI_DEVFN(slot, function);
276 return devfn;
277}
278
279/*
280 * find_matching_device - Callback routine to find the ACPI device
281 * that matches up with our pci_dev device.
282 * Matching is done on bus number and devfn.
283 * To find the bus number for a particular
284 * ACPI device, we must look at the _BBN method
285 * of its parent.
286 */
287static acpi_status
288find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
289{
290 unsigned long bbn = -1;
291 unsigned long adr;
292 acpi_handle parent = NULL;
293 acpi_status status;
294 unsigned int devfn;
295 int function;
296 int slot;
297 struct sn_pcidev_match *info = context;
298
299 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
300 &adr);
301 if (ACPI_SUCCESS(status)) {
302 status = acpi_get_parent(handle, &parent);
303 if (ACPI_FAILURE(status)) {
159 printk(KERN_ERR 304 printk(KERN_ERR
160 "sn_pci_fixup_bus: 0x%04x:0x%02x Unable to " 305 "%s: acpi_get_parent() failed (0x%x) for: ",
161 "obtain prom_bussoft_ptr\n", 306 __FUNCTION__, status);
162 pci_domain_nr(bus), bus->number); 307 acpi_ns_print_node_pathname(handle, NULL);
163 return; 308 printk("\n");
309 return AE_OK;
310 }
311 status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
312 NULL, &bbn);
313 if (ACPI_FAILURE(status)) {
314 printk(KERN_ERR
315 "%s: Failed to find _BBN in parent of: ",
316 __FUNCTION__);
317 acpi_ns_print_node_pathname(handle, NULL);
318 printk("\n");
319 return AE_OK;
320 }
321
322 slot = (adr >> 16) & 0xffff;
323 function = adr & 0xffff;
324 devfn = PCI_DEVFN(slot, function);
325 if ((info->devfn == devfn) && (info->bus == bbn)) {
326 /* We have a match! */
327 info->handle = handle;
328 return 1;
164 } 329 }
165 sn_common_bus_fixup(bus, prom_bussoft_ptr);
166 } 330 }
167 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 331 return AE_OK;
168 sn_pci_fixup_slot(pci_dev); 332}
333
334/*
335 * sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
336 * device matching the specified pci_dev,
337 * and return the pcidev info and irq info.
338 */
339int
340sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
341 struct sn_irq_info **sn_irq_info)
342{
343 unsigned int host_devfn;
344 struct sn_pcidev_match pcidev_match;
345 acpi_handle rootbus_handle;
346 unsigned long segment;
347 acpi_status status;
348
349 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
350 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
351 &segment);
352 if (ACPI_SUCCESS(status)) {
353 if (segment != pci_domain_nr(dev)) {
354 printk(KERN_ERR
355 "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
356 __FUNCTION__, segment, pci_domain_nr(dev));
357 acpi_ns_print_node_pathname(rootbus_handle, NULL);
358 printk("\n");
359 return 1;
360 }
361 } else {
362 printk(KERN_ERR "%s: Unable to get __SEG from: ",
363 __FUNCTION__);
364 acpi_ns_print_node_pathname(rootbus_handle, NULL);
365 printk("\n");
366 return 1;
367 }
368
369 /*
370 * We want to search all devices in this segment/domain
371 * of the ACPI namespace for the matching ACPI device,
372 * which holds the pcidev_info pointer in its vendor resource.
373 */
374 pcidev_match.bus = dev->bus->number;
375 pcidev_match.devfn = dev->devfn;
376 pcidev_match.handle = NULL;
377
378 acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
379 find_matching_device, &pcidev_match, NULL);
380
381 if (!pcidev_match.handle) {
382 printk(KERN_ERR
383 "%s: Could not find matching ACPI device for %s.\n",
384 __FUNCTION__, pci_name(dev));
385 return 1;
169 } 386 }
387
388 if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
389 return 1;
390
391 /* Build up the pcidev_info.pdi_slot_host_handle */
392 host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
393 (*pcidev_info)->pdi_slot_host_handle =
394 ((unsigned long) pci_domain_nr(dev) << 40) |
395 /* bus == 0 */
396 host_devfn;
397 return 0;
170} 398}
171 399
172/* 400/*
173 * sn_acpi_slot_fixup - Perform any SN specific slot fixup. 401 * sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
402 * Perform any SN specific slot fixup.
174 * At present there does not appear to be 403 * At present there does not appear to be
175 * any generic way to handle a ROM image 404 * any generic way to handle a ROM image
176 * that has been shadowed by the PROM, so 405 * that has been shadowed by the PROM, so
@@ -179,11 +408,18 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
179 */ 408 */
180 409
181void 410void
182sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 411sn_acpi_slot_fixup(struct pci_dev *dev)
183{ 412{
184 void __iomem *addr; 413 void __iomem *addr;
414 struct pcidev_info *pcidev_info = NULL;
415 struct sn_irq_info *sn_irq_info = NULL;
185 size_t size; 416 size_t size;
186 417
418 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
419 panic("%s: Failure obtaining pcidev_info for %s\n",
420 __FUNCTION__, pci_name(dev));
421 }
422
187 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { 423 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
188 /* 424 /*
189 * A valid ROM image exists and has been shadowed by the 425 * A valid ROM image exists and has been shadowed by the
@@ -200,8 +436,11 @@ sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
200 (unsigned long) addr + size; 436 (unsigned long) addr + size;
201 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY; 437 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
202 } 438 }
439 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
203} 440}
204 441
442EXPORT_SYMBOL(sn_acpi_slot_fixup);
443
205static struct acpi_driver acpi_sn_hubdev_driver = { 444static struct acpi_driver acpi_sn_hubdev_driver = {
206 .name = "SGI HUBDEV Driver", 445 .name = "SGI HUBDEV Driver",
207 .ids = "SGIHUB,SGITIO", 446 .ids = "SGIHUB,SGITIO",
@@ -212,6 +451,33 @@ static struct acpi_driver acpi_sn_hubdev_driver = {
212 451
213 452
214/* 453/*
454 * sn_acpi_bus_fixup - Perform SN specific setup of software structs
455 * (pcibus_bussoft, pcidev_info) and hardware
456 * registers, for the specified bus and devices under it.
457 */
458void
459sn_acpi_bus_fixup(struct pci_bus *bus)
460{
461 struct pci_dev *pci_dev = NULL;
462 struct pcibus_bussoft *prom_bussoft_ptr;
463
464 if (!bus->parent) { /* If root bus */
465 prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
466 if (prom_bussoft_ptr == NULL) {
467 printk(KERN_ERR
468 "%s: 0x%04x:0x%02x Unable to "
469 "obtain prom_bussoft_ptr\n",
470 __FUNCTION__, pci_domain_nr(bus), bus->number);
471 return;
472 }
473 sn_common_bus_fixup(bus, prom_bussoft_ptr);
474 }
475 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
476 sn_acpi_slot_fixup(pci_dev);
477 }
478}
479
480/*
215 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the 481 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
216 * nodes and root buses in the DSDT. As a result, bus scanning 482 * nodes and root buses in the DSDT. As a result, bus scanning
217 * will be initiated by the Linux ACPI code. 483 * will be initiated by the Linux ACPI code.
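Both get_host_devfn() and find_matching_device() above recover the PCI slot/function from a device's _ADR object (slot in bits 31..16, function in bits 15..0). A helper-style sketch of that decoding, assuming <linux/acpi.h> and <linux/pci.h>:

    /* Sketch: evaluate _ADR and fold it into a Linux devfn. */
    static int handle_to_devfn(acpi_handle handle, unsigned int *devfn)
    {
        unsigned long adr;
        acpi_status status;

        status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
        if (ACPI_FAILURE(status))
            return -1;
        *devfn = PCI_DEVFN((adr >> 16) & 0xffff, adr & 0xffff);
        return 0;
    }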
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index d4dd8f4b6b8d..d48bcd83253c 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -26,14 +26,10 @@
26#include <linux/acpi.h> 26#include <linux/acpi.h>
27#include <asm/sn/sn2/sn_hwperf.h> 27#include <asm/sn/sn2/sn_hwperf.h>
28#include <asm/sn/acpi.h> 28#include <asm/sn/acpi.h>
29#include "acpi/acglobal.h"
29 30
30extern void sn_init_cpei_timer(void); 31extern void sn_init_cpei_timer(void);
31extern void register_sn_procfs(void); 32extern void register_sn_procfs(void);
32extern void sn_acpi_bus_fixup(struct pci_bus *);
33extern void sn_bus_fixup(struct pci_bus *);
34extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
35extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
36extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
37extern void sn_io_acpi_init(void); 33extern void sn_io_acpi_init(void);
38extern void sn_io_init(void); 34extern void sn_io_init(void);
39 35
@@ -48,6 +44,9 @@ struct sysdata_el {
48 44
49int sn_ioif_inited; /* SN I/O infrastructure initialized? */ 45int sn_ioif_inited; /* SN I/O infrastructure initialized? */
50 46
47int sn_acpi_rev; /* SN ACPI revision */
48EXPORT_SYMBOL_GPL(sn_acpi_rev);
49
51struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ 50struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
52 51
53/* 52/*
@@ -99,25 +98,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
99} 98}
100 99
101/* 100/*
102 * Retrieve the pci device information given the bus and device|function number.
103 */
104static inline u64
105sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
106 u64 sn_irq_info)
107{
108 struct ia64_sal_retval ret_stuff;
109 ret_stuff.status = 0;
110 ret_stuff.v0 = 0;
111
112 SAL_CALL_NOLOCK(ret_stuff,
113 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
114 (u64) segment, (u64) bus_number, (u64) devfn,
115 (u64) pci_dev,
116 sn_irq_info, 0, 0);
117 return ret_stuff.v0;
118}
119
120/*
121 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified 101 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
122 * device. 102 * device.
123 */ 103 */
@@ -249,50 +229,25 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
249} 229}
250 230
251/* 231/*
252 * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent 232 * sn_pci_fixup_slot()
253 * with the Linux PCI abstraction layer. Resources
254 * acquired from our PCI provider include PIO maps
255 * to BAR space and interrupt objects.
256 */ 233 */
257void sn_pci_fixup_slot(struct pci_dev *dev) 234void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
235 struct sn_irq_info *sn_irq_info)
258{ 236{
259 int segment = pci_domain_nr(dev->bus); 237 int segment = pci_domain_nr(dev->bus);
260 int status = 0;
261 struct pcibus_bussoft *bs; 238 struct pcibus_bussoft *bs;
262 struct pci_bus *host_pci_bus; 239 struct pci_bus *host_pci_bus;
263 struct pci_dev *host_pci_dev; 240 struct pci_dev *host_pci_dev;
264 struct pcidev_info *pcidev_info; 241 unsigned int bus_no, devfn;
265 struct sn_irq_info *sn_irq_info;
266 unsigned int bus_no, devfn;
267 242
268 pci_dev_get(dev); /* for the sysdata pointer */ 243 pci_dev_get(dev); /* for the sysdata pointer */
269 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
270 if (!pcidev_info)
271 BUG(); /* Cannot afford to run out of memory */
272
273 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
274 if (!sn_irq_info)
275 BUG(); /* Cannot afford to run out of memory */
276
277 /* Call to retrieve pci device information needed by kernel. */
278 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
279 dev->devfn,
280 (u64) __pa(pcidev_info),
281 (u64) __pa(sn_irq_info));
282 if (status)
283 BUG(); /* Cannot get platform pci device information */
284 244
285 /* Add pcidev_info to list in pci_controller.platform_data */ 245 /* Add pcidev_info to list in pci_controller.platform_data */
286 list_add_tail(&pcidev_info->pdi_list, 246 list_add_tail(&pcidev_info->pdi_list,
287 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info)); 247 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
288
289 if (SN_ACPI_BASE_SUPPORT())
290 sn_acpi_slot_fixup(dev, pcidev_info);
291 else
292 sn_more_slot_fixup(dev, pcidev_info);
293 /* 248 /*
294 * Using the PROMs values for the PCI host bus, get the Linux 249 * Using the PROMs values for the PCI host bus, get the Linux
295 * PCI host_pci_dev struct and set up host bus linkages 250 * PCI host_pci_dev struct and set up host bus linkages
296 */ 251 */
297 252
298 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; 253 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
@@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
489 sprintf(address, "%s^%d", address, geo_slot(geoid)); 444 sprintf(address, "%s^%d", address, geo_slot(geoid));
490} 445}
491 446
492/*
493 * sn_pci_fixup_bus() - Perform SN specific setup of software structs
494 * (pcibus_bussoft, pcidev_info) and hardware
495 * registers, for the specified bus and devices under it.
496 */
497void __devinit 447void __devinit
498sn_pci_fixup_bus(struct pci_bus *bus) 448sn_pci_fixup_bus(struct pci_bus *bus)
499{ 449{
@@ -519,6 +469,15 @@ sn_io_early_init(void)
519 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) 469 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
520 return 0; 470 return 0;
521 471
472 /* we set the acpi revision to that of the DSDT table OEM rev. */
473 {
474 struct acpi_table_header *header = NULL;
475
476 acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
477 BUG_ON(header == NULL);
478 sn_acpi_rev = header->oem_revision;
479 }
480
522 /* 481 /*
523 * prime sn_pci_provider[]. Individial provider init routines will 482 * prime sn_pci_provider[]. Individial provider init routines will
524 * override their respective default entries. 483 * override their respective default entries.
@@ -544,8 +503,12 @@ sn_io_early_init(void)
544 register_sn_procfs(); 503 register_sn_procfs();
545#endif 504#endif
546 505
547 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", 506 {
548 acpi_gbl_DSDT->oem_revision); 507 struct acpi_table_header *header;
508 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
509 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
510 header->oem_revision);
511 }
549 if (SN_ACPI_BASE_SUPPORT()) 512 if (SN_ACPI_BASE_SUPPORT())
550 sn_io_acpi_init(); 513 sn_io_acpi_init();
551 else 514 else
@@ -605,7 +568,6 @@ sn_io_late_init(void)
605 568
606fs_initcall(sn_io_late_init); 569fs_initcall(sn_io_late_init);
607 570
608EXPORT_SYMBOL(sn_pci_fixup_slot);
609EXPORT_SYMBOL(sn_pci_unfixup_slot); 571EXPORT_SYMBOL(sn_pci_unfixup_slot);
610EXPORT_SYMBOL(sn_bus_store_sysdata); 572EXPORT_SYMBOL(sn_bus_store_sysdata);
611EXPORT_SYMBOL(sn_bus_free_sysdata); 573EXPORT_SYMBOL(sn_bus_free_sysdata);
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 9ad843e0383b..600be3ebae05 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
56 return ret_stuff.v0; 56 return ret_stuff.v0;
57} 57}
58 58
59/*
60 * Retrieve the pci device information given the bus and device|function number.
61 */
62static inline u64
63sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
64 u64 sn_irq_info)
65{
66 struct ia64_sal_retval ret_stuff;
67 ret_stuff.status = 0;
68 ret_stuff.v0 = 0;
69
70 SAL_CALL_NOLOCK(ret_stuff,
71 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
72 (u64) segment, (u64) bus_number, (u64) devfn,
73 (u64) pci_dev,
74 sn_irq_info, 0, 0);
75 return ret_stuff.v0;
76}
77
59 78
60/* 79/*
61 * sn_fixup_ionodes() - This routine initializes the HUB data structure for 80 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
@@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
172} 191}
173 192
174/* 193/*
175 * sn_more_slot_fixup() - We are not running with an ACPI capable PROM, 194 * sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
176 * and need to convert the pci_dev->resource 195 * and need to convert the pci_dev->resource
177 * 'start' and 'end' addresses to mapped addresses, 196 * 'start' and 'end' addresses to mapped addresses,
178 * and setup the pci_controller->window array entries. 197 * and setup the pci_controller->window array entries.
179 */ 198 */
180void 199void
181sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 200sn_io_slot_fixup(struct pci_dev *dev)
182{ 201{
183 unsigned int count = 0; 202 unsigned int count = 0;
184 int idx; 203 int idx;
185 s64 pci_addrs[PCI_ROM_RESOURCE + 1]; 204 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
186 unsigned long addr, end, size, start; 205 unsigned long addr, end, size, start;
206 struct pcidev_info *pcidev_info;
207 struct sn_irq_info *sn_irq_info;
208 int status;
209
210 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
211 if (!pcidev_info)
212 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
213
214 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
215 if (!sn_irq_info)
216 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
217
218 /* Call to retrieve pci device information needed by kernel. */
219 status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
220 (u64) dev->bus->number,
221 dev->devfn,
222 (u64) __pa(pcidev_info),
223 (u64) __pa(sn_irq_info));
224
225 if (status)
226 BUG(); /* Cannot get platform pci device information */
227
187 228
188 /* Copy over PIO Mapped Addresses */ 229 /* Copy over PIO Mapped Addresses */
189 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 230 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
219 */ 260 */
220 if (count > 0) 261 if (count > 0)
221 sn_pci_window_fixup(dev, count, pci_addrs); 262 sn_pci_window_fixup(dev, count, pci_addrs);
263
264 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
222} 265}
223 266
267EXPORT_SYMBOL(sn_io_slot_fixup);
268
224/* 269/*
225 * sn_pci_controller_fixup() - This routine sets up a bus's resources 270 * sn_pci_controller_fixup() - This routine sets up a bus's resources
226 * consistent with the Linux PCI abstraction layer. 271 * consistent with the Linux PCI abstraction layer.
@@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
272{ 317{
273 struct pci_dev *pci_dev = NULL; 318 struct pci_dev *pci_dev = NULL;
274 struct pcibus_bussoft *prom_bussoft_ptr; 319 struct pcibus_bussoft *prom_bussoft_ptr;
275 extern void sn_common_bus_fixup(struct pci_bus *,
276 struct pcibus_bussoft *);
277
278 320
279 if (!bus->parent) { /* If root bus */ 321 if (!bus->parent) { /* If root bus */
280 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data; 322 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
@@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
291 prom_bussoft_ptr->bs_legacy_mem); 333 prom_bussoft_ptr->bs_legacy_mem);
292 } 334 }
293 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 335 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
294 sn_pci_fixup_slot(pci_dev); 336 sn_io_slot_fixup(pci_dev);
295 } 337 }
296 338
297} 339}
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 4aa4f301d56d..ab7e2fd40798 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
@@ -26,9 +26,10 @@
26 * @port: port to convert 26 * @port: port to convert
27 * 27 *
28 * Legacy in/out instructions are converted to ld/st instructions 28 * Legacy in/out instructions are converted to ld/st instructions
29 * on IA64. This routine will convert a port number into a valid 29 * on IA64. This routine will convert a port number into a valid
30 * SN i/o address. Used by sn_in*() and sn_out*(). 30 * SN i/o address. Used by sn_in*() and sn_out*().
31 */ 31 */
32
32void *sn_io_addr(unsigned long port) 33void *sn_io_addr(unsigned long port)
33{ 34{
34 if (!IS_RUNNING_ON_SIMULATOR()) { 35 if (!IS_RUNNING_ON_SIMULATOR()) {
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 6846dc9b432d..04a8256017eb 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -20,7 +20,8 @@
20#include "xtalk/hubdev.h" 20#include "xtalk/hubdev.h"
21 21
22int 22int
23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) 23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
24 char **ssdt)
24{ 25{
25 struct ia64_sal_retval ret_stuff; 26 struct ia64_sal_retval ret_stuff;
26 u64 busnum; 27 u64 busnum;
@@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
32 segment = soft->pbi_buscommon.bs_persist_segment; 33 segment = soft->pbi_buscommon.bs_persist_segment;
33 busnum = soft->pbi_buscommon.bs_persist_busnum; 34 busnum = soft->pbi_buscommon.bs_persist_busnum;
34 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment, 35 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
35 busnum, (u64) device, (u64) resp, 0, 0, 0); 36 busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
37 0, 0);
36 38
37 return (int)ret_stuff.v0; 39 return (int)ret_stuff.v0;
38} 40}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bbd386f572d9..44a0224c32dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -575,6 +575,7 @@ config SGI_IP27
575 select DMA_IP27 575 select DMA_IP27
576 select EARLY_PRINTK 576 select EARLY_PRINTK
577 select HW_HAS_PCI 577 select HW_HAS_PCI
578 select NR_CPUS_DEFAULT_64
578 select PCI_DOMAINS 579 select PCI_DOMAINS
579 select SYS_HAS_CPU_R10000 580 select SYS_HAS_CPU_R10000
580 select SYS_SUPPORTS_64BIT_KERNEL 581 select SYS_SUPPORTS_64BIT_KERNEL
@@ -612,6 +613,7 @@ config SIBYTE_BIGSUR
612 bool "Sibyte BCM91480B-BigSur" 613 bool "Sibyte BCM91480B-BigSur"
613 select BOOT_ELF32 614 select BOOT_ELF32
614 select DMA_COHERENT 615 select DMA_COHERENT
616 select NR_CPUS_DEFAULT_4
615 select PCI_DOMAINS 617 select PCI_DOMAINS
616 select SIBYTE_BCM1x80 618 select SIBYTE_BCM1x80
617 select SWAP_IO_SPACE 619 select SWAP_IO_SPACE
@@ -623,6 +625,7 @@ config SIBYTE_SWARM
623 bool "Sibyte BCM91250A-SWARM" 625 bool "Sibyte BCM91250A-SWARM"
624 select BOOT_ELF32 626 select BOOT_ELF32
625 select DMA_COHERENT 627 select DMA_COHERENT
628 select NR_CPUS_DEFAULT_2
626 select SIBYTE_SB1250 629 select SIBYTE_SB1250
627 select SWAP_IO_SPACE 630 select SWAP_IO_SPACE
628 select SYS_HAS_CPU_SB1 631 select SYS_HAS_CPU_SB1
@@ -635,6 +638,7 @@ config SIBYTE_SENTOSA
635 depends on EXPERIMENTAL 638 depends on EXPERIMENTAL
636 select BOOT_ELF32 639 select BOOT_ELF32
637 select DMA_COHERENT 640 select DMA_COHERENT
641 select NR_CPUS_DEFAULT_2
638 select SIBYTE_SB1250 642 select SIBYTE_SB1250
639 select SWAP_IO_SPACE 643 select SWAP_IO_SPACE
640 select SYS_HAS_CPU_SB1 644 select SYS_HAS_CPU_SB1
@@ -668,6 +672,7 @@ config SIBYTE_PTSWARM
668 depends on EXPERIMENTAL 672 depends on EXPERIMENTAL
669 select BOOT_ELF32 673 select BOOT_ELF32
670 select DMA_COHERENT 674 select DMA_COHERENT
675 select NR_CPUS_DEFAULT_2
671 select SIBYTE_SB1250 676 select SIBYTE_SB1250
672 select SWAP_IO_SPACE 677 select SWAP_IO_SPACE
673 select SYS_HAS_CPU_SB1 678 select SYS_HAS_CPU_SB1
@@ -680,6 +685,7 @@ config SIBYTE_LITTLESUR
680 depends on EXPERIMENTAL 685 depends on EXPERIMENTAL
681 select BOOT_ELF32 686 select BOOT_ELF32
682 select DMA_COHERENT 687 select DMA_COHERENT
688 select NR_CPUS_DEFAULT_2
683 select SIBYTE_SB1250 689 select SIBYTE_SB1250
684 select SWAP_IO_SPACE 690 select SWAP_IO_SPACE
685 select SYS_HAS_CPU_SB1 691 select SYS_HAS_CPU_SB1
@@ -790,23 +796,6 @@ config TOSHIBA_RBTX4938
790 796
791endchoice 797endchoice
792 798
793config KEXEC
794 bool "Kexec system call (EXPERIMENTAL)"
795 depends on EXPERIMENTAL
796 help
797 kexec is a system call that implements the ability to shutdown your
798 current kernel, and to start another kernel. It is like a reboot
799 but it is indepedent of the system firmware. And like a reboot
800 you can start any kernel with it, not just Linux.
801
802 The name comes from the similiarity to the exec system call.
803
804 It is an ongoing process to be certain the hardware in a machine
805 is properly shutdown, so do not be surprised if this code does not
806 initially work for you. It may help to enable device hotplugging
807 support. As of this writing the exact hardware interface is
808 strongly in flux, so no good recommendation can be made.
809
810source "arch/mips/ddb5xxx/Kconfig" 799source "arch/mips/ddb5xxx/Kconfig"
811source "arch/mips/gt64120/ev64120/Kconfig" 800source "arch/mips/gt64120/ev64120/Kconfig"
812source "arch/mips/jazz/Kconfig" 801source "arch/mips/jazz/Kconfig"
@@ -1541,6 +1530,8 @@ config MIPS_MT_SMTC
1541 select CPU_MIPSR2_IRQ_VI 1530 select CPU_MIPSR2_IRQ_VI
1542 select CPU_MIPSR2_SRS 1531 select CPU_MIPSR2_SRS
1543 select MIPS_MT 1532 select MIPS_MT
1533 select NR_CPUS_DEFAULT_2
1534 select NR_CPUS_DEFAULT_8
1544 select SMP 1535 select SMP
1545 select SYS_SUPPORTS_SMP 1536 select SYS_SUPPORTS_SMP
1546 help 1537 help
@@ -1756,13 +1747,34 @@ config SMP
1756config SYS_SUPPORTS_SMP 1747config SYS_SUPPORTS_SMP
1757 bool 1748 bool
1758 1749
1750config NR_CPUS_DEFAULT_2
1751 bool
1752
1753config NR_CPUS_DEFAULT_4
1754 bool
1755
1756config NR_CPUS_DEFAULT_8
1757 bool
1758
1759config NR_CPUS_DEFAULT_16
1760 bool
1761
1762config NR_CPUS_DEFAULT_32
1763 bool
1764
1765config NR_CPUS_DEFAULT_64
1766 bool
1767
1759config NR_CPUS 1768config NR_CPUS
1760 int "Maximum number of CPUs (2-64)" 1769 int "Maximum number of CPUs (2-64)"
1761 range 2 64 1770 range 2 64
1762 depends on SMP 1771 depends on SMP
1763 default "64" if SGI_IP27 1772 default "2" if NR_CPUS_DEFAULT_2
1764 default "2" 1773 default "4" if NR_CPUS_DEFAULT_4
1765 default "8" if MIPS_MT_SMTC 1774 default "8" if NR_CPUS_DEFAULT_8
1775 default "16" if NR_CPUS_DEFAULT_16
1776 default "32" if NR_CPUS_DEFAULT_32
1777 default "64" if NR_CPUS_DEFAULT_64
1766 help 1778 help
1767 This allows you to specify the maximum number of CPUs which this 1779 This allows you to specify the maximum number of CPUs which this
1768 kernel will support. The maximum supported value is 32 for 32-bit 1780 kernel will support. The maximum supported value is 32 for 32-bit
@@ -1859,6 +1871,40 @@ config MIPS_INSANE_LARGE
1859 This will result in additional memory usage, so it is not 1871 This will result in additional memory usage, so it is not
1860 recommended for normal users. 1872 recommended for normal users.
1861 1873
1874config KEXEC
1875 bool "Kexec system call (EXPERIMENTAL)"
1876 depends on EXPERIMENTAL
1877 help
1878 kexec is a system call that implements the ability to shutdown your
1879 current kernel, and to start another kernel. It is like a reboot
1880 but it is indepedent of the system firmware. And like a reboot
1881 you can start any kernel with it, not just Linux.
1882
1883 The name comes from the similiarity to the exec system call.
1884
1885 It is an ongoing process to be certain the hardware in a machine
1886 is properly shutdown, so do not be surprised if this code does not
1887 initially work for you. It may help to enable device hotplugging
1888 support. As of this writing the exact hardware interface is
1889 strongly in flux, so no good recommendation can be made.
1890
1891config SECCOMP
1892 bool "Enable seccomp to safely compute untrusted bytecode"
1893 depends on PROC_FS && BROKEN
1894 default y
1895 help
1896 This kernel feature is useful for number crunching applications
1897 that may need to compute untrusted bytecode during their
1898 execution. By using pipes or other transports made available to
1899 the process as file descriptors supporting the read/write
1900 syscalls, it's possible to isolate those applications in
1901 their own address space using seccomp. Once seccomp is
1902 enabled via /proc/<pid>/seccomp, it cannot be disabled
1903 and the task is only allowed to execute a few safe syscalls
1904 defined by each seccomp mode.
1905
1906 If unsure, say Y. Only embedded systems should say N here.
1907
1862endmenu 1908endmenu
1863 1909
1864config RWSEM_GENERIC_SPINLOCK 1910config RWSEM_GENERIC_SPINLOCK
@@ -2025,23 +2071,6 @@ config BINFMT_ELF32
2025 bool 2071 bool
2026 default y if MIPS32_O32 || MIPS32_N32 2072 default y if MIPS32_O32 || MIPS32_N32
2027 2073
2028config SECCOMP
2029 bool "Enable seccomp to safely compute untrusted bytecode"
2030 depends on PROC_FS && BROKEN
2031 default y
2032 help
2033 This kernel feature is useful for number crunching applications
2034 that may need to compute untrusted bytecode during their
2035 execution. By using pipes or other transports made available to
2036 the process as file descriptors supporting the read/write
2037 syscalls, it's possible to isolate those applications in
2038 their own address space using seccomp. Once seccomp is
2039 enabled via /proc/<pid>/seccomp, it cannot be disabled
2040 and the task is only allowed to execute a few safe syscalls
2041 defined by each seccomp mode.
2042
2043 If unsure, say Y. Only embedded systems should say N here.
2044
2045config PM 2074config PM
2046 bool "Power Management support (EXPERIMENTAL)" 2075 bool "Power Management support (EXPERIMENTAL)"
2047 depends on EXPERIMENTAL && SOC_AU1X00 2076 depends on EXPERIMENTAL && SOC_AU1X00
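
The SECCOMP help text moved in the hunk above relies on the legacy /proc/<pid>/seccomp switch. A minimal userspace sketch, assuming that interface and the mode-1 behaviour the help text describes (once enabled, roughly only read, write, sigreturn and the raw exit syscall remain usable), could look like this; it is illustration only and not part of the patch:

/* Illustrative only: enable legacy seccomp mode 1 via /proc/self/seccomp. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open("/proc/self/seccomp", O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1)
		return 1;

	/*
	 * seccomp is active from here on; even close(fd) would be fatal,
	 * so the untrusted computation may only read()/write() on
	 * descriptors that were opened beforehand.
	 */

	syscall(SYS_exit, 0);	/* raw exit; library exit paths use other syscalls */
	return 0;		/* not reached */
}

Anything outside that small syscall set terminates the task, which is why the sketch leaves with the raw exit syscall rather than returning through libc.
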
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 5d6afb52d904..9351f1c04a9d 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -22,10 +22,10 @@ config CMDLINE
22 string "Default kernel command string" 22 string "Default kernel command string"
23 default "" 23 default ""
24 help 24 help
25 On some platforms, there is currently no way for the boot loader to 25 On some platforms, there is currently no way for the boot loader to
26 pass arguments to the kernel. For these platforms, you can supply 26 pass arguments to the kernel. For these platforms, you can supply
27 some command-line options at build time by entering them here. In 27 some command-line options at build time by entering them here. In
28 other cases you can specify kernel args so that you don't have 28 other cases you can specify kernel args so that you don't have
29 to set them up in board prom initialization routines. 29 to set them up in board prom initialization routines.
30 30
31config DEBUG_STACK_USAGE 31config DEBUG_STACK_USAGE
diff --git a/arch/mips/arc/identify.c b/arch/mips/arc/identify.c
index 3ba7c47f9f23..4b907369b0f9 100644
--- a/arch/mips/arc/identify.c
+++ b/arch/mips/arc/identify.c
@@ -77,7 +77,7 @@ static struct smatch * __init string_to_mach(const char *s)
77{ 77{
78 int i; 78 int i;
79 79
80 for (i = 0; i < (sizeof(mach_table) / sizeof (mach_table[0])); i++) { 80 for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
81 if (!strcmp(s, mach_table[i].arcname)) 81 if (!strcmp(s, mach_table[i].arcname))
82 return &mach_table[i]; 82 return &mach_table[i];
83 } 83 }
diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c
index 8a9ef58cc399..456cb81a32d9 100644
--- a/arch/mips/arc/memory.c
+++ b/arch/mips/arc/memory.c
@@ -141,30 +141,20 @@ void __init prom_meminit(void)
141 } 141 }
142} 142}
143 143
144unsigned long __init prom_free_prom_memory(void) 144void __init prom_free_prom_memory(void)
145{ 145{
146 unsigned long freed = 0;
147 unsigned long addr; 146 unsigned long addr;
148 int i; 147 int i;
149 148
150 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP) 149 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
151 return 0; 150 return;
152 151
153 for (i = 0; i < boot_mem_map.nr_map; i++) { 152 for (i = 0; i < boot_mem_map.nr_map; i++) {
154 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 153 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
155 continue; 154 continue;
156 155
157 addr = boot_mem_map.map[i].addr; 156 addr = boot_mem_map.map[i].addr;
158 while (addr < boot_mem_map.map[i].addr 157 free_init_pages("prom memory",
159 + boot_mem_map.map[i].size) { 158 addr, addr + boot_mem_map.map[i].size);
160 ClearPageReserved(virt_to_page(__va(addr)));
161 init_page_count(virt_to_page(__va(addr)));
162 free_page((unsigned long)__va(addr));
163 addr += PAGE_SIZE;
164 freed += PAGE_SIZE;
165 }
166 } 159 }
167 printk(KERN_INFO "Freeing prom memory: %ldkb freed\n", freed >> 10);
168
169 return freed;
170} 160}
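
The arch/mips/arc/memory.c hunk above is typical of the rest of the series: prom_free_prom_memory() drops its return value and any real freeing is funnelled through free_init_pages(), whose callers here pass a label plus start and end addresses. A minimal sketch of the resulting shape for a hypothetical board -- the region macros and the visibility of free_init_pages() are assumptions for illustration, not part of this patch -- is:

/* Hypothetical board file -- illustrates the post-patch shape only. */
#include <linux/init.h>

#define BOARD_PROM_START 0UL	/* placeholder: a real board would use its PROM region */
#define BOARD_PROM_END   0UL	/* placeholder: empty range, so nothing is actually freed */

void __init prom_free_prom_memory(void)
{
	/*
	 * Boards with nothing to hand back simply leave the body empty;
	 * free_init_pages() (provided by the MIPS mm code, assumed visible
	 * here) takes a label plus start/end addresses, and every caller
	 * in this patch ignores any return value.
	 */
	free_init_pages("prom memory", BOARD_PROM_START, BOARD_PROM_END);
}
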
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 9cf7b6715836..ea6e99fbe2f7 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -233,7 +233,7 @@ void restore_local_and_enable(int controller, unsigned long mask)
233 233
234 234
235static struct irq_chip rise_edge_irq_type = { 235static struct irq_chip rise_edge_irq_type = {
236 .typename = "Au1000 Rise Edge", 236 .name = "Au1000 Rise Edge",
237 .ack = mask_and_ack_rise_edge_irq, 237 .ack = mask_and_ack_rise_edge_irq,
238 .mask = local_disable_irq, 238 .mask = local_disable_irq,
239 .mask_ack = mask_and_ack_rise_edge_irq, 239 .mask_ack = mask_and_ack_rise_edge_irq,
@@ -242,7 +242,7 @@ static struct irq_chip rise_edge_irq_type = {
242}; 242};
243 243
244static struct irq_chip fall_edge_irq_type = { 244static struct irq_chip fall_edge_irq_type = {
245 .typename = "Au1000 Fall Edge", 245 .name = "Au1000 Fall Edge",
246 .ack = mask_and_ack_fall_edge_irq, 246 .ack = mask_and_ack_fall_edge_irq,
247 .mask = local_disable_irq, 247 .mask = local_disable_irq,
248 .mask_ack = mask_and_ack_fall_edge_irq, 248 .mask_ack = mask_and_ack_fall_edge_irq,
@@ -251,7 +251,7 @@ static struct irq_chip fall_edge_irq_type = {
251}; 251};
252 252
253static struct irq_chip either_edge_irq_type = { 253static struct irq_chip either_edge_irq_type = {
254 .typename = "Au1000 Rise or Fall Edge", 254 .name = "Au1000 Rise or Fall Edge",
255 .ack = mask_and_ack_either_edge_irq, 255 .ack = mask_and_ack_either_edge_irq,
256 .mask = local_disable_irq, 256 .mask = local_disable_irq,
257 .mask_ack = mask_and_ack_either_edge_irq, 257 .mask_ack = mask_and_ack_either_edge_irq,
@@ -260,7 +260,7 @@ static struct irq_chip either_edge_irq_type = {
260}; 260};
261 261
262static struct irq_chip level_irq_type = { 262static struct irq_chip level_irq_type = {
263 .typename = "Au1000 Level", 263 .name = "Au1000 Level",
264 .ack = mask_and_ack_level_irq, 264 .ack = mask_and_ack_level_irq,
265 .mask = local_disable_irq, 265 .mask = local_disable_irq,
266 .mask_ack = mask_and_ack_level_irq, 266 .mask_ack = mask_and_ack_level_irq,
diff --git a/arch/mips/au1000/common/pci.c b/arch/mips/au1000/common/pci.c
index 9f8ce08e173b..6c25e6c09f78 100644
--- a/arch/mips/au1000/common/pci.c
+++ b/arch/mips/au1000/common/pci.c
@@ -76,13 +76,17 @@ static int __init au1x_pci_setup(void)
76 } 76 }
77 77
78#ifdef CONFIG_DMA_NONCOHERENT 78#ifdef CONFIG_DMA_NONCOHERENT
79 /* 79 {
80 * Set the NC bit in controller for Au1500 pre-AC silicon 80 /*
81 */ 81 * Set the NC bit in controller for Au1500 pre-AC silicon
82 u32 prid = read_c0_prid(); 82 */
83 if ( (prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) { 83 u32 prid = read_c0_prid();
84 au_writel( 1<<16 | au_readl(Au1500_PCI_CFG), Au1500_PCI_CFG); 84
85 printk("Non-coherent PCI accesses enabled\n"); 85 if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
86 au_writel((1 << 16) | au_readl(Au1500_PCI_CFG),
87 Au1500_PCI_CFG);
88 printk("Non-coherent PCI accesses enabled\n");
89 }
86 } 90 }
87#endif 91#endif
88 92
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 6fce60af005d..a8637cdb5b4b 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -149,9 +149,8 @@ int get_ethernet_addr(char *ethernet_addr)
149 return 0; 149 return 0;
150} 150}
151 151
152unsigned long __init prom_free_prom_memory(void) 152void __init prom_free_prom_memory(void)
153{ 153{
154 return 0;
155} 154}
156 155
157EXPORT_SYMBOL(prom_getcmdline); 156EXPORT_SYMBOL(prom_getcmdline);
diff --git a/arch/mips/au1000/common/setup.c b/arch/mips/au1000/common/setup.c
index 919172db560c..13fe187f35d6 100644
--- a/arch/mips/au1000/common/setup.c
+++ b/arch/mips/au1000/common/setup.c
@@ -141,17 +141,20 @@ void __init plat_mem_setup(void)
141/* This routine should be valid for all Au1x based boards */ 141/* This routine should be valid for all Au1x based boards */
142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
143{ 143{
144 u32 start, end;
145
146 /* Don't fixup 36 bit addresses */ 144 /* Don't fixup 36 bit addresses */
147 if ((phys_addr >> 32) != 0) return phys_addr; 145 if ((phys_addr >> 32) != 0)
146 return phys_addr;
148 147
149#ifdef CONFIG_PCI 148#ifdef CONFIG_PCI
150 start = (u32)Au1500_PCI_MEM_START; 149 {
151 end = (u32)Au1500_PCI_MEM_END; 150 u32 start, end;
152 /* check for pci memory window */ 151
153 if ((phys_addr >= start) && ((phys_addr + size) < end)) { 152 start = (u32)Au1500_PCI_MEM_START;
154 return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START); 153 end = (u32)Au1500_PCI_MEM_END;
154 /* check for pci memory window */
155 if ((phys_addr >= start) && ((phys_addr + size) < end))
156 return (phys_t)
157 ((phys_addr - start) + Au1500_PCI_MEM_START);
155 } 158 }
156#endif 159#endif
157 160
diff --git a/arch/mips/au1000/pb1100/board_setup.c b/arch/mips/au1000/pb1100/board_setup.c
index 2d1533f116c0..6bc1f8e1b608 100644
--- a/arch/mips/au1000/pb1100/board_setup.c
+++ b/arch/mips/au1000/pb1100/board_setup.c
@@ -47,8 +47,7 @@ void board_reset (void)
47 47
48void __init board_setup(void) 48void __init board_setup(void)
49{ 49{
50 u32 pin_func; 50 volatile void __iomem * base = (volatile void __iomem *) 0xac000000UL;
51 u32 sys_freqctrl, sys_clksrc;
52 51
53 // set AUX clock to 12MHz * 8 = 96 MHz 52 // set AUX clock to 12MHz * 8 = 96 MHz
54 au_writel(8, SYS_AUXPLL); 53 au_writel(8, SYS_AUXPLL);
@@ -56,58 +55,62 @@ void __init board_setup(void)
56 udelay(100); 55 udelay(100);
57 56
58#ifdef CONFIG_USB_OHCI 57#ifdef CONFIG_USB_OHCI
59 // configure pins GPIO[14:9] as GPIO 58 {
60 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80); 59 u32 pin_func, sys_freqctrl, sys_clksrc;
61 60
62 /* zero and disable FREQ2 */ 61 // configure pins GPIO[14:9] as GPIO
63 sys_freqctrl = au_readl(SYS_FREQCTRL0); 62 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
64 sys_freqctrl &= ~0xFFF00000; 63
65 au_writel(sys_freqctrl, SYS_FREQCTRL0); 64 /* zero and disable FREQ2 */
66 65 sys_freqctrl = au_readl(SYS_FREQCTRL0);
67 /* zero and disable USBH/USBD/IrDA clock */ 66 sys_freqctrl &= ~0xFFF00000;
68 sys_clksrc = au_readl(SYS_CLKSRC); 67 au_writel(sys_freqctrl, SYS_FREQCTRL0);
69 sys_clksrc &= ~0x0000001F; 68
70 au_writel(sys_clksrc, SYS_CLKSRC); 69 /* zero and disable USBH/USBD/IrDA clock */
71 70 sys_clksrc = au_readl(SYS_CLKSRC);
72 sys_freqctrl = au_readl(SYS_FREQCTRL0); 71 sys_clksrc &= ~0x0000001F;
73 sys_freqctrl &= ~0xFFF00000; 72 au_writel(sys_clksrc, SYS_CLKSRC);
74 73
75 sys_clksrc = au_readl(SYS_CLKSRC); 74 sys_freqctrl = au_readl(SYS_FREQCTRL0);
76 sys_clksrc &= ~0x0000001F; 75 sys_freqctrl &= ~0xFFF00000;
77 76
78 // FREQ2 = aux/2 = 48 MHz 77 sys_clksrc = au_readl(SYS_CLKSRC);
79 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20)); 78 sys_clksrc &= ~0x0000001F;
80 au_writel(sys_freqctrl, SYS_FREQCTRL0); 79
81 80 // FREQ2 = aux/2 = 48 MHz
82 /* 81 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20));
83 * Route 48MHz FREQ2 into USBH/USBD/IrDA 82 au_writel(sys_freqctrl, SYS_FREQCTRL0);
84 */ 83
85 sys_clksrc |= ((4<<2) | (0<<1) | 0 ); 84 /*
86 au_writel(sys_clksrc, SYS_CLKSRC); 85 * Route 48MHz FREQ2 into USBH/USBD/IrDA
87 86 */
88 /* setup the static bus controller */ 87 sys_clksrc |= ((4<<2) | (0<<1) | 0 );
89 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */ 88 au_writel(sys_clksrc, SYS_CLKSRC);
90 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */ 89
91 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */ 90 /* setup the static bus controller */
92 91 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */
93 // get USB Functionality pin state (device vs host drive pins) 92 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
94 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000); 93 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
95 // 2nd USB port is USB host 94
96 pin_func |= 0x8000; 95 // get USB Functionality pin state (device vs host drive pins)
97 au_writel(pin_func, SYS_PINFUNC); 96 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000);
97 // 2nd USB port is USB host
98 pin_func |= 0x8000;
99 au_writel(pin_func, SYS_PINFUNC);
100 }
98#endif // defined (CONFIG_USB_OHCI) 101#endif // defined (CONFIG_USB_OHCI)
99 102
100 /* Enable sys bus clock divider when IDLE state or no bus activity. */ 103 /* Enable sys bus clock divider when IDLE state or no bus activity. */
101 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); 104 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
102 105
103 // Enable the RTC if not already enabled 106 // Enable the RTC if not already enabled
104 if (!(readb(0xac000028) & 0x20)) { 107 if (!(readb(base + 0x28) & 0x20)) {
105 writeb(readb(0xac000028) | 0x20, 0xac000028); 108 writeb(readb(base + 0x28) | 0x20, base + 0x28);
106 au_sync(); 109 au_sync();
107 } 110 }
108 // Put the clock in BCD mode 111 // Put the clock in BCD mode
109 if (readb(0xac00002C) & 0x4) { /* reg B */ 112 if (readb(base + 0x2C) & 0x4) { /* reg B */
110 writeb(readb(0xac00002c) & ~0x4, 0xac00002c); 113 writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
111 au_sync(); 114 au_sync();
112 } 115 }
113} 116}
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index 91983ba407c4..b73b2d18bf56 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -137,33 +137,20 @@ static void pb1200_shutdown_irq( unsigned int irq_nr )
137 return; 137 return;
138} 138}
139 139
140static inline void pb1200_mask_and_ack_irq(unsigned int irq_nr)
141{
142 pb1200_disable_irq( irq_nr );
143}
144
145static void pb1200_end_irq(unsigned int irq_nr)
146{
147 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
148 pb1200_enable_irq(irq_nr);
149 }
150}
151
152static struct irq_chip external_irq_type = 140static struct irq_chip external_irq_type =
153{ 141{
154#ifdef CONFIG_MIPS_PB1200 142#ifdef CONFIG_MIPS_PB1200
155 "Pb1200 Ext", 143 .name = "Pb1200 Ext",
156#endif 144#endif
157#ifdef CONFIG_MIPS_DB1200 145#ifdef CONFIG_MIPS_DB1200
158 "Db1200 Ext", 146 .name = "Db1200 Ext",
159#endif 147#endif
160 pb1200_startup_irq, 148 .startup = pb1200_startup_irq,
161 pb1200_shutdown_irq, 149 .shutdown = pb1200_shutdown_irq,
162 pb1200_enable_irq, 150 .ack = pb1200_disable_irq,
163 pb1200_disable_irq, 151 .mask = pb1200_disable_irq,
164 pb1200_mask_and_ack_irq, 152 .mask_ack = pb1200_disable_irq,
165 pb1200_end_irq, 153 .unmask = pb1200_enable_irq,
166 NULL
167}; 154};
168 155
169void _board_init_irq(void) 156void _board_init_irq(void)
@@ -172,7 +159,8 @@ void _board_init_irq(void)
172 159
173 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++) 160 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++)
174 { 161 {
175 irq_desc[irq_nr].chip = &external_irq_type; 162 set_irq_chip_and_handler(irq_nr, &external_irq_type,
163 handle_level_irq);
176 pb1200_disable_irq(irq_nr); 164 pb1200_disable_irq(irq_nr);
177 } 165 }
178 166
diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c
index 2e2061a286c5..1ecab6350421 100644
--- a/arch/mips/basler/excite/excite_irq.c
+++ b/arch/mips/basler/excite/excite_irq.c
@@ -47,9 +47,9 @@ extern asmlinkage void excite_handle_int(void);
47 */ 47 */
48void __init arch_init_irq(void) 48void __init arch_init_irq(void)
49{ 49{
50 mips_cpu_irq_init(0); 50 mips_cpu_irq_init();
51 rm7k_cpu_irq_init(8); 51 rm7k_cpu_irq_init();
52 rm9k_cpu_irq_init(12); 52 rm9k_cpu_irq_init();
53 53
54#ifdef CONFIG_KGDB 54#ifdef CONFIG_KGDB
55 excite_kgdb_init(); 55 excite_kgdb_init();
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index 4c46f0e73783..fe93b846923b 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -104,7 +104,7 @@ void __init arch_init_irq(void)
104 GT_WRITE(GT_INTRMASK_OFS, 0); 104 GT_WRITE(GT_INTRMASK_OFS, 0);
105 105
106 init_i8259_irqs(); /* 0 ... 15 */ 106 init_i8259_irqs(); /* 0 ... 15 */
107 mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */ 107 mips_cpu_irq_init(); /* 16 ... 23 */
108 108
109 /* 109 /*
110 * Mask all cpu interrupts 110 * Mask all cpu interrupts
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index e8f0f20b852d..a4b69b543bd9 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -204,8 +204,7 @@ void __init prom_init(void)
204 add_memory_region(0x0, memsz, BOOT_MEM_RAM); 204 add_memory_region(0x0, memsz, BOOT_MEM_RAM);
205} 205}
206 206
207unsigned long __init prom_free_prom_memory(void) 207void __init prom_free_prom_memory(void)
208{ 208{
209 /* Nothing to do! */ 209 /* Nothing to do! */
210 return 0;
211} 210}
diff --git a/arch/mips/ddb5xxx/common/prom.c b/arch/mips/ddb5xxx/common/prom.c
index efef0f57ce1e..54a857b5e3ba 100644
--- a/arch/mips/ddb5xxx/common/prom.c
+++ b/arch/mips/ddb5xxx/common/prom.c
@@ -59,9 +59,8 @@ void __init prom_init(void)
59#endif 59#endif
60} 60}
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67#if defined(CONFIG_DDB5477) 66#if defined(CONFIG_DDB5477)
diff --git a/arch/mips/ddb5xxx/ddb5477/irq.c b/arch/mips/ddb5xxx/ddb5477/irq.c
index a8bd2e66705c..2b23234a5b95 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq.c
@@ -17,6 +17,7 @@
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18 18
19#include <asm/i8259.h> 19#include <asm/i8259.h>
20#include <asm/irq_cpu.h>
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/mipsregs.h> 22#include <asm/mipsregs.h>
22#include <asm/debug.h> 23#include <asm/debug.h>
@@ -73,7 +74,6 @@ set_pci_int_attr(u32 pci, u32 intn, u32 active, u32 trigger)
73} 74}
74 75
75extern void vrc5477_irq_init(u32 base); 76extern void vrc5477_irq_init(u32 base);
76extern void mips_cpu_irq_init(u32 base);
77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL }; 77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
78 78
79void __init arch_init_irq(void) 79void __init arch_init_irq(void)
@@ -125,7 +125,7 @@ void __init arch_init_irq(void)
125 125
126 /* init all controllers */ 126 /* init all controllers */
127 init_i8259_irqs(); 127 init_i8259_irqs();
128 mips_cpu_irq_init(CPU_IRQ_BASE); 128 mips_cpu_irq_init();
129 vrc5477_irq_init(VRC5477_IRQ_BASE); 129 vrc5477_irq_init(VRC5477_IRQ_BASE);
130 130
131 131
@@ -146,8 +146,7 @@ u8 i8259_interrupt_ack(void)
146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE); 146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE);
147 ddb_out32(DDB_PCIINIT10, reg); 147 ddb_out32(DDB_PCIINIT10, reg);
148 148
149 /* i8259.c set the base vector to be 0x0 */ 149 return irq;
150 return irq + I8259_IRQ_BASE;
151} 150}
152/* 151/*
153 * the first level int-handler will jump here if it is a vrc5477 irq 152 * the first level int-handler will jump here if it is a vrc5477 irq
@@ -177,7 +176,7 @@ static void vrc5477_irq_dispatch(void)
177 /* check for i8259 interrupts */ 176 /* check for i8259 interrupts */
178 if (intStatus & (1 << VRC5477_I8259_CASCADE)) { 177 if (intStatus & (1 << VRC5477_I8259_CASCADE)) {
179 int i8259_irq = i8259_interrupt_ack(); 178 int i8259_irq = i8259_interrupt_ack();
180 do_IRQ(I8259_IRQ_BASE + i8259_irq); 179 do_IRQ(i8259_irq);
181 return; 180 return;
182 } 181 }
183 } 182 }
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index 96249aa5df5d..98c3b15eb369 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -82,7 +82,7 @@ vrc5477_irq_end(unsigned int irq)
82} 82}
83 83
84struct irq_chip vrc5477_irq_controller = { 84struct irq_chip vrc5477_irq_controller = {
85 .typename = "vrc5477_irq", 85 .name = "vrc5477_irq",
86 .ack = vrc5477_irq_ack, 86 .ack = vrc5477_irq_ack,
87 .mask = vrc5477_irq_disable, 87 .mask = vrc5477_irq_disable,
88 .mask_ack = vrc5477_irq_ack, 88 .mask_ack = vrc5477_irq_ack,
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 4c7cb4048d35..3acb133668dc 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -62,7 +62,7 @@ static inline void end_ioasic_irq(unsigned int irq)
62} 62}
63 63
64static struct irq_chip ioasic_irq_type = { 64static struct irq_chip ioasic_irq_type = {
65 .typename = "IO-ASIC", 65 .name = "IO-ASIC",
66 .ack = ack_ioasic_irq, 66 .ack = ack_ioasic_irq,
67 .mask = mask_ioasic_irq, 67 .mask = mask_ioasic_irq,
68 .mask_ack = ack_ioasic_irq, 68 .mask_ack = ack_ioasic_irq,
@@ -84,7 +84,7 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
84} 84}
85 85
86static struct irq_chip ioasic_dma_irq_type = { 86static struct irq_chip ioasic_dma_irq_type = {
87 .typename = "IO-ASIC-DMA", 87 .name = "IO-ASIC-DMA",
88 .ack = ack_ioasic_dma_irq, 88 .ack = ack_ioasic_dma_irq,
89 .mask = mask_ioasic_dma_irq, 89 .mask = mask_ioasic_dma_irq,
90 .mask_ack = ack_ioasic_dma_irq, 90 .mask_ack = ack_ioasic_dma_irq,
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 916e46b8ccd8..02439dc0ba83 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -58,7 +58,7 @@ static void ack_kn02_irq(unsigned int irq)
58} 58}
59 59
60static struct irq_chip kn02_irq_type = { 60static struct irq_chip kn02_irq_type = {
61 .typename = "KN02-CSR", 61 .name = "KN02-CSR",
62 .ack = ack_kn02_irq, 62 .ack = ack_kn02_irq,
63 .mask = mask_kn02_irq, 63 .mask = mask_kn02_irq,
64 .mask_ack = ack_kn02_irq, 64 .mask_ack = ack_kn02_irq,
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 3aa01d268f2d..5a557e268f78 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -92,9 +92,9 @@ void __init prom_meminit(u32 magic)
92 rex_setup_memory_region(); 92 rex_setup_memory_region();
93} 93}
94 94
95unsigned long __init prom_free_prom_memory(void) 95void __init prom_free_prom_memory(void)
96{ 96{
97 unsigned long addr, end; 97 unsigned long end;
98 98
99 /* 99 /*
100 * Free everything below the kernel itself but leave 100 * Free everything below the kernel itself but leave
@@ -114,16 +114,5 @@ unsigned long __init prom_free_prom_memory(void)
114#endif 114#endif
115 end = __pa(&_text); 115 end = __pa(&_text);
116 116
117 addr = PAGE_SIZE; 117 free_init_pages("unused PROM memory", PAGE_SIZE, end);
118 while (addr < end) {
119 ClearPageReserved(virt_to_page(__va(addr)));
120 init_page_count(virt_to_page(__va(addr)));
121 free_page((unsigned long)__va(addr));
122 addr += PAGE_SIZE;
123 }
124
125 printk("Freeing unused PROM memory: %ldkb freed\n",
126 (end - PAGE_SIZE) >> 10);
127
128 return end - PAGE_SIZE;
129} 118}
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index d34032ac492a..1058e2f409bb 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -234,7 +234,7 @@ static void __init dec_init_kn01(void)
234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl, 234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
235 sizeof(kn01_cpu_mask_nr_tbl)); 235 sizeof(kn01_cpu_mask_nr_tbl));
236 236
237 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 237 mips_cpu_irq_init();
238 238
239} /* dec_init_kn01 */ 239} /* dec_init_kn01 */
240 240
@@ -309,7 +309,7 @@ static void __init dec_init_kn230(void)
309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl, 309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
310 sizeof(kn230_cpu_mask_nr_tbl)); 310 sizeof(kn230_cpu_mask_nr_tbl));
311 311
312 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 312 mips_cpu_irq_init();
313 313
314} /* dec_init_kn230 */ 314} /* dec_init_kn230 */
315 315
@@ -403,7 +403,7 @@ static void __init dec_init_kn02(void)
403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl, 403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
404 sizeof(kn02_asic_mask_nr_tbl)); 404 sizeof(kn02_asic_mask_nr_tbl));
405 405
406 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 406 mips_cpu_irq_init();
407 init_kn02_irqs(KN02_IRQ_BASE); 407 init_kn02_irqs(KN02_IRQ_BASE);
408 408
409} /* dec_init_kn02 */ 409} /* dec_init_kn02 */
@@ -504,7 +504,7 @@ static void __init dec_init_kn02ba(void)
504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl, 504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
505 sizeof(kn02ba_asic_mask_nr_tbl)); 505 sizeof(kn02ba_asic_mask_nr_tbl));
506 506
507 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 507 mips_cpu_irq_init();
508 init_ioasic_irqs(IO_IRQ_BASE); 508 init_ioasic_irqs(IO_IRQ_BASE);
509 509
510} /* dec_init_kn02ba */ 510} /* dec_init_kn02ba */
@@ -601,7 +601,7 @@ static void __init dec_init_kn02ca(void)
601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl, 601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
602 sizeof(kn02ca_asic_mask_nr_tbl)); 602 sizeof(kn02ca_asic_mask_nr_tbl));
603 603
604 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 604 mips_cpu_irq_init();
605 init_ioasic_irqs(IO_IRQ_BASE); 605 init_ioasic_irqs(IO_IRQ_BASE);
606 606
607} /* dec_init_kn02ca */ 607} /* dec_init_kn02ca */
@@ -702,7 +702,7 @@ static void __init dec_init_kn03(void)
702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl, 702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
703 sizeof(kn03_asic_mask_nr_tbl)); 703 sizeof(kn03_asic_mask_nr_tbl));
704 704
705 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 705 mips_cpu_irq_init();
706 init_ioasic_irqs(IO_IRQ_BASE); 706 init_ioasic_irqs(IO_IRQ_BASE);
707 707
708} /* dec_init_kn03 */ 708} /* dec_init_kn03 */
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 8d880f0b06ec..96df37b77759 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -57,7 +57,7 @@ static void emma2rh_irq_disable(unsigned int irq)
57} 57}
58 58
59struct irq_chip emma2rh_irq_controller = { 59struct irq_chip emma2rh_irq_controller = {
60 .typename = "emma2rh_irq", 60 .name = "emma2rh_irq",
61 .ack = emma2rh_irq_disable, 61 .ack = emma2rh_irq_disable,
62 .mask = emma2rh_irq_disable, 62 .mask = emma2rh_irq_disable,
63 .mask_ack = emma2rh_irq_disable, 63 .mask_ack = emma2rh_irq_disable,
diff --git a/arch/mips/emma2rh/markeins/irq.c b/arch/mips/emma2rh/markeins/irq.c
index c93369cb4115..3299b6dfe764 100644
--- a/arch/mips/emma2rh/markeins/irq.c
+++ b/arch/mips/emma2rh/markeins/irq.c
@@ -106,7 +106,7 @@ void __init arch_init_irq(void)
106 emma2rh_irq_init(EMMA2RH_IRQ_BASE); 106 emma2rh_irq_init(EMMA2RH_IRQ_BASE);
107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE); 107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE);
108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE); 108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE);
109 mips_cpu_irq_init(CPU_IRQ_BASE); 109 mips_cpu_irq_init();
110 110
111 /* setup cascade interrupts */ 111 /* setup cascade interrupts */
112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade); 112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade);
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 2116d9be5fa9..fba5c156f472 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -49,7 +49,7 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
49} 49}
50 50
51struct irq_chip emma2rh_sw_irq_controller = { 51struct irq_chip emma2rh_sw_irq_controller = {
52 .typename = "emma2rh_sw_irq", 52 .name = "emma2rh_sw_irq",
53 .ack = emma2rh_sw_irq_disable, 53 .ack = emma2rh_sw_irq_disable,
54 .mask = emma2rh_sw_irq_disable, 54 .mask = emma2rh_sw_irq_disable,
55 .mask_ack = emma2rh_sw_irq_disable, 55 .mask_ack = emma2rh_sw_irq_disable,
@@ -115,7 +115,7 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
115} 115}
116 116
117struct irq_chip emma2rh_gpio_irq_controller = { 117struct irq_chip emma2rh_gpio_irq_controller = {
118 .typename = "emma2rh_gpio_irq", 118 .name = "emma2rh_gpio_irq",
119 .ack = emma2rh_gpio_irq_ack, 119 .ack = emma2rh_gpio_irq_ack,
120 .mask = emma2rh_gpio_irq_disable, 120 .mask = emma2rh_gpio_irq_disable,
121 .mask_ack = emma2rh_gpio_irq_ack, 121 .mask_ack = emma2rh_gpio_irq_ack,
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index b3e5796c81d7..04572b9c9642 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -88,7 +88,7 @@ static void end_ev64120_irq(unsigned int irq)
88} 88}
89 89
90static struct irq_chip ev64120_irq_type = { 90static struct irq_chip ev64120_irq_type = {
91 .typename = "EV64120", 91 .name = "EV64120",
92 .ack = disable_ev64120_irq, 92 .ack = disable_ev64120_irq,
93 .mask = disable_ev64120_irq, 93 .mask = disable_ev64120_irq,
94 .mask_ack = disable_ev64120_irq, 94 .mask_ack = disable_ev64120_irq,
diff --git a/arch/mips/gt64120/ev64120/setup.c b/arch/mips/gt64120/ev64120/setup.c
index 99c8d42212e2..477848c22a2c 100644
--- a/arch/mips/gt64120/ev64120/setup.c
+++ b/arch/mips/gt64120/ev64120/setup.c
@@ -59,9 +59,8 @@ extern void galileo_machine_power_off(void);
59 */ 59 */
60extern struct pci_ops galileo_pci_ops; 60extern struct pci_ops galileo_pci_ops;
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67/* 66/*
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/gt64120/momenco_ocelot/irq.c b/arch/mips/gt64120/momenco_ocelot/irq.c
index d9294401ccb0..2585d9dbda33 100644
--- a/arch/mips/gt64120/momenco_ocelot/irq.c
+++ b/arch/mips/gt64120/momenco_ocelot/irq.c
@@ -90,6 +90,6 @@ void __init arch_init_irq(void)
90 clear_c0_status(ST0_IM); 90 clear_c0_status(ST0_IM);
91 local_irq_disable(); 91 local_irq_disable();
92 92
93 mips_cpu_irq_init(0); 93 mips_cpu_irq_init();
94 rm7k_cpu_irq_init(8); 94 rm7k_cpu_irq_init();
95} 95}
diff --git a/arch/mips/gt64120/momenco_ocelot/prom.c b/arch/mips/gt64120/momenco_ocelot/prom.c
index 8677b6d3ada7..78f393b2afd9 100644
--- a/arch/mips/gt64120/momenco_ocelot/prom.c
+++ b/arch/mips/gt64120/momenco_ocelot/prom.c
@@ -67,7 +67,6 @@ void __init prom_init(void)
67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM); 67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM);
68} 68}
69 69
70unsigned long __init prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
diff --git a/arch/mips/gt64120/wrppmc/irq.c b/arch/mips/gt64120/wrppmc/irq.c
index eedfc24e1eae..d3d96591780e 100644
--- a/arch/mips/gt64120/wrppmc/irq.c
+++ b/arch/mips/gt64120/wrppmc/irq.c
@@ -63,7 +63,7 @@ void gt64120_init_pic(void)
63void __init arch_init_irq(void) 63void __init arch_init_irq(void)
64{ 64{
65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */ 65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */
66 mips_cpu_irq_init(0); 66 mips_cpu_irq_init();
67 67
68 gt64120_init_pic(); 68 gt64120_init_pic();
69} 69}
diff --git a/arch/mips/gt64120/wrppmc/setup.c b/arch/mips/gt64120/wrppmc/setup.c
index 429afc400cb4..121188d5ec4a 100644
--- a/arch/mips/gt64120/wrppmc/setup.c
+++ b/arch/mips/gt64120/wrppmc/setup.c
@@ -93,9 +93,8 @@ void __init wrppmc_early_printk(const char *fmt, ...)
93} 93}
94#endif /* WRPPMC_EARLY_DEBUG */ 94#endif /* WRPPMC_EARLY_DEBUG */
95 95
96unsigned long __init prom_free_prom_memory(void) 96void __init prom_free_prom_memory(void)
97{ 97{
98 return 0;
99} 98}
100 99
101#ifdef CONFIG_SERIAL_8250 100#ifdef CONFIG_SERIAL_8250
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index f8d417b5c2bb..295892e4ce53 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -40,7 +40,7 @@ void disable_r4030_irq(unsigned int irq)
40} 40}
41 41
42static struct irq_chip r4030_irq_type = { 42static struct irq_chip r4030_irq_type = {
43 .typename = "R4030", 43 .name = "R4030",
44 .ack = disable_r4030_irq, 44 .ack = disable_r4030_irq,
45 .mask = disable_r4030_irq, 45 .mask = disable_r4030_irq,
46 .mask_ack = disable_r4030_irq, 46 .mask_ack = disable_r4030_irq,
diff --git a/arch/mips/jmr3927/common/prom.c b/arch/mips/jmr3927/common/prom.c
index 5d5838f41d23..aa481b774c42 100644
--- a/arch/mips/jmr3927/common/prom.c
+++ b/arch/mips/jmr3927/common/prom.c
@@ -75,7 +75,6 @@ void __init prom_init_cmdline(void)
75 *cp = '\0'; 75 *cp = '\0';
76} 76}
77 77
78unsigned long __init prom_free_prom_memory(void) 78void __init prom_free_prom_memory(void)
79{ 79{
80 return 0;
81} 80}
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index 3da49c5aaf49..7d2c203cb406 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -439,7 +439,7 @@ void __init arch_init_irq(void)
439} 439}
440 440
441static struct irq_chip jmr3927_irq_controller = { 441static struct irq_chip jmr3927_irq_controller = {
442 .typename = "jmr3927_irq", 442 .name = "jmr3927_irq",
443 .ack = jmr3927_irq_ack, 443 .ack = jmr3927_irq_ack,
444 .mask = jmr3927_irq_disable, 444 .mask = jmr3927_irq_disable,
445 .mask_ack = jmr3927_irq_ack, 445 .mask_ack = jmr3927_irq_ack,
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 138f25efe38a..7ca3d6d07b34 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -434,7 +434,7 @@ void __init tx3927_setup(void)
434 434
435 /* DMA */ 435 /* DMA */
436 tx3927_dmaptr->mcr = 0; 436 tx3927_dmaptr->mcr = 0;
437 for (i = 0; i < sizeof(tx3927_dmaptr->ch) / sizeof(tx3927_dmaptr->ch[0]); i++) { 437 for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
438 /* reset channel */ 438 /* reset channel */
439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST; 439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
440 tx3927_dmaptr->ch[i].ccr = 0; 440 tx3927_dmaptr->ch[i].ccr = 0;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ff88b06f89df..ea7df4b8da33 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -234,10 +234,6 @@ void output_mm_defines(void)
234 constant("#define _PMD_SHIFT ", PMD_SHIFT); 234 constant("#define _PMD_SHIFT ", PMD_SHIFT);
235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); 235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
236 linefeed; 236 linefeed;
237 constant("#define _PGD_ORDER ", PGD_ORDER);
238 constant("#define _PMD_ORDER ", PMD_ORDER);
239 constant("#define _PTE_ORDER ", PTE_ORDER);
240 linefeed;
241 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD); 237 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
242 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD); 238 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
243 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE); 239 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 442839e9578c..f59ef271d247 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -565,7 +565,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
565 if (config3 & MIPS_CONF3_VEIC) 565 if (config3 & MIPS_CONF3_VEIC)
566 c->options |= MIPS_CPU_VEIC; 566 c->options |= MIPS_CPU_VEIC;
567 if (config3 & MIPS_CONF3_MT) 567 if (config3 & MIPS_CONF3_MT)
568 c->ases |= MIPS_ASE_MIPSMT; 568 c->ases |= MIPS_ASE_MIPSMT;
569 569
570 return config3 & MIPS_CONF_M; 570 return config3 & MIPS_CONF_M;
571} 571}
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 719d26968cb2..7bc882049269 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -505,13 +505,13 @@ void show_gdbregs(struct gdb_regs * regs)
505 */ 505 */
506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
507 regs->reg0, regs->reg1, regs->reg2, regs->reg3, 507 regs->reg0, regs->reg1, regs->reg2, regs->reg3,
508 regs->reg4, regs->reg5, regs->reg6, regs->reg7); 508 regs->reg4, regs->reg5, regs->reg6, regs->reg7);
509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
510 regs->reg8, regs->reg9, regs->reg10, regs->reg11, 510 regs->reg8, regs->reg9, regs->reg10, regs->reg11,
511 regs->reg12, regs->reg13, regs->reg14, regs->reg15); 511 regs->reg12, regs->reg13, regs->reg14, regs->reg15);
512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
513 regs->reg16, regs->reg17, regs->reg18, regs->reg19, 513 regs->reg16, regs->reg17, regs->reg18, regs->reg19,
514 regs->reg20, regs->reg21, regs->reg22, regs->reg23); 514 regs->reg20, regs->reg21, regs->reg22, regs->reg23);
515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
516 regs->reg24, regs->reg25, regs->reg26, regs->reg27, 516 regs->reg24, regs->reg25, regs->reg26, regs->reg27,
517 regs->reg28, regs->reg29, regs->reg30, regs->reg31); 517 regs->reg28, regs->reg29, regs->reg30, regs->reg31);
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 9a7811d13db2..6f57ca44291f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
231#endif /* CONFIG_SMP */ 231#endif /* CONFIG_SMP */
232 232
233 __FINIT 233 __FINIT
234
235 .comm kernelsp, NR_CPUS * 8, 8
236 .comm pgd_current, NR_CPUS * 8, 8
237
238 .comm fw_arg0, SZREG, SZREG # firmware arguments
239 .comm fw_arg1, SZREG, SZREG
240 .comm fw_arg2, SZREG, SZREG
241 .comm fw_arg3, SZREG, SZREG
242
243 .macro page name, order
244 .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
245 .endm
246
247 /*
248 * On 64-bit we've got three-level pagetables with a slightly
249 * different layout ...
250 */
251 page swapper_pg_dir, _PGD_ORDER
252#ifdef CONFIG_64BIT
253#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
254 page module_pg_dir, _PGD_ORDER
255#endif
256 page invalid_pmd_table, _PMD_ORDER
257#endif
258 page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b59a676c6d0e..b33ba6cd7f5b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
54 54
55void disable_8259A_irq(unsigned int irq) 55void disable_8259A_irq(unsigned int irq)
56{ 56{
57 unsigned int mask = 1 << irq; 57 unsigned int mask;
58 unsigned long flags; 58 unsigned long flags;
59 59
60 irq -= I8259A_IRQ_BASE;
61 mask = 1 << irq;
60 spin_lock_irqsave(&i8259A_lock, flags); 62 spin_lock_irqsave(&i8259A_lock, flags);
61 cached_irq_mask |= mask; 63 cached_irq_mask |= mask;
62 if (irq & 8) 64 if (irq & 8)
@@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
68 70
69void enable_8259A_irq(unsigned int irq) 71void enable_8259A_irq(unsigned int irq)
70{ 72{
71 unsigned int mask = ~(1 << irq); 73 unsigned int mask;
72 unsigned long flags; 74 unsigned long flags;
73 75
76 irq -= I8259A_IRQ_BASE;
77 mask = ~(1 << irq);
74 spin_lock_irqsave(&i8259A_lock, flags); 78 spin_lock_irqsave(&i8259A_lock, flags);
75 cached_irq_mask &= mask; 79 cached_irq_mask &= mask;
76 if (irq & 8) 80 if (irq & 8)
@@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
82 86
83int i8259A_irq_pending(unsigned int irq) 87int i8259A_irq_pending(unsigned int irq)
84{ 88{
85 unsigned int mask = 1 << irq; 89 unsigned int mask;
86 unsigned long flags; 90 unsigned long flags;
87 int ret; 91 int ret;
88 92
93 irq -= I8259A_IRQ_BASE;
94 mask = 1 << irq;
89 spin_lock_irqsave(&i8259A_lock, flags); 95 spin_lock_irqsave(&i8259A_lock, flags);
90 if (irq < 8) 96 if (irq < 8)
91 ret = inb(PIC_MASTER_CMD) & mask; 97 ret = inb(PIC_MASTER_CMD) & mask;
@@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
134 */ 140 */
135void mask_and_ack_8259A(unsigned int irq) 141void mask_and_ack_8259A(unsigned int irq)
136{ 142{
137 unsigned int irqmask = 1 << irq; 143 unsigned int irqmask;
138 unsigned long flags; 144 unsigned long flags;
139 145
146 irq -= I8259A_IRQ_BASE;
147 irqmask = 1 << irq;
140 spin_lock_irqsave(&i8259A_lock, flags); 148 spin_lock_irqsave(&i8259A_lock, flags);
141 /* 149 /*
142 * Lightweight spurious IRQ detection. We do not want 150 * Lightweight spurious IRQ detection. We do not want
@@ -169,8 +177,8 @@ handle_real_irq:
169 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ 177 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
170 } 178 }
171#ifdef CONFIG_MIPS_MT_SMTC 179#ifdef CONFIG_MIPS_MT_SMTC
172 if (irq_hwmask[irq] & ST0_IM) 180 if (irq_hwmask[irq] & ST0_IM)
173 set_c0_status(irq_hwmask[irq] & ST0_IM); 181 set_c0_status(irq_hwmask[irq] & ST0_IM);
174#endif /* CONFIG_MIPS_MT_SMTC */ 182#endif /* CONFIG_MIPS_MT_SMTC */
175 spin_unlock_irqrestore(&i8259A_lock, flags); 183 spin_unlock_irqrestore(&i8259A_lock, flags);
176 return; 184 return;
@@ -322,8 +330,8 @@ void __init init_i8259_irqs (void)
322 330
323 init_8259A(0); 331 init_8259A(0);
324 332
325 for (i = 0; i < 16; i++) 333 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
326 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); 334 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
327 335
328 setup_irq(PIC_CASCADE_IR, &irq2); 336 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
329} 337}
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 37cad5de515c..3cc25c05d367 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -10,6 +10,8 @@
10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> 10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> 11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
12 */ 12 */
13#undef DEBUG
14
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
15#include <linux/stat.h> 17#include <linux/stat.h>
@@ -40,8 +42,6 @@
40 42
41#include <linux/elf.h> 43#include <linux/elf.h>
42 44
43#undef DEBUG
44
45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); 45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46static int load_irix_library(struct file *); 46static int load_irix_library(struct file *);
47static int irix_core_dump(long signr, struct pt_regs * regs, 47static int irix_core_dump(long signr, struct pt_regs * regs,
@@ -52,72 +52,102 @@ static struct linux_binfmt irix_format = {
52 irix_core_dump, PAGE_SIZE 52 irix_core_dump, PAGE_SIZE
53}; 53};
54 54
55#ifdef DEBUG
56/* Debugging routines. */ 55/* Debugging routines. */
57static char *get_elf_p_type(Elf32_Word p_type) 56static char *get_elf_p_type(Elf32_Word p_type)
58{ 57{
59 int i = (int) p_type; 58#ifdef DEBUG
60 59 switch (p_type) {
61 switch(i) { 60 case PT_NULL:
62 case PT_NULL: return("PT_NULL"); break; 61 return "PT_NULL";
63 case PT_LOAD: return("PT_LOAD"); break; 62 break;
64 case PT_DYNAMIC: return("PT_DYNAMIC"); break; 63
65 case PT_INTERP: return("PT_INTERP"); break; 64 case PT_LOAD:
66 case PT_NOTE: return("PT_NOTE"); break; 65 return "PT_LOAD";
67 case PT_SHLIB: return("PT_SHLIB"); break; 66 break;
68 case PT_PHDR: return("PT_PHDR"); break; 67
69 case PT_LOPROC: return("PT_LOPROC/REGINFO"); break; 68 case PT_DYNAMIC:
70 case PT_HIPROC: return("PT_HIPROC"); break; 69 return "PT_DYNAMIC";
71 default: return("PT_BOGUS"); break; 70 break;
71
72 case PT_INTERP:
73 return "PT_INTERP";
74 break;
75
76 case PT_NOTE:
77 return "PT_NOTE";
78 break;
79
80 case PT_SHLIB:
81 return "PT_SHLIB";
82 break;
83
84 case PT_PHDR:
85 return "PT_PHDR";
86 break;
87
88 case PT_LOPROC:
89 return "PT_LOPROC/REGINFO";
90 break;
91
92 case PT_HIPROC:
93 return "PT_HIPROC";
94 break;
95
96 default:
97 return "PT_BOGUS";
98 break;
72 } 99 }
100#endif
73} 101}
74 102
75static void print_elfhdr(struct elfhdr *ehp) 103static void print_elfhdr(struct elfhdr *ehp)
76{ 104{
77 int i; 105 int i;
78 106
79 printk("ELFHDR: e_ident<"); 107 pr_debug("ELFHDR: e_ident<");
80 for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]); 108 for (i = 0; i < (EI_NIDENT - 1); i++)
81 printk("%x>\n", ehp->e_ident[i]); 109 pr_debug("%x ", ehp->e_ident[i]);
82 printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", 110 pr_debug("%x>\n", ehp->e_ident[i]);
83 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, 111 pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
84 (unsigned long) ehp->e_version); 112 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
85 printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " 113 (unsigned long) ehp->e_version);
86 "e_flags[%08lx]\n", 114 pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
87 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, 115 "e_flags[%08lx]\n",
88 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); 116 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
89 printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", 117 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
90 (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize, 118 pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
91 (unsigned short) ehp->e_phnum); 119 (unsigned short) ehp->e_ehsize,
92 printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", 120 (unsigned short) ehp->e_phentsize,
93 (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum, 121 (unsigned short) ehp->e_phnum);
94 (unsigned short) ehp->e_shstrndx); 122 pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
123 (unsigned short) ehp->e_shentsize,
124 (unsigned short) ehp->e_shnum,
125 (unsigned short) ehp->e_shstrndx);
95} 126}
96 127
97static void print_phdr(int i, struct elf_phdr *ep) 128static void print_phdr(int i, struct elf_phdr *ep)
98{ 129{
99 printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " 130 pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
100 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), 131 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
101 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, 132 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
102 (unsigned long) ep->p_paddr); 133 (unsigned long) ep->p_paddr);
103 printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " 134 pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
104 "p_align[%08lx]\n", (unsigned long) ep->p_filesz, 135 "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
105 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, 136 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
106 (unsigned long) ep->p_align); 137 (unsigned long) ep->p_align);
107} 138}
108 139
109static void dump_phdrs(struct elf_phdr *ep, int pnum) 140static void dump_phdrs(struct elf_phdr *ep, int pnum)
110{ 141{
111 int i; 142 int i;
112 143
113 for(i = 0; i < pnum; i++, ep++) { 144 for (i = 0; i < pnum; i++, ep++) {
114 if((ep->p_type == PT_LOAD) || 145 if ((ep->p_type == PT_LOAD) ||
115 (ep->p_type == PT_INTERP) || 146 (ep->p_type == PT_INTERP) ||
116 (ep->p_type == PT_PHDR)) 147 (ep->p_type == PT_PHDR))
117 print_phdr(i, ep); 148 print_phdr(i, ep);
118 } 149 }
119} 150}
120#endif /* DEBUG */
121 151
122static void set_brk(unsigned long start, unsigned long end) 152static void set_brk(unsigned long start, unsigned long end)
123{ 153{
@@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
156 elf_addr_t *envp; 186 elf_addr_t *envp;
157 elf_addr_t *sp, *csp; 187 elf_addr_t *sp, *csp;
158 188
159#ifdef DEBUG 189 pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
160 printk("create_irix_tables: p[%p] argc[%d] envc[%d] " 190 "load_addr[%08x] interp_load_addr[%08x]\n",
161 "load_addr[%08x] interp_load_addr[%08x]\n", 191 p, argc, envc, load_addr, interp_load_addr);
162 p, argc, envc, load_addr, interp_load_addr); 192
163#endif
164 sp = (elf_addr_t *) (~15UL & (unsigned long) p); 193 sp = (elf_addr_t *) (~15UL & (unsigned long) p);
165 csp = sp; 194 csp = sp;
166 csp -= exec ? DLINFO_ITEMS*2 : 2; 195 csp -= exec ? DLINFO_ITEMS*2 : 2;
@@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
181 sp -= 2; 210 sp -= 2;
182 NEW_AUX_ENT(0, AT_NULL, 0); 211 NEW_AUX_ENT(0, AT_NULL, 0);
183 212
184 if(exec) { 213 if (exec) {
185 sp -= 11*2; 214 sp -= 11*2;
186 215
187 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff); 216 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
@@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
245 last_bss = 0; 274 last_bss = 0;
246 error = load_addr = 0; 275 error = load_addr = 0;
247 276
248#ifdef DEBUG
249 print_elfhdr(interp_elf_ex); 277 print_elfhdr(interp_elf_ex);
250#endif
251 278
252 /* First of all, some simple consistency checks */ 279 /* First of all, some simple consistency checks */
253 if ((interp_elf_ex->e_type != ET_EXEC && 280 if ((interp_elf_ex->e_type != ET_EXEC &&
@@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
258 } 285 }
259 286
260 /* Now read in all of the header information */ 287 /* Now read in all of the header information */
261 if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { 288 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
262 printk("IRIX interp header bigger than a page (%d)\n", 289 printk("IRIX interp header bigger than a page (%d)\n",
263 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); 290 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
264 return 0xffffffff; 291 return 0xffffffff;
@@ -267,15 +294,15 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
267 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, 294 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
268 GFP_KERNEL); 295 GFP_KERNEL);
269 296
270 if(!elf_phdata) { 297 if (!elf_phdata) {
271 printk("Cannot kmalloc phdata for IRIX interp.\n"); 298 printk("Cannot kmalloc phdata for IRIX interp.\n");
272 return 0xffffffff; 299 return 0xffffffff;
273 } 300 }
274 301
275 /* If the size of this structure has changed, then punt, since 302 /* If the size of this structure has changed, then punt, since
276 * we will be doing the wrong thing. 303 * we will be doing the wrong thing.
277 */ 304 */
278 if(interp_elf_ex->e_phentsize != 32) { 305 if (interp_elf_ex->e_phentsize != 32) {
279 printk("IRIX interp e_phentsize == %d != 32 ", 306 printk("IRIX interp e_phentsize == %d != 32 ",
280 interp_elf_ex->e_phentsize); 307 interp_elf_ex->e_phentsize);
281 kfree(elf_phdata); 308 kfree(elf_phdata);
@@ -286,61 +313,71 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
286 (char *) elf_phdata, 313 (char *) elf_phdata,
287 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); 314 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
288 315
289#ifdef DEBUG
290 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); 316 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
291#endif
292 317
293 eppnt = elf_phdata; 318 eppnt = elf_phdata;
294 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { 319 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
295 if(eppnt->p_type == PT_LOAD) { 320 if (eppnt->p_type == PT_LOAD) {
296 int elf_type = MAP_PRIVATE | MAP_DENYWRITE; 321 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
297 int elf_prot = 0; 322 int elf_prot = 0;
298 unsigned long vaddr = 0; 323 unsigned long vaddr = 0;
299 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 324 if (eppnt->p_flags & PF_R)
300 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 325 elf_prot = PROT_READ;
301 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 326 if (eppnt->p_flags & PF_W)
302 elf_type |= MAP_FIXED; 327 elf_prot |= PROT_WRITE;
303 vaddr = eppnt->p_vaddr; 328 if (eppnt->p_flags & PF_X)
304 329 elf_prot |= PROT_EXEC;
305 pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", 330 elf_type |= MAP_FIXED;
306 interpreter, vaddr, 331 vaddr = eppnt->p_vaddr;
307 (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), 332
308 (unsigned long) elf_prot, (unsigned long) elf_type, 333 pr_debug("INTERP do_mmap"
309 (unsigned long) (eppnt->p_offset & 0xfffff000)); 334 "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
310 down_write(&current->mm->mmap_sem); 335 interpreter, vaddr,
311 error = do_mmap(interpreter, vaddr, 336 (unsigned long)
312 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), 337 (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
313 elf_prot, elf_type, 338 (unsigned long)
314 eppnt->p_offset & 0xfffff000); 339 elf_prot, (unsigned long) elf_type,
315 up_write(&current->mm->mmap_sem); 340 (unsigned long)
316 341 (eppnt->p_offset & 0xfffff000));
317 if(error < 0 && error > -1024) { 342
318 printk("Aieee IRIX interp mmap error=%d\n", error); 343 down_write(&current->mm->mmap_sem);
319 break; /* Real error */ 344 error = do_mmap(interpreter, vaddr,
320 } 345 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
321 pr_debug("error=%08lx ", (unsigned long) error); 346 elf_prot, elf_type,
322 if(!load_addr && interp_elf_ex->e_type == ET_DYN) { 347 eppnt->p_offset & 0xfffff000);
323 load_addr = error; 348 up_write(&current->mm->mmap_sem);
324 pr_debug("load_addr = error "); 349
325 } 350 if (error < 0 && error > -1024) {
326 351 printk("Aieee IRIX interp mmap error=%d\n",
327 /* Find the end of the file mapping for this phdr, and keep 352 error);
328 * track of the largest address we see for this. 353 break; /* Real error */
329 */ 354 }
330 k = eppnt->p_vaddr + eppnt->p_filesz; 355 pr_debug("error=%08lx ", (unsigned long) error);
331 if(k > elf_bss) elf_bss = k; 356 if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
332 357 load_addr = error;
333 /* Do the same thing for the memory mapping - between 358 pr_debug("load_addr = error ");
334 * elf_bss and last_bss is the bss section. 359 }
335 */ 360
336 k = eppnt->p_memsz + eppnt->p_vaddr; 361 /*
337 if(k > last_bss) last_bss = k; 362 * Find the end of the file mapping for this phdr, and
338 pr_debug("\n"); 363 * keep track of the largest address we see for this.
339 } 364 */
365 k = eppnt->p_vaddr + eppnt->p_filesz;
366 if (k > elf_bss)
367 elf_bss = k;
368
369 /* Do the same thing for the memory mapping - between
370 * elf_bss and last_bss is the bss section.
371 */
372 k = eppnt->p_memsz + eppnt->p_vaddr;
373 if (k > last_bss)
374 last_bss = k;
375 pr_debug("\n");
376 }
340 } 377 }
341 378
342 /* Now use mmap to map the library into memory. */ 379 /* Now use mmap to map the library into memory. */
343 if(error < 0 && error > -1024) { 380 if (error < 0 && error > -1024) {
344 pr_debug("got error %d\n", error); 381 pr_debug("got error %d\n", error);
345 kfree(elf_phdata); 382 kfree(elf_phdata);
346 return 0xffffffff; 383 return 0xffffffff;
@@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
377 return -ENOEXEC; 414 return -ENOEXEC;
378 415
379 /* First of all, some simple consistency checks */ 416 /* First of all, some simple consistency checks */
380 if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || 417 if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
381 !bprm->file->f_op->mmap) { 418 !bprm->file->f_op->mmap) {
382 return -ENOEXEC; 419 return -ENOEXEC;
383 } 420 }
@@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
388 * XXX all registers as 64bits on cpu's capable of this at 425 * XXX all registers as 64bits on cpu's capable of this at
389 * XXX exception time plus frob the XTLB exception vector. 426 * XXX exception time plus frob the XTLB exception vector.
390 */ 427 */
391 if((ehp->e_flags & EF_MIPS_ABI2)) 428 if ((ehp->e_flags & EF_MIPS_ABI2))
392 return -ENOEXEC; 429 return -ENOEXEC;
393 430
394 return 0; 431 return 0;
@@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
410 struct file *file = NULL; 447 struct file *file = NULL;
411 448
412 *name = NULL; 449 *name = NULL;
413 for(i = 0; i < pnum; i++, epp++) { 450 for (i = 0; i < pnum; i++, epp++) {
414 if (epp->p_type != PT_INTERP) 451 if (epp->p_type != PT_INTERP)
415 continue; 452 continue;
416 453
@@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
467 unsigned int tmp; 504 unsigned int tmp;
468 int i, prot; 505 int i, prot;
469 506
470 for(i = 0; i < pnum; i++, epp++) { 507 for (i = 0; i < pnum; i++, epp++) {
471 if(epp->p_type != PT_LOAD) 508 if (epp->p_type != PT_LOAD)
472 continue; 509 continue;
473 510
474 /* Map it. */ 511 /* Map it. */
@@ -483,23 +520,23 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
483 up_write(&current->mm->mmap_sem); 520 up_write(&current->mm->mmap_sem);
484 521
485 /* Fixup location tracking vars. */ 522 /* Fixup location tracking vars. */
486 if((epp->p_vaddr & 0xfffff000) < *estack) 523 if ((epp->p_vaddr & 0xfffff000) < *estack)
487 *estack = (epp->p_vaddr & 0xfffff000); 524 *estack = (epp->p_vaddr & 0xfffff000);
488 if(!*laddr) 525 if (!*laddr)
489 *laddr = epp->p_vaddr - epp->p_offset; 526 *laddr = epp->p_vaddr - epp->p_offset;
490 if(epp->p_vaddr < *scode) 527 if (epp->p_vaddr < *scode)
491 *scode = epp->p_vaddr; 528 *scode = epp->p_vaddr;
492 529
493 tmp = epp->p_vaddr + epp->p_filesz; 530 tmp = epp->p_vaddr + epp->p_filesz;
494 if(tmp > *ebss) 531 if (tmp > *ebss)
495 *ebss = tmp; 532 *ebss = tmp;
496 if((epp->p_flags & PF_X) && *ecode < tmp) 533 if ((epp->p_flags & PF_X) && *ecode < tmp)
497 *ecode = tmp; 534 *ecode = tmp;
498 if(*edata < tmp) 535 if (*edata < tmp)
499 *edata = tmp; 536 *edata = tmp;
500 537
501 tmp = epp->p_vaddr + epp->p_memsz; 538 tmp = epp->p_vaddr + epp->p_memsz;
502 if(tmp > *ebrk) 539 if (tmp > *ebrk)
503 *ebrk = tmp; 540 *ebrk = tmp;
504 } 541 }
505 542
@@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
513 int i; 550 int i;
514 551
515 *eentry = 0xffffffff; 552 *eentry = 0xffffffff;
516 for(i = 0; i < pnum; i++, epp++) { 553 for (i = 0; i < pnum; i++, epp++) {
517 if(epp->p_type != PT_INTERP) 554 if (epp->p_type != PT_INTERP)
518 continue; 555 continue;
519 556
520 /* We should have fielded this error elsewhere... */ 557 /* We should have fielded this error elsewhere... */
521 if(*eentry != 0xffffffff) 558 if (*eentry != 0xffffffff)
522 return -1; 559 return -1;
523 560
524 set_fs(old_fs); 561 set_fs(old_fs);
@@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
604 if (elf_ex.e_shnum > 20) 641 if (elf_ex.e_shnum > 20)
605 goto out; 642 goto out;
606 643
607#ifdef DEBUG
608 print_elfhdr(&elf_ex); 644 print_elfhdr(&elf_ex);
609#endif
610 645
611 /* Now read in all of the header information */ 646 /* Now read in all of the header information */
612 size = elf_ex.e_phentsize * elf_ex.e_phnum; 647 size = elf_ex.e_phentsize * elf_ex.e_phnum;
@@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
622 if (retval < 0) 657 if (retval < 0)
623 goto out_free_ph; 658 goto out_free_ph;
624 659
625#ifdef DEBUG
626 dump_phdrs(elf_phdata, elf_ex.e_phnum); 660 dump_phdrs(elf_phdata, elf_ex.e_phnum);
627#endif
628 661
629 /* Set some things for later. */ 662 /* Set some things for later. */
630 for(i = 0; i < elf_ex.e_phnum; i++) { 663 for (i = 0; i < elf_ex.e_phnum; i++) {
631 switch(elf_phdata[i].p_type) { 664 switch (elf_phdata[i].p_type) {
632 case PT_INTERP: 665 case PT_INTERP:
633 has_interp = 1; 666 has_interp = 1;
634 elf_ihdr = &elf_phdata[i]; 667 elf_ihdr = &elf_phdata[i];
@@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
667 700
668 if (elf_interpreter) { 701 if (elf_interpreter) {
669 retval = verify_irix_interpreter(&interp_elf_ex); 702 retval = verify_irix_interpreter(&interp_elf_ex);
670 if(retval) 703 if (retval)
671 goto out_free_interp; 704 goto out_free_interp;
672 } 705 }
673 706
@@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
706 &load_addr, &start_code, &elf_bss, &end_code, 739 &load_addr, &start_code, &elf_bss, &end_code,
707 &end_data, &elf_brk); 740 &end_data, &elf_brk);
708 741
709 if(elf_interpreter) { 742 if (elf_interpreter) {
710 retval = map_interpreter(elf_phdata, &interp_elf_ex, 743 retval = map_interpreter(elf_phdata, &interp_elf_ex,
711 interpreter, &interp_load_addr, 744 interpreter, &interp_load_addr,
712 elf_ex.e_phnum, old_fs, &elf_entry); 745 elf_ex.e_phnum, old_fs, &elf_entry);
713 kfree(elf_interpreter); 746 kfree(elf_interpreter);
714 if(retval) { 747 if (retval) {
715 set_fs(old_fs); 748 set_fs(old_fs);
716 printk("Unable to load IRIX ELF interpreter\n"); 749 printk("Unable to load IRIX ELF interpreter\n");
717 send_sig(SIGSEGV, current, 0); 750 send_sig(SIGSEGV, current, 0);
@@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
809 return -ENOEXEC; 842 return -ENOEXEC;
810 843
811 /* First of all, some simple consistency checks. */ 844 /* First of all, some simple consistency checks. */
812 if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || 845 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
813 !file->f_op->mmap) 846 !file->f_op->mmap)
814 return -ENOEXEC; 847 return -ENOEXEC;
815 848
816 /* Now read in all of the header information. */ 849 /* Now read in all of the header information. */
817 if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) 850 if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
818 return -ENOEXEC; 851 return -ENOEXEC;
819 852
820 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); 853 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
@@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
825 sizeof(struct elf_phdr) * elf_ex.e_phnum); 858 sizeof(struct elf_phdr) * elf_ex.e_phnum);
826 859
827 j = 0; 860 j = 0;
828 for(i=0; i<elf_ex.e_phnum; i++) 861 for (i=0; i<elf_ex.e_phnum; i++)
829 if((elf_phdata + i)->p_type == PT_LOAD) j++; 862 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
830 863
831 if(j != 1) { 864 if (j != 1) {
832 kfree(elf_phdata); 865 kfree(elf_phdata);
833 return -ENOEXEC; 866 return -ENOEXEC;
834 } 867 }
835 868
836 while(elf_phdata->p_type != PT_LOAD) elf_phdata++; 869 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
837 870
838 /* Now use mmap to map the library into memory. */ 871 /* Now use mmap to map the library into memory. */
839 down_write(&current->mm->mmap_sem); 872 down_write(&current->mm->mmap_sem);
@@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
889 return -EFAULT; 922 return -EFAULT;
890 } 923 }
891 924
892#ifdef DEBUG
893 dump_phdrs(user_phdrp, cnt); 925 dump_phdrs(user_phdrp, cnt);
894#endif
895 926
896 for (i = 0; i < cnt; i++, hp++) { 927 for (i = 0; i < cnt; i++, hp++) {
897 if (__get_user(type, &hp->p_type)) 928 if (__get_user(type, &hp->p_type))
@@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
905 filp = fget(fd); 936 filp = fget(fd);
906 if (!filp) 937 if (!filp)
907 return -EACCES; 938 return -EACCES;
908 if(!filp->f_op) { 939 if (!filp->f_op) {
909 printk("irix_mapelf: Bogon filp!\n"); 940 printk("irix_mapelf: Bogon filp!\n");
910 fput(filp); 941 fput(filp);
911 return -EACCES; 942 return -EACCES;
912 } 943 }
913 944
914 hp = user_phdrp; 945 hp = user_phdrp;
915 for(i = 0; i < cnt; i++, hp++) { 946 for (i = 0; i < cnt; i++, hp++) {
916 int prot; 947 int prot;
917 948
918 retval = __get_user(vaddr, &hp->p_vaddr); 949 retval = __get_user(vaddr, &hp->p_vaddr);
@@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
1015 return sz; 1046 return sz;
1016} 1047}
1017 1048
1018/* #define DEBUG */
1019
1020#define DUMP_WRITE(addr, nr) \ 1049#define DUMP_WRITE(addr, nr) \
1021 if (!dump_write(file, (addr), (nr))) \ 1050 if (!dump_write(file, (addr), (nr))) \
1022 goto end_coredump; 1051 goto end_coredump;
@@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1093 1122
1094 segs++; 1123 segs++;
1095 } 1124 }
1096#ifdef DEBUG 1125 pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1097 printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1098#endif
1099 1126
1100 /* Set up header. */ 1127 /* Set up header. */
1101 memcpy(elf.e_ident, ELFMAG, SELFMAG); 1128 memcpy(elf.e_ident, ELFMAG, SELFMAG);
@@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1221 struct elf_phdr phdr; 1248 struct elf_phdr phdr;
1222 int sz = 0; 1249 int sz = 0;
1223 1250
1224 for(i = 0; i < numnote; i++) 1251 for (i = 0; i < numnote; i++)
1225 sz += notesize(&notes[i]); 1252 sz += notesize(&notes[i]);
1226 1253
1227 phdr.p_type = PT_NOTE; 1254 phdr.p_type = PT_NOTE;
@@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1241 dataoff = offset = roundup(offset, PAGE_SIZE); 1268 dataoff = offset = roundup(offset, PAGE_SIZE);
1242 1269
1243 /* Write program headers for segments dump. */ 1270 /* Write program headers for segments dump. */
1244 for(vma = current->mm->mmap, i = 0; 1271 for (vma = current->mm->mmap, i = 0;
1245 i < segs && vma != NULL; vma = vma->vm_next) { 1272 i < segs && vma != NULL; vma = vma->vm_next) {
1246 struct elf_phdr phdr; 1273 struct elf_phdr phdr;
1247 size_t sz; 1274 size_t sz;
@@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1267 DUMP_WRITE(&phdr, sizeof(phdr)); 1294 DUMP_WRITE(&phdr, sizeof(phdr));
1268 } 1295 }
1269 1296
1270 for(i = 0; i < numnote; i++) 1297 for (i = 0; i < numnote; i++)
1271 if (!writenote(&notes[i], file)) 1298 if (!writenote(&notes[i], file))
1272 goto end_coredump; 1299 goto end_coredump;
1273 1300
@@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1275 1302
1276 DUMP_SEEK(dataoff); 1303 DUMP_SEEK(dataoff);
1277 1304
1278 for(i = 0, vma = current->mm->mmap; 1305 for (i = 0, vma = current->mm->mmap;
1279 i < segs && vma != NULL; 1306 i < segs && vma != NULL;
1280 vma = vma->vm_next) { 1307 vma = vma->vm_next) {
1281 unsigned long addr = vma->vm_start; 1308 unsigned long addr = vma->vm_start;
@@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1284 if (!maydump(vma)) 1311 if (!maydump(vma))
1285 continue; 1312 continue;
1286 i++; 1313 i++;
1287#ifdef DEBUG 1314 pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
1288 printk("elf_core_dump: writing %08lx %lx\n", addr, len);
1289#endif
1290 DUMP_WRITE((void __user *)addr, len); 1315 DUMP_WRITE((void __user *)addr, len);
1291 } 1316 }
1292 1317
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index bcaad6696082..2967537221e2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
112} 112}
113 113
114struct irq_chip msc_levelirq_type = { 114struct irq_chip msc_levelirq_type = {
115 .typename = "SOC-it-Level", 115 .name = "SOC-it-Level",
116 .ack = level_mask_and_ack_msc_irq, 116 .ack = level_mask_and_ack_msc_irq,
117 .mask = mask_msc_irq, 117 .mask = mask_msc_irq,
118 .mask_ack = level_mask_and_ack_msc_irq, 118 .mask_ack = level_mask_and_ack_msc_irq,
@@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = {
122}; 122};
123 123
124struct irq_chip msc_edgeirq_type = { 124struct irq_chip msc_edgeirq_type = {
125 .typename = "SOC-it-Edge", 125 .name = "SOC-it-Edge",
126 .ack = edge_mask_and_ack_msc_irq, 126 .ack = edge_mask_and_ack_msc_irq,
127 .mask = mask_msc_irq, 127 .mask = mask_msc_irq,
128 .mask_ack = edge_mask_and_ack_msc_irq, 128 .mask_ack = edge_mask_and_ack_msc_irq,
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index efbd219845b5..3dd561832e4c 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -23,13 +23,13 @@ static unsigned int irq_base;
23 23
24static inline int ls1bit32(unsigned int x) 24static inline int ls1bit32(unsigned int x)
25{ 25{
26 int b = 31, s; 26 int b = 31, s;
27 27
28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s; 28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s; 29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s; 30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s; 31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
32 s = 1; if (x << 1 == 0) s = 0; b -= s; 32 s = 1; if (x << 1 == 0) s = 0; b -= s;
33 33
34 return b; 34 return b;
35} 35}
@@ -92,7 +92,7 @@ void ll_mv64340_irq(void)
92} 92}
93 93
94struct irq_chip mv64340_irq_type = { 94struct irq_chip mv64340_irq_type = {
95 .typename = "MV-64340", 95 .name = "MV-64340",
96 .ack = mask_mv64340_irq, 96 .ack = mask_mv64340_irq,
97 .mask = mask_mv64340_irq, 97 .mask = mask_mv64340_irq,
98 .mask_ack = mask_mv64340_irq, 98 .mask_ack = mask_mv64340_irq,
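The ls1bit32() helper carried as context in the irq-mv6434x.c hunk above finds the index of the least-significant set bit by shifting the word left in halves. A stand-alone sketch of the same routine with a couple of illustrative test values (user-space only, not part of the patch; the casts just make the 32-bit truncation explicit):

#include <assert.h>
#include <stdint.h>

/* Returns the index of the least-significant set bit of a non-zero word,
 * i.e. a portable stand-in for __builtin_ctz(). */
static int ls1bit32(uint32_t x)
{
	int b = 31, s;

	s = 16; if ((uint32_t)(x << 16) == 0) s = 0; b -= s; x <<= s;
	s =  8; if ((uint32_t)(x <<  8) == 0) s = 0; b -= s; x <<= s;
	s =  4; if ((uint32_t)(x <<  4) == 0) s = 0; b -= s; x <<= s;
	s =  2; if ((uint32_t)(x <<  2) == 0) s = 0; b -= s; x <<= s;
	s =  1; if ((uint32_t)(x <<  1) == 0) s = 0; b -= s;

	return b;
}

int main(void)
{
	assert(ls1bit32(0x00000001) == 0);
	assert(ls1bit32(0x00000100) == 8);
	assert(ls1bit32(0x80000000) == 31);
	return 0;
}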
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 123324ba8c14..250732883488 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -17,28 +17,27 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20static int irq_base;
21
22static inline void unmask_rm7k_irq(unsigned int irq) 20static inline void unmask_rm7k_irq(unsigned int irq)
23{ 21{
24 set_c0_intcontrol(0x100 << (irq - irq_base)); 22 set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
25} 23}
26 24
27static inline void mask_rm7k_irq(unsigned int irq) 25static inline void mask_rm7k_irq(unsigned int irq)
28{ 26{
29 clear_c0_intcontrol(0x100 << (irq - irq_base)); 27 clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
30} 28}
31 29
32static struct irq_chip rm7k_irq_controller = { 30static struct irq_chip rm7k_irq_controller = {
33 .typename = "RM7000", 31 .name = "RM7000",
34 .ack = mask_rm7k_irq, 32 .ack = mask_rm7k_irq,
35 .mask = mask_rm7k_irq, 33 .mask = mask_rm7k_irq,
36 .mask_ack = mask_rm7k_irq, 34 .mask_ack = mask_rm7k_irq,
37 .unmask = unmask_rm7k_irq, 35 .unmask = unmask_rm7k_irq,
38}; 36};
39 37
40void __init rm7k_cpu_irq_init(int base) 38void __init rm7k_cpu_irq_init(void)
41{ 39{
40 int base = RM7K_CPU_IRQ_BASE;
42 int i; 41 int i;
43 42
44 clear_c0_intcontrol(0x00000f00); /* Mask all */ 43 clear_c0_intcontrol(0x00000f00); /* Mask all */
@@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base)
46 for (i = base; i < base + 4; i++) 45 for (i = base; i < base + 4; i++)
47 set_irq_chip_and_handler(i, &rm7k_irq_controller, 46 set_irq_chip_and_handler(i, &rm7k_irq_controller,
48 handle_level_irq); 47 handle_level_irq);
49
50 irq_base = base;
51} 48}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 0e6f4c5349d2..ae83d2df6f31 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -18,16 +18,14 @@
18#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
19#include <asm/system.h> 19#include <asm/system.h>
20 20
21static int irq_base;
22
23static inline void unmask_rm9k_irq(unsigned int irq) 21static inline void unmask_rm9k_irq(unsigned int irq)
24{ 22{
25 set_c0_intcontrol(0x1000 << (irq - irq_base)); 23 set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
26} 24}
27 25
28static inline void mask_rm9k_irq(unsigned int irq) 26static inline void mask_rm9k_irq(unsigned int irq)
29{ 27{
30 clear_c0_intcontrol(0x1000 << (irq - irq_base)); 28 clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
31} 29}
32 30
33static inline void rm9k_cpu_irq_enable(unsigned int irq) 31static inline void rm9k_cpu_irq_enable(unsigned int irq)
@@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
39 local_irq_restore(flags); 37 local_irq_restore(flags);
40} 38}
41 39
42static void rm9k_cpu_irq_disable(unsigned int irq)
43{
44 unsigned long flags;
45
46 local_irq_save(flags);
47 mask_rm9k_irq(irq);
48 local_irq_restore(flags);
49}
50
51/* 40/*
52 * Performance counter interrupts are global on all processors. 41 * Performance counter interrupts are global on all processors.
53 */ 42 */
@@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
81} 70}
82 71
83static struct irq_chip rm9k_irq_controller = { 72static struct irq_chip rm9k_irq_controller = {
84 .typename = "RM9000", 73 .name = "RM9000",
85 .ack = mask_rm9k_irq, 74 .ack = mask_rm9k_irq,
86 .mask = mask_rm9k_irq, 75 .mask = mask_rm9k_irq,
87 .mask_ack = mask_rm9k_irq, 76 .mask_ack = mask_rm9k_irq,
@@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = {
89}; 78};
90 79
91static struct irq_chip rm9k_perfcounter_irq = { 80static struct irq_chip rm9k_perfcounter_irq = {
92 .typename = "RM9000", 81 .name = "RM9000",
93 .startup = rm9k_perfcounter_irq_startup, 82 .startup = rm9k_perfcounter_irq_startup,
94 .shutdown = rm9k_perfcounter_irq_shutdown, 83 .shutdown = rm9k_perfcounter_irq_shutdown,
95 .ack = mask_rm9k_irq, 84 .ack = mask_rm9k_irq,
@@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq;
102 91
103EXPORT_SYMBOL(rm9000_perfcount_irq); 92EXPORT_SYMBOL(rm9000_perfcount_irq);
104 93
105void __init rm9k_cpu_irq_init(int base) 94void __init rm9k_cpu_irq_init(void)
106{ 95{
96 int base = RM9K_CPU_IRQ_BASE;
107 int i; 97 int i;
108 98
109 clear_c0_intcontrol(0x0000f000); /* Mask all */ 99 clear_c0_intcontrol(0x0000f000); /* Mask all */
@@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base)
115 rm9000_perfcount_irq = base + 1; 105 rm9000_perfcount_irq = base + 1;
116 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 106 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
117 handle_level_irq); 107 handle_level_irq);
118
119 irq_base = base;
120} 108}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index fcc86b96ccf6..7b66e03b5899 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -25,7 +25,7 @@
25 * Don't even think about using this on SMP. You have been warned. 25 * Don't even think about using this on SMP. You have been warned.
26 * 26 *
27 * This file exports one global function: 27 * This file exports one global function:
28 * void mips_cpu_irq_init(int irq_base); 28 * void mips_cpu_irq_init(void);
29 */ 29 */
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -36,22 +36,20 @@
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/system.h> 37#include <asm/system.h>
38 38
39static int mips_cpu_irq_base;
40
41static inline void unmask_mips_irq(unsigned int irq) 39static inline void unmask_mips_irq(unsigned int irq)
42{ 40{
43 set_c0_status(0x100 << (irq - mips_cpu_irq_base)); 41 set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
44 irq_enable_hazard(); 42 irq_enable_hazard();
45} 43}
46 44
47static inline void mask_mips_irq(unsigned int irq) 45static inline void mask_mips_irq(unsigned int irq)
48{ 46{
49 clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); 47 clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
50 irq_disable_hazard(); 48 irq_disable_hazard();
51} 49}
52 50
53static struct irq_chip mips_cpu_irq_controller = { 51static struct irq_chip mips_cpu_irq_controller = {
54 .typename = "MIPS", 52 .name = "MIPS",
55 .ack = mask_mips_irq, 53 .ack = mask_mips_irq,
56 .mask = mask_mips_irq, 54 .mask = mask_mips_irq,
57 .mask_ack = mask_mips_irq, 55 .mask_ack = mask_mips_irq,
@@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
70{ 68{
71 unsigned int vpflags = dvpe(); 69 unsigned int vpflags = dvpe();
72 70
73 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 71 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
74 evpe(vpflags); 72 evpe(vpflags);
75 unmask_mips_mt_irq(irq); 73 unmask_mips_mt_irq(irq);
76 74
@@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
84static void mips_mt_cpu_irq_ack(unsigned int irq) 82static void mips_mt_cpu_irq_ack(unsigned int irq)
85{ 83{
86 unsigned int vpflags = dvpe(); 84 unsigned int vpflags = dvpe();
87 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 85 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
88 evpe(vpflags); 86 evpe(vpflags);
89 mask_mips_mt_irq(irq); 87 mask_mips_mt_irq(irq);
90} 88}
91 89
92static struct irq_chip mips_mt_cpu_irq_controller = { 90static struct irq_chip mips_mt_cpu_irq_controller = {
93 .typename = "MIPS", 91 .name = "MIPS",
94 .startup = mips_mt_cpu_irq_startup, 92 .startup = mips_mt_cpu_irq_startup,
95 .ack = mips_mt_cpu_irq_ack, 93 .ack = mips_mt_cpu_irq_ack,
96 .mask = mask_mips_mt_irq, 94 .mask = mask_mips_mt_irq,
@@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
99 .eoi = unmask_mips_mt_irq, 97 .eoi = unmask_mips_mt_irq,
100}; 98};
101 99
102void __init mips_cpu_irq_init(int irq_base) 100void __init mips_cpu_irq_init(void)
103{ 101{
102 int irq_base = MIPS_CPU_IRQ_BASE;
104 int i; 103 int i;
105 104
106 /* Mask interrupts. */ 105 /* Mask interrupts. */
@@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base)
118 for (i = irq_base + 2; i < irq_base + 8; i++) 117 for (i = irq_base + 2; i < irq_base + 8; i++)
119 set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 118 set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
120 handle_level_irq); 119 handle_level_irq);
121
122 mips_cpu_irq_base = irq_base;
123} 120}
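The irq_cpu.c change above follows the same pattern as the irq-rm7000.c and irq-rm9000.c hunks: the per-file irq_base static is dropped, the mask/unmask helpers offset from a platform-provided constant, and the init functions no longer take a base argument. A minimal sketch of the resulting shape, using a hypothetical EXAMPLE_IRQ_BASE as a stand-in for the real MIPS_CPU_IRQ_BASE definition:

/* Sketch only: with the base a compile-time constant, the bit for a given
 * interrupt line is a constant-folded shift rather than a load plus shift. */
#define EXAMPLE_IRQ_BASE	0	/* hypothetical stand-in */

static inline unsigned int cpu_irq_to_status_bit(unsigned int irq)
{
	return 0x100 << (irq - EXAMPLE_IRQ_BASE);	/* IP0..IP7 in CP0 Status/Cause */
}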
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index de3fae260ff8..0b8ce59429a8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -194,15 +194,15 @@ sysn32_waitid(int which, compat_pid_t pid,
194} 194}
195 195
196struct sysinfo32 { 196struct sysinfo32 {
197 s32 uptime; 197 s32 uptime;
198 u32 loads[3]; 198 u32 loads[3];
199 u32 totalram; 199 u32 totalram;
200 u32 freeram; 200 u32 freeram;
201 u32 sharedram; 201 u32 sharedram;
202 u32 bufferram; 202 u32 bufferram;
203 u32 totalswap; 203 u32 totalswap;
204 u32 freeswap; 204 u32 freeswap;
205 u16 procs; 205 u16 procs;
206 u32 totalhigh; 206 u32 totalhigh;
207 u32 freehigh; 207 u32 freehigh;
208 u32 mem_unit; 208 u32 mem_unit;
@@ -558,7 +558,7 @@ extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) 558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
559{ 559{
560 int err; 560 int err;
561 struct ustat tmp; 561 struct ustat tmp;
562 struct ustat32 tmp32; 562 struct ustat32 tmp32;
563 mm_segment_t old_fs = get_fs(); 563 mm_segment_t old_fs = get_fs();
564 564
@@ -569,11 +569,11 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
569 if (err) 569 if (err)
570 goto out; 570 goto out;
571 571
572 memset(&tmp32,0,sizeof(struct ustat32)); 572 memset(&tmp32,0,sizeof(struct ustat32));
573 tmp32.f_tfree = tmp.f_tfree; 573 tmp32.f_tfree = tmp.f_tfree;
574 tmp32.f_tinode = tmp.f_tinode; 574 tmp32.f_tinode = tmp.f_tinode;
575 575
576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0; 576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
577 577
578out: 578out:
579 return err; 579 return err;
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c1373a6e668b..a32f6797353a 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -96,6 +96,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
96 goto out_unlock; 96 goto out_unlock;
97 } 97 }
98 98
99 retval = security_task_setscheduler(p, 0, NULL);
100 if (retval)
101 goto out_unlock;
102
99 /* Record new user-specified CPU set for future reference */ 103 /* Record new user-specified CPU set for future reference */
100 p->thread.user_cpus_allowed = new_mask; 104 p->thread.user_cpus_allowed = new_mask;
101 105
@@ -141,8 +145,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
141 p = find_process_by_pid(pid); 145 p = find_process_by_pid(pid);
142 if (!p) 146 if (!p)
143 goto out_unlock; 147 goto out_unlock;
144 148 retval = security_task_getscheduler(p);
145 retval = 0; 149 if (retval)
150 goto out_unlock;
146 151
147 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); 152 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
148 153
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4ed37ba19731..5ddc2e9deecf 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -31,13 +31,13 @@ static const char *cpu_name[] = {
31 [CPU_R4000PC] = "R4000PC", 31 [CPU_R4000PC] = "R4000PC",
32 [CPU_R4000SC] = "R4000SC", 32 [CPU_R4000SC] = "R4000SC",
33 [CPU_R4000MC] = "R4000MC", 33 [CPU_R4000MC] = "R4000MC",
34 [CPU_R4200] = "R4200", 34 [CPU_R4200] = "R4200",
35 [CPU_R4400PC] = "R4400PC", 35 [CPU_R4400PC] = "R4400PC",
36 [CPU_R4400SC] = "R4400SC", 36 [CPU_R4400SC] = "R4400SC",
37 [CPU_R4400MC] = "R4400MC", 37 [CPU_R4400MC] = "R4400MC",
38 [CPU_R4600] = "R4600", 38 [CPU_R4600] = "R4600",
39 [CPU_R6000] = "R6000", 39 [CPU_R6000] = "R6000",
40 [CPU_R6000A] = "R6000A", 40 [CPU_R6000A] = "R6000A",
41 [CPU_R8000] = "R8000", 41 [CPU_R8000] = "R8000",
42 [CPU_R10000] = "R10000", 42 [CPU_R10000] = "R10000",
43 [CPU_R12000] = "R12000", 43 [CPU_R12000] = "R12000",
@@ -46,14 +46,14 @@ static const char *cpu_name[] = {
46 [CPU_R4650] = "R4650", 46 [CPU_R4650] = "R4650",
47 [CPU_R4700] = "R4700", 47 [CPU_R4700] = "R4700",
48 [CPU_R5000] = "R5000", 48 [CPU_R5000] = "R5000",
49 [CPU_R5000A] = "R5000A", 49 [CPU_R5000A] = "R5000A",
50 [CPU_R4640] = "R4640", 50 [CPU_R4640] = "R4640",
51 [CPU_NEVADA] = "Nevada", 51 [CPU_NEVADA] = "Nevada",
52 [CPU_RM7000] = "RM7000", 52 [CPU_RM7000] = "RM7000",
53 [CPU_RM9000] = "RM9000", 53 [CPU_RM9000] = "RM9000",
54 [CPU_R5432] = "R5432", 54 [CPU_R5432] = "R5432",
55 [CPU_4KC] = "MIPS 4Kc", 55 [CPU_4KC] = "MIPS 4Kc",
56 [CPU_5KC] = "MIPS 5Kc", 56 [CPU_5KC] = "MIPS 5Kc",
57 [CPU_R4310] = "R4310", 57 [CPU_R4310] = "R4310",
58 [CPU_SB1] = "SiByte SB1", 58 [CPU_SB1] = "SiByte SB1",
59 [CPU_SB1A] = "SiByte SB1A", 59 [CPU_SB1A] = "SiByte SB1A",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ec8209f3a0c6..04e5b38d327d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,10 +41,6 @@
41#include <asm/isadep.h> 41#include <asm/isadep.h>
42#include <asm/inst.h> 42#include <asm/inst.h>
43#include <asm/stacktrace.h> 43#include <asm/stacktrace.h>
44#ifdef CONFIG_MIPS_MT_SMTC
45#include <asm/mipsmtregs.h>
46extern void smtc_idle_loop_hook(void);
47#endif /* CONFIG_MIPS_MT_SMTC */
48 44
49/* 45/*
50 * The idle thread. There's no useful work to be done, so just try to conserve 46 * The idle thread. There's no useful work to be done, so just try to conserve
@@ -57,6 +53,8 @@ ATTRIB_NORET void cpu_idle(void)
57 while (1) { 53 while (1) {
58 while (!need_resched()) { 54 while (!need_resched()) {
59#ifdef CONFIG_MIPS_MT_SMTC 55#ifdef CONFIG_MIPS_MT_SMTC
56 extern void smtc_idle_loop_hook(void);
57
60 smtc_idle_loop_hook(); 58 smtc_idle_loop_hook();
61#endif /* CONFIG_MIPS_MT_SMTC */ 59#endif /* CONFIG_MIPS_MT_SMTC */
62 if (cpu_wait) 60 if (cpu_wait)
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 880fa6e841ee..59c1577ecbb3 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -114,6 +114,14 @@ LEAF(_save_fp_context32)
114 */ 114 */
115LEAF(_restore_fp_context) 115LEAF(_restore_fp_context)
116 EX lw t0, SC_FPC_CSR(a0) 116 EX lw t0, SC_FPC_CSR(a0)
117
118 /* Fail if the CSR has exceptions pending */
119 srl t1, t0, 5
120 and t1, t0
121 andi t1, 0x1f << 7
122 bnez t1, fault
123 nop
124
117#ifdef CONFIG_64BIT 125#ifdef CONFIG_64BIT
118 EX ldc1 $f1, SC_FPREGS+8(a0) 126 EX ldc1 $f1, SC_FPREGS+8(a0)
119 EX ldc1 $f3, SC_FPREGS+24(a0) 127 EX ldc1 $f3, SC_FPREGS+24(a0)
@@ -157,6 +165,14 @@ LEAF(_restore_fp_context)
157LEAF(_restore_fp_context32) 165LEAF(_restore_fp_context32)
158 /* Restore an o32 sigcontext. */ 166 /* Restore an o32 sigcontext. */
159 EX lw t0, SC32_FPC_CSR(a0) 167 EX lw t0, SC32_FPC_CSR(a0)
168
169 /* Fail if the CSR has exceptions pending */
170 srl t1, t0, 5
171 and t1, t0
172 andi t1, 0x1f << 7
173 bnez t1, fault
174 nop
175
160 EX ldc1 $f0, SC32_FPREGS+0(a0) 176 EX ldc1 $f0, SC32_FPREGS+0(a0)
161 EX ldc1 $f2, SC32_FPREGS+16(a0) 177 EX ldc1 $f2, SC32_FPREGS+16(a0)
162 EX ldc1 $f4, SC32_FPREGS+32(a0) 178 EX ldc1 $f4, SC32_FPREGS+32(a0)
@@ -177,9 +193,10 @@ LEAF(_restore_fp_context32)
177 jr ra 193 jr ra
178 li v0, 0 # success 194 li v0, 0 # success
179 END(_restore_fp_context32) 195 END(_restore_fp_context32)
180 .set reorder
181#endif 196#endif
182 197
198 .set reorder
199
183 .type fault@function 200 .type fault@function
184 .ent fault 201 .ent fault
185fault: li v0, -EFAULT # failure 202fault: li v0, -EFAULT # failure
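The lines added to _restore_fp_context and _restore_fp_context32 above reject a saved FCSR in which an exception is both flagged in the Cause field and enabled in the Enable field, since writing such a value back to the FPU would raise the exception immediately; the code branches to the existing fault label and returns -EFAULT. A C rendering of the same bit test (a sketch, assuming the usual MIPS FCSR layout with Cause in bits 17..12 and Enable in bits 11..7):

#include <stdbool.h>
#include <stdint.h>

/* Non-zero result means an enabled FP exception is already pending in the
 * saved CSR: shift Cause down onto Enable and keep the five shared bits. */
static bool fcsr_would_trap(uint32_t csr)
{
	return ((csr >> 5) & csr & (0x1fu << 7)) != 0;
}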
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 5a99e3e0c96d..8610f4a925e9 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -63,7 +63,7 @@ extern void *vpe_get_shared(int index);
63 63
64static void rtlx_dispatch(void) 64static void rtlx_dispatch(void)
65{ 65{
66 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ); 66 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
67} 67}
68 68
69 69
@@ -491,7 +491,7 @@ static struct irqaction rtlx_irq = {
491 .name = "RTLX", 491 .name = "RTLX",
492}; 492};
493 493
494static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; 494static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
495 495
496static char register_chrdev_failed[] __initdata = 496static char register_chrdev_failed[] __initdata =
497 KERN_ERR "rtlx_module_init: unable to register device\n"; 497 KERN_ERR "rtlx_module_init: unable to register device\n";
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a7bff2a54723..39add2341aa2 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -384,7 +384,7 @@ EXPORT(sysn32_call_table)
384 PTR sys_readlinkat 384 PTR sys_readlinkat
385 PTR sys_fchmodat 385 PTR sys_fchmodat
386 PTR sys_faccessat 386 PTR sys_faccessat
387 PTR sys_pselect6 387 PTR compat_sys_pselect6
388 PTR sys_ppoll /* 6265 */ 388 PTR sys_ppoll /* 6265 */
389 PTR sys_unshare 389 PTR sys_unshare
390 PTR sys_splice 390 PTR sys_splice
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e91379c1be1d..c58b8e0105ea 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -506,7 +506,7 @@ sys_call_table:
506 PTR sys_readlinkat 506 PTR sys_readlinkat
507 PTR sys_fchmodat 507 PTR sys_fchmodat
508 PTR sys_faccessat /* 4300 */ 508 PTR sys_faccessat /* 4300 */
509 PTR sys_pselect6 509 PTR compat_sys_pselect6
510 PTR sys_ppoll 510 PTR sys_ppoll
511 PTR sys_unshare 511 PTR sys_unshare
512 PTR sys_splice 512 PTR sys_splice
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 89440a0d8528..d2e01e7167b8 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -271,8 +271,7 @@ static void __init bootmem_init(void)
271static void __init bootmem_init(void) 271static void __init bootmem_init(void)
272{ 272{
273 unsigned long reserved_end; 273 unsigned long reserved_end;
274 unsigned long highest = 0; 274 unsigned long mapstart = ~0UL;
275 unsigned long mapstart = -1UL;
276 unsigned long bootmap_size; 275 unsigned long bootmap_size;
277 int i; 276 int i;
278 277
@@ -284,6 +283,13 @@ static void __init bootmem_init(void)
284 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); 283 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
285 284
286 /* 285 /*
286 * max_low_pfn is not a number of pages. The number of pages
287 * of the system is given by 'max_low_pfn - min_low_pfn'.
288 */
289 min_low_pfn = ~0UL;
290 max_low_pfn = 0;
291
292 /*
287 * Find the highest page frame number we have available. 293 * Find the highest page frame number we have available.
288 */ 294 */
289 for (i = 0; i < boot_mem_map.nr_map; i++) { 295 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -296,8 +302,10 @@ static void __init bootmem_init(void)
296 end = PFN_DOWN(boot_mem_map.map[i].addr 302 end = PFN_DOWN(boot_mem_map.map[i].addr
297 + boot_mem_map.map[i].size); 303 + boot_mem_map.map[i].size);
298 304
299 if (end > highest) 305 if (end > max_low_pfn)
300 highest = end; 306 max_low_pfn = end;
307 if (start < min_low_pfn)
308 min_low_pfn = start;
301 if (end <= reserved_end) 309 if (end <= reserved_end)
302 continue; 310 continue;
303 if (start >= mapstart) 311 if (start >= mapstart)
@@ -305,22 +313,36 @@ static void __init bootmem_init(void)
305 mapstart = max(reserved_end, start); 313 mapstart = max(reserved_end, start);
306 } 314 }
307 315
316 if (min_low_pfn >= max_low_pfn)
317 panic("Incorrect memory mapping !!!");
318 if (min_low_pfn > ARCH_PFN_OFFSET) {
319 printk(KERN_INFO
320 "Wasting %lu bytes for tracking %lu unused pages\n",
321 (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
322 min_low_pfn - ARCH_PFN_OFFSET);
323 } else if (min_low_pfn < ARCH_PFN_OFFSET) {
324 printk(KERN_INFO
325 "%lu free pages won't be used\n",
326 ARCH_PFN_OFFSET - min_low_pfn);
327 }
328 min_low_pfn = ARCH_PFN_OFFSET;
329
308 /* 330 /*
309 * Determine low and high memory ranges 331 * Determine low and high memory ranges
310 */ 332 */
311 if (highest > PFN_DOWN(HIGHMEM_START)) { 333 if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
312#ifdef CONFIG_HIGHMEM 334#ifdef CONFIG_HIGHMEM
313 highstart_pfn = PFN_DOWN(HIGHMEM_START); 335 highstart_pfn = PFN_DOWN(HIGHMEM_START);
314 highend_pfn = highest; 336 highend_pfn = max_low_pfn;
315#endif 337#endif
316 highest = PFN_DOWN(HIGHMEM_START); 338 max_low_pfn = PFN_DOWN(HIGHMEM_START);
317 } 339 }
318 340
319 /* 341 /*
320 * Initialize the boot-time allocator with low memory only. 342 * Initialize the boot-time allocator with low memory only.
321 */ 343 */
322 bootmap_size = init_bootmem(mapstart, highest); 344 bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
323 345 min_low_pfn, max_low_pfn);
324 /* 346 /*
325 * Register fully available low RAM pages with the bootmem allocator. 347 * Register fully available low RAM pages with the bootmem allocator.
326 */ 348 */
@@ -507,9 +529,9 @@ void __init setup_arch(char **cmdline_p)
507 529
508#if defined(CONFIG_VT) 530#if defined(CONFIG_VT)
509#if defined(CONFIG_VGA_CONSOLE) 531#if defined(CONFIG_VGA_CONSOLE)
510 conswitchp = &vga_con; 532 conswitchp = &vga_con;
511#elif defined(CONFIG_DUMMY_CONSOLE) 533#elif defined(CONFIG_DUMMY_CONSOLE)
512 conswitchp = &dummy_con; 534 conswitchp = &dummy_con;
513#endif 535#endif
514#endif 536#endif
515 537
@@ -541,3 +563,6 @@ int __init dsp_disable(char *s)
541} 563}
542 564
543__setup("nodsp", dsp_disable); 565__setup("nodsp", dsp_disable);
566
567unsigned long kernelsp[NR_CPUS];
568unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
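The reworked bootmem_init() above derives both ends of the usable pfn range from the boot memory map before handing them to init_bootmem_node(), and the new comment stresses that max_low_pfn is an upper bound, not a page count. A stripped-down sketch of that scan over a made-up memory map (types and values here are illustrative, not the kernel's boot_mem_map):

#include <stdio.h>

struct mem_range { unsigned long start_pfn, end_pfn; };

int main(void)
{
	struct mem_range map[] = { { 0x100, 0x2000 }, { 0x4000, 0x8000 } };
	unsigned long min_low_pfn = ~0UL, max_low_pfn = 0;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].start_pfn < min_low_pfn)
			min_low_pfn = map[i].start_pfn;
		if (map[i].end_pfn > max_low_pfn)
			max_low_pfn = map[i].end_pfn;
	}

	/* The number of pages is the difference, not max_low_pfn itself. */
	printf("pfns %#lx-%#lx, %lu pages\n",
	       min_low_pfn, max_low_pfn, max_low_pfn - min_low_pfn);
	return 0;
}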
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b9d358e05214..9a44053cd9f1 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -89,7 +89,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
89 spin_lock_irq(&current->sighand->siglock); 89 spin_lock_irq(&current->sighand->siglock);
90 current->saved_sigmask = current->blocked; 90 current->saved_sigmask = current->blocked;
91 current->blocked = newset; 91 current->blocked = newset;
92 recalc_sigpending(); 92 recalc_sigpending();
93 spin_unlock_irq(&current->sighand->siglock); 93 spin_unlock_irq(&current->sighand->siglock);
94 94
95 current->state = TASK_INTERRUPTIBLE; 95 current->state = TASK_INTERRUPTIBLE;
@@ -124,7 +124,7 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
124 124
125 if (!ret && oact) { 125 if (!ret && oact) {
126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) 126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
127 return -EFAULT; 127 return -EFAULT;
128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); 129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); 130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
@@ -304,7 +304,7 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
304 current->comm, current->pid, 304 current->comm, current->pid,
305 frame, regs->cp0_epc, frame->regs[31]); 305 frame, regs->cp0_epc, frame->regs[31]);
306#endif 306#endif
307 return 0; 307 return 0;
308 308
309give_sigsegv: 309give_sigsegv:
310 force_sigsegv(signr, current); 310 force_sigsegv(signr, current);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index a67c18555ed3..b28646b3ceae 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -105,7 +105,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
105 spin_lock_irq(&current->sighand->siglock); 105 spin_lock_irq(&current->sighand->siglock);
106 current->saved_sigmask = current->blocked; 106 current->saved_sigmask = current->blocked;
107 current->blocked = newset; 107 current->blocked = newset;
108 recalc_sigpending(); 108 recalc_sigpending();
109 spin_unlock_irq(&current->sighand->siglock); 109 spin_unlock_irq(&current->sighand->siglock);
110 110
111 current->state = TASK_INTERRUPTIBLE; 111 current->state = TASK_INTERRUPTIBLE;
@@ -184,7 +184,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
184 /* Create the ucontext. */ 184 /* Create the ucontext. */
185 err |= __put_user(0, &frame->rs_uc.uc_flags); 185 err |= __put_user(0, &frame->rs_uc.uc_flags);
186 err |= __put_user(0, &frame->rs_uc.uc_link); 186 err |= __put_user(0, &frame->rs_uc.uc_link);
187 sp = (int) (long) current->sas_ss_sp; 187 sp = (int) (long) current->sas_ss_sp;
188 err |= __put_user(sp, 188 err |= __put_user(sp,
189 &frame->rs_uc.uc_stack.ss_sp); 189 &frame->rs_uc.uc_stack.ss_sp);
190 err |= __put_user(sas_ss_flags(regs->regs[29]), 190 err |= __put_user(sas_ss_flags(regs->regs[29]),
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 1ee689c0e0c9..64b62bdfb4f6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -35,7 +35,6 @@
35#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/mips_mt.h> 37#include <asm/mips_mt.h>
38#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
39 38
40#define MIPS_CPU_IPI_RESCHED_IRQ 0 39#define MIPS_CPU_IPI_RESCHED_IRQ 0
41#define MIPS_CPU_IPI_CALL_IRQ 1 40#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void)
108 107
109static void ipi_resched_dispatch(void) 108static void ipi_resched_dispatch(void)
110{ 109{
111 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ); 110 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
112} 111}
113 112
114static void ipi_call_dispatch(void) 113static void ipi_call_dispatch(void)
115{ 114{
116 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ); 115 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
117} 116}
118 117
119static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 118static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
@@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
270 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); 269 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
271 } 270 }
272 271
273 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 272 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
274 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; 273 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
275 274
276 setup_irq(cpu_ipi_resched_irq, &irq_resched); 275 setup_irq(cpu_ipi_resched_irq, &irq_resched);
277 setup_irq(cpu_ipi_call_irq, &irq_call); 276 setup_irq(cpu_ipi_call_irq, &irq_call);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 6a857bf030b0..9251ea824937 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -26,16 +26,6 @@
26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. 26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
27 */ 27 */
28 28
29/*
30 * MIPSCPU_INT_BASE is identically defined in both
31 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
32 * but as yet there's no properly organized include structure that
33 * will ensure that the right *int.h file will be included for a
34 * given platform build.
35 */
36
37#define MIPSCPU_INT_BASE 16
38
39#define MIPS_CPU_IPI_IRQ 1 29#define MIPS_CPU_IPI_IRQ 1
40 30
41#define LOCK_MT_PRA() \ 31#define LOCK_MT_PRA() \
@@ -77,15 +67,15 @@ unsigned int ipi_timer_latch[NR_CPUS];
77 67
78#define IPIBUF_PER_CPU 4 68#define IPIBUF_PER_CPU 4
79 69
80struct smtc_ipi_q IPIQ[NR_CPUS]; 70static struct smtc_ipi_q IPIQ[NR_CPUS];
81struct smtc_ipi_q freeIPIq; 71static struct smtc_ipi_q freeIPIq;
82 72
83 73
84/* Forward declarations */ 74/* Forward declarations */
85 75
86void ipi_decode(struct smtc_ipi *); 76void ipi_decode(struct smtc_ipi *);
87void post_direct_ipi(int cpu, struct smtc_ipi *pipi); 77static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
88void setup_cross_vpe_interrupts(void); 78static void setup_cross_vpe_interrupts(void);
89void init_smtc_stats(void); 79void init_smtc_stats(void);
90 80
91/* Global SMTC Status */ 81/* Global SMTC Status */
@@ -200,7 +190,7 @@ void __init sanitize_tlb_entries(void)
200 * Configure shared TLB - VPC configuration bit must be set by caller 190 * Configure shared TLB - VPC configuration bit must be set by caller
201 */ 191 */
202 192
203void smtc_configure_tlb(void) 193static void smtc_configure_tlb(void)
204{ 194{
205 int i,tlbsiz,vpes; 195 int i,tlbsiz,vpes;
206 unsigned long mvpconf0; 196 unsigned long mvpconf0;
@@ -648,7 +638,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
648 * the VPE. 638 * the VPE.
649 */ 639 */
650 640
651void smtc_ipi_qdump(void) 641static void smtc_ipi_qdump(void)
652{ 642{
653 int i; 643 int i;
654 644
@@ -686,28 +676,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
686 return result; 676 return result;
687} 677}
688 678
689/* No longer used in IPI dispatch, but retained for future recycling */
690
691static __inline__ int atomic_postclear(unsigned int *pv)
692{
693 unsigned long result;
694
695 unsigned long temp;
696
697 __asm__ __volatile__(
698 "1: ll %0, %2 \n"
699 " or %1, $0, $0 \n"
700 " sc %1, %2 \n"
701 " beqz %1, 1b \n"
702 " sync \n"
703 : "=&r" (result), "=&r" (temp), "=m" (*pv)
704 : "m" (*pv)
705 : "memory");
706
707 return result;
708}
709
710
711void smtc_send_ipi(int cpu, int type, unsigned int action) 679void smtc_send_ipi(int cpu, int type, unsigned int action)
712{ 680{
713 int tcstatus; 681 int tcstatus;
@@ -781,7 +749,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
781/* 749/*
782 * Send IPI message to Halted TC, TargTC/TargVPE already having been set 750 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
783 */ 751 */
784void post_direct_ipi(int cpu, struct smtc_ipi *pipi) 752static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
785{ 753{
786 struct pt_regs *kstack; 754 struct pt_regs *kstack;
787 unsigned long tcstatus; 755 unsigned long tcstatus;
@@ -921,7 +889,7 @@ void smtc_timer_broadcast(int vpe)
921 * interrupts. 889 * interrupts.
922 */ 890 */
923 891
924static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; 892static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
925 893
926static irqreturn_t ipi_interrupt(int irq, void *dev_idm) 894static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
927{ 895{
@@ -1000,7 +968,7 @@ static void ipi_irq_dispatch(void)
1000 968
1001static struct irqaction irq_ipi; 969static struct irqaction irq_ipi;
1002 970
1003void setup_cross_vpe_interrupts(void) 971static void setup_cross_vpe_interrupts(void)
1004{ 972{
1005 if (!cpu_has_vint) 973 if (!cpu_has_vint)
1006 panic("SMTC Kernel requires Vectored Interupt support"); 974 panic("SMTC Kernel requires Vectored Interupt support");
@@ -1191,7 +1159,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1191 * It would be nice to be able to use a spinlock here, 1159 * It would be nice to be able to use a spinlock here,
1192 * but this is invoked from within TLB flush routines 1160 * but this is invoked from within TLB flush routines
1193 * that protect themselves with DVPE, so if a lock is 1161 * that protect themselves with DVPE, so if a lock is
1194 * held by another TC, it'll never be freed. 1162 * held by another TC, it'll never be freed.
1195 * 1163 *
1196 * DVPE/DMT must not be done with interrupts enabled, 1164 * DVPE/DMT must not be done with interrupts enabled,
1197 * so even so most callers will already have disabled 1165 * so even so most callers will already have disabled
@@ -1296,7 +1264,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1296 * Support for single-threading cache flush operations. 1264 * Support for single-threading cache flush operations.
1297 */ 1265 */
1298 1266
1299int halt_state_save[NR_CPUS]; 1267static int halt_state_save[NR_CPUS];
1300 1268
1301/* 1269/*
1302 * To really, really be sure that nothing is being done 1270 * To really, really be sure that nothing is being done
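Among the smtc.c cleanups above, the unused atomic_postclear() ll/sc helper is deleted outright. Its effect was an atomic exchange-with-zero that returns the previous value; a rough equivalent written with a GCC builtin rather than the kernel's own atomic API, purely as a sketch of what the removed loop did:

/* Atomically read *pv and leave zero behind, returning the old value
 * (mirrors the ll/or-zero/sc/retry sequence that was removed). */
static inline unsigned int atomic_postclear(unsigned int *pv)
{
	return __atomic_exchange_n(pv, 0, __ATOMIC_SEQ_CST);
}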
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 6c2406a93f2b..93a148486f88 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -669,7 +669,7 @@ asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
669 669
670struct irix_statfs { 670struct irix_statfs {
671 short f_type; 671 short f_type;
672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; 672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
673 char f_fname[6], f_fpack[6]; 673 char f_fname[6], f_fpack[6];
674}; 674};
675 675
@@ -959,7 +959,7 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin)
959 959
960 fn = default_llseek; 960 fn = default_llseek;
961 if (file->f_op && file->f_op->llseek) 961 if (file->f_op && file->f_op->llseek)
962 fn = file->f_op->llseek; 962 fn = file->f_op->llseek;
963 lock_kernel(); 963 lock_kernel();
964 retval = fn(file, offset, origin); 964 retval = fn(file, offset, origin);
965 unlock_kernel(); 965 unlock_kernel();
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 458fccf87c54..459624969c99 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -522,7 +522,7 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
522}; 522};
523 523
524static char *rstrs[] = { 524static char *rstrs[] = {
525 [R_MIPS_NONE] = "MIPS_NONE", 525 [R_MIPS_NONE] = "MIPS_NONE",
526 [R_MIPS_32] = "MIPS_32", 526 [R_MIPS_32] = "MIPS_32",
527 [R_MIPS_26] = "MIPS_26", 527 [R_MIPS_26] = "MIPS_26",
528 [R_MIPS_HI16] = "MIPS_HI16", 528 [R_MIPS_HI16] = "MIPS_HI16",
@@ -695,7 +695,7 @@ static void dump_tclist(void)
695} 695}
696 696
697/* We are prepared so configure and start the VPE... */ 697/* We are prepared so configure and start the VPE... */
698int vpe_run(struct vpe * v) 698static int vpe_run(struct vpe * v)
699{ 699{
700 struct vpe_notifications *n; 700 struct vpe_notifications *n;
701 unsigned long val, dmt_flag; 701 unsigned long val, dmt_flag;
@@ -713,16 +713,16 @@ int vpe_run(struct vpe * v)
713 dvpe(); 713 dvpe();
714 714
715 if (!list_empty(&v->tc)) { 715 if (!list_empty(&v->tc)) {
716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { 716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", 717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
718 t->index); 718 t->index);
719 return -ENOEXEC; 719 return -ENOEXEC;
720 } 720 }
721 } else { 721 } else {
722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", 722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
723 v->minor); 723 v->minor);
724 return -ENOEXEC; 724 return -ENOEXEC;
725 } 725 }
726 726
727 /* Put MVPE's into 'configuration state' */ 727 /* Put MVPE's into 'configuration state' */
728 set_c0_mvpcontrol(MVPCONTROL_VPC); 728 set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -775,14 +775,14 @@ int vpe_run(struct vpe * v)
775 775
776 back_to_back_c0_hazard(); 776 back_to_back_c0_hazard();
777 777
778 /* Set up the XTC bit in vpeconf0 to point at our tc */ 778 /* Set up the XTC bit in vpeconf0 to point at our tc */
779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) 779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
780 | (t->index << VPECONF0_XTC_SHIFT)); 780 | (t->index << VPECONF0_XTC_SHIFT));
781 781
782 back_to_back_c0_hazard(); 782 back_to_back_c0_hazard();
783 783
784 /* enable this VPE */ 784 /* enable this VPE */
785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
786 786
787 /* clear out any left overs from a previous program */ 787 /* clear out any left overs from a previous program */
788 write_vpe_c0_status(0); 788 write_vpe_c0_status(0);
@@ -832,7 +832,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
832 * contents of the program (p)buffer performing relocatations/etc, free's it 832 * contents of the program (p)buffer performing relocatations/etc, free's it
833 * when finished. 833 * when finished.
834 */ 834 */
835int vpe_elfload(struct vpe * v) 835static int vpe_elfload(struct vpe * v)
836{ 836{
837 Elf_Ehdr *hdr; 837 Elf_Ehdr *hdr;
838 Elf_Shdr *sechdrs; 838 Elf_Shdr *sechdrs;
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 2affa5ff171c..9a622b9a1051 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -45,7 +45,7 @@ void enable_lasat_irq(unsigned int irq_nr)
45} 45}
46 46
47static struct irq_chip lasat_irq_type = { 47static struct irq_chip lasat_irq_type = {
48 .typename = "Lasat", 48 .name = "Lasat",
49 .ack = disable_lasat_irq, 49 .ack = disable_lasat_irq,
50 .mask = disable_lasat_irq, 50 .mask = disable_lasat_irq,
51 .mask_ack = disable_lasat_irq, 51 .mask_ack = disable_lasat_irq,
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 88c7ab871ec4..d47692f73a26 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM); 132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM);
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140const char *get_system_type(void) 139const char *get_system_type(void)
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/memset.S b/arch/mips/lib-64/memset.S
deleted file mode 100644
index e2c42c85113b..000000000000
--- a/arch/mips/lib-64/memset.S
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#include <asm/asm.h>
10#include <asm/asm-offsets.h>
11#include <asm/regdef.h>
12
13#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \
15 .section __ex_table,"a"; \
16 PTR 9b, handler; \
17 .previous
18
19 .macro f_fill64 dst, offset, val, fixup
20 EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
21 EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
22 EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
23 EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
24 EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
28 .endm
29
30/*
31 * memset(void *s, int c, size_t n)
32 *
33 * a0: start of area to clear
34 * a1: char to fill with
35 * a2: size of area to clear
36 */
37 .set noreorder
38 .align 5
39LEAF(memset)
40 beqz a1, 1f
41 move v0, a0 /* result */
42
43 andi a1, 0xff /* spread fillword */
44 dsll t1, a1, 8
45 or a1, t1
46 dsll t1, a1, 16
47 or a1, t1
48 dsll t1, a1, 32
49 or a1, t1
501:
51
52FEXPORT(__bzero)
53 sltiu t0, a2, LONGSIZE /* very small region? */
54 bnez t0, small_memset
55 andi t0, a0, LONGMASK /* aligned? */
56
57 beqz t0, 1f
58 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
59
60#ifdef __MIPSEB__
61 EX(sdl, a1, (a0), first_fixup) /* make dword aligned */
62#endif
63#ifdef __MIPSEL__
64 EX(sdr, a1, (a0), first_fixup) /* make dword aligned */
65#endif
66 PTR_SUBU a0, t0 /* long align ptr */
67 PTR_ADDU a2, t0 /* correct size */
68
691: ori t1, a2, 0x3f /* # of full blocks */
70 xori t1, 0x3f
71 beqz t1, memset_partial /* no block to fill */
72 andi t0, a2, 0x38
73
74 PTR_ADDU t1, a0 /* end address */
75 .set reorder
761: PTR_ADDIU a0, 64
77 f_fill64 a0, -64, a1, fwd_fixup
78 bne t1, a0, 1b
79 .set noreorder
80
81memset_partial:
82 PTR_LA t1, 2f /* where to start */
83 .set noat
84 dsrl AT, t0, 1
85 PTR_SUBU t1, AT
86 .set noat
87 jr t1
88 PTR_ADDU a0, t0 /* dest ptr */
89
90 .set push
91 .set noreorder
92 .set nomacro
93 f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */
942: .set pop
95 andi a2, LONGMASK /* At most one long to go */
96
97 beqz a2, 1f
98 PTR_ADDU a0, a2 /* What's left */
99#ifdef __MIPSEB__
100 EX(sdr, a1, -1(a0), last_fixup)
101#endif
102#ifdef __MIPSEL__
103 EX(sdl, a1, -1(a0), last_fixup)
104#endif
1051: jr ra
106 move a2, zero
107
108small_memset:
109 beqz a2, 2f
110 PTR_ADDU t1, a0, a2
111
1121: PTR_ADDIU a0, 1 /* fill bytewise */
113 bne t1, a0, 1b
114 sb a1, -1(a0)
115
1162: jr ra /* done */
117 move a2, zero
118 END(memset)
119
120first_fixup:
121 jr ra
122 nop
123
124fwd_fixup:
125 PTR_L t0, TI_TASK($28)
126 LONG_L t0, THREAD_BUADDR(t0)
127 andi a2, 0x3f
128 LONG_ADDU a2, t1
129 jr ra
130 LONG_SUBU a2, t0
131
132partial_fixup:
133 PTR_L t0, TI_TASK($28)
134 LONG_L t0, THREAD_BUADDR(t0)
135 andi a2, LONGMASK
136 LONG_ADDU a2, t1
137 jr ra
138 LONG_SUBU a2, t0
139
140last_fixup:
141 jr ra
142 andi v1, a2, LONGMASK
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 989c900b8b14..5ad501b30b43 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += csum_partial.o memcpy.o promlib.o \ 5lib-y += csum_partial.o memcpy.o memset.o promlib.o \
6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o 6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o
7 7
8obj-y += iomap.o 8obj-y += iomap.o
diff --git a/arch/mips/lib-32/memset.S b/arch/mips/lib/memset.S
index 1981485bd48b..3f8b8b3d0b23 100644
--- a/arch/mips/lib-32/memset.S
+++ b/arch/mips/lib/memset.S
@@ -10,6 +10,14 @@
10#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
11#include <asm/regdef.h> 11#include <asm/regdef.h>
12 12
13#if LONGSIZE == 4
14#define LONG_S_L swl
15#define LONG_S_R swr
16#else
17#define LONG_S_L sdl
18#define LONG_S_R sdr
19#endif
20
13#define EX(insn,reg,addr,handler) \ 21#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \ 229: insn reg, addr; \
15 .section __ex_table,"a"; \ 23 .section __ex_table,"a"; \
@@ -25,6 +33,7 @@
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) 33 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) 34 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) 35 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
36#if LONGSIZE == 4
28 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) 37 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup)
29 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) 38 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup)
30 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) 39 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
@@ -33,6 +42,7 @@
33 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) 42 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
34 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) 43 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
35 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) 44 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
45#endif
36 .endm 46 .endm
37 47
38/* 48/*
@@ -49,9 +59,13 @@ LEAF(memset)
49 move v0, a0 /* result */ 59 move v0, a0 /* result */
50 60
51 andi a1, 0xff /* spread fillword */ 61 andi a1, 0xff /* spread fillword */
52 sll t1, a1, 8 62 LONG_SLL t1, a1, 8
53 or a1, t1 63 or a1, t1
54 sll t1, a1, 16 64 LONG_SLL t1, a1, 16
65#if LONGSIZE == 8
66 or a1, t1
67 LONG_SLL t1, a1, 32
68#endif
55 or a1, t1 69 or a1, t1
561: 701:
57 71
@@ -64,10 +78,10 @@ FEXPORT(__bzero)
64 PTR_SUBU t0, LONGSIZE /* alignment in bytes */ 78 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
65 79
66#ifdef __MIPSEB__ 80#ifdef __MIPSEB__
67 EX(swl, a1, (a0), first_fixup) /* make word aligned */ 81 EX(LONG_S_L, a1, (a0), first_fixup) /* make word/dword aligned */
68#endif 82#endif
69#ifdef __MIPSEL__ 83#ifdef __MIPSEL__
70 EX(swr, a1, (a0), first_fixup) /* make word aligned */ 84 EX(LONG_S_R, a1, (a0), first_fixup) /* make word/dword aligned */
71#endif 85#endif
72 PTR_SUBU a0, t0 /* long align ptr */ 86 PTR_SUBU a0, t0 /* long align ptr */
73 PTR_ADDU a2, t0 /* correct size */ 87 PTR_ADDU a2, t0 /* correct size */
@@ -75,7 +89,7 @@ FEXPORT(__bzero)
751: ori t1, a2, 0x3f /* # of full blocks */ 891: ori t1, a2, 0x3f /* # of full blocks */
76 xori t1, 0x3f 90 xori t1, 0x3f
77 beqz t1, memset_partial /* no block to fill */ 91 beqz t1, memset_partial /* no block to fill */
78 andi t0, a2, 0x3c 92 andi t0, a2, 0x40-LONGSIZE
79 93
80 PTR_ADDU t1, a0 /* end address */ 94 PTR_ADDU t1, a0 /* end address */
81 .set reorder 95 .set reorder
@@ -86,7 +100,14 @@ FEXPORT(__bzero)
86 100
87memset_partial: 101memset_partial:
88 PTR_LA t1, 2f /* where to start */ 102 PTR_LA t1, 2f /* where to start */
103#if LONGSIZE == 4
89 PTR_SUBU t1, t0 104 PTR_SUBU t1, t0
105#else
106 .set noat
107 LONG_SRL AT, t0, 1
108 PTR_SUBU t1, AT
109 .set at
110#endif
90 jr t1 111 jr t1
91 PTR_ADDU a0, t0 /* dest ptr */ 112 PTR_ADDU a0, t0 /* dest ptr */
92 113
@@ -100,10 +121,10 @@ memset_partial:
100 beqz a2, 1f 121 beqz a2, 1f
101 PTR_ADDU a0, a2 /* What's left */ 122 PTR_ADDU a0, a2 /* What's left */
102#ifdef __MIPSEB__ 123#ifdef __MIPSEB__
103 EX(swr, a1, -1(a0), last_fixup) 124 EX(LONG_S_R, a1, -1(a0), last_fixup)
104#endif 125#endif
105#ifdef __MIPSEL__ 126#ifdef __MIPSEL__
106 EX(swl, a1, -1(a0), last_fixup) 127 EX(LONG_S_L, a1, -1(a0), last_fixup)
107#endif 128#endif
1081: jr ra 1291: jr ra
109 move a2, zero 130 move a2, zero
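With the LONG_SLL and LONGSIZE conditionals added above, the same source now builds the fill word for both 32-bit and 64-bit kernels. A hedged C sketch of that byte-spreading step, using __SIZEOF_LONG__ as a stand-in for LONGSIZE:

	/* Spread the fill byte across one native long, as the patched
	 * "spread fillword" sequence does with LONG_SLL. */
	static unsigned long spread_fill(unsigned char c)
	{
		unsigned long fill = c;

		fill |= fill << 8;
		fill |= fill << 16;
	#if __SIZEOF_LONG__ == 8
		fill |= fill << 32;	/* extra step only when LONGSIZE == 8 */
	#endif
		return fill;
	}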
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 98ce89f8068b..2388f7f3ffde 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -44,20 +44,24 @@ unsigned long __init run_uncached(void *func)
44 44
45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2) 45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
46 usp = CKSEG1ADDR(sp); 46 usp = CKSEG1ADDR(sp);
47#ifdef CONFIG_64BIT
47 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 48 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
48 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0)) 49 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0))
49 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 50 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
50 XKPHYS_TO_PHYS((long long)sp)); 51 XKPHYS_TO_PHYS((long long)sp));
52#endif
51 else { 53 else {
52 BUG(); 54 BUG();
53 usp = sp; 55 usp = sp;
54 } 56 }
55 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2) 57 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
56 ufunc = CKSEG1ADDR(lfunc); 58 ufunc = CKSEG1ADDR(lfunc);
59#ifdef CONFIG_64BIT
57 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 60 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
58 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0)) 61 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0))
59 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 62 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
60 XKPHYS_TO_PHYS((long long)lfunc)); 63 XKPHYS_TO_PHYS((long long)lfunc));
64#endif
61 else { 65 else {
62 BUG(); 66 BUG();
63 ufunc = lfunc; 67 ufunc = lfunc;
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index 43dba6ce6603..dfa0acbd7fc2 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/kernel_stat.h> 34#include <linux/kernel_stat.h>
35#include <linux/kernel.h>
35 36
36#include <asm/gdb-stub.h> 37#include <asm/gdb-stub.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -69,7 +70,7 @@ static void end_atlas_irq(unsigned int irq)
69} 70}
70 71
71static struct irq_chip atlas_irq_type = { 72static struct irq_chip atlas_irq_type = {
72 .typename = "Atlas", 73 .name = "Atlas",
73 .ack = disable_atlas_irq, 74 .ack = disable_atlas_irq,
74 .mask = disable_atlas_irq, 75 .mask = disable_atlas_irq,
75 .mask_ack = disable_atlas_irq, 76 .mask_ack = disable_atlas_irq,
@@ -220,7 +221,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
220 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 221 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
221 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 222 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
222}; 223};
223int __initdata msc_nr_irqs = sizeof(msc_irqmap) / sizeof(*msc_irqmap); 224int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
224 225
225msc_irqmap_t __initdata msc_eicirqmap[] = { 226msc_irqmap_t __initdata msc_eicirqmap[] = {
226 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 227 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -231,14 +232,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
231 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 232 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
232 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 233 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
233}; 234};
234int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap) / sizeof(*msc_eicirqmap); 235int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
235 236
236void __init arch_init_irq(void) 237void __init arch_init_irq(void)
237{ 238{
238 init_atlas_irqs(ATLAS_INT_BASE); 239 init_atlas_irqs(ATLAS_INT_BASE);
239 240
240 if (!cpu_has_veic) 241 if (!cpu_has_veic)
241 mips_cpu_irq_init(MIPSCPU_INT_BASE); 242 mips_cpu_irq_init();
242 243
243 switch(mips_revision_corid) { 244 switch(mips_revision_corid) {
244 case MIPS_REVISION_CORID_CORE_MSC: 245 case MIPS_REVISION_CORID_CORE_MSC:
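The sizeof(map)/sizeof(*map) expressions replaced in this hunk are what the kernel's ARRAY_SIZE() macro from <linux/kernel.h> expands to, which is why that include is added at the top of the file. For reference, the conventional definition is essentially:

	#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

	int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);	/* as in the hunk */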
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c
index eeed944e0f83..ebf0e16c5a0d 100644
--- a/arch/mips/mips-boards/generic/memory.c
+++ b/arch/mips/mips-boards/generic/memory.c
@@ -166,9 +166,8 @@ void __init prom_meminit(void)
166 } 166 }
167} 167}
168 168
169unsigned long __init prom_free_prom_memory(void) 169void __init prom_free_prom_memory(void)
170{ 170{
171 unsigned long freed = 0;
172 unsigned long addr; 171 unsigned long addr;
173 int i; 172 int i;
174 173
@@ -176,17 +175,8 @@ unsigned long __init prom_free_prom_memory(void)
176 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 175 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
177 continue; 176 continue;
178 177
179 addr = PAGE_ALIGN(boot_mem_map.map[i].addr); 178 addr = boot_mem_map.map[i].addr;
180 while (addr < boot_mem_map.map[i].addr 179 free_init_pages("prom memory",
181 + boot_mem_map.map[i].size) { 180 addr, addr + boot_mem_map.map[i].size);
182 ClearPageReserved(virt_to_page(__va(addr)));
183 init_page_count(virt_to_page(__va(addr)));
184 free_page((unsigned long)__va(addr));
185 addr += PAGE_SIZE;
186 freed += PAGE_SIZE;
187 }
188 } 181 }
189 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
190
191 return freed;
192} 182}
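The page-by-page loop removed here moves behind the shared free_init_pages() helper in arch/mips/mm/init.c, which a later hunk of this patch makes non-static for exactly this purpose. A minimal sketch of the pattern, assuming it mirrors the deleted loop rather than quoting the helper's exact body:

	#include <linux/mm.h>	/* ClearPageReserved, init_page_count, free_page */
	#include <asm/page.h>	/* PAGE_ALIGN, PAGE_SIZE, __va, virt_to_page */

	/* Sketch only: hand a physical range back to the page allocator. */
	static void free_prom_range(unsigned long begin, unsigned long end)
	{
		unsigned long addr;

		for (addr = PAGE_ALIGN(begin); addr < end; addr += PAGE_SIZE) {
			struct page *page = virt_to_page(__va(addr));

			ClearPageReserved(page);
			init_page_count(page);
			free_page((unsigned long)__va(addr));
		}
	}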
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 90ad5bf3e2f1..3c206bb17160 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/kernel_stat.h> 29#include <linux/kernel_stat.h>
30#include <linux/kernel.h>
30#include <linux/random.h> 31#include <linux/random.h>
31 32
32#include <asm/i8259.h> 33#include <asm/i8259.h>
@@ -289,7 +290,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
289 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 290 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
290 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 291 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
291}; 292};
292int __initdata msc_nr_irqs = sizeof(msc_irqmap)/sizeof(msc_irqmap_t); 293int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
293 294
294msc_irqmap_t __initdata msc_eicirqmap[] = { 295msc_irqmap_t __initdata msc_eicirqmap[] = {
295 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 296 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -303,14 +304,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
303 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 304 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
304 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 305 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
305}; 306};
306int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap)/sizeof(msc_irqmap_t); 307int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
307 308
308void __init arch_init_irq(void) 309void __init arch_init_irq(void)
309{ 310{
310 init_i8259_irqs(); 311 init_i8259_irqs();
311 312
312 if (!cpu_has_veic) 313 if (!cpu_has_veic)
313 mips_cpu_irq_init (MIPSCPU_INT_BASE); 314 mips_cpu_irq_init();
314 315
315 switch(mips_revision_corid) { 316 switch(mips_revision_corid) {
316 case MIPS_REVISION_CORID_CORE_MSC: 317 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index 874ccb0066b8..c4b9de3a7f27 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -113,5 +113,5 @@ asmlinkage void plat_irq_dispatch(void)
113 113
114void __init arch_init_irq(void) 114void __init arch_init_irq(void)
115{ 115{
116 mips_cpu_irq_init(MIPSCPU_INT_BASE); 116 mips_cpu_irq_init();
117} 117}
diff --git a/arch/mips/mips-boards/sim/sim_int.c b/arch/mips/mips-boards/sim/sim_int.c
index 2ce449dce6f2..15ac0655c1ff 100644
--- a/arch/mips/mips-boards/sim/sim_int.c
+++ b/arch/mips/mips-boards/sim/sim_int.c
@@ -21,9 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel_stat.h> 22#include <linux/kernel_stat.h>
23#include <asm/mips-boards/simint.h> 23#include <asm/mips-boards/simint.h>
24 24#include <asm/irq_cpu.h>
25
26extern void mips_cpu_irq_init(int);
27 25
28static inline int clz(unsigned long x) 26static inline int clz(unsigned long x)
29{ 27{
@@ -86,5 +84,5 @@ asmlinkage void plat_irq_dispatch(void)
86 84
87void __init arch_init_irq(void) 85void __init arch_init_irq(void)
88{ 86{
89 mips_cpu_irq_init(MIPSCPU_INT_BASE); 87 mips_cpu_irq_init();
90} 88}
diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c
index f7ce76983328..46bc16f8b15d 100644
--- a/arch/mips/mips-boards/sim/sim_mem.c
+++ b/arch/mips/mips-boards/sim/sim_mem.c
@@ -99,10 +99,9 @@ void __init prom_meminit(void)
99 } 99 }
100} 100}
101 101
102unsigned long __init prom_free_prom_memory(void) 102void __init prom_free_prom_memory(void)
103{ 103{
104 int i; 104 int i;
105 unsigned long freed = 0;
106 unsigned long addr; 105 unsigned long addr;
107 106
108 for (i = 0; i < boot_mem_map.nr_map; i++) { 107 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -110,16 +109,7 @@ unsigned long __init prom_free_prom_memory(void)
110 continue; 109 continue;
111 110
112 addr = boot_mem_map.map[i].addr; 111 addr = boot_mem_map.map[i].addr;
113 while (addr < boot_mem_map.map[i].addr 112 free_init_pages("prom memory",
114 + boot_mem_map.map[i].size) { 113 addr, addr + boot_mem_map.map[i].size);
115 ClearPageReserved(virt_to_page(__va(addr)));
116 init_page_count(virt_to_page(__va(addr)));
117 free_page((unsigned long)__va(addr));
118 addr += PAGE_SIZE;
119 freed += PAGE_SIZE;
120 }
121 } 114 }
122 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
123
124 return freed;
125} 115}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 49065c133ebf..125a4a85ec05 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -341,7 +341,6 @@ static int __init page_is_ram(unsigned long pagenr)
341void __init paging_init(void) 341void __init paging_init(void)
342{ 342{
343 unsigned long zones_size[MAX_NR_ZONES] = { 0, }; 343 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
344 unsigned long max_dma, low;
345#ifndef CONFIG_FLATMEM 344#ifndef CONFIG_FLATMEM
346 unsigned long zholes_size[MAX_NR_ZONES] = { 0, }; 345 unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
347 unsigned long i, j, pfn; 346 unsigned long i, j, pfn;
@@ -354,19 +353,19 @@ void __init paging_init(void)
354#endif 353#endif
355 kmap_coherent_init(); 354 kmap_coherent_init();
356 355
357 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
358 low = max_low_pfn;
359
360#ifdef CONFIG_ISA 356#ifdef CONFIG_ISA
361 if (low < max_dma) 357 if (max_low_pfn >= MAX_DMA_PFN)
362 zones_size[ZONE_DMA] = low; 358 if (min_low_pfn >= MAX_DMA_PFN) {
363 else { 359 zones_size[ZONE_DMA] = 0;
364 zones_size[ZONE_DMA] = max_dma; 360 zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
365 zones_size[ZONE_NORMAL] = low - max_dma; 361 } else {
366 } 362 zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
367#else 363 zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
368 zones_size[ZONE_DMA] = low; 364 }
365 else
369#endif 366#endif
367 zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
368
370#ifdef CONFIG_HIGHMEM 369#ifdef CONFIG_HIGHMEM
371 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn; 370 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
372 371
@@ -467,7 +466,7 @@ void __init mem_init(void)
467} 466}
468#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 467#endif /* !CONFIG_NEED_MULTIPLE_NODES */
469 468
470static void free_init_pages(char *what, unsigned long begin, unsigned long end) 469void free_init_pages(const char *what, unsigned long begin, unsigned long end)
471{ 470{
472 unsigned long pfn; 471 unsigned long pfn;
473 472
@@ -493,18 +492,25 @@ void free_initrd_mem(unsigned long start, unsigned long end)
493} 492}
494#endif 493#endif
495 494
496extern unsigned long prom_free_prom_memory(void);
497
498void free_initmem(void) 495void free_initmem(void)
499{ 496{
500 unsigned long freed; 497 prom_free_prom_memory();
501
502 freed = prom_free_prom_memory();
503 if (freed)
504 printk(KERN_INFO "Freeing firmware memory: %ldkb freed\n",
505 freed >> 10);
506
507 free_init_pages("unused kernel memory", 498 free_init_pages("unused kernel memory",
508 __pa_symbol(&__init_begin), 499 __pa_symbol(&__init_begin),
509 __pa_symbol(&__init_end)); 500 __pa_symbol(&__init_end));
510} 501}
502
503unsigned long pgd_current[NR_CPUS];
504/*
505 * On 64-bit we've got three-level pagetables with a slightly
506 * different layout ...
507 */
508#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
509pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
510#ifdef CONFIG_64BIT
511#ifdef MODULE_START
512pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
513#endif
514pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
515#endif
516pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
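As a quick check of the reworked zone arithmetic with made-up numbers: on a 4 KB page system with min_low_pfn = 0, max_low_pfn = 0x8000 (128 MB) and the ISA DMA window ending at 16 MB (MAX_DMA_PFN = 0x1000), the new CONFIG_ISA branch gives zones_size[ZONE_DMA] = 0x1000 pages (16 MB) and zones_size[ZONE_NORMAL] = 0x7000 pages (112 MB); without CONFIG_ISA all 0x8000 pages fall into ZONE_DMA, matching what the removed max_dma/low computation produced.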
diff --git a/arch/mips/momentum/jaguar_atx/Makefile b/arch/mips/momentum/jaguar_atx/Makefile
index 67372f3f9654..2e8cebd49bc0 100644
--- a/arch/mips/momentum/jaguar_atx/Makefile
+++ b/arch/mips/momentum/jaguar_atx/Makefile
@@ -6,7 +6,7 @@
6# unless it's something special (ie not a .c file). 6# unless it's something special (ie not a .c file).
7# 7#
8 8
9obj-y += irq.o prom.o reset.o setup.o 9obj-y += irq.o platform.o prom.o reset.o setup.o
10 10
11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o 11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o
12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o 12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o
diff --git a/arch/mips/momentum/jaguar_atx/irq.c b/arch/mips/momentum/jaguar_atx/irq.c
index 2efb25aa1aed..f2b432585df2 100644
--- a/arch/mips/momentum/jaguar_atx/irq.c
+++ b/arch/mips/momentum/jaguar_atx/irq.c
@@ -82,8 +82,8 @@ void __init arch_init_irq(void)
82 */ 82 */
83 clear_c0_status(ST0_IM); 83 clear_c0_status(ST0_IM);
84 84
85 mips_cpu_irq_init(0); 85 mips_cpu_irq_init();
86 rm7k_cpu_irq_init(8); 86 rm7k_cpu_irq_init();
87 87
88 /* set up the cascading interrupts */ 88 /* set up the cascading interrupts */
89 setup_irq(8, &cascade_mv64340); 89 setup_irq(8, &cascade_mv64340);
diff --git a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
index 6978654c712b..022f6974b76e 100644
--- a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
+++ b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
@@ -46,7 +46,9 @@
46 46
47extern unsigned long ja_fpga_base; 47extern unsigned long ja_fpga_base;
48 48
49#define JAGUAR_FPGA_WRITE(x,y) writeb(x, ja_fpga_base + JAGUAR_ATX_REG_##y) 49#define __FPGA_REG_TO_ADDR(reg) \
50#define JAGUAR_FPGA_READ(x) readb(ja_fpga_base + JAGUAR_ATX_REG_##x) 50 ((void *) ja_fpga_base + JAGUAR_ATX_REG_##reg)
51#define JAGUAR_FPGA_WRITE(x, reg) writeb(x, __FPGA_REG_TO_ADDR(reg))
52#define JAGUAR_FPGA_READ(reg) readb(__FPGA_REG_TO_ADDR(reg))
51 53
52#endif 54#endif
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c
new file mode 100644
index 000000000000..035ea5137c71
--- /dev/null
+++ b/arch/mips/momentum/jaguar_atx/platform.c
@@ -0,0 +1,235 @@
1#include <linux/delay.h>
2#include <linux/if_ether.h>
3#include <linux/ioport.h>
4#include <linux/mv643xx.h>
5#include <linux/platform_device.h>
6
7#include "jaguar_atx_fpga.h"
8
9#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
10
11static struct resource mv643xx_eth_shared_resources[] = {
12 [0] = {
13 .name = "ethernet shared base",
14 .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
15 .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
16 MV643XX_ETH_SHARED_REGS_SIZE - 1,
17 .flags = IORESOURCE_MEM,
18 },
19};
20
21static struct platform_device mv643xx_eth_shared_device = {
22 .name = MV643XX_ETH_SHARED_NAME,
23 .id = 0,
24 .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
25 .resource = mv643xx_eth_shared_resources,
26};
27
28#define MV_SRAM_BASE 0xfe000000UL
29#define MV_SRAM_SIZE (256 * 1024)
30
31#define MV_SRAM_RXRING_SIZE (MV_SRAM_SIZE / 4)
32#define MV_SRAM_TXRING_SIZE (MV_SRAM_SIZE / 4)
33
34#define MV_SRAM_BASE_ETH0 MV_SRAM_BASE
35#define MV_SRAM_BASE_ETH1 (MV_SRAM_BASE + (MV_SRAM_SIZE / 2))
36
37#define MV64x60_IRQ_ETH_0 48
38#define MV64x60_IRQ_ETH_1 49
39#define MV64x60_IRQ_ETH_2 50
40
41#ifdef CONFIG_MV643XX_ETH_0
42
43static struct resource mv64x60_eth0_resources[] = {
44 [0] = {
45 .name = "eth0 irq",
46 .start = MV64x60_IRQ_ETH_0,
47 .end = MV64x60_IRQ_ETH_0,
48 .flags = IORESOURCE_IRQ,
49 },
50};
51
52static char eth0_mac_addr[ETH_ALEN];
53
54static struct mv643xx_eth_platform_data eth0_pd = {
55 .mac_addr = eth0_mac_addr,
56
57 .tx_sram_addr = MV_SRAM_BASE_ETH0,
58 .tx_sram_size = MV_SRAM_TXRING_SIZE,
59 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
60
61 .rx_sram_addr = MV_SRAM_BASE_ETH0 + MV_SRAM_TXRING_SIZE,
62 .rx_sram_size = MV_SRAM_RXRING_SIZE,
63 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
64};
65
66static struct platform_device eth0_device = {
67 .name = MV643XX_ETH_NAME,
68 .id = 0,
69 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
70 .resource = mv64x60_eth0_resources,
71 .dev = {
72 .platform_data = &eth0_pd,
73 },
74};
75#endif /* CONFIG_MV643XX_ETH_0 */
76
77#ifdef CONFIG_MV643XX_ETH_1
78
79static struct resource mv64x60_eth1_resources[] = {
80 [0] = {
81 .name = "eth1 irq",
82 .start = MV64x60_IRQ_ETH_1,
83 .end = MV64x60_IRQ_ETH_1,
84 .flags = IORESOURCE_IRQ,
85 },
86};
87
88static char eth1_mac_addr[ETH_ALEN];
89
90static struct mv643xx_eth_platform_data eth1_pd = {
91 .mac_addr = eth1_mac_addr,
92
93 .tx_sram_addr = MV_SRAM_BASE_ETH1,
94 .tx_sram_size = MV_SRAM_TXRING_SIZE,
95 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
96
97 .rx_sram_addr = MV_SRAM_BASE_ETH1 + MV_SRAM_TXRING_SIZE,
98 .rx_sram_size = MV_SRAM_RXRING_SIZE,
99 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
100};
101
102static struct platform_device eth1_device = {
103 .name = MV643XX_ETH_NAME,
104 .id = 1,
105 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
106 .resource = mv64x60_eth1_resources,
107 .dev = {
108 .platform_data = &eth1_pd,
109 },
110};
111#endif /* CONFIG_MV643XX_ETH_1 */
112
113#ifdef CONFIG_MV643XX_ETH_2
114
115static struct resource mv64x60_eth2_resources[] = {
116 [0] = {
117 .name = "eth2 irq",
118 .start = MV64x60_IRQ_ETH_2,
119 .end = MV64x60_IRQ_ETH_2,
120 .flags = IORESOURCE_IRQ,
121 },
122};
123
124static char eth2_mac_addr[ETH_ALEN];
125
126static struct mv643xx_eth_platform_data eth2_pd = {
127 .mac_addr = eth2_mac_addr,
128};
129
130static struct platform_device eth2_device = {
131 .name = MV643XX_ETH_NAME,
132 .id = 2,
133 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
134 .resource = mv64x60_eth2_resources,
135 .dev = {
136 .platform_data = &eth2_pd,
137 },
138};
139#endif /* CONFIG_MV643XX_ETH_2 */
140
141static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
142 &mv643xx_eth_shared_device,
143#ifdef CONFIG_MV643XX_ETH_0
144 &eth0_device,
145#endif
146#ifdef CONFIG_MV643XX_ETH_1
147 &eth1_device,
148#endif
149#ifdef CONFIG_MV643XX_ETH_2
150 &eth2_device,
151#endif
152};
153
154static u8 __init exchange_bit(u8 val, u8 cs)
155{
156 /* place the data */
157 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
158 udelay(1);
159
160 /* turn the clock on */
161 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
162 udelay(1);
163
164 /* turn the clock off and read-strobe */
165 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
166
167 /* return the data */
168 return (JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1;
169}
170
171static void __init get_mac(char dest[6])
172{
173 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
174 int i,j;
175
176 for (i = 0; i < 12; i++)
177 exchange_bit(read_opcode[i], 1);
178
179 for (j = 0; j < 6; j++) {
180 dest[j] = 0;
181 for (i = 0; i < 8; i++) {
182 dest[j] <<= 1;
183 dest[j] |= exchange_bit(0, 1);
184 }
185 }
186
187 /* turn off CS */
188 exchange_bit(0,0);
189}
190
191/*
192 * Copy and increment ethernet MAC address by a small value.
193 *
194 * This is useful for systems where only one MAC address is stored in
195 * non-volatile memory for multiple ports.
196 */
197static inline void eth_mac_add(unsigned char *dst, unsigned char *src,
198 unsigned int add)
199{
200 int i;
201
202 BUG_ON(add >= 256);
203
204 for (i = ETH_ALEN - 1; i >= 0; i--) {
205 dst[i] = src[i] + add;
206 add = dst[i] < src[i]; /* compute carry */
207 }
208
209 WARN_ON(add);
210}
211
212static int __init mv643xx_eth_add_pds(void)
213{
214 unsigned char mac[ETH_ALEN];
215 int ret;
216
217 get_mac(mac);
218#ifdef CONFIG_MV643XX_ETH_0
219 eth_mac_add(eth0_mac_addr, mac, 0);
220#endif
221#ifdef CONFIG_MV643XX_ETH_1
222 eth_mac_add(eth1_mac_addr, mac, 1);
223#endif
224#ifdef CONFIG_MV643XX_ETH_2
225 eth_mac_add(eth2_mac_addr, mac, 2);
226#endif
227 ret = platform_add_devices(mv643xx_eth_pd_devs,
228 ARRAY_SIZE(mv643xx_eth_pd_devs));
229
230 return ret;
231}
232
233device_initcall(mv643xx_eth_add_pds);
234
235#endif /* defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE) */
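A short usage sketch of the per-port address derivation done in mv643xx_eth_add_pds(), with made-up values to show the byte-wise carry in eth_mac_add():

	/* Hypothetical base address; ETH_ALEN is 6, from <linux/if_ether.h>. */
	unsigned char base[ETH_ALEN]  = { 0x00, 0x50, 0xc2, 0x12, 0x34, 0xff };
	unsigned char port1[ETH_ALEN];

	eth_mac_add(port1, base, 1);	/* 00:50:c2:12:34:ff -> 00:50:c2:12:35:00 */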
diff --git a/arch/mips/momentum/jaguar_atx/prom.c b/arch/mips/momentum/jaguar_atx/prom.c
index 3d2712929293..5dd154ee58f6 100644
--- a/arch/mips/momentum/jaguar_atx/prom.c
+++ b/arch/mips/momentum/jaguar_atx/prom.c
@@ -39,56 +39,6 @@ const char *get_system_type(void)
39 return "Momentum Jaguar-ATX"; 39 return "Momentum Jaguar-ATX";
40} 40}
41 41
42#ifdef CONFIG_MV643XX_ETH
43extern unsigned char prom_mac_addr_base[6];
44
45static void burn_clocks(void)
46{
47 int i;
48
49 /* this loop should burn at least 1us -- this should be plenty */
50 for (i = 0; i < 0x10000; i++)
51 ;
52}
53
54static u8 exchange_bit(u8 val, u8 cs)
55{
56 /* place the data */
57 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
58 burn_clocks();
59
60 /* turn the clock on */
61 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
62 burn_clocks();
63
64 /* turn the clock off and read-strobe */
65 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
66
67 /* return the data */
68 return ((JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1);
69}
70
71void get_mac(char dest[6])
72{
73 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
74 int i,j;
75
76 for (i = 0; i < 12; i++)
77 exchange_bit(read_opcode[i], 1);
78
79 for (j = 0; j < 6; j++) {
80 dest[j] = 0;
81 for (i = 0; i < 8; i++) {
82 dest[j] <<= 1;
83 dest[j] |= exchange_bit(0, 1);
84 }
85 }
86
87 /* turn off CS */
88 exchange_bit(0,0);
89}
90#endif
91
92#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
93 43
94unsigned long signext(unsigned long addr) 44unsigned long signext(unsigned long addr)
@@ -228,16 +178,10 @@ void __init prom_init(void)
228#endif /* CONFIG_64BIT */ 178#endif /* CONFIG_64BIT */
229 mips_machgroup = MACH_GROUP_MOMENCO; 179 mips_machgroup = MACH_GROUP_MOMENCO;
230 mips_machtype = MACH_MOMENCO_JAGUAR_ATX; 180 mips_machtype = MACH_MOMENCO_JAGUAR_ATX;
231
232#ifdef CONFIG_MV643XX_ETH
233 /* get the base MAC address for on-board ethernet ports */
234 get_mac(prom_mac_addr_base);
235#endif
236} 181}
237 182
238unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
239{ 184{
240 return 0;
241} 185}
242 186
243void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_3/irq.c b/arch/mips/momentum/ocelot_3/irq.c
index cea0e5deb80e..3862d1d1add4 100644
--- a/arch/mips/momentum/ocelot_3/irq.c
+++ b/arch/mips/momentum/ocelot_3/irq.c
@@ -65,7 +65,7 @@ void __init arch_init_irq(void)
65 */ 65 */
66 clear_c0_status(ST0_IM | ST0_BEV); 66 clear_c0_status(ST0_IM | ST0_BEV);
67 67
68 rm7k_cpu_irq_init(8); 68 rm7k_cpu_irq_init();
69 69
70 /* set up the cascading interrupts */ 70 /* set up the cascading interrupts */
71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */ 71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */
diff --git a/arch/mips/momentum/ocelot_3/prom.c b/arch/mips/momentum/ocelot_3/prom.c
index 6ce9b7fdb824..8e02df63578a 100644
--- a/arch/mips/momentum/ocelot_3/prom.c
+++ b/arch/mips/momentum/ocelot_3/prom.c
@@ -180,9 +180,8 @@ void __init prom_init(void)
180#endif 180#endif
181} 181}
182 182
183unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
184{ 184{
185 return 0;
186} 185}
187 186
188void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index bb11fef08472..186a140fd2a9 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -84,7 +84,7 @@ void ll_cpci_irq(void)
84} 84}
85 85
86struct irq_chip cpci_irq_type = { 86struct irq_chip cpci_irq_type = {
87 .typename = "CPCI/FPGA", 87 .name = "CPCI/FPGA",
88 .ack = mask_cpci_irq, 88 .ack = mask_cpci_irq,
89 .mask = mask_cpci_irq, 89 .mask = mask_cpci_irq,
90 .mask_ack = mask_cpci_irq, 90 .mask_ack = mask_cpci_irq,
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_c/irq.c b/arch/mips/momentum/ocelot_c/irq.c
index ea65223a6d2c..40472f7944d7 100644
--- a/arch/mips/momentum/ocelot_c/irq.c
+++ b/arch/mips/momentum/ocelot_c/irq.c
@@ -94,7 +94,7 @@ void __init arch_init_irq(void)
94 */ 94 */
95 clear_c0_status(ST0_IM); 95 clear_c0_status(ST0_IM);
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 98
99 /* set up the cascading interrupts */ 99 /* set up the cascading interrupts */
100 setup_irq(3, &cascade_fpga); 100 setup_irq(3, &cascade_fpga);
diff --git a/arch/mips/momentum/ocelot_c/prom.c b/arch/mips/momentum/ocelot_c/prom.c
index d0b77e101d74..b689ceea8cfb 100644
--- a/arch/mips/momentum/ocelot_c/prom.c
+++ b/arch/mips/momentum/ocelot_c/prom.c
@@ -178,7 +178,6 @@ void __init prom_init(void)
178#endif 178#endif
179} 179}
180 180
181unsigned long __init prom_free_prom_memory(void) 181void __init prom_free_prom_memory(void)
182{ 182{
183 return 0;
184} 183}
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index a7a80c0da569..de1a31ee52f3 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -77,7 +77,7 @@ void ll_uart_irq(void)
77} 77}
78 78
79struct irq_chip uart_irq_type = { 79struct irq_chip uart_irq_type = {
80 .typename = "UART/FPGA", 80 .name = "UART/FPGA",
81 .ack = mask_uart_irq, 81 .ack = mask_uart_irq,
82 .mask = mask_uart_irq, 82 .mask = mask_uart_irq,
83 .mask_ack = mask_uart_irq, 83 .mask_ack = mask_uart_irq,
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_g/irq.c b/arch/mips/momentum/ocelot_g/irq.c
index da46524e87cb..273541fe7087 100644
--- a/arch/mips/momentum/ocelot_g/irq.c
+++ b/arch/mips/momentum/ocelot_g/irq.c
@@ -94,8 +94,8 @@ void __init arch_init_irq(void)
94 clear_c0_status(ST0_IM); 94 clear_c0_status(ST0_IM);
95 local_irq_disable(); 95 local_irq_disable();
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 rm7k_cpu_irq_init(8); 98 rm7k_cpu_irq_init();
99 99
100 gt64240_irq_init(); 100 gt64240_irq_init();
101} 101}
diff --git a/arch/mips/momentum/ocelot_g/prom.c b/arch/mips/momentum/ocelot_g/prom.c
index 2f75c6b91ec5..836d0830720d 100644
--- a/arch/mips/momentum/ocelot_g/prom.c
+++ b/arch/mips/momentum/ocelot_g/prom.c
@@ -79,7 +79,6 @@ void __init prom_init(void)
79 } 79 }
80} 80}
81 81
82unsigned long __init prom_free_prom_memory(void) 82void __init prom_free_prom_memory(void)
83{ 83{
84 return 0;
85} 84}
diff --git a/arch/mips/oprofile/Kconfig b/arch/mips/oprofile/Kconfig
index 55feaf798596..ca395ef06d4e 100644
--- a/arch/mips/oprofile/Kconfig
+++ b/arch/mips/oprofile/Kconfig
@@ -11,7 +11,7 @@ config PROFILING
11 11
12config OPROFILE 12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)" 13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING && EXPERIMENTAL 14 depends on PROFILING && !MIPS_MT_SMTC && EXPERIMENTAL
15 help 15 help
16 OProfile is a profiling system capable of profiling the 16 OProfile is a profiling system capable of profiling the
17 whole system, including the kernel, kernel modules, libraries, 17 whole system, including the kernel, kernel modules, libraries,
diff --git a/arch/mips/pci/fixup-vr4133.c b/arch/mips/pci/fixup-vr4133.c
index 597b89764ba1..a8d9d22b13df 100644
--- a/arch/mips/pci/fixup-vr4133.c
+++ b/arch/mips/pci/fixup-vr4133.c
@@ -17,8 +17,10 @@
17 */ 17 */
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/kernel.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/i8259.h>
22#include <asm/vr41xx/cmbvr4133.h> 24#include <asm/vr41xx/cmbvr4133.h>
23 25
24extern int vr4133_rockhopper; 26extern int vr4133_rockhopper;
@@ -142,7 +144,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
142 if (bus == NULL) 144 if (bus == NULL)
143 return -1; 145 return -1;
144 146
145 for (i = 0; i < sizeof (int_map) / sizeof (int_map[0]); i++) { 147 for (i = 0; i < ARRAY_SIZE(int_map); i++) {
146 if (int_map[i].bus == bus->number && int_map[i].slot == slot) { 148 if (int_map[i].bus == bus->number && int_map[i].slot == slot) {
147 int line; 149 int line;
148 for (line = 0; line < 4; line++) 150 for (line = 0; line < 4; line++)
@@ -160,17 +162,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
160#ifdef CONFIG_ROCKHOPPER 162#ifdef CONFIG_ROCKHOPPER
161void i8259_init(void) 163void i8259_init(void)
162{ 164{
163 outb(0x11, 0x20); /* Master ICW1 */ 165 init_i8259_irqs();
164 outb(I8259_IRQ_BASE, 0x21); /* Master ICW2 */
165 outb(0x04, 0x21); /* Master ICW3 */
166 outb(0x01, 0x21); /* Master ICW4 */
167 outb(0xff, 0x21); /* Master IMW */
168
169 outb(0x11, 0xa0); /* Slave ICW1 */
170 outb(I8259_IRQ_BASE + 8, 0xa1); /* Slave ICW2 */
171 outb(0x02, 0xa1); /* Slave ICW3 */
172 outb(0x01, 0xa1); /* Slave ICW4 */
173 outb(0xff, 0xa1); /* Slave IMW */
174 166
175 outb(0x00, 0x4d0); 167 outb(0x00, 0x4d0);
176 outb(0x02, 0x4d1); /* USB IRQ9 is level */ 168 outb(0x02, 0x4d1); /* USB IRQ9 is level */
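The two outb() calls left at the end of i8259_init() program the legacy edge/level control registers (ELCR) at I/O ports 0x4d0 and 0x4d1; a set bit marks the corresponding IRQ as level-triggered, so the 0x02 written to 0x4d1 marks IRQ9 level for the USB controller, as the comment says. A hedged helper sketch, not part of the patch, spelling out that bit arithmetic:

	/* Sketch: mark one of IRQ8..15 level-triggered via ELCR2 (port 0x4d1). */
	static void elcr2_set_level(unsigned int irq)
	{
		unsigned char elcr2 = inb(0x4d1);

		elcr2 |= 1 << (irq - 8);	/* IRQ9 -> bit 1, i.e. the 0x02 above */
		outb(elcr2, 0x4d1);
	}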
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 2c36c108c4d6..d48665ebd33c 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -159,7 +159,7 @@ int pnx8550_set_gic_priority(int irq, int priority)
159} 159}
160 160
161static struct irq_chip level_irq_type = { 161static struct irq_chip level_irq_type = {
162 .typename = "PNX Level IRQ", 162 .name = "PNX Level IRQ",
163 .ack = mask_irq, 163 .ack = mask_irq,
164 .mask = mask_irq, 164 .mask = mask_irq,
165 .mask_ack = mask_irq, 165 .mask_ack = mask_irq,
diff --git a/arch/mips/philips/pnx8550/common/prom.c b/arch/mips/philips/pnx8550/common/prom.c
index eb6ec11fef07..8aeed6c2b8c3 100644
--- a/arch/mips/philips/pnx8550/common/prom.c
+++ b/arch/mips/philips/pnx8550/common/prom.c
@@ -106,9 +106,8 @@ int get_ethernet_addr(char *ethernet_addr)
106 return 0; 106 return 0;
107} 107}
108 108
109unsigned long __init prom_free_prom_memory(void) 109void __init prom_free_prom_memory(void)
110{ 110{
111 return 0;
112} 111}
113 112
114extern int pnx8550_console_port; 113extern int pnx8550_console_port;
diff --git a/arch/mips/pmc-sierra/yosemite/dbg_io.c b/arch/mips/pmc-sierra/yosemite/dbg_io.c
index 0f659c9106ac..6362c702e389 100644
--- a/arch/mips/pmc-sierra/yosemite/dbg_io.c
+++ b/arch/mips/pmc-sierra/yosemite/dbg_io.c
@@ -93,7 +93,7 @@
93 * Functions to READ and WRITE to serial port 1 93 * Functions to READ and WRITE to serial port 1
94 */ 94 */
95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \ 95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \
96 (TITAN_SERIAL_BASE_1 + ofs) 96 (TITAN_SERIAL_BASE_1 + ofs)))
97 97
98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \ 98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \
99 (TITAN_SERIAL_BASE_1 + ofs))) = val) 99 (TITAN_SERIAL_BASE_1 + ofs))) = val)
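The SERIAL_READ_1() change above only restores the two closing parentheses the macro had lost; before the fix every expansion ended in an unbalanced '(' and could not compile, and the repaired form now mirrors the fully parenthesised SERIAL_WRITE_1() directly below it.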
diff --git a/arch/mips/pmc-sierra/yosemite/irq.c b/arch/mips/pmc-sierra/yosemite/irq.c
index adb048527e76..428d1f45a287 100644
--- a/arch/mips/pmc-sierra/yosemite/irq.c
+++ b/arch/mips/pmc-sierra/yosemite/irq.c
@@ -148,9 +148,9 @@ void __init arch_init_irq(void)
148{ 148{
149 clear_c0_status(ST0_IM); 149 clear_c0_status(ST0_IM);
150 150
151 mips_cpu_irq_init(0); 151 mips_cpu_irq_init();
152 rm7k_cpu_irq_init(8); 152 rm7k_cpu_irq_init();
153 rm9k_cpu_irq_init(12); 153 rm9k_cpu_irq_init();
154 154
155#ifdef CONFIG_KGDB 155#ifdef CONFIG_KGDB
156 /* At this point, initialize the second serial port */ 156 /* At this point, initialize the second serial port */
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 9fe4973377c3..1e1685e415a4 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 prom_grab_secondary(); 132 prom_grab_secondary();
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 139void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 1b9b0d396d3e..6a6e15e40009 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -171,6 +171,7 @@ static void __init py_map_ocd(void)
171 171
172static void __init py_uart_setup(void) 172static void __init py_uart_setup(void)
173{ 173{
174#ifdef CONFIG_SERIAL_8250
174 struct uart_port up; 175 struct uart_port up;
175 176
176 /* 177 /*
@@ -188,6 +189,7 @@ static void __init py_uart_setup(void)
188 189
189 if (early_serial_setup(&up)) 190 if (early_serial_setup(&up))
190 printk(KERN_ERR "Early serial init of port 0 failed\n"); 191 printk(KERN_ERR "Early serial init of port 0 failed\n");
192#endif /* CONFIG_SERIAL_8250 */
191} 193}
192 194
193static void __init py_rtc_setup(void) 195static void __init py_rtc_setup(void)
diff --git a/arch/mips/qemu/q-mem.c b/arch/mips/qemu/q-mem.c
index d174fac43031..dae39b59de15 100644
--- a/arch/mips/qemu/q-mem.c
+++ b/arch/mips/qemu/q-mem.c
@@ -1,6 +1,5 @@
1#include <linux/init.h> 1#include <linux/init.h>
2 2
3unsigned long __init prom_free_prom_memory(void) 3void __init prom_free_prom_memory(void)
4{ 4{
5 return 0UL;
6} 5}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index a1a9af6da7bf..6b6e97b90c6e 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -139,7 +139,7 @@ static void end_eisa1_irq(unsigned int irq)
139} 139}
140 140
141static struct irq_chip ip22_eisa1_irq_type = { 141static struct irq_chip ip22_eisa1_irq_type = {
142 .typename = "IP22 EISA", 142 .name = "IP22 EISA",
143 .startup = startup_eisa1_irq, 143 .startup = startup_eisa1_irq,
144 .ack = mask_and_ack_eisa1_irq, 144 .ack = mask_and_ack_eisa1_irq,
145 .mask = disable_eisa1_irq, 145 .mask = disable_eisa1_irq,
@@ -194,7 +194,7 @@ static void end_eisa2_irq(unsigned int irq)
194} 194}
195 195
196static struct irq_chip ip22_eisa2_irq_type = { 196static struct irq_chip ip22_eisa2_irq_type = {
197 .typename = "IP22 EISA", 197 .name = "IP22 EISA",
198 .startup = startup_eisa2_irq, 198 .startup = startup_eisa2_irq,
199 .ack = mask_and_ack_eisa2_irq, 199 .ack = mask_and_ack_eisa2_irq,
200 .mask = disable_eisa2_irq, 200 .mask = disable_eisa2_irq,
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index c44f8be0644f..b454924aeb56 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/mipsregs.h> 20#include <asm/mipsregs.h>
21#include <asm/addrspace.h> 21#include <asm/addrspace.h>
22#include <asm/irq_cpu.h>
22 23
23#include <asm/sgi/ioc.h> 24#include <asm/sgi/ioc.h>
24#include <asm/sgi/hpc3.h> 25#include <asm/sgi/hpc3.h>
@@ -52,7 +53,7 @@ static void disable_local0_irq(unsigned int irq)
52} 53}
53 54
54static struct irq_chip ip22_local0_irq_type = { 55static struct irq_chip ip22_local0_irq_type = {
55 .typename = "IP22 local 0", 56 .name = "IP22 local 0",
56 .ack = disable_local0_irq, 57 .ack = disable_local0_irq,
57 .mask = disable_local0_irq, 58 .mask = disable_local0_irq,
58 .mask_ack = disable_local0_irq, 59 .mask_ack = disable_local0_irq,
@@ -73,7 +74,7 @@ void disable_local1_irq(unsigned int irq)
73} 74}
74 75
75static struct irq_chip ip22_local1_irq_type = { 76static struct irq_chip ip22_local1_irq_type = {
76 .typename = "IP22 local 1", 77 .name = "IP22 local 1",
77 .ack = disable_local1_irq, 78 .ack = disable_local1_irq,
78 .mask = disable_local1_irq, 79 .mask = disable_local1_irq,
79 .mask_ack = disable_local1_irq, 80 .mask_ack = disable_local1_irq,
@@ -94,7 +95,7 @@ void disable_local2_irq(unsigned int irq)
94} 95}
95 96
96static struct irq_chip ip22_local2_irq_type = { 97static struct irq_chip ip22_local2_irq_type = {
97 .typename = "IP22 local 2", 98 .name = "IP22 local 2",
98 .ack = disable_local2_irq, 99 .ack = disable_local2_irq,
99 .mask = disable_local2_irq, 100 .mask = disable_local2_irq,
100 .mask_ack = disable_local2_irq, 101 .mask_ack = disable_local2_irq,
@@ -115,7 +116,7 @@ void disable_local3_irq(unsigned int irq)
115} 116}
116 117
117static struct irq_chip ip22_local3_irq_type = { 118static struct irq_chip ip22_local3_irq_type = {
118 .typename = "IP22 local 3", 119 .name = "IP22 local 3",
119 .ack = disable_local3_irq, 120 .ack = disable_local3_irq,
120 .mask = disable_local3_irq, 121 .mask = disable_local3_irq,
121 .mask_ack = disable_local3_irq, 122 .mask_ack = disable_local3_irq,
@@ -253,8 +254,6 @@ asmlinkage void plat_irq_dispatch(void)
253 indy_8254timer_irq(); 254 indy_8254timer_irq();
254} 255}
255 256
256extern void mips_cpu_irq_init(unsigned int irq_base);
257
258void __init arch_init_irq(void) 257void __init arch_init_irq(void)
259{ 258{
260 int i; 259 int i;
@@ -316,7 +315,7 @@ void __init arch_init_irq(void)
316 sgint->cmeimask1 = 0; 315 sgint->cmeimask1 = 0;
317 316
318 /* init CPU irqs */ 317 /* init CPU irqs */
319 mips_cpu_irq_init(SGINT_CPU); 318 mips_cpu_irq_init();
320 319
321 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) { 320 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
322 struct irq_chip *handler; 321 struct irq_chip *handler;
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index b58bd522262b..ddb6506d8341 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -202,7 +202,6 @@ void __init sgimc_init(void)
202} 202}
203 203
204void __init prom_meminit(void) {} 204void __init prom_meminit(void) {}
205unsigned long __init prom_free_prom_memory(void) 205void __init prom_free_prom_memory(void)
206{ 206{
207 return 0;
208} 207}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 319f8803ef6f..60ade7690e09 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -333,7 +333,7 @@ static inline void disable_bridge_irq(unsigned int irq)
333} 333}
334 334
335static struct irq_chip bridge_irq_type = { 335static struct irq_chip bridge_irq_type = {
336 .typename = "bridge", 336 .name = "bridge",
337 .startup = startup_bridge_irq, 337 .startup = startup_bridge_irq,
338 .shutdown = shutdown_bridge_irq, 338 .shutdown = shutdown_bridge_irq,
339 .ack = disable_bridge_irq, 339 .ack = disable_bridge_irq,
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 16e5682b01f1..0e3d535e9f43 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -498,10 +498,9 @@ void __init prom_meminit(void)
498 } 498 }
499} 499}
500 500
501unsigned long __init prom_free_prom_memory(void) 501void __init prom_free_prom_memory(void)
502{ 502{
503 /* We got nothing to free here ... */ 503 /* We got nothing to free here ... */
504 return 0;
505} 504}
506 505
507extern void pagetable_init(void); 506extern void pagetable_init(void);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index c20e9899b34b..9ce513629b14 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -181,7 +181,7 @@ static void disable_rt_irq(unsigned int irq)
181} 181}
182 182
183static struct irq_chip rt_irq_type = { 183static struct irq_chip rt_irq_type = {
184 .typename = "SN HUB RT timer", 184 .name = "SN HUB RT timer",
185 .ack = disable_rt_irq, 185 .ack = disable_rt_irq,
186 .mask = disable_rt_irq, 186 .mask = disable_rt_irq,
187 .mask_ack = disable_rt_irq, 187 .mask_ack = disable_rt_irq,
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index ae063864c026..8c450d9e8696 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -144,7 +144,7 @@ static void end_cpu_irq(unsigned int irq)
144} 144}
145 145
146static struct irq_chip ip32_cpu_interrupt = { 146static struct irq_chip ip32_cpu_interrupt = {
147 .typename = "IP32 CPU", 147 .name = "IP32 CPU",
148 .ack = disable_cpu_irq, 148 .ack = disable_cpu_irq,
149 .mask = disable_cpu_irq, 149 .mask = disable_cpu_irq,
150 .mask_ack = disable_cpu_irq, 150 .mask_ack = disable_cpu_irq,
@@ -193,7 +193,7 @@ static void end_crime_irq(unsigned int irq)
193} 193}
194 194
195static struct irq_chip ip32_crime_interrupt = { 195static struct irq_chip ip32_crime_interrupt = {
196 .typename = "IP32 CRIME", 196 .name = "IP32 CRIME",
197 .ack = mask_and_ack_crime_irq, 197 .ack = mask_and_ack_crime_irq,
198 .mask = disable_crime_irq, 198 .mask = disable_crime_irq,
199 .mask_ack = mask_and_ack_crime_irq, 199 .mask_ack = mask_and_ack_crime_irq,
@@ -234,7 +234,7 @@ static void end_macepci_irq(unsigned int irq)
234} 234}
235 235
236static struct irq_chip ip32_macepci_interrupt = { 236static struct irq_chip ip32_macepci_interrupt = {
237 .typename = "IP32 MACE PCI", 237 .name = "IP32 MACE PCI",
238 .ack = disable_macepci_irq, 238 .ack = disable_macepci_irq,
239 .mask = disable_macepci_irq, 239 .mask = disable_macepci_irq,
240 .mask_ack = disable_macepci_irq, 240 .mask_ack = disable_macepci_irq,
@@ -347,7 +347,7 @@ static void end_maceisa_irq(unsigned irq)
347} 347}
348 348
349static struct irq_chip ip32_maceisa_interrupt = { 349static struct irq_chip ip32_maceisa_interrupt = {
350 .typename = "IP32 MACE ISA", 350 .name = "IP32 MACE ISA",
351 .ack = mask_and_ack_maceisa_irq, 351 .ack = mask_and_ack_maceisa_irq,
352 .mask = disable_maceisa_irq, 352 .mask = disable_maceisa_irq,
353 .mask_ack = mask_and_ack_maceisa_irq, 353 .mask_ack = mask_and_ack_maceisa_irq,
@@ -379,7 +379,7 @@ static void end_mace_irq(unsigned int irq)
379} 379}
380 380
381static struct irq_chip ip32_mace_interrupt = { 381static struct irq_chip ip32_mace_interrupt = {
382 .typename = "IP32 MACE", 382 .name = "IP32 MACE",
383 .ack = disable_mace_irq, 383 .ack = disable_mace_irq,
384 .mask = disable_mace_irq, 384 .mask = disable_mace_irq,
385 .mask_ack = disable_mace_irq, 385 .mask_ack = disable_mace_irq,
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index d37d40a3cdae..849d392a0013 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -43,7 +43,6 @@ void __init prom_meminit (void)
43} 43}
44 44
45 45
46unsigned long __init prom_free_prom_memory (void) 46void __init prom_free_prom_memory(void)
47{ 47{
48 return 0;
49} 48}
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 2e8f6b2e2420..1dc5d05d8962 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -82,7 +82,7 @@ extern char sb1250_duart_present[];
82#endif 82#endif
83 83
84static struct irq_chip bcm1480_irq_type = { 84static struct irq_chip bcm1480_irq_type = {
85 .typename = "BCM1480-IMR", 85 .name = "BCM1480-IMR",
86 .ack = ack_bcm1480_irq, 86 .ack = ack_bcm1480_irq,
87 .mask = disable_bcm1480_irq, 87 .mask = disable_bcm1480_irq,
88 .mask_ack = ack_bcm1480_irq, 88 .mask_ack = ack_bcm1480_irq,
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 6e8952da6e2a..9e6099e69622 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -343,10 +343,9 @@ void __init prom_init(void)
343 prom_meminit(); 343 prom_meminit();
344} 344}
345 345
346unsigned long __init prom_free_prom_memory(void) 346void __init prom_free_prom_memory(void)
347{ 347{
348 /* Not sure what I'm supposed to do here. Nothing, I think */ 348 /* Not sure what I'm supposed to do here. Nothing, I think */
349 return 0;
350} 349}
351 350
352void prom_putchar(char c) 351void prom_putchar(char c)
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 82ce7533053f..148239446e6e 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -67,7 +67,7 @@ extern char sb1250_duart_present[];
67#endif 67#endif
68 68
69static struct irq_chip sb1250_irq_type = { 69static struct irq_chip sb1250_irq_type = {
70 .typename = "SB1250-IMR", 70 .name = "SB1250-IMR",
71 .ack = ack_sb1250_irq, 71 .ack = ack_sb1250_irq,
72 .mask = disable_sb1250_irq, 72 .mask = disable_sb1250_irq,
73 .mask_ack = ack_sb1250_irq, 73 .mask_ack = ack_sb1250_irq,
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index 3c33a4517bc3..257c4e674353 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -87,10 +87,9 @@ void __init prom_init(void)
87 prom_meminit(); 87 prom_meminit();
88} 88}
89 89
90unsigned long __init prom_free_prom_memory(void) 90void __init prom_free_prom_memory(void)
91{ 91{
92 /* Not sure what I'm supposed to do here. Nothing, I think */ 92 /* Not sure what I'm supposed to do here. Nothing, I think */
93 return 0;
94} 93}
95 94
96void prom_putchar(char c) 95void prom_putchar(char c)
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 8511bcc6d99d..039e8e540508 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -37,7 +37,7 @@ static void end_pciasic_irq(unsigned int irq)
37} 37}
38 38
39static struct irq_chip pciasic_irq_type = { 39static struct irq_chip pciasic_irq_type = {
40 .typename = "ASIC-PCI", 40 .name = "ASIC-PCI",
41 .ack = disable_pciasic_irq, 41 .ack = disable_pciasic_irq,
42 .mask = disable_pciasic_irq, 42 .mask = disable_pciasic_irq,
43 .mask_ack = disable_pciasic_irq, 43 .mask_ack = disable_pciasic_irq,
diff --git a/arch/mips/sni/sniprom.c b/arch/mips/sni/sniprom.c
index d1d0f1f493b4..1213d166f22e 100644
--- a/arch/mips/sni/sniprom.c
+++ b/arch/mips/sni/sniprom.c
@@ -67,9 +67,8 @@ void prom_printf(char *fmt, ...)
67 va_end(args); 67 va_end(args);
68} 68}
69 69
70unsigned long prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
74 73
75/* 74/*
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index ed4a19adf361..e7f3e5b84dcf 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -120,7 +120,7 @@ static void tx4927_irq_pic_disable(unsigned int irq);
120 120
121#define TX4927_CP0_NAME "TX4927-CP0" 121#define TX4927_CP0_NAME "TX4927-CP0"
122static struct irq_chip tx4927_irq_cp0_type = { 122static struct irq_chip tx4927_irq_cp0_type = {
123 .typename = TX4927_CP0_NAME, 123 .name = TX4927_CP0_NAME,
124 .ack = tx4927_irq_cp0_disable, 124 .ack = tx4927_irq_cp0_disable,
125 .mask = tx4927_irq_cp0_disable, 125 .mask = tx4927_irq_cp0_disable,
126 .mask_ack = tx4927_irq_cp0_disable, 126 .mask_ack = tx4927_irq_cp0_disable,
@@ -129,7 +129,7 @@ static struct irq_chip tx4927_irq_cp0_type = {
129 129
130#define TX4927_PIC_NAME "TX4927-PIC" 130#define TX4927_PIC_NAME "TX4927-PIC"
131static struct irq_chip tx4927_irq_pic_type = { 131static struct irq_chip tx4927_irq_pic_type = {
132 .typename = TX4927_PIC_NAME, 132 .name = TX4927_PIC_NAME,
133 .ack = tx4927_irq_pic_disable, 133 .ack = tx4927_irq_pic_disable,
134 .mask = tx4927_irq_pic_disable, 134 .mask = tx4927_irq_pic_disable,
135 .mask_ack = tx4927_irq_pic_disable, 135 .mask_ack = tx4927_irq_pic_disable,
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index b54b529a29f9..dcce88f403c9 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -228,7 +228,7 @@ static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
228 228
229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC" 229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = { 230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
231 .typename = TOSHIBA_RBTX4927_IOC_NAME, 231 .name = TOSHIBA_RBTX4927_IOC_NAME,
232 .ack = toshiba_rbtx4927_irq_ioc_disable, 232 .ack = toshiba_rbtx4927_irq_ioc_disable,
233 .mask = toshiba_rbtx4927_irq_ioc_disable, 233 .mask = toshiba_rbtx4927_irq_ioc_disable,
234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable, 234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable,
@@ -241,7 +241,7 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
241#ifdef CONFIG_TOSHIBA_FPCIB0 241#ifdef CONFIG_TOSHIBA_FPCIB0
242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA" 242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
243static struct irq_chip toshiba_rbtx4927_irq_isa_type = { 243static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
244 .typename = TOSHIBA_RBTX4927_ISA_NAME, 244 .name = TOSHIBA_RBTX4927_ISA_NAME,
245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
246 .mask = toshiba_rbtx4927_irq_isa_disable, 246 .mask = toshiba_rbtx4927_irq_isa_disable,
247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
@@ -490,13 +490,13 @@ void toshiba_rbtx4927_irq_dump(char *key)
490 { 490 {
491 u32 i, j = 0; 491 u32 i, j = 0;
492 for (i = 0; i < NR_IRQS; i++) { 492 for (i = 0; i < NR_IRQS; i++) {
493 if (strcmp(irq_desc[i].chip->typename, "none") 493 if (strcmp(irq_desc[i].chip->name, "none")
494 == 0) 494 == 0)
495 continue; 495 continue;
496 496
497 if ((i >= 1) 497 if ((i >= 1)
498 && (irq_desc[i - 1].chip->typename == 498 && (irq_desc[i - 1].chip->name ==
499 irq_desc[i].chip->typename)) { 499 irq_desc[i].chip->name)) {
500 j++; 500 j++;
501 } else { 501 } else {
502 j = 0; 502 j = 0;
@@ -510,7 +510,7 @@ void toshiba_rbtx4927_irq_dump(char *key)
510 (u32) (irq_desc[i].action ? irq_desc[i]. 510 (u32) (irq_desc[i].action ? irq_desc[i].
511 action->handler : 0), 511 action->handler : 0),
512 irq_desc[i].depth, 512 irq_desc[i].depth,
513 irq_desc[i].chip->typename, j); 513 irq_desc[i].chip->name, j);
514 } 514 }
515 } 515 }
516#endif 516#endif
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
index efe50562f0ce..9a3a5babd1fb 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
@@ -80,9 +80,8 @@ void __init prom_init(void)
80 add_memory_region(0, msize << 20, BOOT_MEM_RAM); 80 add_memory_region(0, msize << 20, BOOT_MEM_RAM);
81} 81}
82 82
83unsigned long __init prom_free_prom_memory(void) 83void __init prom_free_prom_memory(void)
84{ 84{
85 return 0;
86} 85}
87 86
88const char *get_system_type(void) 87const char *get_system_type(void)
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index a347b424d91c..3a2dbfc25014 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -49,7 +49,7 @@ static void tx4938_irq_pic_disable(unsigned int irq);
49 49
50#define TX4938_CP0_NAME "TX4938-CP0" 50#define TX4938_CP0_NAME "TX4938-CP0"
51static struct irq_chip tx4938_irq_cp0_type = { 51static struct irq_chip tx4938_irq_cp0_type = {
52 .typename = TX4938_CP0_NAME, 52 .name = TX4938_CP0_NAME,
53 .ack = tx4938_irq_cp0_disable, 53 .ack = tx4938_irq_cp0_disable,
54 .mask = tx4938_irq_cp0_disable, 54 .mask = tx4938_irq_cp0_disable,
55 .mask_ack = tx4938_irq_cp0_disable, 55 .mask_ack = tx4938_irq_cp0_disable,
@@ -58,7 +58,7 @@ static struct irq_chip tx4938_irq_cp0_type = {
58 58
59#define TX4938_PIC_NAME "TX4938-PIC" 59#define TX4938_PIC_NAME "TX4938-PIC"
60static struct irq_chip tx4938_irq_pic_type = { 60static struct irq_chip tx4938_irq_pic_type = {
61 .typename = TX4938_PIC_NAME, 61 .name = TX4938_PIC_NAME,
62 .ack = tx4938_irq_pic_disable, 62 .ack = tx4938_irq_pic_disable,
63 .mask = tx4938_irq_pic_disable, 63 .mask = tx4938_irq_pic_disable,
64 .mask_ack = tx4938_irq_pic_disable, 64 .mask_ack = tx4938_irq_pic_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index b6f363d08011..2e96dbb248b1 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -92,7 +92,7 @@ static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
92 92
93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" 93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { 94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
95 .typename = TOSHIBA_RBTX4938_IOC_NAME, 95 .name = TOSHIBA_RBTX4938_IOC_NAME,
96 .ack = toshiba_rbtx4938_irq_ioc_disable, 96 .ack = toshiba_rbtx4938_irq_ioc_disable,
97 .mask = toshiba_rbtx4938_irq_ioc_disable, 97 .mask = toshiba_rbtx4938_irq_ioc_disable,
98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable, 98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/prom.c b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
index e44daf30a7c1..7dc6a0aae21c 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/prom.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
@@ -56,9 +56,8 @@ void __init prom_init(void)
56 return; 56 return;
57} 57}
58 58
59unsigned long __init prom_free_prom_memory(void) 59void __init prom_free_prom_memory(void)
60{ 60{
61 return 0;
62} 61}
63 62
64void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 63void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index c075261976c5..adabc6bad440 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2001-2002 MontaVista Software Inc. 4 * Copyright (C) 2001-2002 MontaVista Software Inc.
5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com> 5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
6 * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 6 * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@ static unsigned char sysint2_assign[16] = {
68#define MPIUINTREG 0x0e 68#define MPIUINTREG 0x0e
69#define MAIUINTREG 0x10 69#define MAIUINTREG 0x10
70#define MKIUINTREG 0x12 70#define MKIUINTREG 0x12
71#define MMACINTREG 0x12
71#define MGIUINTLREG 0x14 72#define MGIUINTLREG 0x14
72#define MDSIUINTREG 0x16 73#define MDSIUINTREG 0x16
73#define NMIREG 0x18 74#define NMIREG 0x18
@@ -241,6 +242,30 @@ void vr41xx_disable_kiuint(uint16_t mask)
241 242
242EXPORT_SYMBOL(vr41xx_disable_kiuint); 243EXPORT_SYMBOL(vr41xx_disable_kiuint);
243 244
245void vr41xx_enable_macint(uint16_t mask)
246{
247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
248 unsigned long flags;
249
250 spin_lock_irqsave(&desc->lock, flags);
251 icu1_set(MMACINTREG, mask);
252 spin_unlock_irqrestore(&desc->lock, flags);
253}
254
255EXPORT_SYMBOL(vr41xx_enable_macint);
256
257void vr41xx_disable_macint(uint16_t mask)
258{
259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
260 unsigned long flags;
261
262 spin_lock_irqsave(&desc->lock, flags);
263 icu1_clear(MMACINTREG, mask);
264 spin_unlock_irqrestore(&desc->lock, flags);
265}
266
267EXPORT_SYMBOL(vr41xx_disable_macint);
268
244void vr41xx_enable_dsiuint(uint16_t mask) 269void vr41xx_enable_dsiuint(uint16_t mask)
245{ 270{
246 struct irq_desc *desc = irq_desc + DSIU_IRQ; 271 struct irq_desc *desc = irq_desc + DSIU_IRQ;
@@ -428,7 +453,7 @@ static void enable_sysint1_irq(unsigned int irq)
428} 453}
429 454
430static struct irq_chip sysint1_irq_type = { 455static struct irq_chip sysint1_irq_type = {
431 .typename = "SYSINT1", 456 .name = "SYSINT1",
432 .ack = disable_sysint1_irq, 457 .ack = disable_sysint1_irq,
433 .mask = disable_sysint1_irq, 458 .mask = disable_sysint1_irq,
434 .mask_ack = disable_sysint1_irq, 459 .mask_ack = disable_sysint1_irq,
@@ -446,7 +471,7 @@ static void enable_sysint2_irq(unsigned int irq)
446} 471}
447 472
448static struct irq_chip sysint2_irq_type = { 473static struct irq_chip sysint2_irq_type = {
449 .typename = "SYSINT2", 474 .name = "SYSINT2",
450 .ack = disable_sysint2_irq, 475 .ack = disable_sysint2_irq,
451 .mask = disable_sysint2_irq, 476 .mask = disable_sysint2_irq,
452 .mask_ack = disable_sysint2_irq, 477 .mask_ack = disable_sysint2_irq,
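
The new vr41xx_enable_macint()/vr41xx_disable_macint() helpers added above are exported for the platform's Ethernet driver. A sketch of that usage, assuming a purely hypothetical driver (only the two exported helpers are taken from the patch; the prototypes normally come from the VR41xx header):

	#include <linux/netdevice.h>
	#include <linux/types.h>

	extern void vr41xx_enable_macint(uint16_t mask);
	extern void vr41xx_disable_macint(uint16_t mask);

	/* hypothetical driver fragment: unmask the MAC interrupt sources when
	 * the interface is brought up, mask them again when it is taken down */
	static int example_eth_open(struct net_device *dev)
	{
		vr41xx_enable_macint(0xffff);
		return 0;
	}

	static int example_eth_stop(struct net_device *dev)
	{
		vr41xx_disable_macint(0xffff);
		return 0;
	}
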
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index a2e285c1d4d5..4f97e0ba9e24 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -81,7 +81,6 @@ void __init prom_init(void)
81 } 81 }
82} 82}
83 83
84unsigned long __init prom_free_prom_memory (void) 84void __init prom_free_prom_memory(void)
85{ 85{
86 return 0UL;
87} 86}
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 16decf4ac2f4..cba36a247e32 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -95,27 +95,27 @@ asmlinkage void plat_irq_dispatch(void)
95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; 95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
96 96
97 if (pending & CAUSEF_IP7) 97 if (pending & CAUSEF_IP7)
98 do_IRQ(7); 98 do_IRQ(TIMER_IRQ);
99 else if (pending & 0x7800) { 99 else if (pending & 0x7800) {
100 if (pending & CAUSEF_IP3) 100 if (pending & CAUSEF_IP3)
101 irq_dispatch(3); 101 irq_dispatch(INT1_IRQ);
102 else if (pending & CAUSEF_IP4) 102 else if (pending & CAUSEF_IP4)
103 irq_dispatch(4); 103 irq_dispatch(INT2_IRQ);
104 else if (pending & CAUSEF_IP5) 104 else if (pending & CAUSEF_IP5)
105 irq_dispatch(5); 105 irq_dispatch(INT3_IRQ);
106 else if (pending & CAUSEF_IP6) 106 else if (pending & CAUSEF_IP6)
107 irq_dispatch(6); 107 irq_dispatch(INT4_IRQ);
108 } else if (pending & CAUSEF_IP2) 108 } else if (pending & CAUSEF_IP2)
109 irq_dispatch(2); 109 irq_dispatch(INT0_IRQ);
110 else if (pending & CAUSEF_IP0) 110 else if (pending & CAUSEF_IP0)
111 do_IRQ(0); 111 do_IRQ(MIPS_SOFTINT0_IRQ);
112 else if (pending & CAUSEF_IP1) 112 else if (pending & CAUSEF_IP1)
113 do_IRQ(1); 113 do_IRQ(MIPS_SOFTINT1_IRQ);
114 else 114 else
115 spurious_interrupt(); 115 spurious_interrupt();
116} 116}
117 117
118void __init arch_init_irq(void) 118void __init arch_init_irq(void)
119{ 119{
120 mips_cpu_irq_init(MIPS_CPU_IRQ_BASE); 120 mips_cpu_irq_init();
121} 121}
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 128ed8d6f111..7d2d076b0f54 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -21,60 +21,16 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/i8259.h>
24#include <asm/vr41xx/cmbvr4133.h> 25#include <asm/vr41xx/cmbvr4133.h>
25 26
26extern void enable_8259A_irq(unsigned int irq);
27extern void disable_8259A_irq(unsigned int irq);
28extern void mask_and_ack_8259A(unsigned int irq);
29extern void init_8259A(int hoge);
30
31extern int vr4133_rockhopper; 27extern int vr4133_rockhopper;
32 28
33static void enable_i8259_irq(unsigned int irq)
34{
35 enable_8259A_irq(irq - I8259_IRQ_BASE);
36}
37
38static void disable_i8259_irq(unsigned int irq)
39{
40 disable_8259A_irq(irq - I8259_IRQ_BASE);
41}
42
43static void ack_i8259_irq(unsigned int irq)
44{
45 mask_and_ack_8259A(irq - I8259_IRQ_BASE);
46}
47
48static struct irq_chip i8259_irq_type = {
49 .typename = "XT-PIC",
50 .ack = ack_i8259_irq,
51 .mask = disable_i8259_irq,
52 .mask_ack = ack_i8259_irq,
53 .unmask = enable_i8259_irq,
54};
55
56static int i8259_get_irq_number(int irq) 29static int i8259_get_irq_number(int irq)
57{ 30{
58 unsigned long isr; 31 return i8259_irq();
59
60 isr = inb(0x20);
61 irq = ffz(~isr);
62 if (irq == 2) {
63 isr = inb(0xa0);
64 irq = 8 + ffz(~isr);
65 }
66
67 if (irq < 0 || irq > 15)
68 return -EINVAL;
69
70 return I8259_IRQ_BASE + irq;
71} 32}
72 33
73static struct irqaction i8259_slave_cascade = {
74 .handler = &no_action,
75 .name = "cascade",
76};
77
78void __init rockhopper_init_irq(void) 34void __init rockhopper_init_irq(void)
79{ 35{
80 int i; 36 int i;
@@ -84,11 +40,6 @@ void __init rockhopper_init_irq(void)
84 return; 40 return;
85 } 41 }
86 42
87 for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
88 set_irq_chip_and_handler(i, &i8259_irq_type, handle_level_irq);
89
90 setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
91
92 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH); 43 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH);
93 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH); 44 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH);
94 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number); 45 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 12272361c018..eaed402ad346 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HWEIGHT
34 bool 34 bool
35 default y 35 default y
36 36
37config GENERIC_CALIBRATE_DELAY
38 bool
39 default y
40
41config GENERIC_TIME 37config GENERIC_TIME
42 def_bool y 38 def_bool y
43 39
@@ -134,6 +130,31 @@ config AUDIT_ARCH
134 bool 130 bool
135 default y 131 default y
136 132
133config S390_SWITCH_AMODE
134 bool "Switch kernel/user addressing modes"
135 help
 136	  This option allows the addressing modes of kernel and user space to
 137	  be switched. The kernel parameter switch_amode=on enables this
 138	  feature; it is disabled by default. Enabling this (via kernel parameter) on machines
 139	  earlier than IBM System z9-109 EC/BC will reduce system performance.
140
141 Note that this option will also be selected by selecting the execute
142 protection option below. Enabling the execute protection via the
143 noexec kernel parameter will also switch the addressing modes,
144 independent of the switch_amode kernel parameter.
145
146
147config S390_EXEC_PROTECT
148 bool "Data execute protection"
149 select S390_SWITCH_AMODE
150 help
 151	  This option enables buffer overflow protection for user space
 152	  programs and also selects the addressing mode option above. The
 153	  kernel parameter noexec=on enables this feature and also switches
 154	  the addressing modes; it is disabled by default. Enabling this (via
 155	  kernel parameter) on machines earlier than IBM System z9-109 EC/BC
 156	  will reduce system performance.
157
137comment "Code generation options" 158comment "Code generation options"
138 159
139choice 160choice
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b8c237290263..c9da7d16145e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -81,7 +81,7 @@ static struct ctl_table appldata_dir_table[] = {
81/* 81/*
82 * Timer 82 * Timer
83 */ 83 */
84DEFINE_PER_CPU(struct vtimer_list, appldata_timer); 84static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
85static atomic_t appldata_expire_count = ATOMIC_INIT(0); 85static atomic_t appldata_expire_count = ATOMIC_INIT(0);
86 86
87static DEFINE_SPINLOCK(appldata_timer_lock); 87static DEFINE_SPINLOCK(appldata_timer_lock);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 8aea3698a77b..4ca615788702 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -36,7 +36,7 @@
36 * book: 36 * book:
37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
38 */ 38 */
39struct appldata_mem_data { 39static struct appldata_mem_data {
40 u64 timestamp; 40 u64 timestamp;
41 u32 sync_count_1; /* after VM collected the record data, */ 41 u32 sync_count_1; /* after VM collected the record data, */
42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 075e619bf37d..f64b8c867ae2 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -34,7 +34,7 @@
34 * book: 34 * book:
35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
36 */ 36 */
37struct appldata_net_sum_data { 37static struct appldata_net_sum_data {
38 u64 timestamp; 38 u64 timestamp;
39 u32 sync_count_1; /* after VM collected the record data, */ 39 u32 sync_count_1; /* after VM collected the record data, */
40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
new file mode 100644
index 000000000000..99ff9f08e4d7
--- /dev/null
+++ b/arch/s390/crypto/Kconfig
@@ -0,0 +1,60 @@
1config CRYPTO_SHA1_S390
2 tristate "SHA1 digest algorithm"
3 depends on S390
4 select CRYPTO_ALGAPI
5 help
6 This is the s390 hardware accelerated implementation of the
7 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
8
9config CRYPTO_SHA256_S390
10 tristate "SHA256 digest algorithm"
11 depends on S390
12 select CRYPTO_ALGAPI
13 help
14 This is the s390 hardware accelerated implementation of the
15 SHA256 secure hash standard (DFIPS 180-2).
16
17 This version of SHA implements a 256 bit hash with 128 bits of
18 security against collision attacks.
19
20config CRYPTO_DES_S390
21 tristate "DES and Triple DES cipher algorithms"
22 depends on S390
23 select CRYPTO_ALGAPI
24 select CRYPTO_BLKCIPHER
25 help
 26	  This is the s390 hardware accelerated implementation of the
27 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
28
29config CRYPTO_AES_S390
30 tristate "AES cipher algorithms"
31 depends on S390
32 select CRYPTO_ALGAPI
33 select CRYPTO_BLKCIPHER
34 help
35 This is the s390 hardware accelerated implementation of the
36 AES cipher algorithms (FIPS-197). AES uses the Rijndael
37 algorithm.
38
39 Rijndael appears to be consistently a very good performer in
40 both hardware and software across a wide range of computing
41 environments regardless of its use in feedback or non-feedback
42 modes. Its key setup time is excellent, and its key agility is
43 good. Rijndael's very low memory requirements make it very well
44 suited for restricted-space environments, in which it also
45 demonstrates excellent performance. Rijndael's operations are
46 among the easiest to defend against power and timing attacks.
47
48 On s390 the System z9-109 currently only supports the key size
49 of 128 bit.
50
51config S390_PRNG
52 tristate "Pseudo random number generator device driver"
53 depends on S390
54 default "m"
55 help
56 Select this option if you want to use the s390 pseudo random number
 57	  generator. The PRNG is part of the cryptographic processor functions
 58	  and uses triple-DES to generate secure random numbers in the manner
 59	  of the ANSI X9.17 standard. The PRNG is usable via the char device
60 /dev/prandom.
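
To make the help text concrete: once the module is loaded, the device is read like any other character device. A minimal user-space sketch (it assumes the /dev/prandom node described above exists on the running system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* pull 32 pseudo-random bytes from the s390 PRNG char device */
	int main(void)
	{
		unsigned char buf[32];
		ssize_t n;
		int i, fd = open("/dev/prandom", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/prandom");
			return 1;
		}
		n = read(fd, buf, sizeof(buf));
		if (n < 0) {
			perror("read");
			close(fd);
			return 1;
		}
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		printf("\n");
		close(fd);
		return 0;
	}
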
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index bfe2541dc5cf..14e552c5cc43 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o 7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
9 9obj-$(CONFIG_S390_PRNG) += prng.o
10obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 15c9eec02928..91636353f6f0 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the AES Cipher Algorithm. 4 * s390 implementation of the AES Cipher Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/aes.c" 10 * Derived from "crypto/aes.c"
@@ -27,9 +27,11 @@
27/* data block size for all key lengths */ 27/* data block size for all key lengths */
28#define AES_BLOCK_SIZE 16 28#define AES_BLOCK_SIZE 16
29 29
30int has_aes_128 = 0; 30#define AES_KEYLEN_128 1
31int has_aes_192 = 0; 31#define AES_KEYLEN_192 2
32int has_aes_256 = 0; 32#define AES_KEYLEN_256 4
33
34static char keylen_flag = 0;
33 35
34struct s390_aes_ctx { 36struct s390_aes_ctx {
35 u8 iv[AES_BLOCK_SIZE]; 37 u8 iv[AES_BLOCK_SIZE];
@@ -47,20 +49,19 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
47 49
48 switch (key_len) { 50 switch (key_len) {
49 case 16: 51 case 16:
50 if (!has_aes_128) 52 if (!(keylen_flag & AES_KEYLEN_128))
51 goto fail; 53 goto fail;
52 break; 54 break;
53 case 24: 55 case 24:
54 if (!has_aes_192) 56 if (!(keylen_flag & AES_KEYLEN_192))
55 goto fail; 57 goto fail;
56 58
57 break; 59 break;
58 case 32: 60 case 32:
59 if (!has_aes_256) 61 if (!(keylen_flag & AES_KEYLEN_256))
60 goto fail; 62 goto fail;
61 break; 63 break;
62 default: 64 default:
63 /* invalid key length */
64 goto fail; 65 goto fail;
65 break; 66 break;
66 } 67 }
@@ -322,34 +323,32 @@ static int __init aes_init(void)
322 int ret; 323 int ret;
323 324
324 if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) 325 if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
325 has_aes_128 = 1; 326 keylen_flag |= AES_KEYLEN_128;
326 if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) 327 if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
327 has_aes_192 = 1; 328 keylen_flag |= AES_KEYLEN_192;
328 if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) 329 if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
329 has_aes_256 = 1; 330 keylen_flag |= AES_KEYLEN_256;
331
332 if (!keylen_flag)
333 return -EOPNOTSUPP;
330 334
331 if (!has_aes_128 && !has_aes_192 && !has_aes_256) 335 /* z9 109 and z9 BC/EC only support 128 bit key length */
332 return -ENOSYS; 336 if (keylen_flag == AES_KEYLEN_128)
337 printk(KERN_INFO
 338			"aes_s390: hardware acceleration only available for "
339 "128 bit keys\n");
333 340
334 ret = crypto_register_alg(&aes_alg); 341 ret = crypto_register_alg(&aes_alg);
335 if (ret != 0) { 342 if (ret)
336 printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
337 goto aes_err; 343 goto aes_err;
338 }
339 344
340 ret = crypto_register_alg(&ecb_aes_alg); 345 ret = crypto_register_alg(&ecb_aes_alg);
341 if (ret != 0) { 346 if (ret)
342 printk(KERN_INFO
343 "crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
344 goto ecb_aes_err; 347 goto ecb_aes_err;
345 }
346 348
347 ret = crypto_register_alg(&cbc_aes_alg); 349 ret = crypto_register_alg(&cbc_aes_alg);
348 if (ret != 0) { 350 if (ret)
349 printk(KERN_INFO
350 "crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
351 goto cbc_aes_err; 351 goto cbc_aes_err;
352 }
353 352
354out: 353out:
355 return ret; 354 return ret;
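
The rework above replaces the three has_aes_* ints with a single keylen_flag bitmask that is filled in from the KM query results and then consulted in aes_set_key(). A stand-alone sketch of the same check (the defines mirror the patch; main() and its sample flag value are illustrative only):

	#include <stdio.h>

	#define AES_KEYLEN_128 1
	#define AES_KEYLEN_192 2
	#define AES_KEYLEN_256 4

	/* mirror of the key-length check in aes_set_key(): a key size is only
	 * accepted if the matching KM query bit was seen at init time */
	static int key_len_supported(int keylen_flag, unsigned int key_len)
	{
		switch (key_len) {
		case 16: return keylen_flag & AES_KEYLEN_128;
		case 24: return keylen_flag & AES_KEYLEN_192;
		case 32: return keylen_flag & AES_KEYLEN_256;
		default: return 0;
		}
	}

	int main(void)
	{
		int flags = AES_KEYLEN_128;	/* e.g. a z9, which only has AES-128 */

		printf("16-byte key: %s\n", key_len_supported(flags, 16) ? "ok" : "unsupported");
		printf("32-byte key: %s\n", key_len_supported(flags, 32) ? "ok" : "unsupported");
		return 0;
	}
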
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 2b137089f625..2775d2618332 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,8 +3,9 @@
3 * 3 *
4 * Support for s390 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * Jan Glauber (jan.glauber@de.ibm.com)
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
@@ -32,7 +33,8 @@ enum crypt_s390_operations {
32 CRYPT_S390_KMAC = 0x0500 33 CRYPT_S390_KMAC = 0x0500
33}; 34};
34 35
35/* function codes for KM (CIPHER MESSAGE) instruction 36/*
37 * function codes for KM (CIPHER MESSAGE) instruction
36 * 0x80 is the decipher modifier bit 38 * 0x80 is the decipher modifier bit
37 */ 39 */
38enum crypt_s390_km_func { 40enum crypt_s390_km_func {
@@ -51,7 +53,8 @@ enum crypt_s390_km_func {
51 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, 53 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
52}; 54};
53 55
54/* function codes for KMC (CIPHER MESSAGE WITH CHAINING) 56/*
57 * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
55 * instruction 58 * instruction
56 */ 59 */
57enum crypt_s390_kmc_func { 60enum crypt_s390_kmc_func {
@@ -68,9 +71,11 @@ enum crypt_s390_kmc_func {
68 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, 71 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
69 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, 72 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
70 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, 73 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
74 KMC_PRNG = CRYPT_S390_KMC | 0x43,
71}; 75};
72 76
73/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) 77/*
78 * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
74 * instruction 79 * instruction
75 */ 80 */
76enum crypt_s390_kimd_func { 81enum crypt_s390_kimd_func {
@@ -79,7 +84,8 @@ enum crypt_s390_kimd_func {
79 KIMD_SHA_256 = CRYPT_S390_KIMD | 2, 84 KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
80}; 85};
81 86
82/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) 87/*
88 * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
83 * instruction 89 * instruction
84 */ 90 */
85enum crypt_s390_klmd_func { 91enum crypt_s390_klmd_func {
@@ -88,7 +94,8 @@ enum crypt_s390_klmd_func {
88 KLMD_SHA_256 = CRYPT_S390_KLMD | 2, 94 KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
89}; 95};
90 96
91/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) 97/*
98 * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
92 * instruction 99 * instruction
93 */ 100 */
94enum crypt_s390_kmac_func { 101enum crypt_s390_kmac_func {
@@ -98,229 +105,219 @@ enum crypt_s390_kmac_func {
98 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 105 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
99}; 106};
100 107
101/* status word for s390 crypto instructions' QUERY functions */ 108/**
102struct crypt_s390_query_status { 109 * crypt_s390_km:
103 u64 high; 110 * @func: the function code passed to KM; see crypt_s390_km_func
104 u64 low; 111 * @param: address of parameter block; see POP for details on each func
105}; 112 * @dest: address of destination memory area
106 113 * @src: address of source memory area
107/* 114 * @src_len: length of src operand in bytes
115 *
108 * Executes the KM (CIPHER MESSAGE) operation of the CPU. 116 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
109 * @param func: the function code passed to KM; see crypt_s390_km_func 117 *
110 * @param param: address of parameter block; see POP for details on each func 118 * Returns -1 for failure, 0 for the query func, number of processed
111 * @param dest: address of destination memory area 119 * bytes for encryption/decryption funcs
112 * @param src: address of source memory area
113 * @param src_len: length of src operand in bytes
114 * @returns < zero for failure, 0 for the query func, number of processed bytes
115 * for encryption/decryption funcs
116 */ 120 */
117static inline int 121static inline int crypt_s390_km(long func, void *param,
118crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) 122 u8 *dest, const u8 *src, long src_len)
119{ 123{
120 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 124 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
121 register void* __param asm("1") = param; 125 register void *__param asm("1") = param;
122 register const u8* __src asm("2") = src; 126 register const u8 *__src asm("2") = src;
123 register long __src_len asm("3") = src_len; 127 register long __src_len asm("3") = src_len;
124 register u8* __dest asm("4") = dest; 128 register u8 *__dest asm("4") = dest;
125 int ret; 129 int ret;
126 130
127 asm volatile( 131 asm volatile(
128 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ 132 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
129 "1: brc 1,0b \n" /* handle partial completion */ 133 "1: brc 1,0b \n" /* handle partial completion */
130 " ahi %0,%h7\n" 134 " la %0,0\n"
131 "2: ahi %0,%h8\n" 135 "2:\n"
132 "3:\n" 136 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
133 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
134 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 137 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
135 : "d" (__func), "a" (__param), "0" (-EFAULT), 138 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
136 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
137 if (ret < 0) 139 if (ret < 0)
138 return ret; 140 return ret;
139 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 141 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
140} 142}
141 143
142/* 144/**
145 * crypt_s390_kmc:
 146 * @func: the function code passed to KMC; see crypt_s390_kmc_func
147 * @param: address of parameter block; see POP for details on each func
148 * @dest: address of destination memory area
149 * @src: address of source memory area
150 * @src_len: length of src operand in bytes
151 *
143 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. 152 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
144 * @param func: the function code passed to KM; see crypt_s390_kmc_func 153 *
145 * @param param: address of parameter block; see POP for details on each func 154 * Returns -1 for failure, 0 for the query func, number of processed
146 * @param dest: address of destination memory area 155 * bytes for encryption/decryption funcs
147 * @param src: address of source memory area
148 * @param src_len: length of src operand in bytes
149 * @returns < zero for failure, 0 for the query func, number of processed bytes
150 * for encryption/decryption funcs
151 */ 156 */
152static inline int 157static inline int crypt_s390_kmc(long func, void *param,
153crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) 158 u8 *dest, const u8 *src, long src_len)
154{ 159{
155 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 160 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
156 register void* __param asm("1") = param; 161 register void *__param asm("1") = param;
157 register const u8* __src asm("2") = src; 162 register const u8 *__src asm("2") = src;
158 register long __src_len asm("3") = src_len; 163 register long __src_len asm("3") = src_len;
159 register u8* __dest asm("4") = dest; 164 register u8 *__dest asm("4") = dest;
160 int ret; 165 int ret;
161 166
162 asm volatile( 167 asm volatile(
163 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ 168 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
164 "1: brc 1,0b \n" /* handle partial completion */ 169 "1: brc 1,0b \n" /* handle partial completion */
165 " ahi %0,%h7\n" 170 " la %0,0\n"
166 "2: ahi %0,%h8\n" 171 "2:\n"
167 "3:\n" 172 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
168 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
169 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 173 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
170 : "d" (__func), "a" (__param), "0" (-EFAULT), 174 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
171 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
172 if (ret < 0) 175 if (ret < 0)
173 return ret; 176 return ret;
174 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 177 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
175} 178}
176 179
177/* 180/**
181 * crypt_s390_kimd:
 182 * @func: the function code passed to KIMD; see crypt_s390_kimd_func
183 * @param: address of parameter block; see POP for details on each func
184 * @src: address of source memory area
185 * @src_len: length of src operand in bytes
186 *
178 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation 187 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
179 * of the CPU. 188 * of the CPU.
180 * @param func: the function code passed to KM; see crypt_s390_kimd_func 189 *
181 * @param param: address of parameter block; see POP for details on each func 190 * Returns -1 for failure, 0 for the query func, number of processed
182 * @param src: address of source memory area 191 * bytes for digest funcs
183 * @param src_len: length of src operand in bytes
184 * @returns < zero for failure, 0 for the query func, number of processed bytes
185 * for digest funcs
186 */ 192 */
187static inline int 193static inline int crypt_s390_kimd(long func, void *param,
188crypt_s390_kimd(long func, void* param, const u8* src, long src_len) 194 const u8 *src, long src_len)
189{ 195{
190 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 196 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
191 register void* __param asm("1") = param; 197 register void *__param asm("1") = param;
192 register const u8* __src asm("2") = src; 198 register const u8 *__src asm("2") = src;
193 register long __src_len asm("3") = src_len; 199 register long __src_len asm("3") = src_len;
194 int ret; 200 int ret;
195 201
196 asm volatile( 202 asm volatile(
197 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ 203 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
198 "1: brc 1,0b \n" /* handle partial completion */ 204 "1: brc 1,0b \n" /* handle partial completion */
199 " ahi %0,%h6\n" 205 " la %0,0\n"
200 "2: ahi %0,%h7\n" 206 "2:\n"
201 "3:\n" 207 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
202 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
203 : "=d" (ret), "+a" (__src), "+d" (__src_len) 208 : "=d" (ret), "+a" (__src), "+d" (__src_len)
204 : "d" (__func), "a" (__param), "0" (-EFAULT), 209 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
205 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
206 if (ret < 0) 210 if (ret < 0)
207 return ret; 211 return ret;
208 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 212 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
209} 213}
210 214
211/* 215/**
216 * crypt_s390_klmd:
 217 * @func: the function code passed to KLMD; see crypt_s390_klmd_func
218 * @param: address of parameter block; see POP for details on each func
219 * @src: address of source memory area
220 * @src_len: length of src operand in bytes
221 *
212 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. 222 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
213 * @param func: the function code passed to KM; see crypt_s390_klmd_func 223 *
214 * @param param: address of parameter block; see POP for details on each func 224 * Returns -1 for failure, 0 for the query func, number of processed
215 * @param src: address of source memory area 225 * bytes for digest funcs
216 * @param src_len: length of src operand in bytes
217 * @returns < zero for failure, 0 for the query func, number of processed bytes
218 * for digest funcs
219 */ 226 */
220static inline int 227static inline int crypt_s390_klmd(long func, void *param,
221crypt_s390_klmd(long func, void* param, const u8* src, long src_len) 228 const u8 *src, long src_len)
222{ 229{
223 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 230 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
224 register void* __param asm("1") = param; 231 register void *__param asm("1") = param;
225 register const u8* __src asm("2") = src; 232 register const u8 *__src asm("2") = src;
226 register long __src_len asm("3") = src_len; 233 register long __src_len asm("3") = src_len;
227 int ret; 234 int ret;
228 235
229 asm volatile( 236 asm volatile(
230 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ 237 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
231 "1: brc 1,0b \n" /* handle partial completion */ 238 "1: brc 1,0b \n" /* handle partial completion */
232 " ahi %0,%h6\n" 239 " la %0,0\n"
233 "2: ahi %0,%h7\n" 240 "2:\n"
234 "3:\n" 241 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
235 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
236 : "=d" (ret), "+a" (__src), "+d" (__src_len) 242 : "=d" (ret), "+a" (__src), "+d" (__src_len)
237 : "d" (__func), "a" (__param), "0" (-EFAULT), 243 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
238 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
239 if (ret < 0) 244 if (ret < 0)
240 return ret; 245 return ret;
241 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 246 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
242} 247}
243 248
244/* 249/**
250 * crypt_s390_kmac:
 251 * @func: the function code passed to KMAC; see crypt_s390_kmac_func
252 * @param: address of parameter block; see POP for details on each func
253 * @src: address of source memory area
254 * @src_len: length of src operand in bytes
255 *
245 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation 256 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
246 * of the CPU. 257 * of the CPU.
247 * @param func: the function code passed to KM; see crypt_s390_klmd_func 258 *
248 * @param param: address of parameter block; see POP for details on each func 259 * Returns -1 for failure, 0 for the query func, number of processed
249 * @param src: address of source memory area 260 * bytes for digest funcs
250 * @param src_len: length of src operand in bytes
251 * @returns < zero for failure, 0 for the query func, number of processed bytes
252 * for digest funcs
253 */ 261 */
254static inline int 262static inline int crypt_s390_kmac(long func, void *param,
255crypt_s390_kmac(long func, void* param, const u8* src, long src_len) 263 const u8 *src, long src_len)
256{ 264{
257 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 265 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
258 register void* __param asm("1") = param; 266 register void *__param asm("1") = param;
259 register const u8* __src asm("2") = src; 267 register const u8 *__src asm("2") = src;
260 register long __src_len asm("3") = src_len; 268 register long __src_len asm("3") = src_len;
261 int ret; 269 int ret;
262 270
263 asm volatile( 271 asm volatile(
264 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ 272 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
265 "1: brc 1,0b \n" /* handle partial completion */ 273 "1: brc 1,0b \n" /* handle partial completion */
266 " ahi %0,%h6\n" 274 " la %0,0\n"
267 "2: ahi %0,%h7\n" 275 "2:\n"
268 "3:\n" 276 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
269 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
270 : "=d" (ret), "+a" (__src), "+d" (__src_len) 277 : "=d" (ret), "+a" (__src), "+d" (__src_len)
271 : "d" (__func), "a" (__param), "0" (-EFAULT), 278 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
272 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
273 if (ret < 0) 279 if (ret < 0)
274 return ret; 280 return ret;
275 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 281 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
276} 282}
277 283
278/** 284/**
285 * crypt_s390_func_available:
286 * @func: the function code of the specific function; 0 if op in general
287 *
279 * Tests if a specific crypto function is implemented on the machine. 288 * Tests if a specific crypto function is implemented on the machine.
280 * @param func: the function code of the specific function; 0 if op in general 289 *
281 * @return 1 if func available; 0 if func or op in general not available 290 * Returns 1 if func available; 0 if func or op in general not available
282 */ 291 */
283static inline int 292static inline int crypt_s390_func_available(int func)
284crypt_s390_func_available(int func)
285{ 293{
294 unsigned char status[16];
286 int ret; 295 int ret;
287 296
288 struct crypt_s390_query_status status = { 297 switch (func & CRYPT_S390_OP_MASK) {
289 .high = 0, 298 case CRYPT_S390_KM:
290 .low = 0 299 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
291 }; 300 break;
292 switch (func & CRYPT_S390_OP_MASK){ 301 case CRYPT_S390_KMC:
293 case CRYPT_S390_KM: 302 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
294 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); 303 break;
295 break; 304 case CRYPT_S390_KIMD:
296 case CRYPT_S390_KMC: 305 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
297 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); 306 break;
298 break; 307 case CRYPT_S390_KLMD:
299 case CRYPT_S390_KIMD: 308 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
300 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); 309 break;
301 break; 310 case CRYPT_S390_KMAC:
302 case CRYPT_S390_KLMD: 311 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
303 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); 312 break;
304 break; 313 default:
305 case CRYPT_S390_KMAC: 314 return 0;
306 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
307 break;
308 default:
309 ret = 0;
310 return ret;
311 }
312 if (ret >= 0){
313 func &= CRYPT_S390_FUNC_MASK;
314 func &= 0x7f; //mask modifier bit
315 if (func < 64){
316 ret = (status.high >> (64 - func - 1)) & 0x1;
317 } else {
318 ret = (status.low >> (128 - func - 1)) & 0x1;
319 }
320 } else {
321 ret = 0;
322 } 315 }
323 return ret; 316 if (ret < 0)
317 return 0;
318 func &= CRYPT_S390_FUNC_MASK;
319 func &= 0x7f; /* mask modifier bit */
320 return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
324} 321}
325 322
326#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H 323#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
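
The rewritten crypt_s390_func_available() now treats the 16-byte QUERY status block as a plain bit string: after dropping the operation type and the 0x80 decipher modifier, bit <func> (counted from the most significant bit of byte 0) says whether the function is installed. A small user-space sketch of that indexing, ignoring the operation-type mask for brevity (the status contents below are made up):

	#include <stdio.h>

	/* test bit <func> of the 16-byte status block, most significant bit of
	 * byte 0 first, after masking off the 0x80 modifier bit */
	static int func_installed(const unsigned char status[16], int func)
	{
		func &= 0x7f;
		return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
	}

	int main(void)
	{
		unsigned char status[16] = { 0xf0 };	/* pretend funcs 0-3 are installed */

		printf("func 0x12 installed: %d\n", func_installed(status, 0x12));
		printf("func 0x02 installed: %d\n", func_installed(status, 0x02));
		return 0;
	}
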
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
deleted file mode 100644
index 54fb11d7fadd..000000000000
--- a/arch/s390/crypto/crypt_s390_query.c
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for s390 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_s390.h"
21
22static void query_available_functions(void)
23{
24 printk(KERN_INFO "#####################\n");
25
26 /* query available KM functions */
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_s390_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_s390_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
35 printk(KERN_INFO "KM_AES_128: %d\n",
36 crypt_s390_func_available(KM_AES_128_ENCRYPT));
37 printk(KERN_INFO "KM_AES_192: %d\n",
38 crypt_s390_func_available(KM_AES_192_ENCRYPT));
39 printk(KERN_INFO "KM_AES_256: %d\n",
40 crypt_s390_func_available(KM_AES_256_ENCRYPT));
41
42 /* query available KMC functions */
43 printk(KERN_INFO "KMC_QUERY: %d\n",
44 crypt_s390_func_available(KMC_QUERY));
45 printk(KERN_INFO "KMC_DEA: %d\n",
46 crypt_s390_func_available(KMC_DEA_ENCRYPT));
47 printk(KERN_INFO "KMC_TDEA_128: %d\n",
48 crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
49 printk(KERN_INFO "KMC_TDEA_192: %d\n",
50 crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
51 printk(KERN_INFO "KMC_AES_128: %d\n",
52 crypt_s390_func_available(KMC_AES_128_ENCRYPT));
53 printk(KERN_INFO "KMC_AES_192: %d\n",
54 crypt_s390_func_available(KMC_AES_192_ENCRYPT));
55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57
58 /* query available KIMD functions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
62 crypt_s390_func_available(KIMD_SHA_1));
63 printk(KERN_INFO "KIMD_SHA_256: %d\n",
64 crypt_s390_func_available(KIMD_SHA_256));
65
66 /* query available KLMD functions */
67 printk(KERN_INFO "KLMD_QUERY: %d\n",
68 crypt_s390_func_available(KLMD_QUERY));
69 printk(KERN_INFO "KLMD_SHA_1: %d\n",
70 crypt_s390_func_available(KLMD_SHA_1));
71 printk(KERN_INFO "KLMD_SHA_256: %d\n",
72 crypt_s390_func_available(KLMD_SHA_256));
73
74 /* query available KMAC functions */
75 printk(KERN_INFO "KMAC_QUERY: %d\n",
76 crypt_s390_func_available(KMAC_QUERY));
77 printk(KERN_INFO "KMAC_DEA: %d\n",
78 crypt_s390_func_available(KMAC_DEA));
79 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
80 crypt_s390_func_available(KMAC_TDEA_128));
81 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
82 crypt_s390_func_available(KMAC_TDEA_192));
83}
84
85static int init(void)
86{
87 struct crypt_s390_query_status status = {
88 .high = 0,
89 .low = 0
90 };
91
92 printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
93 crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
94 printk(KERN_INFO "KM:\t%016llx %016llx\n",
95 (unsigned long long) status.high,
96 (unsigned long long) status.low);
97 status.high = status.low = 0;
98 crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
99 printk(KERN_INFO "KMC:\t%016llx %016llx\n",
100 (unsigned long long) status.high,
101 (unsigned long long) status.low);
102 status.high = status.low = 0;
103 crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
104 printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
105 (unsigned long long) status.high,
106 (unsigned long long) status.low);
107 status.high = status.low = 0;
108 crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
109 printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
110 (unsigned long long) status.high,
111 (unsigned long long) status.low);
112 status.high = status.low = 0;
113 crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
114 printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
115 (unsigned long long) status.high,
116 (unsigned long long) status.low);
117
118 query_available_functions();
119 return -ECANCELED;
120}
121
122static void __exit cleanup(void)
123{
124}
125
126module_init(init);
127module_exit(cleanup);
128
129MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
index e3f5c5f238fe..5706af266442 100644
--- a/arch/s390/crypto/des_check_key.c
+++ b/arch/s390/crypto/des_check_key.c
@@ -10,8 +10,9 @@
10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. 10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL.
11 * 11 *
12 * s390 Version: 12 * s390 Version:
13 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 13 * Copyright IBM Corp. 2003
14 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 14 * Author(s): Thomas Spatzier
15 * Jan Glauber (jan.glauber@de.ibm.com)
15 * 16 *
16 * Derived from "crypto/des.c" 17 * Derived from "crypto/des.c"
17 * Copyright (c) 1992 Dana L. How. 18 * Copyright (c) 1992 Dana L. How.
@@ -30,6 +31,7 @@
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include "crypto_des.h"
33 35
34#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) 36#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o))
35 37
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 2aba04852fe3..ea22707f435f 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,9 +3,9 @@
3 * 3 *
4 * s390 implementation of the DES Cipher Algorithm. 4 * s390 implementation of the DES Cipher Algorithm.
5 * 5 *
6 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * 8 * Jan Glauber (jan.glauber@de.ibm.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -557,7 +557,7 @@ static int init(void)
557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || 557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || 558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
560 return -ENOSYS; 560 return -EOPNOTSUPP;
561 561
562 ret = crypto_register_alg(&des_alg); 562 ret = crypto_register_alg(&des_alg);
563 if (ret) 563 if (ret)
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
new file mode 100644
index 000000000000..8eb3a1aedc22
--- /dev/null
+++ b/arch/s390/crypto/prng.c
@@ -0,0 +1,213 @@
1/*
2 * Copyright IBM Corp. 2006,2007
3 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
4 * Driver for the s390 pseudo random number generator
5 */
6#include <linux/fs.h>
7#include <linux/init.h>
8#include <linux/kernel.h>
9#include <linux/miscdevice.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/random.h>
13#include <asm/debug.h>
14#include <asm/uaccess.h>
15
16#include "crypt_s390.h"
17
18MODULE_LICENSE("GPL");
19MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
20MODULE_DESCRIPTION("s390 PRNG interface");
21
22static int prng_chunk_size = 256;
23module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
24MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
25
26static int prng_entropy_limit = 4096;
27module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
28MODULE_PARM_DESC(prng_entropy_limit,
 29		 "PRNG: add entropy after this many bytes have been produced");
30
31/*
32 * Any one who considers arithmetical methods of producing random digits is,
33 * of course, in a state of sin. -- John von Neumann
34 */
35
36struct s390_prng_data {
37 unsigned long count; /* how many bytes were produced */
38 char *buf;
39};
40
41static struct s390_prng_data *p;
42
43/* copied from libica, use a non-zero initial parameter block */
44static unsigned char parm_block[32] = {
450x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
460x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
47};
48
49static int prng_open(struct inode *inode, struct file *file)
50{
51 return nonseekable_open(inode, file);
52}
53
54static void prng_add_entropy(void)
55{
56 __u64 entropy[4];
57 unsigned int i;
58 int ret;
59
60 for (i = 0; i < 16; i++) {
61 ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
62 (char *)entropy, sizeof(entropy));
63 BUG_ON(ret < 0 || ret != sizeof(entropy));
64 memcpy(parm_block, entropy, sizeof(entropy));
65 }
66}
67
68static void prng_seed(int nbytes)
69{
70 char buf[16];
71 int i = 0;
72
73 BUG_ON(nbytes > 16);
74 get_random_bytes(buf, nbytes);
75
76 /* Add the entropy */
77 while (nbytes >= 8) {
 78		*((__u64 *)parm_block) ^= *((__u64 *)(buf + i));
79 prng_add_entropy();
80 i += 8;
81 nbytes -= 8;
82 }
83 prng_add_entropy();
84}
85
86static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
87 loff_t *ppos)
88{
89 int chunk, n;
90 int ret = 0;
91 int tmp;
92
 93	/* nbytes can be arbitrarily long, we split it into chunks */
94 while (nbytes) {
95 /* same as in extract_entropy_user in random.c */
96 if (need_resched()) {
97 if (signal_pending(current)) {
98 if (ret == 0)
99 ret = -ERESTARTSYS;
100 break;
101 }
102 schedule();
103 }
104
105 /*
106 * we lose some random bytes if an attacker issues
107 * reads < 8 bytes, but we don't care
108 */
109 chunk = min_t(int, nbytes, prng_chunk_size);
110
111 /* PRNG only likes multiples of 8 bytes */
112 n = (chunk + 7) & -8;
113
114 if (p->count > prng_entropy_limit)
115 prng_seed(8);
116
117 /* if the CPU supports PRNG stckf is present too */
118 asm volatile(".insn s,0xb27c0000,%0"
119 : "=m" (*((unsigned long long *)p->buf)) : : "cc");
120
121 /*
122 * Beside the STCKF the input for the TDES-EDE is the output
123 * of the last operation. We differ here from X9.17 since we
124 * only store one timestamp into the buffer. Padding the whole
125 * buffer with timestamps does not improve security, since
126 * successive stckf have nearly constant offsets.
127 * If an attacker knows the first timestamp it would be
128 * trivial to guess the additional values. One timestamp
129 * is therefore enough and still guarantees unique input values.
130 *
131 * Note: you can still get strict X9.17 conformity by setting
132 * prng_chunk_size to 8 bytes.
133 */
134 tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
135 BUG_ON((tmp < 0) || (tmp != n));
136
137 p->count += n;
138
139 if (copy_to_user(ubuf, p->buf, chunk))
140 return -EFAULT;
141
142 nbytes -= chunk;
143 ret += chunk;
144 ubuf += chunk;
145 }
146 return ret;
147}
148
149static struct file_operations prng_fops = {
150 .owner = THIS_MODULE,
151 .open = &prng_open,
152 .release = NULL,
153 .read = &prng_read,
154};
155
156static struct miscdevice prng_dev = {
157 .name = "prandom",
158 .minor = MISC_DYNAMIC_MINOR,
159 .fops = &prng_fops,
160};
161
162static int __init prng_init(void)
163{
164 int ret;
165
166 /* check if the CPU has a PRNG */
167 if (!crypt_s390_func_available(KMC_PRNG))
168 return -EOPNOTSUPP;
169
170 if (prng_chunk_size < 8)
171 return -EINVAL;
172
173 p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
174 if (!p)
175 return -ENOMEM;
176 p->count = 0;
177
178 p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
179 if (!p->buf) {
180 ret = -ENOMEM;
181 goto out_free;
182 }
183
184 /* initialize the PRNG, add 128 bits of entropy */
185 prng_seed(16);
186
187 ret = misc_register(&prng_dev);
188 if (ret) {
189 printk(KERN_WARNING
190 "Could not register misc device for PRNG.\n");
191 goto out_buf;
192 }
193 return 0;
194
195out_buf:
196 kfree(p->buf);
197out_free:
198 kfree(p);
199 return ret;
200}
201
202static void __exit prng_exit(void)
203{
204 /* wipe me */
205 memset(p->buf, 0, prng_chunk_size);
206 kfree(p->buf);
207 kfree(p);
208
209 misc_deregister(&prng_dev);
210}
211
212module_init(prng_init);
213module_exit(prng_exit);
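
prng_read() above rounds every user request up to a multiple of 8 because KMC_PRNG, being TDES based, only processes whole 8-byte blocks; the surplus bytes are generated but simply not copied to user space. The rounding expression is easy to check in isolation:

	#include <stdio.h>

	/* round a requested chunk up to the next multiple of 8, as prng_read()
	 * does before handing the buffer to KMC_PRNG */
	static int round_to_prng_block(int chunk)
	{
		return (chunk + 7) & -8;
	}

	int main(void)
	{
		int req;

		for (req = 1; req <= 17; req += 4)
			printf("request %2d -> kmc length %2d\n",
			       req, round_to_prng_block(req));
		return 0;
	}
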
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 49ca8690ee39..969639f31977 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,8 +8,9 @@
8 * implementation written by Steve Reid. 8 * implementation written by Steve Reid.
9 * 9 *
10 * s390 Version: 10 * s390 Version:
11 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 11 * Copyright IBM Corp. 2003,2007
12 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 12 * Author(s): Thomas Spatzier
13 * Jan Glauber (jan.glauber@de.ibm.com)
13 * 14 *
14 * Derived from "crypto/sha1.c" 15 * Derived from "crypto/sha1.c"
15 * Copyright (c) Alan Smithee. 16 * Copyright (c) Alan Smithee.
@@ -43,16 +44,14 @@ struct crypt_s390_sha1_ctx {
43static void sha1_init(struct crypto_tfm *tfm) 44static void sha1_init(struct crypto_tfm *tfm)
44{ 45{
45 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); 46 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
46 static const u32 initstate[5] = { 47
47 0x67452301, 48 ctx->state[0] = 0x67452301;
48 0xEFCDAB89, 49 ctx->state[1] = 0xEFCDAB89;
49 0x98BADCFE, 50 ctx->state[2] = 0x98BADCFE;
50 0x10325476, 51 ctx->state[3] = 0x10325476;
51 0xC3D2E1F0 52 ctx->state[4] = 0xC3D2E1F0;
52 };
53 53
54 ctx->count = 0; 54 ctx->count = 0;
55 memcpy(ctx->state, &initstate, sizeof(initstate));
56 ctx->buf_len = 0; 55 ctx->buf_len = 0;
57} 56}
58 57
@@ -63,13 +62,13 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
63 long imd_len; 62 long imd_len;
64 63
65 sctx = crypto_tfm_ctx(tfm); 64 sctx = crypto_tfm_ctx(tfm);
66 sctx->count += len * 8; //message bit length 65 sctx->count += len * 8; /* message bit length */
67 66
68 //anything in buffer yet? -> must be completed 67 /* anything in buffer yet? -> must be completed */
69 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { 68 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
70 //complete full block and hash 69 /* complete full block and hash */
71 memcpy(sctx->buffer + sctx->buf_len, data, 70 memcpy(sctx->buffer + sctx->buf_len, data,
72 SHA1_BLOCK_SIZE - sctx->buf_len); 71 SHA1_BLOCK_SIZE - sctx->buf_len);
73 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
74 SHA1_BLOCK_SIZE); 73 SHA1_BLOCK_SIZE);
75 data += SHA1_BLOCK_SIZE - sctx->buf_len; 74 data += SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -77,37 +76,36 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
77 sctx->buf_len = 0; 76 sctx->buf_len = 0;
78 } 77 }
79 78
80 //rest of data contains full blocks? 79 /* rest of data contains full blocks? */
81 imd_len = len & ~0x3ful; 80 imd_len = len & ~0x3ful;
82 if (imd_len){ 81 if (imd_len) {
83 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
84 data += imd_len; 83 data += imd_len;
85 len -= imd_len; 84 len -= imd_len;
86 } 85 }
87 //anything left? store in buffer 86 /* anything left? store in buffer */
88 if (len){ 87 if (len) {
89 memcpy(sctx->buffer + sctx->buf_len , data, len); 88 memcpy(sctx->buffer + sctx->buf_len , data, len);
90 sctx->buf_len += len; 89 sctx->buf_len += len;
91 } 90 }
92} 91}
93 92
94 93
95static void 94static void pad_message(struct crypt_s390_sha1_ctx* sctx)
96pad_message(struct crypt_s390_sha1_ctx* sctx)
97{ 95{
98 int index; 96 int index;
99 97
100 index = sctx->buf_len; 98 index = sctx->buf_len;
101 sctx->buf_len = (sctx->buf_len < 56)? 99 sctx->buf_len = (sctx->buf_len < 56) ?
102 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; 100 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
103 //start pad with 1 101 /* start pad with 1 */
104 sctx->buffer[index] = 0x80; 102 sctx->buffer[index] = 0x80;
105 //pad with zeros 103 /* pad with zeros */
106 index++; 104 index++;
107 memset(sctx->buffer + index, 0x00, sctx->buf_len - index); 105 memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
108 //append length 106 /* append length */
109 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, 107 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
110 sizeof sctx->count); 108 sizeof sctx->count);
111} 109}
112 110
113/* Add padding and return the message digest. */ 111/* Add padding and return the message digest. */
@@ -115,47 +113,40 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
115{ 113{
116 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 114 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
117 115
118 //must perform manual padding 116 /* must perform manual padding */
119 pad_message(sctx); 117 pad_message(sctx);
120 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); 118 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
121 //copy digest to out 119 /* copy digest to out */
122 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
123 /* Wipe context */ 121 /* wipe context */
124 memset(sctx, 0, sizeof *sctx); 122 memset(sctx, 0, sizeof *sctx);
125} 123}
126 124
127static struct crypto_alg alg = { 125static struct crypto_alg alg = {
128 .cra_name = "sha1", 126 .cra_name = "sha1",
129 .cra_driver_name = "sha1-s390", 127 .cra_driver_name= "sha1-s390",
130 .cra_priority = CRYPT_S390_PRIORITY, 128 .cra_priority = CRYPT_S390_PRIORITY,
131 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
132 .cra_blocksize = SHA1_BLOCK_SIZE, 130 .cra_blocksize = SHA1_BLOCK_SIZE,
133 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), 131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
134 .cra_module = THIS_MODULE, 132 .cra_module = THIS_MODULE,
135 .cra_list = LIST_HEAD_INIT(alg.cra_list), 133 .cra_list = LIST_HEAD_INIT(alg.cra_list),
136 .cra_u = { .digest = { 134 .cra_u = { .digest = {
137 .dia_digestsize = SHA1_DIGEST_SIZE, 135 .dia_digestsize = SHA1_DIGEST_SIZE,
138 .dia_init = sha1_init, 136 .dia_init = sha1_init,
139 .dia_update = sha1_update, 137 .dia_update = sha1_update,
140 .dia_final = sha1_final } } 138 .dia_final = sha1_final } }
141}; 139};
142 140
143static int 141static int __init init(void)
144init(void)
145{ 142{
146 int ret = -ENOSYS; 143 if (!crypt_s390_func_available(KIMD_SHA_1))
144 return -EOPNOTSUPP;
147 145
148 if (crypt_s390_func_available(KIMD_SHA_1)){ 146 return crypto_register_alg(&alg);
149 ret = crypto_register_alg(&alg);
150 if (ret == 0){
151 printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
152 }
153 }
154 return ret;
155} 147}
156 148
157static void __exit 149static void __exit fini(void)
158fini(void)
159{ 150{
160 crypto_unregister_alg(&alg); 151 crypto_unregister_alg(&alg);
161} 152}
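
The sha1_update() path above only ever hands whole 64-byte blocks to the CPACF KIMD instruction and parks any remainder in sctx->buffer until the next update or the final padding. Below is a minimal, self-contained sketch of that buffering pattern, assuming nothing beyond standard C: process_blocks() is a hypothetical stand-in for the crypt_s390_kimd() call and the context is reduced to just the carry buffer.

    #include <string.h>

    #define BLOCK_SIZE 64                   /* SHA1_BLOCK_SIZE */

    struct hash_ctx {
            unsigned char buffer[BLOCK_SIZE];
            unsigned int buf_len;
    };

    /* hypothetical stand-in for the hardware primitive (crypt_s390_kimd) */
    static void process_blocks(struct hash_ctx *ctx, const unsigned char *data,
                               unsigned int len)
    {
            (void)ctx; (void)data; (void)len;   /* compression step would go here */
    }

    static void hash_update(struct hash_ctx *ctx, const unsigned char *data,
                            unsigned int len)
    {
            unsigned int full;

            /* complete a pending partial block first */
            if (ctx->buf_len && ctx->buf_len + len >= BLOCK_SIZE) {
                    unsigned int fill = BLOCK_SIZE - ctx->buf_len;

                    memcpy(ctx->buffer + ctx->buf_len, data, fill);
                    process_blocks(ctx, ctx->buffer, BLOCK_SIZE);
                    data += fill;
                    len -= fill;
                    ctx->buf_len = 0;
            }

            /* hash as many whole blocks as possible in one call */
            full = len & ~(BLOCK_SIZE - 1);
            if (full) {
                    process_blocks(ctx, data, full);
                    data += full;
                    len -= full;
            }

            /* anything left is carried over for the next update or padding */
            if (len) {
                    memcpy(ctx->buffer + ctx->buf_len, data, len);
                    ctx->buf_len += len;
            }
    }

The same three-way split (top up the pending partial block, push the aligned middle, carry the tail) is what keeps every hardware call on a block boundary.
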
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 8e4e67503fe7..78436c696d37 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the SHA256 Secure Hash Algorithm. 4 * s390 implementation of the SHA256 Secure Hash Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/sha256.c" 10 * Derived from "crypto/sha256.c"
@@ -143,15 +143,10 @@ static struct crypto_alg alg = {
143 143
144static int init(void) 144static int init(void)
145{ 145{
146 int ret;
147
148 if (!crypt_s390_func_available(KIMD_SHA_256)) 146 if (!crypt_s390_func_available(KIMD_SHA_256))
149 return -ENOSYS; 147 return -EOPNOTSUPP;
150 148
151 ret = crypto_register_alg(&alg); 149 return crypto_register_alg(&alg);
152 if (ret != 0)
153 printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
154 return ret;
155} 150}
156 151
157static void __exit fini(void) 152static void __exit fini(void)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 5368cf4a350e..7c621b8ef683 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000
108CONFIG_COMPAT=y 108CONFIG_COMPAT=y
109CONFIG_SYSVIPC_COMPAT=y 109CONFIG_SYSVIPC_COMPAT=y
110CONFIG_AUDIT_ARCH=y 110CONFIG_AUDIT_ARCH=y
111CONFIG_S390_SWITCH_AMODE=y
112CONFIG_S390_EXEC_PROTECT=y
111 113
112# 114#
113# Code generation options 115# Code generation options
@@ -431,7 +433,6 @@ CONFIG_TN3270_CONSOLE=y
431CONFIG_TN3215=y 433CONFIG_TN3215=y
432CONFIG_TN3215_CONSOLE=y 434CONFIG_TN3215_CONSOLE=y
433CONFIG_CCW_CONSOLE=y 435CONFIG_CCW_CONSOLE=y
434CONFIG_SCLP=y
435CONFIG_SCLP_TTY=y 436CONFIG_SCLP_TTY=y
436CONFIG_SCLP_CONSOLE=y 437CONFIG_SCLP_CONSOLE=y
437CONFIG_SCLP_VT220_TTY=y 438CONFIG_SCLP_VT220_TTY=y
@@ -724,9 +725,7 @@ CONFIG_CRYPTO_MANAGER=y
724# CONFIG_CRYPTO_MD4 is not set 725# CONFIG_CRYPTO_MD4 is not set
725# CONFIG_CRYPTO_MD5 is not set 726# CONFIG_CRYPTO_MD5 is not set
726# CONFIG_CRYPTO_SHA1 is not set 727# CONFIG_CRYPTO_SHA1 is not set
727# CONFIG_CRYPTO_SHA1_S390 is not set
728# CONFIG_CRYPTO_SHA256 is not set 728# CONFIG_CRYPTO_SHA256 is not set
729# CONFIG_CRYPTO_SHA256_S390 is not set
730# CONFIG_CRYPTO_SHA512 is not set 729# CONFIG_CRYPTO_SHA512 is not set
731# CONFIG_CRYPTO_WP512 is not set 730# CONFIG_CRYPTO_WP512 is not set
732# CONFIG_CRYPTO_TGR192 is not set 731# CONFIG_CRYPTO_TGR192 is not set
@@ -735,12 +734,10 @@ CONFIG_CRYPTO_ECB=m
735CONFIG_CRYPTO_CBC=y 734CONFIG_CRYPTO_CBC=y
736# CONFIG_CRYPTO_LRW is not set 735# CONFIG_CRYPTO_LRW is not set
737# CONFIG_CRYPTO_DES is not set 736# CONFIG_CRYPTO_DES is not set
738# CONFIG_CRYPTO_DES_S390 is not set
739# CONFIG_CRYPTO_BLOWFISH is not set 737# CONFIG_CRYPTO_BLOWFISH is not set
740# CONFIG_CRYPTO_TWOFISH is not set 738# CONFIG_CRYPTO_TWOFISH is not set
741# CONFIG_CRYPTO_SERPENT is not set 739# CONFIG_CRYPTO_SERPENT is not set
742# CONFIG_CRYPTO_AES is not set 740# CONFIG_CRYPTO_AES is not set
743# CONFIG_CRYPTO_AES_S390 is not set
744# CONFIG_CRYPTO_CAST5 is not set 741# CONFIG_CRYPTO_CAST5 is not set
745# CONFIG_CRYPTO_CAST6 is not set 742# CONFIG_CRYPTO_CAST6 is not set
746# CONFIG_CRYPTO_TEA is not set 743# CONFIG_CRYPTO_TEA is not set
@@ -755,6 +752,11 @@ CONFIG_CRYPTO_CBC=y
755# 752#
756# Hardware crypto devices 753# Hardware crypto devices
757# 754#
755# CONFIG_CRYPTO_SHA1_S390 is not set
756# CONFIG_CRYPTO_SHA256_S390 is not set
757# CONFIG_CRYPTO_DES_S390 is not set
758# CONFIG_CRYPTO_AES_S390 is not set
759CONFIG_S390_PRNG=m
758 760
759# 761#
760# Library routines 762# Library routines
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index f4b00cd81f7c..b08d2abf6178 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o 5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
6 6
7s390_hypfs-objs := inode.o hypfs_diag.o 7s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index f3dbd91965c6..aea572009d60 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -27,4 +27,13 @@ extern struct dentry *hypfs_create_str(struct super_block *sb,
27 struct dentry *dir, const char *name, 27 struct dentry *dir, const char *name,
28 char *string); 28 char *string);
29 29
30/* LPAR Hypervisor */
31extern int hypfs_diag_init(void);
32extern void hypfs_diag_exit(void);
33extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
34
35/* VM Hypervisor */
36extern int hypfs_vm_init(void);
37extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
38
30#endif /* _HYPFS_H_ */ 39#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
deleted file mode 100644
index 256b384aebe1..000000000000
--- a/arch/s390/hypfs/hypfs_diag.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * arch/s390/hypfs_diag.h
3 * Hypervisor filesystem for Linux on s390.
4 *
5 * Copyright (C) IBM Corp. 2006
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#ifndef _HYPFS_DIAG_H_
10#define _HYPFS_DIAG_H_
11
12extern int hypfs_diag_init(void);
13extern void hypfs_diag_exit(void);
14extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
15
16#endif /* _HYPFS_DIAG_H_ */
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
new file mode 100644
index 000000000000..d01fc8f799f0
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -0,0 +1,231 @@
1/*
2 * Hypervisor filesystem for Linux on s390. z/VM implementation.
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/vmalloc.h>
12#include <asm/ebcdic.h>
13#include "hypfs.h"
14
15#define NAME_LEN 8
16
17static char local_guest[] = " ";
18static char all_guests[] = "* ";
19static char *guest_query;
20
21struct diag2fc_data {
22 __u32 version;
23 __u32 flags;
24 __u64 used_cpu;
25 __u64 el_time;
26 __u64 mem_min_kb;
27 __u64 mem_max_kb;
28 __u64 mem_share_kb;
29 __u64 mem_used_kb;
30 __u32 pcpus;
31 __u32 lcpus;
32 __u32 vcpus;
33 __u32 cpu_min;
34 __u32 cpu_max;
35 __u32 cpu_shares;
36 __u32 cpu_use_samp;
37 __u32 cpu_delay_samp;
38 __u32 page_wait_samp;
39 __u32 idle_samp;
40 __u32 other_samp;
41 __u32 total_samp;
42 char guest_name[NAME_LEN];
43};
44
45struct diag2fc_parm_list {
46 char userid[NAME_LEN];
47 char aci_grp[NAME_LEN];
48 __u64 addr;
49 __u32 size;
50 __u32 fmt;
51};
52
53static int diag2fc(int size, char* query, void *addr)
54{
55 unsigned long residual_cnt;
56 unsigned long rc;
57 struct diag2fc_parm_list parm_list;
58
59 memcpy(parm_list.userid, query, NAME_LEN);
60 ASCEBC(parm_list.userid, NAME_LEN);
61 parm_list.addr = (unsigned long) addr ;
62 parm_list.size = size;
63 parm_list.fmt = 0x02;
64 memset(parm_list.aci_grp, 0x40, NAME_LEN);
65 rc = -1;
66
67 asm volatile(
68 " diag %0,%1,0x2fc\n"
69 "0:\n"
70 EX_TABLE(0b,0b)
71 : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory");
72
73 if ((rc != 0 ) && (rc != -2))
74 return rc;
75 else
76 return -residual_cnt;
77}
78
79static struct diag2fc_data *diag2fc_store(char *query, int *count)
80{
81 int size;
82 struct diag2fc_data *data;
83
84 do {
85 size = diag2fc(0, query, NULL);
86 if (size < 0)
87 return ERR_PTR(-EACCES);
88 data = vmalloc(size);
89 if (!data)
90 return ERR_PTR(-ENOMEM);
91 if (diag2fc(size, query, data) == 0)
92 break;
93 vfree(data);
94 } while (1);
95 *count = (size / sizeof(*data));
96
97 return data;
98}
99
100static void diag2fc_free(void *data)
101{
102 vfree(data);
103}
104
105#define ATTRIBUTE(sb, dir, name, member) \
106do { \
107 void *rc; \
108 rc = hypfs_create_u64(sb, dir, name, member); \
109 if (IS_ERR(rc)) \
110 return PTR_ERR(rc); \
111} while(0)
112
113static int hypfs_vm_create_guest(struct super_block *sb,
114 struct dentry *systems_dir,
115 struct diag2fc_data *data)
116{
117 char guest_name[NAME_LEN + 1] = {};
118 struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
119 int dedicated_flag, capped_value;
120
121 capped_value = (data->flags & 0x00000006) >> 1;
122 dedicated_flag = (data->flags & 0x00000008) >> 3;
123
124 /* guest dir */
125 memcpy(guest_name, data->guest_name, NAME_LEN);
126 EBCASC(guest_name, NAME_LEN);
127 strstrip(guest_name);
128 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
129 if (IS_ERR(guest_dir))
130 return PTR_ERR(guest_dir);
131 ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time);
132
133 /* logical cpu information */
134 cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus");
135 if (IS_ERR(cpus_dir))
136 return PTR_ERR(cpus_dir);
137 ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu);
138 ATTRIBUTE(sb, cpus_dir, "capped", capped_value);
139 ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag);
140 ATTRIBUTE(sb, cpus_dir, "count", data->vcpus);
141 ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min);
142 ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max);
143 ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares);
144
145 /* memory information */
146 mem_dir = hypfs_mkdir(sb, guest_dir, "mem");
147 if (IS_ERR(mem_dir))
148 return PTR_ERR(mem_dir);
149 ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb);
150 ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb);
151 ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb);
152 ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb);
153
154 /* samples */
155 samples_dir = hypfs_mkdir(sb, guest_dir, "samples");
156 if (IS_ERR(samples_dir))
157 return PTR_ERR(samples_dir);
158 ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp);
159 ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp);
160 ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp);
161 ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp);
162 ATTRIBUTE(sb, samples_dir, "other", data->other_samp);
163 ATTRIBUTE(sb, samples_dir, "total", data->total_samp);
164 return 0;
165}
166
167int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
168{
169 struct dentry *dir, *file;
170 struct diag2fc_data *data;
171 int rc, i, count = 0;
172
173 data = diag2fc_store(guest_query, &count);
174 if (IS_ERR(data))
175 return PTR_ERR(data);
176
177	/* Hypervisor Info */
178 dir = hypfs_mkdir(sb, root, "hyp");
179 if (IS_ERR(dir)) {
180 rc = PTR_ERR(dir);
181 goto failed;
182 }
183 file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor");
184 if (IS_ERR(file)) {
185 rc = PTR_ERR(file);
186 goto failed;
187 }
188
189 /* physical cpus */
190 dir = hypfs_mkdir(sb, root, "cpus");
191 if (IS_ERR(dir)) {
192 rc = PTR_ERR(dir);
193 goto failed;
194 }
195 file = hypfs_create_u64(sb, dir, "count", data->lcpus);
196 if (IS_ERR(file)) {
197 rc = PTR_ERR(file);
198 goto failed;
199 }
200
201 /* guests */
202 dir = hypfs_mkdir(sb, root, "systems");
203 if (IS_ERR(dir)) {
204 rc = PTR_ERR(dir);
205 goto failed;
206 }
207
208 for (i = 0; i < count; i++) {
209		rc = hypfs_vm_create_guest(sb, dir, &(data[i]));
210 if (rc)
211 goto failed;
212 }
213 diag2fc_free(data);
214 return 0;
215
216failed:
217 diag2fc_free(data);
218 return rc;
219}
220
221int hypfs_vm_init(void)
222{
223 if (diag2fc(0, all_guests, NULL) > 0)
224 guest_query = all_guests;
225 else if (diag2fc(0, local_guest, NULL) > 0)
226 guest_query = local_guest;
227 else
228 return -EACCES;
229
230 return 0;
231}
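
diag2fc_store() above follows a probe-then-fetch pattern: a first diagnose call with size 0 reports how much buffer space is currently needed, the buffer is vmalloc()ed, and the whole query is retried from scratch if the data no longer fits by the time it is fetched. A user-space sketch of the same pattern, assuming a hypothetical query_data() in place of the DIAG 0x2fc wrapper, might look like this:

    #include <stdlib.h>

    struct record { int dummy; };               /* stand-in for diag2fc_data */

    /*
     * Hypothetical stand-in for diag2fc(): with size 0 it reports the bytes
     * currently needed, otherwise it returns 0 on success or a negative
     * value when the supplied buffer has become too small.
     */
    static int query_data(void *buf, int size)
    {
            int needed = 4 * sizeof(struct record);     /* stub: four records */

            if (size == 0)
                    return needed;
            return size >= needed ? 0 : -1;
    }

    static struct record *fetch_all(int *count)
    {
            struct record *data;
            int size;

            for (;;) {
                    size = query_data(NULL, 0);  /* ask how much space is needed */
                    if (size < 0)
                            return NULL;
                    data = malloc(size);
                    if (!data)
                            return NULL;
                    if (query_data(data, size) == 0)
                            break;               /* consistent snapshot obtained */
                    free(data);                  /* data grew in between, retry */
            }
            *count = size / sizeof(*data);
            return data;
    }
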
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index b6716c4b9934..a4fda7b53640 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <asm/ebcdic.h> 20#include <asm/ebcdic.h>
21#include "hypfs.h" 21#include "hypfs.h"
22#include "hypfs_diag.h"
23 22
24#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ 23#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
25#define TMP_SIZE 64 /* size of temporary buffers */ 24#define TMP_SIZE 64 /* size of temporary buffers */
@@ -192,7 +191,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
192 goto out; 191 goto out;
193 } 192 }
194 hypfs_delete_tree(sb->s_root); 193 hypfs_delete_tree(sb->s_root);
195 rc = hypfs_diag_create_files(sb, sb->s_root); 194 if (MACHINE_IS_VM)
195 rc = hypfs_vm_create_files(sb, sb->s_root);
196 else
197 rc = hypfs_diag_create_files(sb, sb->s_root);
196 if (rc) { 198 if (rc) {
197 printk(KERN_ERR "hypfs: Update failed\n"); 199 printk(KERN_ERR "hypfs: Update failed\n");
198 hypfs_delete_tree(sb->s_root); 200 hypfs_delete_tree(sb->s_root);
@@ -289,7 +291,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
289 rc = -ENOMEM; 291 rc = -ENOMEM;
290 goto err_alloc; 292 goto err_alloc;
291 } 293 }
292 rc = hypfs_diag_create_files(sb, root_dentry); 294 if (MACHINE_IS_VM)
295 rc = hypfs_vm_create_files(sb, root_dentry);
296 else
297 rc = hypfs_diag_create_files(sb, root_dentry);
293 if (rc) 298 if (rc)
294 goto err_tree; 299 goto err_tree;
295 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 300 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
@@ -462,11 +467,15 @@ static int __init hypfs_init(void)
462{ 467{
463 int rc; 468 int rc;
464 469
465 if (MACHINE_IS_VM) 470 if (MACHINE_IS_VM) {
466 return -ENODATA; 471 if (hypfs_vm_init())
467 if (hypfs_diag_init()) { 472 /* no diag 2fc, just exit */
468 rc = -ENODATA; 473 return -ENODATA;
469 goto fail_diag; 474 } else {
475 if (hypfs_diag_init()) {
476 rc = -ENODATA;
477 goto fail_diag;
478 }
470 } 479 }
471 kset_set_kset_s(&s390_subsys, hypervisor_subsys); 480 kset_set_kset_s(&s390_subsys, hypervisor_subsys);
472 rc = subsystem_register(&s390_subsys); 481 rc = subsystem_register(&s390_subsys);
@@ -480,7 +489,8 @@ static int __init hypfs_init(void)
480fail_filesystem: 489fail_filesystem:
481 subsystem_unregister(&s390_subsys); 490 subsystem_unregister(&s390_subsys);
482fail_sysfs: 491fail_sysfs:
483 hypfs_diag_exit(); 492 if (!MACHINE_IS_VM)
493 hypfs_diag_exit();
484fail_diag: 494fail_diag:
485 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); 495 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
486 return rc; 496 return rc;
@@ -488,7 +498,8 @@ fail_diag:
488 498
489static void __exit hypfs_exit(void) 499static void __exit hypfs_exit(void)
490{ 500{
491 hypfs_diag_exit(); 501 if (!MACHINE_IS_VM)
502 hypfs_diag_exit();
492 unregister_filesystem(&hypfs_type); 503 unregister_filesystem(&hypfs_type);
493 subsystem_unregister(&s390_subsys); 504 subsystem_unregister(&s390_subsys);
494} 505}
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a81881c9b297..5492d25d7d69 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,9 +4,9 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7obj-y := bitmap.o traps.o time.o process.o reset.o \ 7obj-y := bitmap.o traps.o time.o process.o base.o early.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o 9 semaphore.o s390_ext.o debug.o irq.o ipl.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
new file mode 100644
index 000000000000..dc7e5259770f
--- /dev/null
+++ b/arch/s390/kernel/base.S
@@ -0,0 +1,150 @@
1/*
2 * arch/s390/kernel/base.S
3 *
4 * Copyright IBM Corp. 2006,2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl s390_base_mcck_handler
15s390_base_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 larl %r1,s390_base_mcck_handler_fn
20 lg %r1,0(%r1)
21 ltgr %r1,%r1
22 jz 1f
23 basr %r14,%r1
241: la %r1,4095
25 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
26 lpswe __LC_MCK_OLD_PSW
27
28 .section .bss
29 .globl s390_base_mcck_handler_fn
30s390_base_mcck_handler_fn:
31 .quad 0
32 .previous
33
34 .globl s390_base_ext_handler
35s390_base_ext_handler:
36 stmg %r0,%r15,__LC_SAVE_AREA
37 basr %r13,0
380: aghi %r15,-STACK_FRAME_OVERHEAD
39 larl %r1,s390_base_ext_handler_fn
40 lg %r1,0(%r1)
41 ltgr %r1,%r1
42 jz 1f
43 basr %r14,%r1
441: lmg %r0,%r15,__LC_SAVE_AREA
45 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
46 lpswe __LC_EXT_OLD_PSW
47
48 .section .bss
49 .globl s390_base_ext_handler_fn
50s390_base_ext_handler_fn:
51 .quad 0
52 .previous
53
54 .globl s390_base_pgm_handler
55s390_base_pgm_handler:
56 stmg %r0,%r15,__LC_SAVE_AREA
57 basr %r13,0
580: aghi %r15,-STACK_FRAME_OVERHEAD
59 larl %r1,s390_base_pgm_handler_fn
60 lg %r1,0(%r1)
61 ltgr %r1,%r1
62 jz 1f
63 basr %r14,%r1
64 lmg %r0,%r15,__LC_SAVE_AREA
65 lpswe __LC_PGM_OLD_PSW
661: lpswe disabled_wait_psw-0b(%r13)
67
68 .align 8
69disabled_wait_psw:
70 .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
71
72 .section .bss
73 .globl s390_base_pgm_handler_fn
74s390_base_pgm_handler_fn:
75 .quad 0
76 .previous
77
78#else /* CONFIG_64BIT */
79
80 .globl s390_base_mcck_handler
81s390_base_mcck_handler:
82 basr %r13,0
830: l %r15,__LC_PANIC_STACK # load panic stack
84 ahi %r15,-STACK_FRAME_OVERHEAD
85 l %r1,2f-0b(%r13)
86 l %r1,0(%r1)
87 ltr %r1,%r1
88 jz 1f
89 basr %r14,%r1
901: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
91 lpsw __LC_MCK_OLD_PSW
92
932: .long s390_base_mcck_handler_fn
94
95 .section .bss
96 .globl s390_base_mcck_handler_fn
97s390_base_mcck_handler_fn:
98 .long 0
99 .previous
100
101 .globl s390_base_ext_handler
102s390_base_ext_handler:
103 stm %r0,%r15,__LC_SAVE_AREA
104 basr %r13,0
1050: ahi %r15,-STACK_FRAME_OVERHEAD
106 l %r1,2f-0b(%r13)
107 l %r1,0(%r1)
108 ltr %r1,%r1
109 jz 1f
110 basr %r14,%r1
1111: lm %r0,%r15,__LC_SAVE_AREA
112 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
113 lpsw __LC_EXT_OLD_PSW
114
1152: .long s390_base_ext_handler_fn
116
117 .section .bss
118 .globl s390_base_ext_handler_fn
119s390_base_ext_handler_fn:
120 .long 0
121 .previous
122
123 .globl s390_base_pgm_handler
124s390_base_pgm_handler:
125 stm %r0,%r15,__LC_SAVE_AREA
126 basr %r13,0
1270: ahi %r15,-STACK_FRAME_OVERHEAD
128 l %r1,2f-0b(%r13)
129 l %r1,0(%r1)
130 ltr %r1,%r1
131 jz 1f
132 basr %r14,%r1
133 lm %r0,%r15,__LC_SAVE_AREA
134 lpsw __LC_PGM_OLD_PSW
135
1361: lpsw disabled_wait_psw-0b(%r13)
137
1382: .long s390_base_pgm_handler_fn
139
140disabled_wait_psw:
141 .align 8
142 .long 0x000a0000,0x00000000 + s390_base_pgm_handler
143
144 .section .bss
145 .globl s390_base_pgm_handler_fn
146s390_base_pgm_handler_fn:
147 .long 0
148 .previous
149
150#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 5c46054195cb..f1e40ca00d8d 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
192 192
193#undef cputime_to_timeval 193#undef cputime_to_timeval
194#define cputime_to_timeval cputime_to_compat_timeval 194#define cputime_to_timeval cputime_to_compat_timeval
195static __inline__ void 195static inline void
196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) 196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
197{ 197{
198 value->tv_usec = cputime % 1000000; 198 value->tv_usec = cputime % 1000000;
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 71d27c493568..914d49444f92 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -12,10 +12,9 @@
12#include <linux/personality.h> 12#include <linux/personality.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14 14
15struct exec_domain s390_exec_domain; 15static struct exec_domain s390_exec_domain;
16 16
17static int __init 17static int __init s390_init (void)
18s390_init (void)
19{ 18{
20 s390_exec_domain.name = "Linux/s390"; 19 s390_exec_domain.name = "Linux/s390";
21 s390_exec_domain.handler = NULL; 20 s390_exec_domain.handler = NULL;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 5b33f823863a..666bb6daa148 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -69,6 +69,12 @@
69 69
70#include "compat_linux.h" 70#include "compat_linux.h"
71 71
72long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
73 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
74 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
75long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
76 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
77 PSW32_MASK_PSTATE);
72 78
73/* For this source file, we want overflow handling. */ 79/* For this source file, we want overflow handling. */
74 80
@@ -416,7 +422,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
416 mm_segment_t old_fs = get_fs (); 422 mm_segment_t old_fs = get_fs ();
417 423
418 set_fs (KERNEL_DS); 424 set_fs (KERNEL_DS);
419 ret = sys_sysinfo((struct sysinfo __user *) &s); 425 ret = sys_sysinfo((struct sysinfo __force __user *) &s);
420 set_fs (old_fs); 426 set_fs (old_fs);
421 err = put_user (s.uptime, &info->uptime); 427 err = put_user (s.uptime, &info->uptime);
422 err |= __put_user (s.loads[0], &info->loads[0]); 428 err |= __put_user (s.loads[0], &info->loads[0]);
@@ -445,7 +451,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
445 mm_segment_t old_fs = get_fs (); 451 mm_segment_t old_fs = get_fs ();
446 452
447 set_fs (KERNEL_DS); 453 set_fs (KERNEL_DS);
448 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); 454 ret = sys_sched_rr_get_interval(pid,
455 (struct timespec __force __user *) &t);
449 set_fs (old_fs); 456 set_fs (old_fs);
450 if (put_compat_timespec(&t, interval)) 457 if (put_compat_timespec(&t, interval))
451 return -EFAULT; 458 return -EFAULT;
@@ -472,8 +479,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
472 } 479 }
473 set_fs (KERNEL_DS); 480 set_fs (KERNEL_DS);
474 ret = sys_rt_sigprocmask(how, 481 ret = sys_rt_sigprocmask(how,
475 set ? (sigset_t __user *) &s : NULL, 482 set ? (sigset_t __force __user *) &s : NULL,
476 oset ? (sigset_t __user *) &s : NULL, 483 oset ? (sigset_t __force __user *) &s : NULL,
477 sigsetsize); 484 sigsetsize);
478 set_fs (old_fs); 485 set_fs (old_fs);
479 if (ret) return ret; 486 if (ret) return ret;
@@ -499,7 +506,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
499 mm_segment_t old_fs = get_fs(); 506 mm_segment_t old_fs = get_fs();
500 507
501 set_fs (KERNEL_DS); 508 set_fs (KERNEL_DS);
502 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 509 ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
503 set_fs (old_fs); 510 set_fs (old_fs);
504 if (!ret) { 511 if (!ret) {
505 switch (_NSIG_WORDS) { 512 switch (_NSIG_WORDS) {
@@ -524,7 +531,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
524 if (copy_siginfo_from_user32(&info, uinfo)) 531 if (copy_siginfo_from_user32(&info, uinfo))
525 return -EFAULT; 532 return -EFAULT;
526 set_fs (KERNEL_DS); 533 set_fs (KERNEL_DS);
527 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); 534 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
528 set_fs (old_fs); 535 set_fs (old_fs);
529 return ret; 536 return ret;
530} 537}
@@ -682,7 +689,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse
682 689
683 set_fs(KERNEL_DS); 690 set_fs(KERNEL_DS);
684 ret = sys_sendfile(out_fd, in_fd, 691 ret = sys_sendfile(out_fd, in_fd,
685 offset ? (off_t __user *) &of : NULL, count); 692 offset ? (off_t __force __user *) &of : NULL, count);
686 set_fs(old_fs); 693 set_fs(old_fs);
687 694
688 if (offset && put_user(of, offset)) 695 if (offset && put_user(of, offset))
@@ -703,7 +710,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
703 710
704 set_fs(KERNEL_DS); 711 set_fs(KERNEL_DS);
705 ret = sys_sendfile64(out_fd, in_fd, 712 ret = sys_sendfile64(out_fd, in_fd,
706 offset ? (loff_t __user *) &lof : NULL, count); 713 offset ? (loff_t __force __user *) &lof : NULL,
714 count);
707 set_fs(old_fs); 715 set_fs(old_fs);
708 716
709 if (offset && put_user(lof, offset)) 717 if (offset && put_user(lof, offset))
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1a18e29668ef..e89f8c0c42a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -115,37 +115,6 @@ typedef struct
115 __u32 addr; 115 __u32 addr;
116} _psw_t32 __attribute__ ((aligned(8))); 116} _psw_t32 __attribute__ ((aligned(8)));
117 117
118#define PSW32_MASK_PER 0x40000000UL
119#define PSW32_MASK_DAT 0x04000000UL
120#define PSW32_MASK_IO 0x02000000UL
121#define PSW32_MASK_EXT 0x01000000UL
122#define PSW32_MASK_KEY 0x00F00000UL
123#define PSW32_MASK_MCHECK 0x00040000UL
124#define PSW32_MASK_WAIT 0x00020000UL
125#define PSW32_MASK_PSTATE 0x00010000UL
126#define PSW32_MASK_ASC 0x0000C000UL
127#define PSW32_MASK_CC 0x00003000UL
128#define PSW32_MASK_PM 0x00000f00UL
129
130#define PSW32_ADDR_AMODE31 0x80000000UL
131#define PSW32_ADDR_INSN 0x7FFFFFFFUL
132
133#define PSW32_BASE_BITS 0x00080000UL
134
135#define PSW32_ASC_PRIMARY 0x00000000UL
136#define PSW32_ASC_ACCREG 0x00004000UL
137#define PSW32_ASC_SECONDARY 0x00008000UL
138#define PSW32_ASC_HOME 0x0000C000UL
139
140#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
141 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
142 PSW32_MASK_PSTATE)
143
144#define PSW32_MASK_MERGE(CURRENT,NEW) \
145 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
146 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
147
148
149typedef struct 118typedef struct
150{ 119{
151 _psw_t32 psw; 120 _psw_t32 psw;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 861888ab8c13..887a9881d0d0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
275 } 275 }
276 276
277 set_fs (KERNEL_DS); 277 set_fs (KERNEL_DS);
278 ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), 278 ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
279 (stack_t __user *) (uoss ? &koss : NULL), 279 (stack_t __force __user *) (uoss ? &koss : NULL),
280 regs->gprs[15]); 280 regs->gprs[15]);
281 set_fs (old_fs); 281 set_fs (old_fs);
282 282
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
298 _s390_regs_common32 regs32; 298 _s390_regs_common32 regs32;
299 int err, i; 299 int err, i;
300 300
301 regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, 301 regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
302 (__u32)(regs->psw.mask >> 32)); 302 (__u32)(regs->psw.mask >> 32));
303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; 303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
304 for (i = 0; i < NUM_GPRS; i++) 304 for (i = 0; i < NUM_GPRS; i++)
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
401 goto badframe; 401 goto badframe;
402 402
403 set_fs (KERNEL_DS); 403 set_fs (KERNEL_DS);
404 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); 404 do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
405 set_fs (old_fs); 405 set_fs (old_fs);
406 406
407 return regs->gprs[2]; 407 return regs->gprs[2];
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index a5972f1541fe..6c89f30c8e31 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -16,6 +16,7 @@
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/io.h>
19 20
20static DEFINE_SPINLOCK(cpcmd_lock); 21static DEFINE_SPINLOCK(cpcmd_lock);
21static char cpcmd_buf[241]; 22static char cpcmd_buf[241];
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
88 int len; 89 int len;
89 unsigned long flags; 90 unsigned long flags;
90 91
91 if ((rlen == 0) || (response == NULL) 92 if ((virt_to_phys(response) != (unsigned long) response) ||
92 || !((unsigned long)response >> 31)) { 93 (((unsigned long)response + rlen) >> 31)) {
93 spin_lock_irqsave(&cpcmd_lock, flags);
94 len = __cpcmd(cmd, response, rlen, response_code);
95 spin_unlock_irqrestore(&cpcmd_lock, flags);
96 }
97 else {
98 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); 94 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
99 if (!lowbuf) { 95 if (!lowbuf) {
100 printk(KERN_WARNING 96 printk(KERN_WARNING
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
106 spin_unlock_irqrestore(&cpcmd_lock, flags); 102 spin_unlock_irqrestore(&cpcmd_lock, flags);
107 memcpy(response, lowbuf, rlen); 103 memcpy(response, lowbuf, rlen);
108 kfree(lowbuf); 104 kfree(lowbuf);
105 } else {
106 spin_lock_irqsave(&cpcmd_lock, flags);
107 len = __cpcmd(cmd, response, rlen, response_code);
108 spin_unlock_irqrestore(&cpcmd_lock, flags);
109 } 109 }
110 return len; 110 return len;
111} 111}
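
cpcmd() above bounces the CP command response through a kmalloc(GFP_KERNEL | GFP_DMA) buffer whenever the caller's buffer cannot be handed over directly (its virtual address does not equal its real address, or the buffer end crosses the 2 GB line), and copies the result back afterwards. A user-space sketch of that bounce-buffer pattern, with buffer_ok() and raw_cmd() as hypothetical stand-ins for the addressability check and __cpcmd(), could read:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Hypothetical stand-ins: in the driver the check is whether the buffer
     * is identity-mapped real storage below 2 GB, the low buffer comes from
     * kmalloc(..., GFP_KERNEL | GFP_DMA), and the backend is __cpcmd().
     */
    static int buffer_ok(const char *buf, int len)
    {
            (void)buf; (void)len;
            return 0;                           /* stub: always force the bounce */
    }

    static int raw_cmd(const char *cmd, char *rsp, int rlen)
    {
            return snprintf(rsp, rlen, "response to '%s'", cmd);
    }

    static int run_cmd(const char *cmd, char *response, int rlen)
    {
            char *lowbuf;
            int len;

            if (buffer_ok(response, rlen))
                    return raw_cmd(cmd, response, rlen);

            /* bounce: run the command into a buffer the backend can reach,
             * then copy the result back to the caller's buffer */
            lowbuf = malloc(rlen);
            if (!lowbuf)
                    return -1;
            len = raw_cmd(cmd, lowbuf, rlen);
            memcpy(response, lowbuf, rlen);
            free(lowbuf);
            return len;
    }
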
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 926cceeae0fa..8cc7c9fa64f5 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/threads.h> 10#include <linux/threads.h>
11#include <linux/kexec.h> 11#include <linux/kexec.h>
12#include <linux/reboot.h>
12 13
13void machine_crash_shutdown(struct pt_regs *regs) 14void machine_crash_shutdown(struct pt_regs *regs)
14{ 15{
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb57bc0e3fc8..f4b62df02aa2 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = {
120 NULL 120 NULL
121}; 121};
122 122
123struct debug_view debug_level_view = { 123static struct debug_view debug_level_view = {
124 "level", 124 "level",
125 &debug_prolog_level_fn, 125 &debug_prolog_level_fn,
126 NULL, 126 NULL,
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = {
129 NULL 129 NULL
130}; 130};
131 131
132struct debug_view debug_pages_view = { 132static struct debug_view debug_pages_view = {
133 "pages", 133 "pages",
134 &debug_prolog_pages_fn, 134 &debug_prolog_pages_fn,
135 NULL, 135 NULL,
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = {
138 NULL 138 NULL
139}; 139};
140 140
141struct debug_view debug_flush_view = { 141static struct debug_view debug_flush_view = {
142 "flush", 142 "flush",
143 NULL, 143 NULL,
144 NULL, 144 NULL,
@@ -156,14 +156,14 @@ struct debug_view debug_sprintf_view = {
156 NULL 156 NULL
157}; 157};
158 158
159 159/* used by dump analysis tools to determine version of debug feature */
160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; 160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
161 161
162/* static globals */ 162/* static globals */
163 163
164static debug_info_t *debug_area_first = NULL; 164static debug_info_t *debug_area_first = NULL;
165static debug_info_t *debug_area_last = NULL; 165static debug_info_t *debug_area_last = NULL;
166DECLARE_MUTEX(debug_lock); 166static DECLARE_MUTEX(debug_lock);
167 167
168static int initialized; 168static int initialized;
169 169
@@ -905,7 +905,7 @@ static struct ctl_table s390dbf_dir_table[] = {
905 { .ctl_name = 0 } 905 { .ctl_name = 0 }
906}; 906};
907 907
908struct ctl_table_header *s390dbf_sysctl_header; 908static struct ctl_table_header *s390dbf_sysctl_header;
909 909
910void 910void
911debug_stop_all(void) 911debug_stop_all(void)
@@ -1300,8 +1300,7 @@ out:
1300 * flushes debug areas 1300 * flushes debug areas
1301 */ 1301 */
1302 1302
1303void 1303static void debug_flush(debug_info_t* id, int area)
1304debug_flush(debug_info_t* id, int area)
1305{ 1304{
1306 unsigned long flags; 1305 unsigned long flags;
1307 int i,j; 1306 int i,j;
@@ -1511,8 +1510,7 @@ out:
1511/* 1510/*
1512 * clean up module 1511 * clean up module
1513 */ 1512 */
1514void 1513static void __exit debug_exit(void)
1515__exit debug_exit(void)
1516{ 1514{
1517 debugfs_remove(debug_debugfs_root_entry); 1515 debugfs_remove(debug_debugfs_root_entry);
1518 unregister_sysctl_table(s390dbf_sysctl_header); 1516 unregister_sysctl_table(s390dbf_sysctl_header);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
new file mode 100644
index 000000000000..e518dd53eff5
--- /dev/null
+++ b/arch/s390/kernel/early.c
@@ -0,0 +1,306 @@
1/*
2 * arch/s390/kernel/early.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13#include <linux/lockdep.h>
14#include <linux/module.h>
15#include <linux/pfn.h>
16#include <linux/uaccess.h>
17#include <asm/lowcore.h>
18#include <asm/processor.h>
19#include <asm/sections.h>
20#include <asm/setup.h>
21#include <asm/cpcmd.h>
22#include <asm/sclp.h>
23
24/*
25 * Create a Kernel NSS if the SAVESYS= parameter is defined
26 */
27#define DEFSYS_CMD_SIZE 96
28#define SAVESYS_CMD_SIZE 32
29
30char kernel_nss_name[NSS_NAME_SIZE + 1];
31
32#ifdef CONFIG_SHARED_KERNEL
33static noinline __init void create_kernel_nss(void)
34{
35 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
36#ifdef CONFIG_BLK_DEV_INITRD
37 unsigned int sinitrd_pfn, einitrd_pfn;
38#endif
39 int response;
40 char *savesys_ptr;
41 char upper_command_line[COMMAND_LINE_SIZE];
42 char defsys_cmd[DEFSYS_CMD_SIZE];
43 char savesys_cmd[SAVESYS_CMD_SIZE];
44
45 /* Do nothing if we are not running under VM */
46 if (!MACHINE_IS_VM)
47 return;
48
49 /* Convert COMMAND_LINE to upper case */
50 for (i = 0; i < strlen(COMMAND_LINE); i++)
51 upper_command_line[i] = toupper(COMMAND_LINE[i]);
52
53 savesys_ptr = strstr(upper_command_line, "SAVESYS=");
54
55 if (!savesys_ptr)
56 return;
57
58 savesys_ptr += 8; /* Point to the beginning of the NSS name */
59 for (i = 0; i < NSS_NAME_SIZE; i++) {
60 if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
61 break;
62 kernel_nss_name[i] = savesys_ptr[i];
63 }
64
65 stext_pfn = PFN_DOWN(__pa(&_stext));
66 eshared_pfn = PFN_DOWN(__pa(&_eshared));
67 end_pfn = PFN_UP(__pa(&_end));
68 min_size = end_pfn << 2;
69
70 sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
71 kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
72 eshared_pfn, end_pfn);
73
74#ifdef CONFIG_BLK_DEV_INITRD
75 if (INITRD_START && INITRD_SIZE) {
76 sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
77 einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
78 min_size = einitrd_pfn << 2;
79 sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
80 sinitrd_pfn, einitrd_pfn);
81 }
82#endif
83
84 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
85 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
86 kernel_nss_name, kernel_nss_name);
87
88 __cpcmd(defsys_cmd, NULL, 0, &response);
89
90 if (response != 0)
91 return;
92
93 __cpcmd(savesys_cmd, NULL, 0, &response);
94
95 if (response != strlen(savesys_cmd))
96 return;
97
98 ipl_flags = IPL_NSS_VALID;
99}
100
101#else /* CONFIG_SHARED_KERNEL */
102
103static inline void create_kernel_nss(void) { }
104
105#endif /* CONFIG_SHARED_KERNEL */
106
107/*
108 * Clear bss memory
109 */
110static noinline __init void clear_bss_section(void)
111{
112 memset(__bss_start, 0, _end - __bss_start);
113}
114
115/*
116 * Initialize storage key for kernel pages
117 */
118static noinline __init void init_kernel_storage_key(void)
119{
120 unsigned long end_pfn, init_pfn;
121
122 end_pfn = PFN_UP(__pa(&_end));
123
124 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
125 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
126}
127
128static noinline __init void detect_machine_type(void)
129{
130 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
131
132 asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id));
133
134 /* Running under z/VM ? */
135 if (cpuinfo->cpu_id.version == 0xff)
136 machine_flags |= 1;
137
138 /* Running on a P/390 ? */
139 if (cpuinfo->cpu_id.machine == 0x7490)
140 machine_flags |= 4;
141}
142
143static noinline __init int memory_fast_detect(void)
144{
145
146 unsigned long val0 = 0;
147 unsigned long val1 = 0xc;
148 int ret = -ENOSYS;
149
150 if (ipl_flags & IPL_NSS_VALID)
151 return -ENOSYS;
152
153 asm volatile(
154 " diag %1,%2,0x260\n"
155 "0: lhi %0,0\n"
156 "1:\n"
157 EX_TABLE(0b,1b)
158 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
159
160 if (ret || val0 != val1)
161 return -ENOSYS;
162
163 memory_chunk[0].size = val0;
164 return 0;
165}
166
167#define ADDR2G (1UL << 31)
168
169static noinline __init unsigned long sclp_memory_detect(void)
170{
171 struct sclp_readinfo_sccb *sccb;
172 unsigned long long memsize;
173
174 sccb = &s390_readinfo_sccb;
175
176 if (sccb->header.response_code != 0x10)
177 return 0;
178
179 if (sccb->rnsize)
180 memsize = sccb->rnsize << 20;
181 else
182 memsize = sccb->rnsize2 << 20;
183 if (sccb->rnmax)
184 memsize *= sccb->rnmax;
185 else
186 memsize *= sccb->rnmax2;
187#ifndef CONFIG_64BIT
188 /*
189 * Can't deal with more than 2G in 31 bit addressing mode, so
190 * limit the value in order to avoid strange side effects.
191 */
192 if (memsize > ADDR2G)
193 memsize = ADDR2G;
194#endif
195 return (unsigned long) memsize;
196}
197
198static inline __init unsigned long __tprot(unsigned long addr)
199{
200 int cc = -1;
201
202 asm volatile(
203 " tprot 0(%1),0\n"
204 "0: ipm %0\n"
205 " srl %0,28\n"
206 "1:\n"
207 EX_TABLE(0b,1b)
208 : "+d" (cc) : "a" (addr) : "cc");
209 return (unsigned long)cc;
210}
211
212/* Checking memory in 128KB increments. */
213#define CHUNK_INCR (1UL << 17)
214
215static noinline __init void find_memory_chunks(unsigned long memsize)
216{
217 unsigned long addr = 0, old_addr = 0;
218 unsigned long old_cc = CHUNK_READ_WRITE;
219 unsigned long cc;
220 int chunk = 0;
221
222 while (chunk < MEMORY_CHUNKS) {
223 cc = __tprot(addr);
224 while (cc == old_cc) {
225 addr += CHUNK_INCR;
226 cc = __tprot(addr);
227#ifndef CONFIG_64BIT
228 if (addr == ADDR2G)
229 break;
230#endif
231 }
232
233 if (old_addr != addr &&
234 (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
235 memory_chunk[chunk].addr = old_addr;
236 memory_chunk[chunk].size = addr - old_addr;
237 memory_chunk[chunk].type = old_cc;
238 chunk++;
239 }
240
241 old_addr = addr;
242 old_cc = cc;
243
244#ifndef CONFIG_64BIT
245 if (addr == ADDR2G)
246 break;
247#endif
248 /*
249 * Finish memory detection at the first hole, unless
250 * - we reached the hsa -> skip it.
251 * - we know there must be more.
252 */
253 if (cc == -1UL && !memsize && old_addr != ADDR2G)
254 break;
255 if (memsize && addr >= memsize)
256 break;
257 }
258}
259
260static __init void early_pgm_check_handler(void)
261{
262 unsigned long addr;
263 const struct exception_table_entry *fixup;
264
265 addr = S390_lowcore.program_old_psw.addr;
266 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
267 if (!fixup)
268 disabled_wait(0);
269 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
270}
271
272static noinline __init void setup_lowcore_early(void)
273{
274 psw_t psw;
275
276 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
277 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
278 S390_lowcore.external_new_psw = psw;
279 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
280 S390_lowcore.program_new_psw = psw;
281 s390_base_pgm_handler_fn = early_pgm_check_handler;
282}
283
284/*
285 * Save ipl parameters, clear bss memory, initialize storage keys
286 * and create a kernel NSS at startup if the SAVESYS= parm is defined
287 */
288void __init startup_init(void)
289{
290 unsigned long memsize;
291
292 ipl_save_parameters();
293 clear_bss_section();
294 init_kernel_storage_key();
295 lockdep_init();
296 lockdep_off();
297 detect_machine_type();
298 create_kernel_nss();
299 sort_main_extable();
300 setup_lowcore_early();
301 sclp_readinfo_early();
302 memsize = sclp_memory_detect();
303 if (memory_fast_detect() < 0)
304 find_memory_chunks(memsize);
305 lockdep_on();
306}
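
find_memory_chunks() above walks storage in 128 KB steps using __tprot() (a TPROT probe whose addressing exception is turned into a -1 return via the exception table) and coalesces consecutive addresses that report the same access code into one memory_chunk entry, until it runs out of chunk slots, passes the detected memory size, or hits a hole. The same run-length grouping can be sketched in plain C; probe() below is a dummy stand-in for __tprot() and the scan limits are invented for the example:

    #define CHUNK_INCR   (1UL << 17)            /* probe in 128 KB steps */
    #define MAX_CHUNKS   16
    #define END_OF_SCAN  (1UL << 24)            /* stub scan limit: 16 MB */

    struct chunk {
            unsigned long addr;
            unsigned long size;
            long type;
    };

    /* hypothetical probe: returns an access code, or -1 for a hole */
    static long probe(unsigned long addr)
    {
            return addr < (1UL << 23) ? 0 : -1; /* stub: 8 MB of "memory" */
    }

    static int find_chunks(struct chunk *chunks)
    {
            unsigned long addr = 0, start = 0;
            long cc, old_cc = probe(0);
            int n = 0;

            while (n < MAX_CHUNKS) {
                    addr += CHUNK_INCR;
                    cc = probe(addr);
                    if (cc == old_cc && addr < END_OF_SCAN)
                            continue;           /* still in the same run */
                    if (old_cc >= 0) {          /* close an accessible run */
                            chunks[n].addr = start;
                            chunks[n].size = addr - start;
                            chunks[n].type = old_cc;
                            n++;
                    }
                    if (cc < 0 || addr >= END_OF_SCAN)
                            break;              /* stop at the first hole */
                    start = addr;
                    old_cc = cc;
            }
            return n;
    }
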
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index bb0f973137f0..cc0dc609d738 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/types.h> 13#include <asm/types.h>
14#include <asm/ebcdic.h>
14 15
15/* 16/*
16 * ASCII (IBM PC 437) -> EBCDIC 037 17 * ASCII (IBM PC 437) -> EBCDIC 037
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index eca507050e47..453fd3b4edea 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -51,176 +51,15 @@ startup_continue:
51 st %r15,__LC_KERNEL_STACK # set end of kernel stack 51 st %r15,__LC_KERNEL_STACK # set end of kernel stack
52 ahi %r15,-96 52 ahi %r15,-96
53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
54
55 l %r14,.Lipl_save_parameters-.LPG1(%r13)
56 basr %r14,%r14
57# 54#
58# clear bss memory 55# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
56# and create a kernel NSS if the SAVESYS= parm is defined
59# 57#
60 l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss 58 l %r14,.Lstartup_init-.LPG1(%r13)
61 l %r3,.Lbss_end-.LPG1(%r13) # end of bss 59 basr %r14,%r14
62 sr %r3,%r2 # length of bss
63 sr %r4,%r4
64 sr %r5,%r5 # set src,length and pad to zero
65 sr %r0,%r0
66 mvcle %r2,%r4,0 # clear mem
67 jo .-4 # branch back, if not finish
68
69 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
70.Lservicecall:
71 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
72
73 stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
74 la %r1,0x200 # set bit 22
75 o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
76 st %r1,.Lcr-.LPG1(%r13)
77 lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
78
79 mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
80 la %r1, .Lsclph-.LPG1(%r13)
81 a %r1,__LC_EXT_NEW_PSW+4 # set handler
82 st %r1,__LC_EXT_NEW_PSW+4
83
84 l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
85 lr %r1,%r4 # our sccb
86 .insn rre,0xb2200000,%r2,%r1 # service call
87 ipm %r1
88 srl %r1,28 # get cc code
89 xr %r3, %r3
90 chi %r1,3
91 be .Lfchunk-.LPG1(%r13) # leave
92 chi %r1,2
93 be .Lservicecall-.LPG1(%r13)
94 lpsw .Lwaitsclp-.LPG1(%r13)
95.Lsclph:
96 lh %r1,.Lsccbr-.Lsccb(%r4)
97 chi %r1,0x10 # 0x0010 is the sucess code
98 je .Lprocsccb # let's process the sccb
99 chi %r1,0x1f0
100 bne .Lfchunk-.LPG1(%r13) # unhandled error code
101 c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
102 bne .Lfchunk-.LPG1(%r13) # if no, give up
103 l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
104 b .Lservicecall-.LPG1(%r13)
105.Lprocsccb:
106 lhi %r1,0
107 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
108 jnz .Lscnd
109 lhi %r1,0x800 # otherwise report 2GB
110.Lscnd:
111 lhi %r3,0x800 # limit reported memory size to 2GB
112 cr %r1,%r3
113 jl .Lno2gb
114 lr %r1,%r3
115.Lno2gb:
116 xr %r3,%r3 # same logic
117 ic %r3,.Lscpa1-.Lsccb(%r4)
118 chi %r3,0x00
119 jne .Lcompmem
120 l %r3,.Lscpa2-.Lsccb(%r4)
121.Lcompmem:
122 mr %r2,%r1 # mem in MB on 128-bit
123 l %r1,.Lonemb-.LPG1(%r13)
124 mr %r2,%r1 # mem size in bytes in %r3
125 b .Lfchunk-.LPG1(%r13)
126
127 .align 4
128.Lipl_save_parameters:
129 .long ipl_save_parameters
130.Linittu:
131 .long init_thread_union
132.Lpmask:
133 .byte 0
134 .align 8
135.Lpcext:.long 0x00080000,0x80000000
136.Lcr:
137 .long 0x00 # place holder for cr0
138 .align 8
139.Lwaitsclp:
140 .long 0x010a0000,0x80000000 + .Lsclph
141.Lrcp:
142 .int 0x00120001 # Read SCP forced code
143.Lrcp2:
144 .int 0x00020001 # Read SCP code
145.Lonemb:
146 .int 0x100000
147.Lfchunk:
148 60
149#
150# find memory chunks.
151#
152 lr %r9,%r3 # end of mem
153 mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
154 la %r1,1 # test in increments of 128KB
155 sll %r1,17
156 l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
157 slr %r4,%r4 # set start of chunk to zero
158 slr %r5,%r5 # set end of chunk to zero
159 slr %r6,%r6 # set access code to zero
160 la %r10,MEMORY_CHUNKS # number of chunks
161.Lloop:
162 tprot 0(%r5),0 # test protection of first byte
163 ipm %r7
164 srl %r7,28
165 clr %r6,%r7 # compare cc with last access code
166 be .Lsame-.LPG1(%r13)
167 lhi %r8,0 # no program checks
168 b .Lsavchk-.LPG1(%r13)
169.Lsame:
170 ar %r5,%r1 # add 128KB to end of chunk
171 bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
172.Lchkmem: # > 2GB or tprot got a program check
173 lhi %r8,1 # set program check flag
174.Lsavchk:
175 clr %r4,%r5 # chunk size > 0?
176 be .Lchkloop-.LPG1(%r13)
177 st %r4,0(%r3) # store start address of chunk
178 lr %r0,%r5
179 slr %r0,%r4
180 st %r0,4(%r3) # store size of chunk
181 st %r6,8(%r3) # store type of chunk
182 la %r3,12(%r3)
183 ahi %r10,-1 # update chunk number
184.Lchkloop:
185 lr %r6,%r7 # set access code to last cc
186 # we got an exception or we're starting a new
187 # chunk , we must check if we should
188 # still try to find valid memory (if we detected
189 # the amount of available storage), and if we
190 # have chunks left
191 xr %r0,%r0
192 clr %r0,%r9 # did we detect memory?
193 je .Ldonemem # if not, leave
194 chi %r10,0 # do we have chunks left?
195 je .Ldonemem
196 chi %r8,1 # program check ?
197 je .Lpgmchk
198 lr %r4,%r5 # potential new chunk
199 alr %r5,%r1 # add 128KB to end of chunk
200 j .Llpcnt
201.Lpgmchk:
202 alr %r5,%r1 # add 128KB to end of chunk
203 lr %r4,%r5 # potential new chunk
204.Llpcnt:
205 clr %r5,%r9 # should we go on?
206 jl .Lloop
207.Ldonemem:
208 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags 61 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
209# 62#
210# find out if we are running under VM
211#
212 stidp __LC_CPUID # store cpuid
213 tm __LC_CPUID,0xff # running under VM ?
214 bno .Lnovm-.LPG1(%r13)
215 oi 3(%r12),1 # set VM flag
216.Lnovm:
217 lh %r0,__LC_CPUID+4 # get cpu version
218 chi %r0,0x7490 # running on a P/390 ?
219 bne .Lnop390-.LPG1(%r13)
220 oi 3(%r12),4 # set P/390 flag
221.Lnop390:
222
223#
224# find out if we have an IEEE fpu 63# find out if we have an IEEE fpu
225# 64#
226 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) 65 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
@@ -295,7 +134,6 @@ startup_continue:
295 .long 0 # cr15: linkage stack operations 134 .long 0 # cr15: linkage stack operations
296.Lduct: .long 0,0,0,0,0,0,0,0 135.Lduct: .long 0,0,0,0,0,0,0,0
297 .long 0,0,0,0,0,0,0,0 136 .long 0,0,0,0,0,0,0,0
298.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
299.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu 137.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
300.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp 138.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
301.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg 139.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -306,7 +144,9 @@ startup_continue:
306.Lbss_bgn: .long __bss_start 144.Lbss_bgn: .long __bss_start
307.Lbss_end: .long _end 145.Lbss_end: .long _end
308.Lparmaddr: .long PARMAREA 146.Lparmaddr: .long PARMAREA
309.Lsccbaddr: .long .Lsccb 147.Linittu: .long init_thread_union
148.Lstartup_init:
149 .long startup_init
310 150
311 .globl ipl_schib 151 .globl ipl_schib
312ipl_schib: 152ipl_schib:
@@ -322,26 +162,6 @@ ipl_devno:
322 .word 0 162 .word 0
323 163
324 .org 0x12000 164 .org 0x12000
325.globl s390_readinfo_sccb
326s390_readinfo_sccb:
327.Lsccb:
328 .hword 0x1000 # length, one page
329 .byte 0x00,0x00,0x00
330 .byte 0x80 # variable response bit set
331.Lsccbr:
332 .hword 0x00 # response code
333.Lscpincr1:
334 .hword 0x00
335.Lscpa1:
336 .byte 0x00
337 .fill 89,1,0
338.Lscpa2:
339 .int 0x00
340.Lscpincr2:
341 .quad 0x00
342 .fill 3984,1,0
343 .org 0x13000
344
345#ifdef CONFIG_SHARED_KERNEL 165#ifdef CONFIG_SHARED_KERNEL
346 .org 0x100000 166 .org 0x100000
347#endif 167#endif
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6ba3f4512dd1..b8fec4e5c5d4 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -58,183 +58,15 @@ startup_continue:
58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack 58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
59 aghi %r15,-160 59 aghi %r15,-160
60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
61
62 brasl %r14,ipl_save_parameters
63# 61#
64# clear bss memory 62# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
63# and create a kernel NSS if the SAVESYS= parm is defined
65# 64#
66 larl %r2,__bss_start # start of bss segment 65 brasl %r14,startup_init
67 larl %r3,_end # end of bss segment
68 sgr %r3,%r2 # length of bss
69 sgr %r4,%r4 #
70 sgr %r5,%r5 # set src,length and pad to zero
71 mvcle %r2,%r4,0 # clear mem
72 jo .-4 # branch back, if not finish
73 # set program check new psw mask 66 # set program check new psw mask
74 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) 67 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
75 larl %r1,.Lslowmemdetect # set program check address
76 stg %r1,__LC_PGM_NEW_PSW+8
77 lghi %r1,0xc
78 diag %r0,%r1,0x260 # get memory size of virtual machine
79 cgr %r0,%r1 # different? -> old detection routine
80 jne .Lslowmemdetect
81 aghi %r1,1 # size is one more than end
82 larl %r2,memory_chunk
83 stg %r1,8(%r2) # store size of chunk
84 j .Ldonemem
85
86.Lslowmemdetect:
87 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
88.Lservicecall:
89 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
90
91 stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
92 la %r1,0x200 # set bit 22
93 og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
94 stg %r1,.Lcr-.LPG1(%r13)
95 lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
96
97 mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
98 larl %r1,.Lsclph
99 stg %r1,__LC_EXT_NEW_PSW+8 # set handler
100
101 larl %r4,.Lsccb # %r4 is our index for sccb stuff
102 lgr %r1,%r4 # our sccb
103 .insn rre,0xb2200000,%r2,%r1 # service call
104 ipm %r1
105 srl %r1,28 # get cc code
106 xr %r3,%r3
107 chi %r1,3
108 be .Lfchunk-.LPG1(%r13) # leave
109 chi %r1,2
110 be .Lservicecall-.LPG1(%r13)
111 lpswe .Lwaitsclp-.LPG1(%r13)
112.Lsclph:
113 lh %r1,.Lsccbr-.Lsccb(%r4)
114 chi %r1,0x10 # 0x0010 is the sucess code
115 je .Lprocsccb # let's process the sccb
116 chi %r1,0x1f0
117 bne .Lfchunk-.LPG1(%r13) # unhandled error code
118 c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
119 bne .Lfchunk-.LPG1(%r13) # if no, give up
120 l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
121 b .Lservicecall-.LPG1(%r13)
122.Lprocsccb:
123 lghi %r1,0
124 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
125 jnz .Lscnd
126 lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
127.Lscnd:
128 xr %r3,%r3 # same logic
129 ic %r3,.Lscpa1-.Lsccb(%r4)
130 chi %r3,0x00
131 jne .Lcompmem
132 l %r3,.Lscpa2-.Lsccb(%r4)
133.Lcompmem:
134 mlgr %r2,%r1 # mem in MB on 128-bit
135 l %r1,.Lonemb-.LPG1(%r13)
136 mlgr %r2,%r1 # mem size in bytes in %r3
137 b .Lfchunk-.LPG1(%r13)
138
139 .align 4
140.Lpmask:
141 .byte 0
142 .align 8
143.Lcr:
144 .quad 0x00 # place holder for cr0
145.Lwaitsclp:
146 .quad 0x0102000180000000,.Lsclph
147.Lrcp:
148 .int 0x00120001 # Read SCP forced code
149.Lrcp2:
150 .int 0x00020001 # Read SCP code
151.Lonemb:
152 .int 0x100000
153
154.Lfchunk:
155
156#
157# find memory chunks.
158#
159 lgr %r9,%r3 # end of mem
160 larl %r1,.Lchkmem # set program check address
161 stg %r1,__LC_PGM_NEW_PSW+8
162 la %r1,1 # test in increments of 128KB
163 sllg %r1,%r1,17
164 larl %r3,memory_chunk
165 slgr %r4,%r4 # set start of chunk to zero
166 slgr %r5,%r5 # set end of chunk to zero
167 slr %r6,%r6 # set access code to zero
168 la %r10,MEMORY_CHUNKS # number of chunks
169.Lloop:
170 tprot 0(%r5),0 # test protection of first byte
171 ipm %r7
172 srl %r7,28
173 clr %r6,%r7 # compare cc with last access code
174 je .Lsame
175 lghi %r8,0 # no program checks
176 j .Lsavchk
177.Lsame:
178 algr %r5,%r1 # add 128KB to end of chunk
179 # no need to check here,
180 brc 12,.Lloop # this is the same chunk
181.Lchkmem: # > 16EB or tprot got a program check
182 lghi %r8,1 # set program check flag
183.Lsavchk:
184 clgr %r4,%r5 # chunk size > 0?
185 je .Lchkloop
186 stg %r4,0(%r3) # store start address of chunk
187 lgr %r0,%r5
188 slgr %r0,%r4
189 stg %r0,8(%r3) # store size of chunk
190 st %r6,20(%r3) # store type of chunk
191 la %r3,24(%r3)
192 ahi %r10,-1 # update chunk number
193.Lchkloop:
194 lr %r6,%r7 # set access code to last cc
195 # we got an exception or we're starting a new
196 # chunk , we must check if we should
197 # still try to find valid memory (if we detected
198 # the amount of available storage), and if we
199 # have chunks left
200 lghi %r4,1
201 sllg %r4,%r4,31
202 clgr %r5,%r4
203 je .Lhsaskip
204 xr %r0, %r0
205 clgr %r0, %r9 # did we detect memory?
206 je .Ldonemem # if not, leave
207 chi %r10, 0 # do we have chunks left?
208 je .Ldonemem
209.Lhsaskip:
210 chi %r8,1 # program check ?
211 je .Lpgmchk
212 lgr %r4,%r5 # potential new chunk
213 algr %r5,%r1 # add 128KB to end of chunk
214 j .Llpcnt
215.Lpgmchk:
216 algr %r5,%r1 # add 128KB to end of chunk
217 lgr %r4,%r5 # potential new chunk
218.Llpcnt:
219 clgr %r5,%r9 # should we go on?
220 jl .Lloop
221.Ldonemem:
222
223 larl %r12,machine_flags 68 larl %r12,machine_flags
224# 69#
225# find out if we are running under VM
226#
227 stidp __LC_CPUID # store cpuid
228 tm __LC_CPUID,0xff # running under VM ?
229 bno 0f-.LPG1(%r13)
230 oi 7(%r12),1 # set VM flag
2310: lh %r0,__LC_CPUID+4 # get cpu version
232 chi %r0,0x7490 # running on a P/390 ?
233 bne 1f-.LPG1(%r13)
234 oi 7(%r12),4 # set P/390 flag
2351:
236
237#
238# find out if we have the MVPG instruction 70# find out if we have the MVPG instruction
239# 71#
240 la %r1,0f-.LPG1(%r13) # set program check address 72 la %r1,0f-.LPG1(%r13) # set program check address
@@ -336,25 +168,6 @@ ipl_devno:
336 .word 0 168 .word 0
337 169
338 .org 0x12000 170 .org 0x12000
339.globl s390_readinfo_sccb
340s390_readinfo_sccb:
341.Lsccb:
342 .hword 0x1000 # length, one page
343 .byte 0x00,0x00,0x00
344 .byte 0x80 # variable response bit set
345.Lsccbr:
346 .hword 0x00 # response code
347.Lscpincr1:
348 .hword 0x00
349.Lscpa1:
350 .byte 0x00
351 .fill 89,1,0
352.Lscpa2:
353 .int 0x00
354.Lscpincr2:
355 .quad 0x00
356 .fill 3984,1,0
357 .org 0x13000
358 171
359#ifdef CONFIG_SHARED_KERNEL 172#ifdef CONFIG_SHARED_KERNEL
360 .org 0x100000 173 .org 0x100000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 9e9972e8a52b..052259530651 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -20,26 +20,27 @@
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/reset.h> 22#include <asm/reset.h>
23#include <asm/sclp.h>
23 24
24#define IPL_PARM_BLOCK_VERSION 0 25#define IPL_PARM_BLOCK_VERSION 0
25#define LOADPARM_LEN 8
26 26
27extern char s390_readinfo_sccb[]; 27#define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10)
28#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) 28#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
29#define SCCB_LOADPARM (&s390_readinfo_sccb[24]) 29#define SCCB_FLAG (s390_readinfo_sccb.flags)
30#define SCCB_FLAG (s390_readinfo_sccb[91])
31 30
32enum ipl_type { 31enum ipl_type {
33 IPL_TYPE_NONE = 1, 32 IPL_TYPE_NONE = 1,
34 IPL_TYPE_UNKNOWN = 2, 33 IPL_TYPE_UNKNOWN = 2,
35 IPL_TYPE_CCW = 4, 34 IPL_TYPE_CCW = 4,
36 IPL_TYPE_FCP = 8, 35 IPL_TYPE_FCP = 8,
36 IPL_TYPE_NSS = 16,
37}; 37};
38 38
39#define IPL_NONE_STR "none" 39#define IPL_NONE_STR "none"
40#define IPL_UNKNOWN_STR "unknown" 40#define IPL_UNKNOWN_STR "unknown"
41#define IPL_CCW_STR "ccw" 41#define IPL_CCW_STR "ccw"
42#define IPL_FCP_STR "fcp" 42#define IPL_FCP_STR "fcp"
43#define IPL_NSS_STR "nss"
43 44
44static char *ipl_type_str(enum ipl_type type) 45static char *ipl_type_str(enum ipl_type type)
45{ 46{
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type)
50 return IPL_CCW_STR; 51 return IPL_CCW_STR;
51 case IPL_TYPE_FCP: 52 case IPL_TYPE_FCP:
52 return IPL_FCP_STR; 53 return IPL_FCP_STR;
54 case IPL_TYPE_NSS:
55 return IPL_NSS_STR;
53 case IPL_TYPE_UNKNOWN: 56 case IPL_TYPE_UNKNOWN:
54 default: 57 default:
55 return IPL_UNKNOWN_STR; 58 return IPL_UNKNOWN_STR;
@@ -64,6 +67,7 @@ enum ipl_method {
64 IPL_METHOD_FCP_RO_DIAG, 67 IPL_METHOD_FCP_RO_DIAG,
65 IPL_METHOD_FCP_RW_DIAG, 68 IPL_METHOD_FCP_RW_DIAG,
66 IPL_METHOD_FCP_RO_VM, 69 IPL_METHOD_FCP_RO_VM,
70 IPL_METHOD_NSS,
67}; 71};
68 72
69enum shutdown_action { 73enum shutdown_action {
@@ -114,11 +118,14 @@ enum diag308_rc {
114static int diag308_set_works = 0; 118static int diag308_set_works = 0;
115 119
116static int reipl_capabilities = IPL_TYPE_UNKNOWN; 120static int reipl_capabilities = IPL_TYPE_UNKNOWN;
121
117static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 122static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
118static enum ipl_method reipl_method = IPL_METHOD_NONE; 123static enum ipl_method reipl_method = IPL_METHOD_NONE;
119static struct ipl_parameter_block *reipl_block_fcp; 124static struct ipl_parameter_block *reipl_block_fcp;
120static struct ipl_parameter_block *reipl_block_ccw; 125static struct ipl_parameter_block *reipl_block_ccw;
121 126
127static char reipl_nss_name[NSS_NAME_SIZE + 1];
128
122static int dump_capabilities = IPL_TYPE_NONE; 129static int dump_capabilities = IPL_TYPE_NONE;
123static enum ipl_type dump_type = IPL_TYPE_NONE; 130static enum ipl_type dump_type = IPL_TYPE_NONE;
124static enum ipl_method dump_method = IPL_METHOD_NONE; 131static enum ipl_method dump_method = IPL_METHOD_NONE;
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
173 sys_##_prefix##_##_name##_show, \ 180 sys_##_prefix##_##_name##_show, \
174 sys_##_prefix##_##_name##_store); 181 sys_##_prefix##_##_name##_store);
175 182
183#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
184static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
185 char *page) \
186{ \
187 return sprintf(page, _fmt_out, _value); \
188} \
189static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
190 const char *buf, size_t len) \
191{ \
192 if (sscanf(buf, _fmt_in, _value) != 1) \
193 return -EINVAL; \
194 return len; \
195} \
196static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
197 __ATTR(_name,(S_IRUGO | S_IWUSR), \
198 sys_##_prefix##_##_name##_show, \
199 sys_##_prefix##_##_name##_store);
200
176static void make_attrs_ro(struct attribute **attrs) 201static void make_attrs_ro(struct attribute **attrs)
177{ 202{
178 while (*attrs) { 203 while (*attrs) {
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void)
189{ 214{
190 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 215 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
191 216
217 if (ipl_flags & IPL_NSS_VALID)
218 return IPL_TYPE_NSS;
192 if (!(ipl_flags & IPL_DEVNO_VALID)) 219 if (!(ipl_flags & IPL_DEVNO_VALID))
193 return IPL_TYPE_UNKNOWN; 220 return IPL_TYPE_UNKNOWN;
194 if (!(ipl_flags & IPL_PARMBLOCK_VALID)) 221 if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = {
324 .attrs = ipl_ccw_attrs, 351 .attrs = ipl_ccw_attrs,
325}; 352};
326 353
354/* NSS ipl device attributes */
355
356DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
357
358static struct attribute *ipl_nss_attrs[] = {
359 &sys_ipl_type_attr.attr,
360 &sys_ipl_nss_name_attr.attr,
361 NULL,
362};
363
364static struct attribute_group ipl_nss_attr_group = {
365 .attrs = ipl_nss_attrs,
366};
367
327/* UNKNOWN ipl device attributes */ 368/* UNKNOWN ipl device attributes */
328 369
329static struct attribute *ipl_unknown_attrs[] = { 370static struct attribute *ipl_unknown_attrs[] = {
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = {
432 .attrs = reipl_ccw_attrs, 473 .attrs = reipl_ccw_attrs,
433}; 474};
434 475
476
477/* NSS reipl device attributes */
478
479DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
480
481static struct attribute *reipl_nss_attrs[] = {
482 &sys_reipl_nss_name_attr.attr,
483 NULL,
484};
485
486static struct attribute_group reipl_nss_attr_group = {
487 .name = IPL_NSS_STR,
488 .attrs = reipl_nss_attrs,
489};
490
435/* reipl type */ 491/* reipl type */
436 492
437static int reipl_set_type(enum ipl_type type) 493static int reipl_set_type(enum ipl_type type)
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type)
454 else 510 else
455 reipl_method = IPL_METHOD_FCP_RO_DIAG; 511 reipl_method = IPL_METHOD_FCP_RO_DIAG;
456 break; 512 break;
513 case IPL_TYPE_NSS:
514 reipl_method = IPL_METHOD_NSS;
515 break;
457 default: 516 default:
458 reipl_method = IPL_METHOD_NONE; 517 reipl_method = IPL_METHOD_NONE;
459 } 518 }
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
475 rc = reipl_set_type(IPL_TYPE_CCW); 534 rc = reipl_set_type(IPL_TYPE_CCW);
476 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) 535 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
477 rc = reipl_set_type(IPL_TYPE_FCP); 536 rc = reipl_set_type(IPL_TYPE_FCP);
537 else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
538 rc = reipl_set_type(IPL_TYPE_NSS);
478 return (rc != 0) ? rc : len; 539 return (rc != 0) ? rc : len;
479} 540}
480 541
@@ -647,6 +708,10 @@ void do_reipl(void)
647 case IPL_METHOD_FCP_RO_VM: 708 case IPL_METHOD_FCP_RO_VM:
648 __cpcmd("IPL", NULL, 0, NULL); 709 __cpcmd("IPL", NULL, 0, NULL);
649 break; 710 break;
711 case IPL_METHOD_NSS:
712 sprintf(buf, "IPL %s", reipl_nss_name);
713 __cpcmd(buf, NULL, 0, NULL);
714 break;
650 case IPL_METHOD_NONE: 715 case IPL_METHOD_NONE:
651 default: 716 default:
652 if (MACHINE_IS_VM) 717 if (MACHINE_IS_VM)
@@ -733,6 +798,10 @@ static int __init ipl_init(void)
733 case IPL_TYPE_FCP: 798 case IPL_TYPE_FCP:
734 rc = ipl_register_fcp_files(); 799 rc = ipl_register_fcp_files();
735 break; 800 break;
801 case IPL_TYPE_NSS:
802 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
803 &ipl_nss_attr_group);
804 break;
736 default: 805 default:
737 rc = sysfs_create_group(&ipl_subsys.kset.kobj, 806 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
738 &ipl_unknown_attr_group); 807 &ipl_unknown_attr_group);
@@ -755,6 +824,20 @@ static void __init reipl_probe(void)
755 free_page((unsigned long)buffer); 824 free_page((unsigned long)buffer);
756} 825}
757 826
827static int __init reipl_nss_init(void)
828{
829 int rc;
830
831 if (!MACHINE_IS_VM)
832 return 0;
833 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group);
834 if (rc)
835 return rc;
836 strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
837 reipl_capabilities |= IPL_TYPE_NSS;
838 return 0;
839}
840
758static int __init reipl_ccw_init(void) 841static int __init reipl_ccw_init(void)
759{ 842{
760 int rc; 843 int rc;
@@ -837,6 +920,9 @@ static int __init reipl_init(void)
837 rc = reipl_fcp_init(); 920 rc = reipl_fcp_init();
838 if (rc) 921 if (rc)
839 return rc; 922 return rc;
923 rc = reipl_nss_init();
924 if (rc)
925 return rc;
840 rc = reipl_set_type(ipl_get_type()); 926 rc = reipl_set_type(ipl_get_type());
841 if (rc) 927 if (rc)
842 return rc; 928 return rc;
@@ -993,8 +1079,6 @@ static void do_reset_calls(void)
993 reset->fn(); 1079 reset->fn();
994} 1080}
995 1081
996extern void reset_mcck_handler(void);
997extern void reset_pgm_handler(void);
998extern __u32 dump_prefix_page; 1082extern __u32 dump_prefix_page;
999 1083
1000void s390_reset_system(void) 1084void s390_reset_system(void)
@@ -1016,14 +1100,14 @@ void s390_reset_system(void)
1016 __ctl_clear_bit(0,28); 1100 __ctl_clear_bit(0,28);
1017 1101
1018 /* Set new machine check handler */ 1102 /* Set new machine check handler */
1019 S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1103 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1020 S390_lowcore.mcck_new_psw.addr = 1104 S390_lowcore.mcck_new_psw.addr =
1021 PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; 1105 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
1022 1106
1023 /* Set new program check handler */ 1107 /* Set new program check handler */
1024 S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1108 S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1025 S390_lowcore.program_new_psw.addr = 1109 S390_lowcore.program_new_psw.addr =
1026 PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler; 1110 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
1027 1111
1028 do_reset_calls(); 1112 do_reset_calls();
1029} 1113}
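Note: the new DEFINE_IPL_ATTR_STR_RW macro generates a show/store pair plus the subsys_attribute for a string-valued attribute. For the NSS case, DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name) expands (modulo whitespace) to roughly the code below, so that under z/VM the re-IPL NSS name becomes writable through sysfs (typically /sys/firmware/reipl/nss/name), and writing "nss" to the reipl type file selects IPL_METHOD_NSS, i.e. the CP command "IPL <name>".

	/* Hand expansion of DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, ...)
	 * from ipl.c above, shown only to make the generated code visible.
	 */
	static ssize_t sys_reipl_nss_name_show(struct subsystem *subsys, char *page)
	{
		return sprintf(page, "%s\n", reipl_nss_name);
	}

	static ssize_t sys_reipl_nss_name_store(struct subsystem *subsys,
						const char *buf, size_t len)
	{
		/* "%s" stops at whitespace, so a trailing newline is ignored */
		if (sscanf(buf, "%s\n", reipl_nss_name) != 1)
			return -EINVAL;
		return len;
	}

	static struct subsys_attribute sys_reipl_nss_name_attr =
		__ATTR(name, (S_IRUGO | S_IWUSR),
		       sys_reipl_nss_name_show, sys_reipl_nss_name_store);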
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1eef50918615..8f0cbca31203 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * arch/s390/kernel/irq.c
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 2004,2007
5 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Thomas Spatzier (tspat@de.ibm.com)
7 * 7 *
8 * This file contains interrupt related functions. 8 * This file contains interrupt related functions.
9 */ 9 */
@@ -14,6 +14,8 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/proc_fs.h>
18#include <linux/profile.h>
17 19
18/* 20/*
19 * show_interrupts is needed by /proc/interrupts. 21 * show_interrupts is needed by /proc/interrupts.
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void)
93 95
94 local_irq_restore(flags); 96 local_irq_restore(flags);
95} 97}
96
97EXPORT_SYMBOL(do_softirq); 98EXPORT_SYMBOL(do_softirq);
99
100void init_irq_proc(void)
101{
102 struct proc_dir_entry *root_irq_dir;
103
104 root_irq_dir = proc_mkdir("irq", NULL);
105 create_prof_cpu_mask(root_irq_dir);
106}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 576368c4f605..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
155static int __kprobes swap_instruction(void *aref) 155static int __kprobes swap_instruction(void *aref)
156{ 156{
157 struct ins_replace_args *args = aref; 157 struct ins_replace_args *args = aref;
158 u32 *addr;
159 u32 instr;
158 int err = -EFAULT; 160 int err = -EFAULT;
159 161
162 /*
163 * Text segment is read-only, hence we use stura to bypass dynamic
164 * address translation to exchange the instruction. Since stura
165 * always operates on four bytes, but we only want to exchange two
 166	 * bytes, do some calculations to get things right. In addition we
167 * shall not cross any page boundaries (vmalloc area!) when writing
168 * the new instruction.
169 */
170 addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
171 if ((unsigned long)args->ptr & 2)
172 instr = ((*addr) & 0xffff0000) | args->new;
173 else
174 instr = ((*addr) & 0x0000ffff) | args->new << 16;
175
160 asm volatile( 176 asm volatile(
161 "0: mvc 0(2,%2),0(%3)\n" 177 " lra %1,0(%1)\n"
162 "1: la %0,0\n" 178 "0: stura %2,%1\n"
179 "1: la %0,0\n"
163 "2:\n" 180 "2:\n"
164 EX_TABLE(0b,2b) 181 EX_TABLE(0b,2b)
165 : "+d" (err), "=m" (*args->ptr) 182 : "+d" (err)
166 : "a" (args->ptr), "a" (&args->new), "m" (args->new)); 183 : "a" (addr), "d" (instr)
184 : "memory", "cc");
185
167 return err; 186 return err;
168} 187}
169 188
@@ -356,7 +375,7 @@ no_kprobe:
356 * - When the probed function returns, this probe 375 * - When the probed function returns, this probe
357 * causes the handlers to fire 376 * causes the handlers to fire
358 */ 377 */
359void __kprobes kretprobe_trampoline_holder(void) 378void kretprobe_trampoline_holder(void)
360{ 379{
361 asm volatile(".global kretprobe_trampoline\n" 380 asm volatile(".global kretprobe_trampoline\n"
362 "kretprobe_trampoline: bcr 0,0\n"); 381 "kretprobe_trampoline: bcr 0,0\n");
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void)
365/* 384/*
366 * Called when the probe at kretprobe trampoline is hit 385 * Called when the probe at kretprobe trampoline is hit
367 */ 386 */
368int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 387static int __kprobes trampoline_probe_handler(struct kprobe *p,
388 struct pt_regs *regs)
369{ 389{
370 struct kretprobe_instance *ri = NULL; 390 struct kretprobe_instance *ri = NULL;
371 struct hlist_head *head, empty_rp; 391 struct hlist_head *head, empty_rp;
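Note: swap_instruction() now goes through STURA, which always stores an aligned 4-byte word, so the 2-byte breakpoint opcode first has to be merged into the word that contains it. Ignoring the real-address translation, the merge boils down to the model below (illustrative only; in this model the containing word is the probe address rounded down to a 4-byte boundary, and the shifts assume the big-endian s390 byte order).

	#include <stdint.h>

	/*
	 * Model of the halfword merge done before the stura store: 'ptr' is
	 * the address of the 2-byte instruction to replace, 'word' the
	 * current contents of the 4-byte word containing it, and 'opc' the
	 * replacement opcode, e.g. the kprobes breakpoint 0x0002.
	 */
	static uint32_t merge_halfword(unsigned long ptr, uint32_t word, uint16_t opc)
	{
		if (ptr & 2)	/* instruction sits in the low halfword */
			return (word & 0xffff0000U) | opc;
		/* instruction sits in the high halfword */
		return (word & 0x0000ffffU) | ((uint32_t)opc << 16);
	}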
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index f6d9bcc0f75b..52f57af252b4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/kexec.h> 12#include <linux/kexec.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/reboot.h>
14#include <asm/cio.h> 15#include <asm/cio.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d989ed45a7aa..39d1dd752529 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -30,6 +30,7 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/moduleloader.h>
33 34
34#if 0 35#if 0
35#define DEBUGP printk 36#define DEBUGP printk
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region)
58 table entries. */ 59 table entries. */
59} 60}
60 61
61static inline void 62static void
62check_rela(Elf_Rela *rela, struct module *me) 63check_rela(Elf_Rela *rela, struct module *me)
63{ 64{
64 struct mod_arch_syminfo *info; 65 struct mod_arch_syminfo *info;
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
181 return -ENOEXEC; 182 return -ENOEXEC;
182} 183}
183 184
184static inline int 185static int
185apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 186apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
186 struct module *me) 187 struct module *me)
187{ 188{
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6603fbb41d07..5acfac654f9d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -144,7 +144,7 @@ static void default_idle(void)
144 144
145 trace_hardirqs_on(); 145 trace_hardirqs_on();
146 /* Wait for external, I/O or machine check interrupt. */ 146 /* Wait for external, I/O or machine check interrupt. */
147 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | 147 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
148 PSW_MASK_IO | PSW_MASK_EXT); 148 PSW_MASK_IO | PSW_MASK_EXT);
149} 149}
150 150
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
190 struct pt_regs regs; 190 struct pt_regs regs;
191 191
192 memset(&regs, 0, sizeof(regs)); 192 memset(&regs, 0, sizeof(regs));
193 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 193 regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; 194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
195 regs.gprs[9] = (unsigned long) fn; 195 regs.gprs[9] = (unsigned long) fn;
196 regs.gprs[10] = (unsigned long) arg; 196 regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
deleted file mode 100644
index b81aa1f569ca..000000000000
--- a/arch/s390/kernel/profile.c
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * arch/s390/kernel/profile.c
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
6 *
7 */
8#include <linux/proc_fs.h>
9#include <linux/profile.h>
10
11static struct proc_dir_entry * root_irq_dir;
12
13void init_irq_proc(void)
14{
15 /* create /proc/irq */
16 root_irq_dir = proc_mkdir("irq", NULL);
17
18 /* create /proc/irq/prof_cpu_mask */
19 create_prof_cpu_mask(root_irq_dir);
20}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8f36504075ed..2a8f0872ea8b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task)
86 per_info->control_regs.bits.storage_alt_space_ctl = 0; 86 per_info->control_regs.bits.storage_alt_space_ctl = 0;
87} 87}
88 88
89void 89static void set_single_step(struct task_struct *task)
90set_single_step(struct task_struct *task)
91{ 90{
92 task->thread.per_info.single_step = 1; 91 task->thread.per_info.single_step = 1;
93 FixPerRegisters(task); 92 FixPerRegisters(task);
94} 93}
95 94
96void 95static void clear_single_step(struct task_struct *task)
97clear_single_step(struct task_struct *task)
98{ 96{
99 task->thread.per_info.single_step = 0; 97 task->thread.per_info.single_step = 0;
100 FixPerRegisters(task); 98 FixPerRegisters(task);
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
232 */ 230 */
233 if (addr == (addr_t) &dummy->regs.psw.mask && 231 if (addr == (addr_t) &dummy->regs.psw.mask &&
234#ifdef CONFIG_COMPAT 232#ifdef CONFIG_COMPAT
235 data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && 233 data != PSW_MASK_MERGE(psw_user32_bits, data) &&
236#endif 234#endif
237 data != PSW_MASK_MERGE(PSW_USER_BITS, data)) 235 data != PSW_MASK_MERGE(psw_user_bits, data))
238 /* Invalid psw mask. */ 236 /* Invalid psw mask. */
239 return -EINVAL; 237 return -EINVAL;
240#ifndef CONFIG_64BIT 238#ifndef CONFIG_64BIT
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
309 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 307 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
310 if (copied != sizeof(tmp)) 308 if (copied != sizeof(tmp))
311 return -EIO; 309 return -EIO;
312 return put_user(tmp, (unsigned long __user *) data); 310 return put_user(tmp, (unsigned long __force __user *) data);
313 311
314 case PTRACE_PEEKUSR: 312 case PTRACE_PEEKUSR:
315 /* read the word at location addr in the USER area. */ 313 /* read the word at location addr in the USER area. */
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
331 329
332 case PTRACE_PEEKUSR_AREA: 330 case PTRACE_PEEKUSR_AREA:
333 case PTRACE_POKEUSR_AREA: 331 case PTRACE_POKEUSR_AREA:
334 if (copy_from_user(&parea, (void __user *) addr, 332 if (copy_from_user(&parea, (void __force __user *) addr,
335 sizeof(parea))) 333 sizeof(parea)))
336 return -EFAULT; 334 return -EFAULT;
337 addr = parea.kernel_addr; 335 addr = parea.kernel_addr;
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
341 if (request == PTRACE_PEEKUSR_AREA) 339 if (request == PTRACE_PEEKUSR_AREA)
342 ret = peek_user(child, addr, data); 340 ret = peek_user(child, addr, data);
343 else { 341 else {
344 addr_t tmp; 342 addr_t utmp;
345 if (get_user (tmp, (addr_t __user *) data)) 343 if (get_user(utmp,
344 (addr_t __force __user *) data))
346 return -EFAULT; 345 return -EFAULT;
347 ret = poke_user(child, addr, tmp); 346 ret = poke_user(child, addr, utmp);
348 } 347 }
349 if (ret) 348 if (ret)
350 return ret; 349 return ret;
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
394 if (addr == (addr_t) &dummy32->regs.psw.mask) { 393 if (addr == (addr_t) &dummy32->regs.psw.mask) {
395 /* Fake a 31 bit psw mask. */ 394 /* Fake a 31 bit psw mask. */
396 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); 395 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); 396 tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 397 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
399 /* Fake a 31 bit psw address. */ 398 /* Fake a 31 bit psw address. */
400 tmp = (__u32) task_pt_regs(child)->psw.addr | 399 tmp = (__u32) task_pt_regs(child)->psw.addr |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
469 */ 468 */
470 if (addr == (addr_t) &dummy32->regs.psw.mask) { 469 if (addr == (addr_t) &dummy32->regs.psw.mask) {
471 /* Build a 64 bit psw mask from 31 bit mask. */ 470 /* Build a 64 bit psw mask from 31 bit mask. */
472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) 471 if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
473 /* Invalid psw mask. */ 472 /* Invalid psw mask. */
474 return -EINVAL; 473 return -EINVAL;
475 task_pt_regs(child)->psw.mask = 474 task_pt_regs(child)->psw.mask =
476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); 475 PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 476 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
478 /* Build a 64 bit psw address from 31 bit address. */ 477 /* Build a 64 bit psw address from 31 bit address. */
479 task_pt_regs(child)->psw.addr = 478 task_pt_regs(child)->psw.addr =
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
550 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 549 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
551 if (copied != sizeof(tmp)) 550 if (copied != sizeof(tmp))
552 return -EIO; 551 return -EIO;
553 return put_user(tmp, (unsigned int __user *) data); 552 return put_user(tmp, (unsigned int __force __user *) data);
554 553
555 case PTRACE_PEEKUSR: 554 case PTRACE_PEEKUSR:
556 /* read the word at location addr in the USER area. */ 555 /* read the word at location addr in the USER area. */
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
571 570
572 case PTRACE_PEEKUSR_AREA: 571 case PTRACE_PEEKUSR_AREA:
573 case PTRACE_POKEUSR_AREA: 572 case PTRACE_POKEUSR_AREA:
574 if (copy_from_user(&parea, (void __user *) addr, 573 if (copy_from_user(&parea, (void __force __user *) addr,
575 sizeof(parea))) 574 sizeof(parea)))
576 return -EFAULT; 575 return -EFAULT;
577 addr = parea.kernel_addr; 576 addr = parea.kernel_addr;
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
581 if (request == PTRACE_PEEKUSR_AREA) 580 if (request == PTRACE_PEEKUSR_AREA)
582 ret = peek_user_emu31(child, addr, data); 581 ret = peek_user_emu31(child, addr, data);
583 else { 582 else {
584 __u32 tmp; 583 __u32 utmp;
585 if (get_user (tmp, (__u32 __user *) data)) 584 if (get_user(utmp,
585 (__u32 __force __user *) data))
586 return -EFAULT; 586 return -EFAULT;
587 ret = poke_user_emu31(child, addr, tmp); 587 ret = poke_user_emu31(child, addr, utmp);
588 } 588 }
589 if (ret) 589 if (ret)
590 return ret; 590 return ret;
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
595 return 0; 595 return 0;
596 case PTRACE_GETEVENTMSG: 596 case PTRACE_GETEVENTMSG:
597 return put_user((__u32) child->ptrace_message, 597 return put_user((__u32) child->ptrace_message,
598 (unsigned int __user *) data); 598 (unsigned int __force __user *) data);
599 case PTRACE_GETSIGINFO: 599 case PTRACE_GETSIGINFO:
600 if (child->last_siginfo == NULL) 600 if (child->last_siginfo == NULL)
601 return -EINVAL; 601 return -EINVAL;
602 return copy_siginfo_to_user32((compat_siginfo_t __user *) data, 602 return copy_siginfo_to_user32((compat_siginfo_t
603 __force __user *) data,
603 child->last_siginfo); 604 child->last_siginfo);
604 case PTRACE_SETSIGINFO: 605 case PTRACE_SETSIGINFO:
605 if (child->last_siginfo == NULL) 606 if (child->last_siginfo == NULL)
606 return -EINVAL; 607 return -EINVAL;
607 return copy_siginfo_from_user32(child->last_siginfo, 608 return copy_siginfo_from_user32(child->last_siginfo,
608 (compat_siginfo_t __user *) data); 609 (compat_siginfo_t
610 __force __user *) data);
609 } 611 }
610 return ptrace_request(child, request, addr, data); 612 return ptrace_request(child, request, addr, data);
611} 613}
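Note: the ptrace changes are mostly sparse annotations. In these paths the user-space address arrives in a plain integer, so casting it back to a __user pointer needs __force to tell sparse that the address-space conversion is intentional. Reduced to its essence the pattern is the following (illustrative; this helper is not part of the patch):

	/*
	 * Illustrative sparse pattern, not code added by the patch: 'data'
	 * is a plain long that actually carries a user-space address, so
	 * the cast to a __user pointer is marked __force to silence the
	 * address-space warning sparse would otherwise emit.
	 */
	static int copy_word_to_user(long data, unsigned long val)
	{
		return put_user(val, (unsigned long __force __user *) data);
	}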
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
deleted file mode 100644
index 8a87355161fa..000000000000
--- a/arch/s390/kernel/reset.S
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * arch/s390/kernel/reset.S
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl reset_mcck_handler
15reset_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 lg %r1,s390_reset_mcck_handler-0b(%r13)
20 ltgr %r1,%r1
21 jz 1f
22 basr %r14,%r1
231: la %r1,4095
24 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
25 lpswe __LC_MCK_OLD_PSW
26
27 .globl s390_reset_mcck_handler
28s390_reset_mcck_handler:
29 .quad 0
30
31 .globl reset_pgm_handler
32reset_pgm_handler:
33 stmg %r0,%r15,__LC_SAVE_AREA
34 basr %r13,0
350: lg %r15,__LC_PANIC_STACK # load panic stack
36 aghi %r15,-STACK_FRAME_OVERHEAD
37 lg %r1,s390_reset_pgm_handler-0b(%r13)
38 ltgr %r1,%r1
39 jz 1f
40 basr %r14,%r1
41 lmg %r0,%r15,__LC_SAVE_AREA
42 lpswe __LC_PGM_OLD_PSW
431: lpswe disabled_wait_psw-0b(%r13)
44 .globl s390_reset_pgm_handler
45s390_reset_pgm_handler:
46 .quad 0
47 .align 8
48disabled_wait_psw:
49 .quad 0x0002000180000000,0x0000000000000000 + reset_pgm_handler
50
51#else /* CONFIG_64BIT */
52
53 .globl reset_mcck_handler
54reset_mcck_handler:
55 basr %r13,0
560: l %r15,__LC_PANIC_STACK # load panic stack
57 ahi %r15,-STACK_FRAME_OVERHEAD
58 l %r1,s390_reset_mcck_handler-0b(%r13)
59 ltr %r1,%r1
60 jz 1f
61 basr %r14,%r1
621: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
63 lpsw __LC_MCK_OLD_PSW
64
65 .globl s390_reset_mcck_handler
66s390_reset_mcck_handler:
67 .long 0
68
69 .globl reset_pgm_handler
70reset_pgm_handler:
71 stm %r0,%r15,__LC_SAVE_AREA
72 basr %r13,0
730: l %r15,__LC_PANIC_STACK # load panic stack
74 ahi %r15,-STACK_FRAME_OVERHEAD
75 l %r1,s390_reset_pgm_handler-0b(%r13)
76 ltr %r1,%r1
77 jz 1f
78 basr %r14,%r1
79 lm %r0,%r15,__LC_SAVE_AREA
80 lpsw __LC_PGM_OLD_PSW
81
821: lpsw disabled_wait_psw-0b(%r13)
83 .globl s390_reset_pgm_handler
84s390_reset_pgm_handler:
85 .long 0
86disabled_wait_psw:
87 .align 8
88 .long 0x000a0000,0x00000000 + reset_pgm_handler
89
90#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bc5beaa8f98e..acf93dba7727 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code)
125 * Make sure that the i/o interrupt did not "overtake" 125 * Make sure that the i/o interrupt did not "overtake"
126 * the last HZ timer interrupt. 126 * the last HZ timer interrupt.
127 */ 127 */
128 account_ticks(); 128 account_ticks(S390_lowcore.int_clock);
129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
130 index = ext_hash(code); 130 index = ext_hash(code);
131 for (p = ext_int_hash[index]; p; p = p->next) { 131 for (p = ext_int_hash[index]; p; p = p->next) {
132 if (likely(p->code == code)) { 132 if (likely(p->code == code))
133 if (likely(p->handler)) 133 p->handler(code);
134 p->handler(code);
135 }
136 } 134 }
137 irq_exit(); 135 irq_exit();
138 set_irq_regs(old_regs); 136 set_irq_regs(old_regs);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5d8ee3baac14..03739813d3bf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -38,6 +38,8 @@
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/notifier.h> 39#include <linux/notifier.h>
40#include <linux/pfn.h> 40#include <linux/pfn.h>
41#include <linux/ctype.h>
42#include <linux/reboot.h>
41 43
42#include <asm/uaccess.h> 44#include <asm/uaccess.h>
43#include <asm/system.h> 45#include <asm/system.h>
@@ -49,6 +51,14 @@
49#include <asm/page.h> 51#include <asm/page.h>
50#include <asm/ptrace.h> 52#include <asm/ptrace.h>
51#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/ebcdic.h>
55#include <asm/compat.h>
56
57long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
58 PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
59long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
60 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
61 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
52 62
53/* 63/*
54 * User copy operations. 64 * User copy operations.
@@ -117,9 +127,9 @@ void __devinit cpu_init (void)
117 */ 127 */
118char vmhalt_cmd[128] = ""; 128char vmhalt_cmd[128] = "";
119char vmpoff_cmd[128] = ""; 129char vmpoff_cmd[128] = "";
120char vmpanic_cmd[128] = ""; 130static char vmpanic_cmd[128] = "";
121 131
122static inline void strncpy_skip_quote(char *dst, char *src, int n) 132static void strncpy_skip_quote(char *dst, char *src, int n)
123{ 133{
124 int sx, dx; 134 int sx, dx;
125 135
@@ -275,10 +285,6 @@ static void __init conmode_default(void)
275} 285}
276 286
277#ifdef CONFIG_SMP 287#ifdef CONFIG_SMP
278extern void machine_restart_smp(char *);
279extern void machine_halt_smp(void);
280extern void machine_power_off_smp(void);
281
282void (*_machine_restart)(char *command) = machine_restart_smp; 288void (*_machine_restart)(char *command) = machine_restart_smp;
283void (*_machine_halt)(void) = machine_halt_smp; 289void (*_machine_halt)(void) = machine_halt_smp;
284void (*_machine_power_off)(void) = machine_power_off_smp; 290void (*_machine_power_off)(void) = machine_power_off_smp;
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p)
386} 392}
387early_param("ipldelay", early_parse_ipldelay); 393early_param("ipldelay", early_parse_ipldelay);
388 394
395#ifdef CONFIG_S390_SWITCH_AMODE
396unsigned int switch_amode = 0;
397EXPORT_SYMBOL_GPL(switch_amode);
398
399static void set_amode_and_uaccess(unsigned long user_amode,
400 unsigned long user32_amode)
401{
402 psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
403 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
404 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
405#ifdef CONFIG_COMPAT
406 psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
407 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
408 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
409 psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
410 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
411 PSW32_MASK_PSTATE;
412#endif
413 psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
414 PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
415
416 if (MACHINE_HAS_MVCOS) {
417 printk("mvcos available.\n");
418 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
419 } else {
420 printk("mvcos not available.\n");
421 memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
422 }
423}
424
425/*
426 * Switch kernel/user addressing modes?
427 */
428static int __init early_parse_switch_amode(char *p)
429{
430 switch_amode = 1;
431 return 0;
432}
433early_param("switch_amode", early_parse_switch_amode);
434
435#else /* CONFIG_S390_SWITCH_AMODE */
436static inline void set_amode_and_uaccess(unsigned long user_amode,
437 unsigned long user32_amode)
438{
439}
440#endif /* CONFIG_S390_SWITCH_AMODE */
441
442#ifdef CONFIG_S390_EXEC_PROTECT
443unsigned int s390_noexec = 0;
444EXPORT_SYMBOL_GPL(s390_noexec);
445
446/*
447 * Enable execute protection?
448 */
449static int __init early_parse_noexec(char *p)
450{
451 if (!strncmp(p, "off", 3))
452 return 0;
453 switch_amode = 1;
454 s390_noexec = 1;
455 return 0;
456}
457early_param("noexec", early_parse_noexec);
458#endif /* CONFIG_S390_EXEC_PROTECT */
459
460static void setup_addressing_mode(void)
461{
462 if (s390_noexec) {
463 printk("S390 execute protection active, ");
464 set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
465 return;
466 }
467 if (switch_amode) {
468 printk("S390 address spaces switched, ");
469 set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
470 }
471}
472
389static void __init 473static void __init
390setup_lowcore(void) 474setup_lowcore(void)
391{ 475{
@@ -402,19 +486,21 @@ setup_lowcore(void)
402 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 486 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
403 lc->restart_psw.addr = 487 lc->restart_psw.addr =
404 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 488 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
405 lc->external_new_psw.mask = PSW_KERNEL_BITS; 489 if (switch_amode)
490 lc->restart_psw.mask |= PSW_ASC_HOME;
491 lc->external_new_psw.mask = psw_kernel_bits;
406 lc->external_new_psw.addr = 492 lc->external_new_psw.addr =
407 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 493 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
408 lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 494 lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
409 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 495 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
410 lc->program_new_psw.mask = PSW_KERNEL_BITS; 496 lc->program_new_psw.mask = psw_kernel_bits;
411 lc->program_new_psw.addr = 497 lc->program_new_psw.addr =
412 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; 498 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
413 lc->mcck_new_psw.mask = 499 lc->mcck_new_psw.mask =
414 PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; 500 psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
415 lc->mcck_new_psw.addr = 501 lc->mcck_new_psw.addr =
416 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 502 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
417 lc->io_new_psw.mask = PSW_KERNEL_BITS; 503 lc->io_new_psw.mask = psw_kernel_bits;
418 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 504 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
419 lc->ipl_device = S390_lowcore.ipl_device; 505 lc->ipl_device = S390_lowcore.ipl_device;
420 lc->jiffy_timer = -1LL; 506 lc->jiffy_timer = -1LL;
@@ -439,7 +525,7 @@ setup_lowcore(void)
439static void __init 525static void __init
440setup_resources(void) 526setup_resources(void)
441{ 527{
442 struct resource *res; 528 struct resource *res, *sub_res;
443 int i; 529 int i;
444 530
445 code_resource.start = (unsigned long) &_text; 531 code_resource.start = (unsigned long) &_text;
@@ -464,8 +550,38 @@ setup_resources(void)
464 res->start = memory_chunk[i].addr; 550 res->start = memory_chunk[i].addr;
465 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; 551 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
466 request_resource(&iomem_resource, res); 552 request_resource(&iomem_resource, res);
467 request_resource(res, &code_resource); 553
468 request_resource(res, &data_resource); 554 if (code_resource.start >= res->start &&
555 code_resource.start <= res->end &&
556 code_resource.end > res->end) {
557 sub_res = alloc_bootmem_low(sizeof(struct resource));
558 memcpy(sub_res, &code_resource,
559 sizeof(struct resource));
560 sub_res->end = res->end;
561 code_resource.start = res->end + 1;
562 request_resource(res, sub_res);
563 }
564
565 if (code_resource.start >= res->start &&
566 code_resource.start <= res->end &&
567 code_resource.end <= res->end)
568 request_resource(res, &code_resource);
569
570 if (data_resource.start >= res->start &&
571 data_resource.start <= res->end &&
572 data_resource.end > res->end) {
573 sub_res = alloc_bootmem_low(sizeof(struct resource));
574 memcpy(sub_res, &data_resource,
575 sizeof(struct resource));
576 sub_res->end = res->end;
577 data_resource.start = res->end + 1;
578 request_resource(res, sub_res);
579 }
580
581 if (data_resource.start >= res->start &&
582 data_resource.start <= res->end &&
583 data_resource.end <= res->end)
584 request_resource(res, &data_resource);
469 } 585 }
470} 586}
471 587
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void)
495 } 611 }
496 if (!memory_end) 612 if (!memory_end)
497 memory_end = memory_size; 613 memory_end = memory_size;
498 if (real_size > memory_end)
499 printk("More memory detected than supported. Unused: %luk\n",
500 (real_size - memory_end) >> 10);
501} 614}
502 615
503static void __init 616static void __init
504setup_memory(void) 617setup_memory(void)
505{ 618{
506 unsigned long bootmap_size; 619 unsigned long bootmap_size;
507 unsigned long start_pfn, end_pfn, init_pfn; 620 unsigned long start_pfn, end_pfn;
508 int i; 621 int i;
509 622
510 /* 623 /*
@@ -514,10 +627,6 @@ setup_memory(void)
514 start_pfn = PFN_UP(__pa(&_end)); 627 start_pfn = PFN_UP(__pa(&_end));
515 end_pfn = max_pfn = PFN_DOWN(memory_end); 628 end_pfn = max_pfn = PFN_DOWN(memory_end);
516 629
517 /* Initialize storage key for kernel pages */
518 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
519 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
520
521#ifdef CONFIG_BLK_DEV_INITRD 630#ifdef CONFIG_BLK_DEV_INITRD
522 /* 631 /*
 523 * Move the initrd in case the bitmap of the bootmem allocator 632
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p)
651 parse_early_param(); 760 parse_early_param();
652 761
653 setup_memory_end(); 762 setup_memory_end();
763 setup_addressing_mode();
654 setup_memory(); 764 setup_memory();
655 setup_resources(); 765 setup_resources();
656 setup_lowcore(); 766 setup_lowcore();
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
694 struct cpuinfo_S390 *cpuinfo; 804 struct cpuinfo_S390 *cpuinfo;
695 unsigned long n = (unsigned long) v - 1; 805 unsigned long n = (unsigned long) v - 1;
696 806
807 s390_adjust_jiffies();
697 preempt_disable(); 808 preempt_disable();
698 if (!n) { 809 if (!n) {
699 seq_printf(m, "vendor_id : IBM/S390\n" 810 seq_printf(m, "vendor_id : IBM/S390\n"
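Note: setup_resources() no longer assumes that the kernel text and data resources fit inside a single memory chunk. If a resource starts inside the current chunk but ends beyond it, a bootmem-allocated copy is registered for the part inside the chunk and the original resource is shifted to start in the next one. Per chunk the decision reduces to the sketch below; split_at() is a hypothetical helper standing in for the alloc_bootmem_low/memcpy sequence in the real code.

	/*
	 * Per-chunk sketch of the new setup_resources() logic: 'res' is the
	 * resource describing one memory chunk, 'kres' the kernel code or
	 * data resource. split_at() is a hypothetical helper that returns a
	 * copy of 'kres' truncated at the given end address.
	 */
	static void claim_kernel_resource(struct resource *res, struct resource *kres)
	{
		if (kres->start < res->start || kres->start > res->end)
			return;				/* does not start in this chunk */

		if (kres->end > res->end) {
			/* straddles the chunk boundary: claim only the part
			 * inside this chunk, continue in the next chunk */
			request_resource(res, split_at(kres, res->end));
			kres->start = res->end + 1;
		} else {
			request_resource(res, kres);	/* fits entirely */
		}
	}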
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4c8a7954ef48..554f9cf7499c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
119 119
120 /* Copy a 'clean' PSW mask to the user to avoid leaking 120 /* Copy a 'clean' PSW mask to the user to avoid leaking
121 information about whether PER is currently on. */ 121 information about whether PER is currently on. */
122 user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); 122 user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
123 user_sregs.regs.psw.addr = regs->psw.addr; 123 user_sregs.regs.psw.addr = regs->psw.addr;
124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
125 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 125 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c0cd255fddbd..65b52320d145 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -22,23 +22,23 @@
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/init.h> 24#include <linux/init.h>
25
26#include <linux/mm.h> 25#include <linux/mm.h>
27#include <linux/spinlock.h> 26#include <linux/spinlock.h>
28#include <linux/kernel_stat.h> 27#include <linux/kernel_stat.h>
29#include <linux/smp_lock.h> 28#include <linux/smp_lock.h>
30
31#include <linux/delay.h> 29#include <linux/delay.h>
32#include <linux/cache.h> 30#include <linux/cache.h>
33#include <linux/interrupt.h> 31#include <linux/interrupt.h>
34#include <linux/cpu.h> 32#include <linux/cpu.h>
35 33#include <linux/timex.h>
34#include <asm/setup.h>
36#include <asm/sigp.h> 35#include <asm/sigp.h>
37#include <asm/pgalloc.h> 36#include <asm/pgalloc.h>
38#include <asm/irq.h> 37#include <asm/irq.h>
39#include <asm/s390_ext.h> 38#include <asm/s390_ext.h>
40#include <asm/cpcmd.h> 39#include <asm/cpcmd.h>
41#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
41#include <asm/timer.h>
42 42
43extern volatile int __cpu_logical_map[]; 43extern volatile int __cpu_logical_map[];
44 44
@@ -53,12 +53,6 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
53 53
54static struct task_struct *current_set[NR_CPUS]; 54static struct task_struct *current_set[NR_CPUS];
55 55
56/*
57 * Reboot, halt and power_off routines for SMP.
58 */
59extern char vmhalt_cmd[];
60extern char vmpoff_cmd[];
61
62static void smp_ext_bitcall(int, ec_bit_sig); 56static void smp_ext_bitcall(int, ec_bit_sig);
63static void smp_ext_bitcall_others(ec_bit_sig); 57static void smp_ext_bitcall_others(ec_bit_sig);
64 58
@@ -200,7 +194,7 @@ int smp_call_function_on(void (*func) (void *info), void *info,
200} 194}
201EXPORT_SYMBOL(smp_call_function_on); 195EXPORT_SYMBOL(smp_call_function_on);
202 196
203static inline void do_send_stop(void) 197static void do_send_stop(void)
204{ 198{
205 int cpu, rc; 199 int cpu, rc;
206 200
@@ -214,7 +208,7 @@ static inline void do_send_stop(void)
214 } 208 }
215} 209}
216 210
217static inline void do_store_status(void) 211static void do_store_status(void)
218{ 212{
219 int cpu, rc; 213 int cpu, rc;
220 214
@@ -230,7 +224,7 @@ static inline void do_store_status(void)
230 } 224 }
231} 225}
232 226
233static inline void do_wait_for_stop(void) 227static void do_wait_for_stop(void)
234{ 228{
235 int cpu; 229 int cpu;
236 230
@@ -250,7 +244,7 @@ static inline void do_wait_for_stop(void)
250void smp_send_stop(void) 244void smp_send_stop(void)
251{ 245{
252 /* Disable all interrupts/machine checks */ 246 /* Disable all interrupts/machine checks */
253 __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); 247 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
254 248
255 /* write magic number to zero page (absolute 0) */ 249 /* write magic number to zero page (absolute 0) */
256 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; 250 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
@@ -298,7 +292,7 @@ void machine_power_off_smp(void)
298 * cpus are handled. 292 * cpus are handled.
299 */ 293 */
300 294
301void do_ext_call_interrupt(__u16 code) 295static void do_ext_call_interrupt(__u16 code)
302{ 296{
303 unsigned long bits; 297 unsigned long bits;
304 298
@@ -385,7 +379,7 @@ struct ec_creg_mask_parms {
385/* 379/*
386 * callback for setting/clearing control bits 380 * callback for setting/clearing control bits
387 */ 381 */
388void smp_ctl_bit_callback(void *info) { 382static void smp_ctl_bit_callback(void *info) {
389 struct ec_creg_mask_parms *pp = info; 383 struct ec_creg_mask_parms *pp = info;
390 unsigned long cregs[16]; 384 unsigned long cregs[16];
391 int i; 385 int i;
@@ -458,17 +452,15 @@ __init smp_count_cpus(void)
458/* 452/*
459 * Activate a secondary processor. 453 * Activate a secondary processor.
460 */ 454 */
461extern void init_cpu_timer(void);
462extern void init_cpu_vtimer(void);
463
464int __devinit start_secondary(void *cpuvoid) 455int __devinit start_secondary(void *cpuvoid)
465{ 456{
466 /* Setup the cpu */ 457 /* Setup the cpu */
467 cpu_init(); 458 cpu_init();
468 preempt_disable(); 459 preempt_disable();
469 /* init per CPU timer */ 460 /* Enable TOD clock interrupts on the secondary cpu. */
470 init_cpu_timer(); 461 init_cpu_timer();
471#ifdef CONFIG_VIRT_TIMER 462#ifdef CONFIG_VIRT_TIMER
463 /* Enable cpu timer interrupts on the secondary cpu. */
472 init_cpu_vtimer(); 464 init_cpu_vtimer();
473#endif 465#endif
474 /* Enable pfault pseudo page faults on this cpu. */ 466 /* Enable pfault pseudo page faults on this cpu. */
@@ -542,7 +534,7 @@ smp_put_cpu(int cpu)
542 spin_unlock_irqrestore(&smp_reserve_lock, flags); 534 spin_unlock_irqrestore(&smp_reserve_lock, flags);
543} 535}
544 536
545static inline int 537static int
546cpu_stopped(int cpu) 538cpu_stopped(int cpu)
547{ 539{
548 __u32 status; 540 __u32 status;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0d14a4789bf2..2e5c65a1863e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,11 +11,11 @@
11#include <linux/stacktrace.h> 11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13 13
14static inline unsigned long save_context_stack(struct stack_trace *trace, 14static unsigned long save_context_stack(struct stack_trace *trace,
15 unsigned int *skip, 15 unsigned int *skip,
16 unsigned long sp, 16 unsigned long sp,
17 unsigned long low, 17 unsigned long low,
18 unsigned long high) 18 unsigned long high)
19{ 19{
20 struct stack_frame *sf; 20 struct stack_frame *sf;
21 struct pt_regs *regs; 21 struct pt_regs *regs;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 6cceed4df73e..3b91f27ab202 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -37,11 +37,15 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/irq_regs.h> 38#include <asm/irq_regs.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/etr.h>
40 41
41/* change this if you have some constant time drift */ 42/* change this if you have some constant time drift */
42#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 43#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
43#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 44#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
44 45
46/* The value of the TOD clock for 1.1.1970. */
47#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
48
45/* 49/*
46 * Create a small time difference between the timer interrupts 50 * Create a small time difference between the timer interrupts
47 * on the different cpus to avoid lock contention. 51 * on the different cpus to avoid lock contention.
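Note: the TOD_UNIX_EPOCH constant added in the hunk above can be cross-checked from the architecture definition: the TOD clock counts from 1900-01-01 00:00 UTC and bit 51 corresponds to one microsecond, so the Unix epoch is 70 years plus 17 leap days worth of microseconds shifted left by 12. A standalone check, just for verification:

	#include <assert.h>
	#include <stdint.h>

	/* Verify TOD_UNIX_EPOCH: 1900-01-01 .. 1970-01-01 is 70*365+17 days,
	 * and one microsecond is 1 << 12 in TOD clock units (bit 51 = 1us).
	 */
	int main(void)
	{
		uint64_t days = 70ULL * 365 + 17;		/* 25567 days   */
		uint64_t usecs = days * 86400 * 1000000ULL;	/* microseconds */
		assert((usecs << 12) == 0x7d91048bca000000ULL);
		return 0;
	}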
@@ -51,6 +55,7 @@
51#define TICK_SIZE tick 55#define TICK_SIZE tick
52 56
53static ext_int_info_t ext_int_info_cc; 57static ext_int_info_t ext_int_info_cc;
58static ext_int_info_t ext_int_etr_cc;
54static u64 init_timer_cc; 59static u64 init_timer_cc;
55static u64 jiffies_timer_cc; 60static u64 jiffies_timer_cc;
56static u64 xtime_cc; 61static u64 xtime_cc;
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
89#define s390_do_profile() do { ; } while(0) 94#define s390_do_profile() do { ; } while(0)
90#endif /* CONFIG_PROFILING */ 95#endif /* CONFIG_PROFILING */
91 96
92
93/* 97/*
94 * timer_interrupt() needs to keep up the real-time clock, 98 * Advance the per cpu tick counter up to the time given with the
95 * as well as call the "do_timer()" routine every clocktick 99 * "time" argument. The per cpu update consists of accounting
100 * the virtual cpu time, calling update_process_times and calling
101 * the profiling hook. If xtime is before time it is advanced as well.
96 */ 102 */
97void account_ticks(void) 103void account_ticks(u64 time)
98{ 104{
99 __u64 tmp;
100 __u32 ticks; 105 __u32 ticks;
106 __u64 tmp;
101 107
102 /* Calculate how many ticks have passed. */ 108 /* Calculate how many ticks have passed. */
103 if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) { 109 if (time < S390_lowcore.jiffy_timer)
104 /*
105 * We have to program the clock comparator even if
106 * no tick has passed. That happens if e.g. an i/o
107 * interrupt wakes up an idle processor that has
108 * switched off its hz timer.
109 */
110 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
111 asm volatile ("SCKC %0" : : "m" (tmp));
112 return; 110 return;
113 } 111 tmp = time - S390_lowcore.jiffy_timer;
114 tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
115 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ 112 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
116 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; 113 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
117 S390_lowcore.jiffy_timer += 114 S390_lowcore.jiffy_timer +=
@@ -124,10 +121,6 @@ void account_ticks(void)
124 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; 121 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
125 } 122 }
126 123
127 /* set clock comparator for next tick */
128 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
129 asm volatile ("SCKC %0" : : "m" (tmp));
130
131#ifdef CONFIG_SMP 124#ifdef CONFIG_SMP
132 /* 125 /*
133 * Do not rely on the boot cpu to do the calls to do_timer. 126 * Do not rely on the boot cpu to do the calls to do_timer.
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1;
173 * Stop the HZ tick on the current CPU. 166 * Stop the HZ tick on the current CPU.
174 * Only cpu_idle may call this function. 167 * Only cpu_idle may call this function.
175 */ 168 */
176static inline void stop_hz_timer(void) 169static void stop_hz_timer(void)
177{ 170{
178 unsigned long flags; 171 unsigned long flags;
179 unsigned long seq, next; 172 unsigned long seq, next;
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void)
210 if (timer >= jiffies_timer_cc) 203 if (timer >= jiffies_timer_cc)
211 todval = timer; 204 todval = timer;
212 } 205 }
213 asm volatile ("SCKC %0" : : "m" (todval)); 206 set_clock_comparator(todval);
214} 207}
215 208
216/* 209/*
217 * Start the HZ tick on the current CPU. 210 * Start the HZ tick on the current CPU.
218 * Only cpu_idle may call this function. 211 * Only cpu_idle may call this function.
219 */ 212 */
220static inline void start_hz_timer(void) 213static void start_hz_timer(void)
221{ 214{
222 BUG_ON(!in_interrupt()); 215 BUG_ON(!in_interrupt());
223 216
224 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 217 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
225 return; 218 return;
226 account_ticks(); 219 account_ticks(get_clock());
220 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
227 cpu_clear(smp_processor_id(), nohz_cpu_mask); 221 cpu_clear(smp_processor_id(), nohz_cpu_mask);
228} 222}
229 223
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = {
245 .notifier_call = nohz_idle_notify, 239 .notifier_call = nohz_idle_notify,
246}; 240};
247 241
248void __init nohz_init(void) 242static void __init nohz_init(void)
249{ 243{
250 if (register_idle_notifier(&nohz_idle_nb)) 244 if (register_idle_notifier(&nohz_idle_nb))
251 panic("Couldn't register idle notifier"); 245 panic("Couldn't register idle notifier");
@@ -254,24 +248,57 @@ void __init nohz_init(void)
254#endif 248#endif
255 249
256/* 250/*
257 * Start the clock comparator on the current CPU. 251 * Set up per cpu jiffy timer and set the clock comparator.
252 */
253static void setup_jiffy_timer(void)
254{
255 /* Set up clock comparator to next jiffy. */
256 S390_lowcore.jiffy_timer =
257 jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
258 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
259}
260
261/*
262 * Set up lowcore and control register of the current cpu to
263 * enable TOD clock and clock comparator interrupts.
258 */ 264 */
259void init_cpu_timer(void) 265void init_cpu_timer(void)
260{ 266{
261 unsigned long cr0; 267 setup_jiffy_timer();
262 __u64 timer;
263 268
264 timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY; 269 /* Enable clock comparator timer interrupt. */
265 S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY; 270 __ctl_set_bit(0,11);
266 timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION; 271
267 asm volatile ("SCKC %0" : : "m" (timer)); 272 /* Always allow ETR external interrupts, even without an ETR. */
268 /* allow clock comparator timer interrupt */ 273 __ctl_set_bit(0, 4);
269 __ctl_store(cr0, 0, 0);
270 cr0 |= 0x800;
271 __ctl_load(cr0, 0, 0);
272} 274}
273 275
274extern void vtime_init(void); 276static void clock_comparator_interrupt(__u16 code)
277{
278 /* set clock comparator for next tick */
279 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
280}
281
282static void etr_reset(void);
283static void etr_init(void);
284static void etr_ext_handler(__u16);
285
286/*
287 * Get the TOD clock running.
288 */
289static u64 __init reset_tod_clock(void)
290{
291 u64 time;
292
293 etr_reset();
294 if (store_clock(&time) == 0)
295 return time;
296 /* TOD clock not running. Set the clock to Unix Epoch. */
297 if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
298 panic("TOD clock not operational.");
299
300 return TOD_UNIX_EPOCH;
301}
275 302
276static cycle_t read_tod_clock(void) 303static cycle_t read_tod_clock(void)
277{ 304{
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = {
295 */ 322 */
296void __init time_init(void) 323void __init time_init(void)
297{ 324{
298 __u64 set_time_cc; 325 init_timer_cc = reset_tod_clock();
299 int cc; 326 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
300
301 /* kick the TOD clock */
302 asm volatile(
303 " stck 0(%2)\n"
304 " ipm %0\n"
305 " srl %0,28"
306 : "=d" (cc), "=m" (init_timer_cc)
307 : "a" (&init_timer_cc) : "cc");
308 switch (cc) {
309 case 0: /* clock in set state: all is fine */
310 break;
311 case 1: /* clock in non-set state: FIXME */
312 printk("time_init: TOD clock in non-set state\n");
313 break;
314 case 2: /* clock in error state: FIXME */
315 printk("time_init: TOD clock in error state\n");
316 break;
317 case 3: /* clock in stopped or not-operational state: FIXME */
318 printk("time_init: TOD clock stopped/non-operational\n");
319 break;
320 }
321 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; 327 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
322 328
323 /* set xtime */ 329 /* set xtime */
324 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; 330 tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
325 set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
326 (0x3c26700LL*1000000*4096);
327 tod_to_timeval(set_time_cc, &xtime);
328 set_normalized_timespec(&wall_to_monotonic, 331 set_normalized_timespec(&wall_to_monotonic,
329 -xtime.tv_sec, -xtime.tv_nsec); 332 -xtime.tv_sec, -xtime.tv_nsec);
330 333
331 /* request the clock comparator external interrupt */ 334 /* request the clock comparator external interrupt */
332 if (register_early_external_interrupt(0x1004, NULL, 335 if (register_early_external_interrupt(0x1004,
336 clock_comparator_interrupt,
333 &ext_int_info_cc) != 0) 337 &ext_int_info_cc) != 0)
334 panic("Couldn't request external interrupt 0x1004"); 338 panic("Couldn't request external interrupt 0x1004");
335 339
336 if (clocksource_register(&clocksource_tod) != 0) 340 if (clocksource_register(&clocksource_tod) != 0)
337 panic("Could not register TOD clock source"); 341 panic("Could not register TOD clock source");
338 342
339 init_cpu_timer(); 343 /* request the etr external interrupt */
344 if (register_early_external_interrupt(0x1406, etr_ext_handler,
345 &ext_int_etr_cc) != 0)
346 panic("Couldn't request external interrupt 0x1406");
347
348 /* Enable TOD clock interrupts on the boot cpu. */
349 init_cpu_timer();
340 350
341#ifdef CONFIG_NO_IDLE_HZ 351#ifdef CONFIG_NO_IDLE_HZ
342 nohz_init(); 352 nohz_init();
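Note: account_ticks() now takes the interrupting TOD value as an argument (the external interrupt path passes S390_lowcore.int_clock) and no longer reprograms the clock comparator itself; that moved into the new clock_comparator_interrupt() handler. The conversion from a TOD delta to a number of ticks relies on one jiffy being USECS_PER_JIFFY << 12 clock units, since TOD bit 51 is one microsecond. A minimal model of the accounting, collapsing the three branches of the real code into one expression (HZ=100 assumed for the worked number):

	#include <stdint.h>
	#include <stdio.h>

	#define HZ			100			/* assumed          */
	#define USECS_PER_JIFFY		(1000000UL / HZ)
	#define CLK_TICKS_PER_JIFFY	(USECS_PER_JIFFY << 12)	/* TOD bit 51 = 1us */

	/* How many ticks account_ticks() charges for a given clock value. */
	static unsigned int ticks_elapsed(uint64_t time, uint64_t jiffy_timer)
	{
		if (time < jiffy_timer)
			return 0;			/* nothing to account yet */
		return (unsigned int)((time - jiffy_timer) / CLK_TICKS_PER_JIFFY + 1);
	}

	int main(void)
	{
		uint64_t now = 25000ULL << 12;		/* 25 ms after the last tick */
		printf("%u\n", ticks_elapsed(now, 0));	/* prints 3 with HZ=100 */
		return 0;
	}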
@@ -345,5 +355,1048 @@ void __init time_init(void)
345#ifdef CONFIG_VIRT_TIMER 355#ifdef CONFIG_VIRT_TIMER
346 vtime_init(); 356 vtime_init();
347#endif 357#endif
358 etr_init();
359}
360
361/*
362 * External Time Reference (ETR) code.
363 */
364static int etr_port0_online;
365static int etr_port1_online;
366
367static int __init early_parse_etr(char *p)
368{
369 if (strncmp(p, "off", 3) == 0)
370 etr_port0_online = etr_port1_online = 0;
371 else if (strncmp(p, "port0", 5) == 0)
372 etr_port0_online = 1;
373 else if (strncmp(p, "port1", 5) == 0)
374 etr_port1_online = 1;
375 else if (strncmp(p, "on", 2) == 0)
376 etr_port0_online = etr_port1_online = 1;
377 return 0;
378}
379early_param("etr", early_parse_etr);
380
381enum etr_event {
382 ETR_EVENT_PORT0_CHANGE,
383 ETR_EVENT_PORT1_CHANGE,
384 ETR_EVENT_PORT_ALERT,
385 ETR_EVENT_SYNC_CHECK,
386 ETR_EVENT_SWITCH_LOCAL,
387 ETR_EVENT_UPDATE,
388};
389
390enum etr_flags {
391 ETR_FLAG_ENOSYS,
392 ETR_FLAG_EACCES,
393 ETR_FLAG_STEAI,
394};
395
396/*
397 * Valid bit combinations of the eacr register are (x = don't care):
398 * e0 e1 dp p0 p1 ea es sl
399 * 0 0 x 0 0 0 0 0 initial, disabled state
400 * 0 0 x 0 1 1 0 0 port 1 online
401 * 0 0 x 1 0 1 0 0 port 0 online
402 * 0 0 x 1 1 1 0 0 both ports online
403 * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
404 * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
405 * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
406 * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
407 * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
408 * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
409 * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
410 * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
411 * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
412 * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
413 * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
414 * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
415 * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
416 * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
417 * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync
418 * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
419 */
420static struct etr_eacr etr_eacr;
421static u64 etr_tolec; /* time of last eacr update */
422static unsigned long etr_flags;
423static struct etr_aib etr_port0;
424static int etr_port0_uptodate;
425static struct etr_aib etr_port1;
426static int etr_port1_uptodate;
427static unsigned long etr_events;
428static struct timer_list etr_timer;
429static struct tasklet_struct etr_tasklet;
430static DEFINE_PER_CPU(atomic_t, etr_sync_word);
431
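As a hedged illustration of the bit table above, the row "1 0 x 1 0 1 1 1" (port 0 online, usable, ETR mode, in-sync) would correspond to an eacr value like the following, written with the field names visible in etr_reset() further down; the struct etr_eacr layout itself lives in asm/etr.h and is assumed here:

	/* Illustrative only: port 0 online, usable, ETR mode, in-sync. */
	struct etr_eacr eacr_example = {
		.e0 = 1, .e1 = 0, .dp = 0,
		.p0 = 1, .p1 = 0, .ea = 1,
		.es = 1, .sl = 1
	};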
432static void etr_timeout(unsigned long dummy);
433static void etr_tasklet_fn(unsigned long dummy);
434
435/*
436 * The etr get_clock function. It will write the current clock value
437 * to the clock pointer and return 0 if the clock is in sync with the
 438 * external time source. It returns -ENOSYS if the clock runs in
 439 * local mode, -EACCES if no usable ETR port is online and -EAGAIN if
 440 * the clock is not (yet) in sync. This function is what ETR is all about.
441 */
442int get_sync_clock(unsigned long long *clock)
443{
444 atomic_t *sw_ptr;
445 unsigned int sw0, sw1;
446
447 sw_ptr = &get_cpu_var(etr_sync_word);
448 sw0 = atomic_read(sw_ptr);
449 *clock = get_clock();
450 sw1 = atomic_read(sw_ptr);
 451	put_cpu_var(etr_sync_word);
452 if (sw0 == sw1 && (sw0 & 0x80000000U))
453 /* Success: time is in sync. */
454 return 0;
455 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
456 return -ENOSYS;
457 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
458 return -EACCES;
459 return -EAGAIN;
460}
461EXPORT_SYMBOL(get_sync_clock);
462
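A minimal caller sketch for get_sync_clock(), assuming nothing beyond the return codes documented above: prefer the certified in-sync value and fall back to the plain TOD clock otherwise (the helper name is invented for illustration):

	static unsigned long long get_tod_prefer_sync(void)
	{
		unsigned long long clock;

		if (get_sync_clock(&clock) == 0)
			return clock;	/* in sync with the external reference */
		return get_clock();	/* -ENOSYS/-EACCES/-EAGAIN: use local TOD */
	}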
463/*
464 * Make get_sync_clock return -EAGAIN.
465 */
466static void etr_disable_sync_clock(void *dummy)
467{
468 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
469 /*
470 * Clear the in-sync bit 2^31. All get_sync_clock calls will
471 * fail until the sync bit is turned back on. In addition
 472	 * increase the "sequence" counter so that a get_sync_clock call
 473	 * racing with an etr event and its recovery is detected.
474 */
475 atomic_clear_mask(0x80000000, sw_ptr);
476 atomic_inc(sw_ptr);
477}
478
479/*
480 * Make get_sync_clock return 0 again.
481 * Needs to be called from a context disabled for preemption.
482 */
483static void etr_enable_sync_clock(void)
484{
485 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
486 atomic_set_mask(0x80000000, sw_ptr);
487}
488
489/*
490 * Reset ETR attachment.
491 */
492static void etr_reset(void)
493{
494 etr_eacr = (struct etr_eacr) {
495 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
496 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
497 .es = 0, .sl = 0 };
498 if (etr_setr(&etr_eacr) == 0)
499 etr_tolec = get_clock();
500 else {
501 set_bit(ETR_FLAG_ENOSYS, &etr_flags);
502 if (etr_port0_online || etr_port1_online) {
503 printk(KERN_WARNING "Running on non ETR capable "
504 "machine, only local mode available.\n");
505 etr_port0_online = etr_port1_online = 0;
506 }
507 }
508}
509
510static void etr_init(void)
511{
512 struct etr_aib aib;
513
514 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
515 return;
516 /* Check if this machine has the steai instruction. */
517 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
518 set_bit(ETR_FLAG_STEAI, &etr_flags);
519 setup_timer(&etr_timer, etr_timeout, 0UL);
520 tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
521 if (!etr_port0_online && !etr_port1_online)
522 set_bit(ETR_FLAG_EACCES, &etr_flags);
523 if (etr_port0_online) {
524 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
525 tasklet_hi_schedule(&etr_tasklet);
526 }
527 if (etr_port1_online) {
528 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
529 tasklet_hi_schedule(&etr_tasklet);
530 }
531}
532
533/*
534 * Two sorts of ETR machine checks. The architecture reads:
 535 * "When a machine-check interruption occurs and if a switch-to-local or
536 * ETR-sync-check interrupt request is pending but disabled, this pending
537 * disabled interruption request is indicated and is cleared".
538 * Which means that we can get etr_switch_to_local events from the machine
539 * check handler although the interruption condition is disabled. Lovely..
540 */
541
542/*
543 * Switch to local machine check. This is called when the last usable
544 * ETR port goes inactive. After switch to local the clock is not in sync.
545 */
546void etr_switch_to_local(void)
547{
548 if (!etr_eacr.sl)
549 return;
550 etr_disable_sync_clock(NULL);
551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
552 tasklet_hi_schedule(&etr_tasklet);
553}
554
555/*
556 * ETR sync check machine check. This is called when the ETR OTE and the
557 * local clock OTE are farther apart than the ETR sync check tolerance.
 558 * After an ETR sync check the clock is not in sync. The machine check
 559 * is broadcast to all cpus at the same time.
560 */
561void etr_sync_check(void)
562{
563 if (!etr_eacr.es)
564 return;
565 etr_disable_sync_clock(NULL);
566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
567 tasklet_hi_schedule(&etr_tasklet);
568}
569
570/*
571 * ETR external interrupt. There are two causes:
572 * 1) port state change, check the usability of the port
573 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
574 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
575 * or ETR-data word 4 (edf4) has changed.
576 */
577static void etr_ext_handler(__u16 code)
578{
579 struct etr_interruption_parameter *intparm =
580 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
581
582 if (intparm->pc0)
583 /* ETR port 0 state change. */
584 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
585 if (intparm->pc1)
586 /* ETR port 1 state change. */
587 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
588 if (intparm->eai)
589 /*
590 * ETR port alert on either port 0, 1 or both.
591 * Both ports are not up-to-date now.
592 */
593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
594 tasklet_hi_schedule(&etr_tasklet);
595}
596
597static void etr_timeout(unsigned long dummy)
598{
599 set_bit(ETR_EVENT_UPDATE, &etr_events);
600 tasklet_hi_schedule(&etr_tasklet);
601}
602
603/*
 604 * Check if the etr mode is pps.
605 */
606static inline int etr_mode_is_pps(struct etr_eacr eacr)
607{
608 return eacr.es && !eacr.sl;
609}
610
611/*
612 * Check if the etr mode is etr.
613 */
614static inline int etr_mode_is_etr(struct etr_eacr eacr)
615{
616 return eacr.es && eacr.sl;
617}
618
619/*
620 * Check if the port can be used for TOD synchronization.
621 * For PPS mode the port has to receive OTEs. For ETR mode
622 * the port has to receive OTEs, the ETR stepping bit has to
623 * be zero and the validity bits for data frame 1, 2, and 3
624 * have to be 1.
625 */
626static int etr_port_valid(struct etr_aib *aib, int port)
627{
628 unsigned int psc;
629
630 /* Check that this port is receiving OTEs. */
631 if (aib->tsp == 0)
632 return 0;
633
634 psc = port ? aib->esw.psc1 : aib->esw.psc0;
635 if (psc == etr_lpsc_pps_mode)
636 return 1;
637 if (psc == etr_lpsc_operational_step)
638 return !aib->esw.y && aib->slsw.v1 &&
639 aib->slsw.v2 && aib->slsw.v3;
640 return 0;
641}
642
643/*
644 * Check if two ports are on the same network.
645 */
646static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
647{
648 // FIXME: any other fields we have to compare?
649 return aib1->edf1.net_id == aib2->edf1.net_id;
650}
651
652/*
 653 * Wrapper for etr_steai that converts physical port states
654 * to logical port states to be consistent with the output
655 * of stetr (see etr_psc vs. etr_lpsc).
656 */
657static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
658{
659 BUG_ON(etr_steai(aib, func) != 0);
660 /* Convert port state to logical port state. */
661 if (aib->esw.psc0 == 1)
662 aib->esw.psc0 = 2;
663 else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
664 aib->esw.psc0 = 1;
665 if (aib->esw.psc1 == 1)
666 aib->esw.psc1 = 2;
667 else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
668 aib->esw.psc1 = 1;
669}
670
671/*
672 * Check if the aib a2 is still connected to the same attachment as
673 * aib a1, the etv values differ by one and a2 is valid.
674 */
675static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
676{
677 int state_a1, state_a2;
678
679 /* Paranoia check: e0/e1 should better be the same. */
680 if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
681 a1->esw.eacr.e1 != a2->esw.eacr.e1)
682 return 0;
683
684 /* Still connected to the same etr ? */
685 state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
686 state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
687 if (state_a1 == etr_lpsc_operational_step) {
688 if (state_a2 != etr_lpsc_operational_step ||
689 a1->edf1.net_id != a2->edf1.net_id ||
690 a1->edf1.etr_id != a2->edf1.etr_id ||
691 a1->edf1.etr_pn != a2->edf1.etr_pn)
692 return 0;
693 } else if (state_a2 != etr_lpsc_pps_mode)
694 return 0;
695
696 /* The ETV value of a2 needs to be ETV of a1 + 1. */
697 if (a1->edf2.etv + 1 != a2->edf2.etv)
698 return 0;
699
700 if (!etr_port_valid(a2, p))
701 return 0;
702
703 return 1;
704}
705
706/*
707 * The time is "clock". xtime is what we think the time is.
708 * Adjust the value by a multiple of jiffies and add the delta to ntp.
 709 * "delay" is an approximation of how long the synchronization took. If
710 * the time correction is positive, then "delay" is subtracted from
711 * the time difference and only the remaining part is passed to ntp.
712 */
713static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
714{
715 unsigned long long delta, ticks;
716 struct timex adjust;
717
718 /*
719 * We don't have to take the xtime lock because the cpu
720 * executing etr_adjust_time is running disabled in
721 * tasklet context and all other cpus are looping in
722 * etr_sync_cpu_start.
723 */
724 if (clock > xtime_cc) {
725 /* It is later than we thought. */
726 delta = ticks = clock - xtime_cc;
727 delta = ticks = (delta < delay) ? 0 : delta - delay;
728 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
729 init_timer_cc = init_timer_cc + delta;
730 jiffies_timer_cc = jiffies_timer_cc + delta;
731 xtime_cc = xtime_cc + delta;
732 adjust.offset = ticks * (1000000 / HZ);
733 } else {
734 /* It is earlier than we thought. */
735 delta = ticks = xtime_cc - clock;
736 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
737 init_timer_cc = init_timer_cc - delta;
738 jiffies_timer_cc = jiffies_timer_cc - delta;
739 xtime_cc = xtime_cc - delta;
740 adjust.offset = -ticks * (1000000 / HZ);
741 }
742 if (adjust.offset != 0) {
743 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
744 adjust.offset);
745 adjust.modes = ADJ_OFFSET_SINGLESHOT;
746 do_adjtimex(&adjust);
747 }
748}
749
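A rough worked example of the split performed above, under the assumption HZ=100 so that CLK_TICKS_PER_JIFFY = 4096000000 / 100 = 40960000 TOD units (the numbers are invented for illustration):

	unsigned long long delta, ticks;

	delta = ticks = 100000000ULL;		/* clock ahead of xtime_cc by ~24.4 ms */
	delta -= do_div(ticks, 40960000);	/* ticks = 2, sub-jiffy rest 18080000 left out */
	/* delta = 81920000: the timer bases move by two whole jiffies,    */
	/* while ntp receives ticks * (1000000 / HZ) = 20000 microseconds. */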
750static void etr_sync_cpu_start(void *dummy)
751{
752 int *in_sync = dummy;
753
754 etr_enable_sync_clock();
755 /*
	 756	 * This looks like a busy wait loop but it isn't. etr_sync_cpu_start
	 757	 * is called on all other cpus while the TOD clock is stopped.
758 * __udelay will stop the cpu on an enabled wait psw until the
759 * TOD is running again.
760 */
761 while (*in_sync == 0)
762 __udelay(1);
763 if (*in_sync != 1)
764 /* Didn't work. Clear per-cpu in sync bit again. */
765 etr_disable_sync_clock(NULL);
766 /*
767 * This round of TOD syncing is done. Set the clock comparator
768 * to the next tick and let the processor continue.
769 */
770 setup_jiffy_timer();
771}
772
773static void etr_sync_cpu_end(void *dummy)
774{
775}
776
777/*
 778 * Sync the TOD clock using the port referred to by aib. This port
779 * has to be enabled and the other port has to be disabled. The
780 * last eacr update has to be more than 1.6 seconds in the past.
781 */
782static int etr_sync_clock(struct etr_aib *aib, int port)
783{
784 struct etr_aib *sync_port;
785 unsigned long long clock, delay;
786 int in_sync, follows;
787 int rc;
788
789 /* Check if the current aib is adjacent to the sync port aib. */
790 sync_port = (port == 0) ? &etr_port0 : &etr_port1;
791 follows = etr_aib_follows(sync_port, aib, port);
792 memcpy(sync_port, aib, sizeof(*aib));
793 if (!follows)
794 return -EAGAIN;
795
796 /*
797 * Catch all other cpus and make them wait until we have
798 * successfully synced the clock. smp_call_function will
799 * return after all other cpus are in etr_sync_cpu_start.
800 */
801 in_sync = 0;
802 preempt_disable();
 803	smp_call_function(etr_sync_cpu_start, &in_sync, 0, 0);
804 local_irq_disable();
805 etr_enable_sync_clock();
806
807 /* Set clock to next OTE. */
808 __ctl_set_bit(14, 21);
809 __ctl_set_bit(0, 29);
810 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
811 if (set_clock(clock) == 0) {
812 __udelay(1); /* Wait for the clock to start. */
813 __ctl_clear_bit(0, 29);
814 __ctl_clear_bit(14, 21);
815 etr_stetr(aib);
816 /* Adjust Linux timing variables. */
817 delay = (unsigned long long)
818 (aib->edf2.etv - sync_port->edf2.etv) << 32;
819 etr_adjust_time(clock, delay);
820 setup_jiffy_timer();
821 /* Verify that the clock is properly set. */
822 if (!etr_aib_follows(sync_port, aib, port)) {
823 /* Didn't work. */
824 etr_disable_sync_clock(NULL);
825 in_sync = -EAGAIN;
826 rc = -EAGAIN;
827 } else {
828 in_sync = 1;
829 rc = 0;
830 }
831 } else {
832 /* Could not set the clock ?!? */
833 __ctl_clear_bit(0, 29);
834 __ctl_clear_bit(14, 21);
835 etr_disable_sync_clock(NULL);
836 in_sync = -EAGAIN;
837 rc = -EAGAIN;
838 }
839 local_irq_enable();
 840	smp_call_function(etr_sync_cpu_end, NULL, 0, 0);
841 preempt_enable();
842 return rc;
843}
844
845/*
846 * Handle the immediate effects of the different events.
847 * The port change event is used for online/offline changes.
848 */
849static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
850{
851 if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
852 eacr.es = 0;
853 if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
854 eacr.es = eacr.sl = 0;
855 if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
856 etr_port0_uptodate = etr_port1_uptodate = 0;
857
858 if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
859 if (eacr.e0)
860 /*
861 * Port change of an enabled port. We have to
 862			 * assume that this may have caused a stepping
863 * port switch.
864 */
865 etr_tolec = get_clock();
866 eacr.p0 = etr_port0_online;
867 if (!eacr.p0)
868 eacr.e0 = 0;
869 etr_port0_uptodate = 0;
870 }
871 if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
872 if (eacr.e1)
873 /*
874 * Port change of an enabled port. We have to
 875			 * assume that this may have caused a stepping
876 * port switch.
877 */
878 etr_tolec = get_clock();
879 eacr.p1 = etr_port1_online;
880 if (!eacr.p1)
881 eacr.e1 = 0;
882 etr_port1_uptodate = 0;
883 }
884 clear_bit(ETR_EVENT_UPDATE, &etr_events);
885 return eacr;
886}
887
888/*
 889 * Set up a timer that expires 1.6 seconds after etr_tolec if
890 * one of the ports needs an update.
891 */
892static void etr_set_tolec_timeout(unsigned long long now)
893{
894 unsigned long micros;
895
896 if ((!etr_eacr.p0 || etr_port0_uptodate) &&
897 (!etr_eacr.p1 || etr_port1_uptodate))
898 return;
899 micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
900 micros = (micros > 1600000) ? 0 : 1600000 - micros;
901 mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
902}
903
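The ">> 12" above relies on the s390 TOD format, in which one microsecond corresponds to 4096 clock units; a tiny helper sketch of that conversion (the name is invented here):

	static inline unsigned long long tod_to_usecs(unsigned long long tod_delta)
	{
		return tod_delta >> 12;		/* 4096 TOD units per microsecond */
	}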
904/*
 905 * Set up a timer that expires after 1/2 second.
906 */
907static void etr_set_sync_timeout(void)
908{
909 mod_timer(&etr_timer, jiffies + HZ/2);
910}
911
912/*
913 * Update the aib information for one or both ports.
914 */
915static struct etr_eacr etr_handle_update(struct etr_aib *aib,
916 struct etr_eacr eacr)
917{
918 /* With both ports disabled the aib information is useless. */
919 if (!eacr.e0 && !eacr.e1)
920 return eacr;
921
922 /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
923 if (aib->esw.q == 0) {
924 /* Information for port 0 stored. */
925 if (eacr.p0 && !etr_port0_uptodate) {
926 etr_port0 = *aib;
927 if (etr_port0_online)
928 etr_port0_uptodate = 1;
929 }
930 } else {
931 /* Information for port 1 stored. */
932 if (eacr.p1 && !etr_port1_uptodate) {
933 etr_port1 = *aib;
 934			if (etr_port1_online)
935 etr_port1_uptodate = 1;
936 }
937 }
938
939 /*
940 * Do not try to get the alternate port aib if the clock
941 * is not in sync yet.
942 */
943 if (!eacr.es)
944 return eacr;
945
946 /*
947 * If steai is available we can get the information about
948 * the other port immediately. If only stetr is available the
949 * data-port bit toggle has to be used.
950 */
951 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
952 if (eacr.p0 && !etr_port0_uptodate) {
953 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
954 etr_port0_uptodate = 1;
955 }
956 if (eacr.p1 && !etr_port1_uptodate) {
957 etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
958 etr_port1_uptodate = 1;
959 }
960 } else {
961 /*
 962		 * One port was updated above; if the other
 963		 * port is not uptodate, toggle the dp bit.
964 */
965 if ((eacr.p0 && !etr_port0_uptodate) ||
966 (eacr.p1 && !etr_port1_uptodate))
967 eacr.dp ^= 1;
968 else
969 eacr.dp = 0;
970 }
971 return eacr;
972}
973
974/*
 975 * Write a new etr control register if it differs from the current one.
 976 * Update etr_tolec if the change could cause a data port switch.
977 */
978static void etr_update_eacr(struct etr_eacr eacr)
979{
980 int dp_changed;
981
982 if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
983 /* No change, return. */
984 return;
985 /*
 986	 * The disable of an active port or the change of the data port
987 * bit can/will cause a change in the data port.
988 */
989 dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
990 (etr_eacr.dp ^ eacr.dp) != 0;
991 etr_eacr = eacr;
992 etr_setr(&etr_eacr);
993 if (dp_changed)
994 etr_tolec = get_clock();
995}
996
997/*
998 * ETR tasklet. In this function you'll find the main logic. In
 999 * particular this is the only function that calls etr_update_eacr();
1000 * it "controls" the etr control register.
1001 */
1002static void etr_tasklet_fn(unsigned long dummy)
1003{
1004 unsigned long long now;
1005 struct etr_eacr eacr;
1006 struct etr_aib aib;
1007 int sync_port;
1008
1009 /* Create working copy of etr_eacr. */
1010 eacr = etr_eacr;
1011
1012 /* Check for the different events and their immediate effects. */
1013 eacr = etr_handle_events(eacr);
1014
1015 /* Check if ETR is supposed to be active. */
1016 eacr.ea = eacr.p0 || eacr.p1;
1017 if (!eacr.ea) {
1018 /* Both ports offline. Reset everything. */
1019 eacr.dp = eacr.es = eacr.sl = 0;
1020 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
1021 del_timer_sync(&etr_timer);
1022 etr_update_eacr(eacr);
1023 set_bit(ETR_FLAG_EACCES, &etr_flags);
1024 return;
1025 }
1026
1027 /* Store aib to get the current ETR status word. */
1028 BUG_ON(etr_stetr(&aib) != 0);
1029 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
1030 now = get_clock();
1031
1032 /*
1033 * Update the port information if the last stepping port change
1034 * or data port change is older than 1.6 seconds.
1035 */
1036 if (now >= etr_tolec + (1600000 << 12))
1037 eacr = etr_handle_update(&aib, eacr);
1038
1039 /*
 1040	 * Select ports to enable. The preferred synchronization mode is PPS.
 1041	 * Whether a port can be enabled depends on a number of things:
1042 * 1) The port needs to be online and uptodate. A port is not
1043 * disabled just because it is not uptodate, but it is only
1044 * enabled if it is uptodate.
1045 * 2) The port needs to have the same mode (pps / etr).
1046 * 3) The port needs to be usable -> etr_port_valid() == 1
1047 * 4) To enable the second port the clock needs to be in sync.
 1048	 * 5) If both ports are usable and are ETR ports, the network id
1049 * has to be the same.
1050 * The eacr.sl bit is used to indicate etr mode vs. pps mode.
1051 */
1052 if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
1053 eacr.sl = 0;
1054 eacr.e0 = 1;
1055 if (!etr_mode_is_pps(etr_eacr))
1056 eacr.es = 0;
1057 if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
1058 eacr.e1 = 0;
1059 // FIXME: uptodate checks ?
1060 else if (etr_port0_uptodate && etr_port1_uptodate)
1061 eacr.e1 = 1;
1062 sync_port = (etr_port0_uptodate &&
1063 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1064 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1065 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
1066 eacr.sl = 0;
1067 eacr.e0 = 0;
1068 eacr.e1 = 1;
1069 if (!etr_mode_is_pps(etr_eacr))
1070 eacr.es = 0;
1071 sync_port = (etr_port1_uptodate &&
1072 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1073 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1074 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
1075 eacr.sl = 1;
1076 eacr.e0 = 1;
1077 if (!etr_mode_is_etr(etr_eacr))
1078 eacr.es = 0;
1079 if (!eacr.es || !eacr.p1 ||
1080 aib.esw.psc1 != etr_lpsc_operational_alt)
1081 eacr.e1 = 0;
1082 else if (etr_port0_uptodate && etr_port1_uptodate &&
1083 etr_compare_network(&etr_port0, &etr_port1))
1084 eacr.e1 = 1;
1085 sync_port = (etr_port0_uptodate &&
1086 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1087 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1088 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
1089 eacr.sl = 1;
1090 eacr.e0 = 0;
1091 eacr.e1 = 1;
1092 if (!etr_mode_is_etr(etr_eacr))
1093 eacr.es = 0;
1094 sync_port = (etr_port1_uptodate &&
1095 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1096 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1097 } else {
1098 /* Both ports not usable. */
1099 eacr.es = eacr.sl = 0;
1100 sync_port = -1;
1101 set_bit(ETR_FLAG_EACCES, &etr_flags);
1102 }
1103
1104 /*
1105 * If the clock is in sync just update the eacr and return.
1106 * If there is no valid sync port wait for a port update.
1107 */
1108 if (eacr.es || sync_port < 0) {
1109 etr_update_eacr(eacr);
1110 etr_set_tolec_timeout(now);
1111 return;
1112 }
1113
1114 /*
1115 * Prepare control register for clock syncing
 1116	 * (reset data port bit, set sync check control).
1117 */
1118 eacr.dp = 0;
1119 eacr.es = 1;
1120
1121 /*
1122 * Update eacr and try to synchronize the clock. If the update
1123 * of eacr caused a stepping port switch (or if we have to
 1124	 * assume that a stepping port switch has occurred) or the
1125 * clock syncing failed, reset the sync check control bit
1126 * and set up a timer to try again after 0.5 seconds
1127 */
1128 etr_update_eacr(eacr);
1129 if (now < etr_tolec + (1600000 << 12) ||
1130 etr_sync_clock(&aib, sync_port) != 0) {
1131 /* Sync failed. Try again in 1/2 second. */
1132 eacr.es = 0;
1133 etr_update_eacr(eacr);
1134 etr_set_sync_timeout();
1135 } else
1136 etr_set_tolec_timeout(now);
1137}
1138
1139/*
1140 * Sysfs interface functions
1141 */
1142static struct sysdev_class etr_sysclass = {
1143 set_kset_name("etr")
1144};
1145
1146static struct sys_device etr_port0_dev = {
1147 .id = 0,
1148 .cls = &etr_sysclass,
1149};
1150
1151static struct sys_device etr_port1_dev = {
1152 .id = 1,
1153 .cls = &etr_sysclass,
1154};
1155
1156/*
1157 * ETR class attributes
1158 */
1159static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf)
1160{
1161 return sprintf(buf, "%i\n", etr_port0.esw.p);
1162}
1163
1164static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1165
1166static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf)
1167{
1168 char *mode_str;
1169
1170 if (etr_mode_is_pps(etr_eacr))
1171 mode_str = "pps";
1172 else if (etr_mode_is_etr(etr_eacr))
1173 mode_str = "etr";
1174 else
1175 mode_str = "local";
1176 return sprintf(buf, "%s\n", mode_str);
1177}
1178
1179static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1180
1181/*
1182 * ETR port attributes
1183 */
1184static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1185{
1186 if (dev == &etr_port0_dev)
1187 return etr_port0_online ? &etr_port0 : NULL;
1188 else
1189 return etr_port1_online ? &etr_port1 : NULL;
1190}
1191
1192static ssize_t etr_online_show(struct sys_device *dev, char *buf)
1193{
1194 unsigned int online;
1195
1196 online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
1197 return sprintf(buf, "%i\n", online);
1198}
1199
1200static ssize_t etr_online_store(struct sys_device *dev,
1201 const char *buf, size_t count)
1202{
1203 unsigned int value;
1204
1205 value = simple_strtoul(buf, NULL, 0);
1206 if (value != 0 && value != 1)
1207 return -EINVAL;
1208 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
1209 return -ENOSYS;
1210 if (dev == &etr_port0_dev) {
1211 if (etr_port0_online == value)
1212 return count; /* Nothing to do. */
1213 etr_port0_online = value;
1214 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1215 tasklet_hi_schedule(&etr_tasklet);
1216 } else {
1217 if (etr_port1_online == value)
1218 return count; /* Nothing to do. */
1219 etr_port1_online = value;
1220 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1221 tasklet_hi_schedule(&etr_tasklet);
1222 }
1223 return count;
1224}
1225
1226static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1227
1228static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
1229{
1230 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1231 etr_eacr.e0 : etr_eacr.e1);
1232}
1233
1234static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1235
1236static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
1237{
1238 if (!etr_port0_online && !etr_port1_online)
1239 /* Status word is not uptodate if both ports are offline. */
1240 return -ENODATA;
1241 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1242 etr_port0.esw.psc0 : etr_port0.esw.psc1);
1243}
1244
1245static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1246
1247static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
1248{
1249 struct etr_aib *aib = etr_aib_from_dev(dev);
1250
1251 if (!aib || !aib->slsw.v1)
1252 return -ENODATA;
1253 return sprintf(buf, "%i\n", aib->edf1.u);
1254}
1255
1256static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1257
1258static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
1259{
1260 struct etr_aib *aib = etr_aib_from_dev(dev);
1261
1262 if (!aib || !aib->slsw.v1)
1263 return -ENODATA;
1264 return sprintf(buf, "%i\n", aib->edf1.net_id);
1265}
1266
1267static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1268
1269static ssize_t etr_id_show(struct sys_device *dev, char *buf)
1270{
1271 struct etr_aib *aib = etr_aib_from_dev(dev);
1272
1273 if (!aib || !aib->slsw.v1)
1274 return -ENODATA;
1275 return sprintf(buf, "%i\n", aib->edf1.etr_id);
1276}
1277
1278static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1279
1280static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
1281{
1282 struct etr_aib *aib = etr_aib_from_dev(dev);
1283
1284 if (!aib || !aib->slsw.v1)
1285 return -ENODATA;
1286 return sprintf(buf, "%i\n", aib->edf1.etr_pn);
1287}
1288
1289static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1290
1291static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
1292{
1293 struct etr_aib *aib = etr_aib_from_dev(dev);
1294
1295 if (!aib || !aib->slsw.v3)
1296 return -ENODATA;
1297 return sprintf(buf, "%i\n", aib->edf3.c);
1298}
1299
1300static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1301
1302static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
1303{
1304 struct etr_aib *aib = etr_aib_from_dev(dev);
1305
1306 if (!aib || !aib->slsw.v3)
1307 return -ENODATA;
1308 return sprintf(buf, "%i\n", aib->edf3.blto);
1309}
1310
1311static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1312
1313static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf)
1314{
1315 struct etr_aib *aib = etr_aib_from_dev(dev);
1316
1317 if (!aib || !aib->slsw.v3)
1318 return -ENODATA;
1319 return sprintf(buf, "%i\n", aib->edf3.buo);
1320}
1321
1322static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1323
1324static struct sysdev_attribute *etr_port_attributes[] = {
1325 &attr_online,
1326 &attr_stepping_control,
1327 &attr_state_code,
1328 &attr_untuned,
1329 &attr_network,
1330 &attr_id,
1331 &attr_port,
1332 &attr_coupled,
1333 &attr_local_time,
1334 &attr_utc_offset,
1335 NULL
1336};
1337
1338static int __init etr_register_port(struct sys_device *dev)
1339{
1340 struct sysdev_attribute **attr;
1341 int rc;
1342
1343 rc = sysdev_register(dev);
1344 if (rc)
1345 goto out;
1346 for (attr = etr_port_attributes; *attr; attr++) {
1347 rc = sysdev_create_file(dev, *attr);
1348 if (rc)
1349 goto out_unreg;
1350 }
1351 return 0;
1352out_unreg:
1353 for (; attr >= etr_port_attributes; attr--)
1354 sysdev_remove_file(dev, *attr);
1355 sysdev_unregister(dev);
1356out:
1357 return rc;
1358}
1359
1360static void __init etr_unregister_port(struct sys_device *dev)
1361{
1362 struct sysdev_attribute **attr;
1363
1364 for (attr = etr_port_attributes; *attr; attr++)
1365 sysdev_remove_file(dev, *attr);
1366 sysdev_unregister(dev);
1367}
1368
1369static int __init etr_init_sysfs(void)
1370{
1371 int rc;
1372
1373 rc = sysdev_class_register(&etr_sysclass);
1374 if (rc)
1375 goto out;
1376 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
1377 if (rc)
1378 goto out_unreg_class;
1379 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
1380 if (rc)
1381 goto out_remove_stepping_port;
1382 rc = etr_register_port(&etr_port0_dev);
1383 if (rc)
1384 goto out_remove_stepping_mode;
1385 rc = etr_register_port(&etr_port1_dev);
1386 if (rc)
1387 goto out_remove_port0;
1388 return 0;
1389
1390out_remove_port0:
1391 etr_unregister_port(&etr_port0_dev);
1392out_remove_stepping_mode:
1393 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
1394out_remove_stepping_port:
1395 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
1396out_unreg_class:
1397 sysdev_class_unregister(&etr_sysclass);
1398out:
1399 return rc;
348} 1400}
349 1401
1402device_initcall(etr_init_sysfs);
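With the sysdev class registered as "etr" and the two port devices given ids 0 and 1, the attributes created above are expected to appear under the usual sysdev paths, roughly like this (exact layout assumed from the sysdev conventions of this kernel generation):

	/sys/devices/system/etr/stepping_mode		"pps", "etr" or "local"
	/sys/devices/system/etr/stepping_port		current stepping port
	/sys/devices/system/etr/etr0/online		0/1, writable (0600)
	/sys/devices/system/etr/etr1/state_code		logical port state code

Writing 1 to an online file lands in etr_online_store(), which flags the matching port-change event and schedules the ETR tasklet.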
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3cbb0dcf1f1d..f0e5a320e2ec 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
283 return buffer; 283 return buffer;
284} 284}
285 285
286DEFINE_SPINLOCK(die_lock); 286static DEFINE_SPINLOCK(die_lock);
287 287
288void die(const char * str, struct pt_regs * regs, long err) 288void die(const char * str, struct pt_regs * regs, long err)
289{ 289{
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs)
364 force_sig(SIGTRAP, current); 364 force_sig(SIGTRAP, current);
365} 365}
366 366
367asmlinkage void 367static void default_trap_handler(struct pt_regs * regs, long interruption_code)
368default_trap_handler(struct pt_regs * regs, long interruption_code)
369{ 368{
370 if (regs->psw.mask & PSW_MASK_PSTATE) { 369 if (regs->psw.mask & PSW_MASK_PSTATE) {
371 local_irq_enable(); 370 local_irq_enable();
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code)
376} 375}
377 376
378#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 377#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
379asmlinkage void name(struct pt_regs * regs, long interruption_code) \ 378static void name(struct pt_regs * regs, long interruption_code) \
380{ \ 379{ \
381 siginfo_t info; \ 380 siginfo_t info; \
382 info.si_signo = signr; \ 381 info.si_signo = signr; \
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
442 "floating point exception", regs, &si); 441 "floating point exception", regs, &si);
443} 442}
444 443
445asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) 444static void illegal_op(struct pt_regs * regs, long interruption_code)
446{ 445{
447 siginfo_t info; 446 siginfo_t info;
448 __u8 opcode[6]; 447 __u8 opcode[6];
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
491#endif 490#endif
492 } else 491 } else
493 signal = SIGILL; 492 signal = SIGILL;
494 } else 493 } else {
495 signal = SIGILL; 494 /*
495 * If we get an illegal op in kernel mode, send it through the
496 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
497 */
498 if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
499 3, SIGTRAP) != NOTIFY_STOP)
500 signal = SIGILL;
501 }
496 502
497#ifdef CONFIG_MATHEMU 503#ifdef CONFIG_MATHEMU
498 if (signal == SIGFPE) 504 if (signal == SIGFPE)
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
585 ILL_ILLOPN, get_check_address(regs)); 591 ILL_ILLOPN, get_check_address(regs));
586#endif 592#endif
587 593
588asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) 594static void data_exception(struct pt_regs * regs, long interruption_code)
589{ 595{
590 __u16 __user *location; 596 __u16 __user *location;
591 int signal = 0; 597 int signal = 0;
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
675 } 681 }
676} 682}
677 683
678asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) 684static void space_switch_exception(struct pt_regs * regs, long int_code)
679{ 685{
680 siginfo_t info; 686 siginfo_t info;
681 687
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index fe0f2e97ba7b..a48907392522 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,18 +31,19 @@ SECTIONS
31 31
32 _etext = .; /* End of text section */ 32 _etext = .; /* End of text section */
33 33
34 . = ALIGN(16); /* Exception table */
35 __start___ex_table = .;
36 __ex_table : { *(__ex_table) }
37 __stop___ex_table = .;
38
39 RODATA 34 RODATA
40 35
41#ifdef CONFIG_SHARED_KERNEL 36#ifdef CONFIG_SHARED_KERNEL
42 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ 37 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */
38#endif
43 39
40 . = ALIGN(4096);
44 _eshared = .; /* End of shareable data */ 41 _eshared = .; /* End of shareable data */
45#endif 42
43 . = ALIGN(16); /* Exception table */
44 __start___ex_table = .;
45 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .;
46 47
47 .data : { /* Data */ 48 .data : { /* Data */
48 *(.data) 49 *(.data)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 21baaf5496d6..9d5b02801b46 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26 26
27static ext_int_info_t ext_int_info_timer; 27static ext_int_info_t ext_int_info_timer;
28DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 28static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
29 29
30#ifdef CONFIG_VIRT_CPU_ACCOUNTING 30#ifdef CONFIG_VIRT_CPU_ACCOUNTING
31/* 31/*
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer);
524void init_cpu_vtimer(void) 524void init_cpu_vtimer(void)
525{ 525{
526 struct vtimer_queue *vt_list; 526 struct vtimer_queue *vt_list;
527 unsigned long cr0;
528 527
529 /* kick the virtual timer */ 528 /* kick the virtual timer */
530 S390_lowcore.exit_timer = VTIMER_MAX_SLICE; 529 S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
531 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; 530 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
532 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 531 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
533 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); 532 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
534 __ctl_store(cr0, 0, 0); 533
535 cr0 |= 0x400; 534 /* enable cpu timer interrupts */
536 __ctl_load(cr0, 0, 0); 535 __ctl_set_bit(0,10);
537 536
538 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 537 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
539 INIT_LIST_HEAD(&vt_list->list); 538 INIT_LIST_HEAD(&vt_list->list);
@@ -572,6 +571,7 @@ void __init vtime_init(void)
572 if (register_idle_notifier(&vtimer_idle_nb)) 571 if (register_idle_notifier(&vtimer_idle_nb))
573 panic("Couldn't register idle notifier"); 572 panic("Couldn't register idle notifier");
574 573
574 /* Enable cpu timer interrupts on the boot cpu. */
575 init_cpu_vtimer(); 575 init_cpu_vtimer();
576} 576}
577 577
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b5f94cf3bde8..7a44fed21b35 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o
8lib-$(CONFIG_32BIT) += div64.o 8lib-$(CONFIG_32BIT) += div64.o
9lib-$(CONFIG_64BIT) += uaccess_mvcos.o 9lib-$(CONFIG_64BIT) += uaccess_mvcos.o
10lib-$(CONFIG_SMP) += spinlock.o 10lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 027c4742a001..02854449b74b 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/delay.c 2 * arch/s390/lib/delay.c
3 * Precise Delay Loops for S390 3 * Precise Delay Loops for S390
4 * 4 *
5 * S390 version 5 * S390 version
@@ -13,10 +13,8 @@
13 13
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/timex.h>
17#ifdef CONFIG_SMP 17#include <linux/irqflags.h>
18#include <asm/smp.h>
19#endif
20 18
21void __delay(unsigned long loops) 19void __delay(unsigned long loops)
22{ 20{
@@ -31,17 +29,39 @@ void __delay(unsigned long loops)
31} 29}
32 30
33/* 31/*
34 * Waits for 'usecs' microseconds using the tod clock, giving up the time slice 32 * Waits for 'usecs' microseconds using the TOD clock comparator.
35 * of the virtual PU inbetween to avoid congestion.
36 */ 33 */
37void __udelay(unsigned long usecs) 34void __udelay(unsigned long usecs)
38{ 35{
39 uint64_t start_cc; 36 u64 end, time, jiffy_timer = 0;
37 unsigned long flags, cr0, mask, dummy;
38
39 local_irq_save(flags);
40 if (raw_irqs_disabled_flags(flags)) {
41 jiffy_timer = S390_lowcore.jiffy_timer;
42 S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
43 __ctl_store(cr0, 0, 0);
44 dummy = (cr0 & 0xffff00e0) | 0x00000800;
 45		__ctl_load(dummy, 0, 0);
46 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
47 } else
48 mask = psw_kernel_bits | PSW_MASK_WAIT |
49 PSW_MASK_EXT | PSW_MASK_IO;
50
51 end = get_clock() + ((u64) usecs << 12);
52 do {
53 time = end < S390_lowcore.jiffy_timer ?
54 end : S390_lowcore.jiffy_timer;
55 set_clock_comparator(time);
56 trace_hardirqs_on();
57 __load_psw_mask(mask);
58 local_irq_disable();
59 } while (get_clock() < end);
40 60
41 if (usecs == 0) 61 if (raw_irqs_disabled_flags(flags)) {
42 return; 62 __ctl_load(cr0, 0, 0);
43 start_cc = get_clock(); 63 S390_lowcore.jiffy_timer = jiffy_timer;
44 do { 64 }
45 cpu_relax(); 65 set_clock_comparator(S390_lowcore.jiffy_timer);
46 } while (((get_clock() - start_cc)/4096) < usecs); 66 local_irq_restore(flags);
47} 67}
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
new file mode 100644
index 000000000000..eb1df632e749
--- /dev/null
+++ b/arch/s390/lib/qrnnd.S
@@ -0,0 +1,77 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
 7# the remainder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
 13	st	%r2,24(%r15)	# store pointer to remainder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
22 brc 3,0f # (n/4) div (n/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
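A hedged C model of what __udiv_qrnnd computes, derived only from the register comments at the top of the file: divide the 64-bit value (n1 << 32 | n0) by the 32-bit divisor d, return the quotient and store the remainder through the pointer. The usual __udiv_qrnnd convention that n1 < d (so the quotient fits in 32 bits) is assumed; this model is for reading the assembler, not for building into the 31-bit kernel, which is exactly what the hand-written routine avoids.

	static unsigned int udiv_qrnnd_model(unsigned int *rem, unsigned int n1,
					     unsigned int n0, unsigned int d)
	{
		unsigned long long n = ((unsigned long long) n1 << 32) | n0;

		*rem = n % d;		/* remainder stored through &__r */
		return n / d;		/* quotient returned in %r2 */
	}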
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
new file mode 100644
index 000000000000..126011df14f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.h
@@ -0,0 +1,23 @@
1/*
 2 * arch/s390/lib/uaccess.h
3 *
4 * Copyright IBM Corp. 2007
5 *
6 */
7
8#ifndef __ARCH_S390_LIB_UACCESS_H
9#define __ARCH_S390_LIB_UACCESS_H
10
11extern size_t copy_from_user_std(size_t, const void __user *, void *);
12extern size_t copy_to_user_std(size_t, void __user *, const void *);
13extern size_t strnlen_user_std(size_t, const char __user *);
14extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
15extern int futex_atomic_cmpxchg_std(int __user *, int, int);
16extern int futex_atomic_op_std(int, int __user *, int, int *);
17
18extern size_t copy_from_user_pt(size_t, const void __user *, void *);
19extern size_t copy_to_user_pt(size_t, void __user *, const void *);
20extern int futex_atomic_op_pt(int, int __user *, int, int *);
21extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
22
23#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index f9a23d57eb79..6d8772339d76 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -12,6 +12,7 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/futex.h> 14#include <asm/futex.h>
15#include "uaccess.h"
15 16
16#ifndef __s390x__ 17#ifndef __s390x__
17#define AHI "ahi" 18#define AHI "ahi"
@@ -27,10 +28,7 @@
27#define SLR "slgr" 28#define SLR "slgr"
28#endif 29#endif
29 30
30extern size_t copy_from_user_std(size_t, const void __user *, void *); 31static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
31extern size_t copy_to_user_std(size_t, void __user *, const void *);
32
33size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
34{ 32{
35 register unsigned long reg0 asm("0") = 0x81UL; 33 register unsigned long reg0 asm("0") = 0x81UL;
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
69 return size; 67 return size;
70} 68}
71 69
72size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) 70static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
73{ 71{
74 if (size <= 256) 72 if (size <= 256)
75 return copy_from_user_std(size, ptr, x); 73 return copy_from_user_std(size, ptr, x);
76 return copy_from_user_mvcos(size, ptr, x); 74 return copy_from_user_mvcos(size, ptr, x);
77} 75}
78 76
79size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 77static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
80{ 78{
81 register unsigned long reg0 asm("0") = 0x810000UL; 79 register unsigned long reg0 asm("0") = 0x810000UL;
82 unsigned long tmp1, tmp2; 80 unsigned long tmp1, tmp2;
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
105 return size; 103 return size;
106} 104}
107 105
108size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) 106static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
107 const void *x)
109{ 108{
110 if (size <= 256) 109 if (size <= 256)
111 return copy_to_user_std(size, ptr, x); 110 return copy_to_user_std(size, ptr, x);
112 return copy_to_user_mvcos(size, ptr, x); 111 return copy_to_user_mvcos(size, ptr, x);
113} 112}
114 113
115size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) 114static size_t copy_in_user_mvcos(size_t size, void __user *to,
115 const void __user *from)
116{ 116{
117 register unsigned long reg0 asm("0") = 0x810081UL; 117 register unsigned long reg0 asm("0") = 0x810081UL;
118 unsigned long tmp1, tmp2; 118 unsigned long tmp1, tmp2;
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
134 return size; 134 return size;
135} 135}
136 136
137size_t clear_user_mvcos(size_t size, void __user *to) 137static size_t clear_user_mvcos(size_t size, void __user *to)
138{ 138{
139 register unsigned long reg0 asm("0") = 0x810000UL; 139 register unsigned long reg0 asm("0") = 0x810000UL;
140 unsigned long tmp1, tmp2; 140 unsigned long tmp1, tmp2;
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to)
162 return size; 162 return size;
163} 163}
164 164
165extern size_t strnlen_user_std(size_t, const char __user *); 165static size_t strnlen_user_mvcos(size_t count, const char __user *src)
166extern size_t strncpy_from_user_std(size_t, const char __user *, char *); 166{
167extern int futex_atomic_op(int, int __user *, int, int *); 167 char buf[256];
168extern int futex_atomic_cmpxchg(int __user *, int, int); 168 int rc;
169 size_t done, len, len_str;
170
171 done = 0;
172 do {
173 len = min(count - done, (size_t) 256);
174 rc = uaccess.copy_from_user(len, src + done, buf);
175 if (unlikely(rc == len))
176 return 0;
177 len -= rc;
178 len_str = strnlen(buf, len);
179 done += len_str;
180 } while ((len_str == len) && (done < count));
181 return done + 1;
182}
183
184static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
185 char *dst)
186{
187 int rc;
188 size_t done, len, len_str;
189
190 done = 0;
191 do {
192 len = min(count - done, (size_t) 4096);
193 rc = uaccess.copy_from_user(len, src + done, dst);
194 if (unlikely(rc == len))
195 return -EFAULT;
196 len -= rc;
197 len_str = strnlen(dst, len);
198 done += len_str;
199 } while ((len_str == len) && (done < count));
200 return done;
201}
169 202
170struct uaccess_ops uaccess_mvcos = { 203struct uaccess_ops uaccess_mvcos = {
171 .copy_from_user = copy_from_user_mvcos_check, 204 .copy_from_user = copy_from_user_mvcos_check,
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = {
176 .clear_user = clear_user_mvcos, 209 .clear_user = clear_user_mvcos,
177 .strnlen_user = strnlen_user_std, 210 .strnlen_user = strnlen_user_std,
178 .strncpy_from_user = strncpy_from_user_std, 211 .strncpy_from_user = strncpy_from_user_std,
179 .futex_atomic_op = futex_atomic_op, 212 .futex_atomic_op = futex_atomic_op_std,
180 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
214};
215
216#ifdef CONFIG_S390_SWITCH_AMODE
217struct uaccess_ops uaccess_mvcos_switch = {
218 .copy_from_user = copy_from_user_mvcos,
219 .copy_from_user_small = copy_from_user_mvcos,
220 .copy_to_user = copy_to_user_mvcos,
221 .copy_to_user_small = copy_to_user_mvcos,
222 .copy_in_user = copy_in_user_mvcos,
223 .clear_user = clear_user_mvcos,
224 .strnlen_user = strnlen_user_mvcos,
225 .strncpy_from_user = strncpy_from_user_mvcos,
226 .futex_atomic_op = futex_atomic_op_pt,
227 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
181}; 228};
229#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 49c3e46b4065..63181671e3e3 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/lib/uaccess_pt.c 2 * arch/s390/lib/uaccess_pt.c
3 * 3 *
4 * User access functions based on page table walks. 4 * User access functions based on page table walks for enhanced
5 * system layout without hardware support.
5 * 6 *
6 * Copyright IBM Corp. 2006 7 * Copyright IBM Corp. 2006
7 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) 8 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
@@ -12,9 +13,10 @@
12#include <linux/mm.h> 13#include <linux/mm.h>
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
15 17
16static inline int __handle_fault(struct mm_struct *mm, unsigned long address, 18static int __handle_fault(struct mm_struct *mm, unsigned long address,
17 int write_access) 19 int write_access)
18{ 20{
19 struct vm_area_struct *vma; 21 struct vm_area_struct *vma;
20 int ret = -EFAULT; 22 int ret = -EFAULT;
@@ -79,8 +81,8 @@ out_sigbus:
79 return ret; 81 return ret;
80} 82}
81 83
82static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 84static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
83 size_t n, int write_user) 85 size_t n, int write_user)
84{ 86{
85 struct mm_struct *mm = current->mm; 87 struct mm_struct *mm = current->mm;
86 unsigned long offset, pfn, done, size; 88 unsigned long offset, pfn, done, size;
@@ -133,6 +135,49 @@ fault:
133 goto retry; 135 goto retry;
134} 136}
135 137
138/*
139 * Do DAT for user address by page table walk, return kernel address.
140 * This function needs to be called with current->mm->page_table_lock held.
141 */
142static unsigned long __dat_user_addr(unsigned long uaddr)
143{
144 struct mm_struct *mm = current->mm;
145 unsigned long pfn, ret;
146 pgd_t *pgd;
147 pmd_t *pmd;
148 pte_t *pte;
149 int rc;
150
151 ret = 0;
152retry:
153 pgd = pgd_offset(mm, uaddr);
154 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
155 goto fault;
156
157 pmd = pmd_offset(pgd, uaddr);
158 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
159 goto fault;
160
161 pte = pte_offset_map(pmd, uaddr);
162 if (!pte || !pte_present(*pte))
163 goto fault;
164
165 pfn = pte_pfn(*pte);
166 if (!pfn_valid(pfn))
167 goto out;
168
169 ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
170out:
171 return ret;
172fault:
173 spin_unlock(&mm->page_table_lock);
174 rc = __handle_fault(mm, uaddr, 0);
175 spin_lock(&mm->page_table_lock);
176 if (rc)
177 goto out;
178 goto retry;
179}
180
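A short usage sketch for __dat_user_addr(), mirroring how futex_atomic_op_pt() below uses it: the page_table_lock must already be held, and the caller pins the page before dropping the lock (variable names here are illustrative):

	spin_lock(&current->mm->page_table_lock);
	kaddr = __dat_user_addr(uaddr);		/* 0 on failure */
	if (kaddr)
		get_page(virt_to_page(kaddr));
	spin_unlock(&current->mm->page_table_lock);
	/* ... operate on kaddr ..., then put_page(virt_to_page(kaddr)) */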
136size_t copy_from_user_pt(size_t n, const void __user *from, void *to) 181size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
137{ 182{
138 size_t rc; 183 size_t rc;
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
155 } 200 }
156 return __user_copy_pt((unsigned long) to, (void *) from, n, 1); 201 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
157} 202}
203
204static size_t clear_user_pt(size_t n, void __user *to)
205{
206 long done, size, ret;
207
208 if (segment_eq(get_fs(), KERNEL_DS)) {
209 memset((void __kernel __force *) to, 0, n);
210 return 0;
211 }
212 done = 0;
213 do {
214 if (n - done > PAGE_SIZE)
215 size = PAGE_SIZE;
216 else
217 size = n - done;
218 ret = __user_copy_pt((unsigned long) to + done,
219 &empty_zero_page, size, 1);
220 done += size;
221 if (ret)
222 return ret + n - done;
223 } while (done < n);
224 return 0;
225}
226
227static size_t strnlen_user_pt(size_t count, const char __user *src)
228{
229 char *addr;
230 unsigned long uaddr = (unsigned long) src;
231 struct mm_struct *mm = current->mm;
232 unsigned long offset, pfn, done, len;
233 pgd_t *pgd;
234 pmd_t *pmd;
235 pte_t *pte;
236 size_t len_str;
237
238 if (segment_eq(get_fs(), KERNEL_DS))
239 return strnlen((const char __kernel __force *) src, count) + 1;
240 done = 0;
241retry:
242 spin_lock(&mm->page_table_lock);
243 do {
244 pgd = pgd_offset(mm, uaddr);
245 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
246 goto fault;
247
248 pmd = pmd_offset(pgd, uaddr);
249 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
250 goto fault;
251
252 pte = pte_offset_map(pmd, uaddr);
253 if (!pte || !pte_present(*pte))
254 goto fault;
255
256 pfn = pte_pfn(*pte);
257 if (!pfn_valid(pfn)) {
258 done = -1;
259 goto out;
260 }
261
262 offset = uaddr & (PAGE_SIZE-1);
263 addr = (char *)(pfn << PAGE_SHIFT) + offset;
264 len = min(count - done, PAGE_SIZE - offset);
265 len_str = strnlen(addr, len);
266 done += len_str;
267 uaddr += len_str;
268 } while ((len_str == len) && (done < count));
269out:
270 spin_unlock(&mm->page_table_lock);
271 return done + 1;
272fault:
273 spin_unlock(&mm->page_table_lock);
274 if (__handle_fault(mm, uaddr, 0)) {
275 return 0;
276 }
277 goto retry;
278}
279
280static size_t strncpy_from_user_pt(size_t count, const char __user *src,
281 char *dst)
282{
283 size_t n = strnlen_user_pt(count, src);
284
285 if (!n)
286 return -EFAULT;
287 if (n > count)
288 n = count;
289 if (segment_eq(get_fs(), KERNEL_DS)) {
290 memcpy(dst, (const char __kernel __force *) src, n);
291 if (dst[n-1] == '\0')
292 return n-1;
293 else
294 return n;
295 }
296 if (__user_copy_pt((unsigned long) src, dst, n, 0))
297 return -EFAULT;
298 if (dst[n-1] == '\0')
299 return n-1;
300 else
301 return n;
302}
303
304static size_t copy_in_user_pt(size_t n, void __user *to,
305 const void __user *from)
306{
307 struct mm_struct *mm = current->mm;
308 unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
309 uaddr, done, size;
310 unsigned long uaddr_from = (unsigned long) from;
311 unsigned long uaddr_to = (unsigned long) to;
312 pgd_t *pgd_from, *pgd_to;
313 pmd_t *pmd_from, *pmd_to;
314 pte_t *pte_from, *pte_to;
315 int write_user;
316
317 done = 0;
318retry:
319 spin_lock(&mm->page_table_lock);
320 do {
321 pgd_from = pgd_offset(mm, uaddr_from);
322 if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
323 uaddr = uaddr_from;
324 write_user = 0;
325 goto fault;
326 }
327 pgd_to = pgd_offset(mm, uaddr_to);
328 if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
329 uaddr = uaddr_to;
330 write_user = 1;
331 goto fault;
332 }
333
334 pmd_from = pmd_offset(pgd_from, uaddr_from);
335 if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
336 uaddr = uaddr_from;
337 write_user = 0;
338 goto fault;
339 }
340 pmd_to = pmd_offset(pgd_to, uaddr_to);
341 if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
342 uaddr = uaddr_to;
343 write_user = 1;
344 goto fault;
345 }
346
347 pte_from = pte_offset_map(pmd_from, uaddr_from);
348 if (!pte_from || !pte_present(*pte_from)) {
349 uaddr = uaddr_from;
350 write_user = 0;
351 goto fault;
352 }
353 pte_to = pte_offset_map(pmd_to, uaddr_to);
354 if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
355 uaddr = uaddr_to;
356 write_user = 1;
357 goto fault;
358 }
359
360 pfn_from = pte_pfn(*pte_from);
361 if (!pfn_valid(pfn_from))
362 goto out;
363 pfn_to = pte_pfn(*pte_to);
364 if (!pfn_valid(pfn_to))
365 goto out;
366
367 offset_from = uaddr_from & (PAGE_SIZE-1);
 368		offset_to = uaddr_to & (PAGE_SIZE-1);
369 offset_max = max(offset_from, offset_to);
370 size = min(n - done, PAGE_SIZE - offset_max);
371
372 memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
373 (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
374 done += size;
375 uaddr_from += size;
376 uaddr_to += size;
377 } while (done < n);
378out:
379 spin_unlock(&mm->page_table_lock);
380 return n - done;
381fault:
382 spin_unlock(&mm->page_table_lock);
383 if (__handle_fault(mm, uaddr, write_user))
384 return n - done;
385 goto retry;
386}
387
388#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
389 asm volatile("0: l %1,0(%6)\n" \
390 "1: " insn \
391 "2: cs %1,%2,0(%6)\n" \
392 "3: jl 1b\n" \
393 " lhi %0,0\n" \
394 "4:\n" \
395 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
396 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
397 "=m" (*uaddr) \
398 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
399 "m" (*uaddr) : "cc" );
400
401int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
402{
403 int oldval = 0, newval, ret;
404
405 spin_lock(&current->mm->page_table_lock);
406 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
407 if (!uaddr) {
408 spin_unlock(&current->mm->page_table_lock);
409 return -EFAULT;
410 }
411 get_page(virt_to_page(uaddr));
412 spin_unlock(&current->mm->page_table_lock);
413 switch (op) {
414 case FUTEX_OP_SET:
415 __futex_atomic_op("lr %2,%5\n",
416 ret, oldval, newval, uaddr, oparg);
417 break;
418 case FUTEX_OP_ADD:
419 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
420 ret, oldval, newval, uaddr, oparg);
421 break;
422 case FUTEX_OP_OR:
423 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
424 ret, oldval, newval, uaddr, oparg);
425 break;
426 case FUTEX_OP_ANDN:
427 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
428 ret, oldval, newval, uaddr, oparg);
429 break;
430 case FUTEX_OP_XOR:
431 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
432 ret, oldval, newval, uaddr, oparg);
433 break;
434 default:
435 ret = -ENOSYS;
436 }
437 put_page(virt_to_page(uaddr));
438 *old = oldval;
439 return ret;
440}
441
442int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
443{
444 int ret;
445
446 spin_lock(&current->mm->page_table_lock);
447 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
448 if (!uaddr) {
449 spin_unlock(&current->mm->page_table_lock);
450 return -EFAULT;
451 }
452 get_page(virt_to_page(uaddr));
453 spin_unlock(&current->mm->page_table_lock);
454 asm volatile(" cs %1,%4,0(%5)\n"
455 "0: lr %0,%1\n"
456 "1:\n"
457 EX_TABLE(0b,1b)
458 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
459 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
460 : "cc", "memory" );
461 put_page(virt_to_page(uaddr));
462 return ret;
463}
464
465struct uaccess_ops uaccess_pt = {
466 .copy_from_user = copy_from_user_pt,
467 .copy_from_user_small = copy_from_user_pt,
468 .copy_to_user = copy_to_user_pt,
469 .copy_to_user_small = copy_to_user_pt,
470 .copy_in_user = copy_in_user_pt,
471 .clear_user = clear_user_pt,
472 .strnlen_user = strnlen_user_pt,
473 .strncpy_from_user = strncpy_from_user_pt,
474 .futex_atomic_op = futex_atomic_op_pt,
475 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
476};
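
The __futex_atomic_op() macro above implements a load / modify / compare-and-swap retry loop around each operation. A minimal C sketch of that loop, for orientation only: the helper name atomic_op_cs is made up, GCC's __sync_val_compare_and_swap stands in for the cs instruction, and the EX_TABLE-based fault handling is omitted.

	#include <linux/futex.h>	/* FUTEX_OP_* */
	#include <linux/errno.h>

	/* Apply op with operand oparg to *uaddr atomically; old value in *old. */
	static int atomic_op_cs(int *uaddr, int op, int oparg, int *old)
	{
		int oldval, newval;

		do {
			oldval = *uaddr;			/* "l  %1,0(%6)"       */
			switch (op) {				/* the "insn" strings: */
			case FUTEX_OP_SET:  newval = oparg;          break;
			case FUTEX_OP_ADD:  newval = oldval + oparg; break;
			case FUTEX_OP_OR:   newval = oldval | oparg; break;
			case FUTEX_OP_ANDN: newval = oldval & oparg; break; /* "nr" above */
			case FUTEX_OP_XOR:  newval = oldval ^ oparg; break;
			default:            return -ENOSYS;
			}
			/* "cs %1,%2,0(%6)" / "jl 1b": retry until nobody raced us */
		} while (__sync_val_compare_and_swap(uaddr, oldval, newval) != oldval);

		*old = oldval;
		return 0;
	}
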
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 56a0214e9928..28c4500a58d0 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -13,6 +13,7 @@
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
16 17
17#ifndef __s390x__ 18#ifndef __s390x__
18#define AHI "ahi" 19#define AHI "ahi"
@@ -28,9 +29,6 @@
28#define SLR "slgr" 29#define SLR "slgr"
29#endif 30#endif
30 31
31extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
32extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
33
34size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) 32size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
35{ 33{
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
72 return size; 70 return size;
73} 71}
74 72
75size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) 73static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
74 void *x)
76{ 75{
77 if (size <= 1024) 76 if (size <= 1024)
78 return copy_from_user_std(size, ptr, x); 77 return copy_from_user_std(size, ptr, x);
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
110 return size; 109 return size;
111} 110}
112 111
113size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) 112static size_t copy_to_user_std_check(size_t size, void __user *ptr,
113 const void *x)
114{ 114{
115 if (size <= 1024) 115 if (size <= 1024)
116 return copy_to_user_std(size, ptr, x); 116 return copy_to_user_std(size, ptr, x);
117 return copy_to_user_pt(size, ptr, x); 117 return copy_to_user_pt(size, ptr, x);
118} 118}
119 119
120size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) 120static size_t copy_in_user_std(size_t size, void __user *to,
121 const void __user *from)
121{ 122{
122 unsigned long tmp1; 123 unsigned long tmp1;
123 124
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
148 return size; 149 return size;
149} 150}
150 151
151size_t clear_user_std(size_t size, void __user *to) 152static size_t clear_user_std(size_t size, void __user *to)
152{ 153{
153 unsigned long tmp1, tmp2; 154 unsigned long tmp1, tmp2;
154 155
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
254 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 255 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
255 "m" (*uaddr) : "cc"); 256 "m" (*uaddr) : "cc");
256 257
257int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) 258int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
258{ 259{
259 int oldval = 0, newval, ret; 260 int oldval = 0, newval, ret;
260 261
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
286 return ret; 287 return ret;
287} 288}
288 289
289int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) 290int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
290{ 291{
291 int ret; 292 int ret;
292 293
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = {
311 .clear_user = clear_user_std, 312 .clear_user = clear_user_std,
312 .strnlen_user = strnlen_user_std, 313 .strnlen_user = strnlen_user_std,
313 .strncpy_from_user = strncpy_from_user_std, 314 .strncpy_from_user = strncpy_from_user_std,
314 .futex_atomic_op = futex_atomic_op, 315 .futex_atomic_op = futex_atomic_op_std,
315 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 316 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
316}; 317};
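
Both uaccess_std and uaccess_pt fill in the same struct uaccess_ops, so callers can be routed through a single table chosen at boot, and the _check wrappers can punt large copies to the page-table walker. A hedged sketch of that indirection (the selection condition and all helper names here are illustrative, not the kernel's exact code):

	/* Illustrative only: one active ops table, every user copy goes through it. */
	extern struct uaccess_ops uaccess_std;
	extern struct uaccess_ops uaccess_pt;

	static struct uaccess_ops *active_uaccess = &uaccess_std;

	static void __init pick_uaccess_table(int need_pt_walker)	/* hypothetical */
	{
		if (need_pt_walker)
			active_uaccess = &uaccess_pt;
	}

	static inline size_t do_copy_from_user(size_t n, const void __user *from, void *to)
	{
		return active_uaccess->copy_from_user(n, from, to);
	}
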
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
index c10df144f2ab..73b3e72efc46 100644
--- a/arch/s390/math-emu/Makefile
+++ b/arch/s390/math-emu/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the FPU instruction emulation. 2# Makefile for the FPU instruction emulation.
3# 3#
4 4
5obj-$(CONFIG_MATHEMU) := math.o qrnnd.o 5obj-$(CONFIG_MATHEMU) := math.o
6 6
7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w 7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
8EXTRA_AFLAGS := -traditional 8EXTRA_AFLAGS := -traditional
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 6b9aec5a2c18..3ee78ccb617d 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -15,7 +15,7 @@
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/lowcore.h> 16#include <asm/lowcore.h>
17 17
18#include "sfp-util.h" 18#include <asm/sfp-util.h>
19#include <math-emu/soft-fp.h> 19#include <math-emu/soft-fp.h>
20#include <math-emu/single.h> 20#include <math-emu/single.h>
21#include <math-emu/double.h> 21#include <math-emu/double.h>
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S
deleted file mode 100644
index b01c2b648e22..000000000000
--- a/arch/s390/math-emu/qrnnd.S
+++ /dev/null
@@ -1,77 +0,0 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
 7# the remainder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
 13	st	%r2,24(%r15)	# store pointer to remainder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
 22	brc	3,0f		# (n/4) div (d/2) can overflow by 1
 23	ahi	%r0,-1		# trick: subtract d/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
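
For reference, the routine removed above divides a 64-bit numerator (upper half in r3, lower half in r4) by a 32-bit divisor, stores the remainder through the pointer in r2 and returns the quotient. A behavioural sketch in plain C, not a replacement for the tuned assembly (the caller is expected to keep the quotient within 32 bits):

	/* __udiv_qrnnd reference semantics: n = (nh:nl), *r = n % d, returns n / d. */
	unsigned int udiv_qrnnd_ref(unsigned int *r, unsigned int nh,
				    unsigned int nl, unsigned int d)
	{
		unsigned long long n = ((unsigned long long) nh << 32) | nl;

		*r = (unsigned int) (n % d);
		return (unsigned int) (n / d);
	}
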
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 607f50ead1fd..f93a056869bc 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -245,7 +245,7 @@ cmm_set_timeout(long nr, long seconds)
245 cmm_set_timer(); 245 cmm_set_timer();
246} 246}
247 247
248static inline int 248static int
249cmm_skip_blanks(char *cp, char **endp) 249cmm_skip_blanks(char *cp, char **endp)
250{ 250{
251 char *str; 251 char *str;
@@ -414,7 +414,7 @@ cmm_smsg_target(char *from, char *msg)
414} 414}
415#endif 415#endif
416 416
417struct ctl_table_header *cmm_sysctl_header; 417static struct ctl_table_header *cmm_sysctl_header;
418 418
419static int 419static int
420cmm_init (void) 420cmm_init (void)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 775bf19e742b..394980b05e6f 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/ioport.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
@@ -70,6 +71,7 @@ struct qin64 {
70struct dcss_segment { 71struct dcss_segment {
71 struct list_head list; 72 struct list_head list;
72 char dcss_name[8]; 73 char dcss_name[8];
 74 char res_name[16];
73 unsigned long start_addr; 75 unsigned long start_addr;
74 unsigned long end; 76 unsigned long end;
75 atomic_t ref_count; 77 atomic_t ref_count;
@@ -77,6 +79,7 @@ struct dcss_segment {
77 unsigned int vm_segtype; 79 unsigned int vm_segtype;
78 struct qrange range[6]; 80 struct qrange range[6];
79 int segcnt; 81 int segcnt;
82 struct resource *res;
80}; 83};
81 84
82static DEFINE_MUTEX(dcss_lock); 85static DEFINE_MUTEX(dcss_lock);
@@ -88,7 +91,7 @@ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
88 * Create the 8 bytes, ebcdic VM segment name from 91 * Create the 8 bytes, ebcdic VM segment name from
89 * an ascii name. 92 * an ascii name.
90 */ 93 */
91static void inline 94static void
92dcss_mkname(char *name, char *dcss_name) 95dcss_mkname(char *name, char *dcss_name)
93{ 96{
94 int i; 97 int i;
@@ -303,6 +306,29 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
303 goto out_free; 306 goto out_free;
304 } 307 }
305 308
309 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
310 if (seg->res == NULL) {
311 rc = -ENOMEM;
312 goto out_shared;
313 }
314 seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
315 seg->res->start = seg->start_addr;
316 seg->res->end = seg->end;
317 memcpy(&seg->res_name, seg->dcss_name, 8);
318 EBCASC(seg->res_name, 8);
319 seg->res_name[8] = '\0';
320 strncat(seg->res_name, " (DCSS)", 7);
321 seg->res->name = seg->res_name;
322 rc = seg->vm_segtype;
323 if (rc == SEG_TYPE_SC ||
324 ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
325 seg->res->flags |= IORESOURCE_READONLY;
326 if (request_resource(&iomem_resource, seg->res)) {
327 rc = -EBUSY;
328 kfree(seg->res);
329 goto out_shared;
330 }
331
306 if (do_nonshared) 332 if (do_nonshared)
307 dcss_command = DCSS_LOADNSR; 333 dcss_command = DCSS_LOADNSR;
308 else 334 else
@@ -316,12 +342,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
316 rc = dcss_diag_translate_rc (seg->end); 342 rc = dcss_diag_translate_rc (seg->end);
317 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 343 dcss_diag(DCSS_PURGESEG, seg->dcss_name,
318 &seg->start_addr, &seg->end); 344 &seg->start_addr, &seg->end);
319 goto out_shared; 345 goto out_resource;
320 } 346 }
321 seg->do_nonshared = do_nonshared; 347 seg->do_nonshared = do_nonshared;
322 atomic_set(&seg->ref_count, 1); 348 atomic_set(&seg->ref_count, 1);
323 list_add(&seg->list, &dcss_list); 349 list_add(&seg->list, &dcss_list);
324 rc = seg->vm_segtype;
325 *addr = seg->start_addr; 350 *addr = seg->start_addr;
326 *end = seg->end; 351 *end = seg->end;
327 if (do_nonshared) 352 if (do_nonshared)
@@ -329,12 +354,16 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
329 "type %s in non-shared mode\n", name, 354 "type %s in non-shared mode\n", name,
330 (void*)seg->start_addr, (void*)seg->end, 355 (void*)seg->start_addr, (void*)seg->end,
331 segtype_string[seg->vm_segtype]); 356 segtype_string[seg->vm_segtype]);
332 else 357 else {
333 PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " 358 PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
334 "type %s in shared mode\n", name, 359 "type %s in shared mode\n", name,
335 (void*)seg->start_addr, (void*)seg->end, 360 (void*)seg->start_addr, (void*)seg->end,
336 segtype_string[seg->vm_segtype]); 361 segtype_string[seg->vm_segtype]);
362 }
337 goto out; 363 goto out;
364 out_resource:
365 release_resource(seg->res);
366 kfree(seg->res);
338 out_shared: 367 out_shared:
339 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 368 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
340 out_free: 369 out_free:
@@ -401,6 +430,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
401 * -ENOENT : no such segment (segment gone!) 430 * -ENOENT : no such segment (segment gone!)
402 * -EAGAIN : segment is in use by other exploiters, try later 431 * -EAGAIN : segment is in use by other exploiters, try later
403 * -EINVAL : no segment with the given name is currently loaded - name invalid 432 * -EINVAL : no segment with the given name is currently loaded - name invalid
433 * -EBUSY : segment can temporarily not be used (overlaps with dcss)
404 * 0 : operation succeeded 434 * 0 : operation succeeded
405 */ 435 */
406int 436int
@@ -428,12 +458,24 @@ segment_modify_shared (char *name, int do_nonshared)
428 rc = -EAGAIN; 458 rc = -EAGAIN;
429 goto out_unlock; 459 goto out_unlock;
430 } 460 }
431 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 461 release_resource(seg->res);
432 &dummy, &dummy); 462 if (do_nonshared) {
433 if (do_nonshared)
434 dcss_command = DCSS_LOADNSR; 463 dcss_command = DCSS_LOADNSR;
435 else 464 seg->res->flags &= ~IORESOURCE_READONLY;
436 dcss_command = DCSS_LOADNOLY; 465 } else {
466 dcss_command = DCSS_LOADNOLY;
467 if (seg->vm_segtype == SEG_TYPE_SR ||
468 seg->vm_segtype == SEG_TYPE_ER)
469 seg->res->flags |= IORESOURCE_READONLY;
470 }
471 if (request_resource(&iomem_resource, seg->res)) {
472 PRINT_WARN("segment_modify_shared: could not reload segment %s"
473 " - overlapping resources\n", name);
474 rc = -EBUSY;
475 kfree(seg->res);
476 goto out_del;
477 }
478 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
437 diag_cc = dcss_diag(dcss_command, seg->dcss_name, 479 diag_cc = dcss_diag(dcss_command, seg->dcss_name,
438 &seg->start_addr, &seg->end); 480 &seg->start_addr, &seg->end);
439 if (diag_cc > 1) { 481 if (diag_cc > 1) {
@@ -446,9 +488,9 @@ segment_modify_shared (char *name, int do_nonshared)
446 rc = 0; 488 rc = 0;
447 goto out_unlock; 489 goto out_unlock;
448 out_del: 490 out_del:
491 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
449 list_del(&seg->list); 492 list_del(&seg->list);
450 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 493 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
451 &dummy, &dummy);
452 kfree(seg); 494 kfree(seg);
453 out_unlock: 495 out_unlock:
454 mutex_unlock(&dcss_lock); 496 mutex_unlock(&dcss_lock);
@@ -478,6 +520,8 @@ segment_unload(char *name)
478 } 520 }
479 if (atomic_dec_return(&seg->ref_count) != 0) 521 if (atomic_dec_return(&seg->ref_count) != 0)
480 goto out_unlock; 522 goto out_unlock;
523 release_resource(seg->res);
524 kfree(seg->res);
481 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 525 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
482 list_del(&seg->list); 526 list_del(&seg->list);
483 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); 527 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
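
The extmem.c hunks above attach a struct resource to every loaded DCSS so the segment shows up in /proc/iomem and overlapping requests are refused. Reduced to a hedged sketch (the function name is hypothetical; the real error paths also purge the segment):

	#include <linux/ioport.h>
	#include <linux/slab.h>

	/* Publish a loaded segment in the iomem resource tree; NULL on failure. */
	static struct resource *dcss_register_res(unsigned long start, unsigned long end,
						  const char *name, int read_only)
	{
		struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

		if (!res)
			return NULL;
		res->name  = name;
		res->start = start;
		res->end   = end;
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		if (read_only)
			res->flags |= IORESOURCE_READONLY;
		if (request_resource(&iomem_resource, res)) {	/* overlap -> refuse */
			kfree(res);
			return NULL;
		}
		return res;
	}
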
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cd85e34d8703..9ff143e87746 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,7 +52,7 @@ extern int sysctl_userprocess_debug;
52extern void die(const char *,struct pt_regs *,long); 52extern void die(const char *,struct pt_regs *,long);
53 53
54#ifdef CONFIG_KPROBES 54#ifdef CONFIG_KPROBES
55ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); 55static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
56int register_page_fault_notifier(struct notifier_block *nb) 56int register_page_fault_notifier(struct notifier_block *nb)
57{ 57{
58 return atomic_notifier_chain_register(&notify_page_fault_chain, nb); 58 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code)
137 137
138/* 138/*
139 * Check which address space the address belongs to. 139 * Check which address space the address belongs to.
140 * Returns 1 for user space and 0 for kernel space. 140 * May return 1 or 2 for user space and 0 for kernel space.
141 * Returns 2 for user space in primary addressing mode with
142 * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
141 */ 143 */
142static inline int check_user_space(struct pt_regs *regs, int error_code) 144static inline int check_user_space(struct pt_regs *regs, int error_code)
143{ 145{
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code)
154 return __check_access_register(regs, error_code); 156 return __check_access_register(regs, error_code);
155 if (descriptor == 2) 157 if (descriptor == 2)
156 return current->thread.mm_segment.ar4; 158 return current->thread.mm_segment.ar4;
157 return descriptor != 0; 159 return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
158} 160}
159 161
160/* 162/*
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
183 force_sig_info(SIGSEGV, &si, current); 185 force_sig_info(SIGSEGV, &si, current);
184} 186}
185 187
188#ifdef CONFIG_S390_EXEC_PROTECT
189extern long sys_sigreturn(struct pt_regs *regs);
190extern long sys_rt_sigreturn(struct pt_regs *regs);
191extern long sys32_sigreturn(struct pt_regs *regs);
192extern long sys32_rt_sigreturn(struct pt_regs *regs);
193
194static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
195 int rt)
196{
197 up_read(&mm->mmap_sem);
198 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
199#ifdef CONFIG_COMPAT
200 if (test_tsk_thread_flag(current, TIF_31BIT)) {
201 if (rt)
202 sys32_rt_sigreturn(regs);
203 else
204 sys32_sigreturn(regs);
205 return;
206 }
207#endif /* CONFIG_COMPAT */
208 if (rt)
209 sys_rt_sigreturn(regs);
210 else
211 sys_sigreturn(regs);
212 return;
213}
214
215static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
216 unsigned long address, unsigned long error_code)
217{
218 pgd_t *pgd;
219 pmd_t *pmd;
220 pte_t *pte;
221 u16 *instruction;
222 unsigned long pfn, uaddr = regs->psw.addr;
223
224 spin_lock(&mm->page_table_lock);
225 pgd = pgd_offset(mm, uaddr);
226 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
227 goto out_fault;
228 pmd = pmd_offset(pgd, uaddr);
229 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
230 goto out_fault;
 231	pte = pte_offset_map(pmd, uaddr);
232 if (!pte || !pte_present(*pte))
233 goto out_fault;
234 pfn = pte_pfn(*pte);
235 if (!pfn_valid(pfn))
236 goto out_fault;
237 spin_unlock(&mm->page_table_lock);
238
239 instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
240 if (*instruction == 0x0a77)
241 do_sigreturn(mm, regs, 0);
242 else if (*instruction == 0x0aad)
243 do_sigreturn(mm, regs, 1);
244 else {
245 printk("- XXX - do_exception: task = %s, primary, NO EXEC "
246 "-> SIGSEGV\n", current->comm);
247 up_read(&mm->mmap_sem);
248 current->thread.prot_addr = address;
249 current->thread.trap_no = error_code;
250 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
251 }
252 return 0;
253out_fault:
254 spin_unlock(&mm->page_table_lock);
255 return -EFAULT;
256}
257#endif /* CONFIG_S390_EXEC_PROTECT */
258
186/* 259/*
187 * This routine handles page faults. It determines the address, 260 * This routine handles page faults. It determines the address,
188 * and the problem, and then passes it off to one of the appropriate 261 * and the problem, and then passes it off to one of the appropriate
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
260 vma = find_vma(mm, address); 333 vma = find_vma(mm, address);
261 if (!vma) 334 if (!vma)
262 goto bad_area; 335 goto bad_area;
336
337#ifdef CONFIG_S390_EXEC_PROTECT
338 if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
339 if (!signal_return(mm, regs, address, error_code))
340 /*
341 * signal_return() has done an up_read(&mm->mmap_sem)
342 * if it returns 0.
343 */
344 return;
345#endif
346
263 if (vma->vm_start <= address) 347 if (vma->vm_start <= address)
264 goto good_area; 348 goto good_area;
265 if (!(vma->vm_flags & VM_GROWSDOWN)) 349 if (!(vma->vm_flags & VM_GROWSDOWN))
@@ -452,8 +536,7 @@ void pfault_fini(void)
452 : : "a" (&refbk), "m" (refbk) : "cc"); 536 : : "a" (&refbk), "m" (refbk) : "cc");
453} 537}
454 538
455asmlinkage void 539static void pfault_interrupt(__u16 error_code)
456pfault_interrupt(__u16 error_code)
457{ 540{
458 struct task_struct *tsk; 541 struct task_struct *tsk;
459 __u16 subcode; 542 __u16 subcode;
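
The two magic halfwords tested in signal_return() above are s390 svc instructions: opcode 0x0a in the high byte, syscall number in the low byte. Assuming the usual s390 syscall numbers (119 for sigreturn, 173 for rt_sigreturn), the check decodes as:

	#define SVC_OPCODE	0x0a00
	#define NR_SIGRETURN	119		/* 0x0a77 == "svc 119" */
	#define NR_RT_SIGRETURN	173		/* 0x0aad == "svc 173" */

	static int is_sigreturn_insn(unsigned short insn)
	{
		return insn == (SVC_OPCODE | NR_SIGRETURN) ||
		       insn == (SVC_OPCODE | NR_RT_SIGRETURN);
	}
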
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 4bb21be3b007..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -25,7 +25,7 @@
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26#include <linux/pfn.h> 26#include <linux/pfn.h>
27#include <linux/poison.h> 27#include <linux/poison.h>
28 28#include <linux/initrd.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void)
95 pte_t new_pte; 95 pte_t new_pte;
96 unsigned long address, end; 96 unsigned long address, end;
97 97
98 address = ((unsigned long)&__start_rodata) & PAGE_MASK; 98 address = ((unsigned long)&_stext) & PAGE_MASK;
99 end = PFN_ALIGN((unsigned long)&__end_rodata); 99 end = PFN_ALIGN((unsigned long)&_eshared);
100 100
101 for (; address < end; address += PAGE_SIZE) { 101 for (; address < end; address += PAGE_SIZE) {
102 pgd = pgd_offset_k(address); 102 pgd = pgd_offset_k(address);
103 pmd = pmd_offset(pgd, address); 103 pmd = pmd_offset(pgd, address);
104 pte = pte_offset_kernel(pmd, address); 104 pte = pte_offset_kernel(pmd, address);
105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); 105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
106 set_pte(pte, new_pte); 106 *pte = new_pte;
107 } 107 }
108} 108}
109 109
110extern void vmem_map_init(void);
111
112/* 110/*
113 * paging_init() sets up the page tables 111 * paging_init() sets up the page tables
114 */ 112 */
@@ -125,11 +123,11 @@ void __init paging_init(void)
125#ifdef CONFIG_64BIT 123#ifdef CONFIG_64BIT
126 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; 124 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
127 for (i = 0; i < PTRS_PER_PGD; i++) 125 for (i = 0; i < PTRS_PER_PGD; i++)
128 pgd_clear(pg_dir + i); 126 pgd_clear_kernel(pg_dir + i);
129#else 127#else
130 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; 128 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
131 for (i = 0; i < PTRS_PER_PGD; i++) 129 for (i = 0; i < PTRS_PER_PGD; i++)
132 pmd_clear((pmd_t *)(pg_dir + i)); 130 pmd_clear_kernel((pmd_t *)(pg_dir + i));
133#endif 131#endif
134 vmem_map_init(); 132 vmem_map_init();
135 setup_ro_region(); 133 setup_ro_region();
@@ -174,10 +172,8 @@ void __init mem_init(void)
174 datasize >>10, 172 datasize >>10,
175 initsize >> 10); 173 initsize >> 10);
176 printk("Write protected kernel read-only data: %#lx - %#lx\n", 174 printk("Write protected kernel read-only data: %#lx - %#lx\n",
177 (unsigned long)&__start_rodata, 175 (unsigned long)&_stext,
178 PFN_ALIGN((unsigned long)&__end_rodata) - 1); 176 PFN_ALIGN((unsigned long)&_eshared) - 1);
179 printk("Virtual memmap size: %ldk\n",
180 (max_pfn * sizeof(struct page)) >> 10);
181} 177}
182 178
183void free_initmem(void) 179void free_initmem(void)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index cd3d93e8c211..92a565190028 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
82 if (!pmd) 82 if (!pmd)
83 return NULL; 83 return NULL;
84 for (i = 0; i < PTRS_PER_PMD; i++) 84 for (i = 0; i < PTRS_PER_PMD; i++)
85 pmd_clear(pmd + i); 85 pmd_clear_kernel(pmd + i);
86 return pmd; 86 return pmd;
87} 87}
88 88
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void)
97 return NULL; 97 return NULL;
98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY; 98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
99 for (i = 0; i < PTRS_PER_PTE; i++) 99 for (i = 0; i < PTRS_PER_PTE; i++)
100 set_pte(pte + i, empty_pte); 100 pte[i] = empty_pte;
101 return pte; 101 return pte;
102} 102}
103 103
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
119 pm_dir = vmem_pmd_alloc(); 119 pm_dir = vmem_pmd_alloc();
120 if (!pm_dir) 120 if (!pm_dir)
121 goto out; 121 goto out;
122 pgd_populate(&init_mm, pg_dir, pm_dir); 122 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
123 } 123 }
124 124
125 pm_dir = pmd_offset(pg_dir, address); 125 pm_dir = pmd_offset(pg_dir, address);
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
132 132
133 pt_dir = pte_offset_kernel(pm_dir, address); 133 pt_dir = pte_offset_kernel(pm_dir, address);
134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); 134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
135 set_pte(pt_dir, pte); 135 *pt_dir = pte;
136 } 136 }
137 ret = 0; 137 ret = 0;
138out: 138out:
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
161 if (pmd_none(*pm_dir)) 161 if (pmd_none(*pm_dir))
162 continue; 162 continue;
163 pt_dir = pte_offset_kernel(pm_dir, address); 163 pt_dir = pte_offset_kernel(pm_dir, address);
164 set_pte(pt_dir, pte); 164 *pt_dir = pte;
165 } 165 }
166 flush_tlb_kernel_range(start, start + size); 166 flush_tlb_kernel_range(start, start + size);
167} 167}
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
191 pm_dir = vmem_pmd_alloc(); 191 pm_dir = vmem_pmd_alloc();
192 if (!pm_dir) 192 if (!pm_dir)
193 goto out; 193 goto out;
194 pgd_populate(&init_mm, pg_dir, pm_dir); 194 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
195 } 195 }
196 196
197 pm_dir = pmd_offset(pg_dir, address); 197 pm_dir = pmd_offset(pg_dir, address);
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
210 if (!new_page) 210 if (!new_page)
211 goto out; 211 goto out;
212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
213 set_pte(pt_dir, pte); 213 *pt_dir = pte;
214 } 214 }
215 } 215 }
216 ret = 0; 216 ret = 0;
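
For orientation, the loop those vmem.c hunks modify walks the kernel page tables one page at a time; the diff switches it from set_pte()/pgd_populate() to direct pte assignment and the *_kernel variants. A compressed sketch of the walk, with allocation-failure handling and the pte-table allocation step omitted:

	static void vmem_map_pages_sketch(unsigned long start, unsigned long size)
	{
		unsigned long address;
		pgd_t *pg_dir;
		pmd_t *pm_dir;
		pte_t *pt_dir;

		for (address = start; address < start + size; address += PAGE_SIZE) {
			pg_dir = pgd_offset_k(address);
			if (pgd_none(*pg_dir))
				pgd_populate_kernel(&init_mm, pg_dir, vmem_pmd_alloc());
			pm_dir = pmd_offset(pg_dir, address);
			/* the real code also allocates a pte table here when needed */
			pt_dir = pte_offset_kernel(pm_dir, address);
			*pt_dir = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		}
	}
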
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1bee94..bd30d138113f 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
32 32
33static int nvidia_hpet_detected __initdata; 33static int nvidia_hpet_detected __initdata;
34 34
35static int __init nvidia_hpet_check(unsigned long phys, unsigned long size) 35static int __init nvidia_hpet_check(struct acpi_table_header *header)
36{ 36{
37 nvidia_hpet_detected = 1; 37 nvidia_hpet_detected = 1;
38 return 0; 38 return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
53 return; 53 return;
54 54
55 nvidia_hpet_detected = 0; 55 nvidia_hpet_detected = 0;
56 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 56 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
57 if (nvidia_hpet_detected == 0) { 57 if (nvidia_hpet_detected == 0) {
58 acpi_skip_timer_override = 1; 58 acpi_skip_timer_override = 1;
59 printk(KERN_INFO "Nvidia board " 59 printk(KERN_INFO "Nvidia board "
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index b007433f96bb..0b3603adf56d 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
58 * Some x86_64 machines use physical APIC mode regardless of how many 58 * Some x86_64 machines use physical APIC mode regardless of how many
59 * procs/clusters are present (x86_64 ES7000 is an example). 59 * procs/clusters are present (x86_64 ES7000 is an example).
60 */ 60 */
61 if (acpi_fadt.revision > FADT2_REVISION_ID) 61 if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
62 if (acpi_fadt.force_apic_physical_destination_mode) { 62 if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
63 genapic = &apic_cluster; 63 genapic = &apic_cluster;
64 goto print; 64 goto print;
65 } 65 }
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 08072568847d..50dd8bef850e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
798 return gsi; 798 return gsi;
799 799
800 /* Don't set up the ACPI SCI because it's already set up */ 800 /* Don't set up the ACPI SCI because it's already set up */
801 if (acpi_fadt.sci_int == gsi) 801 if (acpi_gbl_FADT.sci_interrupt == gsi)
802 return gsi; 802 return gsi;
803 803
804 ioapic = mp_find_ioapic(gsi); 804 ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa794b9..eb18be5a6569 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
29 .dma_supported = NULL, 29 .dma_supported = NULL,
30}; 30};
31 31
32void pci_swiotlb_init(void) 32void __init pci_swiotlb_init(void)
33{ 33{
34 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 34 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
35 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN) 35 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 5cc76d0d331f..335cc91c49b7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
498{ 498{
499 unsigned int year, mon, day, hour, min, sec; 499 unsigned int year, mon, day, hour, min, sec;
500 unsigned long flags; 500 unsigned long flags;
501 unsigned extyear = 0; 501 unsigned century = 0;
502 502
503 spin_lock_irqsave(&rtc_lock, flags); 503 spin_lock_irqsave(&rtc_lock, flags);
504 504
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
510 mon = CMOS_READ(RTC_MONTH); 510 mon = CMOS_READ(RTC_MONTH);
511 year = CMOS_READ(RTC_YEAR); 511 year = CMOS_READ(RTC_YEAR);
512#ifdef CONFIG_ACPI 512#ifdef CONFIG_ACPI
513 if (acpi_fadt.revision >= FADT2_REVISION_ID && 513 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
514 acpi_fadt.century) 514 acpi_gbl_FADT.century)
515 extyear = CMOS_READ(acpi_fadt.century); 515 century = CMOS_READ(acpi_gbl_FADT.century);
516#endif 516#endif
517 } while (sec != CMOS_READ(RTC_SECONDS)); 517 } while (sec != CMOS_READ(RTC_SECONDS));
518 518
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
530 BCD_TO_BIN(mon); 530 BCD_TO_BIN(mon);
531 BCD_TO_BIN(year); 531 BCD_TO_BIN(year);
532 532
533 if (extyear) { 533 if (century) {
534 BCD_TO_BIN(extyear); 534 BCD_TO_BIN(century);
535 year += extyear; 535 year += century * 100;
536 printk(KERN_INFO "Extended CMOS year: %d\n", extyear); 536 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
537 } else { 537 } else {
538 /* 538 /*
 539 * x86-64 systems only exist since 2002. 539 * x86-64 systems only exist since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
954 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 954 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
955#ifdef CONFIG_ACPI 955#ifdef CONFIG_ACPI
956 /* But TSC doesn't tick in C3 so don't use it there */ 956 /* But TSC doesn't tick in C3 so don't use it there */
957 if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000) 957 if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
958 return 1; 958 return 1;
959#endif 959#endif
960 return 0; 960 return 0;
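
A quick worked example of the century fix above: with a BCD century register of 0x20 and a BCD year register of 0x07, BCD_TO_BIN yields 20 and 7; the old code computed year = 7 + 20 = 27, while the corrected code computes year = 7 + 20 * 100 = 2007.
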
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e150a218..2efe215fc76a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
101static __init int slit_valid(struct acpi_table_slit *slit) 101static __init int slit_valid(struct acpi_table_slit *slit)
102{ 102{
103 int i, j; 103 int i, j;
104 int d = slit->localities; 104 int d = slit->locality_count;
105 for (i = 0; i < d; i++) { 105 for (i = 0; i < d; i++) {
106 for (j = 0; j < d; j++) { 106 for (j = 0; j < d; j++) {
107 u8 val = slit->entry[d*i + j]; 107 u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
127 127
128/* Callback for Proximity Domain -> LAPIC mapping */ 128/* Callback for Proximity Domain -> LAPIC mapping */
129void __init 129void __init
130acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 130acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
131{ 131{
132 int pxm, node; 132 int pxm, node;
133 if (srat_disabled()) 133 if (srat_disabled())
134 return; 134 return;
135 if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { 135 if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
136 bad_srat(); 136 bad_srat();
137 return; 137 return;
138 } 138 }
139 if (pa->flags.enabled == 0) 139 if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
140 return; 140 return;
141 pxm = pa->proximity_domain; 141 pxm = pa->proximity_domain_lo;
142 node = setup_node(pxm); 142 node = setup_node(pxm);
143 if (node < 0) { 143 if (node < 0) {
144 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); 144 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
254 /* Looks good */ 254 /* Looks good */
255 255
256 if (nd->start == nd->end) { 256 if (nd->start == nd->end) {
257 nd->start = start; 257 nd->start = start;
258 nd->end = end; 258 nd->end = end;
259 changed = 1; 259 changed = 1;
260 } else { 260 } else {
261 if (nd->start == end) { 261 if (nd->start == end) {
262 nd->start = start; 262 nd->start = start;
263 changed = 1; 263 changed = 1;
264 } 264 }
265 if (nd->end == start) { 265 if (nd->end == start) {
266 nd->end = end; 266 nd->end = end;
267 changed = 1; 267 changed = 1;
268 } 268 }
269 if (!changed) 269 if (!changed)
270 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); 270 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
271 } 271 }
272 272
273 ret = update_end_of_memory(nd->end); 273 ret = update_end_of_memory(nd->end);
274 274
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
279 279
280/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ 280/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
281void __init 281void __init
282acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) 282acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
283{ 283{
284 struct bootnode *nd, oldnode; 284 struct bootnode *nd, oldnode;
285 unsigned long start, end; 285 unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
288 288
289 if (srat_disabled()) 289 if (srat_disabled())
290 return; 290 return;
291 if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) { 291 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
292 bad_srat(); 292 bad_srat();
293 return; 293 return;
294 } 294 }
295 if (ma->flags.enabled == 0) 295 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
296 return; 296 return;
297 if (ma->flags.hot_pluggable && !save_add_info()) 297
298 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
298 return; 299 return;
299 start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32); 300 start = ma->base_address;
300 end = start + (ma->length_lo | ((u64)ma->length_hi << 32)); 301 end = start + ma->length;
301 pxm = ma->proximity_domain; 302 pxm = ma->proximity_domain;
302 node = setup_node(pxm); 303 node = setup_node(pxm);
303 if (node < 0) { 304 if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
337 push_node_boundaries(node, nd->start >> PAGE_SHIFT, 338 push_node_boundaries(node, nd->start >> PAGE_SHIFT,
338 nd->end >> PAGE_SHIFT); 339 nd->end >> PAGE_SHIFT);
339 340
340 if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) { 341 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
342 (reserve_hotadd(node, start, end) < 0)) {
341 /* Ignore hotadd region. Undo damage */ 343 /* Ignore hotadd region. Undo damage */
342 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n"); 344 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
343 *nd = oldnode; 345 *nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
394 396
395 /* First clean up the node list */ 397 /* First clean up the node list */
396 for (i = 0; i < MAX_NUMNODES; i++) { 398 for (i = 0; i < MAX_NUMNODES; i++) {
397 cutoff_node(i, start, end); 399 cutoff_node(i, start, end);
398 if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) { 400 if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
399 unparse_node(i); 401 unparse_node(i);
400 node_set_offline(i); 402 node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
426 if (!node_online(i)) 428 if (!node_online(i))
427 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 429 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
428 430
429 for (i = 0; i < NR_CPUS; i++) { 431 for (i = 0; i < NR_CPUS; i++) {
430 if (cpu_to_node[i] == NUMA_NO_NODE) 432 if (cpu_to_node[i] == NUMA_NO_NODE)
431 continue; 433 continue;
432 if (!node_isset(cpu_to_node[i], nodes_parsed)) 434 if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
461 463
462 if (!acpi_slit) 464 if (!acpi_slit)
463 return a == b ? 10 : 20; 465 return a == b ? 10 : 20;
464 index = acpi_slit->localities * node_to_pxm(a); 466 index = acpi_slit->locality_count * node_to_pxm(a);
465 return acpi_slit->entry[index + node_to_pxm(b)]; 467 return acpi_slit->entry[index + node_to_pxm(b)];
466} 468}
467 469
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b2800a62..faabb6e87f12 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG 2 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
3 * 3 *
4 * This is an 64bit optimized version that always keeps the full mmconfig 4 * This is an 64bit optimized version that always keeps the full mmconfig
5 * space mapped. This allows lockless config space operation. 5 * space mapped. This allows lockless config space operation.
6 */ 6 */
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
25 25
26/* Static virtual mapping of the MMCONFIG aperture */ 26/* Static virtual mapping of the MMCONFIG aperture */
27struct mmcfg_virt { 27struct mmcfg_virt {
28 struct acpi_table_mcfg_config *cfg; 28 struct acpi_mcfg_allocation *cfg;
29 char __iomem *virt; 29 char __iomem *virt;
30}; 30};
31static struct mmcfg_virt *pci_mmcfg_virt; 31static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
33static char __iomem *get_virt(unsigned int seg, unsigned bus) 33static char __iomem *get_virt(unsigned int seg, unsigned bus)
34{ 34{
35 int cfg_num = -1; 35 int cfg_num = -1;
36 struct acpi_table_mcfg_config *cfg; 36 struct acpi_mcfg_allocation *cfg;
37 37
38 while (1) { 38 while (1) {
39 ++cfg_num; 39 ++cfg_num;
40 if (cfg_num >= pci_mmcfg_config_num) 40 if (cfg_num >= pci_mmcfg_config_num)
41 break; 41 break;
42 cfg = pci_mmcfg_virt[cfg_num].cfg; 42 cfg = pci_mmcfg_virt[cfg_num].cfg;
43 if (cfg->pci_segment_group_number != seg) 43 if (cfg->pci_segment != seg)
44 continue; 44 continue;
45 if ((cfg->start_bus_number <= bus) && 45 if ((cfg->start_bus_number <= bus) &&
46 (cfg->end_bus_number >= bus)) 46 (cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
52 this applies to all busses. */ 52 this applies to all busses. */
53 cfg = &pci_mmcfg_config[0]; 53 cfg = &pci_mmcfg_config[0];
54 if (pci_mmcfg_config_num == 1 && 54 if (pci_mmcfg_config_num == 1 &&
55 cfg->pci_segment_group_number == 0 && 55 cfg->pci_segment == 0 &&
56 (cfg->start_bus_number | cfg->end_bus_number) == 0) 56 (cfg->start_bus_number | cfg->end_bus_number) == 0)
57 return pci_mmcfg_virt[0].virt; 57 return pci_mmcfg_virt[0].virt;
58 58
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
170 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 170 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
171 return; 171 return;
172 172
173 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 173 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
174 if ((pci_mmcfg_config_num == 0) || 174 if ((pci_mmcfg_config_num == 0) ||
175 (pci_mmcfg_config == NULL) || 175 (pci_mmcfg_config == NULL) ||
176 (pci_mmcfg_config[0].base_address == 0)) 176 (pci_mmcfg_config[0].address == 0))
177 return; 177 return;
178 178
179 /* Only do this check when type 1 works. If it doesn't work 179 /* Only do this check when type 1 works. If it doesn't work
180 assume we run on a Mac and always use MCFG */ 180 assume we run on a Mac and always use MCFG */
181 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address, 181 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
182 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, 182 pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
183 E820_RESERVED)) { 183 E820_RESERVED)) {
184 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", 184 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
185 pci_mmcfg_config[0].base_address); 185 (unsigned long)pci_mmcfg_config[0].address);
186 printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); 186 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
187 return; 187 return;
188 } 188 }
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
194 } 194 }
195 for (i = 0; i < pci_mmcfg_config_num; ++i) { 195 for (i = 0; i < pci_mmcfg_config_num; ++i) {
196 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i]; 196 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
197 pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address, 197 pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
198 MMCONFIG_APER_MAX); 198 MMCONFIG_APER_MAX);
199 if (!pci_mmcfg_virt[i].virt) { 199 if (!pci_mmcfg_virt[i].virt) {
200 printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " 200 printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
201 "segment %d\n", 201 "segment %d\n",
202 pci_mmcfg_config[i].pci_segment_group_number); 202 pci_mmcfg_config[i].pci_segment);
203 return; 203 return;
204 } 204 }
205 printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address); 205 printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
206 (unsigned long)pci_mmcfg_config[i].address);
206 } 207 }
207 208
208 unreachable_devices(); 209 unreachable_devices();
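
For context, the pointer returned by get_virt() is indexed using the standard ECAM layout, so a config-space register address is formed roughly like this (helper name illustrative):

	/* bus << 20 | device << 15 | function << 12 | register, relative to the aperture */
	static inline char __iomem *mmcfg_addr(char __iomem *aperture, unsigned int bus,
					       unsigned int devfn, unsigned int reg)
	{
		return aperture + ((bus << 20) | (devfn << 12) | reg);
	}
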
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 92ba249f3a5b..918b4d845f93 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -74,14 +74,6 @@ config CRYPTO_SHA1
74 help 74 help
75 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 75 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
76 76
77config CRYPTO_SHA1_S390
78 tristate "SHA1 digest algorithm (s390)"
79 depends on S390
80 select CRYPTO_ALGAPI
81 help
82 This is the s390 hardware accelerated implementation of the
83 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
84
85config CRYPTO_SHA256 77config CRYPTO_SHA256
86 tristate "SHA256 digest algorithm" 78 tristate "SHA256 digest algorithm"
87 select CRYPTO_ALGAPI 79 select CRYPTO_ALGAPI
@@ -91,17 +83,6 @@ config CRYPTO_SHA256
91 This version of SHA implements a 256 bit hash with 128 bits of 83 This version of SHA implements a 256 bit hash with 128 bits of
92 security against collision attacks. 84 security against collision attacks.
93 85
94config CRYPTO_SHA256_S390
95 tristate "SHA256 digest algorithm (s390)"
96 depends on S390
97 select CRYPTO_ALGAPI
98 help
99 This is the s390 hardware accelerated implementation of the
100 SHA256 secure hash standard (DFIPS 180-2).
101
102 This version of SHA implements a 256 bit hash with 128 bits of
103 security against collision attacks.
104
105config CRYPTO_SHA512 86config CRYPTO_SHA512
106 tristate "SHA384 and SHA512 digest algorithms" 87 tristate "SHA384 and SHA512 digest algorithms"
107 select CRYPTO_ALGAPI 88 select CRYPTO_ALGAPI
@@ -187,14 +168,6 @@ config CRYPTO_DES
187 help 168 help
188 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 169 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
189 170
190config CRYPTO_DES_S390
191 tristate "DES and Triple DES cipher algorithms (s390)"
192 depends on S390
193 select CRYPTO_ALGAPI
194 select CRYPTO_BLKCIPHER
195 help
196 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
197
198config CRYPTO_BLOWFISH 171config CRYPTO_BLOWFISH
199 tristate "Blowfish cipher algorithm" 172 tristate "Blowfish cipher algorithm"
200 select CRYPTO_ALGAPI 173 select CRYPTO_ALGAPI
@@ -336,28 +309,6 @@ config CRYPTO_AES_X86_64
336 309
337 See <http://csrc.nist.gov/encryption/aes/> for more information. 310 See <http://csrc.nist.gov/encryption/aes/> for more information.
338 311
339config CRYPTO_AES_S390
340 tristate "AES cipher algorithms (s390)"
341 depends on S390
342 select CRYPTO_ALGAPI
343 select CRYPTO_BLKCIPHER
344 help
345 This is the s390 hardware accelerated implementation of the
346 AES cipher algorithms (FIPS-197). AES uses the Rijndael
347 algorithm.
348
349 Rijndael appears to be consistently a very good performer in
350 both hardware and software across a wide range of computing
351 environments regardless of its use in feedback or non-feedback
352 modes. Its key setup time is excellent, and its key agility is
353 good. Rijndael's very low memory requirements make it very well
354 suited for restricted-space environments, in which it also
355 demonstrates excellent performance. Rijndael's operations are
356 among the easiest to defend against power and timing attacks.
357
358 On s390 the System z9-109 currently only supports the key size
359 of 128 bit.
360
361config CRYPTO_CAST5 312config CRYPTO_CAST5
362 tristate "CAST5 (CAST-128) cipher algorithm" 313 tristate "CAST5 (CAST-128) cipher algorithm"
363 select CRYPTO_ALGAPI 314 select CRYPTO_ALGAPI
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f4f000abc4e9..20eacc2c9e0e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "ACPI (Advanced Configuration and Power Interface) Support" 5menu "ACPI (Advanced Configuration and Power Interface) Support"
6 depends on !X86_NUMAQ
6 depends on !X86_VISWS 7 depends on !X86_VISWS
7 depends on !IA64_HP_SIM 8 depends on !IA64_HP_SIM
8 depends on IA64 || X86 9 depends on IA64 || X86
@@ -77,6 +78,20 @@ config ACPI_SLEEP_PROC_SLEEP
77 Create /proc/acpi/sleep 78 Create /proc/acpi/sleep
78 Deprecated by /sys/power/state 79 Deprecated by /sys/power/state
79 80
81config ACPI_PROCFS
82 bool "Procfs interface (deprecated)"
83 depends on ACPI
84 default y
85 ---help---
 86	  The procfs interface for ACPI is made optional for backward compatibility.
 87	  As the same functions are duplicated in the sysfs interface
 88	  and this proc interface will be removed later on,
 89	  it is marked as deprecated.
90 ( /proc/acpi/debug_layer && debug_level are deprecated by
91 /sys/module/acpi/parameters/debug_layer && debug_level.
92 /proc/acpi/info is deprecated by
93 /sys/module/acpi/parameters/acpica_version )
94
80config ACPI_AC 95config ACPI_AC
81 tristate "AC Adapter" 96 tristate "AC Adapter"
82 depends on X86 97 depends on X86
@@ -107,7 +122,7 @@ config ACPI_BUTTON
107 122
108config ACPI_VIDEO 123config ACPI_VIDEO
109 tristate "Video" 124 tristate "Video"
110 depends on X86 125 depends on X86 && BACKLIGHT_CLASS_DEVICE
111 help 126 help
 112 This driver implements the ACPI Extensions For Display Adapters 127 This driver implements the ACPI Extensions For Display Adapters
 113 for integrated graphics devices on the motherboard, as specified in 128 for integrated graphics devices on the motherboard, as specified in
@@ -139,6 +154,13 @@ config ACPI_DOCK
139 help 154 help
140 This driver adds support for ACPI controlled docking stations 155 This driver adds support for ACPI controlled docking stations
141 156
157config ACPI_BAY
158 tristate "Removable Drive Bay (EXPERIMENTAL)"
159 depends on EXPERIMENTAL
160 help
161 This driver adds support for ACPI controlled removable drive
162 bays such as the IBM ultrabay or the Dell Module Bay.
163
142config ACPI_PROCESSOR 164config ACPI_PROCESSOR
143 tristate "Processor" 165 tristate "Processor"
144 default y 166 default y
@@ -186,19 +208,22 @@ config ACPI_ASUS
186 208
187 Note: display switching code is currently considered EXPERIMENTAL, 209 Note: display switching code is currently considered EXPERIMENTAL,
188 toying with these values may even lock your machine. 210 toying with these values may even lock your machine.
189 211
190 All settings are changed via /proc/acpi/asus directory entries. Owner 212 All settings are changed via /proc/acpi/asus directory entries. Owner
191 and group for these entries can be set with asus_uid and asus_gid 213 and group for these entries can be set with asus_uid and asus_gid
192 parameters. 214 parameters.
193 215
194 More information and a userspace daemon for handling the extra buttons 216 More information and a userspace daemon for handling the extra buttons
 195 are available at <http://sourceforge.net/projects/acpi4asus/>. 217 are available at <http://sourceforge.net/projects/acpi4asus/>.
196 218
197 If you have an ACPI-compatible ASUS laptop, say Y or M here. This 219 If you have an ACPI-compatible ASUS laptop, say Y or M here. This
198 driver is still under development, so if your laptop is unsupported or 220 driver is still under development, so if your laptop is unsupported or
 199 something does not work quite as expected, please use the mailing list 221 something does not work quite as expected, please use the mailing list
200 available on the above page (acpi4asus-user@lists.sourceforge.net) 222 available on the above page (acpi4asus-user@lists.sourceforge.net).
201 223
224 NOTE: This driver is deprecated and will probably be removed soon,
225 use asus-laptop instead.
226
202config ACPI_IBM 227config ACPI_IBM
203 tristate "IBM ThinkPad Laptop Extras" 228 tristate "IBM ThinkPad Laptop Extras"
204 depends on X86 229 depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bce7ca27b429..856c32bccacb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,13 +37,15 @@ endif
37 37
38obj-y += sleep/ 38obj-y += sleep/
39obj-y += bus.o glue.o 39obj-y += bus.o glue.o
40obj-y += scan.o
40obj-$(CONFIG_ACPI_AC) += ac.o 41obj-$(CONFIG_ACPI_AC) += ac.o
41obj-$(CONFIG_ACPI_BATTERY) += battery.o 42obj-$(CONFIG_ACPI_BATTERY) += battery.o
42obj-$(CONFIG_ACPI_BUTTON) += button.o 43obj-$(CONFIG_ACPI_BUTTON) += button.o
43obj-$(CONFIG_ACPI_EC) += ec.o 44obj-$(CONFIG_ACPI_EC) += ec.o
44obj-$(CONFIG_ACPI_FAN) += fan.o 45obj-$(CONFIG_ACPI_FAN) += fan.o
45obj-$(CONFIG_ACPI_DOCK) += dock.o 46obj-$(CONFIG_ACPI_DOCK) += dock.o
46obj-$(CONFIG_ACPI_VIDEO) += video.o 47obj-$(CONFIG_ACPI_BAY) += bay.o
48obj-$(CONFIG_ACPI_VIDEO) += video.o
47obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o 49obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o
48obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o 50obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
49obj-$(CONFIG_ACPI_POWER) += power.o 51obj-$(CONFIG_ACPI_POWER) += power.o
@@ -56,7 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
56obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o 58obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
57obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o 59obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o
58obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o 60obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
59obj-y += scan.o motherboard.o
60obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o 61obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
61obj-y += cm_sbs.o 62obj-y += cm_sbs.o
62obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o 63obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 396140bbbe57..31ad70a6e22e 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -26,7 +26,7 @@
26 * Pontus Fuchs - Helper functions, cleanup 26 * Pontus Fuchs - Helper functions, cleanup
27 * Johann Wiesner - Small compile fixes 27 * Johann Wiesner - Small compile fixes
28 * John Belmonte - ACPI code for Toshiba laptop was a good starting point. 28 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
29 * Éric Burghard - LED display support for W1N 29 * Éric Burghard - LED display support for W1N
30 * 30 *
31 */ 31 */
32 32
@@ -1128,7 +1128,6 @@ static int asus_model_match(char *model)
1128static int asus_hotk_get_info(void) 1128static int asus_hotk_get_info(void)
1129{ 1129{
1130 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1130 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1131 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
1132 union acpi_object *model = NULL; 1131 union acpi_object *model = NULL;
1133 int bsts_result; 1132 int bsts_result;
1134 char *string = NULL; 1133 char *string = NULL;
@@ -1142,11 +1141,9 @@ static int asus_hotk_get_info(void)
1142 * HID), this bit will be moved. A global variable asus_info contains 1141 * HID), this bit will be moved. A global variable asus_info contains
1143 * the DSDT header. 1142 * the DSDT header.
1144 */ 1143 */
1145 status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt); 1144 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
1146 if (ACPI_FAILURE(status)) 1145 if (ACPI_FAILURE(status))
1147 printk(KERN_WARNING " Couldn't get the DSDT table header\n"); 1146 printk(KERN_WARNING " Couldn't get the DSDT table header\n");
1148 else
1149 asus_info = dsdt.pointer;
1150 1147
1151 /* We have to write 0 on init this far for all ASUS models */ 1148 /* We have to write 0 on init this far for all ASUS models */
1152 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { 1149 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
@@ -1358,8 +1355,6 @@ static void __exit asus_acpi_exit(void)
1358 acpi_bus_unregister_driver(&asus_hotk_driver); 1355 acpi_bus_unregister_driver(&asus_hotk_driver);
1359 remove_proc_entry(PROC_ASUS, acpi_root_dir); 1356 remove_proc_entry(PROC_ASUS, acpi_root_dir);
1360 1357
1361 kfree(asus_info);
1362
1363 return; 1358 return;
1364} 1359}
1365 1360
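
The asus_acpi hunks track the reworked ACPICA table interface: acpi_get_table() now takes a table signature string and hands back a pointer to the core's own mapping of the table, so the driver drops its acpi_buffer copy of the DSDT and the kfree() of asus_info on exit. A minimal sketch of the new calling convention, with illustrative names, assuming only the acpi_get_table() signature used in the hunks above:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <acpi/acpi.h>		/* acpi_get_table(), ACPI_SIG_DSDT */

static struct acpi_table_header *dsdt_hdr;	/* illustrative name */

static int example_peek_at_dsdt(void)
{
	acpi_status status;

	/* The pointer returned belongs to ACPICA: nothing to kfree() */
	status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt_hdr);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	printk(KERN_INFO "DSDT OEM ID %.6s, revision %u\n",
	       dsdt_hdr->oem_id, dsdt_hdr->oem_revision);
	return 0;
}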
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5f43e0d14899..2f4521a48fe7 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -64,7 +64,7 @@ extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
64 64
65static int acpi_battery_add(struct acpi_device *device); 65static int acpi_battery_add(struct acpi_device *device);
66static int acpi_battery_remove(struct acpi_device *device, int type); 66static int acpi_battery_remove(struct acpi_device *device, int type);
67static int acpi_battery_resume(struct acpi_device *device, int status); 67static int acpi_battery_resume(struct acpi_device *device);
68 68
69static struct acpi_driver acpi_battery_driver = { 69static struct acpi_driver acpi_battery_driver = {
70 .name = ACPI_BATTERY_DRIVER_NAME, 70 .name = ACPI_BATTERY_DRIVER_NAME,
@@ -753,7 +753,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
753} 753}
754 754
755/* this is needed to learn about changes made in suspended state */ 755/* this is needed to learn about changes made in suspended state */
756static int acpi_battery_resume(struct acpi_device *device, int state) 756static int acpi_battery_resume(struct acpi_device *device)
757{ 757{
758 struct acpi_battery *battery; 758 struct acpi_battery *battery;
759 759
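
The battery hunks follow the ACPI driver-model change where the resume callback no longer receives a state argument. A minimal sketch of the callback shape a driver registers after this change, with illustrative names, assuming the .resume member of the driver ops used elsewhere in this tree:

#include <acpi/acpi_bus.h>	/* struct acpi_device, struct acpi_driver */

static int example_resume(struct acpi_device *device)
{
	/* refresh any state the firmware may have changed while suspended */
	return 0;
}

static struct acpi_driver example_driver = {
	.name  = "example",
	.class = "example",
	.ids   = "EXMPL01",	/* illustrative HID */
	.ops   = {
		.resume = example_resume,
	},
};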
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
new file mode 100644
index 000000000000..667fa1dfa1a3
--- /dev/null
+++ b/drivers/acpi/bay.c
@@ -0,0 +1,490 @@
1/*
2 * bay.c - ACPI removable drive bay driver
3 *
4 * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/notifier.h>
29#include <acpi/acpi_bus.h>
30#include <acpi/acpi_drivers.h>
31#include <linux/seq_file.h>
32#include <asm/uaccess.h>
33#include <linux/platform_device.h>
34
35#define ACPI_BAY_DRIVER_NAME "ACPI Removable Drive Bay Driver"
36
37ACPI_MODULE_NAME("bay")
38MODULE_AUTHOR("Kristen Carlson Accardi");
39MODULE_DESCRIPTION(ACPI_BAY_DRIVER_NAME);
40MODULE_LICENSE("GPL");
41#define ACPI_BAY_CLASS "bay"
42#define ACPI_BAY_COMPONENT 0x10000000
43#define _COMPONENT ACPI_BAY_COMPONENT
44#define bay_dprintk(h,s) {\
45 char prefix[80] = {'\0'};\
46 struct acpi_buffer buffer = {sizeof(prefix), prefix};\
47 acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
48 printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
49static void bay_notify(acpi_handle handle, u32 event, void *data);
50static int acpi_bay_add(struct acpi_device *device);
51static int acpi_bay_remove(struct acpi_device *device, int type);
52
53static struct acpi_driver acpi_bay_driver = {
54 .name = ACPI_BAY_DRIVER_NAME,
55 .class = ACPI_BAY_CLASS,
56 .ids = ACPI_BAY_HID,
57 .ops = {
58 .add = acpi_bay_add,
59 .remove = acpi_bay_remove,
60 },
61};
62
63struct bay {
64 acpi_handle handle;
65 char *name;
66 struct list_head list;
67 struct platform_device *pdev;
68};
69
70static LIST_HEAD(drive_bays);
71
72
73/*****************************************************************************
74 * Drive Bay functions *
75 *****************************************************************************/
76/**
77 * is_ejectable - see if a device is ejectable
78 * @handle: acpi handle of the device
79 *
80 * If an acpi object has a _EJ0 method, then it is ejectable
81 */
82static int is_ejectable(acpi_handle handle)
83{
84 acpi_status status;
85 acpi_handle tmp;
86
87 status = acpi_get_handle(handle, "_EJ0", &tmp);
88 if (ACPI_FAILURE(status))
89 return 0;
90 return 1;
91}
92
93/**
94 * bay_present - see if the bay device is present
95 * @bay: the drive bay
96 *
97 * execute the _STA method.
98 */
99static int bay_present(struct bay *bay)
100{
101 unsigned long sta;
102 acpi_status status;
103
104 if (bay) {
105 status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta);
106 if (ACPI_SUCCESS(status) && sta)
107 return 1;
108 }
109 return 0;
110}
111
112/**
113 * eject_device - respond to an eject request
114 * @handle - the device to eject
115 *
116 * Call this devices _EJ0 method.
117 */
118static void eject_device(acpi_handle handle)
119{
120 struct acpi_object_list arg_list;
121 union acpi_object arg;
122
123 bay_dprintk(handle, "Ejecting device");
124
125 arg_list.count = 1;
126 arg_list.pointer = &arg;
127 arg.type = ACPI_TYPE_INTEGER;
128 arg.integer.value = 1;
129
130 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
131 &arg_list, NULL)))
132 pr_debug("Failed to evaluate _EJ0!\n");
133}
134
135/*
136 * show_present - read method for "present" file in sysfs
137 */
138static ssize_t show_present(struct device *dev,
139 struct device_attribute *attr, char *buf)
140{
141 struct bay *bay = dev_get_drvdata(dev);
142 return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
143
144}
145DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
146
147/*
148 * write_eject - write method for "eject" file in sysfs
149 */
150static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
151 const char *buf, size_t count)
152{
153 struct bay *bay = dev_get_drvdata(dev);
154
155 if (!count)
156 return -EINVAL;
157
158 eject_device(bay->handle);
159 return count;
160}
161DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
162
163/**
164 * is_ata - see if a device is an ata device
165 * @handle: acpi handle of the device
166 *
167 * If an acpi object has one of 4 ATA ACPI methods defined,
168 * then it is an ATA device
169 */
170static int is_ata(acpi_handle handle)
171{
172 acpi_handle tmp;
173
174 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
175 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
176 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
177 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
178 return 1;
179
180 return 0;
181}
182
183/**
184 * parent_is_ata(acpi_handle handle)
185 *
186 */
187static int parent_is_ata(acpi_handle handle)
188{
189 acpi_handle phandle;
190
191 if (acpi_get_parent(handle, &phandle))
192 return 0;
193
194 return is_ata(phandle);
195}
196
197/**
198 * is_ejectable_bay - see if a device is an ejectable drive bay
199 * @handle: acpi handle of the device
200 *
201 * If an acpi object is ejectable and has one of the ACPI ATA
202 * methods defined, then we can safely call it an ejectable
203 * drive bay
204 */
205static int is_ejectable_bay(acpi_handle handle)
206{
207 if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle))
208 return 1;
209 return 0;
210}
211
212/**
213 * eject_removable_drive - try to eject this drive
214 * @dev : the device structure of the drive
215 *
216 * If a device is a removable drive that requires an _EJ0 method
217 * to be executed in order to safely remove from the system, do
218 * it. ATM - always returns success
219 */
220int eject_removable_drive(struct device *dev)
221{
222 acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
223
224 if (handle) {
225 bay_dprintk(handle, "Got device handle");
226 if (is_ejectable_bay(handle))
227 eject_device(handle);
228 } else {
229 printk("No acpi handle for device\n");
230 }
231
232 /* should I return an error code? */
233 return 0;
234}
235EXPORT_SYMBOL_GPL(eject_removable_drive);
236
237static int acpi_bay_add(struct acpi_device *device)
238{
239 bay_dprintk(device->handle, "adding bay device");
240 strcpy(acpi_device_name(device), "Dockable Bay");
241 strcpy(acpi_device_class(device), "bay");
242 return 0;
243}
244
245static int acpi_bay_add_fs(struct bay *bay)
246{
247 int ret;
248 struct device *dev = &bay->pdev->dev;
249
250 ret = device_create_file(dev, &dev_attr_present);
251 if (ret)
252 goto add_fs_err;
253 ret = device_create_file(dev, &dev_attr_eject);
254 if (ret) {
255 device_remove_file(dev, &dev_attr_present);
256 goto add_fs_err;
257 }
258 return 0;
259
260 add_fs_err:
261 bay_dprintk(bay->handle, "Error adding sysfs files\n");
262 return ret;
263}
264
265static void acpi_bay_remove_fs(struct bay *bay)
266{
267 struct device *dev = &bay->pdev->dev;
268
269 /* cleanup sysfs */
270 device_remove_file(dev, &dev_attr_present);
271 device_remove_file(dev, &dev_attr_eject);
272}
273
274static int bay_is_dock_device(acpi_handle handle)
275{
276 acpi_handle parent;
277
278 acpi_get_parent(handle, &parent);
279
280	/* if the device or its parent is dependent on the
281 * dock, then we are a dock device
282 */
283 return (is_dock_device(handle) || is_dock_device(parent));
284}
285
286static int bay_add(acpi_handle handle, int id)
287{
288 acpi_status status;
289 struct bay *new_bay;
290 struct platform_device *pdev;
291 struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL};
292 acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer);
293
294 bay_dprintk(handle, "Adding notify handler");
295
296 /*
297 * Initialize bay device structure
298 */
299	new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC);
300 INIT_LIST_HEAD(&new_bay->list);
301 new_bay->handle = handle;
302 new_bay->name = (char *)nbuffer.pointer;
303
304 /* initialize platform device stuff */
305 pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
306 if (pdev == NULL) {
307 printk(KERN_ERR PREFIX "Error registering bay device\n");
308 goto bay_add_err;
309 }
310 new_bay->pdev = pdev;
311 platform_set_drvdata(pdev, new_bay);
312
313 if (acpi_bay_add_fs(new_bay)) {
314 platform_device_unregister(new_bay->pdev);
315 goto bay_add_err;
316 }
317
318 /* register for events on this device */
319 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
320 bay_notify, new_bay);
321 if (ACPI_FAILURE(status)) {
322 printk(KERN_ERR PREFIX "Error installing bay notify handler\n");
323 }
324
325 /* if we are on a dock station, we should register for dock
326 * notifications.
327 */
328 if (bay_is_dock_device(handle)) {
329 bay_dprintk(handle, "Is dependent on dock\n");
330 register_hotplug_dock_device(handle, bay_notify, new_bay);
331 }
332 list_add(&new_bay->list, &drive_bays);
333 printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name);
334 return 0;
335
336bay_add_err:
337 kfree(new_bay->name);
338 kfree(new_bay);
339 return -ENODEV;
340}
341
342static int acpi_bay_remove(struct acpi_device *device, int type)
343{
344 /*** FIXME: do something here */
345 return 0;
346}
347
348/**
349 * bay_create_acpi_device - add new devices to acpi
350 * @handle - handle of the device to add
351 *
352 * This function will create a new acpi_device for the given
353 * handle if one does not exist already. This should cause
354 * acpi to scan for drivers for the given devices, and call
355 * matching driver's add routine.
356 *
357 * Returns a pointer to the acpi_device corresponding to the handle.
358 */
359static struct acpi_device * bay_create_acpi_device(acpi_handle handle)
360{
361 struct acpi_device *device = NULL;
362 struct acpi_device *parent_device;
363 acpi_handle parent;
364 int ret;
365
366 bay_dprintk(handle, "Trying to get device");
367 if (acpi_bus_get_device(handle, &device)) {
368 /*
369 * no device created for this object,
370 * so we should create one.
371 */
372 bay_dprintk(handle, "No device for handle");
373 acpi_get_parent(handle, &parent);
374 if (acpi_bus_get_device(parent, &parent_device))
375 parent_device = NULL;
376
377 ret = acpi_bus_add(&device, parent_device, handle,
378 ACPI_BUS_TYPE_DEVICE);
379 if (ret) {
380 pr_debug("error adding bus, %x\n",
381 -ret);
382 return NULL;
383 }
384 }
385 return device;
386}
387
388/**
389 * bay_notify - act upon an acpi bay notification
390 * @handle: the bay handle
391 * @event: the acpi event
392 * @data: our driver data struct
393 *
394 */
395static void bay_notify(acpi_handle handle, u32 event, void *data)
396{
397 struct acpi_device *dev;
398
399 bay_dprintk(handle, "Bay event");
400
401 switch(event) {
402 case ACPI_NOTIFY_BUS_CHECK:
403 printk("Bus Check\n");
404 case ACPI_NOTIFY_DEVICE_CHECK:
405 printk("Device Check\n");
406 dev = bay_create_acpi_device(handle);
407 if (dev)
408 acpi_bus_generate_event(dev, event, 0);
409 else
410 printk("No device for generating event\n");
411 /* wouldn't it be a good idea to just rescan SATA
412 * right here?
413 */
414 break;
415 case ACPI_NOTIFY_EJECT_REQUEST:
416 printk("Eject request\n");
417 dev = bay_create_acpi_device(handle);
418 if (dev)
419 acpi_bus_generate_event(dev, event, 0);
420 else
421			printk("No device for generating event\n");
422
423 /* wouldn't it be a good idea to just call the
424 * eject_device here if we were a SATA device?
425 */
426 break;
427 default:
428 printk("unknown event %d\n", event);
429 }
430}
431
432static acpi_status
433find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
434{
435 int *count = (int *)context;
436
437 /*
438 * there could be more than one ejectable bay.
439 * so, just return AE_OK always so that every object
440 * will be checked.
441 */
442 if (is_ejectable_bay(handle)) {
443 bay_dprintk(handle, "found ejectable bay");
444 if (!bay_add(handle, *count))
445 (*count)++;
446 }
447 return AE_OK;
448}
449
450static int __init bay_init(void)
451{
452 int bays = 0;
453
454 INIT_LIST_HEAD(&drive_bays);
455
456 /* look for dockable drive bays */
457 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
458 ACPI_UINT32_MAX, find_bay, &bays, NULL);
459
460 if (bays)
461 if ((acpi_bus_register_driver(&acpi_bay_driver) < 0))
462 printk(KERN_ERR "Unable to register bay driver\n");
463
464 if (!bays)
465 return -ENODEV;
466
467 return 0;
468}
469
470static void __exit bay_exit(void)
471{
472 struct bay *bay, *tmp;
473
474 list_for_each_entry_safe(bay, tmp, &drive_bays, list) {
475 if (is_dock_device(bay->handle))
476 unregister_hotplug_dock_device(bay->handle);
477 acpi_bay_remove_fs(bay);
478 acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY,
479 bay_notify);
480 platform_device_unregister(bay->pdev);
481 kfree(bay->name);
482 kfree(bay);
483 }
484
485 acpi_bus_unregister_driver(&acpi_bay_driver);
486}
487
488postcore_initcall(bay_init);
489module_exit(bay_exit);
490
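
bay.c exports eject_removable_drive() so a block or ATA driver holding the corresponding struct device can ask the bay to run _EJ0. A rough sketch of a caller, assuming only what the export above guarantees; the surrounding function is illustrative:

#include <linux/device.h>

extern int eject_removable_drive(struct device *dev);	/* from bay.c */

static void example_release_drive(struct device *dev)
{
	/* No-op unless the device sits in an ejectable ATA bay; the
	 * current implementation always returns 0. */
	if (eject_removable_drive(dev))
		dev_warn(dev, "bay eject failed\n");
}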
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f9c972b26f4f..f289fd41e77d 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -44,7 +44,7 @@ struct acpi_blacklist_item {
44 char oem_id[7]; 44 char oem_id[7];
45 char oem_table_id[9]; 45 char oem_table_id[9];
46 u32 oem_revision; 46 u32 oem_revision;
47 acpi_table_type table; 47 char *table;
48 enum acpi_blacklist_predicates oem_revision_predicate; 48 enum acpi_blacklist_predicates oem_revision_predicate;
49 char *reason; 49 char *reason;
50 u32 is_critical_error; 50 u32 is_critical_error;
@@ -56,18 +56,18 @@ struct acpi_blacklist_item {
56 */ 56 */
57static struct acpi_blacklist_item acpi_blacklist[] __initdata = { 57static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
58 /* Compaq Presario 1700 */ 58 /* Compaq Presario 1700 */
59 {"PTLTD ", " DSDT ", 0x06040000, ACPI_DSDT, less_than_or_equal, 59 {"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
60 "Multiple problems", 1}, 60 "Multiple problems", 1},
61 /* Sony FX120, FX140, FX150? */ 61 /* Sony FX120, FX140, FX150? */
62 {"SONY ", "U0 ", 0x20010313, ACPI_DSDT, less_than_or_equal, 62 {"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
63 "ACPI driver problem", 1}, 63 "ACPI driver problem", 1},
64 /* Compaq Presario 800, Insyde BIOS */ 64 /* Compaq Presario 800, Insyde BIOS */
65 {"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal, 65 {"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
66 "Does not use _REG to protect EC OpRegions", 1}, 66 "Does not use _REG to protect EC OpRegions", 1},
67 /* IBM 600E - _ADR should return 7, but it returns 1 */ 67 /* IBM 600E - _ADR should return 7, but it returns 1 */
68 {"IBM ", "TP600E ", 0x00000105, ACPI_DSDT, less_than_or_equal, 68 {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
69 "Incorrect _ADR", 1}, 69 "Incorrect _ADR", 1},
70 {"ASUS\0\0", "P2B-S ", 0, ACPI_DSDT, all_versions, 70 {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
71 "Bogus PCI routing", 1}, 71 "Bogus PCI routing", 1},
72 72
73 {""} 73 {""}
@@ -79,7 +79,7 @@ static int __init blacklist_by_year(void)
79{ 79{
80 int year = dmi_get_year(DMI_BIOS_DATE); 80 int year = dmi_get_year(DMI_BIOS_DATE);
81 /* Doesn't exist? Likely an old system */ 81 /* Doesn't exist? Likely an old system */
82 if (year == -1) 82 if (year == -1)
83 return 1; 83 return 1;
84 /* 0? Likely a buggy new BIOS */ 84 /* 0? Likely a buggy new BIOS */
85 if (year == 0) 85 if (year == 0)
@@ -103,22 +103,21 @@ int __init acpi_blacklisted(void)
103{ 103{
104 int i = 0; 104 int i = 0;
105 int blacklisted = 0; 105 int blacklisted = 0;
106 struct acpi_table_header *table_header; 106 struct acpi_table_header table_header;
107 107
108 while (acpi_blacklist[i].oem_id[0] != '\0') { 108 while (acpi_blacklist[i].oem_id[0] != '\0') {
109 if (acpi_get_table_header_early 109 if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
110 (acpi_blacklist[i].table, &table_header)) {
111 i++; 110 i++;
112 continue; 111 continue;
113 } 112 }
114 113
115 if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) { 114 if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
116 i++; 115 i++;
117 continue; 116 continue;
118 } 117 }
119 118
120 if (strncmp 119 if (strncmp
121 (acpi_blacklist[i].oem_table_id, table_header->oem_table_id, 120 (acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
122 8)) { 121 8)) {
123 i++; 122 i++;
124 continue; 123 continue;
@@ -127,14 +126,14 @@ int __init acpi_blacklisted(void)
127 if ((acpi_blacklist[i].oem_revision_predicate == all_versions) 126 if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
128 || (acpi_blacklist[i].oem_revision_predicate == 127 || (acpi_blacklist[i].oem_revision_predicate ==
129 less_than_or_equal 128 less_than_or_equal
130 && table_header->oem_revision <= 129 && table_header.oem_revision <=
131 acpi_blacklist[i].oem_revision) 130 acpi_blacklist[i].oem_revision)
132 || (acpi_blacklist[i].oem_revision_predicate == 131 || (acpi_blacklist[i].oem_revision_predicate ==
133 greater_than_or_equal 132 greater_than_or_equal
134 && table_header->oem_revision >= 133 && table_header.oem_revision >=
135 acpi_blacklist[i].oem_revision) 134 acpi_blacklist[i].oem_revision)
136 || (acpi_blacklist[i].oem_revision_predicate == equal 135 || (acpi_blacklist[i].oem_revision_predicate == equal
137 && table_header->oem_revision == 136 && table_header.oem_revision ==
138 acpi_blacklist[i].oem_revision)) { 137 acpi_blacklist[i].oem_revision)) {
139 138
140 printk(KERN_ERR PREFIX 139 printk(KERN_ERR PREFIX
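
blacklist.c now names tables by signature string and reads the header by value through acpi_get_table_header(), so no table mapping is held across the comparison. A minimal sketch of that by-value pattern, with an illustrative OEM ID check:

#include <linux/string.h>
#include <linux/errno.h>
#include <acpi/acpi.h>		/* acpi_get_table_header(), ACPI_SIG_DSDT */

static int example_dsdt_is_from_vendor(void)
{
	struct acpi_table_header hdr;	/* filled in place, nothing to unmap */

	if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &hdr)))
		return -ENODEV;

	/* "EXMPL " is an illustrative OEM ID, not a real vendor */
	return strncmp(hdr.oem_id, "EXMPL ", 6) == 0;
}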
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 766332e45592..c26468da4295 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -44,9 +44,6 @@ ACPI_MODULE_NAME("acpi_bus")
44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger); 44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
45#endif 45#endif
46 46
47struct fadt_descriptor acpi_fadt;
48EXPORT_SYMBOL(acpi_fadt);
49
50struct acpi_device *acpi_root; 47struct acpi_device *acpi_root;
51struct proc_dir_entry *acpi_root_dir; 48struct proc_dir_entry *acpi_root_dir;
52EXPORT_SYMBOL(acpi_root_dir); 49EXPORT_SYMBOL(acpi_root_dir);
@@ -195,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
195 192
196 if (!device->flags.power_manageable) { 193 if (!device->flags.power_manageable) {
197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", 194 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
198 device->kobj.name)); 195 device->dev.kobj.name));
199 return -ENODEV; 196 return -ENODEV;
200 } 197 }
201 /* 198 /*
@@ -582,11 +579,12 @@ static int __init acpi_bus_init_irq(void)
582 return 0; 579 return 0;
583} 580}
584 581
582acpi_native_uint acpi_gbl_permanent_mmap;
583
584
585void __init acpi_early_init(void) 585void __init acpi_early_init(void)
586{ 586{
587 acpi_status status = AE_OK; 587 acpi_status status = AE_OK;
588 struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };
589
590 588
591 if (acpi_disabled) 589 if (acpi_disabled)
592 return; 590 return;
@@ -597,6 +595,15 @@ void __init acpi_early_init(void)
597 if (!acpi_strict) 595 if (!acpi_strict)
598 acpi_gbl_enable_interpreter_slack = TRUE; 596 acpi_gbl_enable_interpreter_slack = TRUE;
599 597
598 acpi_gbl_permanent_mmap = 1;
599
600 status = acpi_reallocate_root_table();
601 if (ACPI_FAILURE(status)) {
602 printk(KERN_ERR PREFIX
603 "Unable to reallocate ACPI tables\n");
604 goto error0;
605 }
606
600 status = acpi_initialize_subsystem(); 607 status = acpi_initialize_subsystem();
601 if (ACPI_FAILURE(status)) { 608 if (ACPI_FAILURE(status)) {
602 printk(KERN_ERR PREFIX 609 printk(KERN_ERR PREFIX
@@ -611,32 +618,25 @@ void __init acpi_early_init(void)
611 goto error0; 618 goto error0;
612 } 619 }
613 620
614 /*
615 * Get a separate copy of the FADT for use by other drivers.
616 */
617 status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
618 if (ACPI_FAILURE(status)) {
619 printk(KERN_ERR PREFIX "Unable to get the FADT\n");
620 goto error0;
621 }
622#ifdef CONFIG_X86 621#ifdef CONFIG_X86
623 if (!acpi_ioapic) { 622 if (!acpi_ioapic) {
624 extern acpi_interrupt_flags acpi_sci_flags; 623 extern u8 acpi_sci_flags;
625 624
626 /* compatible (0) means level (3) */ 625 /* compatible (0) means level (3) */
627 if (acpi_sci_flags.trigger == 0) 626 if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
628 acpi_sci_flags.trigger = 3; 627 acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
629 628 acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
629 }
630 /* Set PIC-mode SCI trigger type */ 630 /* Set PIC-mode SCI trigger type */
631 acpi_pic_sci_set_trigger(acpi_fadt.sci_int, 631 acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
632 acpi_sci_flags.trigger); 632 (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
633 } else { 633 } else {
634 extern int acpi_sci_override_gsi; 634 extern int acpi_sci_override_gsi;
635 /* 635 /*
636 * now that acpi_fadt is initialized, 636 * now that acpi_gbl_FADT is initialized,
637 * update it with result from INT_SRC_OVR parsing 637 * update it with result from INT_SRC_OVR parsing
638 */ 638 */
639 acpi_fadt.sci_int = acpi_sci_override_gsi; 639 acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
640 } 640 }
641#endif 641#endif
642 642
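
In bus.c the private acpi_fadt copy gives way to ACPICA's global acpi_gbl_FADT, and acpi_sci_flags becomes the raw MADT flags byte, so the SCI trigger mode is recovered by masking rather than through the old acpi_interrupt_flags bitfield. A small sketch of that decode, assuming only the ACPI_MADT_TRIGGER_MASK layout the hunk itself relies on (bits 2-3):

#include <linux/types.h>
#include <acpi/acpi.h>		/* ACPI_MADT_TRIGGER_MASK */

/* 0 = bus default, 1 = edge, 3 = level, as in the PIC-mode branch above */
static unsigned int example_sci_trigger(u8 madt_flags)
{
	return (madt_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
}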
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index ac860583c203..c726612fafb6 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -75,7 +75,7 @@ static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
75static struct acpi_driver acpi_button_driver = { 75static struct acpi_driver acpi_button_driver = {
76 .name = ACPI_BUTTON_DRIVER_NAME, 76 .name = ACPI_BUTTON_DRIVER_NAME,
77 .class = ACPI_BUTTON_CLASS, 77 .class = ACPI_BUTTON_CLASS,
78 .ids = "ACPI_FPB,ACPI_FSB,PNP0C0D,PNP0C0C,PNP0C0E", 78 .ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
79 .ops = { 79 .ops = {
80 .add = acpi_button_add, 80 .add = acpi_button_add,
81 .remove = acpi_button_remove, 81 .remove = acpi_button_remove,
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0a1863ec91f3..69a68fd394cf 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -167,7 +167,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
167 if (ACPI_FAILURE(status) || !device) { 167 if (ACPI_FAILURE(status) || !device) {
168 result = container_device_add(&device, handle); 168 result = container_device_add(&device, handle);
169 if (!result) 169 if (!result)
170 kobject_uevent(&device->kobj, 170 kobject_uevent(&device->dev.kobj,
171 KOBJ_ONLINE); 171 KOBJ_ONLINE);
172 else 172 else
173 printk("Failed to add container\n"); 173 printk("Failed to add container\n");
@@ -175,13 +175,13 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
175 } else { 175 } else {
176 if (ACPI_SUCCESS(status)) { 176 if (ACPI_SUCCESS(status)) {
177 /* device exist and this is a remove request */ 177 /* device exist and this is a remove request */
178 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 178 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
179 } 179 }
180 } 180 }
181 break; 181 break;
182 case ACPI_NOTIFY_EJECT_REQUEST: 182 case ACPI_NOTIFY_EJECT_REQUEST:
183 if (!acpi_bus_get_device(handle, &device) && device) { 183 if (!acpi_bus_get_device(handle, &device) && device) {
184 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 184 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
185 } 185 }
186 break; 186 break;
187 default: 187 default:
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 35c6af8a83cd..d48f65a8f658 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -13,14 +13,11 @@
13 13
14#define _COMPONENT ACPI_SYSTEM_COMPONENT 14#define _COMPONENT ACPI_SYSTEM_COMPONENT
15ACPI_MODULE_NAME("debug") 15ACPI_MODULE_NAME("debug")
16#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer" 16
17#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
18#ifdef MODULE_PARAM_PREFIX 17#ifdef MODULE_PARAM_PREFIX
19#undef MODULE_PARAM_PREFIX 18#undef MODULE_PARAM_PREFIX
20#endif 19#endif
21#define MODULE_PARAM_PREFIX 20#define MODULE_PARAM_PREFIX "acpi."
22 module_param(acpi_dbg_layer, uint, 0400);
23module_param(acpi_dbg_level, uint, 0400);
24 21
25struct acpi_dlayer { 22struct acpi_dlayer {
26 const char *name; 23 const char *name;
@@ -86,6 +83,60 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
86 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 83 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
87}; 84};
88 85
86/* --------------------------------------------------------------------------
87 FS Interface (/sys)
88 -------------------------------------------------------------------------- */
89static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
90 int result = 0;
91 int i;
92
93 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
94
95 for(i = 0; i <ARRAY_SIZE(acpi_debug_layers); i++) {
96 result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
97 acpi_debug_layers[i].name,
98 acpi_debug_layers[i].value,
99 (acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' ');
100 }
101 result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
102 ACPI_ALL_DRIVERS,
103 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
104 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
105 ACPI_ALL_DRIVERS) == 0 ? ' ' : '-');
106 result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer);
107
108 return result;
109}
110
111static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
112 int result = 0;
113 int i;
114
115 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
116
117 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
118 result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
119 acpi_debug_levels[i].name,
120 acpi_debug_levels[i].value,
121 (acpi_dbg_level & acpi_debug_levels[i].
122 value) ? '*' : ' ');
123 }
124 result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
125 acpi_dbg_level);
126
127 return result;
128}
129
130module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
131module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
132
133/* --------------------------------------------------------------------------
134 FS Interface (/proc)
135 -------------------------------------------------------------------------- */
136#ifdef CONFIG_ACPI_PROCFS
137#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
138#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
139
89static int 140static int
90acpi_system_read_debug(char *page, 141acpi_system_read_debug(char *page,
91 char **start, off_t off, int count, int *eof, void *data) 142 char **start, off_t off, int count, int *eof, void *data)
@@ -221,3 +272,4 @@ static int __init acpi_debug_init(void)
221} 272}
222 273
223subsys_initcall(acpi_debug_init); 274subsys_initcall(acpi_debug_init);
275#endif
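
debug.c keeps the /proc files under CONFIG_ACPI_PROCFS but also exposes acpi_dbg_layer and acpi_dbg_level as writable module parameters via module_param_call(), pairing the stock param_set_uint with a custom formatter for reads. A minimal sketch of that pattern, using an illustrative parameter and assuming the 2.6-era module_param_call() arguments shown in the hunk:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int example_mask;	/* illustrative knob */

/* custom "get": format the value plus a short legend into the buffer */
static int example_get_mask(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%08X (bit 0 = verbose)\n", example_mask);
}

/* writes go through the stock uint parser, reads through the formatter */
module_param_call(mask, param_set_uint, example_get_mask, &example_mask, 0644);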
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index a6d77efb41a0..f049639bac35 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -133,7 +133,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
133 } 133 }
134 } 134 }
135 135
136 /* We could put the returned object (Node) on the object stack for later, 136 /*
137 * We could put the returned object (Node) on the object stack for later,
137 * but for now, we will put it in the "op" object that the parser uses, 138 * but for now, we will put it in the "op" object that the parser uses,
138 * so we can get it again at the end of this scope 139 * so we can get it again at the end of this scope
139 */ 140 */
@@ -514,8 +515,33 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
514 515
515 /* Third arg is the bank_value */ 516 /* Third arg is the bank_value */
516 517
518 /* TBD: This arg is a term_arg, not a constant, and must be evaluated */
519
517 arg = arg->common.next; 520 arg = arg->common.next;
518 info.bank_value = (u32) arg->common.value.integer; 521
522 /* Currently, only the following constants are supported */
523
524 switch (arg->common.aml_opcode) {
525 case AML_ZERO_OP:
526 info.bank_value = 0;
527 break;
528
529 case AML_ONE_OP:
530 info.bank_value = 1;
531 break;
532
533 case AML_BYTE_OP:
534 case AML_WORD_OP:
535 case AML_DWORD_OP:
536 case AML_QWORD_OP:
537 info.bank_value = (u32) arg->common.value.integer;
538 break;
539
540 default:
541 info.bank_value = 0;
542 ACPI_ERROR((AE_INFO,
543 "Non-constant BankValue for BankField is not implemented"));
544 }
519 545
520 /* Fourth arg is the field flags */ 546 /* Fourth arg is the field flags */
521 547
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index 1888c055d10f..af923c388520 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include <acpi/acdispat.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_DISPATCHER 49#define _COMPONENT ACPI_DISPATCHER
49ACPI_MODULE_NAME("dsinit") 50ACPI_MODULE_NAME("dsinit")
@@ -90,7 +91,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
90 * We are only interested in NS nodes owned by the table that 91 * We are only interested in NS nodes owned by the table that
91 * was just loaded 92 * was just loaded
92 */ 93 */
93 if (node->owner_id != info->table_desc->owner_id) { 94 if (node->owner_id != info->owner_id) {
94 return (AE_OK); 95 return (AE_OK);
95 } 96 }
96 97
@@ -150,14 +151,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
150 ******************************************************************************/ 151 ******************************************************************************/
151 152
152acpi_status 153acpi_status
153acpi_ds_initialize_objects(struct acpi_table_desc * table_desc, 154acpi_ds_initialize_objects(acpi_native_uint table_index,
154 struct acpi_namespace_node * start_node) 155 struct acpi_namespace_node * start_node)
155{ 156{
156 acpi_status status; 157 acpi_status status;
157 struct acpi_init_walk_info info; 158 struct acpi_init_walk_info info;
159 struct acpi_table_header *table;
160 acpi_owner_id owner_id;
158 161
159 ACPI_FUNCTION_TRACE(ds_initialize_objects); 162 ACPI_FUNCTION_TRACE(ds_initialize_objects);
160 163
164 status = acpi_tb_get_owner_id(table_index, &owner_id);
165 if (ACPI_FAILURE(status)) {
166 return_ACPI_STATUS(status);
167 }
168
161 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 169 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
162 "**** Starting initialization of namespace objects ****\n")); 170 "**** Starting initialization of namespace objects ****\n"));
163 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:")); 171 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
@@ -166,7 +174,8 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
166 info.op_region_count = 0; 174 info.op_region_count = 0;
167 info.object_count = 0; 175 info.object_count = 0;
168 info.device_count = 0; 176 info.device_count = 0;
169 info.table_desc = table_desc; 177 info.table_index = table_index;
178 info.owner_id = owner_id;
170 179
171 /* Walk entire namespace from the supplied root */ 180 /* Walk entire namespace from the supplied root */
172 181
@@ -176,10 +185,14 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
176 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); 185 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
177 } 186 }
178 187
188 status = acpi_get_table_by_index(table_index, &table);
189 if (ACPI_FAILURE(status)) {
190 return_ACPI_STATUS(status);
191 }
192
179 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 193 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
180 "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n", 194 "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
181 table_desc->pointer->signature, 195 table->signature, owner_id, info.object_count,
182 table_desc->owner_id, info.object_count,
183 info.device_count, info.method_count, 196 info.device_count, info.method_count,
184 info.op_region_count)); 197 info.op_region_count));
185 198
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index cf888add3191..1cbe61905824 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -327,7 +327,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
327 ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); 327 ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
328 328
329 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 329 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
330 "Execute method %p, currentstate=%p\n", 330 "Calling method %p, currentstate=%p\n",
331 this_walk_state->prev_op, this_walk_state)); 331 this_walk_state->prev_op, this_walk_state));
332 332
333 /* 333 /*
@@ -351,49 +351,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
351 return_ACPI_STATUS(status); 351 return_ACPI_STATUS(status);
352 } 352 }
353 353
354 /* 354 /* Begin method parse/execution. Create a new walk state */
355 * 1) Parse the method. All "normal" methods are parsed for each execution.
356 * Internal methods (_OSI, etc.) do not require parsing.
357 */
358 if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
359
360 /* Create a new walk state for the parse */
361
362 next_walk_state =
363 acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
364 obj_desc, NULL);
365 if (!next_walk_state) {
366 status = AE_NO_MEMORY;
367 goto cleanup;
368 }
369
370 /* Create and init a parse tree root */
371
372 op = acpi_ps_create_scope_op();
373 if (!op) {
374 status = AE_NO_MEMORY;
375 goto cleanup;
376 }
377
378 status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
379 obj_desc->method.aml_start,
380 obj_desc->method.aml_length,
381 NULL, 1);
382 if (ACPI_FAILURE(status)) {
383 acpi_ps_delete_parse_tree(op);
384 goto cleanup;
385 }
386
387 /* Begin AML parse (deletes next_walk_state) */
388
389 status = acpi_ps_parse_aml(next_walk_state);
390 acpi_ps_delete_parse_tree(op);
391 if (ACPI_FAILURE(status)) {
392 goto cleanup;
393 }
394 }
395
396 /* 2) Begin method execution. Create a new walk state */
397 355
398 next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id, 356 next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
399 NULL, obj_desc, thread); 357 NULL, obj_desc, thread);
@@ -424,7 +382,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
424 382
425 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, 383 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
426 obj_desc->method.aml_start, 384 obj_desc->method.aml_start,
427 obj_desc->method.aml_length, info, 3); 385 obj_desc->method.aml_length, info,
386 ACPI_IMODE_EXECUTE);
428 387
429 ACPI_FREE(info); 388 ACPI_FREE(info);
430 if (ACPI_FAILURE(status)) { 389 if (ACPI_FAILURE(status)) {
@@ -445,8 +404,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
445 this_walk_state->num_operands = 0; 404 this_walk_state->num_operands = 0;
446 405
447 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 406 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
448 "Starting nested execution, newstate=%p\n", 407 "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
449 next_walk_state)); 408 method_node->name.ascii, next_walk_state));
450 409
451 /* Invoke an internal method if necessary */ 410 /* Invoke an internal method if necessary */
452 411
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index 459160ff9058..ba4626e06a5e 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 72190abb1d59..a474ca2334d5 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -260,7 +260,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
260 } 260 }
261 261
262 obj_desc->buffer.flags |= AOPOBJ_DATA_VALID; 262 obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
263 op->common.node = (struct acpi_namespace_node *)obj_desc; 263 op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
264 return_ACPI_STATUS(AE_OK); 264 return_ACPI_STATUS(AE_OK);
265} 265}
266 266
@@ -270,7 +270,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
270 * 270 *
271 * PARAMETERS: walk_state - Current walk state 271 * PARAMETERS: walk_state - Current walk state
272 * Op - Parser object to be translated 272 * Op - Parser object to be translated
273 * package_length - Number of elements in the package 273 * element_count - Number of elements in the package - this is
274 * the num_elements argument to Package()
274 * obj_desc_ptr - Where the ACPI internal object is returned 275 * obj_desc_ptr - Where the ACPI internal object is returned
275 * 276 *
276 * RETURN: Status 277 * RETURN: Status
@@ -278,18 +279,29 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
278 * DESCRIPTION: Translate a parser Op package object to the equivalent 279 * DESCRIPTION: Translate a parser Op package object to the equivalent
279 * namespace object 280 * namespace object
280 * 281 *
282 * NOTE: The number of elements in the package will be always be the num_elements
283 * count, regardless of the number of elements in the package list. If
284 * num_elements is smaller, only that many package list elements are used.
285 * if num_elements is larger, the Package object is padded out with
286 * objects of type Uninitialized (as per ACPI spec.)
287 *
288 * Even though the ASL compilers do not allow num_elements to be smaller
289 * than the Package list length (for the fixed length package opcode), some
290 * BIOS code modifies the AML on the fly to adjust the num_elements, and
291 * this code compensates for that. This also provides compatibility with
292 * other AML interpreters.
293 *
281 ******************************************************************************/ 294 ******************************************************************************/
282 295
283acpi_status 296acpi_status
284acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, 297acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
285 union acpi_parse_object *op, 298 union acpi_parse_object *op,
286 u32 package_length, 299 u32 element_count,
287 union acpi_operand_object **obj_desc_ptr) 300 union acpi_operand_object **obj_desc_ptr)
288{ 301{
289 union acpi_parse_object *arg; 302 union acpi_parse_object *arg;
290 union acpi_parse_object *parent; 303 union acpi_parse_object *parent;
291 union acpi_operand_object *obj_desc = NULL; 304 union acpi_operand_object *obj_desc = NULL;
292 u32 package_list_length;
293 acpi_status status = AE_OK; 305 acpi_status status = AE_OK;
294 acpi_native_uint i; 306 acpi_native_uint i;
295 307
@@ -318,32 +330,13 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
318 obj_desc->package.node = parent->common.node; 330 obj_desc->package.node = parent->common.node;
319 } 331 }
320 332
321 obj_desc->package.count = package_length;
322
323 /* Count the number of items in the package list */
324
325 arg = op->common.value.arg;
326 arg = arg->common.next;
327 for (package_list_length = 0; arg; package_list_length++) {
328 arg = arg->common.next;
329 }
330
331 /*
332 * The package length (number of elements) will be the greater
333 * of the specified length and the length of the initializer list
334 */
335 if (package_list_length > package_length) {
336 obj_desc->package.count = package_list_length;
337 }
338
339 /* 333 /*
340 * Allocate the pointer array (array of pointers to the 334 * Allocate the element array (array of pointers to the individual
341 * individual objects). Add an extra pointer slot so 335 * objects) based on the num_elements parameter. Add an extra pointer slot
342 * that the list is always null terminated. 336 * so that the list is always null terminated.
343 */ 337 */
344 obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) 338 obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
345 obj_desc->package. 339 element_count +
346 count +
347 1) * sizeof(void *)); 340 1) * sizeof(void *));
348 341
349 if (!obj_desc->package.elements) { 342 if (!obj_desc->package.elements) {
@@ -351,15 +344,20 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
351 return_ACPI_STATUS(AE_NO_MEMORY); 344 return_ACPI_STATUS(AE_NO_MEMORY);
352 } 345 }
353 346
347 obj_desc->package.count = element_count;
348
354 /* 349 /*
355 * Initialize all elements of the package 350 * Initialize the elements of the package, up to the num_elements count.
351 * Package is automatically padded with uninitialized (NULL) elements
352 * if num_elements is greater than the package list length. Likewise,
353 * Package is truncated if num_elements is less than the list length.
356 */ 354 */
357 arg = op->common.value.arg; 355 arg = op->common.value.arg;
358 arg = arg->common.next; 356 arg = arg->common.next;
359 for (i = 0; arg; i++) { 357 for (i = 0; arg && (i < element_count); i++) {
360 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { 358 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
361 359
362 /* Object (package or buffer) is already built */ 360 /* This package element is already built, just get it */
363 361
364 obj_desc->package.elements[i] = 362 obj_desc->package.elements[i] =
365 ACPI_CAST_PTR(union acpi_operand_object, 363 ACPI_CAST_PTR(union acpi_operand_object,
@@ -373,8 +371,14 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
373 arg = arg->common.next; 371 arg = arg->common.next;
374 } 372 }
375 373
374 if (!arg) {
375 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
376 "Package List length larger than NumElements count (%X), truncated\n",
377 element_count));
378 }
379
376 obj_desc->package.flags |= AOPOBJ_DATA_VALID; 380 obj_desc->package.flags |= AOPOBJ_DATA_VALID;
377 op->common.node = (struct acpi_namespace_node *)obj_desc; 381 op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
378 return_ACPI_STATUS(status); 382 return_ACPI_STATUS(status);
379} 383}
380 384
@@ -488,8 +492,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
488 /* 492 /*
489 * Defer evaluation of Buffer term_arg operand 493 * Defer evaluation of Buffer term_arg operand
490 */ 494 */
491 obj_desc->buffer.node = (struct acpi_namespace_node *) 495 obj_desc->buffer.node =
492 walk_state->operands[0]; 496 ACPI_CAST_PTR(struct acpi_namespace_node,
497 walk_state->operands[0]);
493 obj_desc->buffer.aml_start = op->named.data; 498 obj_desc->buffer.aml_start = op->named.data;
494 obj_desc->buffer.aml_length = op->named.length; 499 obj_desc->buffer.aml_length = op->named.length;
495 break; 500 break;
@@ -499,8 +504,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
499 /* 504 /*
500 * Defer evaluation of Package term_arg operand 505 * Defer evaluation of Package term_arg operand
501 */ 506 */
502 obj_desc->package.node = (struct acpi_namespace_node *) 507 obj_desc->package.node =
503 walk_state->operands[0]; 508 ACPI_CAST_PTR(struct acpi_namespace_node,
509 walk_state->operands[0]);
504 obj_desc->package.aml_start = op->named.data; 510 obj_desc->package.aml_start = op->named.data;
505 obj_desc->package.aml_length = op->named.length; 511 obj_desc->package.aml_length = op->named.length;
506 break; 512 break;
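
The package rework makes NumElements authoritative: the element array is sized from the declared count, extra initializers are dropped, and missing ones are left NULL so they appear as Uninitialized objects. A standalone plain-C sketch of just that sizing rule, outside the interpreter, to make the pad/truncate behaviour concrete:

#include <stdlib.h>

/* Size the array from num_elements, as the hunk above now does: extra
 * initializers are ignored, missing ones stay NULL (uninitialized). */
static void **example_build_package(unsigned int num_elements,
				    void **init_list, unsigned int list_len)
{
	void **elements = calloc(num_elements + 1, sizeof(void *));
	unsigned int i;

	if (!elements)
		return NULL;
	for (i = 0; i < num_elements && i < list_len; i++)
		elements[i] = init_list[i];
	return elements;	/* NULL terminated, NULL padded */
}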
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 5b974a8fe614..6c6104a7a247 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -114,7 +114,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
114 } 114 }
115 115
116 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, 116 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
117 aml_length, NULL, 1); 117 aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
118 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
119 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
120 goto cleanup; 120 goto cleanup;
@@ -157,7 +157,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
157 /* Execute the opcode and arguments */ 157 /* Execute the opcode and arguments */
158 158
159 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, 159 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
160 aml_length, NULL, 3); 160 aml_length, NULL, ACPI_IMODE_EXECUTE);
161 if (ACPI_FAILURE(status)) { 161 if (ACPI_FAILURE(status)) {
162 acpi_ds_delete_walk_state(walk_state); 162 acpi_ds_delete_walk_state(walk_state);
163 goto cleanup; 163 goto cleanup;
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 05230baf5de8..e4073e05a75c 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index d7a616c3104e..69693fa07224 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -219,7 +219,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
219 if (!op) { 219 if (!op) {
220 status = acpi_ds_load2_begin_op(walk_state, out_op); 220 status = acpi_ds_load2_begin_op(walk_state, out_op);
221 if (ACPI_FAILURE(status)) { 221 if (ACPI_FAILURE(status)) {
222 return_ACPI_STATUS(status); 222 goto error_exit;
223 } 223 }
224 224
225 op = *out_op; 225 op = *out_op;
@@ -238,7 +238,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
238 238
239 status = acpi_ds_scope_stack_pop(walk_state); 239 status = acpi_ds_scope_stack_pop(walk_state);
240 if (ACPI_FAILURE(status)) { 240 if (ACPI_FAILURE(status)) {
241 return_ACPI_STATUS(status); 241 goto error_exit;
242 } 242 }
243 } 243 }
244 } 244 }
@@ -287,7 +287,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
287 287
288 status = acpi_ds_result_stack_push(walk_state); 288 status = acpi_ds_result_stack_push(walk_state);
289 if (ACPI_FAILURE(status)) { 289 if (ACPI_FAILURE(status)) {
290 return_ACPI_STATUS(status); 290 goto error_exit;
291 } 291 }
292 292
293 status = acpi_ds_exec_begin_control_op(walk_state, op); 293 status = acpi_ds_exec_begin_control_op(walk_state, op);
@@ -328,6 +328,10 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
328 /* Nothing to do here during method execution */ 328 /* Nothing to do here during method execution */
329 329
330 return_ACPI_STATUS(status); 330 return_ACPI_STATUS(status);
331
332 error_exit:
333 status = acpi_ds_method_error(status, walk_state);
334 return_ACPI_STATUS(status);
331} 335}
332 336
333/***************************************************************************** 337/*****************************************************************************
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index e3ca7f6539c1..8ab9d1b29a4c 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -196,6 +196,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
196 * one of the opcodes that actually opens a scope 196 * one of the opcodes that actually opens a scope
197 */ 197 */
198 switch (node->type) { 198 switch (node->type) {
199 case ACPI_TYPE_ANY:
199 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ 200 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
200 case ACPI_TYPE_DEVICE: 201 case ACPI_TYPE_DEVICE:
201 case ACPI_TYPE_POWER: 202 case ACPI_TYPE_POWER:
@@ -546,6 +547,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
546 acpi_status status; 547 acpi_status status;
547 acpi_object_type object_type; 548 acpi_object_type object_type;
548 char *buffer_ptr; 549 char *buffer_ptr;
550 u32 flags;
549 551
550 ACPI_FUNCTION_TRACE(ds_load2_begin_op); 552 ACPI_FUNCTION_TRACE(ds_load2_begin_op);
551 553
@@ -669,6 +671,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
669 * one of the opcodes that actually opens a scope 671 * one of the opcodes that actually opens a scope
670 */ 672 */
671 switch (node->type) { 673 switch (node->type) {
674 case ACPI_TYPE_ANY:
672 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ 675 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
673 case ACPI_TYPE_DEVICE: 676 case ACPI_TYPE_DEVICE:
674 case ACPI_TYPE_POWER: 677 case ACPI_TYPE_POWER:
@@ -750,12 +753,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
750 break; 753 break;
751 } 754 }
752 755
753 /* Add new entry into namespace */ 756 flags = ACPI_NS_NO_UPSEARCH;
757 if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
758
759 /* Execution mode, node cannot already exist, node is temporary */
760
761 flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
762 }
763
764 /* Add new entry or lookup existing entry */
754 765
755 status = 766 status =
756 acpi_ns_lookup(walk_state->scope_info, buffer_ptr, 767 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
757 object_type, ACPI_IMODE_LOAD_PASS2, 768 object_type, ACPI_IMODE_LOAD_PASS2, flags,
758 ACPI_NS_NO_UPSEARCH, walk_state, &(node)); 769 walk_state, &node);
759 break; 770 break;
760 } 771 }
761 772
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index c9228972f5f6..3927c495e4bf 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 7817e5522679..16c8e38b51ef 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 90990a4b6526..688e83a16906 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -615,20 +615,28 @@ static acpi_status
615find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv) 615find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
616{ 616{
617 acpi_status status; 617 acpi_status status;
618 acpi_handle tmp; 618 acpi_handle tmp, parent;
619 struct dock_station *ds = context; 619 struct dock_station *ds = context;
620 struct dock_dependent_device *dd; 620 struct dock_dependent_device *dd;
621 621
622 status = acpi_bus_get_ejd(handle, &tmp); 622 status = acpi_bus_get_ejd(handle, &tmp);
623 if (ACPI_FAILURE(status)) 623 if (ACPI_FAILURE(status)) {
624 return AE_OK; 624 /* try the parent device as well */
625 status = acpi_get_parent(handle, &parent);
626 if (ACPI_FAILURE(status))
627 goto fdd_out;
628 /* see if parent is dependent on dock */
629 status = acpi_bus_get_ejd(parent, &tmp);
630 if (ACPI_FAILURE(status))
631 goto fdd_out;
632 }
625 633
626 if (tmp == ds->handle) { 634 if (tmp == ds->handle) {
627 dd = alloc_dock_dependent_device(handle); 635 dd = alloc_dock_dependent_device(handle);
628 if (dd) 636 if (dd)
629 add_dock_dependent_device(ds, dd); 637 add_dock_dependent_device(ds, dd);
630 } 638 }
631 639fdd_out:
632 return AE_OK; 640 return AE_OK;
633} 641}
634 642
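The dock.c change lets find_dock_devices() fall back to a device's parent when the device itself has no _EJD, so children of a dock-dependent device are also treated as dock dependents. A small self-contained model of that fallback is sketched below; the node structure and helper are stand-ins, not the kernel's acpi_bus_get_ejd()/acpi_get_parent() API.

/* Model of the parent fallback: if a device has no _EJD of its own,
 * its parent's _EJD is consulted instead. */
#include <stdio.h>

struct node {
        const char *name;
        struct node *parent;
        struct node *ejd;       /* dock this node depends on, or NULL */
};

static struct node *effective_ejd(struct node *dev)
{
        if (dev->ejd)
                return dev->ejd;
        if (dev->parent)
                return dev->parent->ejd;    /* fall back to the parent device */
        return NULL;
}

int main(void)
{
        struct node dock   = { "DOCK", NULL, NULL };
        struct node parent = { "GFX",  NULL, &dock };
        struct node child  = { "DD01", &parent, NULL };
        struct node *d = effective_ejd(&child);

        printf("%s depends on %s\n", child.name, d ? d->name : "nothing");
        return 0;
}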
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index cbdf031f3c09..743ce27fa0bb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -872,9 +872,8 @@ static int __init acpi_ec_get_real_ecdt(void)
872 acpi_status status; 872 acpi_status status;
873 struct acpi_table_ecdt *ecdt_ptr; 873 struct acpi_table_ecdt *ecdt_ptr;
874 874
875 status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING, 875 status = acpi_get_table(ACPI_SIG_ECDT, 1,
876 (struct acpi_table_header **) 876 (struct acpi_table_header **)&ecdt_ptr);
877 &ecdt_ptr);
878 if (ACPI_FAILURE(status)) 877 if (ACPI_FAILURE(status))
879 return -ENODEV; 878 return -ENODEV;
880 879
@@ -891,14 +890,14 @@ static int __init acpi_ec_get_real_ecdt(void)
891 if (acpi_ec_mode == EC_INTR) { 890 if (acpi_ec_mode == EC_INTR) {
892 init_waitqueue_head(&ec_ecdt->wait); 891 init_waitqueue_head(&ec_ecdt->wait);
893 } 892 }
894 ec_ecdt->command_addr = ecdt_ptr->ec_control.address; 893 ec_ecdt->command_addr = ecdt_ptr->control.address;
895 ec_ecdt->data_addr = ecdt_ptr->ec_data.address; 894 ec_ecdt->data_addr = ecdt_ptr->data.address;
896 ec_ecdt->gpe = ecdt_ptr->gpe_bit; 895 ec_ecdt->gpe = ecdt_ptr->gpe;
897 /* use the GL just to be safe */ 896 /* use the GL just to be safe */
898 ec_ecdt->global_lock = TRUE; 897 ec_ecdt->global_lock = TRUE;
899 ec_ecdt->uid = ecdt_ptr->uid; 898 ec_ecdt->uid = ecdt_ptr->uid;
900 899
901 status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle); 900 status = acpi_get_handle(NULL, ecdt_ptr->id, &ec_ecdt->handle);
902 if (ACPI_FAILURE(status)) { 901 if (ACPI_FAILURE(status)) {
903 goto error; 902 goto error;
904 } 903 }
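The ec.c hunk switches from acpi_get_firmware_table() to acpi_get_table() and follows the renamed ECDT fields in the new ACPICA table layer (ec_control/ec_data/gpe_bit/ec_id become control/data/gpe/id). The sketch below only models that field mapping with simplified stand-in structures; it is not the real struct acpi_table_ecdt, and the sample addresses are invented.

/* Simplified stand-ins showing the new-style ECDT field names in use. */
#include <stdio.h>

struct gas { unsigned long long address; };     /* generic address stub */

struct ecdt {                                   /* new-style field names */
        struct gas control;
        struct gas data;
        unsigned char gpe;
        char id[16];
};

struct ec {
        unsigned long long command_addr;
        unsigned long long data_addr;
        unsigned char gpe;
};

static void ec_from_ecdt(struct ec *ec, const struct ecdt *ecdt)
{
        ec->command_addr = ecdt->control.address;   /* was ecdt->ec_control */
        ec->data_addr = ecdt->data.address;         /* was ecdt->ec_data */
        ec->gpe = ecdt->gpe;                        /* was ecdt->gpe_bit */
}

int main(void)
{
        struct ecdt ecdt = { { 0x66 }, { 0x62 }, 9, "\\_SB.EC0" };
        struct ec ec;

        ec_from_ecdt(&ec, &ecdt);
        printf("cmd=0x%llx data=0x%llx gpe=%u id=%s\n",
               ec.command_addr, ec.data_addr, ec.gpe, ecdt.id);
        return 0;
}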
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index 919037d6acff..a1f87b5def2a 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
70 70
71 ACPI_FUNCTION_TRACE(ev_initialize_events); 71 ACPI_FUNCTION_TRACE(ev_initialize_events);
72 72
73 /* Make sure we have ACPI tables */
74
75 if (!acpi_gbl_DSDT) {
76 ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
77 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
78 }
79
80 /* 73 /*
81 * Initialize the Fixed and General Purpose Events. This is done prior to 74 * Initialize the Fixed and General Purpose Events. This is done prior to
82 * enabling SCIs to prevent interrupts from occurring before the handlers are 75 * enabling SCIs to prevent interrupts from occurring before the handlers are
@@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
211 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { 204 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
212 status = 205 status =
213 acpi_set_register(acpi_gbl_fixed_event_info[i]. 206 acpi_set_register(acpi_gbl_fixed_event_info[i].
214 enable_register_id, 0, 207 enable_register_id, 0);
215 ACPI_MTX_LOCK);
216 if (ACPI_FAILURE(status)) { 208 if (ACPI_FAILURE(status)) {
217 return (status); 209 return (status);
218 } 210 }
@@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
298 /* Clear the status bit */ 290 /* Clear the status bit */
299 291
300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 292 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
301 status_register_id, 1, ACPI_MTX_DO_NOT_LOCK); 293 status_register_id, 1);
302 294
303 /* 295 /*
304 * Make sure we've got a handler. If not, report an error. 296 * Make sure we've got a handler. If not, report an error.
@@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
306 */ 298 */
307 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { 299 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
308 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
309 enable_register_id, 0, 301 enable_register_id, 0);
310 ACPI_MTX_DO_NOT_LOCK);
311 302
312 ACPI_ERROR((AE_INFO, 303 ACPI_ERROR((AE_INFO,
313 "No installed handler for fixed event [%08X]", 304 "No installed handler for fixed event [%08X]",
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index c76c0583ca6a..dfac3ecc596e 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
121 if (!gpe_register_info) { 121 if (!gpe_register_info) {
122 return_ACPI_STATUS(AE_NOT_EXIST); 122 return_ACPI_STATUS(AE_NOT_EXIST);
123 } 123 }
124 register_bit = gpe_event_info->register_bit; 124 register_bit = (u8)
125 (1 <<
126 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
125 127
126 /* 1) Disable case. Simply clear all enable bits */ 128 /* 1) Disable case. Simply clear all enable bits */
127 129
@@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
458 460
459 /* Examine one GPE bit */ 461 /* Examine one GPE bit */
460 462
461 if (enabled_status_byte & 463 if (enabled_status_byte & (1 << j)) {
462 acpi_gbl_decode_to8bit[j]) {
463 /* 464 /*
464 * Found an active GPE. Dispatch the event to a handler 465 * Found an active GPE. Dispatch the event to a handler
465 * or method. 466 * or method.
@@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
570 571
571 if (ACPI_FAILURE(status)) { 572 if (ACPI_FAILURE(status)) {
572 ACPI_EXCEPTION((AE_INFO, status, 573 ACPI_EXCEPTION((AE_INFO, status,
573 "While evaluating GPE method [%4.4s]", 574 "while evaluating GPE method [%4.4s]",
574 acpi_ut_get_node_name 575 acpi_ut_get_node_name
575 (local_gpe_event_info.dispatch. 576 (local_gpe_event_info.dispatch.
576 method_node))); 577 method_node)));
@@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
618 619
619 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 620 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
620 621
622 acpi_gpe_count++;
623
621 /* 624 /*
622 * If edge-triggered, clear the GPE status bit now. Note that 625 * If edge-triggered, clear the GPE status bit now. Note that
623 * level-triggered events are cleared after the GPE is serviced. 626 * level-triggered events are cleared after the GPE is serviced.
@@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
633 } 636 }
634 } 637 }
635 638
636 /* Save current system state */ 639 if (!acpi_gbl_system_awake_and_running) {
637 640 /*
638 if (acpi_gbl_system_awake_and_running) { 641 * We just woke up because of a wake GPE. Disable any further GPEs
639 ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 642 * until we are fully up and running (Only wake GPEs should be enabled
640 } else { 643 * at this time, but we just brute-force disable them all.)
641 ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 644 * 1) We must disable this particular wake GPE so it won't fire again
645 * 2) We want to disable all wake GPEs, since we are now awake
646 */
647 (void)acpi_hw_disable_all_gpes();
642 } 648 }
643 649
644 /* 650 /*
645 * Dispatch the GPE to either an installed handler, or the control 651 * Dispatch the GPE to either an installed handler, or the control method
646 * method associated with this GPE (_Lxx or _Exx). 652 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
647 * If a handler exists, we invoke it and do not attempt to run the method. 653 * it and do not attempt to run the method. If there is neither a handler
648 * If there is neither a handler nor a method, we disable the level to 654 * nor a method, we disable this GPE to prevent further such pointless
649 * prevent further events from coming in here. 655 * events from firing.
650 */ 656 */
651 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 657 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
652 case ACPI_GPE_DISPATCH_HANDLER: 658 case ACPI_GPE_DISPATCH_HANDLER:
@@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
677 case ACPI_GPE_DISPATCH_METHOD: 683 case ACPI_GPE_DISPATCH_METHOD:
678 684
679 /* 685 /*
680 * Disable GPE, so it doesn't keep firing before the method has a 686 * Disable the GPE, so it doesn't keep firing before the method has a
681 * chance to run. 687 * chance to run (it runs asynchronously with interrupts enabled).
682 */ 688 */
683 status = acpi_ev_disable_gpe(gpe_event_info); 689 status = acpi_ev_disable_gpe(gpe_event_info);
684 if (ACPI_FAILURE(status)) { 690 if (ACPI_FAILURE(status)) {
@@ -711,7 +717,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
711 gpe_number)); 717 gpe_number));
712 718
713 /* 719 /*
714 * Disable the GPE. The GPE will remain disabled until the ACPI 720 * Disable the GPE. The GPE will remain disabled until the ACPI
715 * Core Subsystem is restarted, or a handler is installed. 721 * Core Subsystem is restarted, or a handler is installed.
716 */ 722 */
717 status = acpi_ev_disable_gpe(gpe_event_info); 723 status = acpi_ev_disable_gpe(gpe_event_info);
@@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
726 732
727 return_UINT32(ACPI_INTERRUPT_HANDLED); 733 return_UINT32(ACPI_INTERRUPT_HANDLED);
728} 734}
729
730#ifdef ACPI_GPE_NOTIFY_CHECK
731/*******************************************************************************
732 * TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
733 *
734 * FUNCTION: acpi_ev_check_for_wake_only_gpe
735 *
736 * PARAMETERS: gpe_event_info - info for this GPE
737 *
738 * RETURN: Status
739 *
740 * DESCRIPTION: Determine if a GPE is "wake-only".
741 *
742 * Called from Notify() code in interpreter when a "DeviceWake"
743 * Notify comes in.
744 *
745 ******************************************************************************/
746
747acpi_status
748acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
749{
750 acpi_status status;
751
752 ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
753
754 if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
755 ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
756 /* This must be a wake-only GPE, disable it */
757
758 status = acpi_ev_disable_gpe(gpe_event_info);
759
760 /* Set GPE to wake-only. Do not change wake disabled/enabled status */
761
762 acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
763
764 ACPI_INFO((AE_INFO,
765 "GPE %p was updated from wake/run to wake-only",
766 gpe_event_info));
767
768 /* This was a wake-only GPE */
769
770 return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
771 }
772
773 return_ACPI_STATUS(AE_OK);
774}
775#endif
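Two related simplifications run through the evgpe.c hunks: the per-event register bit is now computed as 1 << (gpe_number - base_gpe_number) instead of being read from acpi_gbl_decode_to8bit[], and the dispatch loop tests enabled_status_byte & (1 << j) directly. The standalone model below reproduces that arithmetic with stub structures; the GPE numbers are made-up examples.

/* Model of the GPE bit arithmetic the hunks switch to. */
#include <stdio.h>

#define GPE_REGISTER_WIDTH 8

struct gpe_register { unsigned char base_gpe_number; };
struct gpe_event    { unsigned char gpe_number; };

static unsigned char register_bit(const struct gpe_event *ev,
                                  const struct gpe_register *reg)
{
        return (unsigned char)(1 << (ev->gpe_number - reg->base_gpe_number));
}

int main(void)
{
        struct gpe_register reg = { .base_gpe_number = 0x10 };
        struct gpe_event ev = { .gpe_number = 0x13 };
        unsigned char enabled_status_byte = 0x0A;   /* example: bits 1 and 3 set */
        int j;

        printf("GPE 0x%02X uses bit 0x%02X\n",
               ev.gpe_number, register_bit(&ev, &reg));

        /* Same style of test as the dispatch loop: check each bit directly */
        for (j = 0; j < GPE_REGISTER_WIDTH; j++)
                if (enabled_status_byte & (1 << j))
                        printf("GPE 0x%02X is active\n", reg.base_gpe_number + j);

        return 0;
}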
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 95ddeb48bc0f..ad5bc76edf46 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
529 529
530 /* Install new interrupt handler if not SCI_INT */ 530 /* Install new interrupt handler if not SCI_INT */
531 531
532 if (interrupt_number != acpi_gbl_FADT->sci_int) { 532 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
533 status = acpi_os_install_interrupt_handler(interrupt_number, 533 status = acpi_os_install_interrupt_handler(interrupt_number,
534 acpi_ev_gpe_xrupt_handler, 534 acpi_ev_gpe_xrupt_handler,
535 gpe_xrupt); 535 gpe_xrupt);
@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
567 567
568 /* We never want to remove the SCI interrupt handler */ 568 /* We never want to remove the SCI interrupt handler */
569 569
570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) { 570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
571 gpe_xrupt->gpe_block_list_head = NULL; 571 gpe_xrupt->gpe_block_list_head = NULL;
572 return_ACPI_STATUS(AE_OK); 572 return_ACPI_STATUS(AE_OK);
573 } 573 }
@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
796 (u8) (gpe_block->block_base_number + 796 (u8) (gpe_block->block_base_number +
797 (i * ACPI_GPE_REGISTER_WIDTH)); 797 (i * ACPI_GPE_REGISTER_WIDTH));
798 798
799 ACPI_STORE_ADDRESS(this_register->status_address.address, 799 this_register->status_address.address =
800 (gpe_block->block_address.address + i)); 800 gpe_block->block_address.address + i;
801 801
802 ACPI_STORE_ADDRESS(this_register->enable_address.address, 802 this_register->enable_address.address =
803 (gpe_block->block_address.address 803 gpe_block->block_address.address + i +
804 + i + gpe_block->register_count)); 804 gpe_block->register_count;
805 805
806 this_register->status_address.address_space_id = 806 this_register->status_address.space_id =
807 gpe_block->block_address.address_space_id; 807 gpe_block->block_address.space_id;
808 this_register->enable_address.address_space_id = 808 this_register->enable_address.space_id =
809 gpe_block->block_address.address_space_id; 809 gpe_block->block_address.space_id;
810 this_register->status_address.register_bit_width = 810 this_register->status_address.bit_width =
811 ACPI_GPE_REGISTER_WIDTH; 811 ACPI_GPE_REGISTER_WIDTH;
812 this_register->enable_address.register_bit_width = 812 this_register->enable_address.bit_width =
813 ACPI_GPE_REGISTER_WIDTH; 813 ACPI_GPE_REGISTER_WIDTH;
814 this_register->status_address.register_bit_offset = 814 this_register->status_address.bit_offset =
815 ACPI_GPE_REGISTER_WIDTH; 815 ACPI_GPE_REGISTER_WIDTH;
816 this_register->enable_address.register_bit_offset = 816 this_register->enable_address.bit_offset =
817 ACPI_GPE_REGISTER_WIDTH; 817 ACPI_GPE_REGISTER_WIDTH;
818 818
819 /* Init the event_info for each GPE within this register */ 819 /* Init the event_info for each GPE within this register */
820 820
821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
822 this_event->register_bit = acpi_gbl_decode_to8bit[j]; 822 this_event->gpe_number =
823 (u8) (this_register->base_gpe_number + j);
823 this_event->register_info = this_register; 824 this_event->register_info = this_register;
824 this_event++; 825 this_event++;
825 } 826 }
@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
1109 * If EITHER the register length OR the block address are zero, then that 1110 * If EITHER the register length OR the block address are zero, then that
1110 * particular block is not supported. 1111 * particular block is not supported.
1111 */ 1112 */
1112 if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) { 1113 if (acpi_gbl_FADT.gpe0_block_length &&
1114 acpi_gbl_FADT.xgpe0_block.address) {
1113 1115
1114 /* GPE block 0 exists (has both length and address > 0) */ 1116 /* GPE block 0 exists (has both length and address > 0) */
1115 1117
1116 register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); 1118 register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1117 1119
1118 gpe_number_max = 1120 gpe_number_max =
1119 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; 1121 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
1121 /* Install GPE Block 0 */ 1123 /* Install GPE Block 0 */
1122 1124
1123 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1125 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1124 &acpi_gbl_FADT->xgpe0_blk, 1126 &acpi_gbl_FADT.xgpe0_block,
1125 register_count0, 0, 1127 register_count0, 0,
1126 acpi_gbl_FADT->sci_int, 1128 acpi_gbl_FADT.sci_interrupt,
1127 &acpi_gbl_gpe_fadt_blocks[0]); 1129 &acpi_gbl_gpe_fadt_blocks[0]);
1128 1130
1129 if (ACPI_FAILURE(status)) { 1131 if (ACPI_FAILURE(status)) {
@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) { 1137 if (acpi_gbl_FADT.gpe1_block_length &&
1138 acpi_gbl_FADT.xgpe1_block.address) {
1136 1139
1137 /* GPE block 1 exists (has both length and address > 0) */ 1140 /* GPE block 1 exists (has both length and address > 0) */
1138 1141
1139 register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); 1142 register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1140 1143
1141 /* Check for GPE0/GPE1 overlap (if both banks exist) */ 1144 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1142 1145
1143 if ((register_count0) && 1146 if ((register_count0) &&
1144 (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { 1147 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1145 ACPI_ERROR((AE_INFO, 1148 ACPI_ERROR((AE_INFO,
1146 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", 1149 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
1147 gpe_number_max, acpi_gbl_FADT->gpe1_base, 1150 gpe_number_max, acpi_gbl_FADT.gpe1_base,
1148 acpi_gbl_FADT->gpe1_base + 1151 acpi_gbl_FADT.gpe1_base +
1149 ((register_count1 * 1152 ((register_count1 *
1150 ACPI_GPE_REGISTER_WIDTH) - 1))); 1153 ACPI_GPE_REGISTER_WIDTH) - 1)));
1151 1154
@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
1157 1160
1158 status = 1161 status =
1159 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1162 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1160 &acpi_gbl_FADT->xgpe1_blk, 1163 &acpi_gbl_FADT.xgpe1_block,
1161 register_count1, 1164 register_count1,
1162 acpi_gbl_FADT->gpe1_base, 1165 acpi_gbl_FADT.gpe1_base,
1163 acpi_gbl_FADT->sci_int, 1166 acpi_gbl_FADT.
1167 sci_interrupt,
1164 &acpi_gbl_gpe_fadt_blocks 1168 &acpi_gbl_gpe_fadt_blocks
1165 [1]); 1169 [1]);
1166 1170
@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
1173 * GPE0 and GPE1 do not have to be contiguous in the GPE number 1177 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1174 * space. However, GPE0 always starts at GPE number zero. 1178 * space. However, GPE0 always starts at GPE number zero.
1175 */ 1179 */
1176 gpe_number_max = acpi_gbl_FADT->gpe1_base + 1180 gpe_number_max = acpi_gbl_FADT.gpe1_base +
1177 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); 1181 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1178 } 1182 }
1179 } 1183 }
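The evgpeblk.c hunks move from the old acpi_gbl_FADT pointer to the new acpi_gbl_FADT structure and its renamed fields, but the sizing math is unchanged: each block provides gpe_block_length / 2 registers, each register covers eight GPEs, and GPE1 is ignored if its base falls inside GPE0's range. A small self-contained check of that arithmetic, with invented example values, is below.

/* Standalone check of the GPE block sizing and overlap test. */
#include <stdio.h>

#define GPE_REGISTER_WIDTH 8

int main(void)
{
        unsigned int gpe0_block_length = 8;   /* bytes: 4 status + 4 enable */
        unsigned int gpe1_base = 0x40;
        unsigned int register_count0, gpe_number_max;

        register_count0 = gpe0_block_length / 2;
        gpe_number_max = register_count0 * GPE_REGISTER_WIDTH - 1;

        printf("GPE0: %u registers, GPE 0..%u\n", register_count0, gpe_number_max);

        if (gpe_number_max >= gpe1_base)
                printf("GPE0 overlaps GPE1 (base 0x%02X) - GPE1 would be ignored\n",
                       gpe1_base);
        else
                printf("no overlap with GPE1 at base 0x%02X\n", gpe1_base);
        return 0;
}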
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index bf63edc6608d..1b784ffe54c3 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,18 @@ static const char *acpi_notify_value_names[] = {
63}; 63};
64#endif 64#endif
65 65
66/* Pointer to FACS needed for the Global Lock */
67
68static struct acpi_table_facs *facs = NULL;
69
66/* Local prototypes */ 70/* Local prototypes */
67 71
68static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); 72static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
69 73
70static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context);
71
72static u32 acpi_ev_global_lock_handler(void *context); 74static u32 acpi_ev_global_lock_handler(void *context);
73 75
76static acpi_status acpi_ev_remove_global_lock_handler(void);
77
74/******************************************************************************* 78/*******************************************************************************
75 * 79 *
76 * FUNCTION: acpi_ev_is_notify_object 80 * FUNCTION: acpi_ev_is_notify_object
@@ -282,49 +286,19 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
282 286
283/******************************************************************************* 287/*******************************************************************************
284 * 288 *
285 * FUNCTION: acpi_ev_global_lock_thread
286 *
287 * PARAMETERS: Context - From thread interface, not used
288 *
289 * RETURN: None
290 *
291 * DESCRIPTION: Invoked by SCI interrupt handler upon acquisition of the
292 * Global Lock. Simply signal all threads that are waiting
293 * for the lock.
294 *
295 ******************************************************************************/
296
297static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
298{
299 acpi_status status;
300
301 /* Signal threads that are waiting for the lock */
302
303 if (acpi_gbl_global_lock_thread_count) {
304
305 /* Send sufficient units to the semaphore */
306
307 status =
308 acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
309 acpi_gbl_global_lock_thread_count);
310 if (ACPI_FAILURE(status)) {
311 ACPI_ERROR((AE_INFO,
312 "Could not signal Global Lock semaphore"));
313 }
314 }
315}
316
317/*******************************************************************************
318 *
319 * FUNCTION: acpi_ev_global_lock_handler 289 * FUNCTION: acpi_ev_global_lock_handler
320 * 290 *
321 * PARAMETERS: Context - From thread interface, not used 291 * PARAMETERS: Context - From thread interface, not used
322 * 292 *
323 * RETURN: ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED 293 * RETURN: ACPI_INTERRUPT_HANDLED
324 * 294 *
325 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 295 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
326 * release interrupt occurs. Grab the global lock and queue 296 * release interrupt occurs. Attempt to acquire the global lock,
327 * the global lock thread for execution 297 * if successful, signal the thread waiting for the lock.
298 *
299 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
300 * this is not possible for some reason, a separate thread will have to be
301 * scheduled to do this.
328 * 302 *
329 ******************************************************************************/ 303 ******************************************************************************/
330 304
@@ -333,16 +307,24 @@ static u32 acpi_ev_global_lock_handler(void *context)
333 u8 acquired = FALSE; 307 u8 acquired = FALSE;
334 308
335 /* 309 /*
336 * Attempt to get the lock 310 * Attempt to get the lock.
311 *
337 * If we don't get it now, it will be marked pending and we will 312 * If we don't get it now, it will be marked pending and we will
338 * take another interrupt when it becomes free. 313 * take another interrupt when it becomes free.
339 */ 314 */
340 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 315 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
341 if (acquired) { 316 if (acquired) {
342 317
343 /* Got the lock, now wake all threads waiting for it */ 318 /* Got the lock, now wake all threads waiting for it */
319
344 acpi_gbl_global_lock_acquired = TRUE; 320 acpi_gbl_global_lock_acquired = TRUE;
345 acpi_ev_global_lock_thread(context); 321 /* Send a unit to the semaphore */
322
323 if (ACPI_FAILURE(acpi_os_signal_semaphore(
324 acpi_gbl_global_lock_semaphore, 1))) {
325 ACPI_ERROR((AE_INFO,
326 "Could not signal Global Lock semaphore"));
327 }
346 } 328 }
347 329
348 return (ACPI_INTERRUPT_HANDLED); 330 return (ACPI_INTERRUPT_HANDLED);
@@ -366,6 +348,13 @@ acpi_status acpi_ev_init_global_lock_handler(void)
366 348
367 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 349 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
368 350
351 status =
352 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
353 (struct acpi_table_header **)&facs);
354 if (ACPI_FAILURE(status)) {
355 return_ACPI_STATUS(status);
356 }
357
369 acpi_gbl_global_lock_present = TRUE; 358 acpi_gbl_global_lock_present = TRUE;
370 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, 359 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
371 acpi_ev_global_lock_handler, 360 acpi_ev_global_lock_handler,
@@ -389,6 +378,31 @@ acpi_status acpi_ev_init_global_lock_handler(void)
389 return_ACPI_STATUS(status); 378 return_ACPI_STATUS(status);
390} 379}
391 380
381/*******************************************************************************
382 *
383 * FUNCTION: acpi_ev_remove_global_lock_handler
384 *
385 * PARAMETERS: None
386 *
387 * RETURN: Status
388 *
389 * DESCRIPTION: Remove the handler for the Global Lock
390 *
391 ******************************************************************************/
392
393static acpi_status acpi_ev_remove_global_lock_handler(void)
394{
395 acpi_status status;
396
397 ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
398
399 acpi_gbl_global_lock_present = FALSE;
400 status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
401 acpi_ev_global_lock_handler);
402
403 return_ACPI_STATUS(status);
404}
405
392/****************************************************************************** 406/******************************************************************************
393 * 407 *
394 * FUNCTION: acpi_ev_acquire_global_lock 408 * FUNCTION: acpi_ev_acquire_global_lock
@@ -399,6 +413,16 @@ acpi_status acpi_ev_init_global_lock_handler(void)
399 * 413 *
400 * DESCRIPTION: Attempt to gain ownership of the Global Lock. 414 * DESCRIPTION: Attempt to gain ownership of the Global Lock.
401 * 415 *
416 * MUTEX: Interpreter must be locked
417 *
418 * Note: The original implementation allowed multiple threads to "acquire" the
419 * Global Lock, and the OS would hold the lock until the last thread had
420 * released it. However, this could potentially starve the BIOS out of the
421 * lock, especially in the case where there is a tight handshake between the
422 * Embedded Controller driver and the BIOS. Therefore, this implementation
423 * allows only one thread to acquire the HW Global Lock at a time, and makes
424 * the global lock appear as a standard mutex on the OS side.
425 *
402 *****************************************************************************/ 426 *****************************************************************************/
403 427
404acpi_status acpi_ev_acquire_global_lock(u16 timeout) 428acpi_status acpi_ev_acquire_global_lock(u16 timeout)
@@ -408,53 +432,51 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
408 432
409 ACPI_FUNCTION_TRACE(ev_acquire_global_lock); 433 ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
410 434
411#ifndef ACPI_APPLICATION 435 /*
412 /* Make sure that we actually have a global lock */ 436 * Only one thread can acquire the GL at a time, the global_lock_mutex
413 437 * enforces this. This interface releases the interpreter if we must wait.
414 if (!acpi_gbl_global_lock_present) { 438 */
415 return_ACPI_STATUS(AE_NO_GLOBAL_LOCK); 439 status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
440 if (ACPI_FAILURE(status)) {
441 return_ACPI_STATUS(status);
416 } 442 }
417#endif
418
419 /* One more thread wants the global lock */
420
421 acpi_gbl_global_lock_thread_count++;
422 443
423 /* 444 /*
424 * If we (OS side vs. BIOS side) have the hardware lock already, 445 * Make sure that a global lock actually exists. If not, just treat
425 * we are done 446 * the lock as a standard mutex.
426 */ 447 */
427 if (acpi_gbl_global_lock_acquired) { 448 if (!acpi_gbl_global_lock_present) {
449 acpi_gbl_global_lock_acquired = TRUE;
428 return_ACPI_STATUS(AE_OK); 450 return_ACPI_STATUS(AE_OK);
429 } 451 }
430 452
431 /* We must acquire the actual hardware lock */ 453 /* Attempt to acquire the actual hardware lock */
432 454
433 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 455 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
434 if (acquired) { 456 if (acquired) {
435 457
436 /* We got the lock */ 458 /* We got the lock */
437 459
438 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 460 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
439 "Acquired the HW Global Lock\n")); 461 "Acquired hardware Global Lock\n"));
440 462
441 acpi_gbl_global_lock_acquired = TRUE; 463 acpi_gbl_global_lock_acquired = TRUE;
442 return_ACPI_STATUS(AE_OK); 464 return_ACPI_STATUS(AE_OK);
443 } 465 }
444 466
445 /* 467 /*
446 * Did not get the lock. The pending bit was set above, and we must now 468 * Did not get the lock. The pending bit was set above, and we must now
447 * wait until we get the global lock released interrupt. 469 * wait until we get the global lock released interrupt.
448 */ 470 */
449 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n")); 471 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
450 472
451 /* 473 /*
452 * Acquire the global lock semaphore first. 474 * Wait for handshake with the global lock interrupt handler.
453 * Since this wait will block, we must release the interpreter 475 * This interface releases the interpreter if we must wait.
454 */ 476 */
455 status = 477 status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
456 acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 478 ACPI_WAIT_FOREVER);
457 timeout); 479
458 return_ACPI_STATUS(status); 480 return_ACPI_STATUS(status);
459} 481}
460 482
@@ -477,38 +499,39 @@ acpi_status acpi_ev_release_global_lock(void)
477 499
478 ACPI_FUNCTION_TRACE(ev_release_global_lock); 500 ACPI_FUNCTION_TRACE(ev_release_global_lock);
479 501
480 if (!acpi_gbl_global_lock_thread_count) { 502 /* Lock must be already acquired */
503
504 if (!acpi_gbl_global_lock_acquired) {
481 ACPI_WARNING((AE_INFO, 505 ACPI_WARNING((AE_INFO,
482 "Cannot release HW Global Lock, it has not been acquired")); 506 "Cannot release the ACPI Global Lock, it has not been acquired"));
483 return_ACPI_STATUS(AE_NOT_ACQUIRED); 507 return_ACPI_STATUS(AE_NOT_ACQUIRED);
484 } 508 }
485 509
486 /* One fewer thread has the global lock */ 510 if (acpi_gbl_global_lock_present) {
487 511
488 acpi_gbl_global_lock_thread_count--; 512 /* Allow any thread to release the lock */
489 if (acpi_gbl_global_lock_thread_count) {
490 513
491 /* There are still some threads holding the lock, cannot release */ 514 ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
492 515
493 return_ACPI_STATUS(AE_OK); 516 /*
517 * If the pending bit was set, we must write GBL_RLS to the control
518 * register
519 */
520 if (pending) {
521 status =
522 acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
523 1);
524 }
525
526 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
527 "Released hardware Global Lock\n"));
494 } 528 }
495 529
496 /*
497 * No more threads holding lock, we can do the actual hardware
498 * release
499 */
500 ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, pending);
501 acpi_gbl_global_lock_acquired = FALSE; 530 acpi_gbl_global_lock_acquired = FALSE;
502 531
503 /* 532 /* Release the local GL mutex */
504 * If the pending bit was set, we must write GBL_RLS to the control
505 * register
506 */
507 if (pending) {
508 status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
509 1, ACPI_MTX_LOCK);
510 }
511 533
534 acpi_os_release_mutex(acpi_gbl_global_lock_mutex);
512 return_ACPI_STATUS(status); 535 return_ACPI_STATUS(status);
513} 536}
514 537
@@ -558,6 +581,12 @@ void acpi_ev_terminate(void)
558 if (ACPI_FAILURE(status)) { 581 if (ACPI_FAILURE(status)) {
559 ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); 582 ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
560 } 583 }
584
585 status = acpi_ev_remove_global_lock_handler();
586 if (ACPI_FAILURE(status)) {
587 ACPI_ERROR((AE_INFO,
588 "Could not remove Global Lock handler"));
589 }
561 } 590 }
562 591
563 /* Deallocate all handler objects installed within GPE info structs */ 592 /* Deallocate all handler objects installed within GPE info structs */
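The evmisc.c rework replaces the per-thread global-lock counting with a single OS mutex: one thread at a time reaches the hardware lock in the FACS, and if the lock is pending the SCI-level handler acquires it and signals a one-unit semaphore. The condensed model below only illustrates that control flow; every type and helper in it is a stand-in, not the ACPICA API, and the mutex/semaphore steps are reduced to comments.

/* Condensed model of the reworked Global Lock acquire/release flow. */
#include <stdbool.h>
#include <stdio.h>

static bool hw_lock_owned;          /* models the FACS global-lock bit */
static bool gl_present = true;
static bool gl_acquired;

static bool try_hw_lock(void)
{
        if (hw_lock_owned)
                return false;
        hw_lock_owned = true;
        return true;
}

static void wait_for_release_interrupt(void)
{
        /* Would block on the GL semaphore; the SCI handler grabs the
         * hardware lock on our behalf and signals one unit. */
        hw_lock_owned = true;
}

static void acquire_global_lock(void)
{
        /* 1. os_mutex_lock(global_lock_mutex): only one thread past this point */

        if (!gl_present) {               /* no HW lock: behave as a plain mutex */
                gl_acquired = true;
                return;
        }
        if (try_hw_lock()) {             /* got the hardware lock immediately */
                gl_acquired = true;
                return;
        }
        wait_for_release_interrupt();    /* pending bit set; handler signals us */
        gl_acquired = true;
}

static void release_global_lock(void)
{
        if (gl_present)
                hw_lock_owned = false;   /* plus GBL_RLS write if pending was set */
        gl_acquired = false;
        /* 2. os_mutex_unlock(global_lock_mutex) */
}

int main(void)
{
        acquire_global_lock();
        printf("acquired: %d\n", gl_acquired);
        release_global_lock();
        printf("acquired: %d\n", gl_acquired);
        return 0;
}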
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 21caae04fe85..e99f0c435a47 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
291 u32 bit_width, acpi_integer * value) 291 u32 bit_width, acpi_integer * value)
292{ 292{
293 acpi_status status; 293 acpi_status status;
294 acpi_status status2;
295 acpi_adr_space_handler handler; 294 acpi_adr_space_handler handler;
296 acpi_adr_space_setup region_setup; 295 acpi_adr_space_setup region_setup;
297 union acpi_operand_object *handler_desc; 296 union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
345 * setup will potentially execute control methods 344 * setup will potentially execute control methods
346 * (e.g., _REG method for this region) 345 * (e.g., _REG method for this region)
347 */ 346 */
348 acpi_ex_exit_interpreter(); 347 acpi_ex_relinquish_interpreter();
349 348
350 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 349 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
351 handler_desc->address_space.context, 350 handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 352
354 /* Re-enter the interpreter */ 353 /* Re-enter the interpreter */
355 354
356 status2 = acpi_ex_enter_interpreter(); 355 acpi_ex_reacquire_interpreter();
357 if (ACPI_FAILURE(status2)) {
358 return_ACPI_STATUS(status2);
359 }
360 356
361 /* Check for failure of the Region Setup */ 357 /* Check for failure of the Region Setup */
362 358
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
409 * exit the interpreter because the handler *might* block -- we don't 405 * exit the interpreter because the handler *might* block -- we don't
410 * know what it will do, so we can't hold the lock on the interpreter. 406 * know what it will do, so we can't hold the lock on the interpreter.
411 */ 407 */
412 acpi_ex_exit_interpreter(); 408 acpi_ex_relinquish_interpreter();
413 } 409 }
414 410
415 /* Call the handler */ 411 /* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
430 * We just returned from a non-default handler, we must re-enter the 426 * We just returned from a non-default handler, we must re-enter the
431 * interpreter 427 * interpreter
432 */ 428 */
433 status2 = acpi_ex_enter_interpreter(); 429 acpi_ex_reacquire_interpreter();
434 if (ACPI_FAILURE(status2)) {
435 return_ACPI_STATUS(status2);
436 }
437 } 430 }
438 431
439 return_ACPI_STATUS(status); 432 return_ACPI_STATUS(status);
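In evregion.c the enter/exit pair becomes acpi_ex_relinquish_interpreter()/acpi_ex_reacquire_interpreter(), which return void, so the extra status2 variable and its failure branch disappear. A trivial sketch of the resulting calling pattern is below; the function names are local stand-ins that only mirror the shape of the ACPICA helpers.

/* Sketch of the simplified calling pattern around a region handler. */
#include <stdio.h>

static void relinquish_interpreter(void) { puts("interpreter released"); }
static void reacquire_interpreter(void)  { puts("interpreter re-acquired"); }

static int call_region_handler(void)
{
        int status;

        relinquish_interpreter();        /* handler may block or run _REG */
        status = 0;                      /* ... invoke the handler here ... */
        reacquire_interpreter();         /* returns void: nothing to check */

        return status;
}

int main(void)
{
        return call_region_handler();
}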
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 203d1359190a..a4fa7e6822a3 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,11 @@
48#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evrgnini") 49ACPI_MODULE_NAME("evrgnini")
50 50
51/* Local prototypes */
52static u8 acpi_ev_match_pci_root_bridge(char *id);
53
54static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
55
51/******************************************************************************* 56/*******************************************************************************
52 * 57 *
53 * FUNCTION: acpi_ev_system_memory_region_setup 58 * FUNCTION: acpi_ev_system_memory_region_setup
@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
62 * DESCRIPTION: Setup a system_memory operation region 67 * DESCRIPTION: Setup a system_memory operation region
63 * 68 *
64 ******************************************************************************/ 69 ******************************************************************************/
70
65acpi_status 71acpi_status
66acpi_ev_system_memory_region_setup(acpi_handle handle, 72acpi_ev_system_memory_region_setup(acpi_handle handle,
67 u32 function, 73 u32 function,
@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
168 union acpi_operand_object *handler_obj; 174 union acpi_operand_object *handler_obj;
169 struct acpi_namespace_node *parent_node; 175 struct acpi_namespace_node *parent_node;
170 struct acpi_namespace_node *pci_root_node; 176 struct acpi_namespace_node *pci_root_node;
177 struct acpi_namespace_node *pci_device_node;
171 union acpi_operand_object *region_obj = 178 union acpi_operand_object *region_obj =
172 (union acpi_operand_object *)handle; 179 (union acpi_operand_object *)handle;
173 struct acpi_device_id object_hID;
174 180
175 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup); 181 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
176 182
@@ -215,45 +221,30 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
215 221
216 pci_root_node = parent_node; 222 pci_root_node = parent_node;
217 while (pci_root_node != acpi_gbl_root_node) { 223 while (pci_root_node != acpi_gbl_root_node) {
218 status =
219 acpi_ut_execute_HID(pci_root_node, &object_hID);
220 if (ACPI_SUCCESS(status)) {
221 /*
222 * Got a valid _HID string, check if this is a PCI root.
223 * New for ACPI 3.0: check for a PCI Express root also.
224 */
225 if (!
226 (ACPI_STRNCMP
227 (object_hID.value, PCI_ROOT_HID_STRING,
228 sizeof(PCI_ROOT_HID_STRING)))
229 ||
230 !(ACPI_STRNCMP
231 (object_hID.value,
232 PCI_EXPRESS_ROOT_HID_STRING,
233 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
234
235 /* Install a handler for this PCI root bridge */
236 224
237 status = 225 /* Get the _HID/_CID in order to detect a root_bridge */
238 acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); 226
239 if (ACPI_FAILURE(status)) { 227 if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
240 if (status == AE_SAME_HANDLER) { 228
241 /* 229 /* Install a handler for this PCI root bridge */
242 * It is OK if the handler is already installed on the root 230
243 * bridge. Still need to return a context object for the 231 status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
244 * new PCI_Config operation region, however. 232 if (ACPI_FAILURE(status)) {
245 */ 233 if (status == AE_SAME_HANDLER) {
246 status = AE_OK; 234 /*
247 } else { 235 * It is OK if the handler is already installed on the root
248 ACPI_EXCEPTION((AE_INFO, 236 * bridge. Still need to return a context object for the
249 status, 237 * new PCI_Config operation region, however.
250 "Could not install PciConfig handler for Root Bridge %4.4s", 238 */
251 acpi_ut_get_node_name 239 status = AE_OK;
252 (pci_root_node))); 240 } else {
253 } 241 ACPI_EXCEPTION((AE_INFO, status,
242 "Could not install PciConfig handler for Root Bridge %4.4s",
243 acpi_ut_get_node_name
244 (pci_root_node)));
254 } 245 }
255 break;
256 } 246 }
247 break;
257 } 248 }
258 249
259 pci_root_node = acpi_ns_get_parent_node(pci_root_node); 250 pci_root_node = acpi_ns_get_parent_node(pci_root_node);
@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
282 /* 273 /*
283 * For PCI_Config space access, we need the segment, bus, 274 * For PCI_Config space access, we need the segment, bus,
284 * device and function numbers. Acquire them here. 275 * device and function numbers. Acquire them here.
276 *
277 * Find the parent device object. (This allows the operation region to be
278 * within a subscope under the device, such as a control method.)
285 */ 279 */
280 pci_device_node = region_obj->region.node;
281 while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
282 pci_device_node = acpi_ns_get_parent_node(pci_device_node);
283 }
284
285 if (!pci_device_node) {
286 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
287 }
286 288
287 /* 289 /*
288 * Get the PCI device and function numbers from the _ADR object 290 * Get the PCI device and function numbers from the _ADR object
289 * contained in the parent's scope. 291 * contained in the parent's scope.
290 */ 292 */
291 status = 293 status =
292 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node, 294 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
293 &pci_value); 295 &pci_value);
294 296
295 /* 297 /*
@@ -329,6 +331,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
329 331
330/******************************************************************************* 332/*******************************************************************************
331 * 333 *
334 * FUNCTION: acpi_ev_match_pci_root_bridge
335 *
336 * PARAMETERS: Id - The HID/CID in string format
337 *
338 * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
339 *
340 * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
341 *
342 ******************************************************************************/
343
344static u8 acpi_ev_match_pci_root_bridge(char *id)
345{
346
347 /*
348 * Check if this is a PCI root.
349 * ACPI 3.0+: check for a PCI Express root also.
350 */
351 if (!(ACPI_STRNCMP(id,
352 PCI_ROOT_HID_STRING,
353 sizeof(PCI_ROOT_HID_STRING))) ||
354 !(ACPI_STRNCMP(id,
355 PCI_EXPRESS_ROOT_HID_STRING,
356 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
357 return (TRUE);
358 }
359
360 return (FALSE);
361}
362
363/*******************************************************************************
364 *
365 * FUNCTION: acpi_ev_is_pci_root_bridge
366 *
367 * PARAMETERS: Node - Device node being examined
368 *
369 * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
370 *
371 * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
372 * examining the _HID and _CID for the device.
373 *
374 ******************************************************************************/
375
376static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
377{
378 acpi_status status;
379 struct acpi_device_id hid;
380 struct acpi_compatible_id_list *cid;
381 acpi_native_uint i;
382
383 /*
384 * Get the _HID and check for a PCI Root Bridge
385 */
386 status = acpi_ut_execute_HID(node, &hid);
387 if (ACPI_FAILURE(status)) {
388 return (FALSE);
389 }
390
391 if (acpi_ev_match_pci_root_bridge(hid.value)) {
392 return (TRUE);
393 }
394
395 /*
396 * The _HID did not match.
397 * Get the _CID and check for a PCI Root Bridge
398 */
399 status = acpi_ut_execute_CID(node, &cid);
400 if (ACPI_FAILURE(status)) {
401 return (FALSE);
402 }
403
404 /* Check all _CIDs in the returned list */
405
406 for (i = 0; i < cid->count; i++) {
407 if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
408 ACPI_FREE(cid);
409 return (TRUE);
410 }
411 }
412
413 ACPI_FREE(cid);
414 return (FALSE);
415}
416
417/*******************************************************************************
418 *
332 * FUNCTION: acpi_ev_pci_bar_region_setup 419 * FUNCTION: acpi_ev_pci_bar_region_setup
333 * 420 *
334 * PARAMETERS: Handle - Region we are interested in 421 * PARAMETERS: Handle - Region we are interested in
@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
432 * a PCI address in the scope of the definition. This address is 519 * a PCI address in the scope of the definition. This address is
433 * required to perform an access to PCI config space. 520 * required to perform an access to PCI config space.
434 * 521 *
522 * MUTEX: Interpreter should be unlocked, because we may run the _REG
523 * method for this region.
524 *
435 ******************************************************************************/ 525 ******************************************************************************/
436 526
437acpi_status 527acpi_status
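The evrgnini.c hunks factor the root-bridge test into acpi_ev_match_pci_root_bridge()/acpi_ev_is_pci_root_bridge(), which now consult the _CID list as well as the _HID, and the _ADR lookup walks up to the enclosing device node. The standalone model below covers just the ID-matching part; the device structure is a stand-in, while PNP0A03/PNP0A08 are the usual PCI and PCI Express root-bridge HID strings.

/* Standalone model of the PCI root-bridge _HID/_CID matching. */
#include <stdio.h>
#include <string.h>

#define PCI_ROOT_HID_STRING          "PNP0A03"
#define PCI_EXPRESS_ROOT_HID_STRING  "PNP0A08"

static int match_pci_root_bridge(const char *id)
{
        return !strncmp(id, PCI_ROOT_HID_STRING, sizeof(PCI_ROOT_HID_STRING)) ||
               !strncmp(id, PCI_EXPRESS_ROOT_HID_STRING,
                        sizeof(PCI_EXPRESS_ROOT_HID_STRING));
}

struct device {
        const char *hid;
        const char *cids[4];    /* NULL-terminated list of _CID strings */
};

static int is_pci_root_bridge(const struct device *dev)
{
        int i;

        if (dev->hid && match_pci_root_bridge(dev->hid))
                return 1;
        for (i = 0; dev->cids[i]; i++)
                if (match_pci_root_bridge(dev->cids[i]))
                        return 1;
        return 0;
}

int main(void)
{
        struct device d = { .hid = "ACME0001", .cids = { "PNP0A08", NULL } };

        printf("root bridge: %s\n", is_pci_root_bridge(&d) ? "yes" : "no");
        return 0;
}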
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 8106215ad554..7e5d15ce2395 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,10 @@ u32 acpi_ev_install_sci_handler(void)
142 142
143 ACPI_FUNCTION_TRACE(ev_install_sci_handler); 143 ACPI_FUNCTION_TRACE(ev_install_sci_handler);
144 144
145 status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 145 status =
146 acpi_ev_sci_xrupt_handler, 146 acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
147 acpi_gbl_gpe_xrupt_list_head); 147 acpi_ev_sci_xrupt_handler,
148 acpi_gbl_gpe_xrupt_list_head);
148 return_ACPI_STATUS(status); 149 return_ACPI_STATUS(status);
149} 150}
150 151
@@ -175,8 +176,9 @@ acpi_status acpi_ev_remove_sci_handler(void)
175 176
176 /* Just let the OS remove the handler and disable the level */ 177 /* Just let the OS remove the handler and disable the level */
177 178
178 status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 179 status =
179 acpi_ev_sci_xrupt_handler); 180 acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
181 acpi_ev_sci_xrupt_handler);
180 182
181 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
182} 184}
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index 923fd2b46955..685a103a3587 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
768 return (AE_BAD_PARAMETER); 768 return (AE_BAD_PARAMETER);
769 } 769 }
770 770
771 status = acpi_ex_enter_interpreter(); 771 /* Must lock interpreter to prevent race conditions */
772 if (ACPI_FAILURE(status)) {
773 return (status);
774 }
775 772
773 acpi_ex_enter_interpreter();
776 status = acpi_ev_acquire_global_lock(timeout); 774 status = acpi_ev_acquire_global_lock(timeout);
777 acpi_ex_exit_interpreter(); 775 acpi_ex_exit_interpreter();
778 776
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 7ebc2efac936..17065e98807c 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include <acpi/acevents.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evxfevnt") 50ACPI_MODULE_NAME("evxfevnt")
@@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
65 66
66 ACPI_FUNCTION_TRACE(acpi_enable); 67 ACPI_FUNCTION_TRACE(acpi_enable);
67 68
68 /* Make sure we have the FADT */ 69 /* ACPI tables must be present */
69 70
70 if (!acpi_gbl_FADT) { 71 if (!acpi_tb_tables_loaded()) {
71 ACPI_WARNING((AE_INFO, "No FADT information present!"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 } 73 }
74 74
75 /* Check current mode */
76
75 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 77 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
76 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 78 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
77 "System is already in ACPI mode\n")); 79 "System is already in ACPI mode\n"));
@@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
111 113
112 ACPI_FUNCTION_TRACE(acpi_disable); 114 ACPI_FUNCTION_TRACE(acpi_disable);
113 115
114 if (!acpi_gbl_FADT) {
115 ACPI_WARNING((AE_INFO, "No FADT information present!"));
116 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
117 }
118
119 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { 116 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
120 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 117 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
121 "System is already in legacy (non-ACPI) mode\n")); 118 "System is already in legacy (non-ACPI) mode\n"));
@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
169 */ 166 */
170 status = 167 status =
171 acpi_set_register(acpi_gbl_fixed_event_info[event]. 168 acpi_set_register(acpi_gbl_fixed_event_info[event].
172 enable_register_id, 1, ACPI_MTX_LOCK); 169 enable_register_id, 1);
173 if (ACPI_FAILURE(status)) { 170 if (ACPI_FAILURE(status)) {
174 return_ACPI_STATUS(status); 171 return_ACPI_STATUS(status);
175 } 172 }
@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
178 175
179 status = 176 status =
180 acpi_get_register(acpi_gbl_fixed_event_info[event]. 177 acpi_get_register(acpi_gbl_fixed_event_info[event].
181 enable_register_id, &value, ACPI_MTX_LOCK); 178 enable_register_id, &value);
182 if (ACPI_FAILURE(status)) { 179 if (ACPI_FAILURE(status)) {
183 return_ACPI_STATUS(status); 180 return_ACPI_STATUS(status);
184 } 181 }
@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
368 */ 365 */
369 status = 366 status =
370 acpi_set_register(acpi_gbl_fixed_event_info[event]. 367 acpi_set_register(acpi_gbl_fixed_event_info[event].
371 enable_register_id, 0, ACPI_MTX_LOCK); 368 enable_register_id, 0);
372 if (ACPI_FAILURE(status)) { 369 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 370 return_ACPI_STATUS(status);
374 } 371 }
375 372
376 status = 373 status =
377 acpi_get_register(acpi_gbl_fixed_event_info[event]. 374 acpi_get_register(acpi_gbl_fixed_event_info[event].
378 enable_register_id, &value, ACPI_MTX_LOCK); 375 enable_register_id, &value);
379 if (ACPI_FAILURE(status)) { 376 if (ACPI_FAILURE(status)) {
380 return_ACPI_STATUS(status); 377 return_ACPI_STATUS(status);
381 } 378 }
@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
421 */ 418 */
422 status = 419 status =
423 acpi_set_register(acpi_gbl_fixed_event_info[event]. 420 acpi_set_register(acpi_gbl_fixed_event_info[event].
424 status_register_id, 1, ACPI_MTX_LOCK); 421 status_register_id, 1);
425 422
426 return_ACPI_STATUS(status); 423 return_ACPI_STATUS(status);
427} 424}
@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
510 507
511 status = 508 status =
512 acpi_get_register(acpi_gbl_fixed_event_info[event]. 509 acpi_get_register(acpi_gbl_fixed_event_info[event].
513 status_register_id, event_status, ACPI_MTX_LOCK); 510 status_register_id, event_status);
514 511
515 return_ACPI_STATUS(status); 512 return_ACPI_STATUS(status);
516} 513}
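In evxfevnt.c, acpi_enable() now gates on acpi_tb_tables_loaded() instead of testing the old acpi_gbl_FADT pointer, and acpi_disable() drops its FADT check entirely. A minimal model of that gating logic follows; the helpers and the mode handling are stand-ins for the real acpi_hw_get_mode()/mode-transition code.

/* Minimal model of the new acpi_enable() gating. */
#include <stdbool.h>
#include <stdio.h>

enum mode { MODE_LEGACY, MODE_ACPI };

static bool tables_loaded = true;
static enum mode current_mode = MODE_LEGACY;

static int do_acpi_enable(void)
{
        if (!tables_loaded)
                return -1;                       /* AE_NO_ACPI_TABLES */
        if (current_mode == MODE_ACPI)
                return 0;                        /* already in ACPI mode */
        current_mode = MODE_ACPI;                /* would transition hardware here */
        return 0;
}

int main(void)
{
        printf("acpi_enable -> %d, mode=%d\n", do_acpi_enable(), current_mode);
        return 0;
}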
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index 83b12a9afa32..7bf09c5fb242 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index c8341fa5fe01..25802f302ffe 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("exconfig")
54 54
55/* Local prototypes */ 55/* Local prototypes */
56static acpi_status 56static acpi_status
57acpi_ex_add_table(struct acpi_table_header *table, 57acpi_ex_add_table(acpi_native_uint table_index,
58 struct acpi_namespace_node *parent_node, 58 struct acpi_namespace_node *parent_node,
59 union acpi_operand_object **ddb_handle); 59 union acpi_operand_object **ddb_handle);
60 60
@@ -74,12 +74,11 @@ acpi_ex_add_table(struct acpi_table_header *table,
74 ******************************************************************************/ 74 ******************************************************************************/
75 75
76static acpi_status 76static acpi_status
77acpi_ex_add_table(struct acpi_table_header *table, 77acpi_ex_add_table(acpi_native_uint table_index,
78 struct acpi_namespace_node *parent_node, 78 struct acpi_namespace_node *parent_node,
79 union acpi_operand_object **ddb_handle) 79 union acpi_operand_object **ddb_handle)
80{ 80{
81 acpi_status status; 81 acpi_status status;
82 struct acpi_table_desc table_info;
83 union acpi_operand_object *obj_desc; 82 union acpi_operand_object *obj_desc;
84 83
85 ACPI_FUNCTION_TRACE(ex_add_table); 84 ACPI_FUNCTION_TRACE(ex_add_table);
@@ -98,42 +97,16 @@ acpi_ex_add_table(struct acpi_table_header *table,
98 97
99 /* Install the new table into the local data structures */ 98 /* Install the new table into the local data structures */
100 99
101 ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc)); 100 obj_desc->reference.object = ACPI_CAST_PTR(void, table_index);
102
103 table_info.type = ACPI_TABLE_ID_SSDT;
104 table_info.pointer = table;
105 table_info.length = (acpi_size) table->length;
106 table_info.allocation = ACPI_MEM_ALLOCATED;
107
108 status = acpi_tb_install_table(&table_info);
109 obj_desc->reference.object = table_info.installed_desc;
110
111 if (ACPI_FAILURE(status)) {
112 if (status == AE_ALREADY_EXISTS) {
113
114 /* Table already exists, just return the handle */
115
116 return_ACPI_STATUS(AE_OK);
117 }
118 goto cleanup;
119 }
120 101
121 /* Add the table to the namespace */ 102 /* Add the table to the namespace */
122 103
123 status = acpi_ns_load_table(table_info.installed_desc, parent_node); 104 status = acpi_ns_load_table(table_index, parent_node);
124 if (ACPI_FAILURE(status)) { 105 if (ACPI_FAILURE(status)) {
125 106 acpi_ut_remove_reference(obj_desc);
126 /* Uninstall table on error */ 107 *ddb_handle = NULL;
127
128 (void)acpi_tb_uninstall_table(table_info.installed_desc);
129 goto cleanup;
130 } 108 }
131 109
132 return_ACPI_STATUS(AE_OK);
133
134 cleanup:
135 acpi_ut_remove_reference(obj_desc);
136 *ddb_handle = NULL;
137 return_ACPI_STATUS(status); 110 return_ACPI_STATUS(status);
138} 111}
139 112
@@ -146,7 +119,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
146 * 119 *
147 * RETURN: Status 120 * RETURN: Status
148 * 121 *
149 * DESCRIPTION: Load an ACPI table 122 * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
150 * 123 *
151 ******************************************************************************/ 124 ******************************************************************************/
152 125
@@ -156,33 +129,20 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
156{ 129{
157 acpi_status status; 130 acpi_status status;
158 union acpi_operand_object **operand = &walk_state->operands[0]; 131 union acpi_operand_object **operand = &walk_state->operands[0];
159 struct acpi_table_header *table; 132 acpi_native_uint table_index;
160 struct acpi_namespace_node *parent_node; 133 struct acpi_namespace_node *parent_node;
161 struct acpi_namespace_node *start_node; 134 struct acpi_namespace_node *start_node;
162 struct acpi_namespace_node *parameter_node = NULL; 135 struct acpi_namespace_node *parameter_node = NULL;
163 union acpi_operand_object *ddb_handle; 136 union acpi_operand_object *ddb_handle;
137 struct acpi_table_header *table;
164 138
165 ACPI_FUNCTION_TRACE(ex_load_table_op); 139 ACPI_FUNCTION_TRACE(ex_load_table_op);
166 140
167#if 0 141 /* Find the ACPI table in the RSDT/XSDT */
168 /*
169 * Make sure that the signature does not match one of the tables that
170 * is already loaded.
171 */
172 status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
173 if (status == AE_OK) {
174
175 /* Signature matched -- don't allow override */
176
177 return_ACPI_STATUS(AE_ALREADY_EXISTS);
178 }
179#endif
180
181 /* Find the ACPI table */
182 142
183 status = acpi_tb_find_table(operand[0]->string.pointer, 143 status = acpi_tb_find_table(operand[0]->string.pointer,
184 operand[1]->string.pointer, 144 operand[1]->string.pointer,
185 operand[2]->string.pointer, &table); 145 operand[2]->string.pointer, &table_index);
186 if (ACPI_FAILURE(status)) { 146 if (ACPI_FAILURE(status)) {
187 if (status != AE_NOT_FOUND) { 147 if (status != AE_NOT_FOUND) {
188 return_ACPI_STATUS(status); 148 return_ACPI_STATUS(status);
@@ -245,7 +205,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
245 205
246 /* Load the table into the namespace */ 206 /* Load the table into the namespace */
247 207
248 status = acpi_ex_add_table(table, parent_node, &ddb_handle); 208 status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
249 if (ACPI_FAILURE(status)) { 209 if (ACPI_FAILURE(status)) {
250 return_ACPI_STATUS(status); 210 return_ACPI_STATUS(status);
251 } 211 }
@@ -266,9 +226,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
266 } 226 }
267 } 227 }
268 228
269 ACPI_INFO((AE_INFO, 229 status = acpi_get_table_by_index(table_index, &table);
270 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", 230 if (ACPI_SUCCESS(status)) {
271 table->signature, table->oem_id, table->oem_table_id)); 231 ACPI_INFO((AE_INFO,
232 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
233 table->signature, table->oem_id,
234 table->oem_table_id));
235 }
272 236
273 *return_desc = ddb_handle; 237 *return_desc = ddb_handle;
274 return_ACPI_STATUS(status); 238 return_ACPI_STATUS(status);
@@ -278,7 +242,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
278 * 242 *
279 * FUNCTION: acpi_ex_load_op 243 * FUNCTION: acpi_ex_load_op
280 * 244 *
281 * PARAMETERS: obj_desc - Region or Field where the table will be 245 * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
282 * obtained 246 * obtained
283 * Target - Where a handle to the table will be stored 247 * Target - Where a handle to the table will be stored
284 * walk_state - Current state 248 * walk_state - Current state
@@ -287,6 +251,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
287 * 251 *
288 * DESCRIPTION: Load an ACPI table from a field or operation region 252 * DESCRIPTION: Load an ACPI table from a field or operation region
289 * 253 *
254 * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
255 * objects before this code is reached.
256 *
257 * If source is an operation region, it must refer to system_memory, as
258 * per the ACPI specification.
259 *
290 ******************************************************************************/ 260 ******************************************************************************/
291 261
292acpi_status 262acpi_status
@@ -294,22 +264,26 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
294 union acpi_operand_object *target, 264 union acpi_operand_object *target,
295 struct acpi_walk_state *walk_state) 265 struct acpi_walk_state *walk_state)
296{ 266{
297 acpi_status status;
298 union acpi_operand_object *ddb_handle; 267 union acpi_operand_object *ddb_handle;
299 union acpi_operand_object *buffer_desc = NULL; 268 struct acpi_table_desc table_desc;
300 struct acpi_table_header *table_ptr = NULL; 269 acpi_native_uint table_index;
301 acpi_physical_address address; 270 acpi_status status;
302 struct acpi_table_header table_header;
303 acpi_integer temp;
304 u32 i;
305 271
306 ACPI_FUNCTION_TRACE(ex_load_op); 272 ACPI_FUNCTION_TRACE(ex_load_op);
307 273
308 /* Object can be either an op_region or a Field */ 274 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
275
276 /* Source Object can be either an op_region or a Buffer/Field */
309 277
310 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 278 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
311 case ACPI_TYPE_REGION: 279 case ACPI_TYPE_REGION:
312 280
281 /* Region must be system_memory (from ACPI spec) */
282
283 if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
284 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
285 }
286
313 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n", 287 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
314 obj_desc, 288 obj_desc,
315 acpi_ut_get_object_type_name(obj_desc))); 289 acpi_ut_get_object_type_name(obj_desc)));
@@ -325,113 +299,41 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
325 } 299 }
326 } 300 }
327 301
328 /* Get the base physical address of the region */ 302 table_desc.address = obj_desc->region.address;
329 303 table_desc.length = obj_desc->region.length;
330 address = obj_desc->region.address; 304 table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
331
332 /* Get part of the table header to get the table length */
333
334 table_header.length = 0;
335 for (i = 0; i < 8; i++) {
336 status =
337 acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
338 (acpi_physical_address)
339 (i + address), 8,
340 &temp);
341 if (ACPI_FAILURE(status)) {
342 return_ACPI_STATUS(status);
343 }
344
345 /* Get the one valid byte of the returned 64-bit value */
346
347 ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
348 }
349
350 /* Sanity check the table length */
351
352 if (table_header.length < sizeof(struct acpi_table_header)) {
353 return_ACPI_STATUS(AE_BAD_HEADER);
354 }
355
356 /* Allocate a buffer for the entire table */
357
358 table_ptr = ACPI_ALLOCATE(table_header.length);
359 if (!table_ptr) {
360 return_ACPI_STATUS(AE_NO_MEMORY);
361 }
362
363 /* Get the entire table from the op region */
364
365 for (i = 0; i < table_header.length; i++) {
366 status =
367 acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
368 (acpi_physical_address)
369 (i + address), 8,
370 &temp);
371 if (ACPI_FAILURE(status)) {
372 goto cleanup;
373 }
374
375 /* Get the one valid byte of the returned 64-bit value */
376
377 ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
378 }
379 break; 305 break;
380 306
381 case ACPI_TYPE_LOCAL_REGION_FIELD: 307 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
382 case ACPI_TYPE_LOCAL_BANK_FIELD:
383 case ACPI_TYPE_LOCAL_INDEX_FIELD:
384 308
385 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Field %p %s\n", 309 /* Simply extract the buffer from the buffer object */
386 obj_desc,
387 acpi_ut_get_object_type_name(obj_desc)));
388 310
389 /* 311 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
390 * The length of the field must be at least as large as the table. 312 "Load from Buffer or Field %p %s\n", obj_desc,
391 * Read the entire field and thus the entire table. Buffer is 313 acpi_ut_get_object_type_name(obj_desc)));
392 * allocated during the read.
393 */
394 status =
395 acpi_ex_read_data_from_field(walk_state, obj_desc,
396 &buffer_desc);
397 if (ACPI_FAILURE(status)) {
398 return_ACPI_STATUS(status);
399 }
400
401 table_ptr = ACPI_CAST_PTR(struct acpi_table_header,
402 buffer_desc->buffer.pointer);
403
404 /* All done with the buffer_desc, delete it */
405
406 buffer_desc->buffer.pointer = NULL;
407 acpi_ut_remove_reference(buffer_desc);
408 314
409 /* Sanity check the table length */ 315 table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header,
316 obj_desc->buffer.pointer);
317 table_desc.length = table_desc.pointer->length;
318 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
410 319
411 if (table_ptr->length < sizeof(struct acpi_table_header)) { 320 obj_desc->buffer.pointer = NULL;
412 status = AE_BAD_HEADER;
413 goto cleanup;
414 }
415 break; 321 break;
416 322
417 default: 323 default:
418 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 324 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
419 } 325 }
420 326
421 /* The table must be either an SSDT or a PSDT */ 327 /*
422 328 * Install the new table into the local data structures
423 if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) && 329 */
424 (!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) { 330 status = acpi_tb_add_table(&table_desc, &table_index);
425 ACPI_ERROR((AE_INFO, 331 if (ACPI_FAILURE(status)) {
426 "Table has invalid signature [%4.4s], must be SSDT or PSDT",
427 table_ptr->signature));
428 status = AE_BAD_SIGNATURE;
429 goto cleanup; 332 goto cleanup;
430 } 333 }
431 334
432 /* Install the new table into the local data structures */ 335 status =
433 336 acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
434 status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
435 if (ACPI_FAILURE(status)) { 337 if (ACPI_FAILURE(status)) {
436 338
437 /* On error, table_ptr was deallocated above */ 339 /* On error, table_ptr was deallocated above */
@@ -450,13 +352,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
450 return_ACPI_STATUS(status); 352 return_ACPI_STATUS(status);
451 } 353 }
452 354
453 ACPI_INFO((AE_INFO,
454 "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]",
455 table_ptr->oem_id, table_ptr->oem_table_id));
456
457 cleanup: 355 cleanup:
458 if (ACPI_FAILURE(status)) { 356 if (ACPI_FAILURE(status)) {
459 ACPI_FREE(table_ptr); 357 acpi_tb_delete_table(&table_desc);
460 } 358 }
461 return_ACPI_STATUS(status); 359 return_ACPI_STATUS(status);
462} 360}
@@ -477,7 +375,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
477{ 375{
478 acpi_status status = AE_OK; 376 acpi_status status = AE_OK;
479 union acpi_operand_object *table_desc = ddb_handle; 377 union acpi_operand_object *table_desc = ddb_handle;
480 struct acpi_table_desc *table_info; 378 acpi_native_uint table_index;
481 379
482 ACPI_FUNCTION_TRACE(ex_unload_table); 380 ACPI_FUNCTION_TRACE(ex_unload_table);
483 381
@@ -493,19 +391,18 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
493 return_ACPI_STATUS(AE_BAD_PARAMETER); 391 return_ACPI_STATUS(AE_BAD_PARAMETER);
494 } 392 }
495 393
496 /* Get the actual table descriptor from the ddb_handle */ 394 /* Get the table index from the ddb_handle */
497 395
498 table_info = (struct acpi_table_desc *)table_desc->reference.object; 396 table_index = (acpi_native_uint) table_desc->reference.object;
499 397
500 /* 398 /*
501 * Delete the entire namespace under this table Node 399 * Delete the entire namespace under this table Node
502 * (Offset contains the table_id) 400 * (Offset contains the table_id)
503 */ 401 */
504 acpi_ns_delete_namespace_by_owner(table_info->owner_id); 402 acpi_tb_delete_namespace_by_owner(table_index);
505 403 acpi_tb_release_owner_id(table_index);
506 /* Delete the table itself */
507 404
508 (void)acpi_tb_uninstall_table(table_info->installed_desc); 405 acpi_tb_set_table_loaded_flag(table_index, FALSE);
509 406
510 /* Delete the table descriptor (ddb_handle) */ 407 /* Delete the table descriptor (ddb_handle) */
511 408
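
The exconfig.c changes above switch the Load/LoadTable path from passing table pointers and struct acpi_table_desc around to passing an index into the root table list: acpi_tb_find_table() now returns a table index, acpi_ex_add_table() consumes it, and acpi_get_table_by_index() recovers the header only where it is needed for the log message. A minimal sketch of that flow, assuming the post-patch signatures shown in the hunks; the wrapper itself is hypothetical and ignores namespace-node setup.

/*
 * Hypothetical sketch of the index-based table load flow. Not part of
 * the patch; acpi_ex_add_table() is static in exconfig.c, so this only
 * illustrates the order of operations.
 */
static acpi_status load_table_by_index_sketch(char *signature, char *oem_id,
					      char *oem_table_id,
					      struct acpi_namespace_node *parent_node,
					      union acpi_operand_object **ddb_handle)
{
	acpi_native_uint table_index;
	struct acpi_table_header *table;
	acpi_status status;

	/* Locate the table in the RSDT/XSDT; only the index comes back */
	status = acpi_tb_find_table(signature, oem_id, oem_table_id,
				    &table_index);
	if (ACPI_FAILURE(status)) {
		return status;
	}

	/* Load the table into the namespace via its index */
	status = acpi_ex_add_table(table_index, parent_node, ddb_handle);
	if (ACPI_FAILURE(status)) {
		return status;
	}

	/* The header is only fetched for the "Dynamic OEM Table Load" message */
	status = acpi_get_table_by_index(table_index, &table);
	if (ACPI_SUCCESS(status)) {
		ACPI_INFO((AE_INFO, "Loaded table [%4.4s]", table->signature));
	}
	return status;
}
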
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index 544e81a6a438..d470e8b1f4ea 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 34eec82c1b1e..7c38528a7e83 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
359 union acpi_operand_object **operand = &walk_state->operands[0]; 359 union acpi_operand_object **operand = &walk_state->operands[0];
360 union acpi_operand_object *obj_desc; 360 union acpi_operand_object *obj_desc;
361 struct acpi_namespace_node *node; 361 struct acpi_namespace_node *node;
362 struct acpi_table_header *table;
363 union acpi_operand_object *region_obj2; 362 union acpi_operand_object *region_obj2;
363 acpi_native_uint table_index;
364 struct acpi_table_header *table;
364 365
365 ACPI_FUNCTION_TRACE(ex_create_table_region); 366 ACPI_FUNCTION_TRACE(ex_create_table_region);
366 367
@@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
380 381
381 status = acpi_tb_find_table(operand[1]->string.pointer, 382 status = acpi_tb_find_table(operand[1]->string.pointer,
382 operand[2]->string.pointer, 383 operand[2]->string.pointer,
383 operand[3]->string.pointer, &table); 384 operand[3]->string.pointer, &table_index);
384 if (ACPI_FAILURE(status)) { 385 if (ACPI_FAILURE(status)) {
385 return_ACPI_STATUS(status); 386 return_ACPI_STATUS(status);
386 } 387 }
@@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
395 region_obj2 = obj_desc->common.next_object; 396 region_obj2 = obj_desc->common.next_object;
396 region_obj2->extra.region_context = NULL; 397 region_obj2->extra.region_context = NULL;
397 398
399 status = acpi_get_table_by_index(table_index, &table);
400 if (ACPI_FAILURE(status)) {
401 return_ACPI_STATUS(status);
402 }
403
398 /* Init the region from the operands */ 404 /* Init the region from the operands */
399 405
400 obj_desc->region.space_id = REGION_DATA_TABLE; 406 obj_desc->region.space_id = REGION_DATA_TABLE;
@@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start,
553 559
554 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); 560 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
555 if (!obj_desc) { 561 if (!obj_desc) {
556 return_ACPI_STATUS(AE_NO_MEMORY); 562 status = AE_NO_MEMORY;
563 goto exit;
557 } 564 }
558 565
559 /* Save the method's AML pointer and length */ 566 /* Save the method's AML pointer and length */
@@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
576 * Get the sync_level. If method is serialized, a mutex will be 583 * Get the sync_level. If method is serialized, a mutex will be
577 * created for this method when it is parsed. 584 * created for this method when it is parsed.
578 */ 585 */
579 if (acpi_gbl_all_methods_serialized) { 586 if (method_flags & AML_METHOD_SERIALIZED) {
580 obj_desc->method.sync_level = 0;
581 obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
582 } else if (method_flags & AML_METHOD_SERIALIZED) {
583 /* 587 /*
584 * ACPI 1.0: sync_level = 0 588 * ACPI 1.0: sync_level = 0
585 * ACPI 2.0: sync_level = sync_level in method declaration 589 * ACPI 2.0: sync_level = sync_level in method declaration
@@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start,
597 601
598 acpi_ut_remove_reference(obj_desc); 602 acpi_ut_remove_reference(obj_desc);
599 603
604 exit:
600 /* Remove a reference to the operand */ 605 /* Remove a reference to the operand */
601 606
602 acpi_ut_remove_reference(operand[1]); 607 acpi_ut_remove_reference(operand[1]);
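
In the excreate.c hunks, acpi_ex_create_table_region() likewise resolves its target table to an index and uses acpi_get_table_by_index() for the header, while acpi_ex_create_method() replaces the early return on allocation failure with a jump to a common exit label so the method-flags operand reference is still released. A minimal sketch of that error-path shape, illustrative only and heavily simplified:

/*
 * Simplified, hypothetical shape of the create-method error path: on
 * allocation failure, fall through to the common exit so the reference
 * on operand[1] is still dropped (the old code returned directly).
 */
static acpi_status create_method_shape_sketch(union acpi_operand_object **operand)
{
	union acpi_operand_object *obj_desc;
	acpi_status status = AE_OK;

	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
	if (!obj_desc) {
		status = AE_NO_MEMORY;
		goto exit;
	}

	/* ... initialize the method object and attach it to its node ... */

	acpi_ut_remove_reference(obj_desc);

exit:
	/* Always remove the reference to the method-flags operand */
	acpi_ut_remove_reference(operand[1]);
	return status;
}
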
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 2450943add33..68d283fd60e7 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -59,8 +59,6 @@ static void acpi_ex_out_string(char *title, char *value);
59 59
60static void acpi_ex_out_pointer(char *title, void *value); 60static void acpi_ex_out_pointer(char *title, void *value);
61 61
62static void acpi_ex_out_address(char *title, acpi_physical_address value);
63
64static void 62static void
65acpi_ex_dump_object(union acpi_operand_object *obj_desc, 63acpi_ex_dump_object(union acpi_operand_object *obj_desc,
66 struct acpi_exdump_info *info); 64 struct acpi_exdump_info *info);
@@ -92,10 +90,11 @@ static struct acpi_exdump_info acpi_ex_dump_string[4] = {
92 {ACPI_EXD_STRING, 0, NULL} 90 {ACPI_EXD_STRING, 0, NULL}
93}; 91};
94 92
95static struct acpi_exdump_info acpi_ex_dump_buffer[4] = { 93static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
96 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL}, 94 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
97 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"}, 95 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
98 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"}, 96 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
97 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
99 {ACPI_EXD_BUFFER, 0, NULL} 98 {ACPI_EXD_BUFFER, 0, NULL}
100}; 99};
101 100
@@ -165,8 +164,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
165 164
166static struct acpi_exdump_info acpi_ex_dump_processor[7] = { 165static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
167 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL}, 166 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
168 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, 167 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
169 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.length), "Length"}, 168 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
170 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, 169 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
171 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify), 170 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
172 "System Notify"}, 171 "System Notify"},
@@ -379,18 +378,12 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
379 break; 378 break;
380 379
381 case ACPI_EXD_POINTER: 380 case ACPI_EXD_POINTER:
381 case ACPI_EXD_ADDRESS:
382 382
383 acpi_ex_out_pointer(name, 383 acpi_ex_out_pointer(name,
384 *ACPI_CAST_PTR(void *, target)); 384 *ACPI_CAST_PTR(void *, target));
385 break; 385 break;
386 386
387 case ACPI_EXD_ADDRESS:
388
389 acpi_ex_out_address(name,
390 *ACPI_CAST_PTR
391 (acpi_physical_address, target));
392 break;
393
394 case ACPI_EXD_STRING: 387 case ACPI_EXD_STRING:
395 388
396 acpi_ut_print_string(obj_desc->string.pointer, 389 acpi_ut_print_string(obj_desc->string.pointer,
@@ -834,16 +827,6 @@ static void acpi_ex_out_pointer(char *title, void *value)
834 acpi_os_printf("%20s : %p\n", title, value); 827 acpi_os_printf("%20s : %p\n", title, value);
835} 828}
836 829
837static void acpi_ex_out_address(char *title, acpi_physical_address value)
838{
839
840#if ACPI_MACHINE_WIDTH == 16
841 acpi_os_printf("%20s : %p\n", title, value);
842#else
843 acpi_os_printf("%20s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
844#endif
845}
846
847/******************************************************************************* 830/*******************************************************************************
848 * 831 *
849 * FUNCTION: acpi_ex_dump_namespace_node 832 * FUNCTION: acpi_ex_dump_namespace_node
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
index 9ea9c3a67ca9..2d88a3d8d1ad 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/executer/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index 40f0bee6faa5..65a48b6170ee 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
257 } 257 }
258 258
259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, 259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
260 " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", 260 " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
261 acpi_ut_get_region_name(rgn_desc->region. 261 acpi_ut_get_region_name(rgn_desc->region.
262 space_id), 262 space_id),
263 rgn_desc->region.space_id, 263 rgn_desc->region.space_id,
264 obj_desc->common_field.access_byte_width, 264 obj_desc->common_field.access_byte_width,
265 obj_desc->common_field.base_byte_offset, 265 obj_desc->common_field.base_byte_offset,
266 field_datum_byte_offset, 266 field_datum_byte_offset, (void *)address));
267 ACPI_FORMAT_UINT64(address)));
268 267
269 /* Invoke the appropriate address_space/op_region handler */ 268 /* Invoke the appropriate address_space/op_region handler */
270 269
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index bd98aab017cf..f13d1cec2d6d 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index bf90f04f2c60..5101bad5baf8 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acinterp.h> 46#include <acpi/acinterp.h>
47#include <acpi/acevents.h>
47 48
48#define _COMPONENT ACPI_EXECUTER 49#define _COMPONENT ACPI_EXECUTER
49ACPI_MODULE_NAME("exmutex") 50ACPI_MODULE_NAME("exmutex")
@@ -150,7 +151,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
150 return_ACPI_STATUS(AE_BAD_PARAMETER); 151 return_ACPI_STATUS(AE_BAD_PARAMETER);
151 } 152 }
152 153
153 /* Sanity check -- we must have a valid thread ID */ 154 /* Sanity check: we must have a valid thread ID */
154 155
155 if (!walk_state->thread) { 156 if (!walk_state->thread) {
156 ACPI_ERROR((AE_INFO, 157 ACPI_ERROR((AE_INFO,
@@ -174,24 +175,28 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
174 /* Support for multiple acquires by the owning thread */ 175 /* Support for multiple acquires by the owning thread */
175 176
176 if (obj_desc->mutex.owner_thread) { 177 if (obj_desc->mutex.owner_thread) {
177 178 if (obj_desc->mutex.owner_thread->thread_id ==
178 /* Special case for Global Lock, allow all threads */ 179 walk_state->thread->thread_id) {
179
180 if ((obj_desc->mutex.owner_thread->thread_id ==
181 walk_state->thread->thread_id) ||
182 (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {
183 /* 180 /*
184 * The mutex is already owned by this thread, 181 * The mutex is already owned by this thread, just increment the
185 * just increment the acquisition depth 182 * acquisition depth
186 */ 183 */
187 obj_desc->mutex.acquisition_depth++; 184 obj_desc->mutex.acquisition_depth++;
188 return_ACPI_STATUS(AE_OK); 185 return_ACPI_STATUS(AE_OK);
189 } 186 }
190 } 187 }
191 188
192 /* Acquire the mutex, wait if necessary */ 189 /* Acquire the mutex, wait if necessary. Special case for Global Lock */
190
191 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
192 status =
193 acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
194 } else {
195 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
196 (u16) time_desc->integer.
197 value);
198 }
193 199
194 status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
195 if (ACPI_FAILURE(status)) { 200 if (ACPI_FAILURE(status)) {
196 201
197 /* Includes failure from a timeout on time_desc */ 202 /* Includes failure from a timeout on time_desc */
@@ -211,7 +216,6 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
211 /* Link the mutex to the current thread for force-unlock at method exit */ 216 /* Link the mutex to the current thread for force-unlock at method exit */
212 217
213 acpi_ex_link_mutex(obj_desc, walk_state->thread); 218 acpi_ex_link_mutex(obj_desc, walk_state->thread);
214
215 return_ACPI_STATUS(AE_OK); 219 return_ACPI_STATUS(AE_OK);
216} 220}
217 221
@@ -232,7 +236,7 @@ acpi_status
232acpi_ex_release_mutex(union acpi_operand_object *obj_desc, 236acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
233 struct acpi_walk_state *walk_state) 237 struct acpi_walk_state *walk_state)
234{ 238{
235 acpi_status status; 239 acpi_status status = AE_OK;
236 240
237 ACPI_FUNCTION_TRACE(ex_release_mutex); 241 ACPI_FUNCTION_TRACE(ex_release_mutex);
238 242
@@ -249,7 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
249 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); 253 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
250 } 254 }
251 255
252 /* Sanity check -- we must have a valid thread ID */ 256 /* Sanity check: we must have a valid thread ID */
253 257
254 if (!walk_state->thread) { 258 if (!walk_state->thread) {
255 ACPI_ERROR((AE_INFO, 259 ACPI_ERROR((AE_INFO,
@@ -264,7 +268,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
264 */ 268 */
265 if ((obj_desc->mutex.owner_thread->thread_id != 269 if ((obj_desc->mutex.owner_thread->thread_id !=
266 walk_state->thread->thread_id) 270 walk_state->thread->thread_id)
267 && (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) { 271 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
268 ACPI_ERROR((AE_INFO, 272 ACPI_ERROR((AE_INFO,
269 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", 273 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
270 (unsigned long)walk_state->thread->thread_id, 274 (unsigned long)walk_state->thread->thread_id,
@@ -274,8 +278,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
274 } 278 }
275 279
276 /* 280 /*
277 * The sync level of the mutex must be less than or 281 * The sync level of the mutex must be less than or equal to the current
278 * equal to the current sync level 282 * sync level
279 */ 283 */
280 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { 284 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
281 ACPI_ERROR((AE_INFO, 285 ACPI_ERROR((AE_INFO,
@@ -298,11 +302,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
298 302
299 acpi_ex_unlink_mutex(obj_desc); 303 acpi_ex_unlink_mutex(obj_desc);
300 304
301 /* Release the mutex */ 305 /* Release the mutex, special case for Global Lock */
302 306
303 status = acpi_ex_system_release_mutex(obj_desc); 307 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
308 status = acpi_ev_release_global_lock();
309 } else {
310 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
311 }
304 312
305 /* Update the mutex and walk state, restore sync_level before acquire */ 313 /* Update the mutex and restore sync_level */
306 314
307 obj_desc->mutex.owner_thread = NULL; 315 obj_desc->mutex.owner_thread = NULL;
308 walk_state->thread->current_sync_level = 316 walk_state->thread->current_sync_level =
@@ -321,39 +329,49 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
321 * 329 *
322 * DESCRIPTION: Release all mutexes held by this thread 330 * DESCRIPTION: Release all mutexes held by this thread
323 * 331 *
332 * NOTE: This function is called as the thread is exiting the interpreter.
333 * Mutexes are not released when an individual control method is exited, but
334 * only when the parent thread actually exits the interpreter. This allows one
335 * method to acquire a mutex, and a different method to release it, as long as
336 * this is performed underneath a single parent control method.
337 *
324 ******************************************************************************/ 338 ******************************************************************************/
325 339
326void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) 340void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
327{ 341{
328 union acpi_operand_object *next = thread->acquired_mutex_list; 342 union acpi_operand_object *next = thread->acquired_mutex_list;
329 union acpi_operand_object *this; 343 union acpi_operand_object *obj_desc;
330 acpi_status status;
331 344
332 ACPI_FUNCTION_ENTRY(); 345 ACPI_FUNCTION_ENTRY();
333 346
334 /* Traverse the list of owned mutexes, releasing each one */ 347 /* Traverse the list of owned mutexes, releasing each one */
335 348
336 while (next) { 349 while (next) {
337 this = next; 350 obj_desc = next;
338 next = this->mutex.next; 351 next = obj_desc->mutex.next;
352
353 obj_desc->mutex.prev = NULL;
354 obj_desc->mutex.next = NULL;
355 obj_desc->mutex.acquisition_depth = 0;
356
357 /* Release the mutex, special case for Global Lock */
339 358
340 this->mutex.acquisition_depth = 1; 359 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
341 this->mutex.prev = NULL;
342 this->mutex.next = NULL;
343 360
344 /* Release the mutex */ 361 /* Ignore errors */
345 362
346 status = acpi_ex_system_release_mutex(this); 363 (void)acpi_ev_release_global_lock();
347 if (ACPI_FAILURE(status)) { 364 } else {
348 continue; 365 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
349 } 366 }
350 367
351 /* Mark mutex unowned */ 368 /* Mark mutex unowned */
352 369
353 this->mutex.owner_thread = NULL; 370 obj_desc->mutex.owner_thread = NULL;
354 371
355 /* Update Thread sync_level (Last mutex is the important one) */ 372 /* Update Thread sync_level (Last mutex is the important one) */
356 373
357 thread->current_sync_level = this->mutex.original_sync_level; 374 thread->current_sync_level =
375 obj_desc->mutex.original_sync_level;
358 } 376 }
359} 377}
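
The exmutex.c hunks fold the Global Lock handling directly into mutex acquire and release: a recursive acquire by the owning thread only bumps the acquisition depth, the _GL_ mutex is routed through acpi_ev_acquire_global_lock()/acpi_ev_release_global_lock(), and every other mutex waits on its plain OS mutex. A minimal sketch of the acquire decision, condensed from the hunks above and not meant as a drop-in replacement:

/*
 * Condensed sketch of the acquire path in acpi_ex_acquire_mutex() after
 * this change. Error handling, sync-level updates and mutex linking are
 * omitted.
 */
static acpi_status acquire_decision_sketch(union acpi_operand_object *time_desc,
					   union acpi_operand_object *obj_desc,
					   struct acpi_walk_state *walk_state)
{
	/* Owning thread may acquire recursively; just track the depth */
	if (obj_desc->mutex.owner_thread &&
	    obj_desc->mutex.owner_thread->thread_id ==
	    walk_state->thread->thread_id) {
		obj_desc->mutex.acquisition_depth++;
		return AE_OK;
	}

	/* Special case for the Global Lock */
	if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
		return acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
	}

	/* Ordinary mutex: wait on the OS mutex, honoring the AML timeout */
	return acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
					 (u16) time_desc->integer.value);
}
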
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index d3d70364626c..1ee4fb1175c6 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 6374d8be88e0..252f10acbbcc 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -104,9 +104,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
104 status = AE_NO_MEMORY; 104 status = AE_NO_MEMORY;
105 goto cleanup; 105 goto cleanup;
106 } 106 }
107#if ACPI_MACHINE_WIDTH != 16
108 return_desc->integer.value = acpi_os_get_timer(); 107 return_desc->integer.value = acpi_os_get_timer();
109#endif
110 break; 108 break;
111 109
112 default: /* Unknown opcode */ 110 default: /* Unknown opcode */
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index 7d2cbc113160..17e652e65379 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
index e2d945dfd509..7fe67cf82cee 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/executer/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
index f0c0ba6eb408..bd80a9cb3d65 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/executer/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index 44d064f427b9..a6696621ff1b 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 3cc97ba48b36..2e9ce94798c7 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -155,16 +155,15 @@ acpi_ex_system_memory_space_handler(u32 function,
155 155
156 /* Create a new mapping starting at the address given */ 156 /* Create a new mapping starting at the address given */
157 157
158 status = acpi_os_map_memory(address, window_size, 158 mem_info->mapped_logical_address =
159 (void **)&mem_info-> 159 acpi_os_map_memory((acpi_native_uint) address, window_size);
160 mapped_logical_address); 160 if (!mem_info->mapped_logical_address) {
161 if (ACPI_FAILURE(status)) {
162 ACPI_ERROR((AE_INFO, 161 ACPI_ERROR((AE_INFO,
163 "Could not map memory at %8.8X%8.8X, size %X", 162 "Could not map memory at %8.8X%8.8X, size %X",
164 ACPI_FORMAT_UINT64(address), 163 ACPI_FORMAT_UINT64(address),
165 (u32) window_size)); 164 (u32) window_size));
166 mem_info->mapped_length = 0; 165 mem_info->mapped_length = 0;
167 return_ACPI_STATUS(status); 166 return_ACPI_STATUS(AE_NO_MEMORY);
168 } 167 }
169 168
170 /* Save the physical address and mapping size */ 169 /* Save the physical address and mapping size */
@@ -210,11 +209,10 @@ acpi_ex_system_memory_space_handler(u32 function,
210 *value = (acpi_integer) ACPI_GET32(logical_addr_ptr); 209 *value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
211 break; 210 break;
212 211
213#if ACPI_MACHINE_WIDTH != 16
214 case 64: 212 case 64:
215 *value = (acpi_integer) ACPI_GET64(logical_addr_ptr); 213 *value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
216 break; 214 break;
217#endif 215
218 default: 216 default:
219 /* bit_width was already validated */ 217 /* bit_width was already validated */
220 break; 218 break;
@@ -236,11 +234,9 @@ acpi_ex_system_memory_space_handler(u32 function,
236 ACPI_SET32(logical_addr_ptr) = (u32) * value; 234 ACPI_SET32(logical_addr_ptr) = (u32) * value;
237 break; 235 break;
238 236
239#if ACPI_MACHINE_WIDTH != 16
240 case 64: 237 case 64:
241 ACPI_SET64(logical_addr_ptr) = (u64) * value; 238 ACPI_SET64(logical_addr_ptr) = (u64) * value;
242 break; 239 break;
243#endif
244 240
245 default: 241 default:
246 /* bit_width was already validated */ 242 /* bit_width was already validated */
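
The exregion.c hunk reflects the new acpi_os_map_memory() convention: it returns the mapped virtual address directly and NULL on failure, instead of an acpi_status plus an output pointer, and the 64-bit access cases are no longer guarded by ACPI_MACHINE_WIDTH. A minimal sketch of the new mapping pattern, with a hypothetical helper name:

/*
 * Hypothetical helper showing the post-patch mapping convention: check
 * the returned pointer rather than an acpi_status.
 */
static void *map_region_window_sketch(acpi_physical_address address,
				      acpi_size window_size)
{
	void *logical_address;

	logical_address = acpi_os_map_memory((acpi_native_uint) address,
					     window_size);
	if (!logical_address) {
		ACPI_ERROR((AE_INFO,
			    "Could not map memory at %8.8X%8.8X, size %X",
			    ACPI_FORMAT_UINT64(address), (u32) window_size));
		return NULL;
	}

	return logical_address;
}
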
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 3089b05a1368..2b3a01cc4929 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index 6499de878017..6c64e55dab0e 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -141,7 +141,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
141 acpi_status status = AE_OK; 141 acpi_status status = AE_OK;
142 union acpi_operand_object *stack_desc; 142 union acpi_operand_object *stack_desc;
143 void *temp_node; 143 void *temp_node;
144 union acpi_operand_object *obj_desc; 144 union acpi_operand_object *obj_desc = NULL;
145 u16 opcode; 145 u16 opcode;
146 146
147 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value); 147 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
@@ -299,8 +299,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
299 status = acpi_ds_get_package_arguments(stack_desc); 299 status = acpi_ds_get_package_arguments(stack_desc);
300 break; 300 break;
301 301
302 /* These cases may never happen here, but just in case.. */
303
304 case ACPI_TYPE_BUFFER_FIELD: 302 case ACPI_TYPE_BUFFER_FIELD:
305 case ACPI_TYPE_LOCAL_REGION_FIELD: 303 case ACPI_TYPE_LOCAL_REGION_FIELD:
306 case ACPI_TYPE_LOCAL_BANK_FIELD: 304 case ACPI_TYPE_LOCAL_BANK_FIELD:
@@ -314,6 +312,10 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
314 status = 312 status =
315 acpi_ex_read_data_from_field(walk_state, stack_desc, 313 acpi_ex_read_data_from_field(walk_state, stack_desc,
316 &obj_desc); 314 &obj_desc);
315
316 /* Remove a reference to the original operand, then override */
317
318 acpi_ut_remove_reference(*stack_ptr);
317 *stack_ptr = (void *)obj_desc; 319 *stack_ptr = (void *)obj_desc;
318 break; 320 break;
319 321
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 4c93d0972333..ba761862a599 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -611,22 +611,20 @@ acpi_ex_resolve_operands(u16 opcode,
611 } 611 }
612 goto next_operand; 612 goto next_operand;
613 613
614 case ARGI_REGION_OR_FIELD: 614 case ARGI_REGION_OR_BUFFER: /* Used by Load() only */
615 615
616 /* Need an operand of type REGION or a FIELD in a region */ 616 /* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
617 617
618 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 618 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
619 case ACPI_TYPE_BUFFER:
619 case ACPI_TYPE_REGION: 620 case ACPI_TYPE_REGION:
620 case ACPI_TYPE_LOCAL_REGION_FIELD:
621 case ACPI_TYPE_LOCAL_BANK_FIELD:
622 case ACPI_TYPE_LOCAL_INDEX_FIELD:
623 621
624 /* Valid operand */ 622 /* Valid operand */
625 break; 623 break;
626 624
627 default: 625 default:
628 ACPI_ERROR((AE_INFO, 626 ACPI_ERROR((AE_INFO,
629 "Needed [Region/RegionField], found [%s] %p", 627 "Needed [Region/Buffer], found [%s] %p",
630 acpi_ut_get_object_type_name 628 acpi_ut_get_object_type_name
631 (obj_desc), obj_desc)); 629 (obj_desc), obj_desc));
632 630
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 0456405ba019..f4b69a637820 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index 591aaf0e18b3..1d622c625c64 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
index 99ebe5adfcda..8233d40178ee 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/executer/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index 28aef3e69ecc..9460baff3032 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) 66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
67{ 67{
68 acpi_status status; 68 acpi_status status;
69 acpi_status status2;
70 69
71 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); 70 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
72 71
@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
79 78
80 /* We must wait, so unlock the interpreter */ 79 /* We must wait, so unlock the interpreter */
81 80
82 acpi_ex_exit_interpreter(); 81 acpi_ex_relinquish_interpreter();
83 82
84 status = acpi_os_wait_semaphore(semaphore, 1, timeout); 83 status = acpi_os_wait_semaphore(semaphore, 1, timeout);
85 84
@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
89 88
90 /* Reacquire the interpreter */ 89 /* Reacquire the interpreter */
91 90
92 status2 = acpi_ex_enter_interpreter(); 91 acpi_ex_reacquire_interpreter();
93 if (ACPI_FAILURE(status2)) {
94
95 /* Report fatal error, could not acquire interpreter */
96
97 return_ACPI_STATUS(status2);
98 }
99 } 92 }
100 93
101 return_ACPI_STATUS(status); 94 return_ACPI_STATUS(status);
@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
119acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) 112acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
120{ 113{
121 acpi_status status; 114 acpi_status status;
122 acpi_status status2;
123 115
124 ACPI_FUNCTION_TRACE(ex_system_wait_mutex); 116 ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
125 117
@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
132 124
133 /* We must wait, so unlock the interpreter */ 125 /* We must wait, so unlock the interpreter */
134 126
135 acpi_ex_exit_interpreter(); 127 acpi_ex_relinquish_interpreter();
136 128
137 status = acpi_os_acquire_mutex(mutex, timeout); 129 status = acpi_os_acquire_mutex(mutex, timeout);
138 130
@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
142 134
143 /* Reacquire the interpreter */ 135 /* Reacquire the interpreter */
144 136
145 status2 = acpi_ex_enter_interpreter(); 137 acpi_ex_reacquire_interpreter();
146 if (ACPI_FAILURE(status2)) {
147
148 /* Report fatal error, could not acquire interpreter */
149
150 return_ACPI_STATUS(status2);
151 }
152 } 138 }
153 139
154 return_ACPI_STATUS(status); 140 return_ACPI_STATUS(status);
@@ -209,96 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
209 195
210acpi_status acpi_ex_system_do_suspend(acpi_integer how_long) 196acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
211{ 197{
212 acpi_status status;
213
214 ACPI_FUNCTION_ENTRY(); 198 ACPI_FUNCTION_ENTRY();
215 199
216 /* Since this thread will sleep, we must release the interpreter */ 200 /* Since this thread will sleep, we must release the interpreter */
217 201
218 acpi_ex_exit_interpreter(); 202 acpi_ex_relinquish_interpreter();
219 203
220 acpi_os_sleep(how_long); 204 acpi_os_sleep(how_long);
221 205
222 /* And now we must get the interpreter again */ 206 /* And now we must get the interpreter again */
223 207
224 status = acpi_ex_enter_interpreter(); 208 acpi_ex_reacquire_interpreter();
225 return (status); 209 return (AE_OK);
226}
227
228/*******************************************************************************
229 *
230 * FUNCTION: acpi_ex_system_acquire_mutex
231 *
232 * PARAMETERS: time_desc - Maximum time to wait for the mutex
233 * obj_desc - The object descriptor for this op
234 *
235 * RETURN: Status
236 *
237 * DESCRIPTION: Provides an access point to perform synchronization operations
238 * within the AML. This function will cause a lock to be generated
239 * for the Mutex pointed to by obj_desc.
240 *
241 ******************************************************************************/
242
243acpi_status
244acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
245 union acpi_operand_object * obj_desc)
246{
247 acpi_status status = AE_OK;
248
249 ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
250
251 if (!obj_desc) {
252 return_ACPI_STATUS(AE_BAD_PARAMETER);
253 }
254
255 /* Support for the _GL_ Mutex object -- go get the global lock */
256
257 if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
258 status =
259 acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
260 return_ACPI_STATUS(status);
261 }
262
263 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
264 (u16) time_desc->integer.value);
265 return_ACPI_STATUS(status);
266}
267
268/*******************************************************************************
269 *
270 * FUNCTION: acpi_ex_system_release_mutex
271 *
272 * PARAMETERS: obj_desc - The object descriptor for this op
273 *
274 * RETURN: Status
275 *
276 * DESCRIPTION: Provides an access point to perform synchronization operations
277 * within the AML. This operation is a request to release a
278 * previously acquired Mutex. If the Mutex variable is set then
279 * it will be decremented.
280 *
281 ******************************************************************************/
282
283acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
284{
285 acpi_status status = AE_OK;
286
287 ACPI_FUNCTION_TRACE(ex_system_release_mutex);
288
289 if (!obj_desc) {
290 return_ACPI_STATUS(AE_BAD_PARAMETER);
291 }
292
293 /* Support for the _GL_ Mutex object -- release the global lock */
294
295 if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
296 status = acpi_ev_release_global_lock();
297 return_ACPI_STATUS(status);
298 }
299
300 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
301 return_ACPI_STATUS(AE_OK);
302} 210}
303 211
304/******************************************************************************* 212/*******************************************************************************
@@ -314,7 +222,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
314 * 222 *
315 ******************************************************************************/ 223 ******************************************************************************/
316 224
317acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc) 225acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
318{ 226{
319 acpi_status status = AE_OK; 227 acpi_status status = AE_OK;
320 228
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index 982c8b65876f..6b0aeccbb69b 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
76 * 76 *
77 * PARAMETERS: None 77 * PARAMETERS: None
78 * 78 *
79 * RETURN: Status 79 * RETURN: None
80 * 80 *
81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter 81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter
82 * the interpreter region is a fatal system error 82 * the interpreter region is a fatal system error. Used in
83 * conjunction with exit_interpreter.
83 * 84 *
84 ******************************************************************************/ 85 ******************************************************************************/
85 86
86acpi_status acpi_ex_enter_interpreter(void) 87void acpi_ex_enter_interpreter(void)
87{ 88{
88 acpi_status status; 89 acpi_status status;
89 90
@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
91 92
92 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); 93 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
93 if (ACPI_FAILURE(status)) { 94 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); 95 ACPI_ERROR((AE_INFO,
96 "Could not acquire AML Interpreter mutex"));
95 } 97 }
96 98
97 return_ACPI_STATUS(status); 99 return_VOID;
98} 100}
99 101
100/******************************************************************************* 102/*******************************************************************************
101 * 103 *
102 * FUNCTION: acpi_ex_exit_interpreter 104 * FUNCTION: acpi_ex_reacquire_interpreter
103 * 105 *
104 * PARAMETERS: None 106 * PARAMETERS: None
105 * 107 *
106 * RETURN: None 108 * RETURN: None
107 * 109 *
108 * DESCRIPTION: Exit the interpreter execution region 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a
 112 * fatal system error. Used in conjunction with
113 * relinquish_interpreter
114 *
115 ******************************************************************************/
116
117void acpi_ex_reacquire_interpreter(void)
118{
119 ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
120
121 /*
122 * If the global serialized flag is set, do not release the interpreter,
123 * since it was not actually released by acpi_ex_relinquish_interpreter.
124 * This forces the interpreter to be single threaded.
125 */
126 if (!acpi_gbl_all_methods_serialized) {
127 acpi_ex_enter_interpreter();
128 }
129
130 return_VOID;
131}
132
133/*******************************************************************************
134 *
135 * FUNCTION: acpi_ex_exit_interpreter
136 *
137 * PARAMETERS: None
138 *
139 * RETURN: None
109 * 140 *
110 * Cases where the interpreter is unlocked: 141 * DESCRIPTION: Exit the interpreter execution region. This is the top level
111 * 1) Completion of the execution of a control method 142 * routine used to exit the interpreter when all processing has
112 * 2) Method blocked on a Sleep() AML opcode 143 * been completed.
113 * 3) Method blocked on an Acquire() AML opcode
114 * 4) Method blocked on a Wait() AML opcode
115 * 5) Method blocked to acquire the global lock
116 * 6) Method blocked to execute a serialized control method that is
117 * already executing
118 * 7) About to invoke a user-installed opregion handler
119 * 144 *
120 ******************************************************************************/ 145 ******************************************************************************/
121 146
@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
127 152
128 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); 153 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
129 if (ACPI_FAILURE(status)) { 154 if (ACPI_FAILURE(status)) {
130 ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); 155 ACPI_ERROR((AE_INFO,
156 "Could not release AML Interpreter mutex"));
157 }
158
159 return_VOID;
160}
161
162/*******************************************************************************
163 *
164 * FUNCTION: acpi_ex_relinquish_interpreter
165 *
166 * PARAMETERS: None
167 *
168 * RETURN: None
169 *
170 * DESCRIPTION: Exit the interpreter execution region, from within the
171 * interpreter - before attempting an operation that will possibly
172 * block the running thread.
173 *
174 * Cases where the interpreter is unlocked internally
175 * 1) Method to be blocked on a Sleep() AML opcode
176 * 2) Method to be blocked on an Acquire() AML opcode
177 * 3) Method to be blocked on a Wait() AML opcode
178 * 4) Method to be blocked to acquire the global lock
179 * 5) Method to be blocked waiting to execute a serialized control method
180 * that is currently executing
181 * 6) About to invoke a user-installed opregion handler
182 *
183 ******************************************************************************/
184
185void acpi_ex_relinquish_interpreter(void)
186{
187 ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
188
189 /*
190 * If the global serialized flag is set, do not release the interpreter.
191 * This forces the interpreter to be single threaded.
192 */
193 if (!acpi_gbl_all_methods_serialized) {
194 acpi_ex_exit_interpreter();
131 } 195 }
132 196
133 return_VOID; 197 return_VOID;
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
141 * 205 *
142 * RETURN: none 206 * RETURN: none
143 * 207 *
144 * DESCRIPTION: Truncate a number to 32-bits if the currently executing method 208 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
145 * belongs to a 32-bit ACPI table. 209 * 32-bit, as determined by the revision of the DSDT.
146 * 210 *
147 ******************************************************************************/ 211 ******************************************************************************/
148 212
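
The exsystem.c and exutils.c hunks above replace the old acpi_ex_exit_interpreter()/acpi_ex_enter_interpreter() bracketing around blocking operations with acpi_ex_relinquish_interpreter()/acpi_ex_reacquire_interpreter(), which skip the interpreter mutex when acpi_gbl_all_methods_serialized is set. A minimal sketch of the calling pattern, modeled on the Sleep() path above; the wrapper name do_blocking_sleep() is illustrative and not part of the patch:

#include <acpi/acpi.h>

/* Sketch: bracket a potentially blocking call the way the patched
 * acpi_ex_system_do_suspend() does. */
static acpi_status do_blocking_sleep(acpi_integer how_long)
{
	/* Release the interpreter (no-op if all methods are serialized) */
	acpi_ex_relinquish_interpreter();

	acpi_os_sleep(how_long);	/* the thread may block here */

	/* Reacquire the interpreter before touching AML state again */
	acpi_ex_reacquire_interpreter();
	return (AE_OK);
}
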
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f305a826ca2d..af22fdf73413 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -48,8 +48,8 @@ MODULE_LICENSE("GPL");
48 48
49static int acpi_fan_add(struct acpi_device *device); 49static int acpi_fan_add(struct acpi_device *device);
50static int acpi_fan_remove(struct acpi_device *device, int type); 50static int acpi_fan_remove(struct acpi_device *device, int type);
51static int acpi_fan_suspend(struct acpi_device *device, int state); 51static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
52static int acpi_fan_resume(struct acpi_device *device, int state); 52static int acpi_fan_resume(struct acpi_device *device);
53 53
54static struct acpi_driver acpi_fan_driver = { 54static struct acpi_driver acpi_fan_driver = {
55 .name = ACPI_FAN_DRIVER_NAME, 55 .name = ACPI_FAN_DRIVER_NAME,
@@ -237,7 +237,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
237 return 0; 237 return 0;
238} 238}
239 239
240static int acpi_fan_suspend(struct acpi_device *device, int state) 240static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
241{ 241{
242 if (!device) 242 if (!device)
243 return -EINVAL; 243 return -EINVAL;
@@ -247,7 +247,7 @@ static int acpi_fan_suspend(struct acpi_device *device, int state)
247 return AE_OK; 247 return AE_OK;
248} 248}
249 249
250static int acpi_fan_resume(struct acpi_device *device, int state) 250static int acpi_fan_resume(struct acpi_device *device)
251{ 251{
252 int result = 0; 252 int result = 0;
253 int power_state = 0; 253 int power_state = 0;
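
The fan.c hunks above move the driver callbacks to the new prototypes: suspend now takes a pm_message_t and resume no longer takes a state argument. A sketch of the resulting callback shapes; the mydrv_* names are illustrative only:

#include <linux/pm.h>
#include <acpi/acpi_bus.h>

static int mydrv_suspend(struct acpi_device *device, pm_message_t state)
{
	if (!device)
		return -EINVAL;
	/* put the device into its low-power state here */
	return 0;
}

static int mydrv_resume(struct acpi_device *device)
{
	if (!device)
		return -EINVAL;
	/* restore full power here */
	return 0;
}
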
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8a0324b43e53..7b6c9ff9bebe 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -86,129 +86,6 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86 return ret; 86 return ret;
87} 87}
88 88
89/* Get PCI root bridge's handle from its segment and bus number */
90struct acpi_find_pci_root {
91 unsigned int seg;
92 unsigned int bus;
93 acpi_handle handle;
94};
95
96static acpi_status
97do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
98{
99 unsigned long *busnr = data;
100 struct acpi_resource_address64 address;
101
102 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
103 resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 &&
104 resource->type != ACPI_RESOURCE_TYPE_ADDRESS64)
105 return AE_OK;
106
107 acpi_resource_to_address64(resource, &address);
108 if ((address.address_length > 0) &&
109 (address.resource_type == ACPI_BUS_NUMBER_RANGE))
110 *busnr = address.minimum;
111
112 return AE_OK;
113}
114
115static int get_root_bridge_busnr(acpi_handle handle)
116{
117 acpi_status status;
118 unsigned long bus, bbn;
119 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
120
121 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
122
123 status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL,
124 &bbn);
125 if (status == AE_NOT_FOUND) {
126 /* Assume bus = 0 */
127 printk(KERN_INFO PREFIX
128 "Assume root bridge [%s] bus is 0\n",
129 (char *)buffer.pointer);
130 status = AE_OK;
131 bbn = 0;
132 }
133 if (ACPI_FAILURE(status)) {
134 bbn = -ENODEV;
135 goto exit;
136 }
137 if (bbn > 0)
138 goto exit;
139
140 /* _BBN in some systems return 0 for all root bridges */
141 bus = -1;
142 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
143 do_root_bridge_busnr_callback, &bus);
144 /* If _CRS failed, we just use _BBN */
145 if (ACPI_FAILURE(status) || (bus == -1))
146 goto exit;
147 /* We select _CRS */
148 if (bbn != bus) {
149 printk(KERN_INFO PREFIX
150 "_BBN and _CRS returns different value for %s. Select _CRS\n",
151 (char *)buffer.pointer);
152 bbn = bus;
153 }
154 exit:
155 kfree(buffer.pointer);
156 return (int)bbn;
157}
158
159static acpi_status
160find_pci_rootbridge(acpi_handle handle, u32 lvl, void *context, void **rv)
161{
162 struct acpi_find_pci_root *find = (struct acpi_find_pci_root *)context;
163 unsigned long seg, bus;
164 acpi_status status;
165 int tmp;
166 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
167
168 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
169
170 status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL, &seg);
171 if (status == AE_NOT_FOUND) {
172 /* Assume seg = 0 */
173 status = AE_OK;
174 seg = 0;
175 }
176 if (ACPI_FAILURE(status)) {
177 status = AE_CTRL_DEPTH;
178 goto exit;
179 }
180
181 tmp = get_root_bridge_busnr(handle);
182 if (tmp < 0) {
183 printk(KERN_ERR PREFIX
184 "Find root bridge failed for %s\n",
185 (char *)buffer.pointer);
186 status = AE_CTRL_DEPTH;
187 goto exit;
188 }
189 bus = tmp;
190
191 if (seg == find->seg && bus == find->bus)
192 {
193 find->handle = handle;
194 status = AE_CTRL_TERMINATE;
195 }
196 else
197 status = AE_OK;
198 exit:
199 kfree(buffer.pointer);
200 return status;
201}
202
203acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
204{
205 struct acpi_find_pci_root find = { seg, bus, NULL };
206
207 acpi_get_devices(PCI_ROOT_HID_STRING, find_pci_rootbridge, &find, NULL);
208 return find.handle;
209}
210EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
211
212/* Get device's handler per its address under its parent */ 89/* Get device's handler per its address under its parent */
213struct acpi_find_child { 90struct acpi_find_child {
214 acpi_handle handle; 91 acpi_handle handle;
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index de50fab2a910..6031ca13dd2f 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -49,41 +49,6 @@ ACPI_MODULE_NAME("hwacpi")
49 49
50/****************************************************************************** 50/******************************************************************************
51 * 51 *
52 * FUNCTION: acpi_hw_initialize
53 *
54 * PARAMETERS: None
55 *
56 * RETURN: Status
57 *
58 * DESCRIPTION: Initialize and validate the various ACPI registers defined in
59 * the FADT.
60 *
61 ******************************************************************************/
62acpi_status acpi_hw_initialize(void)
63{
64 acpi_status status;
65
66 ACPI_FUNCTION_TRACE(hw_initialize);
67
68 /* We must have the ACPI tables by the time we get here */
69
70 if (!acpi_gbl_FADT) {
71 ACPI_ERROR((AE_INFO, "No FADT is present"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 }
74
75 /* Sanity check the FADT for valid values */
76
77 status = acpi_ut_validate_fadt();
78 if (ACPI_FAILURE(status)) {
79 return_ACPI_STATUS(status);
80 }
81
82 return_ACPI_STATUS(AE_OK);
83}
84
85/******************************************************************************
86 *
87 * FUNCTION: acpi_hw_set_mode 52 * FUNCTION: acpi_hw_set_mode
88 * 53 *
89 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY 54 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
@@ -93,7 +58,6 @@ acpi_status acpi_hw_initialize(void)
93 * DESCRIPTION: Transitions the system into the requested mode. 58 * DESCRIPTION: Transitions the system into the requested mode.
94 * 59 *
95 ******************************************************************************/ 60 ******************************************************************************/
96
97acpi_status acpi_hw_set_mode(u32 mode) 61acpi_status acpi_hw_set_mode(u32 mode)
98{ 62{
99 63
@@ -106,7 +70,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
106 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 70 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
107 * system does not support mode transition. 71 * system does not support mode transition.
108 */ 72 */
109 if (!acpi_gbl_FADT->smi_cmd) { 73 if (!acpi_gbl_FADT.smi_command) {
110 ACPI_ERROR((AE_INFO, 74 ACPI_ERROR((AE_INFO,
111 "No SMI_CMD in FADT, mode transition failed")); 75 "No SMI_CMD in FADT, mode transition failed"));
112 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); 76 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
@@ -119,7 +83,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
119 * we make sure both the numbers are zero to determine these 83 * we make sure both the numbers are zero to determine these
120 * transitions are not supported. 84 * transitions are not supported.
121 */ 85 */
122 if (!acpi_gbl_FADT->acpi_enable && !acpi_gbl_FADT->acpi_disable) { 86 if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
123 ACPI_ERROR((AE_INFO, 87 ACPI_ERROR((AE_INFO,
124 "No ACPI mode transition supported in this system (enable/disable both zero)")); 88 "No ACPI mode transition supported in this system (enable/disable both zero)"));
125 return_ACPI_STATUS(AE_OK); 89 return_ACPI_STATUS(AE_OK);
@@ -130,9 +94,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
130 94
131 /* BIOS should have disabled ALL fixed and GP events */ 95 /* BIOS should have disabled ALL fixed and GP events */
132 96
133 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 97 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
134 (u32) acpi_gbl_FADT->acpi_enable, 98 (u32) acpi_gbl_FADT.acpi_enable, 8);
135 8);
136 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 99 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
137 "Attempting to enable ACPI mode\n")); 100 "Attempting to enable ACPI mode\n"));
138 break; 101 break;
@@ -143,8 +106,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
143 * BIOS should clear all fixed status bits and restore fixed event 106 * BIOS should clear all fixed status bits and restore fixed event
144 * enable bits to default 107 * enable bits to default
145 */ 108 */
146 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 109 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
147 (u32) acpi_gbl_FADT->acpi_disable, 110 (u32) acpi_gbl_FADT.acpi_disable,
148 8); 111 8);
149 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 112 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
150 "Attempting to enable Legacy (non-ACPI) mode\n")); 113 "Attempting to enable Legacy (non-ACPI) mode\n"));
@@ -204,12 +167,11 @@ u32 acpi_hw_get_mode(void)
204 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 167 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
205 * system does not support mode transition. 168 * system does not support mode transition.
206 */ 169 */
207 if (!acpi_gbl_FADT->smi_cmd) { 170 if (!acpi_gbl_FADT.smi_command) {
208 return_UINT32(ACPI_SYS_MODE_ACPI); 171 return_UINT32(ACPI_SYS_MODE_ACPI);
209 } 172 }
210 173
211 status = 174 status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
212 acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value, ACPI_MTX_LOCK);
213 if (ACPI_FAILURE(status)) { 175 if (ACPI_FAILURE(status)) {
214 return_UINT32(ACPI_SYS_MODE_LEGACY); 176 return_UINT32(ACPI_SYS_MODE_LEGACY);
215 } 177 }
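
The hwacpi.c hunks reflect two API changes visible throughout this patch: acpi_gbl_FADT is now a struct instance rather than a pointer (with renamed members such as smi_command), and acpi_get_register() no longer takes a lock-flags argument. A condensed sketch of the mode query after the change, modeled on acpi_hw_get_mode() above:

#include <acpi/acpi.h>

static u32 sketch_get_mode(void)
{
	acpi_status status;
	u32 value;

	/* FADT members are reached with '.' now, and smi_cmd is smi_command */
	if (!acpi_gbl_FADT.smi_command)
		return ACPI_SYS_MODE_ACPI;

	/* No ACPI_MTX_LOCK flag argument anymore */
	status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
	if (ACPI_FAILURE(status))
		return ACPI_SYS_MODE_LEGACY;

	return value ? ACPI_SYS_MODE_ACPI : ACPI_SYS_MODE_LEGACY;
}
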
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index 608a3a60ee11..117a05cadaaa 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -105,14 +105,20 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info)
105acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) 105acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
106{ 106{
107 acpi_status status; 107 acpi_status status;
108 u8 register_bit;
108 109
109 ACPI_FUNCTION_ENTRY(); 110 ACPI_FUNCTION_ENTRY();
110 111
112 register_bit = (u8)
113 (1 <<
114 (gpe_event_info->gpe_number -
115 gpe_event_info->register_info->base_gpe_number));
116
111 /* 117 /*
112 * Write a one to the appropriate bit in the status register to 118 * Write a one to the appropriate bit in the status register to
113 * clear this GPE. 119 * clear this GPE.
114 */ 120 */
115 status = acpi_hw_low_level_write(8, gpe_event_info->register_bit, 121 status = acpi_hw_low_level_write(8, register_bit,
116 &gpe_event_info->register_info-> 122 &gpe_event_info->register_info->
117 status_address); 123 status_address);
118 124
@@ -155,7 +161,10 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
155 161
156 /* Get the register bitmask for this GPE */ 162 /* Get the register bitmask for this GPE */
157 163
158 register_bit = gpe_event_info->register_bit; 164 register_bit = (u8)
165 (1 <<
166 (gpe_event_info->gpe_number -
167 gpe_event_info->register_info->base_gpe_number));
159 168
160 /* GPE currently enabled? (enabled for runtime?) */ 169 /* GPE currently enabled? (enabled for runtime?) */
161 170
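
The hwgpe.c hunks stop caching a per-event register_bit and instead derive the bit mask from the GPE number and its register's base number. The computation, pulled out as a standalone helper for clarity (the helper name is not part of the patch):

#include <acpi/acpi.h>

static u8 gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{
	/* Bit position of this GPE within its 8-bit status/enable register */
	return (u8)(1 << (gpe_event_info->gpe_number -
			  gpe_event_info->register_info->base_gpe_number));
}
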
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index fa58c1edce1e..1d371fa663f2 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -54,17 +54,15 @@ ACPI_MODULE_NAME("hwregs")
54 * 54 *
55 * FUNCTION: acpi_hw_clear_acpi_status 55 * FUNCTION: acpi_hw_clear_acpi_status
56 * 56 *
57 * PARAMETERS: Flags - Lock the hardware or not 57 * PARAMETERS: None
58 * 58 *
59 * RETURN: none 59 * RETURN: None
60 * 60 *
61 * DESCRIPTION: Clears all fixed and general purpose status bits 61 * DESCRIPTION: Clears all fixed and general purpose status bits
62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
63 * 63 *
64 * NOTE: TBD: Flags parameter is obsolete, to be removed
65 *
66 ******************************************************************************/ 64 ******************************************************************************/
67acpi_status acpi_hw_clear_acpi_status(u32 flags) 65acpi_status acpi_hw_clear_acpi_status(void)
68{ 66{
69 acpi_status status; 67 acpi_status status;
70 acpi_cpu_flags lock_flags = 0; 68 acpi_cpu_flags lock_flags = 0;
@@ -73,7 +71,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
73 71
74 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n", 72 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
75 ACPI_BITMASK_ALL_FIXED_STATUS, 73 ACPI_BITMASK_ALL_FIXED_STATUS,
76 (u16) acpi_gbl_FADT->xpm1a_evt_blk.address)); 74 (u16) acpi_gbl_FADT.xpm1a_event_block.address));
77 75
78 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); 76 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
79 77
@@ -86,10 +84,10 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
86 84
87 /* Clear the fixed events */ 85 /* Clear the fixed events */
88 86
89 if (acpi_gbl_FADT->xpm1b_evt_blk.address) { 87 if (acpi_gbl_FADT.xpm1b_event_block.address) {
90 status = 88 status =
91 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS, 89 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
92 &acpi_gbl_FADT->xpm1b_evt_blk); 90 &acpi_gbl_FADT.xpm1b_event_block);
93 if (ACPI_FAILURE(status)) { 91 if (ACPI_FAILURE(status)) {
94 goto unlock_and_exit; 92 goto unlock_and_exit;
95 } 93 }
@@ -253,18 +251,15 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
253 * 251 *
254 * PARAMETERS: register_id - ID of ACPI bit_register to access 252 * PARAMETERS: register_id - ID of ACPI bit_register to access
255 * return_value - Value that was read from the register 253 * return_value - Value that was read from the register
256 * Flags - Lock the hardware or not
257 * 254 *
258 * RETURN: Status and the value read from specified Register. Value 255 * RETURN: Status and the value read from specified Register. Value
259 * returned is normalized to bit0 (is shifted all the way right) 256 * returned is normalized to bit0 (is shifted all the way right)
260 * 257 *
261 * DESCRIPTION: ACPI bit_register read function. 258 * DESCRIPTION: ACPI bit_register read function.
262 * 259 *
263 * NOTE: TBD: Flags parameter is obsolete, to be removed
264 *
265 ******************************************************************************/ 260 ******************************************************************************/
266 261
267acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags) 262acpi_status acpi_get_register(u32 register_id, u32 * return_value)
268{ 263{
269 u32 register_value = 0; 264 u32 register_value = 0;
270 struct acpi_bit_register_info *bit_reg_info; 265 struct acpi_bit_register_info *bit_reg_info;
@@ -312,16 +307,13 @@ ACPI_EXPORT_SYMBOL(acpi_get_register)
312 * PARAMETERS: register_id - ID of ACPI bit_register to access 307 * PARAMETERS: register_id - ID of ACPI bit_register to access
313 * Value - (only used on write) value to write to the 308 * Value - (only used on write) value to write to the
314 * Register, NOT pre-normalized to the bit pos 309 * Register, NOT pre-normalized to the bit pos
315 * Flags - Lock the hardware or not
316 * 310 *
317 * RETURN: Status 311 * RETURN: Status
318 * 312 *
319 * DESCRIPTION: ACPI Bit Register write function. 313 * DESCRIPTION: ACPI Bit Register write function.
320 * 314 *
321 * NOTE: TBD: Flags parameter is obsolete, to be removed
322 *
323 ******************************************************************************/ 315 ******************************************************************************/
324acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) 316acpi_status acpi_set_register(u32 register_id, u32 value)
325{ 317{
326 u32 register_value = 0; 318 u32 register_value = 0;
327 struct acpi_bit_register_info *bit_reg_info; 319 struct acpi_bit_register_info *bit_reg_info;
@@ -422,8 +414,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
422 ACPI_DEBUG_PRINT((ACPI_DB_IO, 414 ACPI_DEBUG_PRINT((ACPI_DB_IO,
423 "PM2 control: Read %X from %8.8X%8.8X\n", 415 "PM2 control: Read %X from %8.8X%8.8X\n",
424 register_value, 416 register_value,
425 ACPI_FORMAT_UINT64(acpi_gbl_FADT-> 417 ACPI_FORMAT_UINT64(acpi_gbl_FADT.
426 xpm2_cnt_blk.address))); 418 xpm2_control_block.
419 address)));
427 420
428 ACPI_REGISTER_INSERT_VALUE(register_value, 421 ACPI_REGISTER_INSERT_VALUE(register_value,
429 bit_reg_info->bit_position, 422 bit_reg_info->bit_position,
@@ -433,8 +426,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
433 ACPI_DEBUG_PRINT((ACPI_DB_IO, 426 ACPI_DEBUG_PRINT((ACPI_DB_IO,
434 "About to write %4.4X to %8.8X%8.8X\n", 427 "About to write %4.4X to %8.8X%8.8X\n",
435 register_value, 428 register_value,
436 ACPI_FORMAT_UINT64(acpi_gbl_FADT-> 429 ACPI_FORMAT_UINT64(acpi_gbl_FADT.
437 xpm2_cnt_blk.address))); 430 xpm2_control_block.
431 address)));
438 432
439 status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, 433 status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
440 ACPI_REGISTER_PM2_CONTROL, 434 ACPI_REGISTER_PM2_CONTROL,
@@ -495,7 +489,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
495 489
496 status = 490 status =
497 acpi_hw_low_level_read(16, &value1, 491 acpi_hw_low_level_read(16, &value1,
498 &acpi_gbl_FADT->xpm1a_evt_blk); 492 &acpi_gbl_FADT.xpm1a_event_block);
499 if (ACPI_FAILURE(status)) { 493 if (ACPI_FAILURE(status)) {
500 goto unlock_and_exit; 494 goto unlock_and_exit;
501 } 495 }
@@ -504,7 +498,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
504 498
505 status = 499 status =
506 acpi_hw_low_level_read(16, &value2, 500 acpi_hw_low_level_read(16, &value2,
507 &acpi_gbl_FADT->xpm1b_evt_blk); 501 &acpi_gbl_FADT.xpm1b_event_block);
508 value1 |= value2; 502 value1 |= value2;
509 break; 503 break;
510 504
@@ -527,14 +521,14 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
527 521
528 status = 522 status =
529 acpi_hw_low_level_read(16, &value1, 523 acpi_hw_low_level_read(16, &value1,
530 &acpi_gbl_FADT->xpm1a_cnt_blk); 524 &acpi_gbl_FADT.xpm1a_control_block);
531 if (ACPI_FAILURE(status)) { 525 if (ACPI_FAILURE(status)) {
532 goto unlock_and_exit; 526 goto unlock_and_exit;
533 } 527 }
534 528
535 status = 529 status =
536 acpi_hw_low_level_read(16, &value2, 530 acpi_hw_low_level_read(16, &value2,
537 &acpi_gbl_FADT->xpm1b_cnt_blk); 531 &acpi_gbl_FADT.xpm1b_control_block);
538 value1 |= value2; 532 value1 |= value2;
539 break; 533 break;
540 534
@@ -542,19 +536,20 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
542 536
543 status = 537 status =
544 acpi_hw_low_level_read(8, &value1, 538 acpi_hw_low_level_read(8, &value1,
545 &acpi_gbl_FADT->xpm2_cnt_blk); 539 &acpi_gbl_FADT.xpm2_control_block);
546 break; 540 break;
547 541
548 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ 542 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
549 543
550 status = 544 status =
551 acpi_hw_low_level_read(32, &value1, 545 acpi_hw_low_level_read(32, &value1,
552 &acpi_gbl_FADT->xpm_tmr_blk); 546 &acpi_gbl_FADT.xpm_timer_block);
553 break; 547 break;
554 548
555 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 549 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
556 550
557 status = acpi_os_read_port(acpi_gbl_FADT->smi_cmd, &value1, 8); 551 status =
552 acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
558 break; 553 break;
559 554
560 default: 555 default:
@@ -635,7 +630,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
635 630
636 status = 631 status =
637 acpi_hw_low_level_write(16, value, 632 acpi_hw_low_level_write(16, value,
638 &acpi_gbl_FADT->xpm1a_evt_blk); 633 &acpi_gbl_FADT.xpm1a_event_block);
639 if (ACPI_FAILURE(status)) { 634 if (ACPI_FAILURE(status)) {
640 goto unlock_and_exit; 635 goto unlock_and_exit;
641 } 636 }
@@ -644,7 +639,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
644 639
645 status = 640 status =
646 acpi_hw_low_level_write(16, value, 641 acpi_hw_low_level_write(16, value,
647 &acpi_gbl_FADT->xpm1b_evt_blk); 642 &acpi_gbl_FADT.xpm1b_event_block);
648 break; 643 break;
649 644
650 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ 645 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
@@ -682,49 +677,50 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
682 677
683 status = 678 status =
684 acpi_hw_low_level_write(16, value, 679 acpi_hw_low_level_write(16, value,
685 &acpi_gbl_FADT->xpm1a_cnt_blk); 680 &acpi_gbl_FADT.xpm1a_control_block);
686 if (ACPI_FAILURE(status)) { 681 if (ACPI_FAILURE(status)) {
687 goto unlock_and_exit; 682 goto unlock_and_exit;
688 } 683 }
689 684
690 status = 685 status =
691 acpi_hw_low_level_write(16, value, 686 acpi_hw_low_level_write(16, value,
692 &acpi_gbl_FADT->xpm1b_cnt_blk); 687 &acpi_gbl_FADT.xpm1b_control_block);
693 break; 688 break;
694 689
695 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ 690 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
696 691
697 status = 692 status =
698 acpi_hw_low_level_write(16, value, 693 acpi_hw_low_level_write(16, value,
699 &acpi_gbl_FADT->xpm1a_cnt_blk); 694 &acpi_gbl_FADT.xpm1a_control_block);
700 break; 695 break;
701 696
702 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ 697 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
703 698
704 status = 699 status =
705 acpi_hw_low_level_write(16, value, 700 acpi_hw_low_level_write(16, value,
706 &acpi_gbl_FADT->xpm1b_cnt_blk); 701 &acpi_gbl_FADT.xpm1b_control_block);
707 break; 702 break;
708 703
709 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ 704 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
710 705
711 status = 706 status =
712 acpi_hw_low_level_write(8, value, 707 acpi_hw_low_level_write(8, value,
713 &acpi_gbl_FADT->xpm2_cnt_blk); 708 &acpi_gbl_FADT.xpm2_control_block);
714 break; 709 break;
715 710
716 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ 711 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
717 712
718 status = 713 status =
719 acpi_hw_low_level_write(32, value, 714 acpi_hw_low_level_write(32, value,
720 &acpi_gbl_FADT->xpm_tmr_blk); 715 &acpi_gbl_FADT.xpm_timer_block);
721 break; 716 break;
722 717
723 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 718 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
724 719
725 /* SMI_CMD is currently always in IO space */ 720 /* SMI_CMD is currently always in IO space */
726 721
727 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, value, 8); 722 status =
723 acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
728 break; 724 break;
729 725
730 default: 726 default:
@@ -783,7 +779,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
783 * Two address spaces supported: Memory or IO. 779 * Two address spaces supported: Memory or IO.
784 * PCI_Config is not supported here because the GAS struct is insufficient 780 * PCI_Config is not supported here because the GAS struct is insufficient
785 */ 781 */
786 switch (reg->address_space_id) { 782 switch (reg->space_id) {
787 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 783 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
788 784
789 status = acpi_os_read_memory((acpi_physical_address) address, 785 status = acpi_os_read_memory((acpi_physical_address) address,
@@ -792,22 +788,20 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
792 788
793 case ACPI_ADR_SPACE_SYSTEM_IO: 789 case ACPI_ADR_SPACE_SYSTEM_IO:
794 790
795 status = acpi_os_read_port((acpi_io_address) address, 791 status =
796 value, width); 792 acpi_os_read_port((acpi_io_address) address, value, width);
797 break; 793 break;
798 794
799 default: 795 default:
800 ACPI_ERROR((AE_INFO, 796 ACPI_ERROR((AE_INFO,
801 "Unsupported address space: %X", 797 "Unsupported address space: %X", reg->space_id));
802 reg->address_space_id));
803 return (AE_BAD_PARAMETER); 798 return (AE_BAD_PARAMETER);
804 } 799 }
805 800
806 ACPI_DEBUG_PRINT((ACPI_DB_IO, 801 ACPI_DEBUG_PRINT((ACPI_DB_IO,
807 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", 802 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
808 *value, width, 803 *value, width, ACPI_FORMAT_UINT64(address),
809 ACPI_FORMAT_UINT64(address), 804 acpi_ut_get_region_name(reg->space_id)));
810 acpi_ut_get_region_name(reg->address_space_id)));
811 805
812 return (status); 806 return (status);
813} 807}
@@ -854,7 +848,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
854 * Two address spaces supported: Memory or IO. 848 * Two address spaces supported: Memory or IO.
855 * PCI_Config is not supported here because the GAS struct is insufficient 849 * PCI_Config is not supported here because the GAS struct is insufficient
856 */ 850 */
857 switch (reg->address_space_id) { 851 switch (reg->space_id) {
858 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 852 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
859 853
860 status = acpi_os_write_memory((acpi_physical_address) address, 854 status = acpi_os_write_memory((acpi_physical_address) address,
@@ -863,22 +857,20 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
863 857
864 case ACPI_ADR_SPACE_SYSTEM_IO: 858 case ACPI_ADR_SPACE_SYSTEM_IO:
865 859
866 status = acpi_os_write_port((acpi_io_address) address, 860 status = acpi_os_write_port((acpi_io_address) address, value,
867 value, width); 861 width);
868 break; 862 break;
869 863
870 default: 864 default:
871 ACPI_ERROR((AE_INFO, 865 ACPI_ERROR((AE_INFO,
872 "Unsupported address space: %X", 866 "Unsupported address space: %X", reg->space_id));
873 reg->address_space_id));
874 return (AE_BAD_PARAMETER); 867 return (AE_BAD_PARAMETER);
875 } 868 }
876 869
877 ACPI_DEBUG_PRINT((ACPI_DB_IO, 870 ACPI_DEBUG_PRINT((ACPI_DB_IO,
878 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", 871 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
879 value, width, 872 value, width, ACPI_FORMAT_UINT64(address),
880 ACPI_FORMAT_UINT64(address), 873 acpi_ut_get_region_name(reg->space_id)));
881 acpi_ut_get_region_name(reg->address_space_id)));
882 874
883 return (status); 875 return (status);
884} 876}
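
The hwregs.c hunks drop the obsolete Flags argument from acpi_get_register()/acpi_set_register() and rename the generic-address member address_space_id to space_id. A sketch of an updated caller, matching the converted call sites later in this patch:

#include <acpi/acpi.h>

static acpi_status sketch_clear_wake_status(void)
{
	/* was: acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK); */
	return acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
}
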
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 8bb43cae60c2..57901ca3ade9 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/actables.h>
46 47
47#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
48ACPI_MODULE_NAME("hwsleep") 49ACPI_MODULE_NAME("hwsleep")
@@ -62,17 +63,32 @@ ACPI_MODULE_NAME("hwsleep")
62acpi_status 63acpi_status
63acpi_set_firmware_waking_vector(acpi_physical_address physical_address) 64acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
64{ 65{
66 struct acpi_table_facs *facs;
67 acpi_status status;
65 68
66 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); 69 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
67 70
71 /* Get the FACS */
72
73 status =
74 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
75 (struct acpi_table_header **)&facs);
76 if (ACPI_FAILURE(status)) {
77 return_ACPI_STATUS(status);
78 }
79
68 /* Set the vector */ 80 /* Set the vector */
69 81
70 if (acpi_gbl_common_fACS.vector_width == 32) { 82 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
71 *(ACPI_CAST_PTR 83 /*
72 (u32, acpi_gbl_common_fACS.firmware_waking_vector)) 84 * ACPI 1.0 FACS or short table or optional X_ field is zero
73 = (u32) physical_address; 85 */
86 facs->firmware_waking_vector = (u32) physical_address;
74 } else { 87 } else {
75 *acpi_gbl_common_fACS.firmware_waking_vector = physical_address; 88 /*
89 * ACPI 2.0 FACS with valid X_ field
90 */
91 facs->xfirmware_waking_vector = physical_address;
76 } 92 }
77 93
78 return_ACPI_STATUS(AE_OK); 94 return_ACPI_STATUS(AE_OK);
@@ -97,6 +113,8 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
97acpi_status 113acpi_status
98acpi_get_firmware_waking_vector(acpi_physical_address * physical_address) 114acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
99{ 115{
116 struct acpi_table_facs *facs;
117 acpi_status status;
100 118
101 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector); 119 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
102 120
@@ -104,16 +122,29 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
104 return_ACPI_STATUS(AE_BAD_PARAMETER); 122 return_ACPI_STATUS(AE_BAD_PARAMETER);
105 } 123 }
106 124
125 /* Get the FACS */
126
127 status =
128 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
129 (struct acpi_table_header **)&facs);
130 if (ACPI_FAILURE(status)) {
131 return_ACPI_STATUS(status);
132 }
133
107 /* Get the vector */ 134 /* Get the vector */
108 135
109 if (acpi_gbl_common_fACS.vector_width == 32) { 136 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
110 *physical_address = (acpi_physical_address) 137 /*
111 * 138 * ACPI 1.0 FACS or short table or optional X_ field is zero
112 (ACPI_CAST_PTR 139 */
113 (u32, acpi_gbl_common_fACS.firmware_waking_vector)); 140 *physical_address =
141 (acpi_physical_address) facs->firmware_waking_vector;
114 } else { 142 } else {
143 /*
144 * ACPI 2.0 FACS with valid X_ field
145 */
115 *physical_address = 146 *physical_address =
116 *acpi_gbl_common_fACS.firmware_waking_vector; 147 (acpi_physical_address) facs->xfirmware_waking_vector;
117 } 148 }
118 149
119 return_ACPI_STATUS(AE_OK); 150 return_ACPI_STATUS(AE_OK);
@@ -246,15 +277,14 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
246 277
247 /* Clear wake status */ 278 /* Clear wake status */
248 279
249 status = 280 status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
250 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
251 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
252 return_ACPI_STATUS(status); 282 return_ACPI_STATUS(status);
253 } 283 }
254 284
255 /* Clear all fixed and general purpose status bits */ 285 /* Clear all fixed and general purpose status bits */
256 286
257 status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK); 287 status = acpi_hw_clear_acpi_status();
258 if (ACPI_FAILURE(status)) { 288 if (ACPI_FAILURE(status)) {
259 return_ACPI_STATUS(status); 289 return_ACPI_STATUS(status);
260 } 290 }
@@ -367,8 +397,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
367 /* Wait until we enter sleep state */ 397 /* Wait until we enter sleep state */
368 398
369 do { 399 do {
370 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, 400 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
371 ACPI_MTX_DO_NOT_LOCK);
372 if (ACPI_FAILURE(status)) { 401 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 402 return_ACPI_STATUS(status);
374 } 403 }
@@ -401,13 +430,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
401 430
402 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios); 431 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
403 432
404 status = 433 status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
405 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
406 if (ACPI_FAILURE(status)) { 434 if (ACPI_FAILURE(status)) {
407 return_ACPI_STATUS(status); 435 return_ACPI_STATUS(status);
408 } 436 }
409 437
410 status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK); 438 status = acpi_hw_clear_acpi_status();
411 if (ACPI_FAILURE(status)) { 439 if (ACPI_FAILURE(status)) {
412 return_ACPI_STATUS(status); 440 return_ACPI_STATUS(status);
413 } 441 }
@@ -429,13 +457,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
429 457
430 ACPI_FLUSH_CPU_CACHE(); 458 ACPI_FLUSH_CPU_CACHE();
431 459
432 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 460 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
433 (u32) acpi_gbl_FADT->S4bios_req, 8); 461 (u32) acpi_gbl_FADT.S4bios_request, 8);
434 462
435 do { 463 do {
436 acpi_os_stall(1000); 464 acpi_os_stall(1000);
437 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, 465 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
438 ACPI_MTX_DO_NOT_LOCK);
439 if (ACPI_FAILURE(status)) { 466 if (ACPI_FAILURE(status)) {
440 return_ACPI_STATUS(status); 467 return_ACPI_STATUS(status);
441 } 468 }
@@ -568,13 +595,11 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
568 595
569 (void) 596 (void)
570 acpi_set_register(acpi_gbl_fixed_event_info 597 acpi_set_register(acpi_gbl_fixed_event_info
571 [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1, 598 [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
572 ACPI_MTX_DO_NOT_LOCK);
573 599
574 (void) 600 (void)
575 acpi_set_register(acpi_gbl_fixed_event_info 601 acpi_set_register(acpi_gbl_fixed_event_info
576 [ACPI_EVENT_POWER_BUTTON].status_register_id, 1, 602 [ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
577 ACPI_MTX_DO_NOT_LOCK);
578 603
579 arg.integer.value = ACPI_SST_WORKING; 604 arg.integer.value = ACPI_SST_WORKING;
580 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL); 605 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
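
The hwsleep.c hunks stop using the cached acpi_gbl_common_fACS and instead look the FACS up through the table manager, then pick the 32-bit or 64-bit waking-vector field based on the table length and the X_ field. A sketch of the resulting store path; the helper name is illustrative:

#include <acpi/acpi.h>
#include <acpi/actables.h>

static acpi_status sketch_set_waking_vector(acpi_physical_address address)
{
	struct acpi_table_facs *facs;
	acpi_status status;

	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
					 (struct acpi_table_header **)&facs);
	if (ACPI_FAILURE(status))
		return status;

	if ((facs->length < 32) || !facs->xfirmware_waking_vector) {
		/* ACPI 1.0 FACS, short table, or X_ field is zero */
		facs->firmware_waking_vector = (u32) address;
	} else {
		/* ACPI 2.0 FACS with a valid X_ field */
		facs->xfirmware_waking_vector = address;
	}
	return AE_OK;
}
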
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
index c4ec47c939fd..c32eab696acd 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
66 return_ACPI_STATUS(AE_BAD_PARAMETER); 66 return_ACPI_STATUS(AE_BAD_PARAMETER);
67 } 67 }
68 68
69 if (acpi_gbl_FADT->tmr_val_ext == 0) { 69 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
70 *resolution = 24; 70 *resolution = 24;
71 } else { 71 } else {
72 *resolution = 32; 72 *resolution = 32;
@@ -98,7 +98,8 @@ acpi_status acpi_get_timer(u32 * ticks)
98 return_ACPI_STATUS(AE_BAD_PARAMETER); 98 return_ACPI_STATUS(AE_BAD_PARAMETER);
99 } 99 }
100 100
101 status = acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT->xpm_tmr_blk); 101 status =
102 acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
102 103
103 return_ACPI_STATUS(status); 104 return_ACPI_STATUS(status);
104} 105}
@@ -153,7 +154,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
153 if (start_ticks < end_ticks) { 154 if (start_ticks < end_ticks) {
154 delta_ticks = end_ticks - start_ticks; 155 delta_ticks = end_ticks - start_ticks;
155 } else if (start_ticks > end_ticks) { 156 } else if (start_ticks > end_ticks) {
156 if (acpi_gbl_FADT->tmr_val_ext == 0) { 157 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
157 158
158 /* 24-bit Timer */ 159 /* 24-bit Timer */
159 160
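
The hwtimer.c hunks replace the old tmr_val_ext FADT member with a test of the ACPI_FADT_32BIT_TIMER flag. The width decision, reduced to one helper (name illustrative):

#include <acpi/acpi.h>

static u32 sketch_pm_timer_width(void)
{
	/* 24-bit timer unless the FADT advertises the 32-bit variant */
	return (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) ? 32 : 24;
}
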
diff --git a/drivers/acpi/motherboard.c b/drivers/acpi/motherboard.c
deleted file mode 100644
index 2e17ec75af03..000000000000
--- a/drivers/acpi/motherboard.c
+++ /dev/null
@@ -1,191 +0,0 @@
1/*
2 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or (at
6 * your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 */
19
20/* Purpose: Prevent PCMCIA cards from using motherboard resources. */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/ioport.h>
27#include <asm/io.h>
28
29#include <acpi/acpi_bus.h>
30#include <acpi/acpi_drivers.h>
31
32#define _COMPONENT ACPI_SYSTEM_COMPONENT
33ACPI_MODULE_NAME("acpi_motherboard")
34
35/* Dell use PNP0C01 instead of PNP0C02 */
36#define ACPI_MB_HID1 "PNP0C01"
37#define ACPI_MB_HID2 "PNP0C02"
38/**
39 * Doesn't care about legacy IO ports, only IO ports beyond 0x1000 are reserved
40 * Doesn't care about the failure of 'request_region', since other may reserve
41 * the io ports as well
42 */
43#define IS_RESERVED_ADDR(base, len) \
44 (((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
45 && ((base) + (len) > PCIBIOS_MIN_IO))
46/*
47 * Clearing the flag (IORESOURCE_BUSY) allows drivers to use
48 * the io ports if they really know they can use it, while
49 * still preventing hotplug PCI devices from using it.
50 */
51
52/*
53 * When CONFIG_PNP is enabled, pnp/system.c binds to PNP0C01
54 * and PNP0C02, redundant with acpi_reserve_io_ranges().
55 * But acpi_reserve_io_ranges() is necessary for !CONFIG_PNP.
56 */
57static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
58{
59 struct resource *requested_res = NULL;
60
61
62 if (res->type == ACPI_RESOURCE_TYPE_IO) {
63 struct acpi_resource_io *io_res = &res->data.io;
64
65 if (io_res->minimum != io_res->maximum)
66 return AE_OK;
67 if (IS_RESERVED_ADDR
68 (io_res->minimum, io_res->address_length)) {
69 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
70 "Motherboard resources 0x%08x - 0x%08x\n",
71 io_res->minimum,
72 io_res->minimum +
73 io_res->address_length));
74 requested_res =
75 request_region(io_res->minimum,
76 io_res->address_length, "motherboard");
77 }
78 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_IO) {
79 struct acpi_resource_fixed_io *fixed_io_res =
80 &res->data.fixed_io;
81
82 if (IS_RESERVED_ADDR
83 (fixed_io_res->address, fixed_io_res->address_length)) {
84 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
85 "Motherboard resources 0x%08x - 0x%08x\n",
86 fixed_io_res->address,
87 fixed_io_res->address +
88 fixed_io_res->address_length));
89 requested_res =
90 request_region(fixed_io_res->address,
91 fixed_io_res->address_length,
92 "motherboard");
93 }
94 } else {
95 /* Memory mapped IO? */
96 }
97
98 if (requested_res)
99 requested_res->flags &= ~IORESOURCE_BUSY;
100 return AE_OK;
101}
102
103static int acpi_motherboard_add(struct acpi_device *device)
104{
105 if (!device)
106 return -EINVAL;
107 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
108 acpi_reserve_io_ranges, NULL);
109
110 return 0;
111}
112
113static struct acpi_driver acpi_motherboard_driver1 = {
114 .name = "motherboard",
115 .class = "",
116 .ids = ACPI_MB_HID1,
117 .ops = {
118 .add = acpi_motherboard_add,
119 },
120};
121
122static struct acpi_driver acpi_motherboard_driver2 = {
123 .name = "motherboard",
124 .class = "",
125 .ids = ACPI_MB_HID2,
126 .ops = {
127 .add = acpi_motherboard_add,
128 },
129};
130
131static void __init acpi_request_region (struct acpi_generic_address *addr,
132 unsigned int length, char *desc)
133{
134 if (!addr->address || !length)
135 return;
136
137 if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
138 request_region(addr->address, length, desc);
139 else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
140 request_mem_region(addr->address, length, desc);
141}
142
143static void __init acpi_reserve_resources(void)
144{
145 acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
146 acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
147
148 acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
149 acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
150
151 acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
152 acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
153
154 acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
155 acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
156
157 if (acpi_gbl_FADT->pm_tm_len == 4)
158 acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
159
160 acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
161 acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
162
163 /* Length of GPE blocks must be a non-negative multiple of 2 */
164
165 if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
166 acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
167 acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
168
169 if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
170 acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
171 acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
172}
173
174static int __init acpi_motherboard_init(void)
175{
176 acpi_bus_register_driver(&acpi_motherboard_driver1);
177 acpi_bus_register_driver(&acpi_motherboard_driver2);
178 /*
179 * Guarantee motherboard IO reservation first
180 * This module must run after scan.c
181 */
182 if (!acpi_disabled)
183 acpi_reserve_resources();
184 return 0;
185}
186
187/**
188 * Reserve motherboard resources after PCI claim BARs,
189 * but before PCI assign resources for uninitialized PCI devices
190 */
191fs_initcall(acpi_motherboard_init);
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index c1c6c236df9a..57faf598bad8 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -195,31 +195,27 @@ acpi_status acpi_ns_root_initialize(void)
195 obj_desc->mutex.sync_level = 195 obj_desc->mutex.sync_level =
196 (u8) (ACPI_TO_INTEGER(val) - 1); 196 (u8) (ACPI_TO_INTEGER(val) - 1);
197 197
198 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { 198 /* Create a mutex */
199 199
200 /* Create a counting semaphore for the global lock */ 200 status =
201 acpi_os_create_mutex(&obj_desc->mutex.
202 os_mutex);
203 if (ACPI_FAILURE(status)) {
204 acpi_ut_remove_reference(obj_desc);
205 goto unlock_and_exit;
206 }
201 207
202 status = 208 /* Special case for ACPI Global Lock */
203 acpi_os_create_semaphore
204 (ACPI_NO_UNIT_LIMIT, 1,
205 &acpi_gbl_global_lock_semaphore);
206 if (ACPI_FAILURE(status)) {
207 acpi_ut_remove_reference
208 (obj_desc);
209 goto unlock_and_exit;
210 }
211 209
212 /* Mark this mutex as very special */ 210 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
211 acpi_gbl_global_lock_mutex =
212 obj_desc->mutex.os_mutex;
213 213
214 obj_desc->mutex.os_mutex = 214 /* Create additional counting semaphore for global lock */
215 ACPI_GLOBAL_LOCK;
216 } else {
217 /* Create a mutex */
218 215
219 status = 216 status =
220 acpi_os_create_mutex(&obj_desc-> 217 acpi_os_create_semaphore(1, 0,
221 mutex. 218 &acpi_gbl_global_lock_semaphore);
222 os_mutex);
223 if (ACPI_FAILURE(status)) { 219 if (ACPI_FAILURE(status)) {
224 acpi_ut_remove_reference 220 acpi_ut_remove_reference
225 (obj_desc); 221 (obj_desc);
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 55b407aae266..1d693d8ad2d8 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,9 @@ ACPI_MODULE_NAME("nsalloc")
61struct acpi_namespace_node *acpi_ns_create_node(u32 name) 61struct acpi_namespace_node *acpi_ns_create_node(u32 name)
62{ 62{
63 struct acpi_namespace_node *node; 63 struct acpi_namespace_node *node;
64#ifdef ACPI_DBG_TRACK_ALLOCATIONS
65 u32 temp;
66#endif
64 67
65 ACPI_FUNCTION_TRACE(ns_create_node); 68 ACPI_FUNCTION_TRACE(ns_create_node);
66 69
@@ -71,6 +74,15 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
71 74
72 ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++); 75 ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
73 76
77#ifdef ACPI_DBG_TRACK_ALLOCATIONS
78 temp =
79 acpi_gbl_ns_node_list->total_allocated -
80 acpi_gbl_ns_node_list->total_freed;
81 if (temp > acpi_gbl_ns_node_list->max_occupied) {
82 acpi_gbl_ns_node_list->max_occupied = temp;
83 }
84#endif
85
74 node->name.integer = name; 86 node->name.integer = name;
75 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED); 87 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
76 return_PTR(node); 88 return_PTR(node);
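
The nsalloc.c hunk adds high-water-mark tracking for namespace nodes when ACPI_DBG_TRACK_ALLOCATIONS is enabled. The bookkeeping, shown as a standalone helper (the helper name is not part of the patch):

#ifdef ACPI_DBG_TRACK_ALLOCATIONS
static void track_ns_node_peak(void)
{
	u32 occupied = acpi_gbl_ns_node_list->total_allocated -
		       acpi_gbl_ns_node_list->total_freed;

	if (occupied > acpi_gbl_ns_node_list->max_occupied)
		acpi_gbl_ns_node_list->max_occupied = occupied;
}
#endif
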
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index d72df66aa965..1fc4f86676e1 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -205,7 +205,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
205 205
206 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { 206 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
207 this_node->name.integer = 207 this_node->name.integer =
208 acpi_ut_repair_name(this_node->name.integer); 208 acpi_ut_repair_name(this_node->name.ascii);
209 209
210 ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X", 210 ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
211 this_node->name.integer)); 211 this_node->name.integer));
@@ -226,6 +226,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
226 obj_desc = acpi_ns_get_attached_object(this_node); 226 obj_desc = acpi_ns_get_attached_object(this_node);
227 acpi_dbg_level = dbg_level; 227 acpi_dbg_level = dbg_level;
228 228
229 /* Temp nodes are those nodes created by a control method */
230
231 if (this_node->flags & ANOBJ_TEMPORARY) {
232 acpi_os_printf("(T) ");
233 }
234
229 switch (info->display_type & ACPI_DISPLAY_MASK) { 235 switch (info->display_type & ACPI_DISPLAY_MASK) {
230 case ACPI_DISPLAY_SUMMARY: 236 case ACPI_DISPLAY_SUMMARY:
231 237
@@ -623,7 +629,8 @@ acpi_ns_dump_objects(acpi_object_type type,
623 info.display_type = display_type; 629 info.display_type = display_type;
624 630
625 (void)acpi_ns_walk_namespace(type, start_handle, max_depth, 631 (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
626 ACPI_NS_WALK_NO_UNLOCK, 632 ACPI_NS_WALK_NO_UNLOCK |
633 ACPI_NS_WALK_TEMP_NODES,
627 acpi_ns_dump_one_object, (void *)&info, 634 acpi_ns_dump_one_object, (void *)&info,
628 NULL); 635 NULL);
629} 636}
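
The nsdump.c changes do two related things: nodes carrying the ANOBJ_TEMPORARY flag (created during control-method execution) get a "(T)" prefix in the dump output, and the dump walk now ORs ACPI_NS_WALK_TEMP_NODES into the flags so that such nodes are visited at all. A minimal illustration of that flag test follows, using made-up flag values rather than the real ACPICA definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values only; the real ones live in the ACPICA headers */
#define NODE_FLAG_TEMPORARY   0x02
#define WALK_NO_UNLOCK        0x00
#define WALK_TEMP_NODES       0x02

struct ns_node {
	char name[5];
	uint8_t flags;
};

static void dump_one_node(const struct ns_node *node)
{
	/* Temp nodes are those nodes created by a control method */
	if (node->flags & NODE_FLAG_TEMPORARY)
		printf("(T) ");
	printf("%s\n", node->name);
}

int main(void)
{
	struct ns_node permanent = { "_SB_", 0 };
	struct ns_node temporary = { "LOCL", NODE_FLAG_TEMPORARY };
	uint32_t walk_flags = WALK_NO_UNLOCK | WALK_TEMP_NODES;

	dump_one_node(&permanent);
	if (walk_flags & WALK_TEMP_NODES)	/* only the debugger dump asks for these */
		dump_one_node(&temporary);
	return 0;
}
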
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
index c6bf5d30fca3..5097e167939e 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 4b0a4a8c9843..aa6370c67ec1 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
154 * Execute the method via the interpreter. The interpreter is locked 154 * Execute the method via the interpreter. The interpreter is locked
155 * here before calling into the AML parser 155 * here before calling into the AML parser
156 */ 156 */
157 status = acpi_ex_enter_interpreter(); 157 acpi_ex_enter_interpreter();
158 if (ACPI_FAILURE(status)) {
159 return_ACPI_STATUS(status);
160 }
161
162 status = acpi_ps_execute_method(info); 158 status = acpi_ps_execute_method(info);
163 acpi_ex_exit_interpreter(); 159 acpi_ex_exit_interpreter();
164 } else { 160 } else {
@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
182 * resolution, we must lock it because we could access an opregion. 178 * resolution, we must lock it because we could access an opregion.
183 * The opregion access code assumes that the interpreter is locked. 179 * The opregion access code assumes that the interpreter is locked.
184 */ 180 */
185 status = acpi_ex_enter_interpreter(); 181 acpi_ex_enter_interpreter();
186 if (ACPI_FAILURE(status)) {
187 return_ACPI_STATUS(status);
188 }
189 182
190 /* Function has a strange interface */ 183 /* Function has a strange interface */
191 184
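
In the nseval.c hunks (and the matching nsinit.c and nsxfeval.c hunks below), acpi_ex_enter_interpreter no longer returns a status, so callers drop the error branch around taking the interpreter lock. The calling code reduces to a plain bracket pattern, sketched here with placeholder lock functions rather than the ACPICA ones.

#include <stdio.h>

/* Placeholder lock pair; models a lock acquisition that cannot fail */
static void enter_interpreter(void) { printf("interpreter locked\n"); }
static void exit_interpreter(void)  { printf("interpreter unlocked\n"); }

static int execute_method(void)
{
	printf("executing AML method\n");
	return 0;	/* status */
}

int main(void)
{
	int status;

	/* No failure check needed around the lock itself any more */
	enter_interpreter();
	status = execute_method();
	exit_interpreter();

	return status;
}
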
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index aec8488c0019..326af8fc0ce7 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -213,7 +213,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
213 u32 level, void *context, void **return_value) 213 u32 level, void *context, void **return_value)
214{ 214{
215 acpi_object_type type; 215 acpi_object_type type;
216 acpi_status status; 216 acpi_status status = AE_OK;
217 struct acpi_init_walk_info *info = 217 struct acpi_init_walk_info *info =
218 (struct acpi_init_walk_info *)context; 218 (struct acpi_init_walk_info *)context;
219 struct acpi_namespace_node *node = 219 struct acpi_namespace_node *node =
@@ -267,10 +267,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
267 /* 267 /*
268 * Must lock the interpreter before executing AML code 268 * Must lock the interpreter before executing AML code
269 */ 269 */
270 status = acpi_ex_enter_interpreter(); 270 acpi_ex_enter_interpreter();
271 if (ACPI_FAILURE(status)) {
272 return (status);
273 }
274 271
275 /* 272 /*
276 * Each of these types can contain executable AML code within the 273 * Each of these types can contain executable AML code within the
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index fe75d888e183..d4f9654fd20f 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,12 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acdispat.h> 46#include <acpi/acdispat.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsload") 50ACPI_MODULE_NAME("nsload")
50 51
51/* Local prototypes */ 52/* Local prototypes */
52static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type);
53
54#ifdef ACPI_FUTURE_IMPLEMENTATION 53#ifdef ACPI_FUTURE_IMPLEMENTATION
55acpi_status acpi_ns_unload_namespace(acpi_handle handle); 54acpi_status acpi_ns_unload_namespace(acpi_handle handle);
56 55
@@ -62,7 +61,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
62 * 61 *
63 * FUNCTION: acpi_ns_load_table 62 * FUNCTION: acpi_ns_load_table
64 * 63 *
65 * PARAMETERS: table_desc - Descriptor for table to be loaded 64 * PARAMETERS: table_index - Index for table to be loaded
66 * Node - Owning NS node 65 * Node - Owning NS node
67 * 66 *
68 * RETURN: Status 67 * RETURN: Status
@@ -72,42 +71,13 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
72 ******************************************************************************/ 71 ******************************************************************************/
73 72
74acpi_status 73acpi_status
75acpi_ns_load_table(struct acpi_table_desc *table_desc, 74acpi_ns_load_table(acpi_native_uint table_index,
76 struct acpi_namespace_node *node) 75 struct acpi_namespace_node *node)
77{ 76{
78 acpi_status status; 77 acpi_status status;
79 78
80 ACPI_FUNCTION_TRACE(ns_load_table); 79 ACPI_FUNCTION_TRACE(ns_load_table);
81 80
82 /* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */
83
84 if (!
85 (acpi_gbl_table_data[table_desc->type].
86 flags & ACPI_TABLE_EXECUTABLE)) {
87
88 /* Just ignore this table */
89
90 return_ACPI_STATUS(AE_OK);
91 }
92
93 /* Check validity of the AML start and length */
94
95 if (!table_desc->aml_start) {
96 ACPI_ERROR((AE_INFO, "Null AML pointer"));
97 return_ACPI_STATUS(AE_BAD_PARAMETER);
98 }
99
100 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AML block at %p\n",
101 table_desc->aml_start));
102
103 /* Ignore table if there is no AML contained within */
104
105 if (!table_desc->aml_length) {
106 ACPI_WARNING((AE_INFO, "Zero-length AML block in table [%4.4s]",
107 table_desc->pointer->signature));
108 return_ACPI_STATUS(AE_OK);
109 }
110
111 /* 81 /*
112 * Parse the table and load the namespace with all named 82 * Parse the table and load the namespace with all named
113 * objects found within. Control methods are NOT parsed 83 * objects found within. Control methods are NOT parsed
@@ -117,15 +87,34 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
117 * to another control method, we can't continue parsing 87 * to another control method, we can't continue parsing
118 * because we don't know how many arguments to parse next! 88 * because we don't know how many arguments to parse next!
119 */ 89 */
90 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
91 if (ACPI_FAILURE(status)) {
92 return_ACPI_STATUS(status);
93 }
94
95 /* If table already loaded into namespace, just return */
96
97 if (acpi_tb_is_table_loaded(table_index)) {
98 status = AE_ALREADY_EXISTS;
99 goto unlock;
100 }
101
120 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 102 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
121 "**** Loading table into namespace ****\n")); 103 "**** Loading table into namespace ****\n"));
122 104
123 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 105 status = acpi_tb_allocate_owner_id(table_index);
124 if (ACPI_FAILURE(status)) { 106 if (ACPI_FAILURE(status)) {
125 return_ACPI_STATUS(status); 107 goto unlock;
108 }
109
110 status = acpi_ns_parse_table(table_index, node->child);
111 if (ACPI_SUCCESS(status)) {
112 acpi_tb_set_table_loaded_flag(table_index, TRUE);
113 } else {
114 acpi_tb_release_owner_id(table_index);
126 } 115 }
127 116
128 status = acpi_ns_parse_table(table_desc, node->child); 117 unlock:
129 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 118 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
130 119
131 if (ACPI_FAILURE(status)) { 120 if (ACPI_FAILURE(status)) {
@@ -141,7 +130,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
141 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 130 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
142 "**** Begin Table Method Parsing and Object Initialization ****\n")); 131 "**** Begin Table Method Parsing and Object Initialization ****\n"));
143 132
144 status = acpi_ds_initialize_objects(table_desc, node); 133 status = acpi_ds_initialize_objects(table_index, node);
145 134
146 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 135 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
147 "**** Completed Table Method Parsing and Object Initialization ****\n")); 136 "**** Completed Table Method Parsing and Object Initialization ****\n"));
@@ -149,99 +138,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
149 return_ACPI_STATUS(status); 138 return_ACPI_STATUS(status);
150} 139}
151 140
152/******************************************************************************* 141#ifdef ACPI_OBSOLETE_FUNCTIONS
153 *
154 * FUNCTION: acpi_ns_load_table_by_type
155 *
156 * PARAMETERS: table_type - Id of the table type to load
157 *
158 * RETURN: Status
159 *
160 * DESCRIPTION: Load an ACPI table or tables into the namespace. All tables
161 * of the given type are loaded. The mechanism allows this
162 * routine to be called repeatedly.
163 *
164 ******************************************************************************/
165
166static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
167{
168 u32 i;
169 acpi_status status;
170 struct acpi_table_desc *table_desc;
171
172 ACPI_FUNCTION_TRACE(ns_load_table_by_type);
173
174 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
175 if (ACPI_FAILURE(status)) {
176 return_ACPI_STATUS(status);
177 }
178
179 /*
180 * Table types supported are:
181 * DSDT (one), SSDT/PSDT (multiple)
182 */
183 switch (table_type) {
184 case ACPI_TABLE_ID_DSDT:
185
186 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));
187
188 table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;
189
190 /* If table already loaded into namespace, just return */
191
192 if (table_desc->loaded_into_namespace) {
193 goto unlock_and_exit;
194 }
195
196 /* Now load the single DSDT */
197
198 status = acpi_ns_load_table(table_desc, acpi_gbl_root_node);
199 if (ACPI_SUCCESS(status)) {
200 table_desc->loaded_into_namespace = TRUE;
201 }
202 break;
203
204 case ACPI_TABLE_ID_SSDT:
205 case ACPI_TABLE_ID_PSDT:
206
207 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
208 "Namespace load: %d SSDT or PSDTs\n",
209 acpi_gbl_table_lists[table_type].count));
210
211 /*
212 * Traverse list of SSDT or PSDT tables
213 */
214 table_desc = acpi_gbl_table_lists[table_type].next;
215 for (i = 0; i < acpi_gbl_table_lists[table_type].count; i++) {
216 /*
217 * Only attempt to load table into namespace if it is not
218 * already loaded!
219 */
220 if (!table_desc->loaded_into_namespace) {
221 status =
222 acpi_ns_load_table(table_desc,
223 acpi_gbl_root_node);
224 if (ACPI_FAILURE(status)) {
225 break;
226 }
227
228 table_desc->loaded_into_namespace = TRUE;
229 }
230
231 table_desc = table_desc->next;
232 }
233 break;
234
235 default:
236 status = AE_SUPPORT;
237 break;
238 }
239
240 unlock_and_exit:
241 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
242 return_ACPI_STATUS(status);
243}
244
245/******************************************************************************* 142/*******************************************************************************
246 * 143 *
247 * FUNCTION: acpi_load_namespace 144 * FUNCTION: acpi_load_namespace
@@ -288,6 +185,7 @@ acpi_status acpi_ns_load_namespace(void)
288 185
289 return_ACPI_STATUS(status); 186 return_ACPI_STATUS(status);
290} 187}
188#endif
291 189
292#ifdef ACPI_FUTURE_IMPLEMENTATION 190#ifdef ACPI_FUTURE_IMPLEMENTATION
293/******************************************************************************* 191/*******************************************************************************
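
The rewritten acpi_ns_load_table above replaces the per-table-type loading logic with a single index-based sequence: take the namespace mutex, bail out if the table is already loaded, allocate an owner ID, run the two-pass parse, then either mark the table loaded or release the owner ID on failure, always dropping the mutex on the way out. The standalone model below mirrors that acquire/check/allocate/parse/rollback flow; the table bookkeeping and helper names are simplified stand-ins, not the real acpi_tb_* interfaces.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

struct table_slot {
	bool loaded;
	int owner_id;		/* 0 means "no owner allocated" */
};

static struct table_slot tables[4];
static pthread_mutex_t ns_mutex = PTHREAD_MUTEX_INITIALIZER;
static int next_owner_id = 1;

/* Stand-in for the two-pass namespace parse; always succeeds here */
static int parse_table(unsigned idx)
{
	printf("parsing table %u\n", idx);
	return 0;
}

static int load_table(unsigned idx)
{
	int status = 0;

	pthread_mutex_lock(&ns_mutex);

	/* If table already loaded into namespace, just return */
	if (tables[idx].loaded) {
		status = -1;	/* models AE_ALREADY_EXISTS */
		goto unlock;
	}

	tables[idx].owner_id = next_owner_id++;

	status = parse_table(idx);
	if (status == 0)
		tables[idx].loaded = true;
	else
		tables[idx].owner_id = 0;	/* roll back the owner ID */

unlock:
	pthread_mutex_unlock(&ns_mutex);
	return status;
}

int main(void)
{
	printf("first load:  %d\n", load_table(2));
	printf("second load: %d\n", load_table(2));	/* rejected as already loaded */
	return 0;
}
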
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 97b8332c9746..cbd94af08cc5 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index aabe8794b908..d9d7377bc6e6 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index 155505a4ef69..e696aa847990 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acparser.h> 46#include <acpi/acparser.h>
47#include <acpi/acdispat.h> 47#include <acpi/acdispat.h>
48#include <acpi/actables.h>
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsparse") 51ACPI_MODULE_NAME("nsparse")
@@ -62,14 +63,24 @@ ACPI_MODULE_NAME("nsparse")
62 * 63 *
63 ******************************************************************************/ 64 ******************************************************************************/
64acpi_status 65acpi_status
65acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc) 66acpi_ns_one_complete_parse(acpi_native_uint pass_number,
67 acpi_native_uint table_index)
66{ 68{
67 union acpi_parse_object *parse_root; 69 union acpi_parse_object *parse_root;
68 acpi_status status; 70 acpi_status status;
71 acpi_native_uint aml_length;
72 u8 *aml_start;
69 struct acpi_walk_state *walk_state; 73 struct acpi_walk_state *walk_state;
74 struct acpi_table_header *table;
75 acpi_owner_id owner_id;
70 76
71 ACPI_FUNCTION_TRACE(ns_one_complete_parse); 77 ACPI_FUNCTION_TRACE(ns_one_complete_parse);
72 78
79 status = acpi_tb_get_owner_id(table_index, &owner_id);
80 if (ACPI_FAILURE(status)) {
81 return_ACPI_STATUS(status);
82 }
83
73 /* Create and init a Root Node */ 84 /* Create and init a Root Node */
74 85
75 parse_root = acpi_ps_create_scope_op(); 86 parse_root = acpi_ps_create_scope_op();
@@ -79,26 +90,41 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
79 90
80 /* Create and initialize a new walk state */ 91 /* Create and initialize a new walk state */
81 92
82 walk_state = acpi_ds_create_walk_state(table_desc->owner_id, 93 walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
83 NULL, NULL, NULL);
84 if (!walk_state) { 94 if (!walk_state) {
85 acpi_ps_free_op(parse_root); 95 acpi_ps_free_op(parse_root);
86 return_ACPI_STATUS(AE_NO_MEMORY); 96 return_ACPI_STATUS(AE_NO_MEMORY);
87 } 97 }
88 98
89 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, 99 status = acpi_get_table_by_index(table_index, &table);
90 table_desc->aml_start, 100 if (ACPI_FAILURE(status)) {
91 table_desc->aml_length, NULL, 101 acpi_ds_delete_walk_state(walk_state);
92 pass_number); 102 acpi_ps_free_op(parse_root);
103 return_ACPI_STATUS(status);
104 }
105
106 /* Table must consist of at least a complete header */
107
108 if (table->length < sizeof(struct acpi_table_header)) {
109 status = AE_BAD_HEADER;
110 } else {
111 aml_start = (u8 *) table + sizeof(struct acpi_table_header);
112 aml_length = table->length - sizeof(struct acpi_table_header);
113 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
114 aml_start, aml_length, NULL,
115 (u8) pass_number);
116 }
117
93 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
94 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
120 acpi_ps_delete_parse_tree(parse_root);
95 return_ACPI_STATUS(status); 121 return_ACPI_STATUS(status);
96 } 122 }
97 123
98 /* Parse the AML */ 124 /* Parse the AML */
99 125
100 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n", 126 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
101 pass_number)); 127 (unsigned)pass_number));
102 status = acpi_ps_parse_aml(walk_state); 128 status = acpi_ps_parse_aml(walk_state);
103 129
104 acpi_ps_delete_parse_tree(parse_root); 130 acpi_ps_delete_parse_tree(parse_root);
@@ -119,7 +145,7 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
119 ******************************************************************************/ 145 ******************************************************************************/
120 146
121acpi_status 147acpi_status
122acpi_ns_parse_table(struct acpi_table_desc *table_desc, 148acpi_ns_parse_table(acpi_native_uint table_index,
123 struct acpi_namespace_node *start_node) 149 struct acpi_namespace_node *start_node)
124{ 150{
125 acpi_status status; 151 acpi_status status;
@@ -134,10 +160,10 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
134 * each Parser Op subtree is deleted when it is finished. This saves 160 * each Parser Op subtree is deleted when it is finished. This saves
135 * a great deal of memory, and allows a small cache of parse objects 161 * a great deal of memory, and allows a small cache of parse objects
136 * to service the entire parse. The second pass of the parse then 162 * to service the entire parse. The second pass of the parse then
137 * performs another complete parse of the AML.. 163 * performs another complete parse of the AML.
138 */ 164 */
139 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); 165 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
140 status = acpi_ns_one_complete_parse(1, table_desc); 166 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index);
141 if (ACPI_FAILURE(status)) { 167 if (ACPI_FAILURE(status)) {
142 return_ACPI_STATUS(status); 168 return_ACPI_STATUS(status);
143 } 169 }
@@ -152,7 +178,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
152 * parse objects are all cached. 178 * parse objects are all cached.
153 */ 179 */
154 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); 180 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
155 status = acpi_ns_one_complete_parse(2, table_desc); 181 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index);
156 if (ACPI_FAILURE(status)) { 182 if (ACPI_FAILURE(status)) {
157 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
158 } 184 }
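
In nsparse.c, acpi_ns_one_complete_parse now looks the table up by index, insists that it is at least one header long, and derives the AML region as everything after the standard table header. A minimal model of that length check and pointer arithmetic, using a cut-down header struct rather than struct acpi_table_header:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Cut-down version of an ACPI table header: signature + total length only */
struct table_header {
	char signature[4];
	uint32_t length;	/* length of header + AML, in bytes */
};

/* Locate the AML byte code that follows the header; returns NULL on bad header */
static const uint8_t *get_aml(const uint8_t *table, uint32_t *aml_length)
{
	const struct table_header *hdr = (const struct table_header *)table;

	/* Table must consist of at least a complete header */
	if (hdr->length < sizeof(*hdr))
		return NULL;

	*aml_length = hdr->length - (uint32_t)sizeof(*hdr);
	return table + sizeof(*hdr);
}

int main(void)
{
	uint8_t buffer[sizeof(struct table_header) + 3];
	struct table_header hdr = { { 'S', 'S', 'D', 'T' }, sizeof(buffer) };
	uint32_t aml_length = 0;
	const uint8_t *aml_start;

	memcpy(buffer, &hdr, sizeof(hdr));
	buffer[sizeof(hdr)] = 0x10;	/* pretend AML byte */

	aml_start = get_aml(buffer, &aml_length);
	if (aml_start)
		printf("AML block at offset %ld, length %u\n",
		       (long)(aml_start - buffer), aml_length);
	return 0;
}
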
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index 500e2bbcfaf7..e863be665ce8 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -321,7 +321,8 @@ acpi_ns_search_and_enter(u32 target_name,
321 * even though there are a few bad names. 321 * even though there are a few bad names.
322 */ 322 */
323 if (!acpi_ut_valid_acpi_name(target_name)) { 323 if (!acpi_ut_valid_acpi_name(target_name)) {
324 target_name = acpi_ut_repair_name(target_name); 324 target_name =
325 acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
325 326
326 /* Report warning only if in strict mode or debug mode */ 327 /* Report warning only if in strict mode or debug mode */
327 328
@@ -401,6 +402,10 @@ acpi_ns_search_and_enter(u32 target_name,
401 } 402 }
402#endif 403#endif
403 404
405 if (flags & ACPI_NS_TEMPORARY) {
406 new_node->flags |= ANOBJ_TEMPORARY;
407 }
408
404 /* Install the new object into the parent's list of children */ 409 /* Install the new object into the parent's list of children */
405 410
406 acpi_ns_install_node(walk_state, node, new_node, type); 411 acpi_ns_install_node(walk_state, node, new_node, type);
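
The nssearch.c hunk shows two small behavioural points: acpi_ut_repair_name now receives the name as a char pointer, and nodes entered with ACPI_NS_TEMPORARY get ANOBJ_TEMPORARY set so the walk code can recognise them later. As background, an ACPI name is a fixed four-character field; the sketch below validates one under the usual rules (leading 'A'-'Z' or '_', then 'A'-'Z', '0'-'9' or '_') and patches bad bytes with a placeholder. The '*' replacement is an assumption for illustration, not necessarily what acpi_ut_repair_name writes.

#include <stdio.h>

/* Validate/repair a 4-character ACPI-style name in place.
 * Returns the number of bytes that had to be replaced. */
static int repair_name(char name[4])
{
	int i, repaired = 0;

	for (i = 0; i < 4; i++) {
		int ok = (name[i] >= 'A' && name[i] <= 'Z') || name[i] == '_';

		/* Digits are allowed everywhere except the first position */
		if (i > 0 && name[i] >= '0' && name[i] <= '9')
			ok = 1;

		if (!ok) {
			name[i] = '*';	/* assumed placeholder character */
			repaired++;
		}
	}
	return repaired;
}

int main(void)
{
	char good[4] = { 'C', 'P', 'U', '0' };
	char bad[4]  = { '1', 'a', 'B', '!' };

	printf("good: %d bytes repaired -> %.4s\n", repair_name(good), good);
	printf("bad:  %d bytes repaired -> %.4s\n", repair_name(bad), bad);
	return 0;
}
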
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index aa4e799d9a8c..90fd059615ff 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -770,13 +770,6 @@ void acpi_ns_terminate(void)
770 } 770 }
771 771
772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n")); 772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
773
774 /*
775 * 2) Now we can delete the ACPI tables
776 */
777 acpi_tb_delete_all_tables();
778 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
779
780 return_VOID; 773 return_VOID;
781} 774}
782 775
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index c8f6bef16ed0..94eb8f332d94 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -126,7 +126,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
126 * PARAMETERS: Type - acpi_object_type to search for 126 * PARAMETERS: Type - acpi_object_type to search for
127 * start_node - Handle in namespace where search begins 127 * start_node - Handle in namespace where search begins
128 * max_depth - Depth to which search is to reach 128 * max_depth - Depth to which search is to reach
129 * unlock_before_callback- Whether to unlock the NS before invoking 129 * Flags - Whether to unlock the NS before invoking
130 * the callback routine 130 * the callback routine
131 * user_function - Called when an object of "Type" is found 131 * user_function - Called when an object of "Type" is found
132 * Context - Passed to user function 132 * Context - Passed to user function
@@ -153,7 +153,7 @@ acpi_status
153acpi_ns_walk_namespace(acpi_object_type type, 153acpi_ns_walk_namespace(acpi_object_type type,
154 acpi_handle start_node, 154 acpi_handle start_node,
155 u32 max_depth, 155 u32 max_depth,
156 u8 unlock_before_callback, 156 u32 flags,
157 acpi_walk_callback user_function, 157 acpi_walk_callback user_function,
158 void *context, void **return_value) 158 void *context, void **return_value)
159{ 159{
@@ -193,20 +193,34 @@ acpi_ns_walk_namespace(acpi_object_type type,
193 acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node, 193 acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
194 child_node); 194 child_node);
195 if (child_node) { 195 if (child_node) {
196 /* 196
197 * Found node, Get the type if we are not 197 /* Found next child, get the type if we are not searching for ANY */
198 * searching for ANY 198
199 */
200 if (type != ACPI_TYPE_ANY) { 199 if (type != ACPI_TYPE_ANY) {
201 child_type = child_node->type; 200 child_type = child_node->type;
202 } 201 }
203 202
204 if (child_type == type) { 203 /*
204 * Ignore all temporary namespace nodes (created during control
205 * method execution) unless told otherwise. These temporary nodes
206 * can cause a race condition because they can be deleted during the
207 * execution of the user function (if the namespace is unlocked before
208 * invocation of the user function.) Only the debugger namespace dump
209 * will examine the temporary nodes.
210 */
211 if ((child_node->flags & ANOBJ_TEMPORARY) &&
212 !(flags & ACPI_NS_WALK_TEMP_NODES)) {
213 status = AE_CTRL_DEPTH;
214 }
215
216 /* Type must match requested type */
217
218 else if (child_type == type) {
205 /* 219 /*
206 * Found a matching node, invoke the user 220 * Found a matching node, invoke the user callback function.
207 * callback function 221 * Unlock the namespace if flag is set.
208 */ 222 */
209 if (unlock_before_callback) { 223 if (flags & ACPI_NS_WALK_UNLOCK) {
210 mutex_status = 224 mutex_status =
211 acpi_ut_release_mutex 225 acpi_ut_release_mutex
212 (ACPI_MTX_NAMESPACE); 226 (ACPI_MTX_NAMESPACE);
@@ -216,10 +230,11 @@ acpi_ns_walk_namespace(acpi_object_type type,
216 } 230 }
217 } 231 }
218 232
219 status = user_function(child_node, level, 233 status =
220 context, return_value); 234 user_function(child_node, level, context,
235 return_value);
221 236
222 if (unlock_before_callback) { 237 if (flags & ACPI_NS_WALK_UNLOCK) {
223 mutex_status = 238 mutex_status =
224 acpi_ut_acquire_mutex 239 acpi_ut_acquire_mutex
225 (ACPI_MTX_NAMESPACE); 240 (ACPI_MTX_NAMESPACE);
@@ -251,20 +266,17 @@ acpi_ns_walk_namespace(acpi_object_type type,
251 } 266 }
252 267
253 /* 268 /*
254 * Depth first search: 269 * Depth first search: Attempt to go down another level in the
255 * Attempt to go down another level in the namespace 270 * namespace if we are allowed to. Don't go any further if we have
256 * if we are allowed to. Don't go any further if we 271 * reached the caller specified maximum depth or if the user
257 * have reached the caller specified maximum depth 272 * function has specified that the maximum depth has been reached.
258 * or if the user function has specified that the
259 * maximum depth has been reached.
260 */ 273 */
261 if ((level < max_depth) && (status != AE_CTRL_DEPTH)) { 274 if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
262 if (acpi_ns_get_next_node 275 if (acpi_ns_get_next_node
263 (ACPI_TYPE_ANY, child_node, NULL)) { 276 (ACPI_TYPE_ANY, child_node, NULL)) {
264 /* 277
265 * There is at least one child of this 278 /* There is at least one child of this node, visit it */
266 * node, visit the onde 279
267 */
268 level++; 280 level++;
269 parent_node = child_node; 281 parent_node = child_node;
270 child_node = NULL; 282 child_node = NULL;
@@ -272,9 +284,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
272 } 284 }
273 } else { 285 } else {
274 /* 286 /*
275 * No more children of this node (acpi_ns_get_next_node 287 * No more children of this node (acpi_ns_get_next_node failed), go
276 * failed), go back upwards in the namespace tree to 288 * back upwards in the namespace tree to the node's parent.
277 * the node's parent.
278 */ 289 */
279 level--; 290 level--;
280 child_node = parent_node; 291 child_node = parent_node;
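
The nswalk.c rewrite turns the single unlock_before_callback byte into a flags word and adds the rule that temporary nodes, and everything beneath them, are skipped unless ACPI_NS_WALK_TEMP_NODES is passed (skipping is done by treating the node as if the callback had asked not to descend). The sketch below is a self-contained model of that filtering walk over a toy tree; it is recursive for brevity, whereas the real ACPICA walk is iterative and also juggles the namespace mutex, which is omitted here.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define NODE_TEMPORARY   0x01	/* illustrative flag values */
#define WALK_TEMP_NODES  0x02

struct node {
	const char *name;
	uint8_t flags;
	struct node *child;	/* first child */
	struct node *peer;	/* next sibling */
};

typedef void (*walk_callback)(const struct node *n, int level);

/* Depth-first walk; temporary subtrees are pruned unless the flag asks for them */
static void walk(const struct node *n, int level, uint32_t flags, walk_callback cb)
{
	for (; n; n = n->peer) {
		if ((n->flags & NODE_TEMPORARY) && !(flags & WALK_TEMP_NODES))
			continue;	/* prune this node and its children */
		cb(n, level);
		walk(n->child, level + 1, flags, cb);
	}
}

static void print_node(const struct node *n, int level)
{
	printf("%*s%s%s\n", level * 2, "", n->name,
	       (n->flags & NODE_TEMPORARY) ? " (T)" : "");
}

int main(void)
{
	struct node tmp  = { "LOCL", NODE_TEMPORARY, NULL, NULL };
	struct node cpu0 = { "CPU0", 0, NULL, &tmp };
	struct node sb   = { "_SB_", 0, &cpu0, NULL };

	puts("default walk (temporary nodes skipped):");
	walk(&sb, 0, 0, print_node);

	puts("debugger-style walk (temporary nodes included):");
	walk(&sb, 0, WALK_TEMP_NODES, print_node);
	return 0;
}
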
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index dca6799ac678..7ac6ace50059 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -170,7 +170,6 @@ acpi_evaluate_object(acpi_handle handle,
170 struct acpi_buffer *return_buffer) 170 struct acpi_buffer *return_buffer)
171{ 171{
172 acpi_status status; 172 acpi_status status;
173 acpi_status status2;
174 struct acpi_evaluate_info *info; 173 struct acpi_evaluate_info *info;
175 acpi_size buffer_space_needed; 174 acpi_size buffer_space_needed;
176 u32 i; 175 u32 i;
@@ -329,14 +328,12 @@ acpi_evaluate_object(acpi_handle handle,
329 * Delete the internal return object. NOTE: Interpreter must be 328 * Delete the internal return object. NOTE: Interpreter must be
330 * locked to avoid race condition. 329 * locked to avoid race condition.
331 */ 330 */
332 status2 = acpi_ex_enter_interpreter(); 331 acpi_ex_enter_interpreter();
333 if (ACPI_SUCCESS(status2)) {
334 332
335 /* Remove one reference on the return object (should delete it) */ 333 /* Remove one reference on the return object (should delete it) */
336 334
337 acpi_ut_remove_reference(info->return_object); 335 acpi_ut_remove_reference(info->return_object);
338 acpi_ex_exit_interpreter(); 336 acpi_ex_exit_interpreter();
339 }
340 } 337 }
341 338
342 cleanup: 339 cleanup:
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index 978213a6c19f..b489781b22a8 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -84,38 +84,41 @@ acpi_get_handle(acpi_handle parent,
84 /* Convert a parent handle to a prefix node */ 84 /* Convert a parent handle to a prefix node */
85 85
86 if (parent) { 86 if (parent) {
87 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
88 if (ACPI_FAILURE(status)) {
89 return (status);
90 }
91
92 prefix_node = acpi_ns_map_handle_to_node(parent); 87 prefix_node = acpi_ns_map_handle_to_node(parent);
93 if (!prefix_node) { 88 if (!prefix_node) {
94 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
95 return (AE_BAD_PARAMETER); 89 return (AE_BAD_PARAMETER);
96 } 90 }
91 }
92
93 /*
94 * Valid cases are:
95 * 1) Fully qualified pathname
96 * 2) Parent + Relative pathname
97 *
98 * Error for <null Parent + relative path>
99 */
100 if (acpi_ns_valid_root_prefix(pathname[0])) {
97 101
98 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 102 /* Pathname is fully qualified (starts with '\') */
99 if (ACPI_FAILURE(status)) { 103
100 return (status); 104 /* Special case for root-only, since we can't search for it */
105
106 if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
107 *ret_handle =
108 acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
109 return (AE_OK);
101 } 110 }
102 } 111 } else if (!prefix_node) {
103 112
104 /* Special case for root, since we can't search for it */ 113 /* Relative path with null prefix is disallowed */
105 114
106 if (ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH) == 0) { 115 return (AE_BAD_PARAMETER);
107 *ret_handle =
108 acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
109 return (AE_OK);
110 } 116 }
111 117
112 /* 118 /* Find the Node and convert to a handle */
113 * Find the Node and convert to a handle
114 */
115 status = acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH,
116 &node);
117 119
118 *ret_handle = NULL; 120 status =
121 acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
119 if (ACPI_SUCCESS(status)) { 122 if (ACPI_SUCCESS(status)) {
120 *ret_handle = acpi_ns_convert_entry_to_handle(node); 123 *ret_handle = acpi_ns_convert_entry_to_handle(node);
121 } 124 }
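
The acpi_get_handle change above drops the namespace-mutex round trip and makes the argument rules explicit: a fully qualified pathname (leading '\') is always acceptable, the bare root path is special-cased, and a relative pathname without a parent handle is rejected. A small standalone validator expressing the same three cases; the return codes are stand-ins for the ACPI status values.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define OK              0
#define BAD_PARAMETER  -1

/* Model of the argument screening in acpi_get_handle:
 * 'have_parent' stands in for a non-NULL parent handle. */
static int check_path_args(bool have_parent, const char *pathname, bool *is_root)
{
	*is_root = false;

	if (pathname[0] == '\\') {		/* fully qualified */
		if (strcmp(pathname, "\\") == 0)
			*is_root = true;	/* root-only path, no search needed */
		return OK;
	}

	/* Relative path with null prefix is disallowed */
	if (!have_parent)
		return BAD_PARAMETER;

	return OK;				/* parent + relative pathname */
}

int main(void)
{
	bool root;

	printf("fully qualified, no parent: %d\n",
	       check_path_args(false, "\\_SB_.CPU0", &root));
	printf("root only:                  %d (is_root=%d)\n",
	       check_path_args(false, "\\", &root), root);
	printf("relative, no parent:        %d\n",
	       check_path_args(false, "CPU0", &root));
	printf("relative, with parent:      %d\n",
	       check_path_args(true, "CPU0", &root));
	return 0;
}
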
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index a18b1c223129..faa375887201 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index bd96a7045925..4a9faff4c01d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,7 +45,7 @@ int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
45int __cpuinitdata node_to_pxm_map[MAX_NUMNODES] 45int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
47 47
48extern int __init acpi_table_parse_madt_family(enum acpi_table_id id, 48extern int __init acpi_table_parse_madt_family(char *id,
49 unsigned long madt_size, 49 unsigned long madt_size,
50 int entry_id, 50 int entry_id,
51 acpi_madt_entry_handler handler, 51 acpi_madt_entry_handler handler,
@@ -89,7 +89,7 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
89 node_clear(node, nodes_found_map); 89 node_clear(node, nodes_found_map);
90} 90}
91 91
92void __init acpi_table_print_srat_entry(acpi_table_entry_header * header) 92void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header)
93{ 93{
94 94
95 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); 95 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
@@ -99,36 +99,35 @@ void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
99 99
100 switch (header->type) { 100 switch (header->type) {
101 101
102 case ACPI_SRAT_PROCESSOR_AFFINITY: 102 case ACPI_SRAT_TYPE_CPU_AFFINITY:
103#ifdef ACPI_DEBUG_OUTPUT 103#ifdef ACPI_DEBUG_OUTPUT
104 { 104 {
105 struct acpi_table_processor_affinity *p = 105 struct acpi_srat_cpu_affinity *p =
106 (struct acpi_table_processor_affinity *)header; 106 (struct acpi_srat_cpu_affinity *)header;
107 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 107 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
108 "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", 108 "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
109 p->apic_id, p->lsapic_eid, 109 p->apic_id, p->local_sapic_eid,
110 p->proximity_domain, 110 p->proximity_domain_lo,
111 p->flags. 111 (p->flags & ACPI_SRAT_CPU_ENABLED)?
112 enabled ? "enabled" : "disabled")); 112 "enabled" : "disabled"));
113 } 113 }
114#endif /* ACPI_DEBUG_OUTPUT */ 114#endif /* ACPI_DEBUG_OUTPUT */
115 break; 115 break;
116 116
117 case ACPI_SRAT_MEMORY_AFFINITY: 117 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
118#ifdef ACPI_DEBUG_OUTPUT 118#ifdef ACPI_DEBUG_OUTPUT
119 { 119 {
120 struct acpi_table_memory_affinity *p = 120 struct acpi_srat_mem_affinity *p =
121 (struct acpi_table_memory_affinity *)header; 121 (struct acpi_srat_mem_affinity *)header;
122 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 122 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
123 "SRAT Memory (0x%08x%08x length 0x%08x%08x type 0x%x) in proximity domain %d %s%s\n", 123 "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n",
124 p->base_addr_hi, p->base_addr_lo, 124 (unsigned long)p->base_address,
125 p->length_hi, p->length_lo, 125 (unsigned long)p->length,
126 p->memory_type, p->proximity_domain, 126 p->memory_type, p->proximity_domain,
127 p->flags. 127 (p->flags & ACPI_SRAT_MEM_ENABLED)?
128 enabled ? "enabled" : "disabled", 128 "enabled" : "disabled",
129 p->flags. 129 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
130 hot_pluggable ? " hot-pluggable" : 130 " hot-pluggable" : ""));
131 ""));
132 } 131 }
133#endif /* ACPI_DEBUG_OUTPUT */ 132#endif /* ACPI_DEBUG_OUTPUT */
134 break; 133 break;
@@ -141,18 +140,18 @@ void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
141 } 140 }
142} 141}
143 142
144static int __init acpi_parse_slit(unsigned long phys_addr, unsigned long size) 143static int __init acpi_parse_slit(struct acpi_table_header *table)
145{ 144{
146 struct acpi_table_slit *slit; 145 struct acpi_table_slit *slit;
147 u32 localities; 146 u32 localities;
148 147
149 if (!phys_addr || !size) 148 if (!table)
150 return -EINVAL; 149 return -EINVAL;
151 150
152 slit = (struct acpi_table_slit *)__va(phys_addr); 151 slit = (struct acpi_table_slit *)table;
153 152
154 /* downcast just for %llu vs %lu for i386/ia64 */ 153 /* downcast just for %llu vs %lu for i386/ia64 */
155 localities = (u32) slit->localities; 154 localities = (u32) slit->locality_count;
156 155
157 acpi_numa_slit_init(slit); 156 acpi_numa_slit_init(slit);
158 157
@@ -160,12 +159,12 @@ static int __init acpi_parse_slit(unsigned long phys_addr, unsigned long size)
160} 159}
161 160
162static int __init 161static int __init
163acpi_parse_processor_affinity(acpi_table_entry_header * header, 162acpi_parse_processor_affinity(struct acpi_subtable_header * header,
164 const unsigned long end) 163 const unsigned long end)
165{ 164{
166 struct acpi_table_processor_affinity *processor_affinity; 165 struct acpi_srat_cpu_affinity *processor_affinity;
167 166
168 processor_affinity = (struct acpi_table_processor_affinity *)header; 167 processor_affinity = (struct acpi_srat_cpu_affinity *)header;
169 if (!processor_affinity) 168 if (!processor_affinity)
170 return -EINVAL; 169 return -EINVAL;
171 170
@@ -178,12 +177,12 @@ acpi_parse_processor_affinity(acpi_table_entry_header * header,
178} 177}
179 178
180static int __init 179static int __init
181acpi_parse_memory_affinity(acpi_table_entry_header * header, 180acpi_parse_memory_affinity(struct acpi_subtable_header * header,
182 const unsigned long end) 181 const unsigned long end)
183{ 182{
184 struct acpi_table_memory_affinity *memory_affinity; 183 struct acpi_srat_mem_affinity *memory_affinity;
185 184
186 memory_affinity = (struct acpi_table_memory_affinity *)header; 185 memory_affinity = (struct acpi_srat_mem_affinity *)header;
187 if (!memory_affinity) 186 if (!memory_affinity)
188 return -EINVAL; 187 return -EINVAL;
189 188
@@ -195,23 +194,23 @@ acpi_parse_memory_affinity(acpi_table_entry_header * header,
195 return 0; 194 return 0;
196} 195}
197 196
198static int __init acpi_parse_srat(unsigned long phys_addr, unsigned long size) 197static int __init acpi_parse_srat(struct acpi_table_header *table)
199{ 198{
200 struct acpi_table_srat *srat; 199 struct acpi_table_srat *srat;
201 200
202 if (!phys_addr || !size) 201 if (!table)
203 return -EINVAL; 202 return -EINVAL;
204 203
205 srat = (struct acpi_table_srat *)__va(phys_addr); 204 srat = (struct acpi_table_srat *)table;
206 205
207 return 0; 206 return 0;
208} 207}
209 208
210int __init 209int __init
211acpi_table_parse_srat(enum acpi_srat_entry_id id, 210acpi_table_parse_srat(enum acpi_srat_type id,
212 acpi_madt_entry_handler handler, unsigned int max_entries) 211 acpi_madt_entry_handler handler, unsigned int max_entries)
213{ 212{
214 return acpi_table_parse_madt_family(ACPI_SRAT, 213 return acpi_table_parse_madt_family(ACPI_SIG_SRAT,
215 sizeof(struct acpi_table_srat), id, 214 sizeof(struct acpi_table_srat), id,
216 handler, max_entries); 215 handler, max_entries);
217} 216}
@@ -221,17 +220,17 @@ int __init acpi_numa_init(void)
221 int result; 220 int result;
222 221
223 /* SRAT: Static Resource Affinity Table */ 222 /* SRAT: Static Resource Affinity Table */
224 result = acpi_table_parse(ACPI_SRAT, acpi_parse_srat); 223 result = acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat);
225 224
226 if (result > 0) { 225 if (result > 0) {
227 result = acpi_table_parse_srat(ACPI_SRAT_PROCESSOR_AFFINITY, 226 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
228 acpi_parse_processor_affinity, 227 acpi_parse_processor_affinity,
229 NR_CPUS); 228 NR_CPUS);
230 result = acpi_table_parse_srat(ACPI_SRAT_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific 229 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific
231 } 230 }
232 231
233 /* SLIT: System Locality Information Table */ 232 /* SLIT: System Locality Information Table */
234 result = acpi_table_parse(ACPI_SLIT, acpi_parse_slit); 233 result = acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
235 234
236 acpi_numa_arch_fixup(); 235 acpi_numa_arch_fixup();
237 return 0; 236 return 0;
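
The numa.c conversion swaps the old bitfield-style SRAT structures for the new ACPICA ones, where "enabled" and "hot-pluggable" are plain bits in a flags word and tables are named by their four-character signature string. Underneath acpi_table_parse_madt_family sits the usual subtable pattern: walk a sequence of variable-length entries, each starting with a (type, length) header, until the parent table's length is exhausted. A standalone sketch of that loop over a hand-built buffer; the struct layouts here are simplified, not the real struct acpi_subtable_header or struct acpi_srat_cpu_affinity.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified subtable header: every SRAT/MADT entry starts like this */
struct sub_header {
	uint8_t type;
	uint8_t length;	/* length of this entry, header included */
};

#define TYPE_CPU_AFFINITY 0
#define FLAG_ENABLED      0x01

struct cpu_affinity {		/* simplified entry body */
	struct sub_header header;
	uint8_t apic_id;
	uint8_t flags;
};

static void parse_entries(const uint8_t *buf, size_t total_len)
{
	size_t offset = 0;

	while (offset + sizeof(struct sub_header) <= total_len) {
		const struct sub_header *h = (const struct sub_header *)(buf + offset);

		if (h->length < sizeof(*h) || offset + h->length > total_len)
			break;	/* malformed entry, stop */

		if (h->type == TYPE_CPU_AFFINITY) {
			const struct cpu_affinity *c = (const struct cpu_affinity *)h;
			printf("CPU entry: apic_id=%u %s\n", c->apic_id,
			       (c->flags & FLAG_ENABLED) ? "enabled" : "disabled");
		} else {
			printf("entry type %u (%u bytes) skipped\n", h->type, h->length);
		}
		offset += h->length;	/* advance by the entry's own length */
	}
}

int main(void)
{
	struct cpu_affinity a = { { TYPE_CPU_AFFINITY, sizeof(a) }, 4, FLAG_ENABLED };
	struct cpu_affinity b = { { TYPE_CPU_AFFINITY, sizeof(b) }, 5, 0 };
	uint8_t buf[2 * sizeof(struct cpu_affinity)];

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + sizeof(a), &b, sizeof(b));
	parse_entries(buf, sizeof(buf));
	return 0;
}
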
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 57ae1e5cde0a..0f6f3bcbc8eb 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -36,6 +36,7 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/nmi.h> 38#include <linux/nmi.h>
39#include <linux/acpi.h>
39#include <acpi/acpi.h> 40#include <acpi/acpi.h>
40#include <asm/io.h> 41#include <asm/io.h>
41#include <acpi/acpi_bus.h> 42#include <acpi/acpi_bus.h>
@@ -75,6 +76,54 @@ static acpi_osd_handler acpi_irq_handler;
75static void *acpi_irq_context; 76static void *acpi_irq_context;
76static struct workqueue_struct *kacpid_wq; 77static struct workqueue_struct *kacpid_wq;
77 78
79static void __init acpi_request_region (struct acpi_generic_address *addr,
80 unsigned int length, char *desc)
81{
82 struct resource *res;
83
84 if (!addr->address || !length)
85 return;
86
87 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
88 res = request_region(addr->address, length, desc);
89 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
90 res = request_mem_region(addr->address, length, desc);
91}
92
93static int __init acpi_reserve_resources(void)
94{
95 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
96 "ACPI PM1a_EVT_BLK");
97
98 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
99 "ACPI PM1b_EVT_BLK");
100
101 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
102 "ACPI PM1a_CNT_BLK");
103
104 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
105 "ACPI PM1b_CNT_BLK");
106
107 if (acpi_gbl_FADT.pm_timer_length == 4)
108 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
109
110 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
111 "ACPI PM2_CNT_BLK");
112
113 /* Length of GPE blocks must be a non-negative multiple of 2 */
114
115 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
116 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
117 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
118
119 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
120 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
121 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
122
123 return 0;
124}
125device_initcall(acpi_reserve_resources);
126
78acpi_status acpi_os_initialize(void) 127acpi_status acpi_os_initialize(void)
79{ 128{
80 return AE_OK; 129 return AE_OK;
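
The new acpi_reserve_resources() initcall above claims the FADT's fixed register blocks so that other drivers cannot grab them: each generic address is dispatched to request_region() or request_mem_region() depending on its space_id, zero addresses or lengths are ignored, and GPE blocks are only reserved when their length is an even number of bytes. The sketch below models just the dispatch-and-skip logic outside the kernel, with printf standing in for the actual resource reservation calls.

#include <stdio.h>
#include <stdint.h>

/* Simplified generic address: where a register block lives */
#define SPACE_SYSTEM_MEMORY 0
#define SPACE_SYSTEM_IO     1

struct generic_address {
	uint8_t space_id;
	uint64_t address;
};

/* Model of acpi_request_region(): skip empty blocks, dispatch on space_id */
static void request_block(const struct generic_address *addr,
			  unsigned int length, const char *desc)
{
	if (!addr->address || !length)
		return;		/* nothing to reserve */

	if (addr->space_id == SPACE_SYSTEM_IO)
		printf("request_region     0x%llx len %u (%s)\n",
		       (unsigned long long)addr->address, length, desc);
	else if (addr->space_id == SPACE_SYSTEM_MEMORY)
		printf("request_mem_region 0x%llx len %u (%s)\n",
		       (unsigned long long)addr->address, length, desc);
}

int main(void)
{
	struct generic_address pm1a_evt = { SPACE_SYSTEM_IO, 0x400 };
	struct generic_address gpe0 = { SPACE_SYSTEM_IO, 0x420 };
	struct generic_address empty = { SPACE_SYSTEM_IO, 0 };
	unsigned int gpe0_len = 8;

	request_block(&pm1a_evt, 4, "PM1a_EVT_BLK");
	request_block(&empty, 4, "PM1b_EVT_BLK");	/* skipped: zero address */

	/* Length of GPE blocks must be an even number of bytes */
	if (!(gpe0_len & 0x1))
		request_block(&gpe0, gpe0_len, "GPE0_BLK");
	return 0;
}
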
@@ -136,53 +185,43 @@ void acpi_os_vprintf(const char *fmt, va_list args)
136#endif 185#endif
137} 186}
138 187
139acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) 188acpi_physical_address __init acpi_os_get_root_pointer(void)
140{ 189{
141 if (efi_enabled) { 190 if (efi_enabled) {
142 addr->pointer_type = ACPI_PHYSICAL_POINTER;
143 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 191 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
144 addr->pointer.physical = efi.acpi20; 192 return efi.acpi20;
145 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 193 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
146 addr->pointer.physical = efi.acpi; 194 return efi.acpi;
147 else { 195 else {
148 printk(KERN_ERR PREFIX 196 printk(KERN_ERR PREFIX
149 "System description tables not found\n"); 197 "System description tables not found\n");
150 return AE_NOT_FOUND; 198 return 0;
151 } 199 }
152 } else { 200 } else
153 if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) { 201 return acpi_find_rsdp();
154 printk(KERN_ERR PREFIX
155 "System description tables not found\n");
156 return AE_NOT_FOUND;
157 }
158 }
159
160 return AE_OK;
161} 202}
162 203
163acpi_status 204void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
164acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
165 void __iomem ** virt)
166{ 205{
167 if (phys > ULONG_MAX) { 206 if (phys > ULONG_MAX) {
168 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 207 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
169 return AE_BAD_PARAMETER; 208 return 0;
170 } 209 }
171 /* 210 if (acpi_gbl_permanent_mmap)
172 * ioremap checks to ensure this is in reserved space 211 /*
173 */ 212 * ioremap checks to ensure this is in reserved space
174 *virt = ioremap((unsigned long)phys, size); 213 */
175 214 return ioremap((unsigned long)phys, size);
176 if (!*virt) 215 else
177 return AE_NO_MEMORY; 216 return __acpi_map_table((unsigned long)phys, size);
178
179 return AE_OK;
180} 217}
181EXPORT_SYMBOL_GPL(acpi_os_map_memory); 218EXPORT_SYMBOL_GPL(acpi_os_map_memory);
182 219
183void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) 220void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
184{ 221{
185 iounmap(virt); 222 if (acpi_gbl_permanent_mmap) {
223 iounmap(virt);
224 }
186} 225}
187EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 226EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
188 227
@@ -254,7 +293,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
254 * FADT. It may not be the same if an interrupt source override exists 293 * FADT. It may not be the same if an interrupt source override exists
255 * for the SCI. 294 * for the SCI.
256 */ 295 */
257 gsi = acpi_fadt.sci_int; 296 gsi = acpi_gbl_FADT.sci_interrupt;
258 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 297 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
259 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 298 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
260 gsi); 299 gsi);
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index bf88e076c3e9..c2b9835c890b 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index e1541db3753a..773aee82fbb8 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,11 @@
42 */ 42 */
43 43
44/* 44/*
45 * Parse the AML and build an operation tree as most interpreters, 45 * Parse the AML and build an operation tree as most interpreters, (such as
46 * like Perl, do. Parsing is done by hand rather than with a YACC 46 * Perl) do. Parsing is done by hand rather than with a YACC generated parser
47 * generated parser to tightly constrain stack and dynamic memory 47 * to tightly constrain stack and dynamic memory usage. Parsing is kept
48 * usage. At the same time, parsing is kept flexible and the code 48 * flexible and the code fairly compact by parsing based on a list of AML
49 * fairly compact by parsing based on a list of AML opcode 49 * opcode templates in aml_op_info[].
50 * templates in aml_op_info[]
51 */ 50 */
52 51
53#include <acpi/acpi.h> 52#include <acpi/acpi.h>
@@ -60,766 +59,679 @@ ACPI_MODULE_NAME("psloop")
60 59
61static u32 acpi_gbl_depth = 0; 60static u32 acpi_gbl_depth = 0;
62 61
62/* Local prototypes */
63
64static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
65
66static acpi_status
67acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
68 u8 * aml_op_start,
69 union acpi_parse_object *unnamed_op,
70 union acpi_parse_object **op);
71
72static acpi_status
73acpi_ps_create_op(struct acpi_walk_state *walk_state,
74 u8 * aml_op_start, union acpi_parse_object **new_op);
75
76static acpi_status
77acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
78 u8 * aml_op_start, union acpi_parse_object *op);
79
80static acpi_status
81acpi_ps_complete_op(struct acpi_walk_state *walk_state,
82 union acpi_parse_object **op, acpi_status status);
83
84static acpi_status
85acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
86 union acpi_parse_object *op, acpi_status status);
87
63/******************************************************************************* 88/*******************************************************************************
64 * 89 *
65 * FUNCTION: acpi_ps_parse_loop 90 * FUNCTION: acpi_ps_get_aml_opcode
66 * 91 *
67 * PARAMETERS: walk_state - Current state 92 * PARAMETERS: walk_state - Current state
68 * 93 *
69 * RETURN: Status 94 * RETURN: Status
70 * 95 *
71 * DESCRIPTION: Parse AML (pointed to by the current parser state) and return 96 * DESCRIPTION: Extract the next AML opcode from the input stream.
72 * a tree of ops.
73 * 97 *
74 ******************************************************************************/ 98 ******************************************************************************/
75 99
76acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) 100static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
77{ 101{
78 acpi_status status = AE_OK;
79 acpi_status status2;
80 union acpi_parse_object *op = NULL; /* current op */
81 union acpi_parse_object *arg = NULL;
82 union acpi_parse_object *pre_op = NULL;
83 struct acpi_parse_state *parser_state;
84 u8 *aml_op_start = NULL;
85 102
86 ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state); 103 ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
87 104
88 if (walk_state->descending_callback == NULL) { 105 walk_state->aml_offset =
89 return_ACPI_STATUS(AE_BAD_PARAMETER); 106 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
90 } 107 walk_state->parser_state.aml_start);
108 walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
91 109
92 parser_state = &walk_state->parser_state; 110 /*
93 walk_state->arg_types = 0; 111 * First cut to determine what we have found:
112 * 1) A valid AML opcode
113 * 2) A name string
114 * 3) An unknown/invalid opcode
115 */
116 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
94 117
95#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY)) 118 switch (walk_state->op_info->class) {
119 case AML_CLASS_ASCII:
120 case AML_CLASS_PREFIX:
121 /*
122 * Starts with a valid prefix or ASCII char, this is a name
123 * string. Convert the bare name string to a namepath.
124 */
125 walk_state->opcode = AML_INT_NAMEPATH_OP;
126 walk_state->arg_types = ARGP_NAMESTRING;
127 break;
96 128
97 if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) { 129 case AML_CLASS_UNKNOWN:
98 130
99 /* We are restarting a preempted control method */ 131 /* The opcode is unrecognized. Just skip unknown opcodes */
100 132
101 if (acpi_ps_has_completed_scope(parser_state)) { 133 ACPI_ERROR((AE_INFO,
102 /* 134 "Found unknown opcode %X at AML address %p offset %X, ignoring",
103 * We must check if a predicate to an IF or WHILE statement 135 walk_state->opcode, walk_state->parser_state.aml,
104 * was just completed 136 walk_state->aml_offset));
105 */
106 if ((parser_state->scope->parse_scope.op) &&
107 ((parser_state->scope->parse_scope.op->common.
108 aml_opcode == AML_IF_OP)
109 || (parser_state->scope->parse_scope.op->common.
110 aml_opcode == AML_WHILE_OP))
111 && (walk_state->control_state)
112 && (walk_state->control_state->common.state ==
113 ACPI_CONTROL_PREDICATE_EXECUTING)) {
114 /*
115 * A predicate was just completed, get the value of the
116 * predicate and branch based on that value
117 */
118 walk_state->op = NULL;
119 status =
120 acpi_ds_get_predicate_value(walk_state,
121 ACPI_TO_POINTER
122 (TRUE));
123 if (ACPI_FAILURE(status)
124 && ((status & AE_CODE_MASK) !=
125 AE_CODE_CONTROL)) {
126 if (status == AE_AML_NO_RETURN_VALUE) {
127 ACPI_EXCEPTION((AE_INFO, status,
128 "Invoked method did not return a value"));
129 137
130 } 138 ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
131 ACPI_EXCEPTION((AE_INFO, status,
132 "GetPredicate Failed"));
133 return_ACPI_STATUS(status);
134 }
135 139
136 status = 140 /* Assume one-byte bad opcode */
137 acpi_ps_next_parse_state(walk_state, op,
138 status);
139 }
140 141
141 acpi_ps_pop_scope(parser_state, &op, 142 walk_state->parser_state.aml++;
142 &walk_state->arg_types, 143 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
143 &walk_state->arg_count);
144 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
145 "Popped scope, Op=%p\n", op));
146 } else if (walk_state->prev_op) {
147 144
148 /* We were in the middle of an op */ 145 default:
149 146
150 op = walk_state->prev_op; 147 /* Found opcode info, this is a normal opcode */
151 walk_state->arg_types = walk_state->prev_arg_types; 148
152 } 149 walk_state->parser_state.aml +=
150 acpi_ps_get_opcode_size(walk_state->opcode);
151 walk_state->arg_types = walk_state->op_info->parse_args;
152 break;
153 } 153 }
154#endif
155 154
156 /* Iterative parsing loop, while there is more AML to process: */ 155 return_ACPI_STATUS(AE_OK);
156}
157 157
158 while ((parser_state->aml < parser_state->aml_end) || (op)) { 158/*******************************************************************************
159 aml_op_start = parser_state->aml; 159 *
160 if (!op) { 160 * FUNCTION: acpi_ps_build_named_op
161 *
162 * PARAMETERS: walk_state - Current state
163 * aml_op_start - Begin of named Op in AML
164 * unnamed_op - Early Op (not a named Op)
165 * Op - Returned Op
166 *
167 * RETURN: Status
168 *
169 * DESCRIPTION: Parse a named Op
170 *
171 ******************************************************************************/
161 172
162 /* Get the next opcode from the AML stream */ 173static acpi_status
174acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
175 u8 * aml_op_start,
176 union acpi_parse_object *unnamed_op,
177 union acpi_parse_object **op)
178{
179 acpi_status status = AE_OK;
180 union acpi_parse_object *arg = NULL;
163 181
164 walk_state->aml_offset = 182 ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
165 (u32) ACPI_PTR_DIFF(parser_state->aml,
166 parser_state->aml_start);
167 walk_state->opcode = acpi_ps_peek_opcode(parser_state);
168 183
169 /* 184 unnamed_op->common.value.arg = NULL;
170 * First cut to determine what we have found: 185 unnamed_op->common.aml_opcode = walk_state->opcode;
171 * 1) A valid AML opcode
172 * 2) A name string
173 * 3) An unknown/invalid opcode
174 */
175 walk_state->op_info =
176 acpi_ps_get_opcode_info(walk_state->opcode);
177 switch (walk_state->op_info->class) {
178 case AML_CLASS_ASCII:
179 case AML_CLASS_PREFIX:
180 /*
181 * Starts with a valid prefix or ASCII char, this is a name
182 * string. Convert the bare name string to a namepath.
183 */
184 walk_state->opcode = AML_INT_NAMEPATH_OP;
185 walk_state->arg_types = ARGP_NAMESTRING;
186 break;
187 186
188 case AML_CLASS_UNKNOWN: 187 /*
188 * Get and append arguments until we find the node that contains
189 * the name (the type ARGP_NAME).
190 */
191 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
192 (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
193 status =
194 acpi_ps_get_next_arg(walk_state,
195 &(walk_state->parser_state),
196 GET_CURRENT_ARG_TYPE(walk_state->
197 arg_types), &arg);
198 if (ACPI_FAILURE(status)) {
199 return_ACPI_STATUS(status);
200 }
189 201
190 /* The opcode is unrecognized. Just skip unknown opcodes */ 202 acpi_ps_append_arg(unnamed_op, arg);
203 INCREMENT_ARG_LIST(walk_state->arg_types);
204 }
191 205
192 ACPI_ERROR((AE_INFO, 206 /*
193 "Found unknown opcode %X at AML address %p offset %X, ignoring", 207 * Make sure that we found a NAME and didn't run out of arguments
194 walk_state->opcode, 208 */
195 parser_state->aml, 209 if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
196 walk_state->aml_offset)); 210 return_ACPI_STATUS(AE_AML_NO_OPERAND);
211 }
197 212
198 ACPI_DUMP_BUFFER(parser_state->aml, 128); 213 /* We know that this arg is a name, move to next arg */
199 214
200 /* Assume one-byte bad opcode */ 215 INCREMENT_ARG_LIST(walk_state->arg_types);
201 216
202 parser_state->aml++; 217 /*
203 continue; 218 * Find the object. This will either insert the object into
219 * the namespace or simply look it up
220 */
221 walk_state->op = NULL;
204 222
205 default: 223 status = walk_state->descending_callback(walk_state, op);
224 if (ACPI_FAILURE(status)) {
225 ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
226 return_ACPI_STATUS(status);
227 }
206 228
207 /* Found opcode info, this is a normal opcode */ 229 if (!*op) {
230 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
231 }
208 232
209 parser_state->aml += 233 status = acpi_ps_next_parse_state(walk_state, *op, status);
210 acpi_ps_get_opcode_size(walk_state->opcode); 234 if (ACPI_FAILURE(status)) {
211 walk_state->arg_types = 235 if (status == AE_CTRL_PENDING) {
212 walk_state->op_info->parse_args; 236 return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
213 break; 237 }
214 } 238 return_ACPI_STATUS(status);
239 }
215 240
216 /* Create Op structure and append to parent's argument list */ 241 acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
242 acpi_gbl_depth++;
217 243
218 if (walk_state->op_info->flags & AML_NAMED) { 244 if ((*op)->common.aml_opcode == AML_REGION_OP) {
245 /*
246 * Defer final parsing of an operation_region body, because we don't
247 * have enough info in the first pass to parse it correctly (i.e.,
248 * there may be method calls within the term_arg elements of the body.)
249 *
250 * However, we must continue parsing because the opregion is not a
251 * standalone package -- we don't know where the end is at this point.
252 *
253 * (Length is unknown until parse of the body complete)
254 */
255 (*op)->named.data = aml_op_start;
256 (*op)->named.length = 0;
257 }
219 258
220 /* Allocate a new pre_op if necessary */ 259 return_ACPI_STATUS(AE_OK);
260}
221 261
222 if (!pre_op) { 262/*******************************************************************************
223 pre_op = 263 *
224 acpi_ps_alloc_op(walk_state-> 264 * FUNCTION: acpi_ps_create_op
225 opcode); 265 *
226 if (!pre_op) { 266 * PARAMETERS: walk_state - Current state
227 status = AE_NO_MEMORY; 267 * aml_op_start - Op start in AML
228 goto close_this_op; 268 * new_op - Returned Op
229 } 269 *
230 } 270 * RETURN: Status
271 *
272 * DESCRIPTION: Get Op from AML
273 *
274 ******************************************************************************/
231 275
232 pre_op->common.value.arg = NULL; 276static acpi_status
233 pre_op->common.aml_opcode = walk_state->opcode; 277acpi_ps_create_op(struct acpi_walk_state *walk_state,
278 u8 * aml_op_start, union acpi_parse_object **new_op)
279{
280 acpi_status status = AE_OK;
281 union acpi_parse_object *op;
282 union acpi_parse_object *named_op = NULL;
234 283
235 /* 284 ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
236 * Get and append arguments until we find the node that contains
237 * the name (the type ARGP_NAME).
238 */
239 while (GET_CURRENT_ARG_TYPE
240 (walk_state->arg_types)
241 &&
242 (GET_CURRENT_ARG_TYPE
243 (walk_state->arg_types) != ARGP_NAME)) {
244 status =
245 acpi_ps_get_next_arg(walk_state,
246 parser_state,
247 GET_CURRENT_ARG_TYPE
248 (walk_state->
249 arg_types),
250 &arg);
251 if (ACPI_FAILURE(status)) {
252 goto close_this_op;
253 }
254 285
255 acpi_ps_append_arg(pre_op, arg); 286 status = acpi_ps_get_aml_opcode(walk_state);
256 INCREMENT_ARG_LIST(walk_state-> 287 if (status == AE_CTRL_PARSE_CONTINUE) {
257 arg_types); 288 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
258 } 289 }
259 290
260 /* 291 /* Create Op structure and append to parent's argument list */
261 * Make sure that we found a NAME and didn't run out of
262 * arguments
263 */
264 if (!GET_CURRENT_ARG_TYPE
265 (walk_state->arg_types)) {
266 status = AE_AML_NO_OPERAND;
267 goto close_this_op;
268 }
269 292
270 /* We know that this arg is a name, move to next arg */ 293 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
294 op = acpi_ps_alloc_op(walk_state->opcode);
295 if (!op) {
296 return_ACPI_STATUS(AE_NO_MEMORY);
297 }
271 298
272 INCREMENT_ARG_LIST(walk_state->arg_types); 299 if (walk_state->op_info->flags & AML_NAMED) {
300 status =
301 acpi_ps_build_named_op(walk_state, aml_op_start, op,
302 &named_op);
303 acpi_ps_free_op(op);
304 if (ACPI_FAILURE(status)) {
305 return_ACPI_STATUS(status);
306 }
273 307
274 /* 308 *new_op = named_op;
275 * Find the object. This will either insert the object into 309 return_ACPI_STATUS(AE_OK);
276 * the namespace or simply look it up 310 }
277 */
278 walk_state->op = NULL;
279 311
280 status = 312 /* Not a named opcode, just allocate Op and append to parent */
281 walk_state->descending_callback(walk_state,
282 &op);
283 if (ACPI_FAILURE(status)) {
284 ACPI_EXCEPTION((AE_INFO, status,
285 "During name lookup/catalog"));
286 goto close_this_op;
287 }
288 313
289 if (!op) { 314 if (walk_state->op_info->flags & AML_CREATE) {
290 continue; 315 /*
291 } 316 * Backup to beginning of create_xXXfield declaration
317 * body_length is unknown until we parse the body
318 */
319 op->named.data = aml_op_start;
320 op->named.length = 0;
321 }
292 322
293 status = 323 acpi_ps_append_arg(acpi_ps_get_parent_scope
294 acpi_ps_next_parse_state(walk_state, op, 324 (&(walk_state->parser_state)), op);
295 status);
296 if (status == AE_CTRL_PENDING) {
297 status = AE_OK;
298 goto close_this_op;
299 }
300 325
301 if (ACPI_FAILURE(status)) { 326 if (walk_state->descending_callback != NULL) {
302 goto close_this_op; 327 /*
303 } 328 * Find the object. This will either insert the object into
329 * the namespace or simply look it up
330 */
331 walk_state->op = *new_op = op;
304 332
305 acpi_ps_append_arg(op, 333 status = walk_state->descending_callback(walk_state, &op);
306 pre_op->common.value.arg); 334 status = acpi_ps_next_parse_state(walk_state, op, status);
307 acpi_gbl_depth++; 335 if (status == AE_CTRL_PENDING) {
308 336 status = AE_CTRL_PARSE_PENDING;
309 if (op->common.aml_opcode == AML_REGION_OP) { 337 }
310 /* 338 }
311 * Defer final parsing of an operation_region body,
312 * because we don't have enough info in the first pass
313 * to parse it correctly (i.e., there may be method
314 * calls within the term_arg elements of the body.)
315 *
316 * However, we must continue parsing because
317 * the opregion is not a standalone package --
318 * we don't know where the end is at this point.
319 *
320 * (Length is unknown until parse of the body complete)
321 */
322 op->named.data = aml_op_start;
323 op->named.length = 0;
324 }
325 } else {
326 /* Not a named opcode, just allocate Op and append to parent */
327 339
328 walk_state->op_info = 340 return_ACPI_STATUS(status);
329 acpi_ps_get_opcode_info(walk_state->opcode); 341}
330 op = acpi_ps_alloc_op(walk_state->opcode);
331 if (!op) {
332 status = AE_NO_MEMORY;
333 goto close_this_op;
334 }
335 342
336 if (walk_state->op_info->flags & AML_CREATE) { 343/*******************************************************************************
337 /* 344 *
338 * Backup to beginning of create_xXXfield declaration 345 * FUNCTION: acpi_ps_get_arguments
339 * body_length is unknown until we parse the body 346 *
340 */ 347 * PARAMETERS: walk_state - Current state
341 op->named.data = aml_op_start; 348 * aml_op_start - Op start in AML
342 op->named.length = 0; 349 * Op - Current Op
343 } 350 *
351 * RETURN: Status
352 *
353 * DESCRIPTION: Get arguments for passed Op.
354 *
355 ******************************************************************************/
344 356
345 acpi_ps_append_arg(acpi_ps_get_parent_scope 357static acpi_status
346 (parser_state), op); 358acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
359 u8 * aml_op_start, union acpi_parse_object *op)
360{
361 acpi_status status = AE_OK;
362 union acpi_parse_object *arg = NULL;
347 363
348 if ((walk_state->descending_callback != NULL)) { 364 ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
349 /*
350 * Find the object. This will either insert the object into
351 * the namespace or simply look it up
352 */
353 walk_state->op = op;
354 365
355 status = 366 switch (op->common.aml_opcode) {
356 walk_state-> 367 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
357 descending_callback(walk_state, 368 case AML_WORD_OP: /* AML_WORDDATA_ARG */
358 &op); 369 case AML_DWORD_OP: /* AML_DWORDATA_ARG */
359 status = 370 case AML_QWORD_OP: /* AML_QWORDATA_ARG */
360 acpi_ps_next_parse_state(walk_state, 371 case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
361 op,
362 status);
363 if (status == AE_CTRL_PENDING) {
364 status = AE_OK;
365 goto close_this_op;
366 }
367 372
368 if (ACPI_FAILURE(status)) { 373 /* Fill in constant or string argument directly */
369 goto close_this_op;
370 }
371 }
372 }
373 374
374 op->common.aml_offset = walk_state->aml_offset; 375 acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
376 GET_CURRENT_ARG_TYPE(walk_state->
377 arg_types),
378 op);
379 break;
375 380
376 if (walk_state->op_info) { 381 case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
377 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, 382
378 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n", 383 status =
379 (u32) op->common.aml_opcode, 384 acpi_ps_get_next_namepath(walk_state,
380 walk_state->op_info->name, op, 385 &(walk_state->parser_state), op,
381 parser_state->aml, 386 1);
382 op->common.aml_offset)); 387 if (ACPI_FAILURE(status)) {
383 } 388 return_ACPI_STATUS(status);
384 } 389 }
385 390
391 walk_state->arg_types = 0;
392 break;
393
394 default:
386 /* 395 /*
387 * Start arg_count at zero because we don't know if there are 396 * Op is not a constant or string, append each argument to the Op
388 * any args yet
389 */ 397 */
390 walk_state->arg_count = 0; 398 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
391 399 && !walk_state->arg_count) {
392 /* Are there any arguments that must be processed? */ 400 walk_state->aml_offset =
393 401 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
394 if (walk_state->arg_types) { 402 walk_state->parser_state.
395 403 aml_start);
396 /* Get arguments */
397
398 switch (op->common.aml_opcode) {
399 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
400 case AML_WORD_OP: /* AML_WORDDATA_ARG */
401 case AML_DWORD_OP: /* AML_DWORDATA_ARG */
402 case AML_QWORD_OP: /* AML_QWORDATA_ARG */
403 case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
404
405 /* Fill in constant or string argument directly */
406
407 acpi_ps_get_next_simple_arg(parser_state,
408 GET_CURRENT_ARG_TYPE
409 (walk_state->
410 arg_types), op);
411 break;
412
413 case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
414
415 status =
416 acpi_ps_get_next_namepath(walk_state,
417 parser_state, op,
418 1);
419 if (ACPI_FAILURE(status)) {
420 goto close_this_op;
421 }
422
423 walk_state->arg_types = 0;
424 break;
425 404
426 default: 405 status =
427 /* 406 acpi_ps_get_next_arg(walk_state,
428 * Op is not a constant or string, append each argument 407 &(walk_state->parser_state),
429 * to the Op 408 GET_CURRENT_ARG_TYPE
430 */ 409 (walk_state->arg_types), &arg);
431 while (GET_CURRENT_ARG_TYPE 410 if (ACPI_FAILURE(status)) {
432 (walk_state->arg_types) 411 return_ACPI_STATUS(status);
433 && !walk_state->arg_count) { 412 }
434 walk_state->aml_offset = (u32)
435 ACPI_PTR_DIFF(parser_state->aml,
436 parser_state->
437 aml_start);
438 413
439 status = 414 if (arg) {
440 acpi_ps_get_next_arg(walk_state, 415 arg->common.aml_offset = walk_state->aml_offset;
441 parser_state, 416 acpi_ps_append_arg(op, arg);
442 GET_CURRENT_ARG_TYPE 417 }
443 (walk_state->
444 arg_types),
445 &arg);
446 if (ACPI_FAILURE(status)) {
447 goto close_this_op;
448 }
449 418
450 if (arg) { 419 INCREMENT_ARG_LIST(walk_state->arg_types);
451 arg->common.aml_offset = 420 }
452 walk_state->aml_offset;
453 acpi_ps_append_arg(op, arg);
454 }
455 INCREMENT_ARG_LIST(walk_state->
456 arg_types);
457 }
458 421
459 /* Special processing for certain opcodes */ 422 /* Special processing for certain opcodes */
460 423
461 /* TBD (remove): Temporary mechanism to disable this code if needed */ 424 /* TBD (remove): Temporary mechanism to disable this code if needed */
462 425
463#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE 426#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
464 427
465 if ((walk_state->pass_number <= 428 if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
466 ACPI_IMODE_LOAD_PASS1) 429 ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
467 && 430 /*
468 ((walk_state-> 431 * We want to skip If/Else/While constructs during Pass1 because we
469 parse_flags & ACPI_PARSE_DISASSEMBLE) == 432 * want to actually conditionally execute the code during Pass2.
470 0)) { 433 *
471 /* 434 * Except for disassembly, where we always want to walk the
472 * We want to skip If/Else/While constructs during Pass1 435 * If/Else/While packages
473 * because we want to actually conditionally execute the 436 */
474 * code during Pass2. 437 switch (op->common.aml_opcode) {
475 * 438 case AML_IF_OP:
476 * Except for disassembly, where we always want to 439 case AML_ELSE_OP:
477 * walk the If/Else/While packages 440 case AML_WHILE_OP:
478 */
479 switch (op->common.aml_opcode) {
480 case AML_IF_OP:
481 case AML_ELSE_OP:
482 case AML_WHILE_OP:
483
484 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
485 "Pass1: Skipping an If/Else/While body\n"));
486
487 /* Skip body of if/else/while in pass 1 */
488
489 parser_state->aml =
490 parser_state->pkg_end;
491 walk_state->arg_count = 0;
492 break;
493
494 default:
495 break;
496 }
497 }
498#endif
499 switch (op->common.aml_opcode) {
500 case AML_METHOD_OP:
501
502 /*
503 * Skip parsing of control method
504 * because we don't have enough info in the first pass
505 * to parse it correctly.
506 *
507 * Save the length and address of the body
508 */
509 op->named.data = parser_state->aml;
510 op->named.length =
511 (u32) (parser_state->pkg_end -
512 parser_state->aml);
513
514 /* Skip body of method */
515
516 parser_state->aml =
517 parser_state->pkg_end;
518 walk_state->arg_count = 0;
519 break;
520
521 case AML_BUFFER_OP:
522 case AML_PACKAGE_OP:
523 case AML_VAR_PACKAGE_OP:
524
525 if ((op->common.parent) &&
526 (op->common.parent->common.
527 aml_opcode == AML_NAME_OP)
528 && (walk_state->pass_number <=
529 ACPI_IMODE_LOAD_PASS2)) {
530 /*
531 * Skip parsing of Buffers and Packages
532 * because we don't have enough info in the first pass
533 * to parse them correctly.
534 */
535 op->named.data = aml_op_start;
536 op->named.length =
537 (u32) (parser_state->
538 pkg_end -
539 aml_op_start);
540
541 /* Skip body */
542
543 parser_state->aml =
544 parser_state->pkg_end;
545 walk_state->arg_count = 0;
546 }
547 break;
548 441
549 case AML_WHILE_OP: 442 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
443 "Pass1: Skipping an If/Else/While body\n"));
550 444
551 if (walk_state->control_state) { 445 /* Skip body of if/else/while in pass 1 */
552 walk_state->control_state->
553 control.package_end =
554 parser_state->pkg_end;
555 }
556 break;
557 446
558 default: 447 walk_state->parser_state.aml =
448 walk_state->parser_state.pkg_end;
449 walk_state->arg_count = 0;
450 break;
559 451
560 /* No action for all other opcodes */ 452 default:
561 break;
562 }
563 break; 453 break;
564 } 454 }
565 } 455 }
456#endif
566 457
567 /* Check for arguments that need to be processed */ 458 switch (op->common.aml_opcode) {
568 459 case AML_METHOD_OP:
569 if (walk_state->arg_count) {
570 /* 460 /*
571 * There are arguments (complex ones), push Op and 461 * Skip parsing of control method because we don't have enough
572 * prepare for argument 462 * info in the first pass to parse it correctly.
463 *
464 * Save the length and address of the body
573 */ 465 */
574 status = acpi_ps_push_scope(parser_state, op, 466 op->named.data = walk_state->parser_state.aml;
575 walk_state->arg_types, 467 op->named.length = (u32)
576 walk_state->arg_count); 468 (walk_state->parser_state.pkg_end -
577 if (ACPI_FAILURE(status)) { 469 walk_state->parser_state.aml);
578 goto close_this_op;
579 }
580 op = NULL;
581 continue;
582 }
583 470
584 /* 471 /* Skip body of method */
585 * All arguments have been processed -- Op is complete,
586 * prepare for next
587 */
588 walk_state->op_info =
589 acpi_ps_get_opcode_info(op->common.aml_opcode);
590 if (walk_state->op_info->flags & AML_NAMED) {
591 if (acpi_gbl_depth) {
592 acpi_gbl_depth--;
593 }
594 472
595 if (op->common.aml_opcode == AML_REGION_OP) { 473 walk_state->parser_state.aml =
474 walk_state->parser_state.pkg_end;
475 walk_state->arg_count = 0;
476 break;
477
478 case AML_BUFFER_OP:
479 case AML_PACKAGE_OP:
480 case AML_VAR_PACKAGE_OP:
481
482 if ((op->common.parent) &&
483 (op->common.parent->common.aml_opcode ==
484 AML_NAME_OP)
485 && (walk_state->pass_number <=
486 ACPI_IMODE_LOAD_PASS2)) {
596 /* 487 /*
597 * Skip parsing of control method or opregion body, 488 * Skip parsing of Buffers and Packages because we don't have
598 * because we don't have enough info in the first pass 489 * enough info in the first pass to parse them correctly.
599 * to parse them correctly.
600 *
601 * Completed parsing an op_region declaration, we now
602 * know the length.
603 */ 490 */
604 op->named.length = 491 op->named.data = aml_op_start;
605 (u32) (parser_state->aml - op->named.data); 492 op->named.length = (u32)
606 } 493 (walk_state->parser_state.pkg_end -
607 } 494 aml_op_start);
608 495
609 if (walk_state->op_info->flags & AML_CREATE) { 496 /* Skip body */
610 /*
611 * Backup to beginning of create_xXXfield declaration (1 for
612 * Opcode)
613 *
614 * body_length is unknown until we parse the body
615 */
616 op->named.length =
617 (u32) (parser_state->aml - op->named.data);
618 }
619 497
620 /* This op complete, notify the dispatcher */ 498 walk_state->parser_state.aml =
499 walk_state->parser_state.pkg_end;
500 walk_state->arg_count = 0;
501 }
502 break;
621 503
622 if (walk_state->ascending_callback != NULL) { 504 case AML_WHILE_OP:
623 walk_state->op = op;
624 walk_state->opcode = op->common.aml_opcode;
625 505
626 status = walk_state->ascending_callback(walk_state); 506 if (walk_state->control_state) {
627 status = 507 walk_state->control_state->control.package_end =
628 acpi_ps_next_parse_state(walk_state, op, status); 508 walk_state->parser_state.pkg_end;
629 if (status == AE_CTRL_PENDING) {
630 status = AE_OK;
631 goto close_this_op;
632 } 509 }
633 } 510 break;
634
635 close_this_op:
636 /*
637 * Finished one argument of the containing scope
638 */
639 parser_state->scope->parse_scope.arg_count--;
640 511
641 /* Finished with pre_op */ 512 default:
642 513
643 if (pre_op) { 514 /* No action for all other opcodes */
644 acpi_ps_free_op(pre_op); 515 break;
645 pre_op = NULL;
646 } 516 }
647 517
648 /* Close this Op (will result in parse subtree deletion) */ 518 break;
519 }
649 520
650 status2 = acpi_ps_complete_this_op(walk_state, op); 521 return_ACPI_STATUS(AE_OK);
651 if (ACPI_FAILURE(status2)) { 522}
652 return_ACPI_STATUS(status2);
653 }
654 op = NULL;
655 523
656 switch (status) { 524/*******************************************************************************
657 case AE_OK: 525 *
658 break; 526 * FUNCTION: acpi_ps_complete_op
527 *
528 * PARAMETERS: walk_state - Current state
529 * Op - Returned Op
530 * Status - Parse status before complete Op
531 *
532 * RETURN: Status
533 *
534 * DESCRIPTION: Complete Op
535 *
536 ******************************************************************************/
659 537
660 case AE_CTRL_TRANSFER: 538static acpi_status
539acpi_ps_complete_op(struct acpi_walk_state *walk_state,
540 union acpi_parse_object **op, acpi_status status)
541{
542 acpi_status status2;
661 543
662 /* We are about to transfer to a called method. */ 544 ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);
663 545
664 walk_state->prev_op = op; 546 /*
665 walk_state->prev_arg_types = walk_state->arg_types; 547 * Finished one argument of the containing scope
666 return_ACPI_STATUS(status); 548 */
549 walk_state->parser_state.scope->parse_scope.arg_count--;
667 550
668 case AE_CTRL_END: 551 /* Close this Op (will result in parse subtree deletion) */
669 552
670 acpi_ps_pop_scope(parser_state, &op, 553 status2 = acpi_ps_complete_this_op(walk_state, *op);
671 &walk_state->arg_types, 554 if (ACPI_FAILURE(status2)) {
672 &walk_state->arg_count); 555 return_ACPI_STATUS(status2);
556 }
673 557
674 if (op) { 558 *op = NULL;
675 walk_state->op = op;
676 walk_state->op_info =
677 acpi_ps_get_opcode_info(op->common.
678 aml_opcode);
679 walk_state->opcode = op->common.aml_opcode;
680 559
681 status = 560 switch (status) {
682 walk_state->ascending_callback(walk_state); 561 case AE_OK:
683 status = 562 break;
684 acpi_ps_next_parse_state(walk_state, op,
685 status);
686 563
687 status2 = 564 case AE_CTRL_TRANSFER:
688 acpi_ps_complete_this_op(walk_state, op);
689 if (ACPI_FAILURE(status2)) {
690 return_ACPI_STATUS(status2);
691 }
692 op = NULL;
693 }
694 status = AE_OK;
695 break;
696 565
697 case AE_CTRL_BREAK: 566 /* We are about to transfer to a called method */
698 case AE_CTRL_CONTINUE:
699 567
700 /* Pop off scopes until we find the While */ 568 walk_state->prev_op = NULL;
569 walk_state->prev_arg_types = walk_state->arg_types;
570 return_ACPI_STATUS(status);
701 571
702 while (!op || (op->common.aml_opcode != AML_WHILE_OP)) { 572 case AE_CTRL_END:
703 acpi_ps_pop_scope(parser_state, &op,
704 &walk_state->arg_types,
705 &walk_state->arg_count);
706 573
707 if (op->common.aml_opcode != AML_WHILE_OP) { 574 acpi_ps_pop_scope(&(walk_state->parser_state), op,
708 status2 = 575 &walk_state->arg_types,
709 acpi_ds_result_stack_pop 576 &walk_state->arg_count);
710 (walk_state);
711 if (ACPI_FAILURE(status2)) {
712 return_ACPI_STATUS(status2);
713 }
714 }
715 }
716
717 /* Close this iteration of the While loop */
718 577
719 walk_state->op = op; 578 if (*op) {
579 walk_state->op = *op;
720 walk_state->op_info = 580 walk_state->op_info =
721 acpi_ps_get_opcode_info(op->common.aml_opcode); 581 acpi_ps_get_opcode_info((*op)->common.aml_opcode);
722 walk_state->opcode = op->common.aml_opcode; 582 walk_state->opcode = (*op)->common.aml_opcode;
723 583
724 status = walk_state->ascending_callback(walk_state); 584 status = walk_state->ascending_callback(walk_state);
725 status = 585 status =
726 acpi_ps_next_parse_state(walk_state, op, status); 586 acpi_ps_next_parse_state(walk_state, *op, status);
727 587
728 status2 = acpi_ps_complete_this_op(walk_state, op); 588 status2 = acpi_ps_complete_this_op(walk_state, *op);
729 if (ACPI_FAILURE(status2)) { 589 if (ACPI_FAILURE(status2)) {
730 return_ACPI_STATUS(status2); 590 return_ACPI_STATUS(status2);
731 } 591 }
732 op = NULL; 592 }
733
734 status = AE_OK;
735 break;
736 593
737 case AE_CTRL_TERMINATE: 594 status = AE_OK;
595 break;
738 596
739 status = AE_OK; 597 case AE_CTRL_BREAK:
598 case AE_CTRL_CONTINUE:
740 599
741 /* Clean up */ 600 /* Pop off scopes until we find the While */
742 do {
743 if (op) {
744 status2 =
745 acpi_ps_complete_this_op(walk_state,
746 op);
747 if (ACPI_FAILURE(status2)) {
748 return_ACPI_STATUS(status2);
749 }
750 601
751 status2 = 602 while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
752 acpi_ds_result_stack_pop 603 acpi_ps_pop_scope(&(walk_state->parser_state), op,
753 (walk_state); 604 &walk_state->arg_types,
754 if (ACPI_FAILURE(status2)) { 605 &walk_state->arg_count);
755 return_ACPI_STATUS(status2);
756 }
757 606
758 acpi_ut_delete_generic_state 607 if ((*op)->common.aml_opcode != AML_WHILE_OP) {
759 (acpi_ut_pop_generic_state 608 status2 = acpi_ds_result_stack_pop(walk_state);
760 (&walk_state->control_state)); 609 if (ACPI_FAILURE(status2)) {
610 return_ACPI_STATUS(status2);
761 } 611 }
612 }
613 }
762 614
763 acpi_ps_pop_scope(parser_state, &op, 615 /* Close this iteration of the While loop */
764 &walk_state->arg_types,
765 &walk_state->arg_count);
766 616
767 } while (op); 617 walk_state->op = *op;
618 walk_state->op_info =
619 acpi_ps_get_opcode_info((*op)->common.aml_opcode);
620 walk_state->opcode = (*op)->common.aml_opcode;
768 621
769 return_ACPI_STATUS(status); 622 status = walk_state->ascending_callback(walk_state);
623 status = acpi_ps_next_parse_state(walk_state, *op, status);
770 624
771 default: /* All other non-AE_OK status */ 625 status2 = acpi_ps_complete_this_op(walk_state, *op);
626 if (ACPI_FAILURE(status2)) {
627 return_ACPI_STATUS(status2);
628 }
772 629
773 do { 630 status = AE_OK;
774 if (op) { 631 break;
775 status2 = 632
776 acpi_ps_complete_this_op(walk_state, 633 case AE_CTRL_TERMINATE:
777 op); 634
778 if (ACPI_FAILURE(status2)) { 635 /* Clean up */
779 return_ACPI_STATUS(status2); 636 do {
780 } 637 if (*op) {
638 status2 =
639 acpi_ps_complete_this_op(walk_state, *op);
640 if (ACPI_FAILURE(status2)) {
641 return_ACPI_STATUS(status2);
642 }
643 status2 = acpi_ds_result_stack_pop(walk_state);
644 if (ACPI_FAILURE(status2)) {
645 return_ACPI_STATUS(status2);
781 } 646 }
782 647
783 acpi_ps_pop_scope(parser_state, &op, 648 acpi_ut_delete_generic_state
784 &walk_state->arg_types, 649 (acpi_ut_pop_generic_state
785 &walk_state->arg_count); 650 (&walk_state->control_state));
651 }
786 652
787 } while (op); 653 acpi_ps_pop_scope(&(walk_state->parser_state), op,
654 &walk_state->arg_types,
655 &walk_state->arg_count);
788 656
789 /* 657 } while (*op);
790 * TBD: Cleanup parse ops on error 658
791 */ 659 return_ACPI_STATUS(AE_OK);
792#if 0 660
793 if (op == NULL) { 661 default: /* All other non-AE_OK status */
794 acpi_ps_pop_scope(parser_state, &op, 662
795 &walk_state->arg_types, 663 do {
796 &walk_state->arg_count); 664 if (*op) {
665 status2 =
666 acpi_ps_complete_this_op(walk_state, *op);
667 if (ACPI_FAILURE(status2)) {
668 return_ACPI_STATUS(status2);
669 }
797 } 670 }
798#endif
799 walk_state->prev_op = op;
800 walk_state->prev_arg_types = walk_state->arg_types;
801 return_ACPI_STATUS(status);
802 }
803 671
804 /* This scope complete? */ 672 acpi_ps_pop_scope(&(walk_state->parser_state), op,
673 &walk_state->arg_types,
674 &walk_state->arg_count);
805 675
806 if (acpi_ps_has_completed_scope(parser_state)) { 676 } while (*op);
807 acpi_ps_pop_scope(parser_state, &op, 677
678#if 0
679 /*
680 * TBD: Cleanup parse ops on error
681 */
682 if (*op == NULL) {
683 acpi_ps_pop_scope(parser_state, op,
808 &walk_state->arg_types, 684 &walk_state->arg_types,
809 &walk_state->arg_count); 685 &walk_state->arg_count);
810 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
811 "Popped scope, Op=%p\n", op));
812 } else {
813 op = NULL;
814 } 686 }
687#endif
688 walk_state->prev_op = NULL;
689 walk_state->prev_arg_types = walk_state->arg_types;
690 return_ACPI_STATUS(status);
691 }
815 692
816 } /* while parser_state->Aml */ 693 /* This scope complete? */
694
695 if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
696 acpi_ps_pop_scope(&(walk_state->parser_state), op,
697 &walk_state->arg_types,
698 &walk_state->arg_count);
699 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
700 } else {
701 *op = NULL;
702 }
703
704 return_ACPI_STATUS(AE_OK);
705}
706
707/*******************************************************************************
708 *
709 * FUNCTION: acpi_ps_complete_final_op
710 *
711 * PARAMETERS: walk_state - Current state
712 * Op - Current Op
713 * Status - Current parse status before complete last
714 * Op
715 *
716 * RETURN: Status
717 *
718 * DESCRIPTION: Complete last Op.
719 *
720 ******************************************************************************/
721
722static acpi_status
723acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
724 union acpi_parse_object *op, acpi_status status)
725{
726 acpi_status status2;
727
728 ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
817 729
818 /* 730 /*
819 * Complete the last Op (if not completed), and clear the scope stack. 731 * Complete the last Op (if not completed), and clear the scope stack.
820 * It is easily possible to end an AML "package" with an unbounded number 732 * It is easily possible to end an AML "package" with an unbounded number
821 * of open scopes (such as when several ASL blocks are closed with 733 * of open scopes (such as when several ASL blocks are closed with
822 * sequential closing braces). We want to terminate each one cleanly. 734 * sequential closing braces). We want to terminate each one cleanly.
823 */ 735 */
824 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n", 736 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
825 op)); 737 op));
@@ -838,8 +750,12 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
838 acpi_ps_next_parse_state(walk_state, op, 750 acpi_ps_next_parse_state(walk_state, op,
839 status); 751 status);
840 if (status == AE_CTRL_PENDING) { 752 if (status == AE_CTRL_PENDING) {
841 status = AE_OK; 753 status =
842 goto close_this_op; 754 acpi_ps_complete_op(walk_state, &op,
755 AE_OK);
756 if (ACPI_FAILURE(status)) {
757 return_ACPI_STATUS(status);
758 }
843 } 759 }
844 760
845 if (status == AE_CTRL_TERMINATE) { 761 if (status == AE_CTRL_TERMINATE) {
@@ -858,7 +774,9 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
858 } 774 }
859 } 775 }
860 776
861 acpi_ps_pop_scope(parser_state, 777 acpi_ps_pop_scope(&
778 (walk_state->
779 parser_state),
862 &op, 780 &op,
863 &walk_state-> 781 &walk_state->
864 arg_types, 782 arg_types,
@@ -887,10 +805,252 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
887 } 805 }
888 } 806 }
889 807
890 acpi_ps_pop_scope(parser_state, &op, &walk_state->arg_types, 808 acpi_ps_pop_scope(&(walk_state->parser_state), &op,
809 &walk_state->arg_types,
891 &walk_state->arg_count); 810 &walk_state->arg_count);
892 811
893 } while (op); 812 } while (op);
894 813
895 return_ACPI_STATUS(status); 814 return_ACPI_STATUS(status);
896} 815}
816
817/*******************************************************************************
818 *
819 * FUNCTION: acpi_ps_parse_loop
820 *
821 * PARAMETERS: walk_state - Current state
822 *
823 * RETURN: Status
824 *
825 * DESCRIPTION: Parse AML (pointed to by the current parser state) and return
826 * a tree of ops.
827 *
828 ******************************************************************************/
829
830acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
831{
832 acpi_status status = AE_OK;
833 union acpi_parse_object *op = NULL; /* current op */
834 struct acpi_parse_state *parser_state;
835 u8 *aml_op_start = NULL;
836
837 ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
838
839 if (walk_state->descending_callback == NULL) {
840 return_ACPI_STATUS(AE_BAD_PARAMETER);
841 }
842
843 parser_state = &walk_state->parser_state;
844 walk_state->arg_types = 0;
845
846#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
847
848 if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
849
850 /* We are restarting a preempted control method */
851
852 if (acpi_ps_has_completed_scope(parser_state)) {
853 /*
854 * We must check if a predicate to an IF or WHILE statement
855 * was just completed
856 */
857 if ((parser_state->scope->parse_scope.op) &&
858 ((parser_state->scope->parse_scope.op->common.
859 aml_opcode == AML_IF_OP)
860 || (parser_state->scope->parse_scope.op->common.
861 aml_opcode == AML_WHILE_OP))
862 && (walk_state->control_state)
863 && (walk_state->control_state->common.state ==
864 ACPI_CONTROL_PREDICATE_EXECUTING)) {
865 /*
866 * A predicate was just completed, get the value of the
867 * predicate and branch based on that value
868 */
869 walk_state->op = NULL;
870 status =
871 acpi_ds_get_predicate_value(walk_state,
872 ACPI_TO_POINTER
873 (TRUE));
874 if (ACPI_FAILURE(status)
875 && ((status & AE_CODE_MASK) !=
876 AE_CODE_CONTROL)) {
877 if (status == AE_AML_NO_RETURN_VALUE) {
878 ACPI_EXCEPTION((AE_INFO, status,
879 "Invoked method did not return a value"));
880
881 }
882
883 ACPI_EXCEPTION((AE_INFO, status,
884 "GetPredicate Failed"));
885 return_ACPI_STATUS(status);
886 }
887
888 status =
889 acpi_ps_next_parse_state(walk_state, op,
890 status);
891 }
892
893 acpi_ps_pop_scope(parser_state, &op,
894 &walk_state->arg_types,
895 &walk_state->arg_count);
896 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
897 "Popped scope, Op=%p\n", op));
898 } else if (walk_state->prev_op) {
899
900 /* We were in the middle of an op */
901
902 op = walk_state->prev_op;
903 walk_state->arg_types = walk_state->prev_arg_types;
904 }
905 }
906#endif
907
908 /* Iterative parsing loop, while there is more AML to process: */
909
910 while ((parser_state->aml < parser_state->aml_end) || (op)) {
911 aml_op_start = parser_state->aml;
912 if (!op) {
913 status =
914 acpi_ps_create_op(walk_state, aml_op_start, &op);
915 if (ACPI_FAILURE(status)) {
916 if (status == AE_CTRL_PARSE_CONTINUE) {
917 continue;
918 }
919
920 if (status == AE_CTRL_PARSE_PENDING) {
921 status = AE_OK;
922 }
923
924 status =
925 acpi_ps_complete_op(walk_state, &op,
926 status);
927 if (ACPI_FAILURE(status)) {
928 return_ACPI_STATUS(status);
929 }
930
931 continue;
932 }
933
934 op->common.aml_offset = walk_state->aml_offset;
935
936 if (walk_state->op_info) {
937 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
938 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
939 (u32) op->common.aml_opcode,
940 walk_state->op_info->name, op,
941 parser_state->aml,
942 op->common.aml_offset));
943 }
944 }
945
946 /*
947 * Start arg_count at zero because we don't know if there are
948 * any args yet
949 */
950 walk_state->arg_count = 0;
951
952 /* Are there any arguments that must be processed? */
953
954 if (walk_state->arg_types) {
955
956 /* Get arguments */
957
958 status =
959 acpi_ps_get_arguments(walk_state, aml_op_start, op);
960 if (ACPI_FAILURE(status)) {
961 status =
962 acpi_ps_complete_op(walk_state, &op,
963 status);
964 if (ACPI_FAILURE(status)) {
965 return_ACPI_STATUS(status);
966 }
967
968 continue;
969 }
970 }
971
972 /* Check for arguments that need to be processed */
973
974 if (walk_state->arg_count) {
975 /*
976 * There are arguments (complex ones), push Op and
977 * prepare for argument
978 */
979 status = acpi_ps_push_scope(parser_state, op,
980 walk_state->arg_types,
981 walk_state->arg_count);
982 if (ACPI_FAILURE(status)) {
983 status =
984 acpi_ps_complete_op(walk_state, &op,
985 status);
986 if (ACPI_FAILURE(status)) {
987 return_ACPI_STATUS(status);
988 }
989
990 continue;
991 }
992
993 op = NULL;
994 continue;
995 }
996
997 /*
998 * All arguments have been processed -- Op is complete,
999 * prepare for next
1000 */
1001 walk_state->op_info =
1002 acpi_ps_get_opcode_info(op->common.aml_opcode);
1003 if (walk_state->op_info->flags & AML_NAMED) {
1004 if (acpi_gbl_depth) {
1005 acpi_gbl_depth--;
1006 }
1007
1008 if (op->common.aml_opcode == AML_REGION_OP) {
1009 /*
1010 * Skip parsing of control method or opregion body,
1011 * because we don't have enough info in the first pass
1012 * to parse them correctly.
1013 *
1014 * Completed parsing an op_region declaration, we now
1015 * know the length.
1016 */
1017 op->named.length =
1018 (u32) (parser_state->aml - op->named.data);
1019 }
1020 }
1021
1022 if (walk_state->op_info->flags & AML_CREATE) {
1023 /*
1024 * Backup to beginning of create_xXXfield declaration (1 for
1025 * Opcode)
1026 *
1027 * body_length is unknown until we parse the body
1028 */
1029 op->named.length =
1030 (u32) (parser_state->aml - op->named.data);
1031 }
1032
1033 /* This op complete, notify the dispatcher */
1034
1035 if (walk_state->ascending_callback != NULL) {
1036 walk_state->op = op;
1037 walk_state->opcode = op->common.aml_opcode;
1038
1039 status = walk_state->ascending_callback(walk_state);
1040 status =
1041 acpi_ps_next_parse_state(walk_state, op, status);
1042 if (status == AE_CTRL_PENDING) {
1043 status = AE_OK;
1044 }
1045 }
1046
1047 status = acpi_ps_complete_op(walk_state, &op, status);
1048 if (ACPI_FAILURE(status)) {
1049 return_ACPI_STATUS(status);
1050 }
1051
1052 } /* while parser_state->Aml */
1053
1054 status = acpi_ps_complete_final_op(walk_state, op, status);
1055 return_ACPI_STATUS(status);
1056}
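Note: the psloop.c change above replaces the old monolithic parse loop with small helpers: acpi_ps_create_op() builds the next Op from the AML stream, acpi_ps_get_arguments() fills in its arguments, and acpi_ps_complete_op()/acpi_ps_complete_final_op() close it out, so each step returns a status the loop can act on instead of jumping to the old close_this_op label. The following is only a rough structural sketch of that control flow in standalone C; all ACPICA types, status codes and helpers are replaced by made-up stand-ins, and error, scope and pass-1 handling are omitted:

#include <stdio.h>

/* Made-up status codes standing in for acpi_status values. */
enum status { OK, FAILURE };

/* Made-up walk state: just enough to show the shape of the loop. */
struct walk_state {
	const unsigned char *aml;	/* current position in the AML stream */
	const unsigned char *aml_end;	/* end of the AML stream */
	int op;				/* nonzero while an Op is being parsed */
	int arg_count;			/* complex args still outstanding */
};

/* Stand-in for acpi_ps_create_op(): decode one byte as "the opcode". */
static enum status create_op(struct walk_state *ws)
{
	ws->op = *ws->aml++;
	return OK;
}

/* Stand-in for acpi_ps_get_arguments(): pretend all args are simple. */
static enum status get_arguments(struct walk_state *ws)
{
	ws->arg_count = 0;
	return OK;
}

/* Stand-in for acpi_ps_complete_op(): close the current Op. */
static enum status complete_op(struct walk_state *ws)
{
	printf("completed op %02x\n", ws->op);
	ws->op = 0;
	return OK;
}

/* Structural model of the new acpi_ps_parse_loop(): while AML remains,
 * start a new Op, gather its arguments, then complete it; a nonzero
 * arg_count would instead push a scope and loop again. */
static enum status parse_loop(struct walk_state *ws)
{
	enum status status = OK;

	while (ws->aml < ws->aml_end || ws->op) {
		if (!ws->op) {
			status = create_op(ws);
			if (status != OK)
				return status;
		}

		status = get_arguments(ws);
		if (status != OK)
			return status;

		if (ws->arg_count)	/* complex argument: parse it first */
			continue;

		status = complete_op(ws);
		if (status != OK)
			return status;
	}

	return status;
}

int main(void)
{
	static const unsigned char aml[] = { 0x10, 0x14, 0x08 };
	struct walk_state ws = { aml, aml + sizeof(aml), 0, 0 };

	return parse_loop(&ws) == OK ? 0 : 1;
}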
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 4bd25e32769f..16d8b6cc3c22 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index a02aa62fe1e5..5d63f48e56b5 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -540,6 +540,11 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
 
 			if ((status == AE_ALREADY_EXISTS) &&
 			    (!walk_state->method_desc->method.mutex)) {
+				ACPI_INFO((AE_INFO,
+					   "Marking method %4.4s as Serialized",
+					   walk_state->method_node->name.
+					   ascii));
+
 				/*
 				 * Method tried to create an object twice. The probable cause is
 				 * that the method cannot handle reentrancy.
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index a3e0314de24d..77cfa4ed0cfe 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index 0015717ef096..966e7ea2a0c4 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index d405387b7414..8ca52002db55 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index a84a547a0f1b..49f9757434e4 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 5d996c1140af..94103bced75e 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -54,8 +54,6 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
 
 static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
 
-static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
-
 static void
 acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
 
@@ -215,6 +213,8 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
 acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
 {
 	acpi_status status;
+	union acpi_parse_object *op;
+	struct acpi_walk_state *walk_state;
 
 	ACPI_FUNCTION_TRACE(ps_execute_method);
 
@@ -234,8 +234,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
 	}
 
 	/*
-	 * The caller "owns" the parameters, so give each one an extra
-	 * reference
+	 * The caller "owns" the parameters, so give each one an extra reference
 	 */
 	acpi_ps_update_parameter_list(info, REF_INCREMENT);
 
@@ -244,30 +243,50 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
 	acpi_ps_start_trace(info);
 
 	/*
-	 * 1) Perform the first pass parse of the method to enter any
-	 * named objects that it creates into the namespace
+	 * Execute the method. Performs parse simultaneously
 	 */
 	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "**** Begin Method Parse **** Entry=%p obj=%p\n",
-			  info->resolved_node, info->obj_desc));
+			  "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
+			  info->resolved_node->name.ascii, info->resolved_node,
+			  info->obj_desc));
+
+	/* Create and init a Root Node */
+
+	op = acpi_ps_create_scope_op();
+	if (!op) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
+
+	/* Create and initialize a new walk state */
+
+	info->pass_number = ACPI_IMODE_EXECUTE;
+	walk_state =
+	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
+				      NULL, NULL);
+	if (!walk_state) {
+		status = AE_NO_MEMORY;
+		goto cleanup;
+	}
 
-	info->pass_number = 1;
-	status = acpi_ps_execute_pass(info);
+	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
+				       info->obj_desc->method.aml_start,
+				       info->obj_desc->method.aml_length, info,
+				       info->pass_number);
 	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
 		goto cleanup;
 	}
 
-	/*
-	 * 2) Execute the method. Performs second pass parse simultaneously
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "**** Begin Method Execution **** Entry=%p obj=%p\n",
-			  info->resolved_node, info->obj_desc));
+	/* Parse the AML */
 
-	info->pass_number = 3;
-	status = acpi_ps_execute_pass(info);
+	status = acpi_ps_parse_aml(walk_state);
+
+	/* walk_state was deleted by parse_aml */
 
       cleanup:
+	acpi_ps_delete_parse_tree(op);
+
 	/* End optional tracing */
 
 	acpi_ps_stop_trace(info);
@@ -330,62 +349,3 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
 		}
 	}
 }
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_execute_pass
- *
- * PARAMETERS:  Info            - See struct acpi_evaluate_info
- *                                  (Used: pass_number, Node, and obj_desc)
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Single AML pass: Parse or Execute a control method
- *
- ******************************************************************************/
-
-static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
-{
-	acpi_status status;
-	union acpi_parse_object *op;
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE(ps_execute_pass);
-
-	/* Create and init a Root Node */
-
-	op = acpi_ps_create_scope_op();
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Create and initialize a new walk state */
-
-	walk_state =
-	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
-				      NULL, NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup;
-	}
-
-	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
-				       info->obj_desc->method.aml_start,
-				       info->obj_desc->method.aml_length,
-				       info->pass_number == 1 ? NULL : info,
-				       info->pass_number);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
-	/* Parse the AML */
-
-	status = acpi_ps_parse_aml(walk_state);
-
-	/* Walk state was deleted by parse_aml */
-
-      cleanup:
-	acpi_ps_delete_parse_tree(op);
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 481e633bbf41..0f683c8c6fbc 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -513,7 +513,7 @@ int __init acpi_irq_penalty_init(void)
 		}
 	}
 	/* Add a penalty for the SCI */
-	acpi_irq_penalty[acpi_fadt.sci_int] += PIRQ_PENALTY_PCI_USING;
+	acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
 
 	return 0;
 }
@@ -785,7 +785,7 @@ static int irqrouter_resume(struct sys_device *dev)
 
 
 	/* Make sure SCI is enabled again (Apple firmware bug?) */
-	acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1, ACPI_MTX_DO_NOT_LOCK);
+	acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
 
 	list_for_each(node, &acpi_link.entries) {
 		link = list_entry(node, struct acpi_pci_link, node);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index a860efa2c562..4ecf701687e8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -117,6 +117,19 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
 
 EXPORT_SYMBOL(acpi_pci_unregister_driver);
 
+acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
+{
+	struct acpi_pci_root *tmp;
+
+	list_for_each_entry(tmp, &acpi_pci_roots, node) {
+		if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
+			return tmp->device->handle;
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
+
 static acpi_status
 get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
 {
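Note: acpi_get_pci_rootbridge_handle() above simply walks the acpi_pci_roots list and returns the ACPI handle of the root bridge matching a PCI segment/bus pair, or NULL when no such root is registered. A hypothetical in-kernel caller (illustration only; nothing beyond the exported function itself comes from the hunk above) might look like:

#include <linux/acpi.h>
#include <linux/kernel.h>

/* Hypothetical helper: report whether ACPI knows about the host bridge
 * of PCI segment 0, bus 0. */
static void example_check_root_bridge(void)
{
	acpi_handle handle;

	handle = acpi_get_pci_rootbridge_handle(0, 0);
	if (!handle)
		printk(KERN_INFO "no ACPI root bridge for 0000:00\n");
	else
		printk(KERN_INFO "found ACPI handle for root bridge 0000:00\n");
}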
@@ -152,6 +165,21 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
 	return AE_OK;
 }
 
+static void acpi_pci_bridge_scan(struct acpi_device *device)
+{
+	int status;
+	struct acpi_device *child = NULL;
+
+	if (device->flags.bus_address)
+		if (device->parent && device->parent->ops.bind) {
+			status = device->parent->ops.bind(device);
+			if (!status) {
+				list_for_each_entry(child, &device->children, node)
+				    acpi_pci_bridge_scan(child);
+			}
+		}
+}
+
 static int acpi_pci_root_add(struct acpi_device *device)
 {
 	int result = 0;
@@ -160,6 +188,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	acpi_status status = AE_OK;
 	unsigned long value = 0;
 	acpi_handle handle = NULL;
+	struct acpi_device *child;
 
 
 	if (!device)
@@ -175,9 +204,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
 	acpi_driver_data(device) = root;
 
-	/*
-	 * TBD: Doesn't the bus driver automatically set this?
-	 */
 	device->ops.bind = acpi_pci_bind;
 
 	/*
@@ -299,6 +325,12 @@ static int acpi_pci_root_add(struct acpi_device *device)
 	result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
 				      root->id.bus);
 
+	/*
+	 * Scan and bind all _ADR-Based Devices
+	 */
+	list_for_each_entry(child, &device->children, node)
+		acpi_pci_bridge_scan(child);
+
       end:
 	if (result) {
 		if (!list_empty(&root->node))
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5f9496d59ed6..0079bc51082c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -375,30 +375,126 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
375} 375}
376 376
377/* Use the acpiid in MADT to map cpus in case of SMP */ 377/* Use the acpiid in MADT to map cpus in case of SMP */
378
378#ifndef CONFIG_SMP 379#ifndef CONFIG_SMP
379#define convert_acpiid_to_cpu(acpi_id) (-1) 380static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
380#else 381#else
381 382
383static struct acpi_table_madt *madt;
384
385static int map_lapic_id(struct acpi_subtable_header *entry,
386 u32 acpi_id, int *apic_id)
387{
388 struct acpi_madt_local_apic *lapic =
389 (struct acpi_madt_local_apic *)entry;
390 if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
391 lapic->processor_id == acpi_id) {
392 *apic_id = lapic->id;
393 return 1;
394 }
395 return 0;
396}
397
398static int map_lsapic_id(struct acpi_subtable_header *entry,
399 u32 acpi_id, int *apic_id)
400{
401 struct acpi_madt_local_sapic *lsapic =
402 (struct acpi_madt_local_sapic *)entry;
403 /* Only check enabled APICs*/
404 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
405 /* First check against id */
406 if (lsapic->processor_id == acpi_id) {
407 *apic_id = lsapic->id;
408 return 1;
409 /* Check against optional uid */
410 } else if (entry->length >= 16 &&
411 lsapic->uid == acpi_id) {
412 *apic_id = lsapic->uid;
413 return 1;
414 }
415 }
416 return 0;
417}
418
382#ifdef CONFIG_IA64 419#ifdef CONFIG_IA64
383#define arch_acpiid_to_apicid ia64_acpiid_to_sapicid
384#define arch_cpu_to_apicid ia64_cpu_to_sapicid 420#define arch_cpu_to_apicid ia64_cpu_to_sapicid
385#define ARCH_BAD_APICID (0xffff)
386#else 421#else
387#define arch_acpiid_to_apicid x86_acpiid_to_apicid
388#define arch_cpu_to_apicid x86_cpu_to_apicid 422#define arch_cpu_to_apicid x86_cpu_to_apicid
389#define ARCH_BAD_APICID (0xff)
390#endif 423#endif
391 424
392static int convert_acpiid_to_cpu(u8 acpi_id) 425static int map_madt_entry(u32 acpi_id)
426{
427 unsigned long madt_end, entry;
428 int apic_id = -1;
429
430 if (!madt)
431 return apic_id;
432
433 entry = (unsigned long)madt;
434 madt_end = entry + madt->header.length;
435
436 /* Parse all entries looking for a match. */
437
438 entry += sizeof(struct acpi_table_madt);
439 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
440 struct acpi_subtable_header *header =
441 (struct acpi_subtable_header *)entry;
442 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
443 if (map_lapic_id(header, acpi_id, &apic_id))
444 break;
445 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
446 if (map_lsapic_id(header, acpi_id, &apic_id))
447 break;
448 }
449 entry += header->length;
450 }
451 return apic_id;
452}
453
454static int map_mat_entry(acpi_handle handle, u32 acpi_id)
455{
456 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
457 union acpi_object *obj;
458 struct acpi_subtable_header *header;
459 int apic_id = -1;
460
461 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
462 goto exit;
463
464 if (!buffer.length || !buffer.pointer)
465 goto exit;
466
467 obj = buffer.pointer;
468 if (obj->type != ACPI_TYPE_BUFFER ||
469 obj->buffer.length < sizeof(struct acpi_subtable_header)) {
470 goto exit;
471 }
472
473 header = (struct acpi_subtable_header *)obj->buffer.pointer;
474 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
475 map_lapic_id(header, acpi_id, &apic_id);
476 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
477 map_lsapic_id(header, acpi_id, &apic_id);
478 }
479
480exit:
481 if (buffer.pointer)
482 kfree(buffer.pointer);
483 return apic_id;
484}
485
486static int get_cpu_id(acpi_handle handle, u32 acpi_id)
393{ 487{
394 u16 apic_id;
395 int i; 488 int i;
489 int apic_id = -1;
396 490
397 apic_id = arch_acpiid_to_apicid[acpi_id]; 491 apic_id = map_mat_entry(handle, acpi_id);
398 if (apic_id == ARCH_BAD_APICID) 492 if (apic_id == -1)
399 return -1; 493 apic_id = map_madt_entry(acpi_id);
494 if (apic_id == -1)
495 return apic_id;
400 496
401 for (i = 0; i < NR_CPUS; i++) { 497 for (i = 0; i < NR_CPUS; ++i) {
402 if (arch_cpu_to_apicid[i] == apic_id) 498 if (arch_cpu_to_apicid[i] == apic_id)
403 return i; 499 return i;
404 } 500 }
@@ -410,7 +506,7 @@ static int convert_acpiid_to_cpu(u8 acpi_id)
410 Driver Interface 506 Driver Interface
411 -------------------------------------------------------------------------- */ 507 -------------------------------------------------------------------------- */
412 508
413static int acpi_processor_get_info(struct acpi_processor *pr) 509static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
414{ 510{
415 acpi_status status = 0; 511 acpi_status status = 0;
416 union acpi_object object = { 0 }; 512 union acpi_object object = { 0 };
@@ -431,7 +527,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
431 * Check to see if we have bus mastering arbitration control. This 527 * Check to see if we have bus mastering arbitration control. This
432 * is required for proper C3 usage (to maintain cache coherency). 528 * is required for proper C3 usage (to maintain cache coherency).
433 */ 529 */
434 if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) { 530 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
435 pr->flags.bm_control = 1; 531 pr->flags.bm_control = 1;
436 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 532 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
437 "Bus mastering arbitration control present\n")); 533 "Bus mastering arbitration control present\n"));
@@ -439,24 +535,35 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
439 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 535 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
440 "No bus mastering arbitration control\n")); 536 "No bus mastering arbitration control\n"));
441 537
442 /* 538 /* Check if it is a Device with HID and UID */
443 * Evalute the processor object. Note that it is common on SMP to 539 if (has_uid) {
444 * have the first (boot) processor with a valid PBLK address while 540 unsigned long value;
445 * all others have a NULL address. 541 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
446 */ 542 NULL, &value);
447 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 543 if (ACPI_FAILURE(status)) {
448 if (ACPI_FAILURE(status)) { 544 printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
449 printk(KERN_ERR PREFIX "Evaluating processor object\n"); 545 return -ENODEV;
450 return -ENODEV; 546 }
451 } 547 pr->acpi_id = value;
452 548 } else {
453 /* 549 /*
454 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. 550 * Evalute the processor object. Note that it is common on SMP to
455 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c 551 * have the first (boot) processor with a valid PBLK address while
456 */ 552 * all others have a NULL address.
457 pr->acpi_id = object.processor.proc_id; 553 */
554 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
555 if (ACPI_FAILURE(status)) {
556 printk(KERN_ERR PREFIX "Evaluating processor object\n");
557 return -ENODEV;
558 }
458 559
459 cpu_index = convert_acpiid_to_cpu(pr->acpi_id); 560 /*
561 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
562 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
563 */
564 pr->acpi_id = object.processor.proc_id;
565 }
566 cpu_index = get_cpu_id(pr->handle, pr->acpi_id);
460 567
461 /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 568 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
462 if (!cpu0_initialized && (cpu_index == -1) && 569 if (!cpu0_initialized && (cpu_index == -1) &&
@@ -473,7 +580,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
473 * less than the max # of CPUs. They should be ignored _iff 580 * less than the max # of CPUs. They should be ignored _iff
474 * they are physically not present. 581 * they are physically not present.
475 */ 582 */
476 if (cpu_index == -1) { 583 if (pr->id == -1) {
477 if (ACPI_FAILURE 584 if (ACPI_FAILURE
478 (acpi_processor_hotadd_init(pr->handle, &pr->id))) { 585 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
479 return -ENODEV; 586 return -ENODEV;
@@ -490,8 +597,8 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
490 object.processor.pblk_length); 597 object.processor.pblk_length);
491 else { 598 else {
492 pr->throttling.address = object.processor.pblk_address; 599 pr->throttling.address = object.processor.pblk_address;
493 pr->throttling.duty_offset = acpi_fadt.duty_offset; 600 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
494 pr->throttling.duty_width = acpi_fadt.duty_width; 601 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
495 602
496 pr->pblk = object.processor.pblk_address; 603 pr->pblk = object.processor.pblk_address;
497 604
@@ -525,7 +632,7 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
525 632
526 pr = acpi_driver_data(device); 633 pr = acpi_driver_data(device);
527 634
528 result = acpi_processor_get_info(pr); 635 result = acpi_processor_get_info(pr, device->flags.unique_id);
529 if (result) { 636 if (result) {
530 /* Processor is physically not present */ 637 /* Processor is physically not present */
531 return 0; 638 return 0;
@@ -707,7 +814,7 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
707 return -ENODEV; 814 return -ENODEV;
708 815
709 if ((pr->id >= 0) && (pr->id < NR_CPUS)) { 816 if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
710 kobject_uevent(&(*device)->kobj, KOBJ_ONLINE); 817 kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
711 } 818 }
712 return 0; 819 return 0;
713} 820}
@@ -745,13 +852,13 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
745 } 852 }
746 853
747 if (pr->id >= 0 && (pr->id < NR_CPUS)) { 854 if (pr->id >= 0 && (pr->id < NR_CPUS)) {
748 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 855 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
749 break; 856 break;
750 } 857 }
751 858
752 result = acpi_processor_start(device); 859 result = acpi_processor_start(device);
753 if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) { 860 if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
754 kobject_uevent(&device->kobj, KOBJ_ONLINE); 861 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
755 } else { 862 } else {
756 printk(KERN_ERR PREFIX "Device [%s] failed to start\n", 863 printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
757 acpi_device_bid(device)); 864 acpi_device_bid(device));
@@ -774,7 +881,7 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
774 } 881 }
775 882
776 if ((pr->id < NR_CPUS) && (cpu_present(pr->id))) 883 if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
777 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 884 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
778 break; 885 break;
779 default: 886 default:
780 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 887 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -895,6 +1002,12 @@ static int __init acpi_processor_init(void)
895 memset(&processors, 0, sizeof(processors)); 1002 memset(&processors, 0, sizeof(processors));
896 memset(&errata, 0, sizeof(errata)); 1003 memset(&errata, 0, sizeof(errata));
897 1004
1005#ifdef CONFIG_SMP
1006 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1007 (struct acpi_table_header **)&madt)))
1008 madt = 0;
1009#endif
1010
898 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); 1011 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
899 if (!acpi_processor_dir) 1012 if (!acpi_processor_dir)
900 return -ENOMEM; 1013 return -ENOMEM;
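
The processor_core.c change above replaces the per-architecture arch_acpiid_to_apicid[] lookup with get_cpu_id(): it first evaluates the processor object's _MAT method and, failing that, walks the MADT for an enabled LOCAL_APIC or LOCAL_SAPIC entry whose processor id (or, for an SAPIC, optional uid) matches, then translates the resulting APIC id into a logical CPU number through arch_cpu_to_apicid[]. A stand-alone sketch of the MADT walk, using simplified structures rather than the kernel's ACPICA types:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative layouts only; the kernel uses struct acpi_madt_local_apic etc. */
struct subtable_hdr { uint8_t type, length; };
struct madt_lapic   { struct subtable_hdr h; uint8_t acpi_processor_id, apic_id; uint32_t flags; };

#define MADT_TYPE_LOCAL_APIC 0
#define LAPIC_ENABLED        0x1

/* Walk the MADT subtables that follow the 44-byte MADT header and return the
 * APIC id whose ACPI processor id matches, or -1 if no enabled entry does. */
static int madt_acpi_id_to_apic_id(const uint8_t *madt, size_t madt_len, uint8_t acpi_id)
{
    size_t off = 44;                              /* sizeof(struct acpi_table_madt) */

    while (off + sizeof(struct subtable_hdr) < madt_len) {
        const struct subtable_hdr *h = (const void *)(madt + off);

        if (h->length < sizeof(*h))
            break;                                /* malformed table, stop walking */
        if (h->type == MADT_TYPE_LOCAL_APIC) {
            const struct madt_lapic *lapic = (const void *)h;

            if ((lapic->flags & LAPIC_ENABLED) && lapic->acpi_processor_id == acpi_id)
                return lapic->apic_id;
        }
        off += h->length;
    }
    return -1;
}

int main(void)
{
    /* A zeroed 44-byte header followed by one enabled LAPIC entry. */
    uint8_t madt[44 + sizeof(struct madt_lapic)] = { 0 };
    struct madt_lapic *lapic = (struct madt_lapic *)(madt + 44);

    lapic->h.type = MADT_TYPE_LOCAL_APIC;
    lapic->h.length = sizeof(*lapic);
    lapic->acpi_processor_id = 1;
    lapic->apic_id = 4;
    lapic->flags = LAPIC_ENABLED;

    printf("acpi_id 1 -> apic_id %d\n", madt_acpi_id_to_apic_id(madt, sizeof(madt), 1));
    return 0;
}

In the kernel path the returned APIC id is then matched against arch_cpu_to_apicid[i] for each possible CPU, which is exactly the final loop left in get_cpu_id().
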
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3f30af21574e..6c6751b1405b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -160,7 +160,7 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
160{ 160{
161 if (t2 >= t1) 161 if (t2 >= t1)
162 return (t2 - t1); 162 return (t2 - t1);
163 else if (!acpi_fadt.tmr_val_ext) 163 else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
165 else 165 else
166 return ((0xFFFFFFFF - t1) + t2); 166 return ((0xFFFFFFFF - t1) + t2);
@@ -187,8 +187,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
187 case ACPI_STATE_C3: 187 case ACPI_STATE_C3:
188 /* Disable bus master reload */ 188 /* Disable bus master reload */
189 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 189 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
190 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 190 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
191 ACPI_MTX_DO_NOT_LOCK);
192 break; 191 break;
193 } 192 }
194 } 193 }
@@ -198,8 +197,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
198 case ACPI_STATE_C3: 197 case ACPI_STATE_C3:
199 /* Enable bus master reload */ 198 /* Enable bus master reload */
200 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 199 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
201 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, 200 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
202 ACPI_MTX_DO_NOT_LOCK);
203 break; 201 break;
204 } 202 }
205 203
@@ -236,7 +234,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
236 /* Dummy wait op - must do something useless after P_LVL2 read 234 /* Dummy wait op - must do something useless after P_LVL2 read
237 because chipsets cannot guarantee that STPCLK# signal 235 because chipsets cannot guarantee that STPCLK# signal
238 gets asserted in time to freeze execution properly. */ 236 gets asserted in time to freeze execution properly. */
239 unused = inl(acpi_fadt.xpm_tmr_blk.address); 237 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
240 } 238 }
241} 239}
242 240
@@ -291,12 +289,10 @@ static void acpi_processor_idle(void)
291 289
292 pr->power.bm_activity <<= diff; 290 pr->power.bm_activity <<= diff;
293 291
294 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, 292 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
295 &bm_status, ACPI_MTX_DO_NOT_LOCK);
296 if (bm_status) { 293 if (bm_status) {
297 pr->power.bm_activity |= 0x1; 294 pr->power.bm_activity |= 0x1;
298 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 295 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
299 1, ACPI_MTX_DO_NOT_LOCK);
300 } 296 }
301 /* 297 /*
302 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 298 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
@@ -338,7 +334,7 @@ static void acpi_processor_idle(void)
338 * detection phase, to work cleanly with logical CPU hotplug. 334 * detection phase, to work cleanly with logical CPU hotplug.
339 */ 335 */
340 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 336 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
341 !pr->flags.has_cst && !acpi_fadt.plvl2_up) 337 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
342 cx = &pr->power.states[ACPI_STATE_C1]; 338 cx = &pr->power.states[ACPI_STATE_C1];
343#endif 339#endif
344 340
@@ -384,11 +380,11 @@ static void acpi_processor_idle(void)
384 380
385 case ACPI_STATE_C2: 381 case ACPI_STATE_C2:
386 /* Get start time (ticks) */ 382 /* Get start time (ticks) */
387 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 383 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
388 /* Invoke C2 */ 384 /* Invoke C2 */
389 acpi_cstate_enter(cx); 385 acpi_cstate_enter(cx);
390 /* Get end time (ticks) */ 386 /* Get end time (ticks) */
391 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 387 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
392 388
393#ifdef CONFIG_GENERIC_TIME 389#ifdef CONFIG_GENERIC_TIME
394 /* TSC halts in C2, so notify users */ 390 /* TSC halts in C2, so notify users */
@@ -411,8 +407,7 @@ static void acpi_processor_idle(void)
411 * All CPUs are trying to go to C3 407 * All CPUs are trying to go to C3
412 * Disable bus master arbitration 408 * Disable bus master arbitration
413 */ 409 */
414 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 410 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
415 ACPI_MTX_DO_NOT_LOCK);
416 } 411 }
417 } else { 412 } else {
418 /* SMP with no shared cache... Invalidate cache */ 413 /* SMP with no shared cache... Invalidate cache */
@@ -420,16 +415,15 @@ static void acpi_processor_idle(void)
420 } 415 }
421 416
422 /* Get start time (ticks) */ 417 /* Get start time (ticks) */
423 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 418 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
424 /* Invoke C3 */ 419 /* Invoke C3 */
425 acpi_cstate_enter(cx); 420 acpi_cstate_enter(cx);
426 /* Get end time (ticks) */ 421 /* Get end time (ticks) */
427 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 422 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
428 if (pr->flags.bm_check) { 423 if (pr->flags.bm_check) {
429 /* Enable bus master arbitration */ 424 /* Enable bus master arbitration */
430 atomic_dec(&c3_cpu_count); 425 atomic_dec(&c3_cpu_count);
431 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 426 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
432 ACPI_MTX_DO_NOT_LOCK);
433 } 427 }
434 428
435#ifdef CONFIG_GENERIC_TIME 429#ifdef CONFIG_GENERIC_TIME
@@ -457,7 +451,7 @@ static void acpi_processor_idle(void)
457#ifdef CONFIG_HOTPLUG_CPU 451#ifdef CONFIG_HOTPLUG_CPU
458 /* Don't do promotion/demotion */ 452 /* Don't do promotion/demotion */
459 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 453 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
460 !pr->flags.has_cst && !acpi_fadt.plvl2_up) { 454 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
461 next_state = cx; 455 next_state = cx;
462 goto end; 456 goto end;
463 } 457 }
@@ -627,7 +621,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
627 * Check for P_LVL2_UP flag before entering C2 and above on 621 * Check for P_LVL2_UP flag before entering C2 and above on
628 * an SMP system. 622 * an SMP system.
629 */ 623 */
630 if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up) 624 if ((num_online_cpus() > 1) &&
625 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
631 return -ENODEV; 626 return -ENODEV;
632#endif 627#endif
633 628
@@ -636,8 +631,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
636 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 631 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
637 632
638 /* determine latencies from FADT */ 633 /* determine latencies from FADT */
639 pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; 634 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
640 pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; 635 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
641 636
642 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 637 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
643 "lvl2[0x%08x] lvl3[0x%08x]\n", 638 "lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -883,14 +878,13 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
883 * WBINVD should be set in fadt, for C3 state to be 878 * WBINVD should be set in fadt, for C3 state to be
884 * supported on when bm_check is not required. 879 * supported on when bm_check is not required.
885 */ 880 */
886 if (acpi_fadt.wb_invd != 1) { 881 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
887 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 882 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
888 "Cache invalidation should work properly" 883 "Cache invalidation should work properly"
889 " for C3 to be enabled on SMP systems\n")); 884 " for C3 to be enabled on SMP systems\n"));
890 return; 885 return;
891 } 886 }
892 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 887 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
893 0, ACPI_MTX_DO_NOT_LOCK);
894 } 888 }
895 889
896 /* 890 /*
@@ -1096,7 +1090,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
1096 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 1090 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
1097 pr->power.states[i].latency, 1091 pr->power.states[i].latency,
1098 pr->power.states[i].usage, 1092 pr->power.states[i].usage,
1099 pr->power.states[i].time); 1093 (unsigned long long)pr->power.states[i].time);
1100 } 1094 }
1101 1095
1102 end: 1096 end:
@@ -1164,9 +1158,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1164 if (!pr) 1158 if (!pr)
1165 return -EINVAL; 1159 return -EINVAL;
1166 1160
1167 if (acpi_fadt.cst_cnt && !nocst) { 1161 if (acpi_gbl_FADT.cst_control && !nocst) {
1168 status = 1162 status =
1169 acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8); 1163 acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1170 if (ACPI_FAILURE(status)) { 1164 if (ACPI_FAILURE(status)) {
1171 ACPI_EXCEPTION((AE_INFO, status, 1165 ACPI_EXCEPTION((AE_INFO, status,
1172 "Notifying BIOS of _CST ability failed")); 1166 "Notifying BIOS of _CST ability failed"));
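
The processor_idle.c hunks are largely mechanical: acpi_fadt fields become acpi_gbl_FADT fields, the old boolean FADT members become flag tests (ACPI_FADT_32BIT_TIMER, ACPI_FADT_C2_MP_SUPPORTED, ACPI_FADT_WBINVD), and acpi_set_register()/acpi_get_register() lose the ACPI_MTX_DO_NOT_LOCK argument. The one piece of arithmetic worth restating is ticks_elapsed(): the ACPI PM timer wraps at 24 bits unless TMR_VAL_EXT is set in the FADT flags. A stand-alone sketch of that calculation (the flag value matches ACPICA's definition; the helper itself is illustrative):

#include <stdint.h>
#include <stdio.h>

#define ACPI_FADT_32BIT_TIMER (1 << 8)   /* TMR_VAL_EXT bit in the FADT flags */

/* PM-timer ticks between two reads t1 and t2, allowing for the counter
 * wrapping at 24 bits (the default) or 32 bits (when TMR_VAL_EXT is set).
 * Mirrors the kernel's ticks_elapsed() in the hunk above. */
static uint32_t pm_ticks_elapsed(uint32_t t1, uint32_t t2, uint32_t fadt_flags)
{
    if (t2 >= t1)
        return t2 - t1;
    if (!(fadt_flags & ACPI_FADT_32BIT_TIMER))
        return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;   /* 24-bit wrap */
    return (0xFFFFFFFF - t1) + t2;                      /* 32-bit wrap */
}

int main(void)
{
    /* Wrap case on a 24-bit timer. */
    printf("0x%x\n", (unsigned)pm_ticks_elapsed(0x00FFFFF0, 0x10, 0));
    return 0;
}
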
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cbb6f0814ce2..058f13cf3b79 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -352,31 +352,24 @@ int acpi_processor_notify_smm(struct module *calling_module)
352 352
353 is_done = -EIO; 353 is_done = -EIO;
354 354
355 /* Can't write pstate_cnt to smi_cmd if either value is zero */ 355 /* Can't write pstate_control to smi_command if either value is zero */
356 if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) { 356 if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
357 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n")); 357 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
358 module_put(calling_module); 358 module_put(calling_module);
359 return 0; 359 return 0;
360 } 360 }
361 361
362 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 362 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
363 "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n", 363 "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
364 acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd)); 364 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
365 365
366 /* FADT v1 doesn't support pstate_cnt, many BIOS vendors use 366 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
367 * it anyway, so we need to support it... */ 367 (u32) acpi_gbl_FADT.pstate_control, 8);
368 if (acpi_fadt_is_v1) {
369 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
370 "Using v1.0 FADT reserved value for pstate_cnt\n"));
371 }
372
373 status = acpi_os_write_port(acpi_fadt.smi_cmd,
374 (u32) acpi_fadt.pstate_cnt, 8);
375 if (ACPI_FAILURE(status)) { 368 if (ACPI_FAILURE(status)) {
376 ACPI_EXCEPTION((AE_INFO, status, 369 ACPI_EXCEPTION((AE_INFO, status,
377 "Failed to write pstate_cnt [0x%x] to " 370 "Failed to write pstate_control [0x%x] to "
378 "smi_cmd [0x%x]", acpi_fadt.pstate_cnt, 371 "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
379 acpi_fadt.smi_cmd)); 372 acpi_gbl_FADT.smi_command));
380 module_put(calling_module); 373 module_put(calling_module);
381 return status; 374 return status;
382 } 375 }
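
The perflib change drops the FADT 1.0 special case and always writes pstate_control to the smi_command port, provided both values are non-zero, via acpi_os_write_port() with an 8-bit width. A minimal stand-alone model of that guard and write, with a stub in place of the real port I/O:

#include <stdio.h>

/* Stub standing in for acpi_os_write_port(address, value, width_in_bits);
 * the real call performs the I/O-port write that raises the SMI. */
static int write_port_stub(unsigned address, unsigned value, int width)
{
    printf("port 0x%x <- 0x%x (%d bits)\n", address, value, width);
    return 0;
}

/* Tell the platform's SMM handler that the OS is taking over P-state
 * control; mirrors the guard in acpi_processor_notify_smm() above. */
static int notify_smm(unsigned smi_command, unsigned pstate_control)
{
    if (!smi_command || !pstate_control)
        return 0;                      /* nothing to notify, not an error */
    return write_port_stub(smi_command, pstate_control, 8);
}

int main(void)
{
    return notify_smm(0xB2, 0x80);     /* example values only, not from a real FADT */
}
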
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0ec7dcde0063..89dff3639abe 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -125,7 +125,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
125 /* Used to clear all duty_value bits */ 125 /* Used to clear all duty_value bits */
126 duty_mask = pr->throttling.state_count - 1; 126 duty_mask = pr->throttling.state_count - 1;
127 127
128 duty_mask <<= acpi_fadt.duty_offset; 128 duty_mask <<= acpi_gbl_FADT.duty_offset;
129 duty_mask = ~duty_mask; 129 duty_mask = ~duty_mask;
130 } 130 }
131 131
@@ -208,7 +208,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
208 return 0; 208 return 0;
209 } 209 }
210 210
211 pr->throttling.state_count = 1 << acpi_fadt.duty_width; 211 pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
212 212
213 /* 213 /*
214 * Compute state values. Note that throttling displays a linear power/ 214 * Compute state values. Note that throttling displays a linear power/
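
processor_throttling.c only swaps acpi_fadt for acpi_gbl_FADT, but the duty-cycle arithmetic those two fields feed is worth spelling out: duty_width gives the number of throttling states (1 << duty_width) and duty_offset positions the duty value inside P_CNT, so clearing the old setting uses a mask built from both. A self-contained sketch of that computation:

#include <stdio.h>

/* From the FADT duty_offset/duty_width fields: the number of throttling
 * states is 1 << duty_width, and the mask used to clear the current duty
 * value in P_CNT is duty_width bits shifted up by duty_offset, inverted
 * (the duty_mask logic the hunk above touches). */
static unsigned throttle_state_count(unsigned duty_width)
{
    return 1u << duty_width;
}

static unsigned throttle_clear_mask(unsigned duty_offset, unsigned duty_width)
{
    unsigned duty_mask = (1u << duty_width) - 1;    /* state_count - 1 */

    return ~(duty_mask << duty_offset);
}

int main(void)
{
    /* Example values: duty_width = 3 gives 8 states. */
    printf("states=%u clear_mask=0x%08x\n",
           throttle_state_count(3), throttle_clear_mask(1, 3));
    return 0;
}
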
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
index 8fa3213ce000..271e61509eeb 100644
--- a/drivers/acpi/resources/rsaddr.c
+++ b/drivers/acpi/resources/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index cf87b0230026..8c6d3fdec38a 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 008058acdd39..1358c06a969c 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index 9c99a723a860..de20a5d6decf 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 9e7ae2f8a1d3..7e3c335ab320 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
index ea567167c4f2..b297bc3e4419 100644
--- a/drivers/acpi/resources/rsio.c
+++ b/drivers/acpi/resources/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
index 1fa63bc2e36f..5657f7b95039 100644
--- a/drivers/acpi/resources/rsirq.c
+++ b/drivers/acpi/resources/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index 29423ce030ca..a92755c8877d 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
index a5131936d690..521eab7dd8df 100644
--- a/drivers/acpi/resources/rsmemory.c
+++ b/drivers/acpi/resources/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index faf6e106b785..3b63b561b94e 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index a9cbee8e8b44..2442a8f8df57 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 1999e2ab7daa..991f8901498c 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 283d87522c5d..64f26db10c8e 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -21,101 +21,305 @@ extern struct acpi_device *acpi_root;
21#define ACPI_BUS_DEVICE_NAME "System Bus" 21#define ACPI_BUS_DEVICE_NAME "System Bus"
22 22
23static LIST_HEAD(acpi_device_list); 23static LIST_HEAD(acpi_device_list);
24static LIST_HEAD(acpi_bus_id_list);
24DEFINE_SPINLOCK(acpi_device_lock); 25DEFINE_SPINLOCK(acpi_device_lock);
25LIST_HEAD(acpi_wakeup_device_list); 26LIST_HEAD(acpi_wakeup_device_list);
26 27
28struct acpi_device_bus_id{
29 char bus_id[15];
30 unsigned int instance_no;
31 struct list_head node;
32};
33static int acpi_eject_operation(acpi_handle handle, int lockable)
34{
35 struct acpi_object_list arg_list;
36 union acpi_object arg;
37 acpi_status status = AE_OK;
38
39 /*
40 * TBD: evaluate _PS3?
41 */
42
43 if (lockable) {
44 arg_list.count = 1;
45 arg_list.pointer = &arg;
46 arg.type = ACPI_TYPE_INTEGER;
47 arg.integer.value = 0;
48 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
49 }
50
51 arg_list.count = 1;
52 arg_list.pointer = &arg;
53 arg.type = ACPI_TYPE_INTEGER;
54 arg.integer.value = 1;
55
56 /*
57 * TBD: _EJD support.
58 */
59
60 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
61 if (ACPI_FAILURE(status)) {
62 return (-ENODEV);
63 }
64
65 return (0);
66}
27 67
28static void acpi_device_release(struct kobject *kobj) 68static ssize_t
69acpi_eject_store(struct device *d, struct device_attribute *attr,
70 const char *buf, size_t count)
29{ 71{
30 struct acpi_device *dev = container_of(kobj, struct acpi_device, kobj); 72 int result;
31 kfree(dev->pnp.cid_list); 73 int ret = count;
32 kfree(dev); 74 int islockable;
75 acpi_status status;
76 acpi_handle handle;
77 acpi_object_type type = 0;
78 struct acpi_device *acpi_device = to_acpi_device(d);
79
80 if ((!count) || (buf[0] != '1')) {
81 return -EINVAL;
82 }
83#ifndef FORCE_EJECT
84 if (acpi_device->driver == NULL) {
85 ret = -ENODEV;
86 goto err;
87 }
88#endif
89 status = acpi_get_type(acpi_device->handle, &type);
90 if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
91 ret = -ENODEV;
92 goto err;
93 }
94
95 islockable = acpi_device->flags.lockable;
96 handle = acpi_device->handle;
97
98 result = acpi_bus_trim(acpi_device, 1);
99
100 if (!result)
101 result = acpi_eject_operation(handle, islockable);
102
103 if (result) {
104 ret = -EBUSY;
105 }
106 err:
107 return ret;
33} 108}
34 109
35struct acpi_device_attribute { 110static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
36 struct attribute attr; 111
37 ssize_t(*show) (struct acpi_device *, char *); 112static ssize_t
38 ssize_t(*store) (struct acpi_device *, const char *, size_t); 113acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
39}; 114 struct acpi_device *acpi_dev = to_acpi_device(dev);
115
116 return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id);
117}
118static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
119
120static ssize_t
121acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
122 struct acpi_device *acpi_dev = to_acpi_device(dev);
123 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
124 int result;
125
126 result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
127 if(result)
128 goto end;
129
130 result = sprintf(buf, "%s\n", (char*)path.pointer);
131 kfree(path.pointer);
132 end:
133 return result;
134}
135static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
136
137static int acpi_device_setup_files(struct acpi_device *dev)
138{
139 acpi_status status;
140 acpi_handle temp;
141 int result = 0;
142
143 /*
144 * Devices gotten from FADT don't have a "path" attribute
145 */
146 if(dev->handle) {
147 result = device_create_file(&dev->dev, &dev_attr_path);
148 if(result)
149 goto end;
150 }
40 151
41typedef void acpi_device_sysfs_files(struct kobject *, 152 if(dev->flags.hardware_id) {
42 const struct attribute *); 153 result = device_create_file(&dev->dev, &dev_attr_hid);
154 if(result)
155 goto end;
156 }
43 157
44static void setup_sys_fs_device_files(struct acpi_device *dev, 158 /*
45 acpi_device_sysfs_files * func); 159 * If device has _EJ0, 'eject' file is created that is used to trigger
160 * hot-removal function from userland.
161 */
162 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
163 if (ACPI_SUCCESS(status))
164 result = device_create_file(&dev->dev, &dev_attr_eject);
165 end:
166 return result;
167}
46 168
47#define create_sysfs_device_files(dev) \ 169static void acpi_device_remove_files(struct acpi_device *dev)
48 setup_sys_fs_device_files(dev, (acpi_device_sysfs_files *)&sysfs_create_file) 170{
49#define remove_sysfs_device_files(dev) \ 171 acpi_status status;
50 setup_sys_fs_device_files(dev, (acpi_device_sysfs_files *)&sysfs_remove_file) 172 acpi_handle temp;
51 173
52#define to_acpi_device(n) container_of(n, struct acpi_device, kobj) 174 /*
53#define to_handle_attr(n) container_of(n, struct acpi_device_attribute, attr); 175 * If device has _EJ0, 'eject' file is created that is used to trigger
176 * hot-removal function from userland.
177 */
178 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
179 if (ACPI_SUCCESS(status))
180 device_remove_file(&dev->dev, &dev_attr_eject);
54 181
55static ssize_t acpi_device_attr_show(struct kobject *kobj, 182 if(dev->flags.hardware_id)
56 struct attribute *attr, char *buf) 183 device_remove_file(&dev->dev, &dev_attr_hid);
184 if(dev->handle)
185 device_remove_file(&dev->dev, &dev_attr_path);
186}
187/* --------------------------------------------------------------------------
188 ACPI Bus operations
189 -------------------------------------------------------------------------- */
190static void acpi_device_release(struct device *dev)
57{ 191{
58 struct acpi_device *device = to_acpi_device(kobj); 192 struct acpi_device *acpi_dev = to_acpi_device(dev);
59 struct acpi_device_attribute *attribute = to_handle_attr(attr); 193
60 return attribute->show ? attribute->show(device, buf) : -EIO; 194 kfree(acpi_dev->pnp.cid_list);
195 kfree(acpi_dev);
61} 196}
62static ssize_t acpi_device_attr_store(struct kobject *kobj, 197
63 struct attribute *attr, const char *buf, 198static int acpi_device_suspend(struct device *dev, pm_message_t state)
64 size_t len)
65{ 199{
66 struct acpi_device *device = to_acpi_device(kobj); 200 struct acpi_device *acpi_dev = to_acpi_device(dev);
67 struct acpi_device_attribute *attribute = to_handle_attr(attr); 201 struct acpi_driver *acpi_drv = acpi_dev->driver;
68 return attribute->store ? attribute->store(device, buf, len) : -EIO; 202
203 if (acpi_drv && acpi_drv->ops.suspend)
204 return acpi_drv->ops.suspend(acpi_dev, state);
205 return 0;
69} 206}
70 207
71static struct sysfs_ops acpi_device_sysfs_ops = { 208static int acpi_device_resume(struct device *dev)
72 .show = acpi_device_attr_show, 209{
73 .store = acpi_device_attr_store, 210 struct acpi_device *acpi_dev = to_acpi_device(dev);
74}; 211 struct acpi_driver *acpi_drv = acpi_dev->driver;
75 212
76static struct kobj_type ktype_acpi_ns = { 213 if (acpi_drv && acpi_drv->ops.resume)
77 .sysfs_ops = &acpi_device_sysfs_ops, 214 return acpi_drv->ops.resume(acpi_dev);
78 .release = acpi_device_release, 215 return 0;
79}; 216}
80 217
81static int namespace_uevent(struct kset *kset, struct kobject *kobj, 218static int acpi_bus_match(struct device *dev, struct device_driver *drv)
82 char **envp, int num_envp, char *buffer,
83 int buffer_size)
84{ 219{
85 struct acpi_device *dev = to_acpi_device(kobj); 220 struct acpi_device *acpi_dev = to_acpi_device(dev);
86 int i = 0; 221 struct acpi_driver *acpi_drv = to_acpi_driver(drv);
87 int len = 0;
88 222
89 if (!dev->driver) 223 return !acpi_match_ids(acpi_dev, acpi_drv->ids);
90 return 0; 224}
91 225
92 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 226static int acpi_device_uevent(struct device *dev, char **envp, int num_envp,
93 "PHYSDEVDRIVER=%s", dev->driver->name)) 227 char *buffer, int buffer_size)
228{
229 struct acpi_device *acpi_dev = to_acpi_device(dev);
230 int i = 0, length = 0, ret = 0;
231
232 if (acpi_dev->flags.hardware_id)
233 ret = add_uevent_var(envp, num_envp, &i,
234 buffer, buffer_size, &length,
235 "HWID=%s", acpi_dev->pnp.hardware_id);
236 if (ret)
94 return -ENOMEM; 237 return -ENOMEM;
238 if (acpi_dev->flags.compatible_ids) {
239 int j;
240 struct acpi_compatible_id_list *cid_list;
241
242 cid_list = acpi_dev->pnp.cid_list;
243
244 for (j = 0; j < cid_list->count; j++) {
245 ret = add_uevent_var(envp, num_envp, &i, buffer,
246 buffer_size, &length, "COMPTID=%s",
247 cid_list->id[j].value);
248 if (ret)
249 return -ENOMEM;
250 }
251 }
95 252
96 envp[i] = NULL; 253 envp[i] = NULL;
254 return 0;
255}
256
257static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
258static int acpi_start_single_object(struct acpi_device *);
259static int acpi_device_probe(struct device * dev)
260{
261 struct acpi_device *acpi_dev = to_acpi_device(dev);
262 struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
263 int ret;
264
265 ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
266 if (!ret) {
267 if (acpi_dev->bus_ops.acpi_op_start)
268 acpi_start_single_object(acpi_dev);
269 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
270 "Found driver [%s] for device [%s]\n",
271 acpi_drv->name, acpi_dev->pnp.bus_id));
272 get_device(dev);
273 }
274 return ret;
275}
97 276
277static int acpi_device_remove(struct device * dev)
278{
279 struct acpi_device *acpi_dev = to_acpi_device(dev);
280 struct acpi_driver *acpi_drv = acpi_dev->driver;
281
282 if (acpi_drv) {
283 if (acpi_drv->ops.stop)
284 acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type);
285 if (acpi_drv->ops.remove)
286 acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
287 }
288 acpi_dev->driver = NULL;
289 acpi_driver_data(dev) = NULL;
290
291 put_device(dev);
98 return 0; 292 return 0;
99} 293}
100 294
101static struct kset_uevent_ops namespace_uevent_ops = { 295static void acpi_device_shutdown(struct device *dev)
102 .uevent = &namespace_uevent, 296{
103}; 297 struct acpi_device *acpi_dev = to_acpi_device(dev);
298 struct acpi_driver *acpi_drv = acpi_dev->driver;
299
300 if (acpi_drv && acpi_drv->ops.shutdown)
301 acpi_drv->ops.shutdown(acpi_dev);
104 302
105static struct kset acpi_namespace_kset = { 303 return ;
106 .kobj = { 304}
107 .name = "namespace", 305
108 }, 306static struct bus_type acpi_bus_type = {
109 .subsys = &acpi_subsys, 307 .name = "acpi",
110 .ktype = &ktype_acpi_ns, 308 .suspend = acpi_device_suspend,
111 .uevent_ops = &namespace_uevent_ops, 309 .resume = acpi_device_resume,
310 .shutdown = acpi_device_shutdown,
311 .match = acpi_bus_match,
312 .probe = acpi_device_probe,
313 .remove = acpi_device_remove,
314 .uevent = acpi_device_uevent,
112}; 315};
113 316
114static void acpi_device_register(struct acpi_device *device, 317static int acpi_device_register(struct acpi_device *device,
115 struct acpi_device *parent) 318 struct acpi_device *parent)
116{ 319{
117 int err; 320 int result;
118 321 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
322 int found = 0;
119 /* 323 /*
120 * Linkage 324 * Linkage
121 * ------- 325 * -------
@@ -126,7 +330,33 @@ static void acpi_device_register(struct acpi_device *device,
126 INIT_LIST_HEAD(&device->g_list); 330 INIT_LIST_HEAD(&device->g_list);
127 INIT_LIST_HEAD(&device->wakeup_list); 331 INIT_LIST_HEAD(&device->wakeup_list);
128 332
333 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
334 if (!new_bus_id) {
335 printk(KERN_ERR PREFIX "Memory allocation error\n");
336 return -ENOMEM;
337 }
338
129 spin_lock(&acpi_device_lock); 339 spin_lock(&acpi_device_lock);
340 /*
341 * Find suitable bus_id and instance number in acpi_bus_id_list
342 * If failed, create one and link it into acpi_bus_id_list
343 */
344 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
345 if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) {
346 acpi_device_bus_id->instance_no ++;
347 found = 1;
348 kfree(new_bus_id);
349 break;
350 }
351 }
352 if(!found) {
353 acpi_device_bus_id = new_bus_id;
354 strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
355 acpi_device_bus_id->instance_no = 0;
356 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
357 }
358 sprintf(device->dev.bus_id, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
359
130 if (device->parent) { 360 if (device->parent) {
131 list_add_tail(&device->node, &device->parent->children); 361 list_add_tail(&device->node, &device->parent->children);
132 list_add_tail(&device->g_list, &device->parent->g_list); 362 list_add_tail(&device->g_list, &device->parent->g_list);
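
The hunk above also gives every ACPI device a stable sysfs name of the form "<HID>:<instance>" ("device:<instance>" when there is no hardware id), with a per-HID instance counter kept on acpi_bus_id_list. A small stand-alone model of that naming scheme, using a fixed table in place of the kernel's linked list:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct bus_id_entry { char id[16]; unsigned next_instance; };

/* Claim a free slot for a new hardware id, or find the existing one. */
static int find_slot(struct bus_id_entry *table, size_t n, const char *hid)
{
    size_t i;

    for (i = 0; i < n; i++) {
        if (table[i].id[0] == '\0') {
            snprintf(table[i].id, sizeof(table[i].id), "%s", hid);
            return (int)i;
        }
        if (strcmp(table[i].id, hid) == 0)
            return (int)i;
    }
    return -1;                                   /* table full */
}

/* Build "<HID>:<instance>" the way acpi_device_register() does with
 * sprintf(device->dev.bus_id, "%s:%02x", ...). */
static void acpi_style_name(struct bus_id_entry *table, size_t n,
                            const char *hid, char *out, size_t outlen)
{
    int slot = find_slot(table, n, hid);

    if (slot < 0)
        snprintf(out, outlen, "%s", hid);        /* degrade gracefully */
    else
        snprintf(out, outlen, "%s:%02x", table[slot].id,
                 table[slot].next_instance++);
}

int main(void)
{
    static struct bus_id_entry table[8];         /* zero-initialized */
    char name[32];

    acpi_style_name(table, 8, "PNP0A03", name, sizeof(name));
    puts(name);                                  /* PNP0A03:00 */
    acpi_style_name(table, 8, "PNP0A03", name, sizeof(name));
    puts(name);                                  /* PNP0A03:01 */
    return 0;
}
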
@@ -136,16 +366,33 @@ static void acpi_device_register(struct acpi_device *device,
136 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); 366 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
137 spin_unlock(&acpi_device_lock); 367 spin_unlock(&acpi_device_lock);
138 368
139 strlcpy(device->kobj.name, device->pnp.bus_id, KOBJ_NAME_LEN); 369 if (device->parent)
140 if (parent) 370 device->dev.parent = &parent->dev;
141 device->kobj.parent = &parent->kobj; 371 device->dev.bus = &acpi_bus_type;
142 device->kobj.ktype = &ktype_acpi_ns; 372 device_initialize(&device->dev);
143 device->kobj.kset = &acpi_namespace_kset; 373 device->dev.release = &acpi_device_release;
144 err = kobject_register(&device->kobj); 374 result = device_add(&device->dev);
145 if (err < 0) 375 if(result) {
146 printk(KERN_WARNING "%s: kobject_register error: %d\n", 376 printk("Error adding device %s", device->dev.bus_id);
147 __FUNCTION__, err); 377 goto end;
148 create_sysfs_device_files(device); 378 }
379
380 result = acpi_device_setup_files(device);
381 if(result)
382 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error creating sysfs interface for device %s\n", device->dev.bus_id));
383
384 device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
385 return 0;
386 end:
387 spin_lock(&acpi_device_lock);
388 if (device->parent) {
389 list_del(&device->node);
390 list_del(&device->g_list);
391 } else
392 list_del(&device->g_list);
393 list_del(&device->wakeup_list);
394 spin_unlock(&acpi_device_lock);
395 return result;
149} 396}
150 397
151static void acpi_device_unregister(struct acpi_device *device, int type) 398static void acpi_device_unregister(struct acpi_device *device, int type)
@@ -158,81 +405,143 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
158 list_del(&device->g_list); 405 list_del(&device->g_list);
159 406
160 list_del(&device->wakeup_list); 407 list_del(&device->wakeup_list);
161
162 spin_unlock(&acpi_device_lock); 408 spin_unlock(&acpi_device_lock);
163 409
164 acpi_detach_data(device->handle, acpi_bus_data_handler); 410 acpi_detach_data(device->handle, acpi_bus_data_handler);
165 remove_sysfs_device_files(device); 411
166 kobject_unregister(&device->kobj); 412 acpi_device_remove_files(device);
413 device_unregister(&device->dev);
167} 414}
168 415
169void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context) 416/* --------------------------------------------------------------------------
417 Driver Management
418 -------------------------------------------------------------------------- */
419/**
420 * acpi_bus_driver_init - add a device to a driver
421 * @device: the device to add and initialize
422 * @driver: driver for the device
423 *
424 * Used to initialize a device via its device driver. Called whenever a
425 * driver is bound to a device. Invokes the driver's add() ops.
426 */
427static int
428acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
170{ 429{
430 int result = 0;
171 431
172 /* TBD */
173 432
174 return; 433 if (!device || !driver)
175} 434 return -EINVAL;
176 435
177static int acpi_bus_get_power_flags(struct acpi_device *device) 436 if (!driver->ops.add)
178{ 437 return -ENOSYS;
179 acpi_status status = 0;
180 acpi_handle handle = NULL;
181 u32 i = 0;
182 438
439 result = driver->ops.add(device);
440 if (result) {
441 device->driver = NULL;
442 acpi_driver_data(device) = NULL;
443 return result;
444 }
183 445
184 /* 446 device->driver = driver;
185 * Power Management Flags
186 */
187 status = acpi_get_handle(device->handle, "_PSC", &handle);
188 if (ACPI_SUCCESS(status))
189 device->power.flags.explicit_get = 1;
190 status = acpi_get_handle(device->handle, "_IRC", &handle);
191 if (ACPI_SUCCESS(status))
192 device->power.flags.inrush_current = 1;
193 447
194 /* 448 /*
195 * Enumerate supported power management states 449 * TBD - Configuration Management: Assign resources to device based
450 * upon possible configuration and currently allocated resources.
196 */ 451 */
197 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
198 struct acpi_device_power_state *ps = &device->power.states[i];
199 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
200 452
201 /* Evaluate "_PRx" to se if power resources are referenced */ 453 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
202 acpi_evaluate_reference(device->handle, object_name, NULL, 454 "Driver successfully bound to device\n"));
203 &ps->resources); 455 return 0;
204 if (ps->resources.count) { 456}
205 device->power.flags.power_resources = 1;
206 ps->flags.valid = 1;
207 }
208 457
209 /* Evaluate "_PSx" to see if we can do explicit sets */ 458static int acpi_start_single_object(struct acpi_device *device)
210 object_name[2] = 'S'; 459{
211 status = acpi_get_handle(device->handle, object_name, &handle); 460 int result = 0;
212 if (ACPI_SUCCESS(status)) { 461 struct acpi_driver *driver;
213 ps->flags.explicit_set = 1;
214 ps->flags.valid = 1;
215 }
216 462
217 /* State is valid if we have some power control */
218 if (ps->resources.count || ps->flags.explicit_set)
219 ps->flags.valid = 1;
220 463
221 ps->power = -1; /* Unknown - driver assigned */ 464 if (!(driver = device->driver))
222 ps->latency = -1; /* Unknown - driver assigned */ 465 return 0;
466
467 if (driver->ops.start) {
468 result = driver->ops.start(device);
469 if (result && driver->ops.remove)
470 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
223 } 471 }
224 472
225 /* Set defaults for D0 and D3 states (always valid) */ 473 return result;
226 device->power.states[ACPI_STATE_D0].flags.valid = 1; 474}
227 device->power.states[ACPI_STATE_D0].power = 100;
228 device->power.states[ACPI_STATE_D3].flags.valid = 1;
229 device->power.states[ACPI_STATE_D3].power = 0;
230 475
231 /* TBD: System wake support and resource requirements. */ 476/**
477 * acpi_bus_register_driver - register a driver with the ACPI bus
478 * @driver: driver being registered
479 *
480 * Registers a driver with the ACPI bus. Searches the namespace for all
481 * devices that match the driver's criteria and binds. Returns zero for
482 * success or a negative error status for failure.
483 */
484int acpi_bus_register_driver(struct acpi_driver *driver)
485{
486 int ret;
232 487
233 device->power.state = ACPI_STATE_UNKNOWN; 488 if (acpi_disabled)
489 return -ENODEV;
490 driver->drv.name = driver->name;
491 driver->drv.bus = &acpi_bus_type;
492 driver->drv.owner = driver->owner;
234 493
235 return 0; 494 ret = driver_register(&driver->drv);
495 return ret;
496}
497
498EXPORT_SYMBOL(acpi_bus_register_driver);
499
500/**
501 * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
502 * @driver: driver to unregister
503 *
504 * Unregisters a driver with the ACPI bus. Searches the namespace for all
505 * devices that match the driver's criteria and unbinds.
506 */
507void acpi_bus_unregister_driver(struct acpi_driver *driver)
508{
509 driver_unregister(&driver->drv);
510}
511
512EXPORT_SYMBOL(acpi_bus_unregister_driver);
513
514/* --------------------------------------------------------------------------
515 Device Enumeration
516 -------------------------------------------------------------------------- */
517acpi_status
518acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
519{
520 acpi_status status;
521 acpi_handle tmp;
522 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
523 union acpi_object *obj;
524
525 status = acpi_get_handle(handle, "_EJD", &tmp);
526 if (ACPI_FAILURE(status))
527 return status;
528
529 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
530 if (ACPI_SUCCESS(status)) {
531 obj = buffer.pointer;
532 status = acpi_get_handle(NULL, obj->string.pointer, ejd);
533 kfree(buffer.pointer);
534 }
535 return status;
536}
537EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
538
539void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
540{
541
542 /* TBD */
543
544 return;
236} 545}
237 546
238int acpi_match_ids(struct acpi_device *device, char *ids) 547int acpi_match_ids(struct acpi_device *device, char *ids)
@@ -254,6 +563,12 @@ int acpi_match_ids(struct acpi_device *device, char *ids)
254 return -ENOENT; 563 return -ENOENT;
255} 564}
256 565
566static int acpi_bus_get_perf_flags(struct acpi_device *device)
567{
568 device->performance.state = ACPI_STATE_UNKNOWN;
569 return 0;
570}
571
257static acpi_status 572static acpi_status
258acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, 573acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
259 union acpi_object *package) 574 union acpi_object *package)
@@ -338,359 +653,66 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
338 return 0; 653 return 0;
339} 654}
340 655
341/* -------------------------------------------------------------------------- 656static int acpi_bus_get_power_flags(struct acpi_device *device)
342 ACPI sysfs device file support
343 -------------------------------------------------------------------------- */
344static ssize_t acpi_eject_store(struct acpi_device *device,
345 const char *buf, size_t count);
346
347#define ACPI_DEVICE_ATTR(_name,_mode,_show,_store) \
348static struct acpi_device_attribute acpi_device_attr_##_name = \
349 __ATTR(_name, _mode, _show, _store)
350
351ACPI_DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
352
353/**
354 * setup_sys_fs_device_files - sets up the device files under device namespace
355 * @dev: acpi_device object
356 * @func: function pointer to create or destroy the device file
357 */
358static void
359setup_sys_fs_device_files(struct acpi_device *dev,
360 acpi_device_sysfs_files * func)
361{
362 acpi_status status;
363 acpi_handle temp = NULL;
364
365 /*
366 * If device has _EJ0, 'eject' file is created that is used to trigger
367 * hot-removal function from userland.
368 */
369 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
370 if (ACPI_SUCCESS(status))
371 (*(func)) (&dev->kobj, &acpi_device_attr_eject.attr);
372}
373
374static int acpi_eject_operation(acpi_handle handle, int lockable)
375{ 657{
376 struct acpi_object_list arg_list; 658 acpi_status status = 0;
377 union acpi_object arg; 659 acpi_handle handle = NULL;
378 acpi_status status = AE_OK; 660 u32 i = 0;
379
380 /*
381 * TBD: evaluate _PS3?
382 */
383
384 if (lockable) {
385 arg_list.count = 1;
386 arg_list.pointer = &arg;
387 arg.type = ACPI_TYPE_INTEGER;
388 arg.integer.value = 0;
389 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
390 }
391 661
392 arg_list.count = 1;
393 arg_list.pointer = &arg;
394 arg.type = ACPI_TYPE_INTEGER;
395 arg.integer.value = 1;
396 662
397 /* 663 /*
398 * TBD: _EJD support. 664 * Power Management Flags
399 */ 665 */
400 666 status = acpi_get_handle(device->handle, "_PSC", &handle);
401 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 667 if (ACPI_SUCCESS(status))
402 if (ACPI_FAILURE(status)) { 668 device->power.flags.explicit_get = 1;
403 return (-ENODEV); 669 status = acpi_get_handle(device->handle, "_IRC", &handle);
404 } 670 if (ACPI_SUCCESS(status))
405 671 device->power.flags.inrush_current = 1;
406 return (0);
407}
408
409static ssize_t
410acpi_eject_store(struct acpi_device *device, const char *buf, size_t count)
411{
412 int result;
413 int ret = count;
414 int islockable;
415 acpi_status status;
416 acpi_handle handle;
417 acpi_object_type type = 0;
418
419 if ((!count) || (buf[0] != '1')) {
420 return -EINVAL;
421 }
422#ifndef FORCE_EJECT
423 if (device->driver == NULL) {
424 ret = -ENODEV;
425 goto err;
426 }
427#endif
428 status = acpi_get_type(device->handle, &type);
429 if (ACPI_FAILURE(status) || (!device->flags.ejectable)) {
430 ret = -ENODEV;
431 goto err;
432 }
433
434 islockable = device->flags.lockable;
435 handle = device->handle;
436
437 result = acpi_bus_trim(device, 1);
438
439 if (!result)
440 result = acpi_eject_operation(handle, islockable);
441
442 if (result) {
443 ret = -EBUSY;
444 }
445 err:
446 return ret;
447}
448
449/* --------------------------------------------------------------------------
450 Performance Management
451 -------------------------------------------------------------------------- */
452
453static int acpi_bus_get_perf_flags(struct acpi_device *device)
454{
455 device->performance.state = ACPI_STATE_UNKNOWN;
456 return 0;
457}
458
459/* --------------------------------------------------------------------------
460 Driver Management
461 -------------------------------------------------------------------------- */
462
463static LIST_HEAD(acpi_bus_drivers);
464
465/**
466 * acpi_bus_match - match device IDs to driver's supported IDs
467 * @device: the device that we are trying to match to a driver
468 * @driver: driver whose device id table is being checked
469 *
470 * Checks the device's hardware (_HID) or compatible (_CID) ids to see if it
471 * matches the specified driver's criteria.
472 */
473static int
474acpi_bus_match(struct acpi_device *device, struct acpi_driver *driver)
475{
476 if (driver && driver->ops.match)
477 return driver->ops.match(device, driver);
478 return acpi_match_ids(device, driver->ids);
479}
480
481/**
482 * acpi_bus_driver_init - add a device to a driver
483 * @device: the device to add and initialize
484 * @driver: driver for the device
485 *
486 * Used to initialize a device via its device driver. Called whenever a
487 * driver is bound to a device. Invokes the driver's add() and start() ops.
488 */
489static int
490acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
491{
492 int result = 0;
493
494
495 if (!device || !driver)
496 return -EINVAL;
497
498 if (!driver->ops.add)
499 return -ENOSYS;
500
501 result = driver->ops.add(device);
502 if (result) {
503 device->driver = NULL;
504 acpi_driver_data(device) = NULL;
505 return result;
506 }
507
508 device->driver = driver;
509 672
510 /* 673 /*
511 * TBD - Configuration Management: Assign resources to device based 674 * Enumerate supported power management states
512 * upon possible configuration and currently allocated resources.
513 */ 675 */
676 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
677 struct acpi_device_power_state *ps = &device->power.states[i];
678 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
514 679
515 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 680 /* Evaluate "_PRx" to se if power resources are referenced */
516 "Driver successfully bound to device\n")); 681 acpi_evaluate_reference(device->handle, object_name, NULL,
517 return 0; 682 &ps->resources);
518} 683 if (ps->resources.count) {
519 684 device->power.flags.power_resources = 1;
520static int acpi_start_single_object(struct acpi_device *device) 685 ps->flags.valid = 1;
521{
522 int result = 0;
523 struct acpi_driver *driver;
524
525
526 if (!(driver = device->driver))
527 return 0;
528
529 if (driver->ops.start) {
530 result = driver->ops.start(device);
531 if (result && driver->ops.remove)
532 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
533 }
534
535 return result;
536}
537
538static void acpi_driver_attach(struct acpi_driver *drv)
539{
540 struct list_head *node, *next;
541
542
543 spin_lock(&acpi_device_lock);
544 list_for_each_safe(node, next, &acpi_device_list) {
545 struct acpi_device *dev =
546 container_of(node, struct acpi_device, g_list);
547
548 if (dev->driver || !dev->status.present)
549 continue;
550 spin_unlock(&acpi_device_lock);
551
552 if (!acpi_bus_match(dev, drv)) {
553 if (!acpi_bus_driver_init(dev, drv)) {
554 acpi_start_single_object(dev);
555 atomic_inc(&drv->references);
556 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
557 "Found driver [%s] for device [%s]\n",
558 drv->name, dev->pnp.bus_id));
559 }
560 } 686 }
561 spin_lock(&acpi_device_lock);
562 }
563 spin_unlock(&acpi_device_lock);
564}
565
566static void acpi_driver_detach(struct acpi_driver *drv)
567{
568 struct list_head *node, *next;
569 687
570 688 /* Evaluate "_PSx" to see if we can do explicit sets */
571 spin_lock(&acpi_device_lock); 689 object_name[2] = 'S';
572 list_for_each_safe(node, next, &acpi_device_list) { 690 status = acpi_get_handle(device->handle, object_name, &handle);
573 struct acpi_device *dev = 691 if (ACPI_SUCCESS(status)) {
574 container_of(node, struct acpi_device, g_list); 692 ps->flags.explicit_set = 1;
575 693 ps->flags.valid = 1;
576 if (dev->driver == drv) {
577 spin_unlock(&acpi_device_lock);
578 if (drv->ops.remove)
579 drv->ops.remove(dev, ACPI_BUS_REMOVAL_NORMAL);
580 spin_lock(&acpi_device_lock);
581 dev->driver = NULL;
582 dev->driver_data = NULL;
583 atomic_dec(&drv->references);
584 } 694 }
585 }
586 spin_unlock(&acpi_device_lock);
587}
588
589/**
590 * acpi_bus_register_driver - register a driver with the ACPI bus
591 * @driver: driver being registered
592 *
593 * Registers a driver with the ACPI bus. Searches the namespace for all
 594 * devices that match the driver's criteria and binds them.  Returns zero for
595 * success or a negative error status for failure.
596 */
597int acpi_bus_register_driver(struct acpi_driver *driver)
598{
599
600 if (acpi_disabled)
601 return -ENODEV;
602
603 spin_lock(&acpi_device_lock);
604 list_add_tail(&driver->node, &acpi_bus_drivers);
605 spin_unlock(&acpi_device_lock);
606 acpi_driver_attach(driver);
607
608 return 0;
609}
610
611EXPORT_SYMBOL(acpi_bus_register_driver);
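A minimal sketch of how a client driver is expected to use the interface exported above -- it is not part of this patch. The ops names (.add/.remove), the .name and .ids fields and acpi_driver_data() follow the hunks above; the "XYZ0001" HID and the callback bodies are made-up placeholders.

static int example_add(struct acpi_device *device)
{
	/* allocate per-device state here and attach it with
	 * acpi_driver_data(device) = ...; for use in later callbacks */
	return 0;
}

static int example_remove(struct acpi_device *device, int type)
{
	/* undo whatever example_add() set up */
	return 0;
}

static struct acpi_driver example_driver = {
	.name	= "example",
	.ids	= "XYZ0001",
	.ops	= {
		.add	= example_add,
		.remove	= example_remove,
	},
};

/* typically called from the client's module_init():
 *	return acpi_bus_register_driver(&example_driver);
 * and mirrored by acpi_bus_unregister_driver() on module exit. */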
612
613/**
 614 * acpi_bus_unregister_driver - unregister a driver from the ACPI bus
615 * @driver: driver to unregister
616 *
617 * Unregisters a driver with the ACPI bus. Searches the namespace for all
618 * devices that match the driver's criteria and unbinds.
619 */
620void acpi_bus_unregister_driver(struct acpi_driver *driver)
621{
622 acpi_driver_detach(driver);
623
624 if (!atomic_read(&driver->references)) {
625 spin_lock(&acpi_device_lock);
626 list_del_init(&driver->node);
627 spin_unlock(&acpi_device_lock);
628 }
629 return;
630}
631
632EXPORT_SYMBOL(acpi_bus_unregister_driver);
633
634/**
635 * acpi_bus_find_driver - check if there is a driver installed for the device
636 * @device: device that we are trying to find a supporting driver for
637 *
638 * Parses the list of registered drivers looking for a driver applicable for
639 * the specified device.
640 */
641static int acpi_bus_find_driver(struct acpi_device *device)
642{
643 int result = 0;
644 struct list_head *node, *next;
645 695
696 /* State is valid if we have some power control */
697 if (ps->resources.count || ps->flags.explicit_set)
698 ps->flags.valid = 1;
646 699
647 spin_lock(&acpi_device_lock); 700 ps->power = -1; /* Unknown - driver assigned */
648 list_for_each_safe(node, next, &acpi_bus_drivers) { 701 ps->latency = -1; /* Unknown - driver assigned */
649 struct acpi_driver *driver =
650 container_of(node, struct acpi_driver, node);
651
652 atomic_inc(&driver->references);
653 spin_unlock(&acpi_device_lock);
654 if (!acpi_bus_match(device, driver)) {
655 result = acpi_bus_driver_init(device, driver);
656 if (!result)
657 goto Done;
658 }
659 atomic_dec(&driver->references);
660 spin_lock(&acpi_device_lock);
661 } 702 }
662 spin_unlock(&acpi_device_lock);
663
664 Done:
665 return result;
666}
667 703
668/* -------------------------------------------------------------------------- 704 /* Set defaults for D0 and D3 states (always valid) */
669 Device Enumeration 705 device->power.states[ACPI_STATE_D0].flags.valid = 1;
670 -------------------------------------------------------------------------- */ 706 device->power.states[ACPI_STATE_D0].power = 100;
707 device->power.states[ACPI_STATE_D3].flags.valid = 1;
708 device->power.states[ACPI_STATE_D3].power = 0;
671 709
672acpi_status 710 /* TBD: System wake support and resource requirements. */
673acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
674{
675 acpi_status status;
676 acpi_handle tmp;
677 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
678 union acpi_object *obj;
679 711
680 status = acpi_get_handle(handle, "_EJD", &tmp); 712 device->power.state = ACPI_STATE_UNKNOWN;
681 if (ACPI_FAILURE(status))
682 return status;
683 713
684 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); 714 return 0;
685 if (ACPI_SUCCESS(status)) {
686 obj = buffer.pointer;
687 status = acpi_get_handle(NULL, obj->string.pointer, ejd);
688 kfree(buffer.pointer);
689 }
690 return status;
691} 715}
692EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
693
694 716
695static int acpi_bus_get_flags(struct acpi_device *device) 717static int acpi_bus_get_flags(struct acpi_device *device)
696{ 718{
@@ -782,6 +804,75 @@ static void acpi_device_get_busid(struct acpi_device *device,
782 } 804 }
783} 805}
784 806
807static int
808acpi_video_bus_match(struct acpi_device *device)
809{
810 acpi_handle h_dummy1;
811 acpi_handle h_dummy2;
812 acpi_handle h_dummy3;
813
814
815 if (!device)
816 return -EINVAL;
817
 818 /* Since there is no HID or CID for ACPI video devices, we have
 819 * to check the well-known required nodes of each feature we support.
820 */
821
 822 /* Is this device able to support video switching? */
823 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
824 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
825 return 0;
826
 827 /* Is this device able to retrieve a video ROM? */
828 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
829 return 0;
830
 831 /* Is this device able to configure which video head is to be POSTed? */
832 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
833 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
834 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
835 return 0;
836
837 return -ENODEV;
838}
839
840/*
 841 * acpi_bay_match - see if a device is an ejectable drive bay
 842 *
 843 * If an ACPI object is ejectable and has one of the ACPI ATA methods defined,
 844 * then we can safely call it an ejectable drive bay
845 */
846static int acpi_bay_match(struct acpi_device *device){
847 acpi_status status;
848 acpi_handle handle;
849 acpi_handle tmp;
850 acpi_handle phandle;
851
852 handle = device->handle;
853
854 status = acpi_get_handle(handle, "_EJ0", &tmp);
855 if (ACPI_FAILURE(status))
856 return -ENODEV;
857
858 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
859 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
860 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
861 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
862 return 0;
863
864 if (acpi_get_parent(handle, &phandle))
865 return -ENODEV;
866
867 if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
868 (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
869 (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
870 (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
871 return 0;
872
873 return -ENODEV;
874}
875
785static void acpi_device_set_id(struct acpi_device *device, 876static void acpi_device_set_id(struct acpi_device *device,
786 struct acpi_device *parent, acpi_handle handle, 877 struct acpi_device *parent, acpi_handle handle,
787 int type) 878 int type)
@@ -812,6 +903,16 @@ static void acpi_device_set_id(struct acpi_device *device,
812 device->pnp.bus_address = info->address; 903 device->pnp.bus_address = info->address;
813 device->flags.bus_address = 1; 904 device->flags.bus_address = 1;
814 } 905 }
906
 907 if (!(info->valid & (ACPI_VALID_HID | ACPI_VALID_CID))) {
908 status = acpi_video_bus_match(device);
 909 if (ACPI_SUCCESS(status))
910 hid = ACPI_VIDEO_HID;
911
912 status = acpi_bay_match(device);
913 if (ACPI_SUCCESS(status))
914 hid = ACPI_BAY_HID;
915 }
815 break; 916 break;
816 case ACPI_BUS_TYPE_POWER: 917 case ACPI_BUS_TYPE_POWER:
817 hid = ACPI_POWER_HID; 918 hid = ACPI_POWER_HID;
@@ -888,86 +989,24 @@ static int acpi_device_set_context(struct acpi_device *device, int type)
888 return result; 989 return result;
889} 990}
890 991
891static void acpi_device_get_debug_info(struct acpi_device *device,
892 acpi_handle handle, int type)
893{
894#ifdef CONFIG_ACPI_DEBUG_OUTPUT
895 char *type_string = NULL;
896 char name[80] = { '?', '\0' };
897 struct acpi_buffer buffer = { sizeof(name), name };
898
899 switch (type) {
900 case ACPI_BUS_TYPE_DEVICE:
901 type_string = "Device";
902 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
903 break;
904 case ACPI_BUS_TYPE_POWER:
905 type_string = "Power Resource";
906 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
907 break;
908 case ACPI_BUS_TYPE_PROCESSOR:
909 type_string = "Processor";
910 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
911 break;
912 case ACPI_BUS_TYPE_SYSTEM:
913 type_string = "System";
914 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
915 break;
916 case ACPI_BUS_TYPE_THERMAL:
917 type_string = "Thermal Zone";
918 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
919 break;
920 case ACPI_BUS_TYPE_POWER_BUTTON:
921 type_string = "Power Button";
922 sprintf(name, "PWRB");
923 break;
924 case ACPI_BUS_TYPE_SLEEP_BUTTON:
925 type_string = "Sleep Button";
926 sprintf(name, "SLPB");
927 break;
928 }
929
930 printk(KERN_DEBUG "Found %s %s [%p]\n", type_string, name, handle);
931#endif /*CONFIG_ACPI_DEBUG_OUTPUT */
932}
933
934static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 992static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
935{ 993{
936 int result = 0;
937 struct acpi_driver *driver;
938
939
940 if (!dev) 994 if (!dev)
941 return -EINVAL; 995 return -EINVAL;
942 996
943 driver = dev->driver; 997 dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
944 998 device_release_driver(&dev->dev);
945 if ((driver) && (driver->ops.remove)) {
946
947 if (driver->ops.stop) {
948 result = driver->ops.stop(dev, ACPI_BUS_REMOVAL_EJECT);
949 if (result)
950 return result;
951 }
952
953 result = dev->driver->ops.remove(dev, ACPI_BUS_REMOVAL_EJECT);
954 if (result) {
955 return result;
956 }
957
958 atomic_dec(&dev->driver->references);
959 dev->driver = NULL;
960 acpi_driver_data(dev) = NULL;
961 }
962 999
963 if (!rmdevice) 1000 if (!rmdevice)
964 return 0; 1001 return 0;
965 1002
1003 /*
1004 * unbind _ADR-Based Devices when hot removal
1005 */
966 if (dev->flags.bus_address) { 1006 if (dev->flags.bus_address) {
967 if ((dev->parent) && (dev->parent->ops.unbind)) 1007 if ((dev->parent) && (dev->parent->ops.unbind))
968 dev->parent->ops.unbind(dev); 1008 dev->parent->ops.unbind(dev);
969 } 1009 }
970
971 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT); 1010 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
972 1011
973 return 0; 1012 return 0;
@@ -975,7 +1014,8 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
975 1014
976static int 1015static int
977acpi_add_single_object(struct acpi_device **child, 1016acpi_add_single_object(struct acpi_device **child,
978 struct acpi_device *parent, acpi_handle handle, int type) 1017 struct acpi_device *parent, acpi_handle handle, int type,
1018 struct acpi_bus_ops *ops)
979{ 1019{
980 int result = 0; 1020 int result = 0;
981 struct acpi_device *device = NULL; 1021 struct acpi_device *device = NULL;
@@ -992,6 +1032,8 @@ acpi_add_single_object(struct acpi_device **child,
992 1032
993 device->handle = handle; 1033 device->handle = handle;
994 device->parent = parent; 1034 device->parent = parent;
 1035 device->bus_ops = *ops; /* workaround for not calling .start */
1036
995 1037
996 acpi_device_get_busid(device, handle, type); 1038 acpi_device_get_busid(device, handle, type);
997 1039
@@ -1076,33 +1118,16 @@ acpi_add_single_object(struct acpi_device **child,
1076 if ((result = acpi_device_set_context(device, type))) 1118 if ((result = acpi_device_set_context(device, type)))
1077 goto end; 1119 goto end;
1078 1120
1079 acpi_device_get_debug_info(device, handle, type); 1121 result = acpi_device_register(device, parent);
1080
1081 acpi_device_register(device, parent);
1082 1122
1083 /* 1123 /*
 1084 * Bind _ADR-Based Devices 1124 * Bind _ADR-Based Devices on hot add
1085 * -----------------------
 1086 * If there's a bus address (_ADR) then we utilize the parent's
1087 * 'bind' function (if exists) to bind the ACPI- and natively-
1088 * enumerated device representations.
1089 */ 1125 */
1090 if (device->flags.bus_address) { 1126 if (device->flags.bus_address) {
1091 if (device->parent && device->parent->ops.bind) 1127 if (device->parent && device->parent->ops.bind)
1092 device->parent->ops.bind(device); 1128 device->parent->ops.bind(device);
1093 } 1129 }
1094 1130
1095 /*
1096 * Locate & Attach Driver
1097 * ----------------------
1098 * If there's a hardware id (_HID) or compatible ids (_CID) we check
1099 * to see if there's a driver installed for this kind of device. Note
1100 * that drivers can install before or after a device is enumerated.
1101 *
1102 * TBD: Assumes LDM provides driver hot-plug capability.
1103 */
1104 acpi_bus_find_driver(device);
1105
1106 end: 1131 end:
1107 if (!result) 1132 if (!result)
1108 *child = device; 1133 *child = device;
@@ -1188,14 +1213,14 @@ static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
1188 1213
1189 if (ops->acpi_op_add) 1214 if (ops->acpi_op_add)
1190 status = acpi_add_single_object(&child, parent, 1215 status = acpi_add_single_object(&child, parent,
1191 chandle, type); 1216 chandle, type, ops);
1192 else 1217 else
1193 status = acpi_bus_get_device(chandle, &child); 1218 status = acpi_bus_get_device(chandle, &child);
1194 1219
1195 if (ACPI_FAILURE(status)) 1220 if (ACPI_FAILURE(status))
1196 continue; 1221 continue;
1197 1222
1198 if (ops->acpi_op_start) { 1223 if (ops->acpi_op_start && !(ops->acpi_op_add)) {
1199 status = acpi_start_single_object(child); 1224 status = acpi_start_single_object(child);
1200 if (ACPI_FAILURE(status)) 1225 if (ACPI_FAILURE(status))
1201 continue; 1226 continue;
@@ -1233,13 +1258,13 @@ acpi_bus_add(struct acpi_device **child,
1233 int result; 1258 int result;
1234 struct acpi_bus_ops ops; 1259 struct acpi_bus_ops ops;
1235 1260
1261 memset(&ops, 0, sizeof(ops));
1262 ops.acpi_op_add = 1;
1236 1263
1237 result = acpi_add_single_object(child, parent, handle, type); 1264 result = acpi_add_single_object(child, parent, handle, type, &ops);
1238 if (!result) { 1265 if (!result)
1239 memset(&ops, 0, sizeof(ops));
1240 ops.acpi_op_add = 1;
1241 result = acpi_bus_scan(*child, &ops); 1266 result = acpi_bus_scan(*child, &ops);
1242 } 1267
1243 return result; 1268 return result;
1244} 1269}
1245 1270
@@ -1325,127 +1350,35 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1325{ 1350{
1326 int result = 0; 1351 int result = 0;
1327 struct acpi_device *device = NULL; 1352 struct acpi_device *device = NULL;
1328 1353 struct acpi_bus_ops ops;
1329 1354
1330 if (!root) 1355 if (!root)
1331 return -ENODEV; 1356 return -ENODEV;
1332 1357
1358 memset(&ops, 0, sizeof(ops));
1359 ops.acpi_op_add = 1;
1360 ops.acpi_op_start = 1;
1361
1333 /* 1362 /*
1334 * Enumerate all fixed-feature devices. 1363 * Enumerate all fixed-feature devices.
1335 */ 1364 */
1336 if (acpi_fadt.pwr_button == 0) { 1365 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
1337 result = acpi_add_single_object(&device, acpi_root, 1366 result = acpi_add_single_object(&device, acpi_root,
1338 NULL, 1367 NULL,
1339 ACPI_BUS_TYPE_POWER_BUTTON); 1368 ACPI_BUS_TYPE_POWER_BUTTON,
1340 if (!result) 1369 &ops);
1341 result = acpi_start_single_object(device);
1342 } 1370 }
1343 1371
1344 if (acpi_fadt.sleep_button == 0) { 1372 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1345 result = acpi_add_single_object(&device, acpi_root, 1373 result = acpi_add_single_object(&device, acpi_root,
1346 NULL, 1374 NULL,
1347 ACPI_BUS_TYPE_SLEEP_BUTTON); 1375 ACPI_BUS_TYPE_SLEEP_BUTTON,
1348 if (!result) 1376 &ops);
1349 result = acpi_start_single_object(device);
1350 } 1377 }
1351 1378
1352 return result; 1379 return result;
1353} 1380}
1354 1381
1355
1356static inline struct acpi_device * to_acpi_dev(struct device * dev)
1357{
1358 return container_of(dev, struct acpi_device, dev);
1359}
1360
1361
1362static int root_suspend(struct acpi_device * acpi_dev, pm_message_t state)
1363{
1364 struct acpi_device * dev, * next;
1365 int result;
1366
1367 spin_lock(&acpi_device_lock);
1368 list_for_each_entry_safe_reverse(dev, next, &acpi_device_list, g_list) {
1369 if (dev->driver && dev->driver->ops.suspend) {
1370 spin_unlock(&acpi_device_lock);
1371 result = dev->driver->ops.suspend(dev, 0);
1372 if (result) {
1373 printk(KERN_ERR PREFIX "[%s - %s] Suspend failed: %d\n",
1374 acpi_device_name(dev),
1375 acpi_device_bid(dev), result);
1376 }
1377 spin_lock(&acpi_device_lock);
1378 }
1379 }
1380 spin_unlock(&acpi_device_lock);
1381 return 0;
1382}
1383
1384
1385static int acpi_device_suspend(struct device * dev, pm_message_t state)
1386{
1387 struct acpi_device * acpi_dev = to_acpi_dev(dev);
1388
1389 /*
1390 * For now, we should only register 1 generic device -
1391 * the ACPI root device - and from there, we walk the
1392 * tree of ACPI devices to suspend each one using the
1393 * ACPI driver methods.
1394 */
1395 if (acpi_dev->handle == ACPI_ROOT_OBJECT)
1396 root_suspend(acpi_dev, state);
1397 return 0;
1398}
1399
1400
1401
1402static int root_resume(struct acpi_device * acpi_dev)
1403{
1404 struct acpi_device * dev, * next;
1405 int result;
1406
1407 spin_lock(&acpi_device_lock);
1408 list_for_each_entry_safe(dev, next, &acpi_device_list, g_list) {
1409 if (dev->driver && dev->driver->ops.resume) {
1410 spin_unlock(&acpi_device_lock);
1411 result = dev->driver->ops.resume(dev, 0);
1412 if (result) {
1413 printk(KERN_ERR PREFIX "[%s - %s] resume failed: %d\n",
1414 acpi_device_name(dev),
1415 acpi_device_bid(dev), result);
1416 }
1417 spin_lock(&acpi_device_lock);
1418 }
1419 }
1420 spin_unlock(&acpi_device_lock);
1421 return 0;
1422}
1423
1424
1425static int acpi_device_resume(struct device * dev)
1426{
1427 struct acpi_device * acpi_dev = to_acpi_dev(dev);
1428
1429 /*
1430 * For now, we should only register 1 generic device -
1431 * the ACPI root device - and from there, we walk the
1432 * tree of ACPI devices to resume each one using the
1433 * ACPI driver methods.
1434 */
1435 if (acpi_dev->handle == ACPI_ROOT_OBJECT)
1436 root_resume(acpi_dev);
1437 return 0;
1438}
1439
1440
1441static struct bus_type acpi_bus_type = {
1442 .name = "acpi",
1443 .suspend = acpi_device_suspend,
1444 .resume = acpi_device_resume,
1445};
1446
1447
1448
1449static int __init acpi_scan_init(void) 1382static int __init acpi_scan_init(void)
1450{ 1383{
1451 int result; 1384 int result;
@@ -1455,9 +1388,9 @@ static int __init acpi_scan_init(void)
1455 if (acpi_disabled) 1388 if (acpi_disabled)
1456 return 0; 1389 return 0;
1457 1390
1458 result = kset_register(&acpi_namespace_kset); 1391 memset(&ops, 0, sizeof(ops));
1459 if (result < 0) 1392 ops.acpi_op_add = 1;
1460 printk(KERN_ERR PREFIX "kset_register error: %d\n", result); 1393 ops.acpi_op_start = 1;
1461 1394
1462 result = bus_register(&acpi_bus_type); 1395 result = bus_register(&acpi_bus_type);
1463 if (result) { 1396 if (result) {
@@ -1469,32 +1402,16 @@ static int __init acpi_scan_init(void)
1469 * Create the root device in the bus's device tree 1402 * Create the root device in the bus's device tree
1470 */ 1403 */
1471 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT, 1404 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
1472 ACPI_BUS_TYPE_SYSTEM); 1405 ACPI_BUS_TYPE_SYSTEM, &ops);
1473 if (result) 1406 if (result)
1474 goto Done; 1407 goto Done;
1475 1408
1476 result = acpi_start_single_object(acpi_root);
1477 if (result)
1478 goto Done;
1479
1480 acpi_root->dev.bus = &acpi_bus_type;
1481 snprintf(acpi_root->dev.bus_id, BUS_ID_SIZE, "%s", acpi_bus_type.name);
1482 result = device_register(&acpi_root->dev);
1483 if (result) {
1484 /* We don't want to quit even if we failed to add suspend/resume */
1485 printk(KERN_ERR PREFIX "Could not register device\n");
1486 }
1487
1488 /* 1409 /*
1489 * Enumerate devices in the ACPI namespace. 1410 * Enumerate devices in the ACPI namespace.
1490 */ 1411 */
1491 result = acpi_bus_scan_fixed(acpi_root); 1412 result = acpi_bus_scan_fixed(acpi_root);
1492 if (!result) { 1413 if (!result)
1493 memset(&ops, 0, sizeof(ops));
1494 ops.acpi_op_add = 1;
1495 ops.acpi_op_start = 1;
1496 result = acpi_bus_scan(acpi_root, &ops); 1414 result = acpi_bus_scan(acpi_root, &ops);
1497 }
1498 1415
1499 if (result) 1416 if (result)
1500 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1417 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 34962578039d..ccc11b33d89c 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -73,7 +73,7 @@ acpi_system_write_sleep(struct file *file,
73static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) 73static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
74{ 74{
75 u32 sec, min, hr; 75 u32 sec, min, hr;
76 u32 day, mo, yr; 76 u32 day, mo, yr, cent = 0;
77 unsigned char rtc_control = 0; 77 unsigned char rtc_control = 0;
78 unsigned long flags; 78 unsigned long flags;
79 79
@@ -87,20 +87,19 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
87 rtc_control = CMOS_READ(RTC_CONTROL); 87 rtc_control = CMOS_READ(RTC_CONTROL);
88 88
89 /* If we ever get an FACP with proper values... */ 89 /* If we ever get an FACP with proper values... */
90 if (acpi_gbl_FADT->day_alrm) 90 if (acpi_gbl_FADT.day_alarm)
 91 /* ACPI spec: only the low 6 bits are relevant */ 91
92 day = CMOS_READ(acpi_gbl_FADT->day_alrm) & 0x3F; 92 day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
93 else 93 else
94 day = CMOS_READ(RTC_DAY_OF_MONTH); 94 day = CMOS_READ(RTC_DAY_OF_MONTH);
95 if (acpi_gbl_FADT->mon_alrm) 95 if (acpi_gbl_FADT.month_alarm)
96 mo = CMOS_READ(acpi_gbl_FADT->mon_alrm); 96 mo = CMOS_READ(acpi_gbl_FADT.month_alarm);
97 else 97 else
98 mo = CMOS_READ(RTC_MONTH); 98 mo = CMOS_READ(RTC_MONTH);
99 if (acpi_gbl_FADT->century) 99 if (acpi_gbl_FADT.century)
100 yr = CMOS_READ(acpi_gbl_FADT->century) * 100 + 100 cent = CMOS_READ(acpi_gbl_FADT.century);
101 CMOS_READ(RTC_YEAR); 101
102 else 102 yr = CMOS_READ(RTC_YEAR);
103 yr = CMOS_READ(RTC_YEAR);
104 103
105 spin_unlock_irqrestore(&rtc_lock, flags); 104 spin_unlock_irqrestore(&rtc_lock, flags);
106 105
@@ -111,10 +110,11 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
111 BCD_TO_BIN(day); 110 BCD_TO_BIN(day);
112 BCD_TO_BIN(mo); 111 BCD_TO_BIN(mo);
113 BCD_TO_BIN(yr); 112 BCD_TO_BIN(yr);
113 BCD_TO_BIN(cent);
114 } 114 }
115 115
116 /* we're trusting the FADT (see above) */ 116 /* we're trusting the FADT (see above) */
117 if (!acpi_gbl_FADT->century) 117 if (!acpi_gbl_FADT.century)
118 /* If we're not trusting the FADT, we should at least make it 118 /* If we're not trusting the FADT, we should at least make it
119 * right for _this_ century... ehm, what is _this_ century? 119 * right for _this_ century... ehm, what is _this_ century?
120 * 120 *
@@ -134,6 +134,8 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
134 * 134 *
135 */ 135 */
136 yr += 2000; 136 yr += 2000;
137 else
138 yr += cent * 100;
137 139
138 seq_printf(seq, "%4.4u-", yr); 140 seq_printf(seq, "%4.4u-", yr);
139 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); 141 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
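A worked example of the new century handling, with illustrative CMOS values: if the FADT supplies a century register and the RTC is in BCD mode, reads of century = 0x20 and year = 0x07 become cent = 20 and yr = 7 after BCD_TO_BIN(), so yr += cent * 100 yields 2007; when no century register is reported, the code still falls back to the old yr += 2000 heuristic.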
@@ -317,12 +319,12 @@ acpi_system_write_alarm(struct file *file,
317 * offsets into the CMOS RAM here -- which for some reason are pointing 319 * offsets into the CMOS RAM here -- which for some reason are pointing
318 * to the RTC area of memory. 320 * to the RTC area of memory.
319 */ 321 */
320 if (acpi_gbl_FADT->day_alrm) 322 if (acpi_gbl_FADT.day_alarm)
321 CMOS_WRITE(day, acpi_gbl_FADT->day_alrm); 323 CMOS_WRITE(day, acpi_gbl_FADT.day_alarm);
322 if (acpi_gbl_FADT->mon_alrm) 324 if (acpi_gbl_FADT.month_alarm)
323 CMOS_WRITE(mo, acpi_gbl_FADT->mon_alrm); 325 CMOS_WRITE(mo, acpi_gbl_FADT.month_alarm);
324 if (acpi_gbl_FADT->century) 326 if (acpi_gbl_FADT.century)
325 CMOS_WRITE(yr / 100, acpi_gbl_FADT->century); 327 CMOS_WRITE(yr / 100, acpi_gbl_FADT.century);
326 /* enable the rtc alarm interrupt */ 328 /* enable the rtc alarm interrupt */
327 rtc_control |= RTC_AIE; 329 rtc_control |= RTC_AIE;
328 CMOS_WRITE(rtc_control, RTC_CONTROL); 330 CMOS_WRITE(rtc_control, RTC_CONTROL);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index d86dcb3c2366..7147b0bdab0a 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -32,6 +32,11 @@
32 32
33#define _COMPONENT ACPI_SYSTEM_COMPONENT 33#define _COMPONENT ACPI_SYSTEM_COMPONENT
34ACPI_MODULE_NAME("acpi_system") 34ACPI_MODULE_NAME("acpi_system")
35#ifdef MODULE_PARAM_PREFIX
36#undef MODULE_PARAM_PREFIX
37#endif
38#define MODULE_PARAM_PREFIX "acpi."
39
35#define ACPI_SYSTEM_CLASS "system" 40#define ACPI_SYSTEM_CLASS "system"
36#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver" 41#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver"
37#define ACPI_SYSTEM_DEVICE_NAME "System" 42#define ACPI_SYSTEM_DEVICE_NAME "System"
@@ -39,11 +44,24 @@ ACPI_MODULE_NAME("acpi_system")
39#define ACPI_SYSTEM_FILE_EVENT "event" 44#define ACPI_SYSTEM_FILE_EVENT "event"
40#define ACPI_SYSTEM_FILE_DSDT "dsdt" 45#define ACPI_SYSTEM_FILE_DSDT "dsdt"
41#define ACPI_SYSTEM_FILE_FADT "fadt" 46#define ACPI_SYSTEM_FILE_FADT "fadt"
42extern struct fadt_descriptor acpi_fadt; 47
48/*
 49 * Expose the ACPICA version as a read-only module parameter
50 */
51static int param_get_acpica_version(char *buffer, struct kernel_param *kp) {
52 int result;
53
54 result = sprintf(buffer, "%x", ACPI_CA_VERSION);
55
56 return result;
57}
58
59module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
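Usage note (not part of the patch): because MODULE_PARAM_PREFIX is redefined to "acpi." above and the permissions are 0444, the parameter should appear as a read-only sysfs file, roughly /sys/module/acpi/parameters/acpica_version, giving userspace a /proc-free way to query the interpreter version.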
43 60
44/* -------------------------------------------------------------------------- 61/* --------------------------------------------------------------------------
45 FS Interface (/proc) 62 FS Interface (/proc)
46 -------------------------------------------------------------------------- */ 63 -------------------------------------------------------------------------- */
64#ifdef CONFIG_ACPI_PROCFS
47 65
48static int acpi_system_read_info(struct seq_file *seq, void *offset) 66static int acpi_system_read_info(struct seq_file *seq, void *offset)
49{ 67{
@@ -63,6 +81,7 @@ static const struct file_operations acpi_system_info_ops = {
63 .llseek = seq_lseek, 81 .llseek = seq_lseek,
64 .release = single_release, 82 .release = single_release,
65}; 83};
84#endif
66 85
67static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t, 86static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
68 loff_t *); 87 loff_t *);
@@ -76,17 +95,16 @@ acpi_system_read_dsdt(struct file *file,
76 char __user * buffer, size_t count, loff_t * ppos) 95 char __user * buffer, size_t count, loff_t * ppos)
77{ 96{
78 acpi_status status = AE_OK; 97 acpi_status status = AE_OK;
79 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL }; 98 struct acpi_table_header *dsdt = NULL;
80 ssize_t res; 99 ssize_t res;
81 100
82 101
83 status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt); 102 status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt);
84 if (ACPI_FAILURE(status)) 103 if (ACPI_FAILURE(status))
85 return -ENODEV; 104 return -ENODEV;
86 105
87 res = simple_read_from_buffer(buffer, count, ppos, 106 res = simple_read_from_buffer(buffer, count, ppos,
88 dsdt.pointer, dsdt.length); 107 dsdt, dsdt->length);
89 kfree(dsdt.pointer);
90 108
91 return res; 109 return res;
92} 110}
@@ -103,17 +121,16 @@ acpi_system_read_fadt(struct file *file,
103 char __user * buffer, size_t count, loff_t * ppos) 121 char __user * buffer, size_t count, loff_t * ppos)
104{ 122{
105 acpi_status status = AE_OK; 123 acpi_status status = AE_OK;
106 struct acpi_buffer fadt = { ACPI_ALLOCATE_BUFFER, NULL }; 124 struct acpi_table_header *fadt = NULL;
107 ssize_t res; 125 ssize_t res;
108 126
109 127
110 status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &fadt); 128 status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
111 if (ACPI_FAILURE(status)) 129 if (ACPI_FAILURE(status))
112 return -ENODEV; 130 return -ENODEV;
113 131
114 res = simple_read_from_buffer(buffer, count, ppos, 132 res = simple_read_from_buffer(buffer, count, ppos,
115 fadt.pointer, fadt.length); 133 fadt, fadt->length);
116 kfree(fadt.pointer);
117 134
118 return res; 135 return res;
119} 136}
@@ -128,6 +145,7 @@ static int __init acpi_system_init(void)
128 if (acpi_disabled) 145 if (acpi_disabled)
129 return 0; 146 return 0;
130 147
148#ifdef CONFIG_ACPI_PROCFS
131 /* 'info' [R] */ 149 /* 'info' [R] */
132 name = ACPI_SYSTEM_FILE_INFO; 150 name = ACPI_SYSTEM_FILE_INFO;
133 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir); 151 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
@@ -136,6 +154,7 @@ static int __init acpi_system_init(void)
136 else { 154 else {
137 entry->proc_fops = &acpi_system_info_ops; 155 entry->proc_fops = &acpi_system_info_ops;
138 } 156 }
157#endif
139 158
140 /* 'dsdt' [R] */ 159 /* 'dsdt' [R] */
141 name = ACPI_SYSTEM_FILE_DSDT; 160 name = ACPI_SYSTEM_FILE_DSDT;
@@ -159,7 +178,9 @@ static int __init acpi_system_init(void)
159 Error: 178 Error:
160 remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir); 179 remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir);
161 remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir); 180 remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir);
181#ifdef CONFIG_ACPI_PROCFS
162 remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir); 182 remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir);
183#endif
163 184
164 error = -EFAULT; 185 error = -EFAULT;
165 goto Done; 186 goto Done;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ffa30c9fccbf..ba4cb200314a 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -38,154 +38,97 @@
38 38
39#define ACPI_MAX_TABLES 128 39#define ACPI_MAX_TABLES 128
40 40
41static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
42 [ACPI_TABLE_UNKNOWN] = "????",
43 [ACPI_APIC] = "APIC",
44 [ACPI_BOOT] = "BOOT",
45 [ACPI_DBGP] = "DBGP",
46 [ACPI_DSDT] = "DSDT",
47 [ACPI_ECDT] = "ECDT",
48 [ACPI_ETDT] = "ETDT",
49 [ACPI_FADT] = "FACP",
50 [ACPI_FACS] = "FACS",
51 [ACPI_OEMX] = "OEM",
52 [ACPI_PSDT] = "PSDT",
53 [ACPI_SBST] = "SBST",
54 [ACPI_SLIT] = "SLIT",
55 [ACPI_SPCR] = "SPCR",
56 [ACPI_SRAT] = "SRAT",
57 [ACPI_SSDT] = "SSDT",
58 [ACPI_SPMI] = "SPMI",
59 [ACPI_HPET] = "HPET",
60 [ACPI_MCFG] = "MCFG",
61};
62
63static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" }; 41static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
64static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" }; 42static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
65 43
66/* System Description Table (RSDT/XSDT) */ 44static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
67struct acpi_table_sdt {
68 unsigned long pa;
69 enum acpi_table_id id;
70 unsigned long size;
71} __attribute__ ((packed));
72
73static unsigned long sdt_pa; /* Physical Address */
74static unsigned long sdt_count; /* Table count */
75 45
76static struct acpi_table_sdt sdt_entry[ACPI_MAX_TABLES] __initdata; 46void acpi_table_print_madt_entry(struct acpi_subtable_header * header)
77
78void acpi_table_print(struct acpi_table_header *header, unsigned long phys_addr)
79{
80 char *name = NULL;
81
82 if (!header)
83 return;
84
85 /* Some table signatures aren't good table names */
86
87 if (!strncmp((char *)&header->signature,
88 acpi_table_signatures[ACPI_APIC],
89 sizeof(header->signature))) {
90 name = "MADT";
91 } else if (!strncmp((char *)&header->signature,
92 acpi_table_signatures[ACPI_FADT],
93 sizeof(header->signature))) {
94 name = "FADT";
95 } else
96 name = header->signature;
97
98 printk(KERN_DEBUG PREFIX
99 "%.4s (v%3.3d %6.6s %8.8s 0x%08x %.4s 0x%08x) @ 0x%p\n", name,
100 header->revision, header->oem_id, header->oem_table_id,
101 header->oem_revision, header->asl_compiler_id,
102 header->asl_compiler_revision, (void *)phys_addr);
103}
104
105void acpi_table_print_madt_entry(acpi_table_entry_header * header)
106{ 47{
107 if (!header) 48 if (!header)
108 return; 49 return;
109 50
110 switch (header->type) { 51 switch (header->type) {
111 52
112 case ACPI_MADT_LAPIC: 53 case ACPI_MADT_TYPE_LOCAL_APIC:
113 { 54 {
114 struct acpi_table_lapic *p = 55 struct acpi_madt_local_apic *p =
115 (struct acpi_table_lapic *)header; 56 (struct acpi_madt_local_apic *)header;
116 printk(KERN_INFO PREFIX 57 printk(KERN_INFO PREFIX
117 "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", 58 "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
118 p->acpi_id, p->id, 59 p->processor_id, p->id,
119 p->flags.enabled ? "enabled" : "disabled"); 60 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
120 } 61 }
121 break; 62 break;
122 63
123 case ACPI_MADT_IOAPIC: 64 case ACPI_MADT_TYPE_IO_APIC:
124 { 65 {
125 struct acpi_table_ioapic *p = 66 struct acpi_madt_io_apic *p =
126 (struct acpi_table_ioapic *)header; 67 (struct acpi_madt_io_apic *)header;
127 printk(KERN_INFO PREFIX 68 printk(KERN_INFO PREFIX
128 "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", 69 "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
129 p->id, p->address, p->global_irq_base); 70 p->id, p->address, p->global_irq_base);
130 } 71 }
131 break; 72 break;
132 73
133 case ACPI_MADT_INT_SRC_OVR: 74 case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE:
134 { 75 {
135 struct acpi_table_int_src_ovr *p = 76 struct acpi_madt_interrupt_override *p =
136 (struct acpi_table_int_src_ovr *)header; 77 (struct acpi_madt_interrupt_override *)header;
137 printk(KERN_INFO PREFIX 78 printk(KERN_INFO PREFIX
138 "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n", 79 "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
139 p->bus, p->bus_irq, p->global_irq, 80 p->bus, p->source_irq, p->global_irq,
140 mps_inti_flags_polarity[p->flags.polarity], 81 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
141 mps_inti_flags_trigger[p->flags.trigger]); 82 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
142 if (p->flags.reserved) 83 if (p->inti_flags &
84 ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
143 printk(KERN_INFO PREFIX 85 printk(KERN_INFO PREFIX
144 "INT_SRC_OVR unexpected reserved flags: 0x%x\n", 86 "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
145 p->flags.reserved); 87 p->inti_flags &
88 ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
146 89
147 } 90 }
148 break; 91 break;
149 92
150 case ACPI_MADT_NMI_SRC: 93 case ACPI_MADT_TYPE_NMI_SOURCE:
151 { 94 {
152 struct acpi_table_nmi_src *p = 95 struct acpi_madt_nmi_source *p =
153 (struct acpi_table_nmi_src *)header; 96 (struct acpi_madt_nmi_source *)header;
154 printk(KERN_INFO PREFIX 97 printk(KERN_INFO PREFIX
155 "NMI_SRC (%s %s global_irq %d)\n", 98 "NMI_SRC (%s %s global_irq %d)\n",
156 mps_inti_flags_polarity[p->flags.polarity], 99 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
157 mps_inti_flags_trigger[p->flags.trigger], 100 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
158 p->global_irq); 101 p->global_irq);
159 } 102 }
160 break; 103 break;
161 104
162 case ACPI_MADT_LAPIC_NMI: 105 case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
163 { 106 {
164 struct acpi_table_lapic_nmi *p = 107 struct acpi_madt_local_apic_nmi *p =
165 (struct acpi_table_lapic_nmi *)header; 108 (struct acpi_madt_local_apic_nmi *)header;
166 printk(KERN_INFO PREFIX 109 printk(KERN_INFO PREFIX
167 "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n", 110 "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
168 p->acpi_id, 111 p->processor_id,
169 mps_inti_flags_polarity[p->flags.polarity], 112 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
170 mps_inti_flags_trigger[p->flags.trigger], 113 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
171 p->lint); 114 p->lint);
172 } 115 }
173 break; 116 break;
174 117
175 case ACPI_MADT_LAPIC_ADDR_OVR: 118 case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE:
176 { 119 {
177 struct acpi_table_lapic_addr_ovr *p = 120 struct acpi_madt_local_apic_override *p =
178 (struct acpi_table_lapic_addr_ovr *)header; 121 (struct acpi_madt_local_apic_override *)header;
179 printk(KERN_INFO PREFIX 122 printk(KERN_INFO PREFIX
180 "LAPIC_ADDR_OVR (address[%p])\n", 123 "LAPIC_ADDR_OVR (address[%p])\n",
181 (void *)(unsigned long)p->address); 124 (void *)(unsigned long)p->address);
182 } 125 }
183 break; 126 break;
184 127
185 case ACPI_MADT_IOSAPIC: 128 case ACPI_MADT_TYPE_IO_SAPIC:
186 { 129 {
187 struct acpi_table_iosapic *p = 130 struct acpi_madt_io_sapic *p =
188 (struct acpi_table_iosapic *)header; 131 (struct acpi_madt_io_sapic *)header;
189 printk(KERN_INFO PREFIX 132 printk(KERN_INFO PREFIX
190 "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", 133 "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
191 p->id, (void *)(unsigned long)p->address, 134 p->id, (void *)(unsigned long)p->address,
@@ -193,26 +136,26 @@ void acpi_table_print_madt_entry(acpi_table_entry_header * header)
193 } 136 }
194 break; 137 break;
195 138
196 case ACPI_MADT_LSAPIC: 139 case ACPI_MADT_TYPE_LOCAL_SAPIC:
197 { 140 {
198 struct acpi_table_lsapic *p = 141 struct acpi_madt_local_sapic *p =
199 (struct acpi_table_lsapic *)header; 142 (struct acpi_madt_local_sapic *)header;
200 printk(KERN_INFO PREFIX 143 printk(KERN_INFO PREFIX
201 "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", 144 "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
202 p->acpi_id, p->id, p->eid, 145 p->processor_id, p->id, p->eid,
203 p->flags.enabled ? "enabled" : "disabled"); 146 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
204 } 147 }
205 break; 148 break;
206 149
207 case ACPI_MADT_PLAT_INT_SRC: 150 case ACPI_MADT_TYPE_INTERRUPT_SOURCE:
208 { 151 {
209 struct acpi_table_plat_int_src *p = 152 struct acpi_madt_interrupt_source *p =
210 (struct acpi_table_plat_int_src *)header; 153 (struct acpi_madt_interrupt_source *)header;
211 printk(KERN_INFO PREFIX 154 printk(KERN_INFO PREFIX
212 "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n", 155 "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
213 mps_inti_flags_polarity[p->flags.polarity], 156 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
214 mps_inti_flags_trigger[p->flags.trigger], 157 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
215 p->type, p->id, p->eid, p->iosapic_vector, 158 p->type, p->id, p->eid, p->io_sapic_vector,
216 p->global_irq); 159 p->global_irq);
217 } 160 }
218 break; 161 break;
@@ -225,342 +168,76 @@ void acpi_table_print_madt_entry(acpi_table_entry_header * header)
225 } 168 }
226} 169}
227 170
228static int
229acpi_table_compute_checksum(void *table_pointer, unsigned long length)
230{
231 u8 *p = table_pointer;
232 unsigned long remains = length;
233 unsigned long sum = 0;
234
235 if (!p || !length)
236 return -EINVAL;
237
238 while (remains--)
239 sum += *p++;
240
241 return (sum & 0xFF);
242}
243 171
244/*
245 * acpi_get_table_header_early()
246 * for acpi_blacklisted(), acpi_table_get_sdt()
247 */
248int __init 172int __init
249acpi_get_table_header_early(enum acpi_table_id id, 173acpi_table_parse_madt_family(char *id,
250 struct acpi_table_header **header)
251{
252 unsigned int i;
253 enum acpi_table_id temp_id;
254
255 /* DSDT is different from the rest */
256 if (id == ACPI_DSDT)
257 temp_id = ACPI_FADT;
258 else
259 temp_id = id;
260
261 /* Locate the table. */
262
263 for (i = 0; i < sdt_count; i++) {
264 if (sdt_entry[i].id != temp_id)
265 continue;
266 *header = (void *)
267 __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
268 if (!*header) {
269 printk(KERN_WARNING PREFIX "Unable to map %s\n",
270 acpi_table_signatures[temp_id]);
271 return -ENODEV;
272 }
273 break;
274 }
275
276 if (!*header) {
277 printk(KERN_WARNING PREFIX "%s not present\n",
278 acpi_table_signatures[id]);
279 return -ENODEV;
280 }
281
282 /* Map the DSDT header via the pointer in the FADT */
283 if (id == ACPI_DSDT) {
284 struct fadt_descriptor *fadt =
285 (struct fadt_descriptor *)*header;
286
287 if (fadt->revision == 3 && fadt->Xdsdt) {
288 *header = (void *)__acpi_map_table(fadt->Xdsdt,
289 sizeof(struct
290 acpi_table_header));
291 } else if (fadt->V1_dsdt) {
292 *header = (void *)__acpi_map_table(fadt->V1_dsdt,
293 sizeof(struct
294 acpi_table_header));
295 } else
296 *header = NULL;
297
298 if (!*header) {
299 printk(KERN_WARNING PREFIX "Unable to map DSDT\n");
300 return -ENODEV;
301 }
302 }
303
304 return 0;
305}
306
307int __init
308acpi_table_parse_madt_family(enum acpi_table_id id,
309 unsigned long madt_size, 174 unsigned long madt_size,
310 int entry_id, 175 int entry_id,
311 acpi_madt_entry_handler handler, 176 acpi_madt_entry_handler handler,
312 unsigned int max_entries) 177 unsigned int max_entries)
313{ 178{
314 void *madt = NULL; 179 struct acpi_table_header *madt = NULL;
315 acpi_table_entry_header *entry; 180 struct acpi_subtable_header *entry;
316 unsigned int count = 0; 181 unsigned int count = 0;
317 unsigned long madt_end; 182 unsigned long madt_end;
318 unsigned int i;
319 183
320 if (!handler) 184 if (!handler)
321 return -EINVAL; 185 return -EINVAL;
322 186
323 /* Locate the MADT (if exists). There should only be one. */ 187 /* Locate the MADT (if exists). There should only be one. */
324 188 acpi_get_table(id, 0, &madt);
325 for (i = 0; i < sdt_count; i++) {
326 if (sdt_entry[i].id != id)
327 continue;
328 madt = (void *)
329 __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
330 if (!madt) {
331 printk(KERN_WARNING PREFIX "Unable to map %s\n",
332 acpi_table_signatures[id]);
333 return -ENODEV;
334 }
335 break;
336 }
337 189
338 if (!madt) { 190 if (!madt) {
339 printk(KERN_WARNING PREFIX "%s not present\n", 191 printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
340 acpi_table_signatures[id]);
341 return -ENODEV; 192 return -ENODEV;
342 } 193 }
343 194
344 madt_end = (unsigned long)madt + sdt_entry[i].size; 195 madt_end = (unsigned long)madt + madt->length;
345 196
346 /* Parse all entries looking for a match. */ 197 /* Parse all entries looking for a match. */
347 198
348 entry = (acpi_table_entry_header *) 199 entry = (struct acpi_subtable_header *)
349 ((unsigned long)madt + madt_size); 200 ((unsigned long)madt + madt_size);
350 201
351 while (((unsigned long)entry) + sizeof(acpi_table_entry_header) < 202 while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
352 madt_end) { 203 madt_end) {
353 if (entry->type == entry_id 204 if (entry->type == entry_id
354 && (!max_entries || count++ < max_entries)) 205 && (!max_entries || count++ < max_entries))
355 if (handler(entry, madt_end)) 206 if (handler(entry, madt_end))
356 return -EINVAL; 207 return -EINVAL;
357 208
358 entry = (acpi_table_entry_header *) 209 entry = (struct acpi_subtable_header *)
359 ((unsigned long)entry + entry->length); 210 ((unsigned long)entry + entry->length);
360 } 211 }
361 if (max_entries && count > max_entries) { 212 if (max_entries && count > max_entries) {
362 printk(KERN_WARNING PREFIX "[%s:0x%02x] ignored %i entries of " 213 printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
363 "%i found\n", acpi_table_signatures[id], entry_id, 214 "%i found\n", id, entry_id, count - max_entries, count);
364 count - max_entries, count);
365 } 215 }
366 216
367 return count; 217 return count;
368} 218}
369 219
370int __init 220int __init
371acpi_table_parse_madt(enum acpi_madt_entry_id id, 221acpi_table_parse_madt(enum acpi_madt_type id,
372 acpi_madt_entry_handler handler, unsigned int max_entries) 222 acpi_madt_entry_handler handler, unsigned int max_entries)
373{ 223{
374 return acpi_table_parse_madt_family(ACPI_APIC, 224 return acpi_table_parse_madt_family(ACPI_SIG_MADT,
375 sizeof(struct acpi_table_madt), id, 225 sizeof(struct acpi_table_madt), id,
376 handler, max_entries); 226 handler, max_entries);
377} 227}
378 228
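A minimal sketch of a subtable handler written against the converted API -- not part of this patch. The handler signature, struct acpi_madt_local_apic, ACPI_MADT_TYPE_LOCAL_APIC and ACPI_MADT_ENABLED follow the hunks above; the handler body and the NR_CPUS bound are only illustrative.

static int __init example_parse_lapic(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	struct acpi_madt_local_apic *p =
	    (struct acpi_madt_local_apic *)header;

	/* skip disabled entries; a real caller would register the CPU here */
	if (!(p->lapic_flags & ACPI_MADT_ENABLED))
		return 0;

	acpi_table_print_madt_entry(header);
	return 0;
}

/* from arch setup code, e.g.:
 *	acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, example_parse_lapic, NR_CPUS);
 */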
379int __init acpi_table_parse(enum acpi_table_id id, acpi_table_handler handler) 229int __init acpi_table_parse(char *id, acpi_table_handler handler)
380{ 230{
381 int count = 0; 231 struct acpi_table_header *table = NULL;
382 unsigned int i = 0;
383
384 if (!handler) 232 if (!handler)
385 return -EINVAL; 233 return -EINVAL;
386 234
387 for (i = 0; i < sdt_count; i++) { 235 acpi_get_table(id, 0, &table);
388 if (sdt_entry[i].id != id) 236 if (table) {
389 continue; 237 handler(table);
390 count++; 238 return 1;
391 if (count == 1) 239 } else
392 handler(sdt_entry[i].pa, sdt_entry[i].size); 240 return 0;
393
394 else
395 printk(KERN_WARNING PREFIX
396 "%d duplicate %s table ignored.\n", count,
397 acpi_table_signatures[id]);
398 }
399
400 return count;
401}
402
403static int __init acpi_table_get_sdt(struct acpi_table_rsdp *rsdp)
404{
405 struct acpi_table_header *header = NULL;
406 unsigned int i, id = 0;
407
408 if (!rsdp)
409 return -EINVAL;
410
411 /* First check XSDT (but only on ACPI 2.0-compatible systems) */
412
413 if ((rsdp->revision >= 2) &&
414 (((struct acpi20_table_rsdp *)rsdp)->xsdt_address)) {
415
416 struct acpi_table_xsdt *mapped_xsdt = NULL;
417
418 sdt_pa = ((struct acpi20_table_rsdp *)rsdp)->xsdt_address;
419
420 /* map in just the header */
421 header = (struct acpi_table_header *)
422 __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
423
424 if (!header) {
425 printk(KERN_WARNING PREFIX
426 "Unable to map XSDT header\n");
427 return -ENODEV;
428 }
429
430 /* remap in the entire table before processing */
431 mapped_xsdt = (struct acpi_table_xsdt *)
432 __acpi_map_table(sdt_pa, header->length);
433 if (!mapped_xsdt) {
434 printk(KERN_WARNING PREFIX "Unable to map XSDT\n");
435 return -ENODEV;
436 }
437 header = &mapped_xsdt->header;
438
439 if (strncmp(header->signature, "XSDT", 4)) {
440 printk(KERN_WARNING PREFIX
441 "XSDT signature incorrect\n");
442 return -ENODEV;
443 }
444
445 if (acpi_table_compute_checksum(header, header->length)) {
446 printk(KERN_WARNING PREFIX "Invalid XSDT checksum\n");
447 return -ENODEV;
448 }
449
450 sdt_count =
451 (header->length - sizeof(struct acpi_table_header)) >> 3;
452 if (sdt_count > ACPI_MAX_TABLES) {
453 printk(KERN_WARNING PREFIX
454 "Truncated %lu XSDT entries\n",
455 (sdt_count - ACPI_MAX_TABLES));
456 sdt_count = ACPI_MAX_TABLES;
457 }
458
459 for (i = 0; i < sdt_count; i++)
460 sdt_entry[i].pa = (unsigned long)mapped_xsdt->entry[i];
461 }
462
463 /* Then check RSDT */
464
465 else if (rsdp->rsdt_address) {
466
467 struct acpi_table_rsdt *mapped_rsdt = NULL;
468
469 sdt_pa = rsdp->rsdt_address;
470
471 /* map in just the header */
472 header = (struct acpi_table_header *)
473 __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
474 if (!header) {
475 printk(KERN_WARNING PREFIX
476 "Unable to map RSDT header\n");
477 return -ENODEV;
478 }
479
480 /* remap in the entire table before processing */
481 mapped_rsdt = (struct acpi_table_rsdt *)
482 __acpi_map_table(sdt_pa, header->length);
483 if (!mapped_rsdt) {
484 printk(KERN_WARNING PREFIX "Unable to map RSDT\n");
485 return -ENODEV;
486 }
487 header = &mapped_rsdt->header;
488
489 if (strncmp(header->signature, "RSDT", 4)) {
490 printk(KERN_WARNING PREFIX
491 "RSDT signature incorrect\n");
492 return -ENODEV;
493 }
494
495 if (acpi_table_compute_checksum(header, header->length)) {
496 printk(KERN_WARNING PREFIX "Invalid RSDT checksum\n");
497 return -ENODEV;
498 }
499
500 sdt_count =
501 (header->length - sizeof(struct acpi_table_header)) >> 2;
502 if (sdt_count > ACPI_MAX_TABLES) {
503 printk(KERN_WARNING PREFIX
504 "Truncated %lu RSDT entries\n",
505 (sdt_count - ACPI_MAX_TABLES));
506 sdt_count = ACPI_MAX_TABLES;
507 }
508
509 for (i = 0; i < sdt_count; i++)
510 sdt_entry[i].pa = (unsigned long)mapped_rsdt->entry[i];
511 }
512
513 else {
514 printk(KERN_WARNING PREFIX
515 "No System Description Table (RSDT/XSDT) specified in RSDP\n");
516 return -ENODEV;
517 }
518
519 acpi_table_print(header, sdt_pa);
520
521 for (i = 0; i < sdt_count; i++) {
522
523 /* map in just the header */
524 header = (struct acpi_table_header *)
525 __acpi_map_table(sdt_entry[i].pa,
526 sizeof(struct acpi_table_header));
527 if (!header)
528 continue;
529
530 /* remap in the entire table before processing */
531 header = (struct acpi_table_header *)
532 __acpi_map_table(sdt_entry[i].pa, header->length);
533 if (!header)
534 continue;
535
536 acpi_table_print(header, sdt_entry[i].pa);
537
538 if (acpi_table_compute_checksum(header, header->length)) {
539 printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
540 continue;
541 }
542
543 sdt_entry[i].size = header->length;
544
545 for (id = 0; id < ACPI_TABLE_COUNT; id++) {
546 if (!strncmp((char *)&header->signature,
547 acpi_table_signatures[id],
548 sizeof(header->signature))) {
549 sdt_entry[i].id = id;
550 }
551 }
552 }
553
554 /*
555 * The DSDT is *not* in the RSDT (why not? no idea.) but we want
556 * to print its info, because this is what people usually blacklist
557 * against. Unfortunately, we don't know the phys_addr, so just
558 * print 0. Maybe no one will notice.
559 */
560 if (!acpi_get_table_header_early(ACPI_DSDT, &header))
561 acpi_table_print(header, 0);
562
563 return 0;
564} 241}
565 242
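Similarly, a sketch of a whole-table handler under the new acpi_table_parse() prototype -- not part of this patch. PREFIX and the %4.4s signature format follow this file; ACPI_SIG_HPET, the int return convention and the body are assumed for illustration.

static int __init example_parse_hpet(struct acpi_table_header *table)
{
	printk(KERN_INFO PREFIX "%4.4s revision %d found\n",
	       table->signature, table->revision);
	return 0;
}

/* e.g.:	acpi_table_parse(ACPI_SIG_HPET, example_parse_hpet); */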
566/* 243/*
@@ -568,54 +245,13 @@ static int __init acpi_table_get_sdt(struct acpi_table_rsdp *rsdp)
568 * 245 *
569 * find RSDP, find and checksum SDT/XSDT. 246 * find RSDP, find and checksum SDT/XSDT.
570 * checksum all tables, print SDT/XSDT 247 * checksum all tables, print SDT/XSDT
571 * 248 *
572 * result: sdt_entry[] is initialized 249 * result: sdt_entry[] is initialized
573 */ 250 */
574 251
252
575int __init acpi_table_init(void) 253int __init acpi_table_init(void)
576{ 254{
577 struct acpi_table_rsdp *rsdp = NULL; 255 acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
578 unsigned long rsdp_phys = 0;
579 int result = 0;
580
581 /* Locate and map the Root System Description Table (RSDP) */
582
583 rsdp_phys = acpi_find_rsdp();
584 if (!rsdp_phys) {
585 printk(KERN_ERR PREFIX "Unable to locate RSDP\n");
586 return -ENODEV;
587 }
588
589 rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys,
590 sizeof(struct acpi_table_rsdp));
591 if (!rsdp) {
592 printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
593 return -ENODEV;
594 }
595
596 printk(KERN_DEBUG PREFIX
597 "RSDP (v%3.3d %6.6s ) @ 0x%p\n",
598 rsdp->revision, rsdp->oem_id, (void *)rsdp_phys);
599
600 if (rsdp->revision < 2)
601 result =
602 acpi_table_compute_checksum(rsdp,
603 sizeof(struct acpi_table_rsdp));
604 else
605 result =
606 acpi_table_compute_checksum(rsdp,
607 ((struct acpi20_table_rsdp *)
608 rsdp)->length);
609
610 if (result) {
611 printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
612 return -ENODEV;
613 }
614
615 /* Locate and map the System Description table (RSDT/XSDT) */
616
617 if (acpi_table_get_sdt(rsdp))
618 return -ENODEV;
619
620 return 0; 256 return 0;
621} 257}
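A short, hedged reading of the new one-line acpi_table_init() above: acpi_initialize_tables() is handed the statically allocated initial_tables[] array with a third argument of 0, i.e. ACPICA is asked not to resize or reallocate the root table list, which is what allows table discovery to run this early in boot, before the kernel's dynamic allocators are usable.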
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
index aa4c69594d97..0a7d7afac255 100644
--- a/drivers/acpi/tables/Makefile
+++ b/drivers/acpi/tables/Makefile
@@ -2,7 +2,6 @@
2# Makefile for all Linux ACPI interpreter subdirectories 2# Makefile for all Linux ACPI interpreter subdirectories
3# 3#
4 4
5obj-y := tbconvrt.o tbget.o tbrsdt.o tbxface.o \ 5obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o
6 tbgetall.o tbinstal.o tbutils.o tbxfroot.o
7 6
8EXTRA_CFLAGS += $(ACPI_CFLAGS) 7EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbconvrt.c b/drivers/acpi/tables/tbconvrt.c
deleted file mode 100644
index d697fcb35d52..000000000000
--- a/drivers/acpi/tables/tbconvrt.c
+++ /dev/null
@@ -1,622 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbconvrt - ACPI Table conversion utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbconvrt")
49
50/* Local prototypes */
51static void
52acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
53 u8 register_bit_width,
54 acpi_physical_address address);
55
56static void
57acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
58 struct fadt_descriptor_rev1 *original_fadt);
59
60static void
61acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
62 struct fadt_descriptor *original_fadt);
63
64u8 acpi_fadt_is_v1;
65ACPI_EXPORT_SYMBOL(acpi_fadt_is_v1)
66
67/*******************************************************************************
68 *
69 * FUNCTION: acpi_tb_get_table_count
70 *
71 * PARAMETERS: RSDP - Pointer to the RSDP
72 * RSDT - Pointer to the RSDT/XSDT
73 *
74 * RETURN: The number of tables pointed to by the RSDT or XSDT.
75 *
76 * DESCRIPTION: Calculate the number of tables. Automatically handles either
77 * an RSDT or XSDT.
78 *
79 ******************************************************************************/
80
81u32
82acpi_tb_get_table_count(struct rsdp_descriptor *RSDP,
83 struct acpi_table_header *RSDT)
84{
85 u32 pointer_size;
86
87 ACPI_FUNCTION_ENTRY();
88
89 /* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
90
91 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
92 pointer_size = sizeof(u32);
93 } else {
94 pointer_size = sizeof(u64);
95 }
96
97 /*
98 * Determine the number of tables pointed to by the RSDT/XSDT.
99 * This is defined by the ACPI Specification to be the number of
100 * pointers contained within the RSDT/XSDT. The size of the pointers
101 * is architecture-dependent.
102 */
103 return ((RSDT->length -
104 sizeof(struct acpi_table_header)) / pointer_size);
105}
106
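
As a quick illustration of the arithmetic above, here is a minimal, standalone C sketch (the helper name and header-size constant are placeholders, not ACPICA definitions): the entry count is simply the table length minus the fixed ACPI header size, divided by the per-entry pointer width.

#include <stdint.h>
#include <stdio.h>

#define ACPI_SDT_HEADER_SIZE 36u	/* common ACPI table header is 36 bytes */

/* Number of table pointers that follow the header in an RSDT/XSDT */
static uint32_t sdt_entry_count(uint32_t table_length, int is_xsdt)
{
	uint32_t entry_size = is_xsdt ? sizeof(uint64_t) : sizeof(uint32_t);

	return (table_length - ACPI_SDT_HEADER_SIZE) / entry_size;
}

int main(void)
{
	/* A 76-byte RSDT holds (76 - 36) / 4 = 10 entries */
	printf("%u\n", (unsigned)sdt_entry_count(76, 0));
	return 0;
}
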
107/*******************************************************************************
108 *
109 * FUNCTION: acpi_tb_convert_to_xsdt
110 *
111 * PARAMETERS: table_info - Info about the RSDT
112 *
113 * RETURN: Status
114 *
115 * DESCRIPTION: Convert an RSDT to an XSDT (internal common format)
116 *
117 ******************************************************************************/
118
119acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
120{
121 acpi_size table_size;
122 u32 i;
123 struct xsdt_descriptor *new_table;
124
125 ACPI_FUNCTION_ENTRY();
126
127 /* Compute size of the converted XSDT */
128
129 table_size = ((acpi_size) acpi_gbl_rsdt_table_count * sizeof(u64)) +
130 sizeof(struct acpi_table_header);
131
132 /* Allocate an XSDT */
133
134 new_table = ACPI_ALLOCATE_ZEROED(table_size);
135 if (!new_table) {
136 return (AE_NO_MEMORY);
137 }
138
139 /* Copy the header and set the length */
140
141 ACPI_MEMCPY(new_table, table_info->pointer,
142 sizeof(struct acpi_table_header));
143 new_table->length = (u32) table_size;
144
145 /* Copy the table pointers */
146
147 for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
148
149 /* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
150
151 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
152 ACPI_STORE_ADDRESS(new_table->table_offset_entry[i],
153 (ACPI_CAST_PTR
154 (struct rsdt_descriptor,
155 table_info->pointer))->
156 table_offset_entry[i]);
157 } else {
158 new_table->table_offset_entry[i] =
159 (ACPI_CAST_PTR(struct xsdt_descriptor,
160 table_info->pointer))->
161 table_offset_entry[i];
162 }
163 }
164
165 /* Delete the original table (either mapped or in a buffer) */
166
167 acpi_tb_delete_single_table(table_info);
168
169 /* Point the table descriptor to the new table */
170
171 table_info->pointer =
172 ACPI_CAST_PTR(struct acpi_table_header, new_table);
173 table_info->length = table_size;
174 table_info->allocation = ACPI_MEM_ALLOCATED;
175
176 return (AE_OK);
177}
178
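
The copy loop above widens each 32-bit RSDT entry into a 64-bit XSDT slot. A self-contained sketch of that widening step, using plain C and hypothetical names rather than the ACPICA macros:

#include <stdint.h>
#include <stdlib.h>

/*
 * Zero-extend an array of 32-bit physical addresses (RSDT entries) into
 * a freshly allocated array of 64-bit entries (XSDT layout). Returns
 * NULL on allocation failure; the caller owns and frees the result.
 */
static uint64_t *widen_rsdt_entries(const uint32_t *src, size_t count)
{
	uint64_t *dst = calloc(count, sizeof(*dst));
	size_t i;

	if (!dst)
		return NULL;

	for (i = 0; i < count; i++)
		dst[i] = (uint64_t)src[i];

	return dst;
}
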
179/*******************************************************************************
180 *
181 * FUNCTION: acpi_tb_init_generic_address
182 *
183 * PARAMETERS: new_gas_struct - GAS struct to be initialized
184 * register_bit_width - Width of this register
185 * Address - Address of the register
186 *
187 * RETURN: None
188 *
189 * DESCRIPTION: Initialize a GAS structure.
190 *
191 ******************************************************************************/
192
193static void
194acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
195 u8 register_bit_width,
196 acpi_physical_address address)
197{
198
199 ACPI_STORE_ADDRESS(new_gas_struct->address, address);
200
201 new_gas_struct->address_space_id = ACPI_ADR_SPACE_SYSTEM_IO;
202 new_gas_struct->register_bit_width = register_bit_width;
203 new_gas_struct->register_bit_offset = 0;
204 new_gas_struct->access_width = 0;
205}
206
207/*******************************************************************************
208 *
209 * FUNCTION: acpi_tb_convert_fadt1
210 *
211 * PARAMETERS: local_fadt - Pointer to new FADT
212 * original_fadt - Pointer to old FADT
213 *
214 * RETURN: None, populates local_fadt
215 *
216 * DESCRIPTION: Convert an ACPI 1.0 FADT to common internal format
217 *
218 ******************************************************************************/
219
220static void
221acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
222 struct fadt_descriptor_rev1 *original_fadt)
223{
224
225	/* ACPI 1.0 FADT */
226	/* The BIOS-stored FADT should agree with Revision 1.0 */
227 acpi_fadt_is_v1 = 1;
228
229 /*
230 * Copy the table header and the common part of the tables.
231 *
232 * The 2.0 table is an extension of the 1.0 table, so the entire 1.0
233 * table can be copied first, then expand some fields to 64 bits.
234 */
235 ACPI_MEMCPY(local_fadt, original_fadt,
236 sizeof(struct fadt_descriptor_rev1));
237
238 /* Convert table pointers to 64-bit fields */
239
240 ACPI_STORE_ADDRESS(local_fadt->xfirmware_ctrl,
241 local_fadt->V1_firmware_ctrl);
242 ACPI_STORE_ADDRESS(local_fadt->Xdsdt, local_fadt->V1_dsdt);
243
244 /*
245 * System Interrupt Model isn't used in ACPI 2.0
246 * (local_fadt->Reserved1 = 0;)
247 */
248
249 /*
250 * This field is set by the OEM to convey the preferred power management
251	 * profile to OSPM. It has no ACPI 1.0 equivalent. Since we don't
252 * know what kind of 32-bit system this is, we will use "unspecified".
253 */
254 local_fadt->prefer_PM_profile = PM_UNSPECIFIED;
255
256 /*
257 * Processor Performance State Control. This is the value OSPM writes to
258 * the SMI_CMD register to assume processor performance state control
259	 * responsibility. There is no 1.0 equivalent, but since many 1.x
260	 * ACPI tables contain _PCT and _PSS, we keep this value unless
261	 * acpi_strict is set.
262 */
263 if (acpi_strict)
264 local_fadt->pstate_cnt = 0;
265
266 /*
267 * Support for the _CST object and C States change notification.
268	 * This data item has no 1.0 equivalent, so leave it zero.
269 */
270 local_fadt->cst_cnt = 0;
271
272 /*
273 * FADT Rev 2 was an interim FADT released between ACPI 1.0 and ACPI 2.0.
274 * It primarily adds the FADT reset mechanism.
275 */
276 if ((original_fadt->revision == 2) &&
277 (original_fadt->length ==
278 sizeof(struct fadt_descriptor_rev2_minus))) {
279 /*
280 * Grab the entire generic address struct, plus the 1-byte reset value
281 * that immediately follows.
282 */
283 ACPI_MEMCPY(&local_fadt->reset_register,
284 &(ACPI_CAST_PTR(struct fadt_descriptor_rev2_minus,
285 original_fadt))->reset_register,
286 sizeof(struct acpi_generic_address) + 1);
287 } else {
288 /*
289		 * There is no 1.0 equivalent, and it is highly likely that a
290		 * 1.0 system has legacy device support, so assume it does.
291 */
292 local_fadt->iapc_boot_arch = BAF_LEGACY_DEVICES;
293 }
294
295 /*
296 * Convert the V1.0 block addresses to V2.0 GAS structures
297 */
298 acpi_tb_init_generic_address(&local_fadt->xpm1a_evt_blk,
299 local_fadt->pm1_evt_len,
300 (acpi_physical_address) local_fadt->
301 V1_pm1a_evt_blk);
302 acpi_tb_init_generic_address(&local_fadt->xpm1b_evt_blk,
303 local_fadt->pm1_evt_len,
304 (acpi_physical_address) local_fadt->
305 V1_pm1b_evt_blk);
306 acpi_tb_init_generic_address(&local_fadt->xpm1a_cnt_blk,
307 local_fadt->pm1_cnt_len,
308 (acpi_physical_address) local_fadt->
309 V1_pm1a_cnt_blk);
310 acpi_tb_init_generic_address(&local_fadt->xpm1b_cnt_blk,
311 local_fadt->pm1_cnt_len,
312 (acpi_physical_address) local_fadt->
313 V1_pm1b_cnt_blk);
314 acpi_tb_init_generic_address(&local_fadt->xpm2_cnt_blk,
315 local_fadt->pm2_cnt_len,
316 (acpi_physical_address) local_fadt->
317 V1_pm2_cnt_blk);
318 acpi_tb_init_generic_address(&local_fadt->xpm_tmr_blk,
319 local_fadt->pm_tm_len,
320 (acpi_physical_address) local_fadt->
321 V1_pm_tmr_blk);
322 acpi_tb_init_generic_address(&local_fadt->xgpe0_blk, 0,
323 (acpi_physical_address) local_fadt->
324 V1_gpe0_blk);
325 acpi_tb_init_generic_address(&local_fadt->xgpe1_blk, 0,
326 (acpi_physical_address) local_fadt->
327 V1_gpe1_blk);
328
329 /* Create separate GAS structs for the PM1 Enable registers */
330
331 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
332 (u8) ACPI_DIV_2(acpi_gbl_FADT->
333 pm1_evt_len),
334 (acpi_physical_address)
335 (local_fadt->xpm1a_evt_blk.address +
336 ACPI_DIV_2(acpi_gbl_FADT->pm1_evt_len)));
337
338 /* PM1B is optional; leave null if not present */
339
340 if (local_fadt->xpm1b_evt_blk.address) {
341 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
342 (u8) ACPI_DIV_2(acpi_gbl_FADT->
343 pm1_evt_len),
344 (acpi_physical_address)
345 (local_fadt->xpm1b_evt_blk.
346 address +
347 ACPI_DIV_2(acpi_gbl_FADT->
348 pm1_evt_len)));
349 }
350}
351
352/*******************************************************************************
353 *
354 * FUNCTION: acpi_tb_convert_fadt2
355 *
356 * PARAMETERS: local_fadt - Pointer to new FADT
357 * original_fadt - Pointer to old FADT
358 *
359 * RETURN: None, populates local_fadt
360 *
361 * DESCRIPTION: Convert an ACPI 2.0 FADT to common internal format.
362 * Handles optional "X" fields.
363 *
364 ******************************************************************************/
365
366static void
367acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
368 struct fadt_descriptor *original_fadt)
369{
370
371 /* We have an ACPI 2.0 FADT but we must copy it to our local buffer */
372
373 ACPI_MEMCPY(local_fadt, original_fadt, sizeof(struct fadt_descriptor));
374
375 /*
376 * "X" fields are optional extensions to the original V1.0 fields, so
377 * we must selectively expand V1.0 fields if the corresponding X field
378 * is zero.
379 */
380 if (!(local_fadt->xfirmware_ctrl)) {
381 ACPI_STORE_ADDRESS(local_fadt->xfirmware_ctrl,
382 local_fadt->V1_firmware_ctrl);
383 }
384
385 if (!(local_fadt->Xdsdt)) {
386 ACPI_STORE_ADDRESS(local_fadt->Xdsdt, local_fadt->V1_dsdt);
387 }
388
389 if (!(local_fadt->xpm1a_evt_blk.address)) {
390 acpi_tb_init_generic_address(&local_fadt->xpm1a_evt_blk,
391 local_fadt->pm1_evt_len,
392 (acpi_physical_address)
393 local_fadt->V1_pm1a_evt_blk);
394 }
395
396 if (!(local_fadt->xpm1b_evt_blk.address)) {
397 acpi_tb_init_generic_address(&local_fadt->xpm1b_evt_blk,
398 local_fadt->pm1_evt_len,
399 (acpi_physical_address)
400 local_fadt->V1_pm1b_evt_blk);
401 }
402
403 if (!(local_fadt->xpm1a_cnt_blk.address)) {
404 acpi_tb_init_generic_address(&local_fadt->xpm1a_cnt_blk,
405 local_fadt->pm1_cnt_len,
406 (acpi_physical_address)
407 local_fadt->V1_pm1a_cnt_blk);
408 }
409
410 if (!(local_fadt->xpm1b_cnt_blk.address)) {
411 acpi_tb_init_generic_address(&local_fadt->xpm1b_cnt_blk,
412 local_fadt->pm1_cnt_len,
413 (acpi_physical_address)
414 local_fadt->V1_pm1b_cnt_blk);
415 }
416
417 if (!(local_fadt->xpm2_cnt_blk.address)) {
418 acpi_tb_init_generic_address(&local_fadt->xpm2_cnt_blk,
419 local_fadt->pm2_cnt_len,
420 (acpi_physical_address)
421 local_fadt->V1_pm2_cnt_blk);
422 }
423
424 if (!(local_fadt->xpm_tmr_blk.address)) {
425 acpi_tb_init_generic_address(&local_fadt->xpm_tmr_blk,
426 local_fadt->pm_tm_len,
427 (acpi_physical_address)
428 local_fadt->V1_pm_tmr_blk);
429 }
430
431 if (!(local_fadt->xgpe0_blk.address)) {
432 acpi_tb_init_generic_address(&local_fadt->xgpe0_blk,
433 0,
434 (acpi_physical_address)
435 local_fadt->V1_gpe0_blk);
436 }
437
438 if (!(local_fadt->xgpe1_blk.address)) {
439 acpi_tb_init_generic_address(&local_fadt->xgpe1_blk,
440 0,
441 (acpi_physical_address)
442 local_fadt->V1_gpe1_blk);
443 }
444
445 /* Create separate GAS structs for the PM1 Enable registers */
446
447 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
448 (u8) ACPI_DIV_2(acpi_gbl_FADT->
449 pm1_evt_len),
450 (acpi_physical_address)
451 (local_fadt->xpm1a_evt_blk.address +
452 ACPI_DIV_2(acpi_gbl_FADT->pm1_evt_len)));
453
454 acpi_gbl_xpm1a_enable.address_space_id =
455 local_fadt->xpm1a_evt_blk.address_space_id;
456
457 /* PM1B is optional; leave null if not present */
458
459 if (local_fadt->xpm1b_evt_blk.address) {
460 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
461 (u8) ACPI_DIV_2(acpi_gbl_FADT->
462 pm1_evt_len),
463 (acpi_physical_address)
464 (local_fadt->xpm1b_evt_blk.
465 address +
466 ACPI_DIV_2(acpi_gbl_FADT->
467 pm1_evt_len)));
468
469 acpi_gbl_xpm1b_enable.address_space_id =
470 local_fadt->xpm1b_evt_blk.address_space_id;
471 }
472}
473
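
The rule applied throughout acpi_tb_convert_fadt2 is the same for every register block: trust the 64-bit "X" field when it is non-zero, otherwise promote the 32-bit V1.0 field. A hedged, standalone sketch of that rule (illustrative types, not the real FADT layout):

#include <stdint.h>

/* One legacy 32-bit address and its optional 64-bit "X" counterpart */
struct fadt_field_pair {
	uint32_t v1_address;	/* ACPI 1.0 field */
	uint64_t x_address;	/* ACPI 2.0 extended field, 0 when unused */
};

/* Prefer the populated "X" field, otherwise zero-extend the V1.0 field */
static uint64_t effective_address(const struct fadt_field_pair *pair)
{
	return pair->x_address ? pair->x_address
			       : (uint64_t)pair->v1_address;
}
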
474/*******************************************************************************
475 *
476 * FUNCTION: acpi_tb_convert_table_fadt
477 *
478 * PARAMETERS: None
479 *
480 * RETURN: Status
481 *
482 * DESCRIPTION: Converts a BIOS supplied ACPI 1.0 FADT to a local
483 * ACPI 2.0 FADT. If the BIOS supplied a 2.0 FADT then it is simply
484 * copied to the local FADT. The ACPI CA software uses this
485 * local FADT. Thus a significant amount of special #ifdef
486 * type coding is saved.
487 *
488 ******************************************************************************/
489
490acpi_status acpi_tb_convert_table_fadt(void)
491{
492 struct fadt_descriptor *local_fadt;
493 struct acpi_table_desc *table_desc;
494
495 ACPI_FUNCTION_TRACE(tb_convert_table_fadt);
496
497 /*
498 * acpi_gbl_FADT is valid. Validate the FADT length. The table must be
499 * at least as long as the version 1.0 FADT
500 */
501 if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor_rev1)) {
502 ACPI_ERROR((AE_INFO, "FADT is invalid, too short: 0x%X",
503 acpi_gbl_FADT->length));
504 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
505 }
506
507 /* Allocate buffer for the ACPI 2.0(+) FADT */
508
509 local_fadt = ACPI_ALLOCATE_ZEROED(sizeof(struct fadt_descriptor));
510 if (!local_fadt) {
511 return_ACPI_STATUS(AE_NO_MEMORY);
512 }
513
514 if (acpi_gbl_FADT->revision >= FADT2_REVISION_ID) {
515 if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor)) {
516
517 /* Length is too short to be a V2.0 table */
518
519 ACPI_WARNING((AE_INFO,
520 "Inconsistent FADT length (0x%X) and revision (0x%X), using FADT V1.0 portion of table",
521 acpi_gbl_FADT->length,
522 acpi_gbl_FADT->revision));
523
524 acpi_tb_convert_fadt1(local_fadt,
525 (void *)acpi_gbl_FADT);
526 } else {
527 /* Valid V2.0 table */
528
529 acpi_tb_convert_fadt2(local_fadt, acpi_gbl_FADT);
530 }
531 } else {
532 /* Valid V1.0 table */
533
534 acpi_tb_convert_fadt1(local_fadt, (void *)acpi_gbl_FADT);
535 }
536
537 /* Global FADT pointer will point to the new common V2.0 FADT */
538
539 acpi_gbl_FADT = local_fadt;
540 acpi_gbl_FADT->length = sizeof(struct fadt_descriptor);
541
542 /* Free the original table */
543
544 table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_FADT].next;
545 acpi_tb_delete_single_table(table_desc);
546
547 /* Install the new table */
548
549 table_desc->pointer =
550 ACPI_CAST_PTR(struct acpi_table_header, acpi_gbl_FADT);
551 table_desc->allocation = ACPI_MEM_ALLOCATED;
552 table_desc->length = sizeof(struct fadt_descriptor);
553
554 /* Dump the entire FADT */
555
556 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
557 "Hex dump of common internal FADT, size %d (%X)\n",
558 acpi_gbl_FADT->length, acpi_gbl_FADT->length));
559
560 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(u8, acpi_gbl_FADT),
561 acpi_gbl_FADT->length);
562
563 return_ACPI_STATUS(AE_OK);
564}
565
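
A condensed sketch of the dispatch above: the 2.0 layout is trusted only when both the revision and the table length claim it, otherwise only the 1.0 portion is converted. The thresholds are passed in here to keep the example independent of the real FADT2_REVISION_ID and structure sizes:

#include <stdint.h>

enum fadt_layout { FADT_LAYOUT_V1, FADT_LAYOUT_V2 };

/* Decide which converter to run for a BIOS-supplied FADT */
static enum fadt_layout classify_fadt(uint8_t revision, uint32_t length,
				      uint8_t v2_revision, uint32_t v2_size)
{
	if (revision >= v2_revision && length >= v2_size)
		return FADT_LAYOUT_V2;

	return FADT_LAYOUT_V1;	/* too old or too short for the 2.0 layout */
}
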
566/*******************************************************************************
567 *
568 * FUNCTION: acpi_tb_build_common_facs
569 *
570 * PARAMETERS: table_info - Info for currently installed FACS
571 *
572 * RETURN: Status
573 *
574 * DESCRIPTION: Convert ACPI 1.0 and ACPI 2.0 FACS to a common internal
575 * table format.
576 *
577 ******************************************************************************/
578
579acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
580{
581
582 ACPI_FUNCTION_TRACE(tb_build_common_facs);
583
584 /* Absolute minimum length is 24, but the ACPI spec says 64 */
585
586 if (acpi_gbl_FACS->length < 24) {
587 ACPI_ERROR((AE_INFO, "Invalid FACS table length: 0x%X",
588 acpi_gbl_FACS->length));
589 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
590 }
591
592 if (acpi_gbl_FACS->length < 64) {
593 ACPI_WARNING((AE_INFO,
594 "FACS is shorter than the ACPI specification allows: 0x%X, using anyway",
595 acpi_gbl_FACS->length));
596 }
597
598 /* Copy fields to the new FACS */
599
600 acpi_gbl_common_fACS.global_lock = &(acpi_gbl_FACS->global_lock);
601
602 if ((acpi_gbl_RSDP->revision < 2) ||
603 (acpi_gbl_FACS->length < 32) ||
604 (!(acpi_gbl_FACS->xfirmware_waking_vector))) {
605
606 /* ACPI 1.0 FACS or short table or optional X_ field is zero */
607
608 acpi_gbl_common_fACS.firmware_waking_vector = ACPI_CAST_PTR(u64,
609 &
610 (acpi_gbl_FACS->
611 firmware_waking_vector));
612 acpi_gbl_common_fACS.vector_width = 32;
613 } else {
614 /* ACPI 2.0 FACS with valid X_ field */
615
616 acpi_gbl_common_fACS.firmware_waking_vector =
617 &acpi_gbl_FACS->xfirmware_waking_vector;
618 acpi_gbl_common_fACS.vector_width = 64;
619 }
620
621 return_ACPI_STATUS(AE_OK);
622}
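
The waking-vector selection above reduces to a single predicate. A minimal sketch with illustrative field names (not the real FACS structure):

#include <stdint.h>

struct facs_view {
	uint8_t rsdp_revision;		/* ACPI revision from the RSDP */
	uint32_t facs_length;		/* FACS table length */
	uint64_t x_waking_vector;	/* optional 64-bit vector, 0 if unused */
};

/* Returns the usable vector width: 64 only when the X_ field is trustworthy */
static int waking_vector_width(const struct facs_view *facs)
{
	if (facs->rsdp_revision < 2 ||
	    facs->facs_length < 32 || !facs->x_waking_vector)
		return 32;

	return 64;
}
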
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
new file mode 100644
index 000000000000..807c7116e94b
--- /dev/null
+++ b/drivers/acpi/tables/tbfadt.c
@@ -0,0 +1,434 @@
1/******************************************************************************
2 *
3 * Module Name: tbfadt - FADT table utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfadt")
49
50/* Local prototypes */
51static inline void
52acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
53 u8 bit_width, u64 address);
54
55static void acpi_tb_convert_fadt(void);
56
57static void acpi_tb_validate_fadt(void);
58
59/* Table for conversion of FADT to common internal format and FADT validation */
60
61typedef struct acpi_fadt_info {
62 char *name;
63 u8 target;
64 u8 source;
65 u8 length;
66 u8 type;
67
68} acpi_fadt_info;
69
70#define ACPI_FADT_REQUIRED 1
71#define ACPI_FADT_SEPARATE_LENGTH 2
72
73static struct acpi_fadt_info fadt_info_table[] = {
74 {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block),
75 ACPI_FADT_OFFSET(pm1a_event_block),
76 ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED},
77
78 {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block),
79 ACPI_FADT_OFFSET(pm1b_event_block),
80 ACPI_FADT_OFFSET(pm1_event_length), 0},
81
82 {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block),
83 ACPI_FADT_OFFSET(pm1a_control_block),
84 ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED},
85
86 {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block),
87 ACPI_FADT_OFFSET(pm1b_control_block),
88 ACPI_FADT_OFFSET(pm1_control_length), 0},
89
90 {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block),
91 ACPI_FADT_OFFSET(pm2_control_block),
92 ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH},
93
94 {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block),
95 ACPI_FADT_OFFSET(pm_timer_block),
96 ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED},
97
98 {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block),
99 ACPI_FADT_OFFSET(gpe0_block),
100 ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH},
101
102 {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block),
103 ACPI_FADT_OFFSET(gpe1_block),
104 ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH}
105};
106
107#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
108
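
fadt_info_table drives the conversion and validation loops below: each entry records the byte offsets of an "X" GAS target, its 32-bit source, and its length field, so one loop can process every register block. A toy, self-contained sketch of that offset-table pattern (the struct and fields here are illustrative, not the real FADT):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_fadt {
	uint32_t block_a;
	uint32_t block_b;
	uint8_t len_a;
	uint8_t len_b;
};

struct field_info {
	const char *name;
	size_t addr_offset;	/* offset of the 32-bit address field */
	size_t len_offset;	/* offset of the 8-bit length field */
};

static const struct field_info fields[] = {
	{"BlockA", offsetof(struct toy_fadt, block_a), offsetof(struct toy_fadt, len_a)},
	{"BlockB", offsetof(struct toy_fadt, block_b), offsetof(struct toy_fadt, len_b)},
};

/* One loop handles every block, driven purely by the offset table */
static void dump_blocks(const struct toy_fadt *fadt)
{
	const uint8_t *base = (const uint8_t *)fadt;
	size_t i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t address;
		uint8_t length;

		memcpy(&address, base + fields[i].addr_offset, sizeof(address));
		length = base[fields[i].len_offset];
		printf("%s: address 0x%x, length %u\n",
		       fields[i].name, address, (unsigned)length);
	}
}
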
109/*******************************************************************************
110 *
111 * FUNCTION: acpi_tb_init_generic_address
112 *
113 * PARAMETERS: generic_address - GAS struct to be initialized
114 * bit_width - Width of this register
115 * Address - Address of the register
116 *
117 * RETURN: None
118 *
119 * DESCRIPTION: Initialize a Generic Address Structure (GAS)
120 * See the ACPI specification for a full description and
121 * definition of this structure.
122 *
123 ******************************************************************************/
124
125static inline void
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 bit_width, u64 address)
128{
129
130 /*
131 * The 64-bit Address field is non-aligned in the byte packed
132 * GAS struct.
133 */
134 ACPI_MOVE_64_TO_64(&generic_address->address, &address);
135
136 /* All other fields are byte-wide */
137
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
139 generic_address->bit_width = bit_width;
140 generic_address->bit_offset = 0;
141 generic_address->access_width = 0;
142}
143
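
The comment above is the important part: the Address member of the byte-packed GAS is not naturally aligned, so it must be written with a byte copy rather than a plain 64-bit store. A portable sketch of the unaligned-store part of what ACPI_MOVE_64_TO_64 does (the real macro also deals with byte ordering on some platforms; the helper name is illustrative):

#include <stdint.h>
#include <string.h>

/* Store a 64-bit value into a possibly unaligned destination */
static void store_u64_unaligned(void *dst, uint64_t value)
{
	memcpy(dst, &value, sizeof(value));	/* no alignment assumptions */
}
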
144/*******************************************************************************
145 *
146 * FUNCTION: acpi_tb_parse_fadt
147 *
148 * PARAMETERS: table_index - Index for the FADT
149 * Flags - Flags
150 *
151 * RETURN: None
152 *
153 * DESCRIPTION: Initialize the FADT, DSDT and FACS tables
154 * (FADT contains the addresses of the DSDT and FACS)
155 *
156 ******************************************************************************/
157
158void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags)
159{
160 u32 length;
161 struct acpi_table_header *table;
162
163 /*
164 * The FADT has multiple versions with different lengths,
165 * and it contains pointers to both the DSDT and FACS tables.
166 *
167	 * Get a local copy of the FADT and convert it to a common format.
168	 * Map the entire FADT; it is assumed to be smaller than one page.
169 */
170 length = acpi_gbl_root_table_list.tables[table_index].length;
171
172 table =
173 acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
174 address, length);
175 if (!table) {
176 return;
177 }
178
179 /*
180 * Validate the FADT checksum before we copy the table. Ignore
181 * checksum error as we want to try to get the DSDT and FACS.
182 */
183 (void)acpi_tb_verify_checksum(table, length);
184
185 /* Obtain a local copy of the FADT in common ACPI 2.0+ format */
186
187 acpi_tb_create_local_fadt(table, length);
188
189 /* All done with the real FADT, unmap it */
190
191 acpi_os_unmap_memory(table, length);
192
193 /* Obtain the DSDT and FACS tables via their addresses within the FADT */
194
195 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
196 flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
197
198 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
199 flags, ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
200}
201
202/*******************************************************************************
203 *
204 * FUNCTION: acpi_tb_create_local_fadt
205 *
206 * PARAMETERS: Table - Pointer to BIOS FADT
207 * Length - Length of the table
208 *
209 * RETURN: None
210 *
211 * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
212 * Performs validation on some important FADT fields.
213 *
214 ******************************************************************************/
215
216void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
217{
218
219 /*
220 * Check if the FADT is larger than what we know about (ACPI 2.0 version).
221 * Truncate the table, but make some noise.
222 */
223 if (length > sizeof(struct acpi_table_fadt)) {
224 ACPI_WARNING((AE_INFO,
225 "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX",
226 table->revision, (unsigned)length,
227 sizeof(struct acpi_table_fadt)));
228 }
229
230 /* Copy the entire FADT locally. Zero first for tb_convert_fadt */
231
232 ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
233
234 ACPI_MEMCPY(&acpi_gbl_FADT, table,
235 ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
236
237 /*
238 * 1) Convert the local copy of the FADT to the common internal format
239 * 2) Validate some of the important values within the FADT
240 */
241 acpi_tb_convert_fadt();
242 acpi_tb_validate_fadt();
243}
244
245/*******************************************************************************
246 *
247 * FUNCTION: acpi_tb_convert_fadt
248 *
249 * PARAMETERS: None, uses acpi_gbl_FADT
250 *
251 * RETURN: None
252 *
253 * DESCRIPTION: Converts all versions of the FADT to a common internal format.
254 * -> Expand all 32-bit addresses to 64-bit.
255 *
256 * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
257 * and must contain a copy of the actual FADT.
258 *
259 * ACPICA will use the "X" fields of the FADT for all addresses.
260 *
261 * "X" fields are optional extensions to the original V1.0 fields. Even if
262 * they are present in the structure, they can be optionally not used by
263 * setting them to zero. Therefore, we must selectively expand V1.0 fields
264 * if the corresponding X field is zero.
265 *
266 * For ACPI 1.0 FADTs, all address fields are expanded to the corresponding
267 * "X" fields.
268 *
269 * For ACPI 2.0 FADTs, any "X" fields that are NULL are filled in by
270 * expanding the corresponding ACPI 1.0 field.
271 *
272 ******************************************************************************/
273
274static void acpi_tb_convert_fadt(void)
275{
276 u8 pm1_register_length;
277 struct acpi_generic_address *target;
278 acpi_native_uint i;
279
280 /* Update the local FADT table header length */
281
282 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
283
284 /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */
285
286 if (!acpi_gbl_FADT.Xfacs) {
287 acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
288 }
289
290 if (!acpi_gbl_FADT.Xdsdt) {
291 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
292 }
293
294 /*
295 * Expand the 32-bit V1.0 addresses to the 64-bit "X" generic address
296 * structures as necessary.
297 */
298 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
299 target =
300 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
301 fadt_info_table[i].target);
302
303 /* Expand only if the X target is null */
304
305 if (!target->address) {
306 acpi_tb_init_generic_address(target,
307 *ACPI_ADD_PTR(u8,
308 &acpi_gbl_FADT,
309 fadt_info_table
310 [i].length),
311 (u64) * ACPI_ADD_PTR(u32,
312 &acpi_gbl_FADT,
313 fadt_info_table
314 [i].
315 source));
316 }
317 }
318
319 /*
320 * Calculate separate GAS structs for the PM1 Enable registers.
321 * These addresses do not appear (directly) in the FADT, so it is
322 * useful to calculate them once, here.
323 *
324 * The PM event blocks are split into two register blocks, first is the
325 * PM Status Register block, followed immediately by the PM Enable Register
326 * block. Each is of length (pm1_event_length/2)
327 */
328 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
329
330 /* The PM1A register block is required */
331
332 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
333 pm1_register_length,
334 (acpi_gbl_FADT.xpm1a_event_block.address +
335 pm1_register_length));
336	/* Also copy the space_id of the PM1A event block GAS */
337 acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
338
339 /* The PM1B register block is optional, ignore if not present */
340
341 if (acpi_gbl_FADT.xpm1b_event_block.address) {
342 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
343 pm1_register_length,
344 (acpi_gbl_FADT.xpm1b_event_block.
345 address + pm1_register_length));
346		/* Also copy the space_id of the PM1B event block GAS */
347		acpi_gbl_xpm1b_enable.space_id =
348		    acpi_gbl_FADT.xpm1b_event_block.space_id;
349 }
350}
351
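
The PM1 enable address computed above follows directly from the layout described in the comment: status block first, enable block immediately after, each half of pm1_event_length. A small illustrative helper (not part of ACPICA):

#include <stdint.h>

/* Address of the PM1 Enable block, given the event block base and length */
static uint64_t pm1_enable_address(uint64_t event_block_base,
				   uint8_t event_block_length)
{
	uint8_t half = event_block_length / 2;	/* status and enable halves */

	return event_block_base + half;
}
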
352/******************************************************************************
353 *
354 * FUNCTION: acpi_tb_validate_fadt
355 *
356 * PARAMETERS: None, uses acpi_gbl_FADT
357 *
358 * RETURN: None
359 *
360 * DESCRIPTION: Validate various important fields within the FADT. If a problem
361 * is found, issue a message, but no status is returned.
362 * Used by both the table manager and the disassembler.
363 *
364 * Possible additional checks:
365 * (acpi_gbl_FADT.pm1_event_length >= 4)
366 * (acpi_gbl_FADT.pm1_control_length >= 2)
367 * (acpi_gbl_FADT.pm_timer_length >= 4)
368 * Gpe block lengths must be multiple of 2
369 *
370 ******************************************************************************/
371
372static void acpi_tb_validate_fadt(void)
373{
374 u32 *address32;
375 struct acpi_generic_address *address64;
376 u8 length;
377 acpi_native_uint i;
378
379 /* Examine all of the 64-bit extended address fields (X fields) */
380
381 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
382
383 /* Generate pointers to the 32-bit and 64-bit addresses and get the length */
384
385 address64 =
386 ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
387 fadt_info_table[i].target);
388 address32 =
389 ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
390 fadt_info_table[i].source);
391 length =
392 *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
393 fadt_info_table[i].length);
394
395 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
396 /*
397 * Field is required (Pm1a_event, Pm1a_control, pm_timer).
398 * Both the address and length must be non-zero.
399 */
400 if (!address64->address || !length) {
401 ACPI_ERROR((AE_INFO,
402 "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X",
403 fadt_info_table[i].name,
404 ACPI_FORMAT_UINT64(address64->
405 address),
406 length));
407 }
408 } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
409 /*
410 * Field is optional (PM2Control, GPE0, GPE1) AND has its own
411 * length field. If present, both the address and length must be valid.
412 */
413 if ((address64->address && !length)
414 || (!address64->address && length)) {
415 ACPI_WARNING((AE_INFO,
416 "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X",
417 fadt_info_table[i].name,
418 ACPI_FORMAT_UINT64(address64->
419 address),
420 length));
421 }
422 }
423
424 /* If both 32- and 64-bit addresses are valid (non-zero), they must match */
425
426 if (address64->address && *address32 &&
427 (address64->address != (u64) * address32)) {
428 ACPI_ERROR((AE_INFO,
429 "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X",
430 fadt_info_table[i].name, *address32,
431 ACPI_FORMAT_UINT64(address64->address)));
432 }
433 }
434}
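
The two checks in the loop above boil down to a pair of predicates, sketched here with plain integers (illustrative helper, not ACPICA code): required blocks need both a non-zero address and length, while optional blocks with a separate length field must have the two agree.

#include <stdint.h>

enum block_check { BLOCK_OK, BLOCK_MISSING_REQUIRED, BLOCK_INCONSISTENT };

static enum block_check check_register_block(uint64_t address, uint8_t length,
					     int required)
{
	if (required && (!address || !length))
		return BLOCK_MISSING_REQUIRED;	/* both must be non-zero */

	if (!required && (!address != !length))
		return BLOCK_INCONSISTENT;	/* address and length disagree */

	return BLOCK_OK;
}
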
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
new file mode 100644
index 000000000000..058c064948e1
--- /dev/null
+++ b/drivers/acpi/tables/tbfind.c
@@ -0,0 +1,126 @@
1/******************************************************************************
2 *
3 * Module Name: tbfind - find table
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfind")
49
50/*******************************************************************************
51 *
52 * FUNCTION: acpi_tb_find_table
53 *
54 * PARAMETERS: Signature - String with ACPI table signature
55 * oem_id - String with the table OEM ID
56 * oem_table_id - String with the OEM Table ID
57 * table_index - Where the table index is returned
58 *
59 * RETURN: Status and table index
60 *
61 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
62 * Signature, OEM ID and OEM Table ID. Returns an index that can
63 * be used to get the table header or entire table.
64 *
65 ******************************************************************************/
66acpi_status
67acpi_tb_find_table(char *signature,
68 char *oem_id,
69 char *oem_table_id, acpi_native_uint * table_index)
70{
71 acpi_native_uint i;
72 acpi_status status;
73
74 ACPI_FUNCTION_TRACE(tb_find_table);
75
76 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
77 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
78 signature, ACPI_NAME_SIZE)) {
79
80 /* Not the requested table */
81
82 continue;
83 }
84
85 /* Table with matching signature has been found */
86
87 if (!acpi_gbl_root_table_list.tables[i].pointer) {
88
89 /* Table is not currently mapped, map it */
90
91 status =
92 acpi_tb_verify_table(&acpi_gbl_root_table_list.
93 tables[i]);
94 if (ACPI_FAILURE(status)) {
95 return_ACPI_STATUS(status);
96 }
97
98 if (!acpi_gbl_root_table_list.tables[i].pointer) {
99 continue;
100 }
101 }
102
103 /* Check for table match on all IDs */
104
105 if (!ACPI_MEMCMP
106 (acpi_gbl_root_table_list.tables[i].pointer->signature,
107 signature, ACPI_NAME_SIZE) && (!oem_id[0]
108 ||
109 !ACPI_MEMCMP
110 (acpi_gbl_root_table_list.
111 tables[i].pointer->oem_id,
112 oem_id, ACPI_OEM_ID_SIZE))
113 && (!oem_table_id[0]
114 || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
115 pointer->oem_table_id, oem_table_id,
116 ACPI_OEM_TABLE_ID_SIZE))) {
117 *table_index = i;
118
119 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
120 "Found table [%4.4s]\n", signature));
121 return_ACPI_STATUS(AE_OK);
122 }
123 }
124
125 return_ACPI_STATUS(AE_NOT_FOUND);
126}
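
The matching logic above treats an empty OEM ID or OEM Table ID as a wildcard. A minimal sketch of one such comparison (helper name is illustrative):

#include <string.h>

/* Empty requested ID matches anything; otherwise the first 'size' bytes must be equal */
static int acpi_id_matches(const char *table_id, const char *wanted, size_t size)
{
	return !wanted[0] || !memcmp(table_id, wanted, size);
}
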
diff --git a/drivers/acpi/tables/tbget.c b/drivers/acpi/tables/tbget.c
deleted file mode 100644
index 11e2d4454e05..000000000000
--- a/drivers/acpi/tables/tbget.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbget - ACPI Table get* routines
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbget")
49
50/* Local prototypes */
51static acpi_status
52acpi_tb_get_this_table(struct acpi_pointer *address,
53 struct acpi_table_header *header,
54 struct acpi_table_desc *table_info);
55
56static acpi_status
57acpi_tb_table_override(struct acpi_table_header *header,
58 struct acpi_table_desc *table_info);
59
60/*******************************************************************************
61 *
62 * FUNCTION: acpi_tb_get_table
63 *
64 * PARAMETERS: Address - Address of table to retrieve. Can be
65 * Logical or Physical
66 * table_info - Where table info is returned
67 *
 68 * RETURN: Status
69 *
70 * DESCRIPTION: Get entire table of unknown size.
71 *
72 ******************************************************************************/
73
74acpi_status
75acpi_tb_get_table(struct acpi_pointer *address,
76 struct acpi_table_desc *table_info)
77{
78 acpi_status status;
79 struct acpi_table_header header;
80
81 ACPI_FUNCTION_TRACE(tb_get_table);
82
83 /* Get the header in order to get signature and table size */
84
85 status = acpi_tb_get_table_header(address, &header);
86 if (ACPI_FAILURE(status)) {
87 return_ACPI_STATUS(status);
88 }
89
90 /* Get the entire table */
91
92 status = acpi_tb_get_table_body(address, &header, table_info);
93 if (ACPI_FAILURE(status)) {
94 ACPI_EXCEPTION((AE_INFO, status,
95 "Could not get ACPI table (size %X)",
96 header.length));
97 return_ACPI_STATUS(status);
98 }
99
100 return_ACPI_STATUS(AE_OK);
101}
102
103/*******************************************************************************
104 *
105 * FUNCTION: acpi_tb_get_table_header
106 *
107 * PARAMETERS: Address - Address of table to retrieve. Can be
108 * Logical or Physical
109 * return_header - Where the table header is returned
110 *
111 * RETURN: Status
112 *
113 * DESCRIPTION: Get an ACPI table header. Works in both physical or virtual
114 * addressing mode. Works with both physical or logical pointers.
115 * Table is either copied or mapped, depending on the pointer
116 * type and mode of the processor.
117 *
118 ******************************************************************************/
119
120acpi_status
121acpi_tb_get_table_header(struct acpi_pointer *address,
122 struct acpi_table_header *return_header)
123{
124 acpi_status status = AE_OK;
125 struct acpi_table_header *header = NULL;
126
127 ACPI_FUNCTION_TRACE(tb_get_table_header);
128
129 /*
130 * Flags contains the current processor mode (Virtual or Physical
131 * addressing) The pointer_type is either Logical or Physical
132 */
133 switch (address->pointer_type) {
134 case ACPI_PHYSMODE_PHYSPTR:
135 case ACPI_LOGMODE_LOGPTR:
136
137 /* Pointer matches processor mode, copy the header */
138
139 ACPI_MEMCPY(return_header, address->pointer.logical,
140 sizeof(struct acpi_table_header));
141 break;
142
143 case ACPI_LOGMODE_PHYSPTR:
144
145 /* Create a logical address for the physical pointer */
146
147 status = acpi_os_map_memory(address->pointer.physical,
148 sizeof(struct acpi_table_header),
149 (void *)&header);
150 if (ACPI_FAILURE(status)) {
151 ACPI_ERROR((AE_INFO,
152 "Could not map memory at %8.8X%8.8X for table header",
153 ACPI_FORMAT_UINT64(address->pointer.
154 physical)));
155 return_ACPI_STATUS(status);
156 }
157
158 /* Copy header and delete mapping */
159
160 ACPI_MEMCPY(return_header, header,
161 sizeof(struct acpi_table_header));
162 acpi_os_unmap_memory(header, sizeof(struct acpi_table_header));
163 break;
164
165 default:
166
167 ACPI_ERROR((AE_INFO, "Invalid address flags %X",
168 address->pointer_type));
169 return_ACPI_STATUS(AE_BAD_PARAMETER);
170 }
171
172 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Table Signature: [%4.4s]\n",
173 return_header->signature));
174
175 return_ACPI_STATUS(AE_OK);
176}
177
178/*******************************************************************************
179 *
180 * FUNCTION: acpi_tb_get_table_body
181 *
182 * PARAMETERS: Address - Address of table to retrieve. Can be
183 * Logical or Physical
184 * Header - Header of the table to retrieve
185 * table_info - Where the table info is returned
186 *
187 * RETURN: Status
188 *
189 * DESCRIPTION: Get an entire ACPI table with support to allow the host OS to
190 * replace the table with a newer version (table override.)
191 * Works in both physical or virtual
192 * addressing mode. Works with both physical or logical pointers.
193 * Table is either copied or mapped, depending on the pointer
194 * type and mode of the processor.
195 *
196 ******************************************************************************/
197
198acpi_status
199acpi_tb_get_table_body(struct acpi_pointer *address,
200 struct acpi_table_header *header,
201 struct acpi_table_desc *table_info)
202{
203 acpi_status status;
204
205 ACPI_FUNCTION_TRACE(tb_get_table_body);
206
207 if (!table_info || !address) {
208 return_ACPI_STATUS(AE_BAD_PARAMETER);
209 }
210
211 /* Attempt table override. */
212
213 status = acpi_tb_table_override(header, table_info);
214 if (ACPI_SUCCESS(status)) {
215
216 /* Table was overridden by the host OS */
217
218 return_ACPI_STATUS(status);
219 }
220
221 /* No override, get the original table */
222
223 status = acpi_tb_get_this_table(address, header, table_info);
224 return_ACPI_STATUS(status);
225}
226
227/*******************************************************************************
228 *
229 * FUNCTION: acpi_tb_table_override
230 *
231 * PARAMETERS: Header - Pointer to table header
232 * table_info - Return info if table is overridden
233 *
234 * RETURN: Status
235 *
236 * DESCRIPTION: Attempts override of current table with a new one if provided
237 * by the host OS.
238 *
239 ******************************************************************************/
240
241static acpi_status
242acpi_tb_table_override(struct acpi_table_header *header,
243 struct acpi_table_desc *table_info)
244{
245 struct acpi_table_header *new_table;
246 acpi_status status;
247 struct acpi_pointer address;
248
249 ACPI_FUNCTION_TRACE(tb_table_override);
250
251 /*
252 * The OSL will examine the header and decide whether to override this
253 * table. If it decides to override, a table will be returned in new_table,
254 * which we will then copy.
255 */
256 status = acpi_os_table_override(header, &new_table);
257 if (ACPI_FAILURE(status)) {
258
259 /* Some severe error from the OSL, but we basically ignore it */
260
261 ACPI_EXCEPTION((AE_INFO, status,
262 "Could not override ACPI table"));
263 return_ACPI_STATUS(status);
264 }
265
266 if (!new_table) {
267
268 /* No table override */
269
270 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
271 }
272
273 /*
274 * We have a new table to override the old one. Get a copy of
275 * the new one. We know that the new table has a logical pointer.
276 */
277 address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING;
278 address.pointer.logical = new_table;
279
280 status = acpi_tb_get_this_table(&address, new_table, table_info);
281 if (ACPI_FAILURE(status)) {
282 ACPI_EXCEPTION((AE_INFO, status, "Could not copy ACPI table"));
283 return_ACPI_STATUS(status);
284 }
285
286 /* Copy the table info */
287
288 ACPI_INFO((AE_INFO, "Table [%4.4s] replaced by host OS",
289 table_info->pointer->signature));
290
291 return_ACPI_STATUS(AE_OK);
292}
293
294/*******************************************************************************
295 *
296 * FUNCTION: acpi_tb_get_this_table
297 *
298 * PARAMETERS: Address - Address of table to retrieve. Can be
299 * Logical or Physical
300 * Header - Header of the table to retrieve
301 * table_info - Where the table info is returned
302 *
303 * RETURN: Status
304 *
305 * DESCRIPTION: Get an entire ACPI table. Works in both physical or virtual
306 * addressing mode. Works with both physical or logical pointers.
307 * Table is either copied or mapped, depending on the pointer
308 * type and mode of the processor.
309 *
310 ******************************************************************************/
311
312static acpi_status
313acpi_tb_get_this_table(struct acpi_pointer *address,
314 struct acpi_table_header *header,
315 struct acpi_table_desc *table_info)
316{
317 struct acpi_table_header *full_table = NULL;
318 u8 allocation;
319 acpi_status status = AE_OK;
320
321 ACPI_FUNCTION_TRACE(tb_get_this_table);
322
323 /* Validate minimum length */
324
325 if (header->length < sizeof(struct acpi_table_header)) {
326 ACPI_ERROR((AE_INFO,
327 "Table length (%X) is smaller than minimum (%zX)",
328 header->length, sizeof(struct acpi_table_header)));
329
330 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
331 }
332
333 /*
334 * Flags contains the current processor mode (Virtual or Physical
335 * addressing) The pointer_type is either Logical or Physical
336 */
337 switch (address->pointer_type) {
338 case ACPI_PHYSMODE_PHYSPTR:
339 case ACPI_LOGMODE_LOGPTR:
340
341 /* Pointer matches processor mode, copy the table to a new buffer */
342
343 full_table = ACPI_ALLOCATE(header->length);
344 if (!full_table) {
345 ACPI_ERROR((AE_INFO,
346 "Could not allocate table memory for [%4.4s] length %X",
347 header->signature, header->length));
348 return_ACPI_STATUS(AE_NO_MEMORY);
349 }
350
351 /* Copy the entire table (including header) to the local buffer */
352
353 ACPI_MEMCPY(full_table, address->pointer.logical,
354 header->length);
355
356 /* Save allocation type */
357
358 allocation = ACPI_MEM_ALLOCATED;
359 break;
360
361 case ACPI_LOGMODE_PHYSPTR:
362
363 /*
364 * Just map the table's physical memory
365 * into our address space.
366 */
367 status = acpi_os_map_memory(address->pointer.physical,
368 (acpi_size) header->length,
369 ACPI_CAST_PTR(void, &full_table));
370 if (ACPI_FAILURE(status)) {
371 ACPI_ERROR((AE_INFO,
372 "Could not map memory for table [%4.4s] at %8.8X%8.8X for length %X",
373 header->signature,
374 ACPI_FORMAT_UINT64(address->pointer.
375 physical),
376 header->length));
377 return (status);
378 }
379
380 /* Save allocation type */
381
382 allocation = ACPI_MEM_MAPPED;
383 break;
384
385 default:
386
387 ACPI_ERROR((AE_INFO, "Invalid address flags %X",
388 address->pointer_type));
389 return_ACPI_STATUS(AE_BAD_PARAMETER);
390 }
391
392 /*
393 * Validate checksum for _most_ tables,
394 * even the ones whose signature we don't recognize
395 */
396 if (table_info->type != ACPI_TABLE_ID_FACS) {
397 status = acpi_tb_verify_table_checksum(full_table);
398
399#if (!ACPI_CHECKSUM_ABORT)
400 if (ACPI_FAILURE(status)) {
401
402 /* Ignore the error if configuration says so */
403
404 status = AE_OK;
405 }
406#endif
407 }
408
409 /* Return values */
410
411 table_info->pointer = full_table;
412 table_info->length = (acpi_size) header->length;
413 table_info->allocation = allocation;
414
415 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
416 "Found table [%4.4s] at %8.8X%8.8X, mapped/copied to %p\n",
417 full_table->signature,
418 ACPI_FORMAT_UINT64(address->pointer.physical),
419 full_table));
420
421 return_ACPI_STATUS(status);
422}
423
424/*******************************************************************************
425 *
426 * FUNCTION: acpi_tb_get_table_ptr
427 *
428 * PARAMETERS: table_type - one of the defined table types
429 * Instance - Which table of this type
430 * return_table - Where a pointer to the requested
431 * table is returned
432 *
433 * RETURN: Status
434 *
435 * DESCRIPTION: This function is called to get the pointer to an ACPI table.
436 *
437 ******************************************************************************/
438
439acpi_status
440acpi_tb_get_table_ptr(acpi_table_type table_type,
441 u32 instance, struct acpi_table_header **return_table)
442{
443 struct acpi_table_desc *table_desc;
444 u32 i;
445
446 ACPI_FUNCTION_TRACE(tb_get_table_ptr);
447
448 if (table_type > ACPI_TABLE_ID_MAX) {
449 return_ACPI_STATUS(AE_BAD_PARAMETER);
450 }
451
452 /* Check for instance out of range of the current table count */
453
454 if (instance > acpi_gbl_table_lists[table_type].count) {
455 return_ACPI_STATUS(AE_NOT_EXIST);
456 }
457
458 /*
459 * Walk the list to get the desired table
460 * Note: Instance is one-based
461 */
462 table_desc = acpi_gbl_table_lists[table_type].next;
463 for (i = 1; i < instance; i++) {
464 table_desc = table_desc->next;
465 }
466
467 /* We are now pointing to the requested table's descriptor */
468
469 *return_table = table_desc->pointer;
470 return_ACPI_STATUS(AE_OK);
471}
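
The walk above relies on Instance being one-based, so starting the loop counter at 1 lands on the first descriptor without advancing. A generic sketch of the same one-based walk (with a NULL guard added for safety; types are illustrative):

struct list_node {
	struct list_node *next;
};

/* Return the Nth node of a singly linked list, counting from 1 */
static struct list_node *nth_node(struct list_node *head, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n && head; i++)
		head = head->next;

	return head;
}
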
diff --git a/drivers/acpi/tables/tbgetall.c b/drivers/acpi/tables/tbgetall.c
deleted file mode 100644
index ad982112e4c6..000000000000
--- a/drivers/acpi/tables/tbgetall.c
+++ /dev/null
@@ -1,311 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbgetall - Get all required ACPI tables
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbgetall")
49
50/* Local prototypes */
51static acpi_status
52acpi_tb_get_primary_table(struct acpi_pointer *address,
53 struct acpi_table_desc *table_info);
54
55static acpi_status
56acpi_tb_get_secondary_table(struct acpi_pointer *address,
57 acpi_string signature,
58 struct acpi_table_desc *table_info);
59
60/*******************************************************************************
61 *
62 * FUNCTION: acpi_tb_get_primary_table
63 *
64 * PARAMETERS: Address - Physical address of table to retrieve
65 * *table_info - Where the table info is returned
66 *
67 * RETURN: Status
68 *
69 * DESCRIPTION: Maps the physical address of table into a logical address
70 *
71 ******************************************************************************/
72
73static acpi_status
74acpi_tb_get_primary_table(struct acpi_pointer *address,
75 struct acpi_table_desc *table_info)
76{
77 acpi_status status;
78 struct acpi_table_header header;
79
80 ACPI_FUNCTION_TRACE(tb_get_primary_table);
81
82 /* Ignore a NULL address in the RSDT */
83
84 if (!address->pointer.value) {
85 return_ACPI_STATUS(AE_OK);
86 }
87
88 /* Get the header in order to get signature and table size */
89
90 status = acpi_tb_get_table_header(address, &header);
91 if (ACPI_FAILURE(status)) {
92 return_ACPI_STATUS(status);
93 }
94
95 /* Clear the table_info */
96
97 ACPI_MEMSET(table_info, 0, sizeof(struct acpi_table_desc));
98
99 /*
100 * Check the table signature and make sure it is recognized.
101 * Also checks the header checksum
102 */
103 table_info->pointer = &header;
104 status = acpi_tb_recognize_table(table_info, ACPI_TABLE_PRIMARY);
105 if (ACPI_FAILURE(status)) {
106 return_ACPI_STATUS(status);
107 }
108
109 /* Get the entire table */
110
111 status = acpi_tb_get_table_body(address, &header, table_info);
112 if (ACPI_FAILURE(status)) {
113 return_ACPI_STATUS(status);
114 }
115
116 /* Install the table */
117
118 status = acpi_tb_install_table(table_info);
119 return_ACPI_STATUS(status);
120}
121
122/*******************************************************************************
123 *
124 * FUNCTION: acpi_tb_get_secondary_table
125 *
126 * PARAMETERS: Address - Physical address of table to retrieve
127 * *table_info - Where the table info is returned
128 *
129 * RETURN: Status
130 *
131 * DESCRIPTION: Maps the physical address of table into a logical address
132 *
133 ******************************************************************************/
134
135static acpi_status
136acpi_tb_get_secondary_table(struct acpi_pointer *address,
137 acpi_string signature,
138 struct acpi_table_desc *table_info)
139{
140 acpi_status status;
141 struct acpi_table_header header;
142
143 ACPI_FUNCTION_TRACE_STR(tb_get_secondary_table, signature);
144
145 /* Get the header in order to match the signature */
146
147 status = acpi_tb_get_table_header(address, &header);
148 if (ACPI_FAILURE(status)) {
149 return_ACPI_STATUS(status);
150 }
151
152 /* Signature must match request */
153
154 if (!ACPI_COMPARE_NAME(header.signature, signature)) {
155 ACPI_ERROR((AE_INFO,
156 "Incorrect table signature - wanted [%s] found [%4.4s]",
157 signature, header.signature));
158 return_ACPI_STATUS(AE_BAD_SIGNATURE);
159 }
160
161 /*
162 * Check the table signature and make sure it is recognized.
163 * Also checks the header checksum
164 */
165 table_info->pointer = &header;
166 status = acpi_tb_recognize_table(table_info, ACPI_TABLE_SECONDARY);
167 if (ACPI_FAILURE(status)) {
168 return_ACPI_STATUS(status);
169 }
170
171 /* Get the entire table */
172
173 status = acpi_tb_get_table_body(address, &header, table_info);
174 if (ACPI_FAILURE(status)) {
175 return_ACPI_STATUS(status);
176 }
177
178 /* Install the table */
179
180 status = acpi_tb_install_table(table_info);
181 return_ACPI_STATUS(status);
182}
183
184/*******************************************************************************
185 *
186 * FUNCTION: acpi_tb_get_required_tables
187 *
188 * PARAMETERS: None
189 *
190 * RETURN: Status
191 *
192 * DESCRIPTION: Load and validate tables other than the RSDT. The RSDT must
193 * already be loaded and validated.
194 *
195 * Get the minimum set of ACPI tables, namely:
196 *
197 * 1) FADT (via RSDT in loop below)
198 * 2) FACS (via FADT)
199 * 3) DSDT (via FADT)
200 *
201 ******************************************************************************/
202
203acpi_status acpi_tb_get_required_tables(void)
204{
205 acpi_status status = AE_OK;
206 u32 i;
207 struct acpi_table_desc table_info;
208 struct acpi_pointer address;
209
210 ACPI_FUNCTION_TRACE(tb_get_required_tables);
211
212 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%d ACPI tables in RSDT\n",
213 acpi_gbl_rsdt_table_count));
214
215 address.pointer_type = acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
216
217 /*
218 * Loop through all table pointers found in RSDT.
219 * This will NOT include the FACS and DSDT - we must get
220 * them after the loop.
221 *
222 * The only tables we are interested in getting here are the FADT and
223 * any SSDTs.
224 */
225 for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
226
227 /* Get the table address from the common internal XSDT */
228
229 address.pointer.value = acpi_gbl_XSDT->table_offset_entry[i];
230
231 /*
232 * Get the tables needed by this subsystem (FADT and any SSDTs).
233 * NOTE: All other tables are completely ignored at this time.
234 */
235 status = acpi_tb_get_primary_table(&address, &table_info);
236 if ((status != AE_OK) && (status != AE_TABLE_NOT_SUPPORTED)) {
237 ACPI_WARNING((AE_INFO,
238 "%s, while getting table at %8.8X%8.8X",
239 acpi_format_exception(status),
240 ACPI_FORMAT_UINT64(address.pointer.
241 value)));
242 }
243 }
244
245 /* We must have a FADT to continue */
246
247 if (!acpi_gbl_FADT) {
248 ACPI_ERROR((AE_INFO, "No FADT present in RSDT/XSDT"));
249 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
250 }
251
252 /*
253 * Convert the FADT to a common format. This allows earlier revisions of
254 * the table to coexist with newer versions, using common access code.
255 */
256 status = acpi_tb_convert_table_fadt();
257 if (ACPI_FAILURE(status)) {
258 ACPI_ERROR((AE_INFO,
259 "Could not convert FADT to internal common format"));
260 return_ACPI_STATUS(status);
261 }
262
263 /* Get the FACS (Pointed to by the FADT) */
264
265 address.pointer.value = acpi_gbl_FADT->xfirmware_ctrl;
266
267 status = acpi_tb_get_secondary_table(&address, FACS_SIG, &table_info);
268 if (ACPI_FAILURE(status)) {
269 ACPI_EXCEPTION((AE_INFO, status,
270 "Could not get/install the FACS"));
271 return_ACPI_STATUS(status);
272 }
273
274 /*
275 * Create the common FACS pointer table
276 * (Contains pointers to the original table)
277 */
278 status = acpi_tb_build_common_facs(&table_info);
279 if (ACPI_FAILURE(status)) {
280 return_ACPI_STATUS(status);
281 }
282
283 /* Get/install the DSDT (Pointed to by the FADT) */
284
285 address.pointer.value = acpi_gbl_FADT->Xdsdt;
286
287 status = acpi_tb_get_secondary_table(&address, DSDT_SIG, &table_info);
288 if (ACPI_FAILURE(status)) {
289 ACPI_ERROR((AE_INFO, "Could not get/install the DSDT"));
290 return_ACPI_STATUS(status);
291 }
292
293 /* Set Integer Width (32/64) based upon DSDT revision */
294
295 acpi_ut_set_integer_width(acpi_gbl_DSDT->revision);
296
297 /* Dump the entire DSDT */
298
299 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
300 "Hex dump of entire DSDT, size %d (0x%X), Integer width = %d\n",
301 acpi_gbl_DSDT->length, acpi_gbl_DSDT->length,
302 acpi_gbl_integer_bit_width));
303
304 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(u8, acpi_gbl_DSDT),
305 acpi_gbl_DSDT->length);
306
307 /* Always delete the RSDP mapping; we are done with it */
308
309 acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_RSDP);
310 return_ACPI_STATUS(status);
311}
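
The function above pulls in the minimum table set in a fixed order: the FADT (and any SSDTs) while walking the RSDT/XSDT entries, then the FACS and DSDT through the pointers carried inside the FADT. A standalone sketch of that dependency chain follows; the toy_* structures and the pretend physical address space are simplified stand-ins for illustration, not the ACPICA types.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-ins for the ACPI structures (not the ACPICA types) */
struct toy_table { char signature[5]; };
struct toy_fadt  { char signature[5]; uint64_t xfirmware_ctrl; uint64_t xdsdt; };

/* Hypothetical "physical address space": an index into this array stands
 * in for a physical address that would normally have to be mapped. */
static struct toy_table phys_space[8] = {
	{"FACP"}, {"SSDT"}, {"FACS"}, {"DSDT"}
};

int main(void)
{
	/* 1) Walk the root table entries looking for the FADT ("FACP") */
	uint64_t rsdt_entries[] = { 0, 1 };		/* FADT and one SSDT */
	struct toy_fadt fadt = { "FACP", 2, 3 };	/* points at FACS/DSDT */
	int have_fadt = 0;
	size_t i;

	for (i = 0; i < sizeof(rsdt_entries) / sizeof(rsdt_entries[0]); i++) {
		struct toy_table *t = &phys_space[rsdt_entries[i]];
		printf("root entry %zu -> [%s]\n", i, t->signature);
		if (!strncmp(t->signature, "FACP", 4))
			have_fadt = 1;
	}

	if (!have_fadt) {
		fprintf(stderr, "No FADT present in RSDT/XSDT\n");
		return 1;
	}

	/* 2) FACS and DSDT are not root table entries; they are reached
	 *    only through pointers carried inside the FADT. */
	printf("FACS via FADT -> [%s]\n",
	       phys_space[fadt.xfirmware_ctrl].signature);
	printf("DSDT via FADT -> [%s]\n", phys_space[fadt.xdsdt].signature);
	return 0;
}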
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 1668a232fb67..0e7b121a99ce 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,510 +42,498 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h>
45#include <acpi/actables.h> 46#include <acpi/actables.h>
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbinstal") 49ACPI_MODULE_NAME("tbinstal")
49 50
50/* Local prototypes */ 51/******************************************************************************
51static acpi_status
52acpi_tb_match_signature(char *signature,
53 struct acpi_table_desc *table_info, u8 search_type);
54
55/*******************************************************************************
56 * 52 *
57 * FUNCTION: acpi_tb_match_signature 53 * FUNCTION: acpi_tb_verify_table
58 * 54 *
59 * PARAMETERS: Signature - Table signature to match 55 * PARAMETERS: table_desc - table
60 * table_info - Return data
61 * search_type - Table type to match (primary/secondary)
62 * 56 *
63 * RETURN: Status 57 * RETURN: Status
64 * 58 *
65 * DESCRIPTION: Compare signature against the list of "ACPI-subsystem-owned" 59 * DESCRIPTION: This function is called to verify and map a table
66 * tables (DSDT/FADT/SSDT, etc.) Returns the table_type_id on match.
67 * 60 *
68 ******************************************************************************/ 61 *****************************************************************************/
69 62acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
70static acpi_status
71acpi_tb_match_signature(char *signature,
72 struct acpi_table_desc *table_info, u8 search_type)
73{ 63{
74 acpi_native_uint i; 64 acpi_status status = AE_OK;
75 65
76 ACPI_FUNCTION_TRACE(tb_match_signature); 66 ACPI_FUNCTION_TRACE(tb_verify_table);
77 67
78 /* Search for a signature match among the known table types */ 68 /* Map the table if necessary */
79 69
80 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) { 70 if (!table_desc->pointer) {
81 if (!(acpi_gbl_table_data[i].flags & search_type)) { 71 if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
82 continue; 72 ACPI_TABLE_ORIGIN_MAPPED) {
73 table_desc->pointer =
74 acpi_os_map_memory(table_desc->address,
75 table_desc->length);
83 } 76 }
77 if (!table_desc->pointer) {
78 return_ACPI_STATUS(AE_NO_MEMORY);
79 }
80 }
84 81
85 if (!ACPI_STRNCMP(signature, acpi_gbl_table_data[i].signature, 82 /* FACS is the odd table, has no standard ACPI header and no checksum */
86 acpi_gbl_table_data[i].sig_length)) {
87
88 /* Found a signature match, return index if requested */
89 83
90 if (table_info) { 84 if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
91 table_info->type = (u8) i;
92 }
93 85
94 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 86 /* Always calculate checksum, ignore bad checksum if requested */
95 "Table [%4.4s] is an ACPI table consumed by the core subsystem\n",
96 (char *)acpi_gbl_table_data[i].
97 signature));
98 87
99 return_ACPI_STATUS(AE_OK); 88 status =
100 } 89 acpi_tb_verify_checksum(table_desc->pointer,
90 table_desc->length);
101 } 91 }
102 92
103 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 93 return_ACPI_STATUS(status);
104 "Table [%4.4s] is not an ACPI table consumed by the core subsystem - ignored\n",
105 (char *)signature));
106
107 return_ACPI_STATUS(AE_TABLE_NOT_SUPPORTED);
108} 94}
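
The replacement acpi_tb_verify_table maps a table lazily: if the descriptor has no virtual pointer yet and its origin flag says it came from physical memory, the address is mapped now and the pointer cached, and the checksum is then verified for everything except the FACS. A minimal sketch of that map-on-demand pattern, assuming toy_* names; toy_map simply casts, since the demo "physical" address is an ordinary pointer in this process.

#include <stdio.h>
#include <stdint.h>

#define ORIGIN_MAPPED 1

struct toy_table_desc {
	uint8_t *pointer;	/* NULL until the table is mapped   */
	uintptr_t address;	/* "physical" address of the table  */
	uint32_t length;
	uint8_t flags;		/* origin: mapped vs. allocated     */
};

/* Stand-in for acpi_os_map_memory(): here just a cast. */
static uint8_t *toy_map(uintptr_t address, uint32_t length)
{
	(void)length;
	return (uint8_t *)address;
}

/* Map the table on first use and cache the pointer in the descriptor. */
static int toy_verify_table(struct toy_table_desc *desc)
{
	if (!desc->pointer) {
		if ((desc->flags & ORIGIN_MAPPED) == ORIGIN_MAPPED)
			desc->pointer = toy_map(desc->address, desc->length);
		if (!desc->pointer)
			return -1;	/* AE_NO_MEMORY equivalent */
	}
	/* Checksum verification would happen here for everything but the
	 * FACS, which has no standard header and no checksum. */
	return 0;
}

int main(void)
{
	static uint8_t raw_table[36] = "DSDT";
	struct toy_table_desc desc = {
		.pointer = NULL,
		.address = (uintptr_t)raw_table,
		.length  = sizeof(raw_table),
		.flags   = ORIGIN_MAPPED,
	};

	printf("verify: %d, mapped to %p\n", toy_verify_table(&desc),
	       (void *)desc.pointer);
	return 0;
}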
109 95
110/******************************************************************************* 96/*******************************************************************************
111 * 97 *
112 * FUNCTION: acpi_tb_install_table 98 * FUNCTION: acpi_tb_add_table
113 * 99 *
114 * PARAMETERS: table_info - Return value from acpi_tb_get_table_body 100 * PARAMETERS: table_desc - Table descriptor
101 * table_index - Where the table index is returned
115 * 102 *
116 * RETURN: Status 103 * RETURN: Status
117 * 104 *
118 * DESCRIPTION: Install the table into the global data structures. 105 * DESCRIPTION: This function is called to add an ACPI table
119 * 106 *
120 ******************************************************************************/ 107 ******************************************************************************/
121 108
122acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info) 109acpi_status
110acpi_tb_add_table(struct acpi_table_desc *table_desc,
111 acpi_native_uint * table_index)
123{ 112{
124 acpi_status status; 113 acpi_native_uint i;
125 114 acpi_native_uint length;
126 ACPI_FUNCTION_TRACE(tb_install_table); 115 acpi_status status = AE_OK;
127 116
128 /* Lock tables while installing */ 117 ACPI_FUNCTION_TRACE(tb_add_table);
129 118
130 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 119 if (!table_desc->pointer) {
131 if (ACPI_FAILURE(status)) { 120 status = acpi_tb_verify_table(table_desc);
132 ACPI_EXCEPTION((AE_INFO, status, 121 if (ACPI_FAILURE(status) || !table_desc->pointer) {
133 "Could not acquire table mutex")); 122 return_ACPI_STATUS(status);
134 return_ACPI_STATUS(status); 123 }
135 } 124 }
136 125
137 /* 126 /* The table must be either an SSDT or a PSDT */
138 * Ignore a table that is already installed. For example, some BIOS 127
139 * ASL code will repeatedly attempt to load the same SSDT. 128 if ((!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT))
140 */ 129 &&
141 status = acpi_tb_is_table_installed(table_info); 130 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)))
142 if (ACPI_FAILURE(status)) { 131 {
143 goto unlock_and_exit; 132 ACPI_ERROR((AE_INFO,
133 "Table has invalid signature [%4.4s], must be SSDT or PSDT",
134 table_desc->pointer->signature));
135 return_ACPI_STATUS(AE_BAD_SIGNATURE);
144 } 136 }
145 137
146 /* Install the table into the global data structure */ 138 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
139
140 /* Check if table is already registered */
141
142 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
143 if (!acpi_gbl_root_table_list.tables[i].pointer) {
144 status =
145 acpi_tb_verify_table(&acpi_gbl_root_table_list.
146 tables[i]);
147 if (ACPI_FAILURE(status)
148 || !acpi_gbl_root_table_list.tables[i].pointer) {
149 continue;
150 }
151 }
152
153 length = ACPI_MIN(table_desc->length,
154 acpi_gbl_root_table_list.tables[i].length);
155 if (ACPI_MEMCMP(table_desc->pointer,
156 acpi_gbl_root_table_list.tables[i].pointer,
157 length)) {
158 continue;
159 }
160
161 /* Table is already registered */
162
163 acpi_tb_delete_table(table_desc);
164 *table_index = i;
165 goto release;
166 }
147 167
148 status = acpi_tb_init_table_descriptor(table_info->type, table_info); 168 /*
169 * Add the table to the global table list
170 */
171 status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
172 table_desc->length, table_desc->flags,
173 table_index);
149 if (ACPI_FAILURE(status)) { 174 if (ACPI_FAILURE(status)) {
150 ACPI_EXCEPTION((AE_INFO, status, 175 goto release;
151 "Could not install table [%4.4s]",
152 table_info->pointer->signature));
153 } 176 }
154 177
155 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s located at %p\n", 178 acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
156 acpi_gbl_table_data[table_info->type].name,
157 table_info->pointer));
158 179
159 unlock_and_exit: 180 release:
160 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 181 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
161 return_ACPI_STATUS(status); 182 return_ACPI_STATUS(status);
162} 183}
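
acpi_tb_add_table above avoids duplicate registrations by comparing the candidate against every table already in the root list: compare up to the shorter of the two lengths and, on a full byte-for-byte match, reuse the existing index instead of storing a second copy. A compact sketch of that check; the toy_* names are illustrative only.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Each registered table is just a byte buffer plus its length here. */
struct toy_entry { const unsigned char *data; size_t length; };

/* Return the index of an already-registered identical table, or -1. */
static int toy_find_duplicate(const struct toy_entry *list, size_t count,
			      const unsigned char *data, size_t length)
{
	size_t i;

	for (i = 0; i < count; i++) {
		size_t cmp_len = length < list[i].length ?
				 length : list[i].length;
		if (memcmp(data, list[i].data, cmp_len) == 0)
			return (int)i;
	}
	return -1;
}

int main(void)
{
	static const unsigned char ssdt_a[] = "SSDT-body-A";
	static const unsigned char ssdt_b[] = "SSDT-body-B";
	struct toy_entry list[] = {
		{ ssdt_a, sizeof(ssdt_a) },
	};

	printf("A again -> index %d (reused)\n",
	       toy_find_duplicate(list, 1, ssdt_a, sizeof(ssdt_a)));
	printf("B       -> index %d (new, would be appended)\n",
	       toy_find_duplicate(list, 1, ssdt_b, sizeof(ssdt_b)));
	return 0;
}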
163 184
164/******************************************************************************* 185/*******************************************************************************
165 * 186 *
166 * FUNCTION: acpi_tb_recognize_table 187 * FUNCTION: acpi_tb_resize_root_table_list
167 * 188 *
168 * PARAMETERS: table_info - Return value from acpi_tb_get_table_body 189 * PARAMETERS: None
169 * search_type - Table type to match (primary/secondary)
170 * 190 *
171 * RETURN: Status 191 * RETURN: Status
172 * 192 *
173 * DESCRIPTION: Check a table signature for a match against known table types 193 * DESCRIPTION: Expand the size of the global table array
174 *
175 * NOTE: All table pointers are validated as follows:
176 * 1) Table pointer must point to valid physical memory
177 * 2) Signature must be 4 ASCII chars, even if we don't recognize the
178 * name
179 * 3) Table must be readable for length specified in the header
180 * 4) Table checksum must be valid (with the exception of the FACS
181 * which has no checksum for some odd reason)
182 * 194 *
183 ******************************************************************************/ 195 ******************************************************************************/
184 196
185acpi_status 197acpi_status acpi_tb_resize_root_table_list(void)
186acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
187{ 198{
188 struct acpi_table_header *table_header; 199 struct acpi_table_desc *tables;
189 acpi_status status;
190 200
191 ACPI_FUNCTION_TRACE(tb_recognize_table); 201 ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
192 202
193 /* Ensure that we have a valid table pointer */ 203 /* allow_resize flag is a parameter to acpi_initialize_tables */
194 204
195 table_header = (struct acpi_table_header *)table_info->pointer; 205 if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) {
196 if (!table_header) { 206 ACPI_ERROR((AE_INFO,
197 return_ACPI_STATUS(AE_BAD_PARAMETER); 207 "Resize of Root Table Array is not allowed"));
208 return_ACPI_STATUS(AE_SUPPORT);
198 } 209 }
199 210
200 /* 211 /* Increase the Table Array size */
201 * We only "recognize" a limited number of ACPI tables -- namely, the 212
202 * ones that are used by the subsystem (DSDT, FADT, etc.) 213 tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size +
203 * 214 ACPI_ROOT_TABLE_SIZE_INCREMENT)
204 * An AE_TABLE_NOT_SUPPORTED means that the table was not recognized. 215 * sizeof(struct acpi_table_desc));
205 * This can be any one of many valid ACPI tables, it just isn't one of 216 if (!tables) {
206 * the tables that is consumed by the core subsystem 217 ACPI_ERROR((AE_INFO,
207 */ 218 "Could not allocate new root table array"));
208 status = acpi_tb_match_signature(table_header->signature, 219 return_ACPI_STATUS(AE_NO_MEMORY);
209 table_info, search_type);
210 if (ACPI_FAILURE(status)) {
211 return_ACPI_STATUS(status);
212 } 220 }
213 221
214 status = acpi_tb_validate_table_header(table_header); 222 /* Copy and free the previous table array */
215 if (ACPI_FAILURE(status)) { 223
216 return_ACPI_STATUS(status); 224 if (acpi_gbl_root_table_list.tables) {
225 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
226 acpi_gbl_root_table_list.size *
227 sizeof(struct acpi_table_desc));
228
229 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
230 ACPI_FREE(acpi_gbl_root_table_list.tables);
231 }
217 } 232 }
218 233
219 /* Return the table type and length via the info struct */ 234 acpi_gbl_root_table_list.tables = tables;
235 acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
236 acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
220 237
221 table_info->length = (acpi_size) table_header->length; 238 return_ACPI_STATUS(AE_OK);
222 return_ACPI_STATUS(status);
223} 239}
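
acpi_tb_resize_root_table_list grows the root table array by a fixed increment rather than doubling: allocate a zeroed, larger array, copy the old entries across, and free the previous array only if it was heap-allocated (the initial array may live in static storage). A self-contained sketch of the same pattern, with hypothetical toy_* names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_SIZE_INCREMENT 4	/* grow by a fixed number of slots */

struct toy_desc { unsigned long address; unsigned int length; };

struct toy_list {
	struct toy_desc *tables;
	unsigned int count;	/* slots in use    */
	unsigned int size;	/* slots allocated */
	int origin_allocated;	/* may the array be freed here? */
};

/* Grow the descriptor array, preserving the existing entries. */
static int toy_resize(struct toy_list *list)
{
	struct toy_desc *bigger;

	bigger = calloc(list->size + TOY_SIZE_INCREMENT, sizeof(*bigger));
	if (!bigger)
		return -1;

	if (list->tables) {
		memcpy(bigger, list->tables, list->size * sizeof(*bigger));
		if (list->origin_allocated)
			free(list->tables);
	}

	list->tables = bigger;
	list->size += TOY_SIZE_INCREMENT;
	list->origin_allocated = 1;
	return 0;
}

int main(void)
{
	static struct toy_desc initial[2];	/* static initial array */
	struct toy_list list = { initial, 2, 2, 0 };

	if (toy_resize(&list) == 0)
		printf("resized: %u slots, heap-backed=%d\n",
		       list.size, list.origin_allocated);
	free(list.tables);
	return 0;
}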
224 240
225/******************************************************************************* 241/*******************************************************************************
226 * 242 *
227 * FUNCTION: acpi_tb_init_table_descriptor 243 * FUNCTION: acpi_tb_store_table
228 * 244 *
229 * PARAMETERS: table_type - The type of the table 245 * PARAMETERS: Address - Table address
230 * table_info - A table info struct 246 * Table - Table header
247 * Length - Table length
248 * Flags - flags
231 * 249 *
232 * RETURN: None. 250 * RETURN: Status and table index.
233 * 251 *
234 * DESCRIPTION: Install a table into the global data structs. 252 * DESCRIPTION: Add an ACPI table to the global table list
235 * 253 *
236 ******************************************************************************/ 254 ******************************************************************************/
237 255
238acpi_status 256acpi_status
239acpi_tb_init_table_descriptor(acpi_table_type table_type, 257acpi_tb_store_table(acpi_physical_address address,
240 struct acpi_table_desc *table_info) 258 struct acpi_table_header *table,
259 u32 length, u8 flags, acpi_native_uint * table_index)
241{ 260{
242 struct acpi_table_list *list_head; 261 acpi_status status = AE_OK;
243 struct acpi_table_desc *table_desc;
244 acpi_status status;
245
246 ACPI_FUNCTION_TRACE_U32(tb_init_table_descriptor, table_type);
247 262
248 /* Allocate a descriptor for this table */ 263 /* Ensure that there is room for the table in the Root Table List */
249 264
250 table_desc = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc)); 265 if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
251 if (!table_desc) { 266 status = acpi_tb_resize_root_table_list();
252 return_ACPI_STATUS(AE_NO_MEMORY); 267 if (ACPI_FAILURE(status)) {
268 return (status);
269 }
253 } 270 }
254 271
255 /* Get a new owner ID for the table */ 272 /* Initialize added table */
273
274 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
275 address = address;
276 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
277 pointer = table;
278 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
279 length;
280 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
281 owner_id = 0;
282 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
283 flags;
284
285 ACPI_MOVE_32_TO_32(&
286 (acpi_gbl_root_table_list.
287 tables[acpi_gbl_root_table_list.count].signature),
288 table->signature);
289
290 *table_index = acpi_gbl_root_table_list.count;
291 acpi_gbl_root_table_list.count++;
292 return (status);
293}
256 294
257 status = acpi_ut_allocate_owner_id(&table_desc->owner_id); 295/*******************************************************************************
258 if (ACPI_FAILURE(status)) { 296 *
259 goto error_exit1; 297 * FUNCTION: acpi_tb_delete_table
260 } 298 *
299 * PARAMETERS: table_desc - Table descriptor
300 *
301 * RETURN: None
302 *
303 * DESCRIPTION: Delete one internal ACPI table
304 *
305 ******************************************************************************/
261 306
262 /* Install the table into the global data structure */ 307void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
308{
309 /* Table must be mapped or allocated */
310 if (!table_desc->pointer) {
311 return;
312 }
313 switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
314 case ACPI_TABLE_ORIGIN_MAPPED:
315 acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
316 break;
317 case ACPI_TABLE_ORIGIN_ALLOCATED:
318 ACPI_FREE(table_desc->pointer);
319 break;
320 default:;
321 }
263 322
264 list_head = &acpi_gbl_table_lists[table_type]; 323 table_desc->pointer = NULL;
324}
265 325
266 /* 326/*******************************************************************************
267 * Two major types of tables: 1) Only one instance is allowed. This 327 *
268 * includes most ACPI tables such as the DSDT. 2) Multiple instances of 328 * FUNCTION: acpi_tb_terminate
269 * the table are allowed. This includes SSDT and PSDTs. 329 *
270 */ 330 * PARAMETERS: None
271 if (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags)) { 331 *
272 /* 332 * RETURN: None
273 * Only one table allowed, and a table has already been installed 333 *
274 * at this location, so return an error. 334 * DESCRIPTION: Delete all internal ACPI tables
275 */ 335 *
276 if (list_head->next) { 336 ******************************************************************************/
277 status = AE_ALREADY_EXISTS;
278 goto error_exit2;
279 }
280 337
281 table_desc->next = list_head->next; 338void acpi_tb_terminate(void)
282 list_head->next = table_desc; 339{
340 acpi_native_uint i;
283 341
284 if (table_desc->next) { 342 ACPI_FUNCTION_TRACE(tb_terminate);
285 table_desc->next->prev = table_desc;
286 }
287 343
288 list_head->count++; 344 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
289 } else {
290 /*
291 * Link the new table in to the list of tables of this type.
292 * Insert at the end of the list, order IS IMPORTANT.
293 *
294 * table_desc->Prev & Next are already NULL from calloc()
295 */
296 list_head->count++;
297
298 if (!list_head->next) {
299 list_head->next = table_desc;
300 } else {
301 table_desc->next = list_head->next;
302 345
303 while (table_desc->next->next) { 346 /* Delete the individual tables */
304 table_desc->next = table_desc->next->next;
305 }
306 347
307 table_desc->next->next = table_desc; 348 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
308 table_desc->prev = table_desc->next; 349 acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
309 table_desc->next = NULL;
310 }
311 } 350 }
312 351
313 /* Finish initialization of the table descriptor */
314
315 table_desc->loaded_into_namespace = FALSE;
316 table_desc->type = (u8) table_type;
317 table_desc->pointer = table_info->pointer;
318 table_desc->length = table_info->length;
319 table_desc->allocation = table_info->allocation;
320 table_desc->aml_start = (u8 *) (table_desc->pointer + 1),
321 table_desc->aml_length = (u32)
322 (table_desc->length - (u32) sizeof(struct acpi_table_header));
323
324 /* 352 /*
325 * Set the appropriate global pointer (if there is one) to point to the 353 * Delete the root table array if allocated locally. Array cannot be
326 * newly installed table 354 * mapped, so we don't need to check for that flag.
327 */ 355 */
328 if (acpi_gbl_table_data[table_type].global_ptr) { 356 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
329 *(acpi_gbl_table_data[table_type].global_ptr) = 357 ACPI_FREE(acpi_gbl_root_table_list.tables);
330 table_info->pointer;
331 } 358 }
332 359
333 /* Return Data */ 360 acpi_gbl_root_table_list.tables = NULL;
334 361 acpi_gbl_root_table_list.flags = 0;
335 table_info->owner_id = table_desc->owner_id; 362 acpi_gbl_root_table_list.count = 0;
336 table_info->installed_desc = table_desc;
337 return_ACPI_STATUS(AE_OK);
338
339 /* Error exit with cleanup */
340
341 error_exit2:
342
343 acpi_ut_release_owner_id(&table_desc->owner_id);
344 363
345 error_exit1: 364 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
346 365 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
347 ACPI_FREE(table_desc);
348 return_ACPI_STATUS(status);
349} 366}
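
acpi_tb_terminate walks the root list and releases each table according to its origin (mapped tables are unmapped, allocated tables are freed), then frees the array itself if it was locally allocated. The sketch below models that teardown with heap-backed tables only; the mapped branch is left as a comment, since a user-space demo has nothing to unmap, and the names are stand-ins.

#include <stdio.h>
#include <stdlib.h>

enum toy_origin { TOY_ORIGIN_MAPPED, TOY_ORIGIN_ALLOCATED };

struct toy_desc {
	unsigned char *pointer;
	unsigned int length;
	enum toy_origin origin;
};

/* Release one table: unmap if it was mapped, free if it was allocated. */
static void toy_delete_table(struct toy_desc *desc)
{
	if (!desc->pointer)
		return;

	switch (desc->origin) {
	case TOY_ORIGIN_ALLOCATED:
		free(desc->pointer);
		break;
	case TOY_ORIGIN_MAPPED:
		/* acpi_os_unmap_memory(desc->pointer, desc->length); */
		break;
	}
	desc->pointer = NULL;
}

int main(void)
{
	unsigned int i, count = 2;
	struct toy_desc *tables = calloc(count, sizeof(*tables));

	if (!tables)
		return 1;

	for (i = 0; i < count; i++) {
		tables[i].pointer = malloc(64);
		tables[i].length = 64;
		tables[i].origin = TOY_ORIGIN_ALLOCATED;
	}

	/* Terminate: delete the individual tables, then the array itself. */
	for (i = 0; i < count; i++)
		toy_delete_table(&tables[i]);
	free(tables);

	printf("all tables released\n");
	return 0;
}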
350 367
351/******************************************************************************* 368/*******************************************************************************
352 * 369 *
353 * FUNCTION: acpi_tb_delete_all_tables 370 * FUNCTION: acpi_tb_delete_namespace_by_owner
354 * 371 *
355 * PARAMETERS: None. 372 * PARAMETERS: table_index - Table index
356 * 373 *
357 * RETURN: None. 374 * RETURN: None
358 * 375 *
359 * DESCRIPTION: Delete all internal ACPI tables 376 * DESCRIPTION: Delete all namespace objects created when this table was loaded.
360 * 377 *
361 ******************************************************************************/ 378 ******************************************************************************/
362 379
363void acpi_tb_delete_all_tables(void) 380void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index)
364{ 381{
365 acpi_table_type type; 382 acpi_owner_id owner_id;
366 383
367 /* 384 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
368 * Free memory allocated for ACPI tables 385 if (table_index < acpi_gbl_root_table_list.count) {
369 * Memory can either be mapped or allocated 386 owner_id =
370 */ 387 acpi_gbl_root_table_list.tables[table_index].owner_id;
371 for (type = 0; type < (ACPI_TABLE_ID_MAX + 1); type++) { 388 } else {
372 acpi_tb_delete_tables_by_type(type); 389 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
390 return;
373 } 391 }
392
393 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
394 acpi_ns_delete_namespace_by_owner(owner_id);
374} 395}
375 396
376/******************************************************************************* 397/*******************************************************************************
377 * 398 *
378 * FUNCTION: acpi_tb_delete_tables_by_type 399 * FUNCTION: acpi_tb_allocate_owner_id
379 * 400 *
380 * PARAMETERS: Type - The table type to be deleted 401 * PARAMETERS: table_index - Table index
381 * 402 *
382 * RETURN: None. 403 * RETURN: Status
383 * 404 *
384 * DESCRIPTION: Delete an internal ACPI table 405 * DESCRIPTION: Allocates owner_id in table_desc
385 * Locks the ACPI table mutex
386 * 406 *
387 ******************************************************************************/ 407 ******************************************************************************/
388 408
389void acpi_tb_delete_tables_by_type(acpi_table_type type) 409acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index)
390{ 410{
391 struct acpi_table_desc *table_desc; 411 acpi_status status = AE_BAD_PARAMETER;
392 u32 count;
393 u32 i;
394 412
395 ACPI_FUNCTION_TRACE_U32(tb_delete_tables_by_type, type); 413 ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
396 414
397 if (type > ACPI_TABLE_ID_MAX) { 415 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
398 return_VOID; 416 if (table_index < acpi_gbl_root_table_list.count) {
399 } 417 status = acpi_ut_allocate_owner_id
400 418 (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
401 if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_TABLES))) {
402 return;
403 }
404
405 /* Clear the appropriate "typed" global table pointer */
406
407 switch (type) {
408 case ACPI_TABLE_ID_RSDP:
409 acpi_gbl_RSDP = NULL;
410 break;
411
412 case ACPI_TABLE_ID_DSDT:
413 acpi_gbl_DSDT = NULL;
414 break;
415
416 case ACPI_TABLE_ID_FADT:
417 acpi_gbl_FADT = NULL;
418 break;
419
420 case ACPI_TABLE_ID_FACS:
421 acpi_gbl_FACS = NULL;
422 break;
423
424 case ACPI_TABLE_ID_XSDT:
425 acpi_gbl_XSDT = NULL;
426 break;
427
428 case ACPI_TABLE_ID_SSDT:
429 case ACPI_TABLE_ID_PSDT:
430 default:
431 break;
432 }
433
434 /*
435 * Free the table
436 * 1) Get the head of the list
437 */
438 table_desc = acpi_gbl_table_lists[type].next;
439 count = acpi_gbl_table_lists[type].count;
440
441 /*
442 * 2) Walk the entire list, deleting both the allocated tables
443 * and the table descriptors
444 */
445 for (i = 0; i < count; i++) {
446 table_desc = acpi_tb_uninstall_table(table_desc);
447 } 419 }
448 420
449 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 421 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
450 return_VOID; 422 return_ACPI_STATUS(status);
451} 423}
452 424
453/******************************************************************************* 425/*******************************************************************************
454 * 426 *
455 * FUNCTION: acpi_tb_delete_single_table 427 * FUNCTION: acpi_tb_release_owner_id
456 * 428 *
457 * PARAMETERS: table_info - A table info struct 429 * PARAMETERS: table_index - Table index
458 * 430 *
459 * RETURN: None. 431 * RETURN: Status
460 * 432 *
461 * DESCRIPTION: Low-level free for a single ACPI table. Handles cases where 433 * DESCRIPTION: Releases owner_id in table_desc
462 * the table was allocated a buffer or was mapped.
463 * 434 *
464 ******************************************************************************/ 435 ******************************************************************************/
465 436
466void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc) 437acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index)
467{ 438{
439 acpi_status status = AE_BAD_PARAMETER;
468 440
469 /* Must have a valid table descriptor and pointer */ 441 ACPI_FUNCTION_TRACE(tb_release_owner_id);
470 442
471 if ((!table_desc) || (!table_desc->pointer)) { 443 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
472 return; 444 if (table_index < acpi_gbl_root_table_list.count) {
445 acpi_ut_release_owner_id(&
446 (acpi_gbl_root_table_list.
447 tables[table_index].owner_id));
448 status = AE_OK;
473 } 449 }
474 450
475 /* Valid table, determine type of memory allocation */ 451 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
476 452 return_ACPI_STATUS(status);
477 switch (table_desc->allocation) {
478 case ACPI_MEM_NOT_ALLOCATED:
479 break;
480
481 case ACPI_MEM_ALLOCATED:
482
483 ACPI_FREE(table_desc->pointer);
484 break;
485
486 case ACPI_MEM_MAPPED:
487
488 acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
489 break;
490
491 default:
492 break;
493 }
494} 453}
495 454
496/******************************************************************************* 455/*******************************************************************************
497 * 456 *
498 * FUNCTION: acpi_tb_uninstall_table 457 * FUNCTION: acpi_tb_get_owner_id
499 * 458 *
500 * PARAMETERS: table_info - A table info struct 459 * PARAMETERS: table_index - Table index
460 * owner_id - Where the table owner_id is returned
501 * 461 *
502 * RETURN: Pointer to the next table in the list (of same type) 462 * RETURN: Status
503 * 463 *
504 * DESCRIPTION: Free the memory associated with an internal ACPI table that 464 * DESCRIPTION: Returns owner_id for the ACPI table
505 * is either installed or has never been installed.
506 * Table mutex should be locked.
507 * 465 *
508 ******************************************************************************/ 466 ******************************************************************************/
509 467
510struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc 468acpi_status
511 *table_desc) 469acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id)
512{ 470{
513 struct acpi_table_desc *next_desc; 471 acpi_status status = AE_BAD_PARAMETER;
514 472
515 ACPI_FUNCTION_TRACE_PTR(tb_uninstall_table, table_desc); 473 ACPI_FUNCTION_TRACE(tb_get_owner_id);
516 474
517 if (!table_desc) { 475 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
518 return_PTR(NULL); 476 if (table_index < acpi_gbl_root_table_list.count) {
477 *owner_id =
478 acpi_gbl_root_table_list.tables[table_index].owner_id;
479 status = AE_OK;
519 } 480 }
520 481
521 /* Unlink the descriptor from the doubly linked list */ 482 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
483 return_ACPI_STATUS(status);
484}
522 485
523 if (table_desc->prev) { 486/*******************************************************************************
524 table_desc->prev->next = table_desc->next; 487 *
525 } else { 488 * FUNCTION: acpi_tb_is_table_loaded
526 /* Is first on list, update list head */ 489 *
490 * PARAMETERS: table_index - Table index
491 *
492 * RETURN: Table Loaded Flag
493 *
494 ******************************************************************************/
527 495
528 acpi_gbl_table_lists[table_desc->type].next = table_desc->next; 496u8 acpi_tb_is_table_loaded(acpi_native_uint table_index)
529 } 497{
498 u8 is_loaded = FALSE;
530 499
531 if (table_desc->next) { 500 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
532 table_desc->next->prev = table_desc->prev; 501 if (table_index < acpi_gbl_root_table_list.count) {
502 is_loaded = (u8)
503 (acpi_gbl_root_table_list.tables[table_index].
504 flags & ACPI_TABLE_IS_LOADED);
533 } 505 }
534 506
535 /* Free the memory allocated for the table itself */ 507 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
536 508 return (is_loaded);
537 acpi_tb_delete_single_table(table_desc); 509}
538
539 /* Free the owner ID associated with this table */
540
541 acpi_ut_release_owner_id(&table_desc->owner_id);
542 510
543 /* Free the table descriptor */ 511/*******************************************************************************
512 *
513 * FUNCTION: acpi_tb_set_table_loaded_flag
514 *
515 * PARAMETERS: table_index - Table index
516 * is_loaded - TRUE if table is loaded, FALSE otherwise
517 *
518 * RETURN: None
519 *
520 * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE.
521 *
522 ******************************************************************************/
544 523
545 next_desc = table_desc->next; 524void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded)
546 ACPI_FREE(table_desc); 525{
547 526
548 /* Return pointer to the next descriptor */ 527 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
528 if (table_index < acpi_gbl_root_table_list.count) {
529 if (is_loaded) {
530 acpi_gbl_root_table_list.tables[table_index].flags |=
531 ACPI_TABLE_IS_LOADED;
532 } else {
533 acpi_gbl_root_table_list.tables[table_index].flags &=
534 ~ACPI_TABLE_IS_LOADED;
535 }
536 }
549 537
550 return_PTR(next_desc); 538 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
551} 539}
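
The last two helpers in this file treat "loaded" as a single flag bit in the descriptor's flags field: set or clear ACPI_TABLE_IS_LOADED under the table mutex, and test it when asked. A tiny illustration of the same bit handling; the flag value and toy_* names are chosen for the demo, not taken from the ACPI headers.

#include <stdio.h>
#include <stdint.h>

#define TOY_TABLE_IS_LOADED 0x08	/* flag bit, value chosen for the demo */

struct toy_desc { uint8_t flags; };

static void toy_set_loaded(struct toy_desc *d, int is_loaded)
{
	if (is_loaded)
		d->flags |= TOY_TABLE_IS_LOADED;
	else
		d->flags &= (uint8_t)~TOY_TABLE_IS_LOADED;
}

static int toy_is_loaded(const struct toy_desc *d)
{
	return (d->flags & TOY_TABLE_IS_LOADED) != 0;
}

int main(void)
{
	struct toy_desc d = { 0 };

	toy_set_loaded(&d, 1);
	printf("loaded=%d\n", toy_is_loaded(&d));
	toy_set_loaded(&d, 0);
	printf("loaded=%d\n", toy_is_loaded(&d));
	return 0;
}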
diff --git a/drivers/acpi/tables/tbrsdt.c b/drivers/acpi/tables/tbrsdt.c
deleted file mode 100644
index 86a5fca9b739..000000000000
--- a/drivers/acpi/tables/tbrsdt.c
+++ /dev/null
@@ -1,307 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbrsdt - ACPI RSDT table utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbrsdt")
49
50/*******************************************************************************
51 *
52 * FUNCTION: acpi_tb_verify_rsdp
53 *
54 * PARAMETERS: Address - RSDP (Pointer to RSDT)
55 *
56 * RETURN: Status
57 *
58 * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table)
59 *
60 ******************************************************************************/
61acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
62{
63 struct acpi_table_desc table_info;
64 acpi_status status;
65 struct rsdp_descriptor *rsdp;
66
67 ACPI_FUNCTION_TRACE(tb_verify_rsdp);
68
69 switch (address->pointer_type) {
70 case ACPI_LOGICAL_POINTER:
71
72 rsdp = address->pointer.logical;
73 break;
74
75 case ACPI_PHYSICAL_POINTER:
76 /*
77 * Obtain access to the RSDP structure
78 */
79 status = acpi_os_map_memory(address->pointer.physical,
80 sizeof(struct rsdp_descriptor),
81 ACPI_CAST_PTR(void, &rsdp));
82 if (ACPI_FAILURE(status)) {
83 return_ACPI_STATUS(status);
84 }
85 break;
86
87 default:
88 return_ACPI_STATUS(AE_BAD_PARAMETER);
89 }
90
91 /* Verify RSDP signature and checksum */
92
93 status = acpi_tb_validate_rsdp(rsdp);
94 if (ACPI_FAILURE(status)) {
95 goto cleanup;
96 }
97
98 /* RSDP is ok. Init the table info */
99
100 table_info.pointer = ACPI_CAST_PTR(struct acpi_table_header, rsdp);
101 table_info.length = sizeof(struct rsdp_descriptor);
102
103 if (address->pointer_type == ACPI_PHYSICAL_POINTER) {
104 table_info.allocation = ACPI_MEM_MAPPED;
105 } else {
106 table_info.allocation = ACPI_MEM_NOT_ALLOCATED;
107 }
108
109 /* Save the table pointers and allocation info */
110
111 status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_RSDP, &table_info);
112 if (ACPI_FAILURE(status)) {
113 goto cleanup;
114 }
115
116 /* Save the RSDP in a global for easy access */
117
118 acpi_gbl_RSDP =
119 ACPI_CAST_PTR(struct rsdp_descriptor, table_info.pointer);
120 return_ACPI_STATUS(status);
121
122 /* Error exit */
123 cleanup:
124
125 if (acpi_gbl_table_flags & ACPI_PHYSICAL_POINTER) {
126 acpi_os_unmap_memory(rsdp, sizeof(struct rsdp_descriptor));
127 }
128 return_ACPI_STATUS(status);
129}
130
131/*******************************************************************************
132 *
133 * FUNCTION: acpi_tb_get_rsdt_address
134 *
135 * PARAMETERS: out_address - Where the address is returned
136 *
137 * RETURN: None, Address
138 *
139 * DESCRIPTION: Extract the address of either the RSDT or XSDT, depending on the
140 * version of the RSDP and whether the XSDT pointer is valid
141 *
142 ******************************************************************************/
143
144void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address)
145{
146
147 ACPI_FUNCTION_ENTRY();
148
149 out_address->pointer_type =
150 acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
151
152 /* Use XSDT if it is present */
153
154 if ((acpi_gbl_RSDP->revision >= 2) &&
155 acpi_gbl_RSDP->xsdt_physical_address) {
156 out_address->pointer.value =
157 acpi_gbl_RSDP->xsdt_physical_address;
158 acpi_gbl_root_table_type = ACPI_TABLE_TYPE_XSDT;
159 } else {
160 /* No XSDT, use the RSDT */
161
162 out_address->pointer.value =
163 acpi_gbl_RSDP->rsdt_physical_address;
164 acpi_gbl_root_table_type = ACPI_TABLE_TYPE_RSDT;
165 }
166}
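
acpi_tb_get_rsdt_address encodes the standard rule for choosing the root table: use the XSDT when the RSDP revision is 2 or higher and the XSDT pointer is non-zero, otherwise fall back to the RSDT. A small standalone sketch of that decision; toy_rsdp keeps only the fields that matter here and the addresses are made up for the example.

#include <stdio.h>
#include <stdint.h>

/* Only the RSDP fields that matter for the root-table choice. */
struct toy_rsdp {
	uint8_t  revision;
	uint32_t rsdt_physical_address;
	uint64_t xsdt_physical_address;
};

/* Prefer the XSDT when it is available; otherwise use the 32-bit RSDT. */
static uint64_t toy_root_table_address(const struct toy_rsdp *rsdp,
				       const char **which)
{
	if (rsdp->revision >= 2 && rsdp->xsdt_physical_address) {
		*which = "XSDT";
		return rsdp->xsdt_physical_address;
	}
	*which = "RSDT";
	return rsdp->rsdt_physical_address;
}

int main(void)
{
	struct toy_rsdp acpi1 = { 0, 0x000FE300u, 0 };
	struct toy_rsdp acpi2 = { 2, 0x000FE300u, 0x00000000FE400000ull };
	const char *which;
	uint64_t addr;

	addr = toy_root_table_address(&acpi1, &which);
	printf("rev 0 -> %s at 0x%llx\n", which, (unsigned long long)addr);
	addr = toy_root_table_address(&acpi2, &which);
	printf("rev 2 -> %s at 0x%llx\n", which, (unsigned long long)addr);
	return 0;
}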
167
168/*******************************************************************************
169 *
170 * FUNCTION: acpi_tb_validate_rsdt
171 *
172 * PARAMETERS: table_ptr - Addressable pointer to the RSDT.
173 *
174 * RETURN: Status
175 *
176 * DESCRIPTION: Validate signature for the RSDT or XSDT
177 *
178 ******************************************************************************/
179
180acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
181{
182 char *signature;
183
184 ACPI_FUNCTION_ENTRY();
185
186 /* Validate minimum length */
187
188 if (table_ptr->length < sizeof(struct acpi_table_header)) {
189 ACPI_ERROR((AE_INFO,
190 "RSDT/XSDT length (%X) is smaller than minimum (%zX)",
191 table_ptr->length,
192 sizeof(struct acpi_table_header)));
193
194 return (AE_INVALID_TABLE_LENGTH);
195 }
196
197 /* Search for appropriate signature, RSDT or XSDT */
198
199 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
200 signature = RSDT_SIG;
201 } else {
202 signature = XSDT_SIG;
203 }
204
205 if (!ACPI_COMPARE_NAME(table_ptr->signature, signature)) {
206
207 /* Invalid RSDT or XSDT signature */
208
209 ACPI_ERROR((AE_INFO,
210 "Invalid signature where RSDP indicates RSDT/XSDT should be located. RSDP:"));
211
212 ACPI_DUMP_BUFFER(acpi_gbl_RSDP, 20);
213
214 ACPI_ERROR((AE_INFO,
215 "RSDT/XSDT signature at %X is invalid",
216 acpi_gbl_RSDP->rsdt_physical_address));
217
218 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
219 ACPI_ERROR((AE_INFO, "Looking for RSDT"));
220 } else {
221 ACPI_ERROR((AE_INFO, "Looking for XSDT"));
222 }
223
224 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(char, table_ptr), 48);
225 return (AE_BAD_SIGNATURE);
226 }
227
228 return (AE_OK);
229}
230
231/*******************************************************************************
232 *
233 * FUNCTION: acpi_tb_get_table_rsdt
234 *
235 * PARAMETERS: None
236 *
237 * RETURN: Status
238 *
239 * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table)
240 *
241 ******************************************************************************/
242
243acpi_status acpi_tb_get_table_rsdt(void)
244{
245 struct acpi_table_desc table_info;
246 acpi_status status;
247 struct acpi_pointer address;
248
249 ACPI_FUNCTION_TRACE(tb_get_table_rsdt);
250
251 /* Get the RSDT/XSDT via the RSDP */
252
253 acpi_tb_get_rsdt_address(&address);
254
255 table_info.type = ACPI_TABLE_ID_XSDT;
256 status = acpi_tb_get_table(&address, &table_info);
257 if (ACPI_FAILURE(status)) {
258 ACPI_EXCEPTION((AE_INFO, status,
259 "Could not get the RSDT/XSDT"));
260 return_ACPI_STATUS(status);
261 }
262
263 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
264 "RSDP located at %p, points to RSDT physical=%8.8X%8.8X\n",
265 acpi_gbl_RSDP,
266 ACPI_FORMAT_UINT64(address.pointer.value)));
267
268 /* Check the RSDT or XSDT signature */
269
270 status = acpi_tb_validate_rsdt(table_info.pointer);
271 if (ACPI_FAILURE(status)) {
272 goto error_cleanup;
273 }
274
275 /* Get the number of tables defined in the RSDT or XSDT */
276
277 acpi_gbl_rsdt_table_count = acpi_tb_get_table_count(acpi_gbl_RSDP,
278 table_info.pointer);
279
280 /* Convert and/or copy to an XSDT structure */
281
282 status = acpi_tb_convert_to_xsdt(&table_info);
283 if (ACPI_FAILURE(status)) {
284 goto error_cleanup;
285 }
286
287 /* Save the table pointers and allocation info */
288
289 status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_XSDT, &table_info);
290 if (ACPI_FAILURE(status)) {
291 goto error_cleanup;
292 }
293
294 acpi_gbl_XSDT =
295 ACPI_CAST_PTR(struct xsdt_descriptor, table_info.pointer);
296
297 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT));
298 return_ACPI_STATUS(status);
299
300 error_cleanup:
301
302 /* Free table allocated by acpi_tb_get_table */
303
304 acpi_tb_delete_single_table(&table_info);
305
306 return_ACPI_STATUS(status);
307}
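
The deleted file relied on acpi_tb_get_table_count to size the RSDT/XSDT walk. The count itself follows directly from the table layout: everything after the 36-byte common ACPI header is an array of entries, 4 bytes each in an RSDT and 8 bytes each in an XSDT. A sketch of that arithmetic; the names are illustrative.

#include <stdio.h>
#include <stdint.h>

#define TOY_ACPI_HEADER_SIZE 36u	/* common ACPI table header is 36 bytes */

/* Number of entries in an RSDT/XSDT given its total length. */
static uint32_t toy_root_entry_count(uint32_t table_length, uint32_t entry_size)
{
	if (table_length <= TOY_ACPI_HEADER_SIZE)
		return 0;
	return (table_length - TOY_ACPI_HEADER_SIZE) / entry_size;
}

int main(void)
{
	/* e.g. an XSDT of 100 bytes: (100 - 36) / 8 = 8 entries */
	printf("RSDT of 76 bytes  -> %u entries\n",
	       toy_root_entry_count(76, 4));
	printf("XSDT of 100 bytes -> %u entries\n",
	       toy_root_entry_count(100, 8));
	return 0;
}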
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index 209a401801e3..1da64b4518c0 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -1,11 +1,11 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Module Name: tbutils - Table manipulation utilities 3 * Module Name: tbutils - table utilities
4 * 4 *
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,137 +48,119 @@
48ACPI_MODULE_NAME("tbutils") 48ACPI_MODULE_NAME("tbutils")
49 49
50/* Local prototypes */ 50/* Local prototypes */
51#ifdef ACPI_OBSOLETE_FUNCTIONS 51static acpi_physical_address
52acpi_status 52acpi_tb_get_root_table_entry(u8 * table_entry,
53acpi_tb_handle_to_object(u16 table_id, struct acpi_table_desc **table_desc); 53 acpi_native_uint table_entry_size);
54#endif
55 54
56/******************************************************************************* 55/*******************************************************************************
57 * 56 *
58 * FUNCTION: acpi_tb_is_table_installed 57 * FUNCTION: acpi_tb_tables_loaded
59 *
60 * PARAMETERS: new_table_desc - Descriptor for new table being installed
61 * 58 *
62 * RETURN: Status - AE_ALREADY_EXISTS if the table is already installed 59 * PARAMETERS: None
63 * 60 *
64 * DESCRIPTION: Determine if an ACPI table is already installed 61 * RETURN: TRUE if required ACPI tables are loaded
65 * 62 *
66 * MUTEX: Table data structures should be locked 63 * DESCRIPTION: Determine if the minimum required ACPI tables are present
64 * (FADT, FACS, DSDT)
67 * 65 *
68 ******************************************************************************/ 66 ******************************************************************************/
69 67
70acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc) 68u8 acpi_tb_tables_loaded(void)
71{ 69{
72 struct acpi_table_desc *table_desc;
73
74 ACPI_FUNCTION_TRACE(tb_is_table_installed);
75 70
76 /* Get the list descriptor and first table descriptor */ 71 if (acpi_gbl_root_table_list.count >= 3) {
77 72 return (TRUE);
78 table_desc = acpi_gbl_table_lists[new_table_desc->type].next; 73 }
79 74
80 /* Examine all installed tables of this type */ 75 return (FALSE);
76}
81 77
82 while (table_desc) { 78/*******************************************************************************
83 /* 79 *
84 * If the table lengths match, perform a full bytewise compare. This 80 * FUNCTION: acpi_tb_print_table_header
85 * means that we will allow tables with duplicate oem_table_id(s), as 81 *
86 * long as the tables are different in some way. 82 * PARAMETERS: Address - Table physical address
87 * 83 * Header - Table header
88 * Checking if the table has been loaded into the namespace means that 84 *
89 * we don't check for duplicate tables during the initial installation 85 * RETURN: None
90 * of tables within the RSDT/XSDT. 86 *
91 */ 87 * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
92 if ((table_desc->loaded_into_namespace) && 88 *
93 (table_desc->pointer->length == 89 ******************************************************************************/
94 new_table_desc->pointer->length)
95 &&
96 (!ACPI_MEMCMP
97 (table_desc->pointer, new_table_desc->pointer,
98 new_table_desc->pointer->length))) {
99
100 /* Match: this table is already installed */
101
102 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
103 "Table [%4.4s] already installed: Rev %X OemTableId [%8.8s]\n",
104 new_table_desc->pointer->signature,
105 new_table_desc->pointer->revision,
106 new_table_desc->pointer->
107 oem_table_id));
108
109 new_table_desc->owner_id = table_desc->owner_id;
110 new_table_desc->installed_desc = table_desc;
111
112 return_ACPI_STATUS(AE_ALREADY_EXISTS);
113 }
114 90
115 /* Get next table on the list */ 91void
92acpi_tb_print_table_header(acpi_physical_address address,
93 struct acpi_table_header *header)
94{
116 95
117 table_desc = table_desc->next; 96 if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
97
98 /* FACS only has signature and length fields of common table header */
99
100 ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X",
101 header->signature, (unsigned long)address,
102 header->length));
103 } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
104
105 /* RSDP has no common fields */
106
107 ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)",
108 (unsigned long)address,
109 (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
110 revision >
111 0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
112 header)->length : 20,
113 ACPI_CAST_PTR(struct acpi_table_rsdp,
114 header)->revision,
115 ACPI_CAST_PTR(struct acpi_table_rsdp,
116 header)->oem_id));
117 } else {
118 /* Standard ACPI table with full common header */
119
120 ACPI_INFO((AE_INFO,
121 "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)",
122 header->signature, (unsigned long)address,
123 header->length, header->revision, header->oem_id,
124 header->oem_table_id, header->oem_revision,
125 header->asl_compiler_id,
126 header->asl_compiler_revision));
118 } 127 }
119
120 return_ACPI_STATUS(AE_OK);
121} 128}
122 129
123/******************************************************************************* 130/*******************************************************************************
124 * 131 *
125 * FUNCTION: acpi_tb_validate_table_header 132 * FUNCTION: acpi_tb_validate_checksum
126 * 133 *
127 * PARAMETERS: table_header - Logical pointer to the table 134 * PARAMETERS: Table - ACPI table to verify
135 * Length - Length of entire table
128 * 136 *
129 * RETURN: Status 137 * RETURN: Status
130 * 138 *
131 * DESCRIPTION: Check an ACPI table header for validity 139 * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns
132 * 140 * an exception on a bad checksum.
133 * NOTE: Table pointers are validated as follows:
134 * 1) Table pointer must point to valid physical memory
135 * 2) Signature must be 4 ASCII chars, even if we don't recognize the
136 * name
137 * 3) Table must be readable for length specified in the header
138 * 4) Table checksum must be valid (with the exception of the FACS
139 * which has no checksum because it contains variable fields)
140 * 141 *
141 ******************************************************************************/ 142 ******************************************************************************/
142 143
143acpi_status 144acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
144acpi_tb_validate_table_header(struct acpi_table_header *table_header)
145{ 145{
146 acpi_name signature; 146 u8 checksum;
147
148 ACPI_FUNCTION_ENTRY();
149
150 /* Verify that this is a valid address */
151
152 if (!acpi_os_readable(table_header, sizeof(struct acpi_table_header))) {
153 ACPI_ERROR((AE_INFO,
154 "Cannot read table header at %p", table_header));
155
156 return (AE_BAD_ADDRESS);
157 }
158 147
159 /* Ensure that the signature is 4 ASCII characters */ 148 /* Compute the checksum on the table */
160 149
161 ACPI_MOVE_32_TO_32(&signature, table_header->signature); 150 checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
162 if (!acpi_ut_valid_acpi_name(signature)) {
163 ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
164 signature));
165 151
166 ACPI_DUMP_BUFFER(table_header, 152 /* Checksum ok? (should be zero) */
167 sizeof(struct acpi_table_header));
168 return (AE_BAD_SIGNATURE);
169 }
170 153
171 /* Validate the table length */ 154 if (checksum) {
155 ACPI_WARNING((AE_INFO,
156 "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
157 table->signature, table->checksum,
158 (u8) (table->checksum - checksum)));
172 159
173 if (table_header->length < sizeof(struct acpi_table_header)) { 160#if (ACPI_CHECKSUM_ABORT)
174 ACPI_ERROR((AE_INFO,
175 "Invalid length 0x%X in table with signature %4.4s",
176 (u32) table_header->length,
177 ACPI_CAST_PTR(char, &signature)));
178 161
179 ACPI_DUMP_BUFFER(table_header, 162 return (AE_BAD_CHECKSUM);
180 sizeof(struct acpi_table_header)); 163#endif
181 return (AE_BAD_HEADER);
182 } 164 }
183 165
184 return (AE_OK); 166 return (AE_OK);
@@ -186,157 +168,320 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
186 168
187/******************************************************************************* 169/*******************************************************************************
188 * 170 *
189 * FUNCTION: acpi_tb_sum_table 171 * FUNCTION: acpi_tb_checksum
190 * 172 *
191 * PARAMETERS: Buffer - Buffer to sum 173 * PARAMETERS: Buffer - Pointer to memory region to be checked
192 * Length - Size of the buffer 174 * Length - Length of this memory region
193 * 175 *
194 * RETURN: 8 bit sum of buffer 176 * RETURN: Checksum (u8)
195 * 177 *
196 * DESCRIPTION: Computes an 8 bit sum of the buffer (length) and returns it. 178 * DESCRIPTION: Calculates the circular checksum of a memory region.
197 * 179 *
198 ******************************************************************************/ 180 ******************************************************************************/
199 181
200u8 acpi_tb_sum_table(void *buffer, u32 length) 182u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length)
201{ 183{
202 acpi_native_uint i;
203 u8 sum = 0; 184 u8 sum = 0;
185 u8 *end = buffer + length;
204 186
205 if (!buffer || !length) { 187 while (buffer < end) {
206 return (0); 188 sum = (u8) (sum + *(buffer++));
207 } 189 }
208 190
209 for (i = 0; i < length; i++) { 191 return sum;
210 sum = (u8) (sum + ((u8 *) buffer)[i]);
211 }
212 return (sum);
213} 192}
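
acpi_tb_checksum above is the usual ACPI 8-bit rule: add every byte of the table modulo 256, and a valid table, checksum byte included, must sum to zero; writing a checksum is just storing the value that makes that true. A runnable sketch of both directions, on a made-up mini "table".

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Sum all bytes of the region modulo 256. */
static uint8_t toy_checksum(const uint8_t *buffer, size_t length)
{
	uint8_t sum = 0;
	const uint8_t *end = buffer + length;

	while (buffer < end)
		sum = (uint8_t)(sum + *buffer++);
	return sum;
}

int main(void)
{
	/* Tiny fake "table": the last byte is the checksum slot. */
	uint8_t table[8] = { 'D', 'E', 'M', 'O', 1, 2, 3, 0 };

	/* Choose the checksum byte so the whole buffer sums to zero. */
	table[7] = (uint8_t)(0 - toy_checksum(table, sizeof(table) - 1));

	printf("stored checksum: 0x%02X\n", table[7]);
	printf("verification sum (should be 0): 0x%02X\n",
	       toy_checksum(table, sizeof(table)));
	return 0;
}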
214 193
215/******************************************************************************* 194/*******************************************************************************
216 * 195 *
217 * FUNCTION: acpi_tb_generate_checksum 196 * FUNCTION: acpi_tb_install_table
218 * 197 *
219 * PARAMETERS: Table - Pointer to a valid ACPI table (with a 198 * PARAMETERS: Address - Physical address of DSDT or FACS
220 * standard ACPI header) 199 * Flags - Flags
200 * Signature - Table signature, NULL if no need to
201 * match
202 * table_index - Index into root table array
221 * 203 *
222 * RETURN: 8 bit checksum of buffer 204 * RETURN: None
223 * 205 *
224 * DESCRIPTION: Computes an 8 bit checksum of the table. 206 * DESCRIPTION: Install an ACPI table into the global data structure.
225 * 207 *
226 ******************************************************************************/ 208 ******************************************************************************/
227 209
228u8 acpi_tb_generate_checksum(struct acpi_table_header * table) 210void
211acpi_tb_install_table(acpi_physical_address address,
212 u8 flags, char *signature, acpi_native_uint table_index)
229{ 213{
230 u8 checksum; 214 struct acpi_table_header *table;
215
216 if (!address) {
217 ACPI_ERROR((AE_INFO,
218 "Null physical address for ACPI table [%s]",
219 signature));
220 return;
221 }
222
223 /* Map just the table header */
224
225 table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
226 if (!table) {
227 return;
228 }
229
230 /* If a particular signature is expected, signature must match */
231
232 if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
233 ACPI_ERROR((AE_INFO,
234 "Invalid signature 0x%X for ACPI table [%s]",
235 *ACPI_CAST_PTR(u32, table->signature), signature));
236 goto unmap_and_exit;
237 }
231 238
232 /* Sum the entire table as-is */ 239 /* Initialize the table entry */
233 240
234 checksum = acpi_tb_sum_table(table, table->length); 241 acpi_gbl_root_table_list.tables[table_index].address = address;
242 acpi_gbl_root_table_list.tables[table_index].length = table->length;
243 acpi_gbl_root_table_list.tables[table_index].flags = flags;
235 244
236 /* Subtract off the existing checksum value in the table */ 245 ACPI_MOVE_32_TO_32(&
246 (acpi_gbl_root_table_list.tables[table_index].
247 signature), table->signature);
237 248
238 checksum = (u8) (checksum - table->checksum); 249 acpi_tb_print_table_header(address, table);
239 250
240 /* Compute the final checksum */ 251 if (table_index == ACPI_TABLE_INDEX_DSDT) {
241 252
242 checksum = (u8) (0 - checksum); 253 /* Global integer width is based upon revision of the DSDT */
243 return (checksum); 254
255 acpi_ut_set_integer_width(table->revision);
256 }
257
258 unmap_and_exit:
259 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
244} 260}
245 261
246/*******************************************************************************
247 *
248 * FUNCTION: acpi_tb_set_checksum
249 *
250 * PARAMETERS: Table - Pointer to a valid ACPI table (with a
251 * standard ACPI header)
252 *
253 * RETURN: None. Sets the table checksum field
254 *
255 * DESCRIPTION: Computes an 8 bit checksum of the table and inserts the
256 * checksum into the table header.
257 *
258 ******************************************************************************/
259
260void acpi_tb_set_checksum(struct acpi_table_header *table)
261{
262
263 table->checksum = acpi_tb_generate_checksum(table);
264}
262/*******************************************************************************
263 *
264 * FUNCTION: acpi_tb_get_root_table_entry
265 *
266 * PARAMETERS: table_entry - Pointer to the RSDT/XSDT table entry
267 * table_entry_size - sizeof 32 or 64 (RSDT or XSDT)
268 *
269 * RETURN: Physical address extracted from the root table
270 *
271 * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
272 * both 32-bit and 64-bit platforms
273 *
274 * NOTE: acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
275 * 64-bit platforms.
276 *
277 ******************************************************************************/
278
279static acpi_physical_address
280acpi_tb_get_root_table_entry(u8 * table_entry,
281 acpi_native_uint table_entry_size)
282{
283 u64 address64;
284
285 /*
286 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
287 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
288 */
289 if (table_entry_size == sizeof(u32)) {
290 /*
291 * 32-bit platform, RSDT: Return 32-bit table entry
292 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
293 */
294 return ((acpi_physical_address)
295 (*ACPI_CAST_PTR(u32, table_entry)));
296 } else {
297 /*
298 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
299 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit
300 */
301 ACPI_MOVE_64_TO_64(&address64, table_entry);
302
303#if ACPI_MACHINE_WIDTH == 32
304 if (address64 > ACPI_UINT32_MAX) {
305
306 /* Will truncate 64-bit address to 32 bits, issue warning */
307
308 ACPI_WARNING((AE_INFO,
309 "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating",
310 ACPI_FORMAT_UINT64(address64)));
311 }
312#endif
313 return ((acpi_physical_address) (address64));
314 }
315}
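A minimal standalone sketch of the entry-extraction logic above, assuming only that RSDT entries are packed 32-bit physical addresses and XSDT entries are packed, possibly unaligned, 64-bit ones. The helper name and the memcpy-based unaligned read are illustrative, not ACPICA code.

#include <stdint.h>
#include <string.h>

/* Illustrative helper: pull one root-table entry of the given size. */
static uint64_t example_root_entry(const uint8_t *entry, size_t entry_size)
{
        if (entry_size == sizeof(uint32_t)) {
                uint32_t addr32;

                memcpy(&addr32, entry, sizeof(addr32));  /* RSDT: 32-bit entry */
                return addr32;
        } else {
                uint64_t addr64;

                memcpy(&addr64, entry, sizeof(addr64));  /* XSDT: may be unaligned */
                return addr64;  /* a 32-bit host would have to truncate and warn */
        }
}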
265 316
266/******************************************************************************* 317/*******************************************************************************
267 * 318 *
268 * FUNCTION: acpi_tb_verify_table_checksum 319 * FUNCTION: acpi_tb_parse_root_table
320 *
321 * PARAMETERS: Rsdp - Pointer to the RSDP
322 * Flags - Flags
269 * 323 *
270 * PARAMETERS: *table_header - ACPI table to verify 324 * RETURN: Status
271 * 325 *
272 * RETURN: 8 bit checksum of table 326 * DESCRIPTION: This function is called to parse the Root System Description
327 * Table (RSDT or XSDT)
273 * 328 *
274 * DESCRIPTION: Generates an 8 bit checksum of table and returns and compares 329 * NOTE: Tables are mapped (not copied) for efficiency. The FACS must
275 * it to the existing checksum value. 330 * be mapped and cannot be copied because it contains the actual
331 * memory location of the ACPI Global Lock.
276 * 332 *
277 ******************************************************************************/ 333 ******************************************************************************/
278 334
279acpi_status 335acpi_status __init
280acpi_tb_verify_table_checksum(struct acpi_table_header *table_header) 336acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
281{ 337{
282 u8 checksum; 338 struct acpi_table_rsdp *rsdp;
339 acpi_native_uint table_entry_size;
340 acpi_native_uint i;
341 u32 table_count;
342 struct acpi_table_header *table;
343 acpi_physical_address address;
344 u32 length;
345 u8 *table_entry;
346 acpi_status status;
347
348 ACPI_FUNCTION_TRACE(tb_parse_root_table);
349
350 /*
351 * Map the entire RSDP and extract the address of the RSDT or XSDT
352 */
353 rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
354 if (!rsdp) {
355 return_ACPI_STATUS(AE_NO_MEMORY);
356 }
283 357
284 ACPI_FUNCTION_TRACE(tb_verify_table_checksum); 358 acpi_tb_print_table_header(rsdp_address,
359 ACPI_CAST_PTR(struct acpi_table_header,
360 rsdp));
285 361
286 /* Compute the checksum on the table */ 362 /* Differentiate between RSDT and XSDT root tables */
287 363
288 checksum = acpi_tb_generate_checksum(table_header); 364 if (rsdp->revision > 1 && rsdp->xsdt_physical_address) {
365 /*
366 * Root table is an XSDT (64-bit physical addresses). We must use the
367 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
368 * the ACPI specification.
369 */
370 address = (acpi_physical_address) rsdp->xsdt_physical_address;
371 table_entry_size = sizeof(u64);
372 } else {
373 /* Root table is an RSDT (32-bit physical addresses) */
289 374
290 /* Checksum ok? */ 375 address = (acpi_physical_address) rsdp->rsdt_physical_address;
376 table_entry_size = sizeof(u32);
377 }
291 378
292 if (checksum == table_header->checksum) { 379 /*
293 return_ACPI_STATUS(AE_OK); 380 * It is not possible to map more than one entry in some environments,
381 * so unmap the RSDP here before mapping other tables
382 */
383 acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
384
385 /* Map the RSDT/XSDT table header to get the full table length */
386
387 table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
388 if (!table) {
389 return_ACPI_STATUS(AE_NO_MEMORY);
294 } 390 }
295 391
296 ACPI_WARNING((AE_INFO, 392 acpi_tb_print_table_header(address, table);
297 "Incorrect checksum in table [%4.4s] - is %2.2X, should be %2.2X",
298 table_header->signature, table_header->checksum,
299 checksum));
300 393
301 return_ACPI_STATUS(AE_BAD_CHECKSUM); 394 /* Get the length of the full table, verify length and map entire table */
302}
303 395
304#ifdef ACPI_OBSOLETE_FUNCTIONS 396 length = table->length;
305/******************************************************************************* 397 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
306 *
307 * FUNCTION: acpi_tb_handle_to_object
308 *
309 * PARAMETERS: table_id - Id for which the function is searching
310 * table_desc - Pointer to return the matching table
311 * descriptor.
312 *
313 * RETURN: Search the tables to find one with a matching table_id and
314 * return a pointer to that table descriptor.
315 *
316 ******************************************************************************/
317 398
318acpi_status 399 if (length < sizeof(struct acpi_table_header)) {
319acpi_tb_handle_to_object(u16 table_id, 400 ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
320 struct acpi_table_desc **return_table_desc) 401 length));
321{ 402 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
322 u32 i; 403 }
323 struct acpi_table_desc *table_desc;
324 404
325 ACPI_FUNCTION_NAME(tb_handle_to_object); 405 table = acpi_os_map_memory(address, length);
406 if (!table) {
407 return_ACPI_STATUS(AE_NO_MEMORY);
408 }
409
410 /* Validate the root table checksum */
411
412 status = acpi_tb_verify_checksum(table, length);
413 if (ACPI_FAILURE(status)) {
414 acpi_os_unmap_memory(table, length);
415 return_ACPI_STATUS(status);
416 }
326 417
327 for (i = 0; i < ACPI_TABLE_MAX; i++) { 418 /* Calculate the number of tables described in the root table */
328 table_desc = acpi_gbl_table_lists[i].next; 419
329 while (table_desc) { 420 table_count =
330 if (table_desc->table_id == table_id) { 421 (u32) ((table->length -
331 *return_table_desc = table_desc; 422 sizeof(struct acpi_table_header)) / table_entry_size);
332 return (AE_OK); 423
424 /*
425 * First two entries in the table array are reserved for the DSDT and FACS,
426 * which are not actually present in the RSDT/XSDT - they come from the FADT
427 */
428 table_entry =
429 ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
430 acpi_gbl_root_table_list.count = 2;
431
432 /*
433 * Initialize the root table array from the RSDT/XSDT
434 */
435 for (i = 0; i < table_count; i++) {
436 if (acpi_gbl_root_table_list.count >=
437 acpi_gbl_root_table_list.size) {
438
439 /* There is no more room in the root table array, attempt resize */
440
441 status = acpi_tb_resize_root_table_list();
442 if (ACPI_FAILURE(status)) {
443 ACPI_WARNING((AE_INFO,
444 "Truncating %u table entries!",
445 (unsigned)
446 (acpi_gbl_root_table_list.size -
447 acpi_gbl_root_table_list.
448 count)));
449 break;
333 } 450 }
451 }
452
453 /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
334 454
335 table_desc = table_desc->next; 455 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
456 address =
457 acpi_tb_get_root_table_entry(table_entry, table_entry_size);
458
459 table_entry += table_entry_size;
460 acpi_gbl_root_table_list.count++;
461 }
462
463 /*
464 * It is not possible to map more than one entry in some environments,
465 * so unmap the root table here before mapping other tables
466 */
467 acpi_os_unmap_memory(table, length);
468
469 /*
470 * Complete the initialization of the root table array by examining
471 * the header of each table
472 */
473 for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
474 acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
475 address, flags, NULL, i);
476
477 /* Special case for FADT - get the DSDT and FACS */
478
479 if (ACPI_COMPARE_NAME
480 (&acpi_gbl_root_table_list.tables[i].signature,
481 ACPI_SIG_FADT)) {
482 acpi_tb_parse_fadt(i, flags);
336 } 483 }
337 } 484 }
338 485
339 ACPI_ERROR((AE_INFO, "TableId=%X does not exist", table_id)); 486 return_ACPI_STATUS(AE_OK);
340 return (AE_BAD_PARAMETER);
341} 487}
342#endif
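As a quick aside on the arithmetic in acpi_tb_parse_root_table above: the entry count is the root-table payload divided by the per-entry size, and the first two slots of the root table array stay reserved for the DSDT and FACS, which are filled in later from the FADT. A hedged sketch with assumed parameter names:

/* 'length' is the full RSDT/XSDT byte length; 'entry_size' is 4 (RSDT) or 8 (XSDT). */
static u32 example_root_entry_count(u32 length, u32 entry_size)
{
        /* Payload after the common ACPI header, divided by the per-entry size */
        return (length - sizeof(struct acpi_table_header)) / entry_size;
}

/* Installation of RSDT/XSDT entries therefore starts with the list count at 2. */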
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 5ba9303293ad..807978d5381a 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -49,80 +49,158 @@
49#define _COMPONENT ACPI_TABLES 49#define _COMPONENT ACPI_TABLES
50ACPI_MODULE_NAME("tbxface") 50ACPI_MODULE_NAME("tbxface")
51 51
52/* Local prototypes */
53static acpi_status acpi_tb_load_namespace(void);
54
52/******************************************************************************* 55/*******************************************************************************
53 * 56 *
54 * FUNCTION: acpi_load_tables 57 * FUNCTION: acpi_allocate_root_table
55 * 58 *
56 * PARAMETERS: None 59 * PARAMETERS: initial_table_count - Size of initial_table_array, in number of
60 * struct acpi_table_desc structures
57 * 61 *
58 * RETURN: Status 62 * RETURN: Status
59 * 63 *
60 * DESCRIPTION: This function is called to load the ACPI tables from the 64 * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
61 * provided RSDT 65 * acpi_initialize_tables.
62 * 66 *
63 ******************************************************************************/ 67 ******************************************************************************/
64acpi_status acpi_load_tables(void) 68
69acpi_status acpi_allocate_root_table(u32 initial_table_count)
65{ 70{
66 struct acpi_pointer rsdp_address;
67 acpi_status status;
68 71
69 ACPI_FUNCTION_TRACE(acpi_load_tables); 72 acpi_gbl_root_table_list.size = initial_table_count;
73 acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
70 74
71 /* Get the RSDP */ 75 return (acpi_tb_resize_root_table_list());
76}
72 77
73 status = acpi_os_get_root_pointer(ACPI_LOGICAL_ADDRESSING, 78/*******************************************************************************
74 &rsdp_address); 79 *
75 if (ACPI_FAILURE(status)) { 80 * FUNCTION: acpi_initialize_tables
76 ACPI_EXCEPTION((AE_INFO, status, "Could not get the RSDP")); 81 *
77 goto error_exit; 82 * PARAMETERS: initial_table_array - Pointer to an array of pre-allocated
78 } 83 * struct acpi_table_desc structures. If NULL, the
84 * array is dynamically allocated.
85 * initial_table_count - Size of initial_table_array, in number of
86 * struct acpi_table_desc structures
87 * allow_realloc - Flag to tell Table Manager if resize of
88 * pre-allocated array is allowed. Ignored
89 * if initial_table_array is NULL.
90 *
91 * RETURN: Status
92 *
93 * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
94 *
95 * NOTE: Allows static allocation of the initial table array in order
96 * to avoid the use of dynamic memory in confined environments
97 * such as the kernel boot sequence where it may not be available.
98 *
99 * If the host OS memory managers are initialized, use NULL for
100 * initial_table_array, and the table will be dynamically allocated.
101 *
102 ******************************************************************************/
79 103
80 /* Map and validate the RSDP */ 104acpi_status __init
105acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
106 u32 initial_table_count, u8 allow_resize)
107{
108 acpi_physical_address rsdp_address;
109 acpi_status status;
81 110
82 acpi_gbl_table_flags = rsdp_address.pointer_type; 111 ACPI_FUNCTION_TRACE(acpi_initialize_tables);
83 112
84 status = acpi_tb_verify_rsdp(&rsdp_address); 113 /*
85 if (ACPI_FAILURE(status)) { 114 * Set up the Root Table Array
86 ACPI_EXCEPTION((AE_INFO, status, "During RSDP validation")); 115 * Allocate the table array if requested
87 goto error_exit; 116 */
117 if (!initial_table_array) {
118 status = acpi_allocate_root_table(initial_table_count);
119 if (ACPI_FAILURE(status)) {
120 return_ACPI_STATUS(status);
121 }
122 } else {
123 /* Root Table Array has been statically allocated by the host */
124
125 ACPI_MEMSET(initial_table_array, 0,
126 initial_table_count *
127 sizeof(struct acpi_table_desc));
128
129 acpi_gbl_root_table_list.tables = initial_table_array;
130 acpi_gbl_root_table_list.size = initial_table_count;
131 acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
132 if (allow_resize) {
133 acpi_gbl_root_table_list.flags |=
134 ACPI_ROOT_ALLOW_RESIZE;
135 }
88 } 136 }
89 137
90 /* Get the RSDT via the RSDP */ 138 /* Get the address of the RSDP */
91 139
92 status = acpi_tb_get_table_rsdt(); 140 rsdp_address = acpi_os_get_root_pointer();
93 if (ACPI_FAILURE(status)) { 141 if (!rsdp_address) {
94 ACPI_EXCEPTION((AE_INFO, status, "Could not load RSDT")); 142 return_ACPI_STATUS(AE_NOT_FOUND);
95 goto error_exit;
96 } 143 }
97 144
98 /* Now get the tables needed by this subsystem (FADT, DSDT, etc.) */ 145 /*
146 * Get the root table (RSDT or XSDT) and extract all entries to the local
147 * Root Table Array. This array contains the information of the RSDT/XSDT
148 * in a common, more usable format.
149 */
150 status =
151 acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED);
152 return_ACPI_STATUS(status);
153}
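For orientation, a hedged sketch of how a host might call the new interface during early boot, before its allocator is available. ACPI_MAX_TABLES and the surrounding function name are hypothetical, not taken from this patch.

#define ACPI_MAX_TABLES 128   /* hypothetical host-chosen capacity */

static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES];

void example_early_table_init(void)
{
        /* Static scratch array; resizing of the static buffer is not allowed */
        acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
}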
99 154
100 status = acpi_tb_get_required_tables(); 155/*******************************************************************************
101 if (ACPI_FAILURE(status)) { 156 *
102 ACPI_EXCEPTION((AE_INFO, status, 157 * FUNCTION: acpi_reallocate_root_table
103 "Could not get all required tables (DSDT/FADT/FACS)")); 158 *
104 goto error_exit; 159 * PARAMETERS: None
160 *
161 * RETURN: Status
162 *
163 * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
164 * root list from the previously provided scratch area. Should
165 * be called once dynamic memory allocation is available in the
166 * kernel
167 *
168 ******************************************************************************/
169acpi_status acpi_reallocate_root_table(void)
170{
171 struct acpi_table_desc *tables;
172 acpi_size new_size;
173
174 ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
175
176 /*
177 * Only reallocate the root table if the host provided a static buffer
178 * for the table array in the call to acpi_initialize_tables.
179 */
180 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
181 return_ACPI_STATUS(AE_SUPPORT);
105 } 182 }
106 183
107 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n")); 184 new_size =
185 (acpi_gbl_root_table_list.count +
186 ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc);
108 187
109 /* Load the namespace from the tables */ 188 /* Create new array and copy the old array */
110 189
111 status = acpi_ns_load_namespace(); 190 tables = ACPI_ALLOCATE_ZEROED(new_size);
112 if (ACPI_FAILURE(status)) { 191 if (!tables) {
113 ACPI_EXCEPTION((AE_INFO, status, "Could not load namespace")); 192 return_ACPI_STATUS(AE_NO_MEMORY);
114 goto error_exit;
115 } 193 }
116 194
117 return_ACPI_STATUS(AE_OK); 195 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
118 196
119 error_exit: 197 acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
120 ACPI_EXCEPTION((AE_INFO, status, "Could not load tables")); 198 acpi_gbl_root_table_list.tables = tables;
121 return_ACPI_STATUS(status); 199 acpi_gbl_root_table_list.flags =
122} 200 ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
123
124ACPI_EXPORT_SYMBOL(acpi_load_tables)
125 201
202 return_ACPI_STATUS(AE_OK);
203}
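A small follow-on sketch of the hand-off this function enables: once the kernel allocator works, the statically parsed root list can be moved to the heap and the tables loaded. The calling function name is an assumption.

void example_late_acpi_init(void)
{
        acpi_status status;

        /* Copy the root list out of the boot-time scratch array; AE_SUPPORT
         * is returned if the list was already dynamically allocated. */
        status = acpi_reallocate_root_table();
        if (ACPI_FAILURE(status) && status != AE_SUPPORT)
                return;

        /* Now load the DSDT and any SSDT/PSDT AML into the namespace */
        (void)acpi_load_tables();
}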
126/******************************************************************************* 204/*******************************************************************************
127 * 205 *
128 * FUNCTION: acpi_load_table 206 * FUNCTION: acpi_load_table
@@ -141,342 +219,405 @@ ACPI_EXPORT_SYMBOL(acpi_load_tables)
141acpi_status acpi_load_table(struct acpi_table_header *table_ptr) 219acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
142{ 220{
143 acpi_status status; 221 acpi_status status;
144 struct acpi_table_desc table_info; 222 acpi_native_uint table_index;
145 struct acpi_pointer address; 223 struct acpi_table_desc table_desc;
146
147 ACPI_FUNCTION_TRACE(acpi_load_table);
148
149 if (!table_ptr) {
150 return_ACPI_STATUS(AE_BAD_PARAMETER);
151 }
152
153 /* Copy the table to a local buffer */
154 224
155 address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING; 225 if (!table_ptr)
156 address.pointer.logical = table_ptr; 226 return AE_BAD_PARAMETER;
157
158 status = acpi_tb_get_table_body(&address, table_ptr, &table_info);
159 if (ACPI_FAILURE(status)) {
160 return_ACPI_STATUS(status);
161 }
162
163 /* Check signature for a valid table type */
164
165 status = acpi_tb_recognize_table(&table_info, ACPI_TABLE_ALL);
166 if (ACPI_FAILURE(status)) {
167 return_ACPI_STATUS(status);
168 }
169 227
170 /* Install the new table into the local data structures */ 228 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
229 table_desc.pointer = table_ptr;
230 table_desc.length = table_ptr->length;
231 table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
171 232
172 status = acpi_tb_install_table(&table_info); 233 /*
234 * Install the new table into the local data structures
235 */
236 status = acpi_tb_add_table(&table_desc, &table_index);
173 if (ACPI_FAILURE(status)) { 237 if (ACPI_FAILURE(status)) {
174 if (status == AE_ALREADY_EXISTS) { 238 return status;
175
176 /* Table already exists, no error */
177
178 status = AE_OK;
179 }
180
181 /* Free table allocated by acpi_tb_get_table_body */
182
183 acpi_tb_delete_single_table(&table_info);
184 return_ACPI_STATUS(status);
185 } 239 }
240 status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
241 return status;
242}
186 243
187 /* Convert the table to common format if necessary */ 244ACPI_EXPORT_SYMBOL(acpi_load_table)
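A hedged usage sketch for the acpi_load_table export above: the caller hands in a complete table image that it keeps resident, since the descriptor built here records the caller's pointer. The buffer name is hypothetical.

/* 'ssdt_image' is assumed to point at a complete, checksummed table that
 * stays valid for the life of the system. */
acpi_status example_install_extra_ssdt(struct acpi_table_header *ssdt_image)
{
        return acpi_load_table(ssdt_image);   /* add to root list and load its AML */
}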
188
189 switch (table_info.type) {
190 case ACPI_TABLE_ID_FADT:
191
192 status = acpi_tb_convert_table_fadt();
193 break;
194
195 case ACPI_TABLE_ID_FACS:
196 245
197 status = acpi_tb_build_common_facs(&table_info); 246/******************************************************************************
198 break; 247 *
248 * FUNCTION: acpi_get_table_header
249 *
250 * PARAMETERS: Signature - ACPI signature of needed table
251 * Instance - Which instance (for SSDTs)
252 * out_table_header - The pointer to the table header to fill
253 *
254 * RETURN: Status and pointer to mapped table header
255 *
256 * DESCRIPTION: Finds an ACPI table header.
257 *
258 * NOTE: Caller is responsible for unmapping the header with
259 * acpi_os_unmap_memory
260 *
261 *****************************************************************************/
262acpi_status
263acpi_get_table_header(char *signature,
264 acpi_native_uint instance,
265 struct acpi_table_header *out_table_header)
266{
267 acpi_native_uint i;
268 acpi_native_uint j;
269 struct acpi_table_header *header;
199 270
200 default: 271 /* Parameter validation */
201 /* Load table into namespace if it contains executable AML */
202 272
203 status = 273 if (!signature || !out_table_header) {
204 acpi_ns_load_table(table_info.installed_desc, 274 return (AE_BAD_PARAMETER);
205 acpi_gbl_root_node);
206 break;
207 } 275 }
208 276
209 if (ACPI_FAILURE(status)) { 277 /*
278 * Walk the root table list
279 */
280 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
281 if (!ACPI_COMPARE_NAME
282 (&(acpi_gbl_root_table_list.tables[i].signature),
283 signature)) {
284 continue;
285 }
210 286
211 /* Uninstall table and free the buffer */ 287 if (++j < instance) {
288 continue;
289 }
212 290
213 (void)acpi_tb_uninstall_table(table_info.installed_desc); 291 if (!acpi_gbl_root_table_list.tables[i].pointer) {
292 if ((acpi_gbl_root_table_list.tables[i].
293 flags & ACPI_TABLE_ORIGIN_MASK) ==
294 ACPI_TABLE_ORIGIN_MAPPED) {
295 header =
296 acpi_os_map_memory(acpi_gbl_root_table_list.
297 tables[i].address,
298 sizeof(struct
299 acpi_table_header));
300 if (!header) {
301 return AE_NO_MEMORY;
302 }
303 ACPI_MEMCPY(out_table_header, header,
304 sizeof(struct acpi_table_header));
305 acpi_os_unmap_memory(header,
306 sizeof(struct
307 acpi_table_header));
308 } else {
309 return AE_NOT_FOUND;
310 }
311 } else {
312 ACPI_MEMCPY(out_table_header,
313 acpi_gbl_root_table_list.tables[i].pointer,
314 sizeof(struct acpi_table_header));
315 }
316 return (AE_OK);
214 } 317 }
215 318
216 return_ACPI_STATUS(status); 319 return (AE_NOT_FOUND);
217} 320}
218 321
219ACPI_EXPORT_SYMBOL(acpi_load_table) 322ACPI_EXPORT_SYMBOL(acpi_get_table_header)
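A brief sketch of the header-only lookup this export provides, useful when only the Length field is needed before mapping a full table. The signature string "APIC" (the MADT) is just an example and the print is illustrative.

void example_print_madt_header(void)
{
        struct acpi_table_header hdr;

        if (ACPI_SUCCESS(acpi_get_table_header("APIC", 1, &hdr))) {
                /* hdr now holds a copy of the MADT header */
                printk("MADT is %u bytes, revision %u\n",
                       hdr.length, (unsigned)hdr.revision);
        }
}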
220 323
221/******************************************************************************* 324
325/******************************************************************************
222 * 326 *
223 * FUNCTION: acpi_unload_table_id 327 * FUNCTION: acpi_unload_table_id
224 * 328 *
225 * PARAMETERS: table_type - Type of table to be unloaded 329 * PARAMETERS: id - Owner ID of the table to be removed.
226 * id - Owner ID of the table to be removed.
227 * 330 *
228 * RETURN: Status 331 * RETURN: Status
229 * 332 *
230 * DESCRIPTION: This routine is used to force the unload of a table (by id) 333 * DESCRIPTION: This routine is used to force the unload of a table (by id)
231 * 334 *
232 ******************************************************************************/ 335 ******************************************************************************/
233acpi_status acpi_unload_table_id(acpi_table_type table_type, acpi_owner_id id) 336acpi_status acpi_unload_table_id(acpi_owner_id id)
234{ 337{
235 struct acpi_table_desc *table_desc; 338 int i;
236 acpi_status status; 339 acpi_status status = AE_NOT_EXIST;
237 340
238 ACPI_FUNCTION_TRACE(acpi_unload_table); 341 ACPI_FUNCTION_TRACE(acpi_unload_table);
239 342
240 /* Parameter validation */
241 if (table_type > ACPI_TABLE_ID_MAX)
242 return_ACPI_STATUS(AE_BAD_PARAMETER);
243
244 /* Find table from the requested type list */ 343 /* Find table from the requested type list */
245 table_desc = acpi_gbl_table_lists[table_type].next; 344 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
246 while (table_desc && table_desc->owner_id != id) 345 if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
247 table_desc = table_desc->next; 346 continue;
248 347 }
249 if (!table_desc) 348 /*
250 return_ACPI_STATUS(AE_NOT_EXIST); 349 * Delete all namespace objects owned by this table. Note that these
251 350 * objects can appear anywhere in the namespace by virtue of the AML
252 /* 351 * "Scope" operator. Thus, we need to track ownership by an ID, not
253 * Delete all namespace objects owned by this table. Note that these 352 * simply a position within the hierarchy
254 * objects can appear anywhere in the namespace by virtue of the AML 353 */
255 * "Scope" operator. Thus, we need to track ownership by an ID, not 354 acpi_tb_delete_namespace_by_owner(i);
256 * simply a position within the hierarchy 355 acpi_tb_release_owner_id(i);
257 */ 356 acpi_tb_set_table_loaded_flag(i, FALSE);
258 acpi_ns_delete_namespace_by_owner(table_desc->owner_id); 357 }
259 358 return_ACPI_STATUS(status);
260 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
261 if (ACPI_FAILURE(status))
262 return_ACPI_STATUS(status);
263
264 (void)acpi_tb_uninstall_table(table_desc);
265
266 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
267
268 return_ACPI_STATUS(AE_OK);
269} 359}
270 360
271ACPI_EXPORT_SYMBOL(acpi_unload_table_id) 361ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
272 362
273#ifdef ACPI_FUTURE_USAGE
274/******************************************************************************* 363/*******************************************************************************
275 * 364 *
276 * FUNCTION: acpi_unload_table 365 * FUNCTION: acpi_get_table
277 * 366 *
278 * PARAMETERS: table_type - Type of table to be unloaded 367 * PARAMETERS: Signature - ACPI signature of needed table
368 * Instance - Which instance (for SSDTs)
369 * out_table - Where the pointer to the table is returned
279 * 370 *
280 * RETURN: Status 371 * RETURN: Status and pointer to table
281 * 372 *
282 * DESCRIPTION: This routine is used to force the unload of a table 373 * DESCRIPTION: Finds and verifies an ACPI table.
283 * 374 *
284 ******************************************************************************/ 375 *****************************************************************************/
285acpi_status acpi_unload_table(acpi_table_type table_type) 376acpi_status
377acpi_get_table(char *signature,
378 acpi_native_uint instance, struct acpi_table_header ** out_table)
286{ 379{
287 struct acpi_table_desc *table_desc; 380 acpi_native_uint i;
288 381 acpi_native_uint j;
289 ACPI_FUNCTION_TRACE(acpi_unload_table); 382 acpi_status status;
290 383
291 /* Parameter validation */ 384 /* Parameter validation */
292 385
293 if (table_type > ACPI_TABLE_ID_MAX) { 386 if (!signature || !out_table) {
294 return_ACPI_STATUS(AE_BAD_PARAMETER); 387 return (AE_BAD_PARAMETER);
295 } 388 }
296 389
297 /* Find all tables of the requested type */ 390 /*
391 * Walk the root table list
392 */
393 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
394 if (!ACPI_COMPARE_NAME
395 (&(acpi_gbl_root_table_list.tables[i].signature),
396 signature)) {
397 continue;
398 }
298 399
299 table_desc = acpi_gbl_table_lists[table_type].next; 400 if (++j < instance) {
300 if (!table_desc) { 401 continue;
301 return_ACPI_STATUS(AE_NOT_EXIST); 402 }
302 }
303 403
304 while (table_desc) { 404 status =
305 /* 405 acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
306 * Delete all namespace objects owned by this table. Note that these 406 if (ACPI_SUCCESS(status)) {
307 * objects can appear anywhere in the namespace by virtue of the AML 407 *out_table = acpi_gbl_root_table_list.tables[i].pointer;
308 * "Scope" operator. Thus, we need to track ownership by an ID, not 408 }
309 * simply a position within the hierarchy
310 */
311 acpi_ns_delete_namespace_by_owner(table_desc->owner_id);
312 table_desc = table_desc->next;
313 }
314 409
315 /* Delete (or unmap) all tables of this type */ 410 if (!acpi_gbl_permanent_mmap) {
411 acpi_gbl_root_table_list.tables[i].pointer = 0;
412 }
316 413
317 acpi_tb_delete_tables_by_type(table_type); 414 return (status);
318 return_ACPI_STATUS(AE_OK); 415 }
416
417 return (AE_NOT_FOUND);
319} 418}
320 419
321ACPI_EXPORT_SYMBOL(acpi_unload_table) 420ACPI_EXPORT_SYMBOL(acpi_get_table)
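The most common consumer pattern for the new acpi_get_table export, sketched under the assumption that "FACP" (the FADT signature) is the table of interest; the returned pointer is owned by the table manager.

void example_find_fadt(void)
{
        struct acpi_table_header *fadt;

        if (ACPI_SUCCESS(acpi_get_table("FACP", 1, &fadt))) {
                /* fadt points at the verified, mapped table; do not free it */
                printk("FADT revision %u, length %u\n",
                       (unsigned)fadt->revision, fadt->length);
        }
}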
322 421
323/******************************************************************************* 422/*******************************************************************************
324 * 423 *
325 * FUNCTION: acpi_get_table_header 424 * FUNCTION: acpi_get_table_by_index
326 * 425 *
327 * PARAMETERS: table_type - one of the defined table types 426 * PARAMETERS: table_index - Table index
328 * Instance - the non zero instance of the table, allows 427 * Table - Where the pointer to the table is returned
329 * support for multiple tables of the same type
330 * see acpi_gbl_acpi_table_flag
331 * out_table_header - pointer to the struct acpi_table_header if successful
332 * 428 *
333 * DESCRIPTION: This function is called to get an ACPI table header. The caller 429 * RETURN: Status and pointer to the table
334 * supplies an pointer to a data area sufficient to contain an ACPI
335 * struct acpi_table_header structure.
336 * 430 *
337 * The header contains a length field that can be used to determine 431 * DESCRIPTION: Obtain a table by an index into the global table list.
338 * the size of the buffer needed to contain the entire table. This
339 * function is not valid for the RSD PTR table since it does not
340 * have a standard header and is fixed length.
341 * 432 *
342 ******************************************************************************/ 433 ******************************************************************************/
343acpi_status 434acpi_status
344acpi_get_table_header(acpi_table_type table_type, 435acpi_get_table_by_index(acpi_native_uint table_index,
345 u32 instance, struct acpi_table_header *out_table_header) 436 struct acpi_table_header ** table)
346{ 437{
347 struct acpi_table_header *tbl_ptr;
348 acpi_status status; 438 acpi_status status;
349 439
350 ACPI_FUNCTION_TRACE(acpi_get_table_header); 440 ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
441
442 /* Parameter validation */
351 443
352 if ((instance == 0) || 444 if (!table) {
353 (table_type == ACPI_TABLE_ID_RSDP) || (!out_table_header)) {
354 return_ACPI_STATUS(AE_BAD_PARAMETER); 445 return_ACPI_STATUS(AE_BAD_PARAMETER);
355 } 446 }
356 447
357 /* Check the table type and instance */ 448 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
449
450 /* Validate index */
358 451
359 if ((table_type > ACPI_TABLE_ID_MAX) || 452 if (table_index >= acpi_gbl_root_table_list.count) {
360 (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) && 453 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
361 instance > 1)) {
362 return_ACPI_STATUS(AE_BAD_PARAMETER); 454 return_ACPI_STATUS(AE_BAD_PARAMETER);
363 } 455 }
364 456
365 /* Get a pointer to the entire table */ 457 if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
366 458
367 status = acpi_tb_get_table_ptr(table_type, instance, &tbl_ptr); 459 /* Table is not mapped, map it */
368 if (ACPI_FAILURE(status)) {
369 return_ACPI_STATUS(status);
370 }
371 460
372 /* The function will return a NULL pointer if the table is not loaded */ 461 status =
373 462 acpi_tb_verify_table(&acpi_gbl_root_table_list.
374 if (tbl_ptr == NULL) { 463 tables[table_index]);
375 return_ACPI_STATUS(AE_NOT_EXIST); 464 if (ACPI_FAILURE(status)) {
465 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
466 return_ACPI_STATUS(status);
467 }
376 } 468 }
377 469
378 /* Copy the header to the caller's buffer */ 470 *table = acpi_gbl_root_table_list.tables[table_index].pointer;
379 471 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
380 ACPI_MEMCPY(ACPI_CAST_PTR(void, out_table_header), 472 return_ACPI_STATUS(AE_OK);
381 ACPI_CAST_PTR(void, tbl_ptr),
382 sizeof(struct acpi_table_header));
383
384 return_ACPI_STATUS(status);
385} 473}
386 474
387ACPI_EXPORT_SYMBOL(acpi_get_table_header) 475ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
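A sketch of walking the whole root table list through this index-based accessor; it relies only on the behaviour shown above (AE_BAD_PARAMETER once the index passes the list count), and the function and variable names are illustrative.

void example_list_tables(void)
{
        struct acpi_table_header *table;
        acpi_native_uint i;

        for (i = 0; ACPI_SUCCESS(acpi_get_table_by_index(i, &table)); i++) {
                printk("table %u: %4.4s, %u bytes\n",
                       (unsigned)i, table->signature, table->length);
        }
}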
388#endif /* ACPI_FUTURE_USAGE */
389 476
390/******************************************************************************* 477/*******************************************************************************
391 * 478 *
392 * FUNCTION: acpi_get_table 479 * FUNCTION: acpi_tb_load_namespace
393 * 480 *
394 * PARAMETERS: table_type - one of the defined table types 481 * PARAMETERS: None
395 * Instance - the non zero instance of the table, allows
396 * support for multiple tables of the same type
397 * see acpi_gbl_acpi_table_flag
398 * ret_buffer - pointer to a structure containing a buffer to
399 * receive the table
400 * 482 *
401 * RETURN: Status 483 * RETURN: Status
402 * 484 *
403 * DESCRIPTION: This function is called to get an ACPI table. The caller 485 * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
404 * supplies an out_buffer large enough to contain the entire ACPI 486 * the RSDT/XSDT.
405 * table. The caller should call the acpi_get_table_header function
406 * first to determine the buffer size needed. Upon completion
407 * the out_buffer->Length field will indicate the number of bytes
408 * copied into the out_buffer->buf_ptr buffer. This table will be
409 * a complete table including the header.
410 * 487 *
411 ******************************************************************************/ 488 ******************************************************************************/
412acpi_status 489static acpi_status acpi_tb_load_namespace(void)
413acpi_get_table(acpi_table_type table_type,
414 u32 instance, struct acpi_buffer *ret_buffer)
415{ 490{
416 struct acpi_table_header *tbl_ptr;
417 acpi_status status; 491 acpi_status status;
418 acpi_size table_length; 492 struct acpi_table_header *table;
493 acpi_native_uint i;
419 494
420 ACPI_FUNCTION_TRACE(acpi_get_table); 495 ACPI_FUNCTION_TRACE(tb_load_namespace);
421 496
422 /* Parameter validation */ 497 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
423 498
424 if (instance == 0) { 499 /*
425 return_ACPI_STATUS(AE_BAD_PARAMETER); 500 * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
501 * are optional.
502 */
503 if (!acpi_gbl_root_table_list.count ||
504 !ACPI_COMPARE_NAME(&
505 (acpi_gbl_root_table_list.
506 tables[ACPI_TABLE_INDEX_DSDT].signature),
507 ACPI_SIG_DSDT)
508 ||
509 ACPI_FAILURE(acpi_tb_verify_table
510 (&acpi_gbl_root_table_list.
511 tables[ACPI_TABLE_INDEX_DSDT]))) {
512 status = AE_NO_ACPI_TABLES;
513 goto unlock_and_exit;
426 } 514 }
427 515
428 status = acpi_ut_validate_buffer(ret_buffer); 516 /*
429 if (ACPI_FAILURE(status)) { 517 * Find DSDT table
430 return_ACPI_STATUS(status); 518 */
519 status =
520 acpi_os_table_override(acpi_gbl_root_table_list.
521 tables[ACPI_TABLE_INDEX_DSDT].pointer,
522 &table);
523 if (ACPI_SUCCESS(status) && table) {
524 /*
525 * DSDT table has been found
526 */
527 acpi_tb_delete_table(&acpi_gbl_root_table_list.
528 tables[ACPI_TABLE_INDEX_DSDT]);
529 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer =
530 table;
531 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].length =
532 table->length;
533 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].flags =
534 ACPI_TABLE_ORIGIN_UNKNOWN;
535
536 ACPI_INFO((AE_INFO, "Table DSDT replaced by host OS"));
537 acpi_tb_print_table_header(0, table);
431 } 538 }
432 539
433 /* Check the table type and instance */ 540 status =
541 acpi_tb_verify_table(&acpi_gbl_root_table_list.
542 tables[ACPI_TABLE_INDEX_DSDT]);
543 if (ACPI_FAILURE(status)) {
434 544
435 if ((table_type > ACPI_TABLE_ID_MAX) || 545 /* A valid DSDT is required */
436 (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) && 546
437 instance > 1)) { 547 status = AE_NO_ACPI_TABLES;
438 return_ACPI_STATUS(AE_BAD_PARAMETER); 548 goto unlock_and_exit;
439 } 549 }
440 550
441 /* Get a pointer to the entire table */ 551 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
442 552
443 status = acpi_tb_get_table_ptr(table_type, instance, &tbl_ptr); 553 /*
554 * Load and parse tables.
555 */
556 status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
444 if (ACPI_FAILURE(status)) { 557 if (ACPI_FAILURE(status)) {
445 return_ACPI_STATUS(status); 558 return_ACPI_STATUS(status);
446 } 559 }
447 560
448 /* 561 /*
449 * acpi_tb_get_table_ptr will return a NULL pointer if the 562 * Load any SSDT or PSDT tables. Note: Loop leaves tables locked
450 * table is not loaded.
451 */ 563 */
452 if (tbl_ptr == NULL) { 564 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
453 return_ACPI_STATUS(AE_NOT_EXIST); 565 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
566 if ((!ACPI_COMPARE_NAME
567 (&(acpi_gbl_root_table_list.tables[i].signature),
568 ACPI_SIG_SSDT)
569 &&
570 !ACPI_COMPARE_NAME(&
571 (acpi_gbl_root_table_list.tables[i].
572 signature), ACPI_SIG_PSDT))
573 ||
574 ACPI_FAILURE(acpi_tb_verify_table
575 (&acpi_gbl_root_table_list.tables[i]))) {
576 continue;
577 }
578
579 /* Ignore errors while loading tables, get as many as possible */
580
581 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
582 (void)acpi_ns_load_table(i, acpi_gbl_root_node);
583 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
454 } 584 }
455 585
456 /* Get the table length */ 586 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
457 587
458 if (table_type == ACPI_TABLE_ID_RSDP) { 588 unlock_and_exit:
589 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
590 return_ACPI_STATUS(status);
591}
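The DSDT-override path above calls out to the host through acpi_os_table_override(). A minimal host-side stub is sketched below; the signature follows the call made in this function, and returning a NULL replacement keeps the firmware DSDT.

acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
                       struct acpi_table_header **new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

        *new_table = NULL;      /* no override: keep the firmware-provided table */
        return AE_OK;
}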
459 592
460 /* RSD PTR is the only "table" without a header */ 593/*******************************************************************************
594 *
595 * FUNCTION: acpi_load_tables
596 *
597 * PARAMETERS: None
598 *
599 * RETURN: Status
600 *
601 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
602 *
603 ******************************************************************************/
461 604
462 table_length = sizeof(struct rsdp_descriptor); 605acpi_status acpi_load_tables(void)
463 } else { 606{
464 table_length = (acpi_size) tbl_ptr->length; 607 acpi_status status;
465 }
466 608
467 /* Validate/Allocate/Clear caller buffer */ 609 ACPI_FUNCTION_TRACE(acpi_load_tables);
468 610
469 status = acpi_ut_initialize_buffer(ret_buffer, table_length); 611 /*
612 * Load the namespace from the tables
613 */
614 status = acpi_tb_load_namespace();
470 if (ACPI_FAILURE(status)) { 615 if (ACPI_FAILURE(status)) {
471 return_ACPI_STATUS(status); 616 ACPI_EXCEPTION((AE_INFO, status,
617 "While loading namespace from ACPI tables"));
472 } 618 }
473 619
474 /* Copy the table to the buffer */ 620 return_ACPI_STATUS(status);
475
476 ACPI_MEMCPY(ACPI_CAST_PTR(void, ret_buffer->pointer),
477 ACPI_CAST_PTR(void, tbl_ptr), table_length);
478
479 return_ACPI_STATUS(AE_OK);
480} 621}
481 622
482ACPI_EXPORT_SYMBOL(acpi_get_table) 623ACPI_EXPORT_SYMBOL(acpi_load_tables)
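Putting the pieces of this file together, a hedged sketch of the bring-up order implied by the new interfaces; the wrapper function name is invented for illustration.

acpi_status example_acpi_table_bringup(struct acpi_table_desc *scratch, u32 count)
{
        acpi_status status;

        status = acpi_initialize_tables(scratch, count, 0); /* parse RSDP and RSDT/XSDT */
        if (ACPI_FAILURE(status))
                return status;

        return acpi_load_tables();      /* DSDT and SSDT/PSDT AML into the namespace */
}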
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index da2648bbdbc0..cf8fa514189f 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,16 +48,15 @@
48ACPI_MODULE_NAME("tbxfroot") 48ACPI_MODULE_NAME("tbxfroot")
49 49
50/* Local prototypes */ 50/* Local prototypes */
51static acpi_status
52acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags);
53
54static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length); 51static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
55 52
53static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
54
56/******************************************************************************* 55/*******************************************************************************
57 * 56 *
58 * FUNCTION: acpi_tb_validate_rsdp 57 * FUNCTION: acpi_tb_validate_rsdp
59 * 58 *
60 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP 59 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP
61 * 60 *
62 * RETURN: Status 61 * RETURN: Status
63 * 62 *
@@ -65,14 +64,18 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
65 * 64 *
66 ******************************************************************************/ 65 ******************************************************************************/
67 66
68acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp) 67static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
69{ 68{
70 ACPI_FUNCTION_ENTRY(); 69 ACPI_FUNCTION_ENTRY();
71 70
72 /* 71 /*
73 * The signature and checksum must both be correct 72 * The signature and checksum must both be correct
73 *
74 * Note: Sometimes there exists more than one RSDP in memory; the valid
75 * RSDP has a valid checksum, all others have an invalid checksum.
74 */ 76 */
75 if (ACPI_STRNCMP((char *)rsdp, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0) { 77 if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)
78 != 0) {
76 79
77 /* Nope, BAD Signature */ 80 /* Nope, BAD Signature */
78 81
@@ -81,14 +84,14 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
81 84
82 /* Check the standard checksum */ 85 /* Check the standard checksum */
83 86
84 if (acpi_tb_sum_table(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { 87 if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
85 return (AE_BAD_CHECKSUM); 88 return (AE_BAD_CHECKSUM);
86 } 89 }
87 90
88 /* Check extended checksum if table version >= 2 */ 91 /* Check extended checksum if table version >= 2 */
89 92
90 if ((rsdp->revision >= 2) && 93 if ((rsdp->revision >= 2) &&
91 (acpi_tb_sum_table(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) { 94 (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
92 return (AE_BAD_CHECKSUM); 95 return (AE_BAD_CHECKSUM);
93 } 96 }
94 97
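A hedged restatement of the checksum rule enforced above, with a tiny helper: the first 20 bytes (the ACPI 1.0 portion) must sum to zero, and for revision 2 and later the full 36-byte structure must also sum to zero. The helper and the literal lengths are spelled out here only for illustration.

static u8 example_byte_sum(const u8 *buf, u32 len)
{
        u8 sum = 0;

        while (len--)
                sum = (u8)(sum + *buf++);
        return sum;
}

/* Valid RSDP: example_byte_sum(rsdp, 20) == 0, and additionally
 * example_byte_sum(rsdp, 36) == 0 when rsdp->revision >= 2. */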
@@ -97,314 +100,123 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
97 100
98/******************************************************************************* 101/*******************************************************************************
99 * 102 *
100 * FUNCTION: acpi_tb_find_table 103 * FUNCTION: acpi_tb_find_rsdp
101 *
102 * PARAMETERS: Signature - String with ACPI table signature
103 * oem_id - String with the table OEM ID
104 * oem_table_id - String with the OEM Table ID
105 * table_ptr - Where the table pointer is returned
106 *
107 * RETURN: Status
108 * 104 *
109 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the 105 * PARAMETERS: table_address - Where the table pointer is returned
110 * Signature, OEM ID and OEM Table ID.
111 * 106 *
112 ******************************************************************************/ 107 * RETURN: Status, RSDP physical address
113
114acpi_status
115acpi_tb_find_table(char *signature,
116 char *oem_id,
117 char *oem_table_id, struct acpi_table_header ** table_ptr)
118{
119 acpi_status status;
120 struct acpi_table_header *table;
121
122 ACPI_FUNCTION_TRACE(tb_find_table);
123
124 /* Validate string lengths */
125
126 if ((ACPI_STRLEN(signature) > ACPI_NAME_SIZE) ||
127 (ACPI_STRLEN(oem_id) > sizeof(table->oem_id)) ||
128 (ACPI_STRLEN(oem_table_id) > sizeof(table->oem_table_id))) {
129 return_ACPI_STATUS(AE_AML_STRING_LIMIT);
130 }
131
132 if (ACPI_COMPARE_NAME(signature, DSDT_SIG)) {
133 /*
134 * The DSDT pointer is contained in the FADT, not the RSDT.
135 * This code should suffice, because the only code that would perform
136 * a "find" on the DSDT is the data_table_region() AML opcode -- in
137 * which case, the DSDT is guaranteed to be already loaded.
138 * If this becomes insufficient, the FADT will have to be found first.
139 */
140 if (!acpi_gbl_DSDT) {
141 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
142 }
143 table = acpi_gbl_DSDT;
144 } else {
145 /* Find the table */
146
147 status = acpi_get_firmware_table(signature, 1,
148 ACPI_LOGICAL_ADDRESSING,
149 &table);
150 if (ACPI_FAILURE(status)) {
151 return_ACPI_STATUS(status);
152 }
153 }
154
155 /* Check oem_id and oem_table_id */
156
157 if ((oem_id[0] &&
158 ACPI_STRNCMP(oem_id, table->oem_id,
159 sizeof(table->oem_id))) ||
160 (oem_table_id[0] &&
161 ACPI_STRNCMP(oem_table_id, table->oem_table_id,
162 sizeof(table->oem_table_id)))) {
163 return_ACPI_STATUS(AE_AML_NAME_NOT_FOUND);
164 }
165
166 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Found table [%4.4s]\n",
167 table->signature));
168
169 *table_ptr = table;
170 return_ACPI_STATUS(AE_OK);
171}
172
173/*******************************************************************************
174 *
175 * FUNCTION: acpi_get_firmware_table
176 * 108 *
177 * PARAMETERS: Signature - Any ACPI table signature 109 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
178 * Instance - the non zero instance of the table, allows 110 * pointer structure. If it is found, set *RSDP to point to it.
179 * support for multiple tables of the same type
180 * Flags - Physical/Virtual support
181 * table_pointer - Where a buffer containing the table is
182 * returned
183 * 111 *
184 * RETURN: Status 112 * NOTE1: The RSDP must be either in the first 1_k of the Extended
113 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
114 * Only a 32-bit physical address is necessary.
185 * 115 *
186 * DESCRIPTION: This function is called to get an ACPI table. A buffer is 116 * NOTE2: This function is always available, regardless of the
187 * allocated for the table and returned in table_pointer. 117 * initialization state of the rest of ACPI.
188 * This table will be a complete table including the header.
189 * 118 *
190 ******************************************************************************/ 119 ******************************************************************************/
191 120
192acpi_status 121acpi_status acpi_find_root_pointer(acpi_native_uint * table_address)
193acpi_get_firmware_table(acpi_string signature,
194 u32 instance,
195 u32 flags, struct acpi_table_header **table_pointer)
196{ 122{
197 acpi_status status; 123 u8 *table_ptr;
198 struct acpi_pointer address; 124 u8 *mem_rover;
199 struct acpi_table_header *header = NULL; 125 u32 physical_address;
200 struct acpi_table_desc *table_info = NULL;
201 struct acpi_table_desc *rsdt_info;
202 u32 table_count;
203 u32 i;
204 u32 j;
205
206 ACPI_FUNCTION_TRACE(acpi_get_firmware_table);
207
208 /*
209 * Ensure that at least the table manager is initialized. We don't
210 * require that the entire ACPI subsystem is up for this interface.
211 * If we have a buffer, we must have a length too
212 */
213 if ((instance == 0) || (!signature) || (!table_pointer)) {
214 return_ACPI_STATUS(AE_BAD_PARAMETER);
215 }
216
217 /* Ensure that we have a RSDP */
218
219 if (!acpi_gbl_RSDP) {
220
221 /* Get the RSDP */
222
223 status = acpi_os_get_root_pointer(flags, &address);
224 if (ACPI_FAILURE(status)) {
225 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "RSDP not found\n"));
226 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
227 }
228
229 /* Map and validate the RSDP */
230
231 if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
232 status = acpi_os_map_memory(address.pointer.physical,
233 sizeof(struct
234 rsdp_descriptor),
235 (void *)&acpi_gbl_RSDP);
236 if (ACPI_FAILURE(status)) {
237 return_ACPI_STATUS(status);
238 }
239 } else {
240 acpi_gbl_RSDP = address.pointer.logical;
241 }
242
243 /* The RDSP signature and checksum must both be correct */
244
245 status = acpi_tb_validate_rsdp(acpi_gbl_RSDP);
246 if (ACPI_FAILURE(status)) {
247 return_ACPI_STATUS(status);
248 }
249 }
250
251 /* Get the RSDT address via the RSDP */
252
253 acpi_tb_get_rsdt_address(&address);
254 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
255 "RSDP located at %p, RSDT physical=%8.8X%8.8X\n",
256 acpi_gbl_RSDP,
257 ACPI_FORMAT_UINT64(address.pointer.value)));
258 126
259 /* Insert processor_mode flags */ 127 ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
260 128
261 address.pointer_type |= flags; 129 /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
262 130
263 /* Get and validate the RSDT */ 131 table_ptr = acpi_os_map_memory((acpi_physical_address)
132 ACPI_EBDA_PTR_LOCATION,
133 ACPI_EBDA_PTR_LENGTH);
134 if (!table_ptr) {
135 ACPI_ERROR((AE_INFO,
136 "Could not map memory at %8.8X for length %X",
137 ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
264 138
265 rsdt_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
266 if (!rsdt_info) {
267 return_ACPI_STATUS(AE_NO_MEMORY); 139 return_ACPI_STATUS(AE_NO_MEMORY);
268 } 140 }
269 141
270 status = acpi_tb_get_table(&address, rsdt_info); 142 ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
271 if (ACPI_FAILURE(status)) {
272 goto cleanup;
273 }
274
275 status = acpi_tb_validate_rsdt(rsdt_info->pointer);
276 if (ACPI_FAILURE(status)) {
277 goto cleanup;
278 }
279 143
280 /* Allocate a scratch table header and table descriptor */ 144 /* Convert segment part to physical address */
281 145
282 header = ACPI_ALLOCATE(sizeof(struct acpi_table_header)); 146 physical_address <<= 4;
283 if (!header) { 147 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
284 status = AE_NO_MEMORY;
285 goto cleanup;
286 }
287 148
288 table_info = ACPI_ALLOCATE(sizeof(struct acpi_table_desc)); 149 /* EBDA present? */
289 if (!table_info) {
290 status = AE_NO_MEMORY;
291 goto cleanup;
292 }
293 150
294 /* Get the number of table pointers within the RSDT */ 151 if (physical_address > 0x400) {
295
296 table_count =
297 acpi_tb_get_table_count(acpi_gbl_RSDP, rsdt_info->pointer);
298 address.pointer_type = acpi_gbl_table_flags | flags;
299
300 /*
301 * Search the RSDT/XSDT for the correct instance of the
302 * requested table
303 */
304 for (i = 0, j = 0; i < table_count; i++) {
305 /* 152 /*
306 * Get the next table pointer, handle RSDT vs. XSDT 153 * 1b) Search EBDA paragraphs (EBDA is required to be a
307 * RSDT pointers are 32 bits, XSDT pointers are 64 bits 154 * minimum of 1_k length)
308 */ 155 */
309 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) { 156 table_ptr = acpi_os_map_memory((acpi_native_uint)
310 address.pointer.value = 157 physical_address,
311 (ACPI_CAST_PTR 158 ACPI_EBDA_WINDOW_SIZE);
312 (struct rsdt_descriptor, 159 if (!table_ptr) {
313 rsdt_info->pointer))->table_offset_entry[i]; 160 ACPI_ERROR((AE_INFO,
314 } else { 161 "Could not map memory at %8.8X for length %X",
315 address.pointer.value = 162 physical_address, ACPI_EBDA_WINDOW_SIZE));
316 (ACPI_CAST_PTR
317 (struct xsdt_descriptor,
318 rsdt_info->pointer))->table_offset_entry[i];
319 }
320
321 /* Get the table header */
322 163
323 status = acpi_tb_get_table_header(&address, header); 164 return_ACPI_STATUS(AE_NO_MEMORY);
324 if (ACPI_FAILURE(status)) {
325 goto cleanup;
326 } 165 }
327 166
328 /* Compare table signatures and table instance */ 167 mem_rover =
329 168 acpi_tb_scan_memory_for_rsdp(table_ptr,
330 if (ACPI_COMPARE_NAME(header->signature, signature)) { 169 ACPI_EBDA_WINDOW_SIZE);
331 170 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
332 /* An instance of the table was found */
333 171
334 j++; 172 if (mem_rover) {
335 if (j >= instance) {
336 173
337 /* Found the correct instance, get the entire table */ 174 /* Return the physical address */
338 175
339 status = 176 physical_address +=
340 acpi_tb_get_table_body(&address, header, 177 (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
341 table_info);
342 if (ACPI_FAILURE(status)) {
343 goto cleanup;
344 }
345 178
346 *table_pointer = table_info->pointer; 179 *table_address = physical_address;
347 goto cleanup; 180 return_ACPI_STATUS(AE_OK);
348 }
349 } 181 }
350 } 182 }
351 183
352 /* Did not find the table */ 184 /*
185 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
186 */
187 table_ptr = acpi_os_map_memory((acpi_physical_address)
188 ACPI_HI_RSDP_WINDOW_BASE,
189 ACPI_HI_RSDP_WINDOW_SIZE);
353 190
354 status = AE_NOT_EXIST; 191 if (!table_ptr) {
192 ACPI_ERROR((AE_INFO,
193 "Could not map memory at %8.8X for length %X",
194 ACPI_HI_RSDP_WINDOW_BASE,
195 ACPI_HI_RSDP_WINDOW_SIZE));
355 196
356 cleanup: 197 return_ACPI_STATUS(AE_NO_MEMORY);
357 if (rsdt_info->pointer) {
358 acpi_os_unmap_memory(rsdt_info->pointer,
359 (acpi_size) rsdt_info->pointer->length);
360 } 198 }
361 ACPI_FREE(rsdt_info);
362 199
363 if (header) { 200 mem_rover =
364 ACPI_FREE(header); 201 acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
365 } 202 acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
366 if (table_info) {
367 ACPI_FREE(table_info);
368 }
369 return_ACPI_STATUS(status);
370}
371 203
372ACPI_EXPORT_SYMBOL(acpi_get_firmware_table) 204 if (mem_rover) {
373 205
374/* TBD: Move to a new file */ 206 /* Return the physical address */
375#if ACPI_MACHINE_WIDTH != 16
376/*******************************************************************************
377 *
378 * FUNCTION: acpi_find_root_pointer
379 *
380 * PARAMETERS: Flags - Logical/Physical addressing
381 * rsdp_address - Where to place the RSDP address
382 *
383 * RETURN: Status, Physical address of the RSDP
384 *
385 * DESCRIPTION: Find the RSDP
386 *
387 ******************************************************************************/
388acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
389{
390 struct acpi_table_desc table_info;
391 acpi_status status;
392
393 ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
394
395 /* Get the RSDP */
396 207
397 status = acpi_tb_find_rsdp(&table_info, flags); 208 physical_address = (u32)
398 if (ACPI_FAILURE(status)) { 209 (ACPI_HI_RSDP_WINDOW_BASE +
399 ACPI_EXCEPTION((AE_INFO, status, 210 ACPI_PTR_DIFF(mem_rover, table_ptr));
400 "RSDP structure not found - Flags=%X", flags));
401 211
402 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 212 *table_address = physical_address;
213 return_ACPI_STATUS(AE_OK);
403 } 214 }
404 215
405 rsdp_address->pointer_type = ACPI_PHYSICAL_POINTER; 216 /* A valid RSDP was not found */
406 rsdp_address->pointer.physical = table_info.physical_address; 217
407 return_ACPI_STATUS(AE_OK); 218 ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
219 return_ACPI_STATUS(AE_NOT_FOUND);
408} 220}
409 221
410ACPI_EXPORT_SYMBOL(acpi_find_root_pointer) 222ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
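A short sketch of the EBDA arithmetic used in the search above: the 16-bit real-mode segment stored at physical 0x40E is shifted left by four to form the EBDA base, which is then scanned on 16-byte boundaries. The accessor name is hypothetical.

/* 'read_phys_u16' is a hypothetical accessor; the real code maps the location
 * with acpi_os_map_memory and uses ACPI_MOVE_16_TO_32. */
u32 example_ebda_base(void)
{
        u16 ebda_segment = read_phys_u16(0x40E);   /* ACPI_EBDA_PTR_LOCATION */
        u32 ebda_base = (u32)ebda_segment << 4;    /* e.g. 0x9FC0 -> 0x0009FC00 */

        return (ebda_base > 0x400) ? ebda_base : 0;   /* 0 means no usable EBDA */
}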
@@ -440,7 +252,7 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
440 252
441 status = 253 status =
442 acpi_tb_validate_rsdp(ACPI_CAST_PTR 254 acpi_tb_validate_rsdp(ACPI_CAST_PTR
443 (struct rsdp_descriptor, mem_rover)); 255 (struct acpi_table_rsdp, mem_rover));
444 if (ACPI_SUCCESS(status)) { 256 if (ACPI_SUCCESS(status)) {
445 257
446 /* Sig and checksum valid, we have found a real RSDP */ 258 /* Sig and checksum valid, we have found a real RSDP */
@@ -461,189 +273,3 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
461 start_address)); 273 start_address));
462 return_PTR(NULL); 274 return_PTR(NULL);
463} 275}
464
465/*******************************************************************************
466 *
467 * FUNCTION: acpi_tb_find_rsdp
468 *
469 * PARAMETERS: table_info - Where the table info is returned
470 * Flags - Current memory mode (logical vs.
471 * physical addressing)
472 *
473 * RETURN: Status, RSDP physical address
474 *
475 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
476 * pointer structure. If it is found, set *RSDP to point to it.
477 *
478 * NOTE1: The RSDP must be either in the first 1_k of the Extended
479 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
480 * Only a 32-bit physical address is necessary.
481 *
482 * NOTE2: This function is always available, regardless of the
483 * initialization state of the rest of ACPI.
484 *
485 ******************************************************************************/
486
487static acpi_status
488acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
489{
490 u8 *table_ptr;
491 u8 *mem_rover;
492 u32 physical_address;
493 acpi_status status;
494
495 ACPI_FUNCTION_TRACE(tb_find_rsdp);
496
497 /*
498 * Scan supports either logical addressing or physical addressing
499 */
500 if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
501
502 /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
503
504 status = acpi_os_map_memory((acpi_physical_address)
505 ACPI_EBDA_PTR_LOCATION,
506 ACPI_EBDA_PTR_LENGTH,
507 (void *)&table_ptr);
508 if (ACPI_FAILURE(status)) {
509 ACPI_ERROR((AE_INFO,
510 "Could not map memory at %8.8X for length %X",
511 ACPI_EBDA_PTR_LOCATION,
512 ACPI_EBDA_PTR_LENGTH));
513
514 return_ACPI_STATUS(status);
515 }
516
517 ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
518
519 /* Convert segment part to physical address */
520
521 physical_address <<= 4;
522 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
523
524 /* EBDA present? */
525
526 if (physical_address > 0x400) {
527 /*
528 * 1b) Search EBDA paragraphs (EBDA is required to be a
529 * minimum of 1_k length)
530 */
531 status = acpi_os_map_memory((acpi_physical_address)
532 physical_address,
533 ACPI_EBDA_WINDOW_SIZE,
534 (void *)&table_ptr);
535 if (ACPI_FAILURE(status)) {
536 ACPI_ERROR((AE_INFO,
537 "Could not map memory at %8.8X for length %X",
538 physical_address,
539 ACPI_EBDA_WINDOW_SIZE));
540
541 return_ACPI_STATUS(status);
542 }
543
544 mem_rover = acpi_tb_scan_memory_for_rsdp(table_ptr,
545 ACPI_EBDA_WINDOW_SIZE);
546 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
547
548 if (mem_rover) {
549
550 /* Return the physical address */
551
552 physical_address +=
553 (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
554
555 table_info->physical_address =
556 (acpi_physical_address) physical_address;
557 return_ACPI_STATUS(AE_OK);
558 }
559 }
560
561 /*
562 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
563 */
564 status = acpi_os_map_memory((acpi_physical_address)
565 ACPI_HI_RSDP_WINDOW_BASE,
566 ACPI_HI_RSDP_WINDOW_SIZE,
567 (void *)&table_ptr);
568
569 if (ACPI_FAILURE(status)) {
570 ACPI_ERROR((AE_INFO,
571 "Could not map memory at %8.8X for length %X",
572 ACPI_HI_RSDP_WINDOW_BASE,
573 ACPI_HI_RSDP_WINDOW_SIZE));
574
575 return_ACPI_STATUS(status);
576 }
577
578 mem_rover =
579 acpi_tb_scan_memory_for_rsdp(table_ptr,
580 ACPI_HI_RSDP_WINDOW_SIZE);
581 acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
582
583 if (mem_rover) {
584
585 /* Return the physical address */
586
587 physical_address = (u32)
588 (ACPI_HI_RSDP_WINDOW_BASE +
589 ACPI_PTR_DIFF(mem_rover, table_ptr));
590
591 table_info->physical_address =
592 (acpi_physical_address) physical_address;
593 return_ACPI_STATUS(AE_OK);
594 }
595 }
596
597 /*
598 * Physical addressing
599 */
600 else {
601 /* 1a) Get the location of the EBDA */
602
603 ACPI_MOVE_16_TO_32(&physical_address, ACPI_EBDA_PTR_LOCATION);
604 physical_address <<= 4; /* Convert segment to physical address */
605
606 /* EBDA present? */
607
608 if (physical_address > 0x400) {
609 /*
610 * 1b) Search EBDA paragraphs (EBDA is required to be a minimum of
611 * 1_k length)
612 */
613 mem_rover =
614 acpi_tb_scan_memory_for_rsdp(ACPI_PHYSADDR_TO_PTR
615 (physical_address),
616 ACPI_EBDA_WINDOW_SIZE);
617 if (mem_rover) {
618
619 /* Return the physical address */
620
621 table_info->physical_address =
622 ACPI_TO_INTEGER(mem_rover);
623 return_ACPI_STATUS(AE_OK);
624 }
625 }
626
627 /* 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh */
628
629 mem_rover =
630 acpi_tb_scan_memory_for_rsdp(ACPI_PHYSADDR_TO_PTR
631 (ACPI_HI_RSDP_WINDOW_BASE),
632 ACPI_HI_RSDP_WINDOW_SIZE);
633 if (mem_rover) {
634
635 /* Found it, return the physical address */
636
637 table_info->physical_address =
638 ACPI_TO_INTEGER(mem_rover);
639 return_ACPI_STATUS(AE_OK);
640 }
641 }
642
643 /* A valid RSDP was not found */
644
645 ACPI_ERROR((AE_INFO, "No valid RSDP was found"));
646 return_ACPI_STATUS(AE_NOT_FOUND);
647}
648
649#endif
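
The hunk above removes ACPICA's in-kernel RSDP search helpers (acpi_tb_find_rsdp and acpi_tb_scan_memory_for_rsdp) as part of the table-manager rework that runs through the rest of this patch. The search they performed follows the ACPI spec: the RSDP sits either in the first 1 KB of the EBDA or in the BIOS window E0000h-FFFFFh, aligned on 16-byte boundaries, identified by the "RSD PTR " signature and a zero checksum over the first 20 bytes. A minimal, self-contained sketch of that scan over an already-mapped buffer (names are illustrative, not ACPICA's):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Scan a mapped window on 16-byte boundaries for "RSD PTR " with a
 * valid ACPI 1.0 checksum; returns the offset, or -1 if not found. */
static long find_rsdp(const uint8_t *window, size_t length)
{
        size_t off;

        for (off = 0; off + 20 <= length; off += 16) {
                const uint8_t *p = window + off;
                uint8_t sum = 0;
                int i;

                if (memcmp(p, "RSD PTR ", 8) != 0)
                        continue;

                for (i = 0; i < 20; i++)        /* checksum covers the first 20 bytes */
                        sum += p[i];

                if (sum == 0)
                        return (long)off;
        }
        return -1;
}
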
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 40ddb4dd9631..f76d3168c2b2 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.\n");
82 82
83static int acpi_thermal_add(struct acpi_device *device); 83static int acpi_thermal_add(struct acpi_device *device);
84static int acpi_thermal_remove(struct acpi_device *device, int type); 84static int acpi_thermal_remove(struct acpi_device *device, int type);
85static int acpi_thermal_resume(struct acpi_device *device, int state); 85static int acpi_thermal_resume(struct acpi_device *device);
86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); 86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); 87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); 88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -1353,7 +1353,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
1353 return 0; 1353 return 0;
1354} 1354}
1355 1355
1356static int acpi_thermal_resume(struct acpi_device *device, int state) 1356static int acpi_thermal_resume(struct acpi_device *device)
1357{ 1357{
1358 struct acpi_thermal *tz = NULL; 1358 struct acpi_thermal *tz = NULL;
1359 int i; 1359 int i;
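
The thermal driver's resume callback loses its unused state argument; the prototype now takes only the acpi_device pointer. A hedged sketch of what a handler looks like under the new signature (the body and context handling are illustrative, not the thermal driver's actual code):

#include <linux/errno.h>
#include <acpi/acpi_bus.h>

static int example_acpi_resume(struct acpi_device *device)
{
        void *ctx;

        if (!device)
                return -EINVAL;

        ctx = acpi_driver_data(device); /* per-device data stored at add() time */
        if (!ctx)
                return -EINVAL;

        /* re-program hardware state from the saved context here */
        return 0;
}
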
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index f6cbc0b1bfd0..55a764807499 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdebug.h>
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utalloc") 48ACPI_MODULE_NAME("utalloc")
@@ -142,6 +143,14 @@ acpi_status acpi_ut_create_caches(void)
142 143
143acpi_status acpi_ut_delete_caches(void) 144acpi_status acpi_ut_delete_caches(void)
144{ 145{
146#ifdef ACPI_DBG_TRACK_ALLOCATIONS
147 char buffer[7];
148
149 if (acpi_gbl_display_final_mem_stats) {
150 ACPI_STRCPY(buffer, "MEMORY");
151 acpi_db_display_statistics(buffer);
152 }
153#endif
145 154
146 (void)acpi_os_delete_cache(acpi_gbl_namespace_cache); 155 (void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
147 acpi_gbl_namespace_cache = NULL; 156 acpi_gbl_namespace_cache = NULL;
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 1a1f8109159c..870f6edeb5f2 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -289,6 +289,14 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
289 289
290 ACPI_MEM_TRACKING(cache->total_allocated++); 290 ACPI_MEM_TRACKING(cache->total_allocated++);
291 291
292#ifdef ACPI_DBG_TRACK_ALLOCATIONS
293 if ((cache->total_allocated - cache->total_freed) >
294 cache->max_occupied) {
295 cache->max_occupied =
296 cache->total_allocated - cache->total_freed;
297 }
298#endif
299
292 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ 300 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
293 301
294 status = acpi_ut_release_mutex(ACPI_MTX_CACHES); 302 status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
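
The utcache.c change records a high-water mark for the object caches when allocation tracking is compiled in: on every acquire, if the number of outstanding objects (allocated minus freed) exceeds the recorded maximum, the maximum is updated. The same pattern in isolation, with illustrative names:

struct cache_stats {
        unsigned long total_allocated;
        unsigned long total_freed;
        unsigned long max_occupied;     /* high-water mark of live objects */
};

static void note_allocation(struct cache_stats *s)
{
        unsigned long occupied;

        s->total_allocated++;
        occupied = s->total_allocated - s->total_freed;
        if (occupied > s->max_occupied)
                s->max_occupied = occupied;
}
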
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 5e1a80d1bc36..84d529db0a66 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -719,6 +719,15 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
719 acpi_ut_add_reference(source_desc->reference.object); 719 acpi_ut_add_reference(source_desc->reference.object);
720 break; 720 break;
721 721
722 case ACPI_TYPE_REGION:
723 /*
724 * We copied the Region Handler, so we now must add a reference
725 */
726 if (dest_desc->region.handler) {
727 acpi_ut_add_reference(dest_desc->region.handler);
728 }
729 break;
730
722 default: 731 default:
723 /* Nothing to do for other simple objects */ 732 /* Nothing to do for other simple objects */
724 break; 733 break;
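
utcopy.c gains a case for ACPI_TYPE_REGION: a copied region object shares the original's address-space handler, so the handler's reference count must be bumped or later teardown would drop it one time too many. The general rule, reduced to a sketch with illustrative types:

/* After a shallow copy, any shared, reference-counted sub-object
 * needs its count bumped so both owners can release it safely. */
struct handler {
        int refcount;
};

struct region {
        struct handler *handler;        /* may be shared between regions */
};

static void copy_region(struct region *dst, const struct region *src)
{
        *dst = *src;                    /* shallow copy shares the handler */
        if (dst->handler)
                dst->handler->refcount++;
}
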
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 9e9054e155c1..61ad4f2daee2 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -181,8 +181,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
181 if (ACPI_LV_THREADS & acpi_dbg_level) { 181 if (ACPI_LV_THREADS & acpi_dbg_level) {
182 acpi_os_printf 182 acpi_os_printf
183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n", 183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
184 (unsigned long) acpi_gbl_prev_thread_id, 184 (unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id);
185 (unsigned long) thread_id);
186 } 185 }
187 186
188 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
@@ -195,7 +194,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
195 acpi_os_printf("%8s-%04ld ", module_name, line_number); 194 acpi_os_printf("%8s-%04ld ", module_name, line_number);
196 195
197 if (ACPI_LV_THREADS & acpi_dbg_level) { 196 if (ACPI_LV_THREADS & acpi_dbg_level) {
198 acpi_os_printf("[%04lX] ", thread_id); 197 acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
199 } 198 }
200 199
201 acpi_os_printf("[%02ld] %-22.22s: ", 200 acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 9d3f1149ba21..f777cebdc46d 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -158,16 +158,20 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
158 "***** Mutex %p, OS Mutex %p\n", 158 "***** Mutex %p, OS Mutex %p\n",
159 object, object->mutex.os_mutex)); 159 object, object->mutex.os_mutex));
160 160
161 if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) { 161 if (object->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
162 acpi_ex_unlink_mutex(object); 162
163 acpi_os_delete_mutex(object->mutex.os_mutex); 163 /* Global Lock has extra semaphore */
164 } else {
165 /* Global Lock "mutex" is actually a counting semaphore */
166 164
167 (void) 165 (void)
168 acpi_os_delete_semaphore 166 acpi_os_delete_semaphore
169 (acpi_gbl_global_lock_semaphore); 167 (acpi_gbl_global_lock_semaphore);
170 acpi_gbl_global_lock_semaphore = NULL; 168 acpi_gbl_global_lock_semaphore = NULL;
169
170 acpi_os_delete_mutex(object->mutex.os_mutex);
171 acpi_gbl_global_lock_mutex = NULL;
172 } else {
173 acpi_ex_unlink_mutex(object);
174 acpi_os_delete_mutex(object->mutex.os_mutex);
171 } 175 }
172 break; 176 break;
173 177
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index d6d7121583c0..13d5879cd98b 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index 014030af8b50..af33358a964b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,89 +46,9 @@
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include <acpi/acnamesp.h> 47#include <acpi/acnamesp.h>
48 48
49ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utglobal") 51 ACPI_MODULE_NAME("utglobal")
51
52/*******************************************************************************
53 *
54 * FUNCTION: acpi_format_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. A valid pointer is
59 * always returned.
60 *
61 * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
62 *
63 ******************************************************************************/
64const char *acpi_format_exception(acpi_status status)
65{
66 acpi_status sub_status;
67 const char *exception = NULL;
68
69 ACPI_FUNCTION_ENTRY();
70
71 /*
72 * Status is composed of two parts, a "type" and an actual code
73 */
74 sub_status = (status & ~AE_CODE_MASK);
75
76 switch (status & AE_CODE_MASK) {
77 case AE_CODE_ENVIRONMENTAL:
78
79 if (sub_status <= AE_CODE_ENV_MAX) {
80 exception = acpi_gbl_exception_names_env[sub_status];
81 }
82 break;
83
84 case AE_CODE_PROGRAMMER:
85
86 if (sub_status <= AE_CODE_PGM_MAX) {
87 exception =
88 acpi_gbl_exception_names_pgm[sub_status - 1];
89 }
90 break;
91
92 case AE_CODE_ACPI_TABLES:
93
94 if (sub_status <= AE_CODE_TBL_MAX) {
95 exception =
96 acpi_gbl_exception_names_tbl[sub_status - 1];
97 }
98 break;
99
100 case AE_CODE_AML:
101
102 if (sub_status <= AE_CODE_AML_MAX) {
103 exception =
104 acpi_gbl_exception_names_aml[sub_status - 1];
105 }
106 break;
107
108 case AE_CODE_CONTROL:
109
110 if (sub_status <= AE_CODE_CTRL_MAX) {
111 exception =
112 acpi_gbl_exception_names_ctrl[sub_status - 1];
113 }
114 break;
115
116 default:
117 break;
118 }
119
120 if (!exception) {
121
122 /* Exception code was not recognized */
123
124 ACPI_ERROR((AE_INFO,
125 "Unknown exception code: 0x%8.8X", status));
126
127 exception = "UNKNOWN_STATUS_CODE";
128 }
129
130 return (ACPI_CAST_PTR(const char, exception));
131}
132 52
133/******************************************************************************* 53/*******************************************************************************
134 * 54 *
@@ -163,8 +83,6 @@ u32 acpi_gbl_startup_flags = 0;
163 83
164u8 acpi_gbl_shutdown = TRUE; 84u8 acpi_gbl_shutdown = TRUE;
165 85
166const u8 acpi_gbl_decode_to8bit[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
167
168const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = { 86const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
169 "\\_S0_", 87 "\\_S0_",
170 "\\_S1_", 88 "\\_S1_",
@@ -183,10 +101,45 @@ const char *acpi_gbl_highest_dstate_names[4] = {
183 101
184/******************************************************************************* 102/*******************************************************************************
185 * 103 *
186 * Namespace globals 104 * FUNCTION: acpi_format_exception
105 *
106 * PARAMETERS: Status - The acpi_status code to be formatted
107 *
108 * RETURN: A string containing the exception text. A valid pointer is
109 * always returned.
110 *
111 * DESCRIPTION: This function translates an ACPI exception into an ASCII string
112 * It is here instead of utxface.c so it is always present.
187 * 113 *
188 ******************************************************************************/ 114 ******************************************************************************/
189 115
116const char *acpi_format_exception(acpi_status status)
117{
118 const char *exception = NULL;
119
120 ACPI_FUNCTION_ENTRY();
121
122 exception = acpi_ut_validate_exception(status);
123 if (!exception) {
124
125 /* Exception code was not recognized */
126
127 ACPI_ERROR((AE_INFO,
128 "Unknown exception code: 0x%8.8X", status));
129
130 exception = "UNKNOWN_STATUS_CODE";
131 }
132
133 return (ACPI_CAST_PTR(const char, exception));
134}
135
136ACPI_EXPORT_SYMBOL(acpi_format_exception)
137
138/*******************************************************************************
139 *
140 * Namespace globals
141 *
142 ******************************************************************************/
190/* 143/*
191 * Predefined ACPI Names (Built-in to the Interpreter) 144 * Predefined ACPI Names (Built-in to the Interpreter)
192 * 145 *
@@ -280,53 +233,6 @@ char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
280 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]); 233 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
281} 234}
282 235
283/*******************************************************************************
284 *
285 * Table name globals
286 *
287 * NOTE: This table includes ONLY the ACPI tables that the subsystem consumes.
288 * it is NOT an exhaustive list of all possible ACPI tables. All ACPI tables
289 * that are not used by the subsystem are simply ignored.
290 *
291 * Do NOT add any table to this list that is not consumed directly by this
292 * subsystem (No MADT, ECDT, SBST, etc.)
293 *
294 ******************************************************************************/
295
296struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
297
298struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1] = {
299 /*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */
300
301 /* RSDP 0 */ {RSDP_NAME, RSDP_SIG, NULL, sizeof(RSDP_SIG) - 1,
302 ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}
303 ,
304 /* DSDT 1 */ {DSDT_SIG, DSDT_SIG, (void *)&acpi_gbl_DSDT,
305 sizeof(DSDT_SIG) - 1,
306 ACPI_TABLE_SECONDARY | ACPI_TABLE_SINGLE |
307 ACPI_TABLE_EXECUTABLE}
308 ,
309 /* FADT 2 */ {FADT_SIG, FADT_SIG, (void *)&acpi_gbl_FADT,
310 sizeof(FADT_SIG) - 1,
311 ACPI_TABLE_PRIMARY | ACPI_TABLE_SINGLE}
312 ,
313 /* FACS 3 */ {FACS_SIG, FACS_SIG, (void *)&acpi_gbl_FACS,
314 sizeof(FACS_SIG) - 1,
315 ACPI_TABLE_SECONDARY | ACPI_TABLE_SINGLE}
316 ,
317 /* PSDT 4 */ {PSDT_SIG, PSDT_SIG, NULL, sizeof(PSDT_SIG) - 1,
318 ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE |
319 ACPI_TABLE_EXECUTABLE}
320 ,
321 /* SSDT 5 */ {SSDT_SIG, SSDT_SIG, NULL, sizeof(SSDT_SIG) - 1,
322 ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE |
323 ACPI_TABLE_EXECUTABLE}
324 ,
325 /* XSDT 6 */ {XSDT_SIG, XSDT_SIG, NULL, sizeof(RSDT_SIG) - 1,
326 ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}
327 ,
328};
329
330/****************************************************************************** 236/******************************************************************************
331 * 237 *
332 * Event and Hardware globals 238 * Event and Hardware globals
@@ -612,7 +518,7 @@ char *acpi_ut_get_node_name(void *object)
612 /* Name must be a valid ACPI name */ 518 /* Name must be a valid ACPI name */
613 519
614 if (!acpi_ut_valid_acpi_name(node->name.integer)) { 520 if (!acpi_ut_valid_acpi_name(node->name.integer)) {
615 node->name.integer = acpi_ut_repair_name(node->name.integer); 521 node->name.integer = acpi_ut_repair_name(node->name.ascii);
616 } 522 }
617 523
618 /* Return the name */ 524 /* Return the name */
@@ -751,13 +657,6 @@ void acpi_ut_init_globals(void)
751 return; 657 return;
752 } 658 }
753 659
754 /* ACPI table structure */
755
756 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
757 acpi_gbl_table_lists[i].next = NULL;
758 acpi_gbl_table_lists[i].count = 0;
759 }
760
761 /* Mutex locked flags */ 660 /* Mutex locked flags */
762 661
763 for (i = 0; i < ACPI_NUM_MUTEX; i++) { 662 for (i = 0; i < ACPI_NUM_MUTEX; i++) {
@@ -773,6 +672,7 @@ void acpi_ut_init_globals(void)
773 672
774 /* GPE support */ 673 /* GPE support */
775 674
675 acpi_gpe_count = 0;
776 acpi_gbl_gpe_xrupt_list_head = NULL; 676 acpi_gbl_gpe_xrupt_list_head = NULL;
777 acpi_gbl_gpe_fadt_blocks[0] = NULL; 677 acpi_gbl_gpe_fadt_blocks[0] = NULL;
778 acpi_gbl_gpe_fadt_blocks[1] = NULL; 678 acpi_gbl_gpe_fadt_blocks[1] = NULL;
@@ -784,25 +684,15 @@ void acpi_ut_init_globals(void)
784 acpi_gbl_exception_handler = NULL; 684 acpi_gbl_exception_handler = NULL;
785 acpi_gbl_init_handler = NULL; 685 acpi_gbl_init_handler = NULL;
786 686
787 /* Global "typed" ACPI table pointers */
788
789 acpi_gbl_RSDP = NULL;
790 acpi_gbl_XSDT = NULL;
791 acpi_gbl_FACS = NULL;
792 acpi_gbl_FADT = NULL;
793 acpi_gbl_DSDT = NULL;
794
795 /* Global Lock support */ 687 /* Global Lock support */
796 688
797 acpi_gbl_global_lock_semaphore = NULL; 689 acpi_gbl_global_lock_semaphore = NULL;
690 acpi_gbl_global_lock_mutex = NULL;
798 acpi_gbl_global_lock_acquired = FALSE; 691 acpi_gbl_global_lock_acquired = FALSE;
799 acpi_gbl_global_lock_thread_count = 0;
800 acpi_gbl_global_lock_handle = 0; 692 acpi_gbl_global_lock_handle = 0;
801 693
802 /* Miscellaneous variables */ 694 /* Miscellaneous variables */
803 695
804 acpi_gbl_table_flags = ACPI_PHYSICAL_POINTER;
805 acpi_gbl_rsdp_original_location = 0;
806 acpi_gbl_cm_single_step = FALSE; 696 acpi_gbl_cm_single_step = FALSE;
807 acpi_gbl_db_terminate_threads = FALSE; 697 acpi_gbl_db_terminate_threads = FALSE;
808 acpi_gbl_shutdown = FALSE; 698 acpi_gbl_shutdown = FALSE;
@@ -837,8 +727,13 @@ void acpi_ut_init_globals(void)
837 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX; 727 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX;
838#endif 728#endif
839 729
730#ifdef ACPI_DBG_TRACK_ALLOCATIONS
731 acpi_gbl_display_final_mem_stats = FALSE;
732#endif
733
840 return_VOID; 734 return_VOID;
841} 735}
842 736
843ACPI_EXPORT_SYMBOL(acpi_dbg_level) 737ACPI_EXPORT_SYMBOL(acpi_dbg_level)
844ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 738ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
739ACPI_EXPORT_SYMBOL(acpi_gpe_count)
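
acpi_format_exception no longer decodes the status word itself; the table lookup moves into acpi_ut_validate_exception (added to utmisc.c below), which returns NULL for an unrecognized code, and the formatter substitutes "UNKNOWN_STATUS_CODE" so callers always get a printable string. The split, reduced to its essentials (the lookup table here is a stand-in, not the real exception tables):

#include <stdio.h>

static const char *const env_names[] = { "AE_OK", "AE_ERROR", "AE_NO_ACPI_TABLES" };

/* Returns NULL when the code is outside the known range. */
static const char *validate_exception(unsigned int status)
{
        if (status < sizeof(env_names) / sizeof(env_names[0]))
                return env_names[status];
        return NULL;
}

/* Never returns NULL: unknown codes are reported and given a fallback. */
static const char *format_exception(unsigned int status)
{
        const char *name = validate_exception(status);

        if (!name) {
                fprintf(stderr, "Unknown exception code: 0x%8.8X\n", status);
                name = "UNKNOWN_STATUS_CODE";
        }
        return name;
}
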
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
index ff76055eb7d6..ad3c0d0a5cf8 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/utilities/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,119 +44,14 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acevents.h> 46#include <acpi/acevents.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utinit") 50ACPI_MODULE_NAME("utinit")
50 51
51/* Local prototypes */ 52/* Local prototypes */
52static void
53acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset);
54
55static void acpi_ut_terminate(void); 53static void acpi_ut_terminate(void);
56 54
57/*******************************************************************************
58 *
59 * FUNCTION: acpi_ut_fadt_register_error
60 *
61 * PARAMETERS: register_name - Pointer to string identifying register
62 * Value - Actual register contents value
63 * Offset - Byte offset in the FADT
64 *
65 * RETURN: AE_BAD_VALUE
66 *
67 * DESCRIPTION: Display failure message
68 *
69 ******************************************************************************/
70
71static void
72acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset)
73{
74
75 ACPI_WARNING((AE_INFO,
76 "Invalid FADT value %s=%X at offset %X FADT=%p",
77 register_name, value, offset, acpi_gbl_FADT));
78}
79
80/******************************************************************************
81 *
82 * FUNCTION: acpi_ut_validate_fadt
83 *
84 * PARAMETERS: None
85 *
86 * RETURN: Status
87 *
88 * DESCRIPTION: Validate various ACPI registers in the FADT
89 *
90 ******************************************************************************/
91
92acpi_status acpi_ut_validate_fadt(void)
93{
94
95 /*
96 * Verify Fixed ACPI Description Table fields,
97 * but don't abort on any problems, just display error
98 */
99 if (acpi_gbl_FADT->pm1_evt_len < 4) {
100 acpi_ut_fadt_register_error("PM1_EVT_LEN",
101 (u32) acpi_gbl_FADT->pm1_evt_len,
102 ACPI_FADT_OFFSET(pm1_evt_len));
103 }
104
105 if (!acpi_gbl_FADT->pm1_cnt_len) {
106 acpi_ut_fadt_register_error("PM1_CNT_LEN", 0,
107 ACPI_FADT_OFFSET(pm1_cnt_len));
108 }
109
110 if (!acpi_gbl_FADT->xpm1a_evt_blk.address) {
111 acpi_ut_fadt_register_error("X_PM1a_EVT_BLK", 0,
112 ACPI_FADT_OFFSET(xpm1a_evt_blk.
113 address));
114 }
115
116 if (!acpi_gbl_FADT->xpm1a_cnt_blk.address) {
117 acpi_ut_fadt_register_error("X_PM1a_CNT_BLK", 0,
118 ACPI_FADT_OFFSET(xpm1a_cnt_blk.
119 address));
120 }
121
122 if (!acpi_gbl_FADT->xpm_tmr_blk.address) {
123 acpi_ut_fadt_register_error("X_PM_TMR_BLK", 0,
124 ACPI_FADT_OFFSET(xpm_tmr_blk.
125 address));
126 }
127
128 if ((acpi_gbl_FADT->xpm2_cnt_blk.address &&
129 !acpi_gbl_FADT->pm2_cnt_len)) {
130 acpi_ut_fadt_register_error("PM2_CNT_LEN",
131 (u32) acpi_gbl_FADT->pm2_cnt_len,
132 ACPI_FADT_OFFSET(pm2_cnt_len));
133 }
134
135 if (acpi_gbl_FADT->pm_tm_len < 4) {
136 acpi_ut_fadt_register_error("PM_TM_LEN",
137 (u32) acpi_gbl_FADT->pm_tm_len,
138 ACPI_FADT_OFFSET(pm_tm_len));
139 }
140
141 /* Length of GPE blocks must be a multiple of 2 */
142
143 if (acpi_gbl_FADT->xgpe0_blk.address &&
144 (acpi_gbl_FADT->gpe0_blk_len & 1)) {
145 acpi_ut_fadt_register_error("(x)GPE0_BLK_LEN",
146 (u32) acpi_gbl_FADT->gpe0_blk_len,
147 ACPI_FADT_OFFSET(gpe0_blk_len));
148 }
149
150 if (acpi_gbl_FADT->xgpe1_blk.address &&
151 (acpi_gbl_FADT->gpe1_blk_len & 1)) {
152 acpi_ut_fadt_register_error("(x)GPE1_BLK_LEN",
153 (u32) acpi_gbl_FADT->gpe1_blk_len,
154 ACPI_FADT_OFFSET(gpe1_blk_len));
155 }
156
157 return (AE_OK);
158}
159
160/****************************************************************************** 55/******************************************************************************
161 * 56 *
162 * FUNCTION: acpi_ut_terminate 57 * FUNCTION: acpi_ut_terminate
@@ -178,7 +73,6 @@ static void acpi_ut_terminate(void)
178 73
179 ACPI_FUNCTION_TRACE(ut_terminate); 74 ACPI_FUNCTION_TRACE(ut_terminate);
180 75
181 /* Free global tables, etc. */
182 /* Free global GPE blocks and related info structures */ 76 /* Free global GPE blocks and related info structures */
183 77
184 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; 78 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
@@ -239,6 +133,10 @@ void acpi_ut_subsystem_shutdown(void)
239 133
240 acpi_ns_terminate(); 134 acpi_ns_terminate();
241 135
136 /* Delete the ACPI tables */
137
138 acpi_tb_terminate();
139
242 /* Close the globals */ 140 /* Close the globals */
243 141
244 acpi_ut_terminate(); 142 acpi_ut_terminate();
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
index 19d74bedce27..0c56a0d20b29 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/utilities/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 6d8a8211be90..50133fffe420 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,78 @@ ACPI_MODULE_NAME("utmisc")
51 51
52/******************************************************************************* 52/*******************************************************************************
53 * 53 *
54 * FUNCTION: acpi_ut_validate_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. NULL if exception is
59 * not valid.
60 *
61 * DESCRIPTION: This function validates and translates an ACPI exception into
62 * an ASCII string.
63 *
64 ******************************************************************************/
65const char *acpi_ut_validate_exception(acpi_status status)
66{
67 acpi_status sub_status;
68 const char *exception = NULL;
69
70 ACPI_FUNCTION_ENTRY();
71
72 /*
73 * Status is composed of two parts, a "type" and an actual code
74 */
75 sub_status = (status & ~AE_CODE_MASK);
76
77 switch (status & AE_CODE_MASK) {
78 case AE_CODE_ENVIRONMENTAL:
79
80 if (sub_status <= AE_CODE_ENV_MAX) {
81 exception = acpi_gbl_exception_names_env[sub_status];
82 }
83 break;
84
85 case AE_CODE_PROGRAMMER:
86
87 if (sub_status <= AE_CODE_PGM_MAX) {
88 exception =
89 acpi_gbl_exception_names_pgm[sub_status - 1];
90 }
91 break;
92
93 case AE_CODE_ACPI_TABLES:
94
95 if (sub_status <= AE_CODE_TBL_MAX) {
96 exception =
97 acpi_gbl_exception_names_tbl[sub_status - 1];
98 }
99 break;
100
101 case AE_CODE_AML:
102
103 if (sub_status <= AE_CODE_AML_MAX) {
104 exception =
105 acpi_gbl_exception_names_aml[sub_status - 1];
106 }
107 break;
108
109 case AE_CODE_CONTROL:
110
111 if (sub_status <= AE_CODE_CTRL_MAX) {
112 exception =
113 acpi_gbl_exception_names_ctrl[sub_status - 1];
114 }
115 break;
116
117 default:
118 break;
119 }
120
121 return (ACPI_CAST_PTR(const char, exception));
122}
123
124/*******************************************************************************
125 *
54 * FUNCTION: acpi_ut_is_aml_table 126 * FUNCTION: acpi_ut_is_aml_table
55 * 127 *
56 * PARAMETERS: Table - An ACPI table 128 * PARAMETERS: Table - An ACPI table
@@ -62,14 +134,15 @@ ACPI_MODULE_NAME("utmisc")
62 * data tables that do not contain AML code. 134 * data tables that do not contain AML code.
63 * 135 *
64 ******************************************************************************/ 136 ******************************************************************************/
137
65u8 acpi_ut_is_aml_table(struct acpi_table_header *table) 138u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
66{ 139{
67 140
68 /* These are the only tables that contain executable AML */ 141 /* These are the only tables that contain executable AML */
69 142
70 if (ACPI_COMPARE_NAME(table->signature, DSDT_SIG) || 143 if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
71 ACPI_COMPARE_NAME(table->signature, PSDT_SIG) || 144 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
72 ACPI_COMPARE_NAME(table->signature, SSDT_SIG)) { 145 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
73 return (TRUE); 146 return (TRUE);
74 } 147 }
75 148
@@ -418,7 +491,7 @@ u32 acpi_ut_dword_byte_swap(u32 value)
418void acpi_ut_set_integer_width(u8 revision) 491void acpi_ut_set_integer_width(u8 revision)
419{ 492{
420 493
421 if (revision <= 1) { 494 if (revision < 2) {
422 495
423 /* 32-bit case */ 496 /* 32-bit case */
424 497
@@ -582,26 +655,25 @@ u8 acpi_ut_valid_acpi_name(u32 name)
582 * 655 *
583 ******************************************************************************/ 656 ******************************************************************************/
584 657
585acpi_name acpi_ut_repair_name(acpi_name name) 658acpi_name acpi_ut_repair_name(char *name)
586{ 659{
587 char *name_ptr = ACPI_CAST_PTR(char, &name);
588 char new_name[ACPI_NAME_SIZE];
589 acpi_native_uint i; 660 acpi_native_uint i;
661 char new_name[ACPI_NAME_SIZE];
590 662
591 for (i = 0; i < ACPI_NAME_SIZE; i++) { 663 for (i = 0; i < ACPI_NAME_SIZE; i++) {
592 new_name[i] = name_ptr[i]; 664 new_name[i] = name[i];
593 665
594 /* 666 /*
595 * Replace a bad character with something printable, yet technically 667 * Replace a bad character with something printable, yet technically
596 * still invalid. This prevents any collisions with existing "good" 668 * still invalid. This prevents any collisions with existing "good"
597 * names in the namespace. 669 * names in the namespace.
598 */ 670 */
599 if (!acpi_ut_valid_acpi_char(name_ptr[i], i)) { 671 if (!acpi_ut_valid_acpi_char(name[i], i)) {
600 new_name[i] = '*'; 672 new_name[i] = '*';
601 } 673 }
602 } 674 }
603 675
604 return (*ACPI_CAST_PTR(u32, new_name)); 676 return (*(u32 *) new_name);
605} 677}
606 678
607/******************************************************************************* 679/*******************************************************************************
@@ -996,9 +1068,13 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
996{ 1068{
997 va_list args; 1069 va_list args;
998 1070
999 acpi_os_printf("ACPI (%s-%04d): ", module_name, line_number); 1071 /*
1072 * Removed module_name, line_number, and acpica version, not needed
1073 * for info output
1074 */
1075 acpi_os_printf("ACPI: ");
1000 1076
1001 va_start(args, format); 1077 va_start(args, format);
1002 acpi_os_vprintf(format, args); 1078 acpi_os_vprintf(format, args);
1003 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1079 acpi_os_printf("\n");
1004} 1080}
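
In utmisc.c, acpi_ut_repair_name now takes the name as a char pointer rather than a packed acpi_name value, and acpi_ut_info drops the module name, line number and ACPICA version from its output. The repair itself is unchanged: each of the four name bytes that is not a valid ACPI name character is replaced with '*', and the result is returned packed into 32 bits. A stand-alone version of that transform (valid_acpi_char mirrors the rule acpi_ut_valid_acpi_char enforces: uppercase letters, underscore, or digits in any position but the first):

#include <stdint.h>
#include <string.h>

static int valid_acpi_char(char c, int position)
{
        if (c >= 'A' && c <= 'Z')
                return 1;
        if (c == '_')
                return 1;
        return position != 0 && c >= '0' && c <= '9';
}

static uint32_t repair_name(const char name[4])
{
        char fixed[4];
        uint32_t packed;
        int i;

        for (i = 0; i < 4; i++)
                fixed[i] = valid_acpi_char(name[i], i) ? name[i] : '*';

        memcpy(&packed, fixed, sizeof(packed)); /* same packing as *(u32 *)new_name */
        return packed;
}
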
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index 180e73ceb6e2..cbad2ef5987d 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index ba7d8ac702df..4696124759e1 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index 5a2de92831d3..e8fe1ba6cc24 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
index eaa13d05c859..edcaafad0a31 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/utilities/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index 3538f69c82a1..de3276f4f468 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -67,6 +67,7 @@ acpi_status acpi_initialize_subsystem(void)
67 67
68 ACPI_FUNCTION_TRACE(acpi_initialize_subsystem); 68 ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
69 69
70 acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
70 ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace()); 71 ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
71 72
72 /* Initialize the OS-Dependent layer */ 73 /* Initialize the OS-Dependent layer */
@@ -127,20 +128,6 @@ acpi_status acpi_enable_subsystem(u32 flags)
127 128
128 ACPI_FUNCTION_TRACE(acpi_enable_subsystem); 129 ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
129 130
130 /*
131 * We must initialize the hardware before we can enable ACPI.
132 * The values from the FADT are validated here.
133 */
134 if (!(flags & ACPI_NO_HARDWARE_INIT)) {
135 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
136 "[Init] Initializing ACPI hardware\n"));
137
138 status = acpi_hw_initialize();
139 if (ACPI_FAILURE(status)) {
140 return_ACPI_STATUS(status);
141 }
142 }
143
144 /* Enable ACPI mode */ 131 /* Enable ACPI mode */
145 132
146 if (!(flags & ACPI_NO_ACPI_ENABLE)) { 133 if (!(flags & ACPI_NO_ACPI_ENABLE)) {
@@ -398,7 +385,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
398{ 385{
399 struct acpi_system_info *info_ptr; 386 struct acpi_system_info *info_ptr;
400 acpi_status status; 387 acpi_status status;
401 u32 i;
402 388
403 ACPI_FUNCTION_TRACE(acpi_get_system_info); 389 ACPI_FUNCTION_TRACE(acpi_get_system_info);
404 390
@@ -431,9 +417,7 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
431 417
432 /* Timer resolution - 24 or 32 bits */ 418 /* Timer resolution - 24 or 32 bits */
433 419
434 if (!acpi_gbl_FADT) { 420 if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
435 info_ptr->timer_resolution = 0;
436 } else if (acpi_gbl_FADT->tmr_val_ext == 0) {
437 info_ptr->timer_resolution = 24; 421 info_ptr->timer_resolution = 24;
438 } else { 422 } else {
439 info_ptr->timer_resolution = 32; 423 info_ptr->timer_resolution = 32;
@@ -449,13 +433,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
449 info_ptr->debug_layer = acpi_dbg_layer; 433 info_ptr->debug_layer = acpi_dbg_layer;
450 info_ptr->debug_level = acpi_dbg_level; 434 info_ptr->debug_level = acpi_dbg_level;
451 435
452 /* Current status of the ACPI tables, per table type */
453
454 info_ptr->num_table_types = ACPI_TABLE_ID_MAX + 1;
455 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
456 info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count;
457 }
458
459 return_ACPI_STATUS(AE_OK); 436 return_ACPI_STATUS(AE_OK);
460} 437}
461 438
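
The utxface.c hunks reflect the table-manager conversion: acpi_gbl_FADT is now a statically allocated struct rather than a pointer, so the NULL guard disappears and fixed-feature bits are tested directly against its flags word, and the per-type table counts are dropped because the old acpi_gbl_table_lists bookkeeping no longer exists. A small sketch of the pointer-to-instance difference with placeholder types:

#include <stdint.h>

struct example_fadt {
        uint32_t flags;
};

/* Old style: a pointer that may still be NULL early in boot. */
static struct example_fadt *fadt_ptr;

/* New style: a static instance, readable without a NULL check. */
static struct example_fadt fadt;

static int has_feature_old(uint32_t bit)
{
        return fadt_ptr && (fadt_ptr->flags & bit);
}

static int has_feature_new(uint32_t bit)
{
        return (fadt.flags & bit) != 0;
}
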
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 3d54680d0333..e0b97add8c63 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34 34
35#include <linux/backlight.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36 37
37#include <acpi/acpi_bus.h> 38#include <acpi/acpi_bus.h>
@@ -56,6 +57,12 @@
56 57
57#define ACPI_VIDEO_HEAD_INVALID (~0u - 1) 58#define ACPI_VIDEO_HEAD_INVALID (~0u - 1)
58#define ACPI_VIDEO_HEAD_END (~0u) 59#define ACPI_VIDEO_HEAD_END (~0u)
60#define MAX_NAME_LEN 20
61
62#define ACPI_VIDEO_DISPLAY_CRT 1
63#define ACPI_VIDEO_DISPLAY_TV 2
64#define ACPI_VIDEO_DISPLAY_DVI 3
65#define ACPI_VIDEO_DISPLAY_LCD 4
59 66
60#define _COMPONENT ACPI_VIDEO_COMPONENT 67#define _COMPONENT ACPI_VIDEO_COMPONENT
61ACPI_MODULE_NAME("acpi_video") 68ACPI_MODULE_NAME("acpi_video")
@@ -66,16 +73,14 @@ MODULE_LICENSE("GPL");
66 73
67static int acpi_video_bus_add(struct acpi_device *device); 74static int acpi_video_bus_add(struct acpi_device *device);
68static int acpi_video_bus_remove(struct acpi_device *device, int type); 75static int acpi_video_bus_remove(struct acpi_device *device, int type);
69static int acpi_video_bus_match(struct acpi_device *device,
70 struct acpi_driver *driver);
71 76
72static struct acpi_driver acpi_video_bus = { 77static struct acpi_driver acpi_video_bus = {
73 .name = ACPI_VIDEO_DRIVER_NAME, 78 .name = ACPI_VIDEO_DRIVER_NAME,
74 .class = ACPI_VIDEO_CLASS, 79 .class = ACPI_VIDEO_CLASS,
80 .ids = ACPI_VIDEO_HID,
75 .ops = { 81 .ops = {
76 .add = acpi_video_bus_add, 82 .add = acpi_video_bus_add,
77 .remove = acpi_video_bus_remove, 83 .remove = acpi_video_bus_remove,
78 .match = acpi_video_bus_match,
79 }, 84 },
80}; 85};
81 86
@@ -133,20 +138,21 @@ struct acpi_video_device_flags {
133 u8 crt:1; 138 u8 crt:1;
134 u8 lcd:1; 139 u8 lcd:1;
135 u8 tvout:1; 140 u8 tvout:1;
141 u8 dvi:1;
136 u8 bios:1; 142 u8 bios:1;
137 u8 unknown:1; 143 u8 unknown:1;
138 u8 reserved:3; 144 u8 reserved:2;
139}; 145};
140 146
141struct acpi_video_device_cap { 147struct acpi_video_device_cap {
142 u8 _ADR:1; /*Return the unique ID */ 148 u8 _ADR:1; /*Return the unique ID */
143 u8 _BCL:1; /*Query list of brightness control levels supported */ 149 u8 _BCL:1; /*Query list of brightness control levels supported */
144 u8 _BCM:1; /*Set the brightness level */ 150 u8 _BCM:1; /*Set the brightness level */
151 u8 _BQC:1; /* Get current brightness level */
145 u8 _DDC:1; /*Return the EDID for this device */ 152 u8 _DDC:1; /*Return the EDID for this device */
146 u8 _DCS:1; /*Return status of output device */ 153 u8 _DCS:1; /*Return status of output device */
147 u8 _DGS:1; /*Query graphics state */ 154 u8 _DGS:1; /*Query graphics state */
148 u8 _DSS:1; /*Device state set */ 155 u8 _DSS:1; /*Device state set */
149 u8 _reserved:1;
150}; 156};
151 157
152struct acpi_video_device_brightness { 158struct acpi_video_device_brightness {
@@ -163,6 +169,8 @@ struct acpi_video_device {
163 struct acpi_video_bus *video; 169 struct acpi_video_bus *video;
164 struct acpi_device *dev; 170 struct acpi_device *dev;
165 struct acpi_video_device_brightness *brightness; 171 struct acpi_video_device_brightness *brightness;
172 struct backlight_device *backlight;
173 struct backlight_properties *data;
166}; 174};
167 175
168/* bus */ 176/* bus */
@@ -257,11 +265,35 @@ static void acpi_video_device_bind(struct acpi_video_bus *video,
257 struct acpi_video_device *device); 265 struct acpi_video_device *device);
258static int acpi_video_device_enumerate(struct acpi_video_bus *video); 266static int acpi_video_device_enumerate(struct acpi_video_bus *video);
259static int acpi_video_switch_output(struct acpi_video_bus *video, int event); 267static int acpi_video_switch_output(struct acpi_video_bus *video, int event);
268static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
269 int level);
270static int acpi_video_device_lcd_get_level_current(
271 struct acpi_video_device *device,
272 unsigned long *level);
260static int acpi_video_get_next_level(struct acpi_video_device *device, 273static int acpi_video_get_next_level(struct acpi_video_device *device,
261 u32 level_current, u32 event); 274 u32 level_current, u32 event);
262static void acpi_video_switch_brightness(struct acpi_video_device *device, 275static void acpi_video_switch_brightness(struct acpi_video_device *device,
263 int event); 276 int event);
264 277
278/*backlight device sysfs support*/
279static int acpi_video_get_brightness(struct backlight_device *bd)
280{
281 unsigned long cur_level;
282 struct acpi_video_device *vd =
283 (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
284 acpi_video_device_lcd_get_level_current(vd, &cur_level);
285 return (int) cur_level;
286}
287
288static int acpi_video_set_brightness(struct backlight_device *bd)
289{
290 int request_level = bd->props->brightness;
291 struct acpi_video_device *vd =
292 (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
293 acpi_video_device_lcd_set_level(vd, request_level);
294 return 0;
295}
296
265/* -------------------------------------------------------------------------- 297/* --------------------------------------------------------------------------
266 Video Management 298 Video Management
267 -------------------------------------------------------------------------- */ 299 -------------------------------------------------------------------------- */
@@ -499,6 +531,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
499 acpi_integer status; 531 acpi_integer status;
500 acpi_handle h_dummy1; 532 acpi_handle h_dummy1;
501 int i; 533 int i;
534 u32 max_level = 0;
502 union acpi_object *obj = NULL; 535 union acpi_object *obj = NULL;
503 struct acpi_video_device_brightness *br = NULL; 536 struct acpi_video_device_brightness *br = NULL;
504 537
@@ -514,6 +547,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
514 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) { 547 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
515 device->cap._BCM = 1; 548 device->cap._BCM = 1;
516 } 549 }
550 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle,"_BQC",&h_dummy1)))
551 device->cap._BQC = 1;
517 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { 552 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
518 device->cap._DDC = 1; 553 device->cap._DDC = 1;
519 } 554 }
@@ -550,6 +585,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
550 continue; 585 continue;
551 } 586 }
552 br->levels[count] = (u32) o->integer.value; 587 br->levels[count] = (u32) o->integer.value;
588 if (br->levels[count] > max_level)
589 max_level = br->levels[count];
553 count++; 590 count++;
554 } 591 }
555 out: 592 out:
@@ -568,6 +605,37 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
568 605
569 kfree(obj); 606 kfree(obj);
570 607
608 if (device->cap._BCL && device->cap._BCM && device->cap._BQC){
609 unsigned long tmp;
610 static int count = 0;
611 char *name;
612 struct backlight_properties *acpi_video_data;
613
614 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
615 if (!name)
616 return;
617
618 acpi_video_data = kzalloc(
619 sizeof(struct backlight_properties),
620 GFP_KERNEL);
621 if (!acpi_video_data){
622 kfree(name);
623 return;
624 }
625 acpi_video_data->owner = THIS_MODULE;
626 acpi_video_data->get_brightness =
627 acpi_video_get_brightness;
628 acpi_video_data->update_status =
629 acpi_video_set_brightness;
630 sprintf(name, "acpi_video%d", count++);
631 device->data = acpi_video_data;
632 acpi_video_data->max_brightness = max_level;
633 acpi_video_device_lcd_get_level_current(device, &tmp);
634 acpi_video_data->brightness = (int)tmp;
635 device->backlight = backlight_device_register(name,
636 NULL, device, acpi_video_data);
637 kfree(name);
638 }
571 return; 639 return;
572} 640}
573 641
@@ -668,6 +736,8 @@ static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
668 seq_printf(seq, "LCD\n"); 736 seq_printf(seq, "LCD\n");
669 else if (dev->flags.tvout) 737 else if (dev->flags.tvout)
670 seq_printf(seq, "TVOUT\n"); 738 seq_printf(seq, "TVOUT\n");
739 else if (dev->flags.dvi)
740 seq_printf(seq, "DVI\n");
671 else 741 else
672 seq_printf(seq, "UNKNOWN\n"); 742 seq_printf(seq, "UNKNOWN\n");
673 743
@@ -1242,6 +1312,16 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device)
1242 -------------------------------------------------------------------------- */ 1312 -------------------------------------------------------------------------- */
1243 1313
1244/* device interface */ 1314/* device interface */
1315static struct acpi_video_device_attrib*
1316acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
1317{
1318 int count;
1319
1320 for(count = 0; count < video->attached_count; count++)
1321 if((video->attached_array[count].value.int_val & 0xffff) == device_id)
1322 return &(video->attached_array[count].value.attrib);
1323 return NULL;
1324}
1245 1325
1246static int 1326static int
1247acpi_video_bus_get_one_device(struct acpi_device *device, 1327acpi_video_bus_get_one_device(struct acpi_device *device,
@@ -1250,7 +1330,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1250 unsigned long device_id; 1330 unsigned long device_id;
1251 int status; 1331 int status;
1252 struct acpi_video_device *data; 1332 struct acpi_video_device *data;
1253 1333 struct acpi_video_device_attrib* attribute;
1254 1334
1255 if (!device || !video) 1335 if (!device || !video)
1256 return -EINVAL; 1336 return -EINVAL;
@@ -1271,20 +1351,30 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1271 data->video = video; 1351 data->video = video;
1272 data->dev = device; 1352 data->dev = device;
1273 1353
1274 switch (device_id & 0xffff) { 1354 attribute = acpi_video_get_device_attr(video, device_id);
1275 case 0x0100: 1355
1276 data->flags.crt = 1; 1356 if((attribute != NULL) && attribute->device_id_scheme) {
1277 break; 1357 switch (attribute->display_type) {
1278 case 0x0400: 1358 case ACPI_VIDEO_DISPLAY_CRT:
1279 data->flags.lcd = 1; 1359 data->flags.crt = 1;
1280 break; 1360 break;
1281 case 0x0200: 1361 case ACPI_VIDEO_DISPLAY_TV:
1282 data->flags.tvout = 1; 1362 data->flags.tvout = 1;
1283 break; 1363 break;
1284 default: 1364 case ACPI_VIDEO_DISPLAY_DVI:
1365 data->flags.dvi = 1;
1366 break;
1367 case ACPI_VIDEO_DISPLAY_LCD:
1368 data->flags.lcd = 1;
1369 break;
1370 default:
1371 data->flags.unknown = 1;
1372 break;
1373 }
1374 if(attribute->bios_can_detect)
1375 data->flags.bios = 1;
1376 } else
1285 data->flags.unknown = 1; 1377 data->flags.unknown = 1;
1286 break;
1287 }
1288 1378
1289 acpi_video_device_bind(video, data); 1379 acpi_video_device_bind(video, data);
1290 acpi_video_device_find_cap(data); 1380 acpi_video_device_find_cap(data);
@@ -1588,7 +1678,10 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1588 status = acpi_remove_notify_handler(device->dev->handle, 1678 status = acpi_remove_notify_handler(device->dev->handle,
1589 ACPI_DEVICE_NOTIFY, 1679 ACPI_DEVICE_NOTIFY,
1590 acpi_video_device_notify); 1680 acpi_video_device_notify);
1591 1681 if (device->backlight){
1682 backlight_device_unregister(device->backlight);
1683 kfree(device->data);
1684 }
1592 return 0; 1685 return 0;
1593} 1686}
1594 1687
@@ -1790,39 +1883,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
1790 return 0; 1883 return 0;
1791} 1884}
1792 1885
1793static int
1794acpi_video_bus_match(struct acpi_device *device, struct acpi_driver *driver)
1795{
1796 acpi_handle h_dummy1;
1797 acpi_handle h_dummy2;
1798 acpi_handle h_dummy3;
1799
1800
1801 if (!device || !driver)
1802 return -EINVAL;
1803
1804 /* Since there is no HID, CID for ACPI Video drivers, we have
1805 * to check well known required nodes for each feature we support.
1806 */
1807
1808 /* Does this device able to support video switching ? */
1809 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
1810 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
1811 return 0;
1812
1813 /* Does this device able to retrieve a video ROM ? */
1814 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
1815 return 0;
1816
1817 /* Does this device able to configure which video head to be POSTed ? */
1818 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
1819 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
1820 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
1821 return 0;
1822
1823 return -ENODEV;
1824}
1825
1826static int __init acpi_video_init(void) 1886static int __init acpi_video_init(void)
1827{ 1887{
1828 int result = 0; 1888 int result = 0;
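
The video driver now registers each output that implements _BCL, _BCM and _BQC with the kernel backlight class: it allocates a backlight_properties structure, wires get_brightness/update_status to the ACPI brightness methods, sets max_brightness from the largest _BCL level, and calls backlight_device_register. A condensed sketch of that registration flow against the backlight API used in the hunk above (error handling trimmed; my_get/my_set stand in for the ACPI-backed callbacks):

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/slab.h>

static int my_get(struct backlight_device *bd) { return 0; /* query hardware */ }
static int my_set(struct backlight_device *bd) { return 0; /* apply bd->props->brightness */ }

static struct backlight_device *register_example_backlight(void *devdata,
                                                            int max_level,
                                                            int cur_level)
{
        struct backlight_properties *props;

        props = kzalloc(sizeof(*props), GFP_KERNEL);
        if (!props)
                return NULL;

        props->owner          = THIS_MODULE;
        props->get_brightness = my_get;
        props->update_status  = my_set;
        props->max_brightness = max_level;
        props->brightness     = cur_level;

        /* (name, parent, devdata, properties), as in the hunk above */
        return backlight_device_register("example_bl", NULL, devdata, props);
}
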
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f1afd26a509f..a7b33d2f5991 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1802,7 +1802,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1802 return -ENODEV; 1802 return -ENODEV;
1803 } 1803 }
1804 1804
1805 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 1805 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1806 addr_space = IPMI_MEM_ADDR_SPACE; 1806 addr_space = IPMI_MEM_ADDR_SPACE;
1807 else 1807 else
1808 addr_space = IPMI_IO_ADDR_SPACE; 1808 addr_space = IPMI_IO_ADDR_SPACE;
@@ -1848,19 +1848,19 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1848 info->irq_setup = NULL; 1848 info->irq_setup = NULL;
1849 } 1849 }
1850 1850
1851 if (spmi->addr.register_bit_width) { 1851 if (spmi->addr.bit_width) {
1852 /* A (hopefully) properly formed register bit width. */ 1852 /* A (hopefully) properly formed register bit width. */
1853 info->io.regspacing = spmi->addr.register_bit_width / 8; 1853 info->io.regspacing = spmi->addr.bit_width / 8;
1854 } else { 1854 } else {
1855 info->io.regspacing = DEFAULT_REGSPACING; 1855 info->io.regspacing = DEFAULT_REGSPACING;
1856 } 1856 }
1857 info->io.regsize = info->io.regspacing; 1857 info->io.regsize = info->io.regspacing;
1858 info->io.regshift = spmi->addr.register_bit_offset; 1858 info->io.regshift = spmi->addr.bit_offset;
1859 1859
1860 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1860 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1861 info->io_setup = mem_setup; 1861 info->io_setup = mem_setup;
1862 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1862 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1863 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1863 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1864 info->io_setup = port_setup; 1864 info->io_setup = port_setup;
1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1866 } else { 1866 } else {
@@ -1888,10 +1888,8 @@ static __devinit void acpi_find_bmc(void)
1888 return; 1888 return;
1889 1889
1890 for (i = 0; ; i++) { 1890 for (i = 0; ; i++) {
1891 status = acpi_get_firmware_table("SPMI", i+1, 1891 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1892 ACPI_LOGICAL_ADDRESSING, 1892 (struct acpi_table_header **)&spmi);
1893 (struct acpi_table_header **)
1894 &spmi);
1895 if (status != AE_OK) 1893 if (status != AE_OK)
1896 return; 1894 return;
1897 1895
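
The IPMI system-interface driver is converted to the new table API: acpi_get_firmware_table("SPMI", n, ACPI_LOGICAL_ADDRESSING, ...) becomes acpi_get_table(ACPI_SIG_SPMI, n, ...), and the struct acpi_generic_address fields take their shorter names (space_id, bit_width, bit_offset). A sketch of walking every SPMI instance with the new call (the body is illustrative; instance numbering is 1-based, as in the hunk above):

#include <acpi/acpi.h>

static void scan_spmi_tables(void)
{
        struct acpi_table_header *hdr;
        acpi_status status;
        int i;

        for (i = 1; ; i++) {
                /* A failure status (e.g. AE_NOT_FOUND) ends the walk. */
                status = acpi_get_table(ACPI_SIG_SPMI, i, &hdr);
                if (ACPI_FAILURE(status))
                        break;

                /* cast hdr to the driver's SPMI layout and probe it here */
        }
}
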
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index a611972024e6..7fca5f470beb 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -372,10 +372,8 @@ static int read_log(struct tpm_bios_log *log)
372 } 372 }
373 373
374 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ 374 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
375 status = acpi_get_firmware_table(ACPI_TCPA_SIG, 1, 375 status = acpi_get_table(ACPI_SIG_TCPA, 1,
376 ACPI_LOGICAL_ADDRESSING, 376 (struct acpi_table_header **)&buff);
377 (struct acpi_table_header **)
378 &buff);
379 377
380 if (ACPI_FAILURE(status)) { 378 if (ACPI_FAILURE(status)) {
381 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n", 379 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
@@ -409,7 +407,7 @@ static int read_log(struct tpm_bios_log *log)
409 407
410 log->bios_event_log_end = log->bios_event_log + len; 408 log->bios_event_log_end = log->bios_event_log + len;
411 409
412 acpi_os_map_memory(start, len, (void *) &virt); 410 virt = acpi_os_map_memory(start, len);
413 411
414 memcpy(log->bios_event_log, virt, len); 412 memcpy(log->bios_event_log, virt, len);
415 413
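
The TPM BIOS log reader shows the other half of the interface change: acpi_os_map_memory no longer fills in a pointer through an output argument but returns the mapped virtual address directly, so the call collapses to an assignment. A simplified sketch of that usage (types abbreviated; the mapping must still be released with acpi_os_unmap_memory when done):

#include <acpi/acpi.h>
#include <linux/errno.h>
#include <linux/string.h>

static int copy_firmware_blob(acpi_physical_address start, acpi_size len, void *dst)
{
        void *virt;

        virt = acpi_os_map_memory(start, len);  /* pointer is now the return value */
        if (!virt)
                return -EIO;

        memcpy(dst, virt, len);
        acpi_os_unmap_memory(virt, len);
        return 0;
}
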
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 879250d3d069..ff8c4beaace4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA
51 If unsure say M. The compiled module will be 51 If unsure say M. The compiled module will be
52 called padlock-sha.ko 52 called padlock-sha.ko
53 53
54source "arch/s390/crypto/Kconfig"
55
54config CRYPTO_DEV_GEODE 56config CRYPTO_DEV_GEODE
55 tristate "Support for the Geode LX AES engine" 57 tristate "Support for the Geode LX AES engine"
56 depends on CRYPTO && X86_32 && PCI 58 depends on CRYPTO && X86_32 && PCI
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index c2ad72fefd9d..2b4b76e8bd72 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -26,7 +26,7 @@ setup_serial_console(struct pcdp_uart *uart)
26 static char options[64], *p = options; 26 static char options[64], *p = options;
27 char parity; 27 char parity;
28 28
29 mmio = (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY); 29 mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
30 p += sprintf(p, "console=uart,%s,0x%lx", 30 p += sprintf(p, "console=uart,%s,0x%lx",
31 mmio ? "mmio" : "io", uart->addr.address); 31 mmio ? "mmio" : "io", uart->addr.address);
32 if (uart->baud) { 32 if (uart->baud) {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ec796ad087df..850788f4dd2e 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -22,5 +22,19 @@ config HID
22 22
23 If unsure, say Y 23 If unsure, say Y
24 24
25config HID_DEBUG
26 bool "HID debugging support"
27 depends on HID
28 ---help---
29 This option lets the HID layer output diagnostics about its internal
30 state, resolve HID usages, dump HID fields, etc. Individual HID drivers
31 use this debugging facility to output information about individual HID
32 devices, etc.
33
34	  This feature is useful to those debugging either the HID parser
35	  or any HID hardware device.
36
37 If unsure, say N
38
25endmenu 39endmenu
26 40
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6432392110bf..52e97d8f3c95 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,15 +1,8 @@
1# 1#
2# Makefile for the HID driver 2# Makefile for the HID driver
3# 3#
4 4hid-objs := hid-core.o hid-input.o
5# Multipart objects.
6hid-objs := hid-core.o hid-input.o
7
8# Optional parts of multipart objects.
9 5
10obj-$(CONFIG_HID) += hid.o 6obj-$(CONFIG_HID) += hid.o
11 7hid-$(CONFIG_HID_DEBUG) += hid-debug.o
12ifeq ($(CONFIG_INPUT_DEBUG),y)
13EXTRA_CFLAGS += -DDEBUG
14endif
15 8
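Together, the Kconfig and Makefile hunks above wire the new debugging support into the hid module: hid-debug.o is folded into hid.o only when CONFIG_HID_DEBUG=y, and the old ad-hoc DEBUG/DEBUG_DATA defines go away. A rough sketch of the resulting compile-time gating, using a hypothetical wrapper macro that is not part of the patch:

#include <linux/kernel.h>

#ifdef CONFIG_HID_DEBUG
/* Debug builds: hid-debug.o is linked into hid.o and messages are emitted. */
#define my_hid_dbg(fmt, ...) \
	printk(KERN_DEBUG "hid: " fmt, ##__VA_ARGS__)
#else
/* Non-debug builds: the call compiles away entirely. */
#define my_hid_dbg(fmt, ...) do { } while (0)
#endif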
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 49f18f5b2514..8c7d48eff7b7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -28,11 +28,9 @@
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/wait.h> 29#include <linux/wait.h>
30 30
31#undef DEBUG
32#undef DEBUG_DATA
33
34#include <linux/hid.h> 31#include <linux/hid.h>
35#include <linux/hiddev.h> 32#include <linux/hiddev.h>
33#include <linux/hid-debug.h>
36 34
37/* 35/*
38 * Version Information 36 * Version Information
@@ -951,7 +949,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
951 return -1; 949 return -1;
952 } 950 }
953 951
954#ifdef DEBUG_DATA 952#ifdef CONFIG_HID_DEBUG
955 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un"); 953 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
956#endif 954#endif
957 955
@@ -961,7 +959,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
961 size--; 959 size--;
962 } 960 }
963 961
964#ifdef DEBUG_DATA 962#ifdef CONFIG_HID_DEBUG
965 { 963 {
966 int i; 964 int i;
967 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size); 965 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
new file mode 100644
index 000000000000..89241be4ec9b
--- /dev/null
+++ b/drivers/hid/hid-debug.c
@@ -0,0 +1,764 @@
1/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 * (c) 2007 Jiri Kosina
7 *
8 * Some debug stuff for the HID parser.
9 */
10
11/*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * Should you need to contact me, the author, you can do so either by
27 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
28 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
29 */
30
31#include <linux/hid.h>
32
33struct hid_usage_entry {
34 unsigned page;
35 unsigned usage;
36 char *description;
37};
38
39static const struct hid_usage_entry hid_usage_table[] = {
40 { 0, 0, "Undefined" },
41 { 1, 0, "GenericDesktop" },
42 {0, 0x01, "Pointer"},
43 {0, 0x02, "Mouse"},
44 {0, 0x04, "Joystick"},
45 {0, 0x05, "GamePad"},
46 {0, 0x06, "Keyboard"},
47 {0, 0x07, "Keypad"},
48 {0, 0x08, "MultiAxis"},
49 {0, 0x30, "X"},
50 {0, 0x31, "Y"},
51 {0, 0x32, "Z"},
52 {0, 0x33, "Rx"},
53 {0, 0x34, "Ry"},
54 {0, 0x35, "Rz"},
55 {0, 0x36, "Slider"},
56 {0, 0x37, "Dial"},
57 {0, 0x38, "Wheel"},
58 {0, 0x39, "HatSwitch"},
59 {0, 0x3a, "CountedBuffer"},
60 {0, 0x3b, "ByteCount"},
61 {0, 0x3c, "MotionWakeup"},
62 {0, 0x3d, "Start"},
63 {0, 0x3e, "Select"},
64 {0, 0x40, "Vx"},
65 {0, 0x41, "Vy"},
66 {0, 0x42, "Vz"},
67 {0, 0x43, "Vbrx"},
68 {0, 0x44, "Vbry"},
69 {0, 0x45, "Vbrz"},
70 {0, 0x46, "Vno"},
71 {0, 0x80, "SystemControl"},
72 {0, 0x81, "SystemPowerDown"},
73 {0, 0x82, "SystemSleep"},
74 {0, 0x83, "SystemWakeUp"},
75 {0, 0x84, "SystemContextMenu"},
76 {0, 0x85, "SystemMainMenu"},
77 {0, 0x86, "SystemAppMenu"},
78 {0, 0x87, "SystemMenuHelp"},
79 {0, 0x88, "SystemMenuExit"},
80 {0, 0x89, "SystemMenuSelect"},
81 {0, 0x8a, "SystemMenuRight"},
82 {0, 0x8b, "SystemMenuLeft"},
83 {0, 0x8c, "SystemMenuUp"},
84 {0, 0x8d, "SystemMenuDown"},
85 {0, 0x90, "D-PadUp"},
86 {0, 0x91, "D-PadDown"},
87 {0, 0x92, "D-PadRight"},
88 {0, 0x93, "D-PadLeft"},
89 { 2, 0, "Simulation" },
90 {0, 0xb0, "Aileron"},
91 {0, 0xb1, "AileronTrim"},
92 {0, 0xb2, "Anti-Torque"},
93 {0, 0xb3, "Autopilot"},
94 {0, 0xb4, "Chaff"},
95 {0, 0xb5, "Collective"},
96 {0, 0xb6, "DiveBrake"},
97 {0, 0xb7, "ElectronicCountermeasures"},
98 {0, 0xb8, "Elevator"},
99 {0, 0xb9, "ElevatorTrim"},
100 {0, 0xba, "Rudder"},
101 {0, 0xbb, "Throttle"},
102 {0, 0xbc, "FlightCommunications"},
103 {0, 0xbd, "FlareRelease"},
104 {0, 0xbe, "LandingGear"},
105 {0, 0xbf, "ToeBrake"},
106 { 7, 0, "Keyboard" },
107 { 8, 0, "LED" },
108 {0, 0x01, "NumLock"},
109 {0, 0x02, "CapsLock"},
110 {0, 0x03, "ScrollLock"},
111 {0, 0x04, "Compose"},
112 {0, 0x05, "Kana"},
113 {0, 0x4b, "GenericIndicator"},
114 { 9, 0, "Button" },
115 { 10, 0, "Ordinal" },
116 { 12, 0, "Consumer" },
117 {0, 0x238, "HorizontalWheel"},
118 { 13, 0, "Digitizers" },
119 {0, 0x01, "Digitizer"},
120 {0, 0x02, "Pen"},
121 {0, 0x03, "LightPen"},
122 {0, 0x04, "TouchScreen"},
123 {0, 0x05, "TouchPad"},
124 {0, 0x20, "Stylus"},
125 {0, 0x21, "Puck"},
126 {0, 0x22, "Finger"},
127 {0, 0x30, "TipPressure"},
128 {0, 0x31, "BarrelPressure"},
129 {0, 0x32, "InRange"},
130 {0, 0x33, "Touch"},
131 {0, 0x34, "UnTouch"},
132 {0, 0x35, "Tap"},
133 {0, 0x39, "TabletFunctionKey"},
134 {0, 0x3a, "ProgramChangeKey"},
135 {0, 0x3c, "Invert"},
136 {0, 0x42, "TipSwitch"},
137 {0, 0x43, "SecondaryTipSwitch"},
138 {0, 0x44, "BarrelSwitch"},
139 {0, 0x45, "Eraser"},
140 {0, 0x46, "TabletPick"},
141 { 15, 0, "PhysicalInterfaceDevice" },
142 {0, 0x00, "Undefined"},
143 {0, 0x01, "Physical_Interface_Device"},
144 {0, 0x20, "Normal"},
145 {0, 0x21, "Set_Effect_Report"},
146 {0, 0x22, "Effect_Block_Index"},
147 {0, 0x23, "Parameter_Block_Offset"},
148 {0, 0x24, "ROM_Flag"},
149 {0, 0x25, "Effect_Type"},
150 {0, 0x26, "ET_Constant_Force"},
151 {0, 0x27, "ET_Ramp"},
152 {0, 0x28, "ET_Custom_Force_Data"},
153 {0, 0x30, "ET_Square"},
154 {0, 0x31, "ET_Sine"},
155 {0, 0x32, "ET_Triangle"},
156 {0, 0x33, "ET_Sawtooth_Up"},
157 {0, 0x34, "ET_Sawtooth_Down"},
158 {0, 0x40, "ET_Spring"},
159 {0, 0x41, "ET_Damper"},
160 {0, 0x42, "ET_Inertia"},
161 {0, 0x43, "ET_Friction"},
162 {0, 0x50, "Duration"},
163 {0, 0x51, "Sample_Period"},
164 {0, 0x52, "Gain"},
165 {0, 0x53, "Trigger_Button"},
166 {0, 0x54, "Trigger_Repeat_Interval"},
167 {0, 0x55, "Axes_Enable"},
168 {0, 0x56, "Direction_Enable"},
169 {0, 0x57, "Direction"},
170 {0, 0x58, "Type_Specific_Block_Offset"},
171 {0, 0x59, "Block_Type"},
172 {0, 0x5A, "Set_Envelope_Report"},
173 {0, 0x5B, "Attack_Level"},
174 {0, 0x5C, "Attack_Time"},
175 {0, 0x5D, "Fade_Level"},
176 {0, 0x5E, "Fade_Time"},
177 {0, 0x5F, "Set_Condition_Report"},
178 {0, 0x60, "CP_Offset"},
179 {0, 0x61, "Positive_Coefficient"},
180 {0, 0x62, "Negative_Coefficient"},
181 {0, 0x63, "Positive_Saturation"},
182 {0, 0x64, "Negative_Saturation"},
183 {0, 0x65, "Dead_Band"},
184 {0, 0x66, "Download_Force_Sample"},
185 {0, 0x67, "Isoch_Custom_Force_Enable"},
186 {0, 0x68, "Custom_Force_Data_Report"},
187 {0, 0x69, "Custom_Force_Data"},
188 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
189 {0, 0x6B, "Set_Custom_Force_Report"},
190 {0, 0x6C, "Custom_Force_Data_Offset"},
191 {0, 0x6D, "Sample_Count"},
192 {0, 0x6E, "Set_Periodic_Report"},
193 {0, 0x6F, "Offset"},
194 {0, 0x70, "Magnitude"},
195 {0, 0x71, "Phase"},
196 {0, 0x72, "Period"},
197 {0, 0x73, "Set_Constant_Force_Report"},
198 {0, 0x74, "Set_Ramp_Force_Report"},
199 {0, 0x75, "Ramp_Start"},
200 {0, 0x76, "Ramp_End"},
201 {0, 0x77, "Effect_Operation_Report"},
202 {0, 0x78, "Effect_Operation"},
203 {0, 0x79, "Op_Effect_Start"},
204 {0, 0x7A, "Op_Effect_Start_Solo"},
205 {0, 0x7B, "Op_Effect_Stop"},
206 {0, 0x7C, "Loop_Count"},
207 {0, 0x7D, "Device_Gain_Report"},
208 {0, 0x7E, "Device_Gain"},
209 {0, 0x7F, "PID_Pool_Report"},
210 {0, 0x80, "RAM_Pool_Size"},
211 {0, 0x81, "ROM_Pool_Size"},
212 {0, 0x82, "ROM_Effect_Block_Count"},
213 {0, 0x83, "Simultaneous_Effects_Max"},
214 {0, 0x84, "Pool_Alignment"},
215 {0, 0x85, "PID_Pool_Move_Report"},
216 {0, 0x86, "Move_Source"},
217 {0, 0x87, "Move_Destination"},
218 {0, 0x88, "Move_Length"},
219 {0, 0x89, "PID_Block_Load_Report"},
220 {0, 0x8B, "Block_Load_Status"},
221 {0, 0x8C, "Block_Load_Success"},
222 {0, 0x8D, "Block_Load_Full"},
223 {0, 0x8E, "Block_Load_Error"},
224 {0, 0x8F, "Block_Handle"},
225 {0, 0x90, "PID_Block_Free_Report"},
226 {0, 0x91, "Type_Specific_Block_Handle"},
227 {0, 0x92, "PID_State_Report"},
228 {0, 0x94, "Effect_Playing"},
229 {0, 0x95, "PID_Device_Control_Report"},
230 {0, 0x96, "PID_Device_Control"},
231 {0, 0x97, "DC_Enable_Actuators"},
232 {0, 0x98, "DC_Disable_Actuators"},
233 {0, 0x99, "DC_Stop_All_Effects"},
234 {0, 0x9A, "DC_Device_Reset"},
235 {0, 0x9B, "DC_Device_Pause"},
236 {0, 0x9C, "DC_Device_Continue"},
237 {0, 0x9F, "Device_Paused"},
238 {0, 0xA0, "Actuators_Enabled"},
239 {0, 0xA4, "Safety_Switch"},
240 {0, 0xA5, "Actuator_Override_Switch"},
241 {0, 0xA6, "Actuator_Power"},
242 {0, 0xA7, "Start_Delay"},
243 {0, 0xA8, "Parameter_Block_Size"},
244 {0, 0xA9, "Device_Managed_Pool"},
245 {0, 0xAA, "Shared_Parameter_Blocks"},
246 {0, 0xAB, "Create_New_Effect_Report"},
247 {0, 0xAC, "RAM_Pool_Available"},
248 { 0x84, 0, "Power Device" },
249 { 0x84, 0x02, "PresentStatus" },
250 { 0x84, 0x03, "ChangeStatus" },
251 { 0x84, 0x04, "UPS" },
252 { 0x84, 0x05, "PowerSupply" },
253 { 0x84, 0x10, "BatterySystem" },
254 { 0x84, 0x11, "BatterySystemID" },
255 { 0x84, 0x12, "Battery" },
256 { 0x84, 0x13, "BatteryID" },
257 { 0x84, 0x14, "Charger" },
258 { 0x84, 0x15, "ChargerID" },
259 { 0x84, 0x16, "PowerConverter" },
260 { 0x84, 0x17, "PowerConverterID" },
261 { 0x84, 0x18, "OutletSystem" },
262 { 0x84, 0x19, "OutletSystemID" },
263 { 0x84, 0x1a, "Input" },
264 { 0x84, 0x1b, "InputID" },
265 { 0x84, 0x1c, "Output" },
266 { 0x84, 0x1d, "OutputID" },
267 { 0x84, 0x1e, "Flow" },
268 { 0x84, 0x1f, "FlowID" },
269 { 0x84, 0x20, "Outlet" },
270 { 0x84, 0x21, "OutletID" },
271 { 0x84, 0x22, "Gang" },
272 { 0x84, 0x24, "PowerSummary" },
273 { 0x84, 0x25, "PowerSummaryID" },
274 { 0x84, 0x30, "Voltage" },
275 { 0x84, 0x31, "Current" },
276 { 0x84, 0x32, "Frequency" },
277 { 0x84, 0x33, "ApparentPower" },
278 { 0x84, 0x35, "PercentLoad" },
279 { 0x84, 0x40, "ConfigVoltage" },
280 { 0x84, 0x41, "ConfigCurrent" },
281 { 0x84, 0x43, "ConfigApparentPower" },
282 { 0x84, 0x53, "LowVoltageTransfer" },
283 { 0x84, 0x54, "HighVoltageTransfer" },
284 { 0x84, 0x56, "DelayBeforeStartup" },
285 { 0x84, 0x57, "DelayBeforeShutdown" },
286 { 0x84, 0x58, "Test" },
287 { 0x84, 0x5a, "AudibleAlarmControl" },
288 { 0x84, 0x60, "Present" },
289 { 0x84, 0x61, "Good" },
290 { 0x84, 0x62, "InternalFailure" },
291 { 0x84, 0x65, "Overload" },
292 { 0x84, 0x66, "OverCharged" },
293 { 0x84, 0x67, "OverTemperature" },
294 { 0x84, 0x68, "ShutdownRequested" },
295 { 0x84, 0x69, "ShutdownImminent" },
296 { 0x84, 0x6b, "SwitchOn/Off" },
297 { 0x84, 0x6c, "Switchable" },
298 { 0x84, 0x6d, "Used" },
299 { 0x84, 0x6e, "Boost" },
300 { 0x84, 0x73, "CommunicationLost" },
301 { 0x84, 0xfd, "iManufacturer" },
302 { 0x84, 0xfe, "iProduct" },
303 { 0x84, 0xff, "iSerialNumber" },
304 { 0x85, 0, "Battery System" },
305 { 0x85, 0x01, "SMBBatteryMode" },
306 { 0x85, 0x02, "SMBBatteryStatus" },
307 { 0x85, 0x03, "SMBAlarmWarning" },
308 { 0x85, 0x04, "SMBChargerMode" },
309 { 0x85, 0x05, "SMBChargerStatus" },
310 { 0x85, 0x06, "SMBChargerSpecInfo" },
311 { 0x85, 0x07, "SMBSelectorState" },
312 { 0x85, 0x08, "SMBSelectorPresets" },
313 { 0x85, 0x09, "SMBSelectorInfo" },
314 { 0x85, 0x29, "RemainingCapacityLimit" },
315 { 0x85, 0x2c, "CapacityMode" },
316 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
317 { 0x85, 0x44, "Charging" },
318 { 0x85, 0x45, "Discharging" },
319 { 0x85, 0x4b, "NeedReplacement" },
320 { 0x85, 0x66, "RemainingCapacity" },
321 { 0x85, 0x68, "RunTimeToEmpty" },
322 { 0x85, 0x6a, "AverageTimeToFull" },
323 { 0x85, 0x83, "DesignCapacity" },
324 { 0x85, 0x85, "ManufacturerDate" },
325 { 0x85, 0x89, "iDeviceChemistry" },
326 { 0x85, 0x8b, "Rechargable" },
327 { 0x85, 0x8f, "iOEMInformation" },
328 { 0x85, 0x8d, "CapacityGranularity1" },
329 { 0x85, 0xd0, "ACPresent" },
330 /* pages 0xff00 to 0xffff are vendor-specific */
331 { 0xffff, 0, "Vendor-specific-FF" },
332 { 0, 0, NULL }
333};
334
335static void resolv_usage_page(unsigned page) {
336 const struct hid_usage_entry *p;
337
338 for (p = hid_usage_table; p->description; p++)
339 if (p->page == page) {
340 printk("%s", p->description);
341 return;
342 }
343 printk("%04x", page);
344}
345
346void hid_resolv_usage(unsigned usage) {
347 const struct hid_usage_entry *p;
348
349 resolv_usage_page(usage >> 16);
350 printk(".");
351 for (p = hid_usage_table; p->description; p++)
352 if (p->page == (usage >> 16)) {
353 for(++p; p->description && p->usage != 0; p++)
354 if (p->usage == (usage & 0xffff)) {
355 printk("%s", p->description);
356 return;
357 }
358 break;
359 }
360 printk("%04x", usage & 0xffff);
361}
362EXPORT_SYMBOL_GPL(hid_resolv_usage);
363
364__inline__ static void tab(int n) {
365 while (n--) printk(" ");
366}
367
368void hid_dump_field(struct hid_field *field, int n) {
369 int j;
370
371 if (field->physical) {
372 tab(n);
373 printk("Physical(");
374 hid_resolv_usage(field->physical); printk(")\n");
375 }
376 if (field->logical) {
377 tab(n);
378 printk("Logical(");
379 hid_resolv_usage(field->logical); printk(")\n");
380 }
381 tab(n); printk("Usage(%d)\n", field->maxusage);
382 for (j = 0; j < field->maxusage; j++) {
383 tab(n+2); hid_resolv_usage(field->usage[j].hid); printk("\n");
384 }
385 if (field->logical_minimum != field->logical_maximum) {
386 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
387 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
388 }
389 if (field->physical_minimum != field->physical_maximum) {
390 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
391 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
392 }
393 if (field->unit_exponent) {
394 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
395 }
396 if (field->unit) {
397 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
398 char *units[5][8] = {
399 { "None", "None", "None", "None", "None", "None", "None", "None" },
400 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
401 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
402 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
403 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
404 };
405
406 int i;
407 int sys;
408 __u32 data = field->unit;
409
410 /* First nibble tells us which system we're in. */
411 sys = data & 0xf;
412 data >>= 4;
413
414 if(sys > 4) {
415 tab(n); printk("Unit(Invalid)\n");
416 }
417 else {
418 int earlier_unit = 0;
419
420 tab(n); printk("Unit(%s : ", systems[sys]);
421
422 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
423 char nibble = data & 0xf;
424 data >>= 4;
425 if (nibble != 0) {
426 if(earlier_unit++ > 0)
427 printk("*");
428 printk("%s", units[sys][i]);
429 if(nibble != 1) {
430 /* This is a _signed_ nibble(!) */
431
432 int val = nibble & 0x7;
433 if(nibble & 0x08)
434 val = -((0x7 & ~val) +1);
435 printk("^%d", val);
436 }
437 }
438 }
439 printk(")\n");
440 }
441 }
442 tab(n); printk("Report Size(%u)\n", field->report_size);
443 tab(n); printk("Report Count(%u)\n", field->report_count);
444 tab(n); printk("Report Offset(%u)\n", field->report_offset);
445
446 tab(n); printk("Flags( ");
447 j = field->flags;
448 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
449 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
450 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
451 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
452 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
453 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
454 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
455 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
456 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
457 printk(")\n");
458}
459EXPORT_SYMBOL_GPL(hid_dump_field);
460
461void hid_dump_device(struct hid_device *device) {
462 struct hid_report_enum *report_enum;
463 struct hid_report *report;
464 struct list_head *list;
465 unsigned i,k;
466 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
467
468 for (i = 0; i < HID_REPORT_TYPES; i++) {
469 report_enum = device->report_enum + i;
470 list = report_enum->report_list.next;
471 while (list != &report_enum->report_list) {
472 report = (struct hid_report *) list;
473 tab(2);
474 printk("%s", table[i]);
475 if (report->id)
476 printk("(%d)", report->id);
477 printk("[%s]", table[report->type]);
478 printk("\n");
479 for (k = 0; k < report->maxfield; k++) {
480 tab(4);
481 printk("Field(%d)\n", k);
482 hid_dump_field(report->field[k], 6);
483 }
484 list = list->next;
485 }
486 }
487}
488EXPORT_SYMBOL_GPL(hid_dump_device);
489
490void hid_dump_input(struct hid_usage *usage, __s32 value) {
491 printk("hid-debug: input ");
492 hid_resolv_usage(usage->hid);
493 printk(" = %d\n", value);
494}
495EXPORT_SYMBOL_GPL(hid_dump_input);
496
497static char *events[EV_MAX + 1] = {
498 [EV_SYN] = "Sync", [EV_KEY] = "Key",
499 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
500 [EV_MSC] = "Misc", [EV_LED] = "LED",
501 [EV_SND] = "Sound", [EV_REP] = "Repeat",
502 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
503 [EV_FF_STATUS] = "ForceFeedbackStatus",
504};
505
506static char *syncs[2] = {
507 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
508};
509static char *keys[KEY_MAX + 1] = {
510 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
511 [KEY_1] = "1", [KEY_2] = "2",
512 [KEY_3] = "3", [KEY_4] = "4",
513 [KEY_5] = "5", [KEY_6] = "6",
514 [KEY_7] = "7", [KEY_8] = "8",
515 [KEY_9] = "9", [KEY_0] = "0",
516 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
517 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
518 [KEY_Q] = "Q", [KEY_W] = "W",
519 [KEY_E] = "E", [KEY_R] = "R",
520 [KEY_T] = "T", [KEY_Y] = "Y",
521 [KEY_U] = "U", [KEY_I] = "I",
522 [KEY_O] = "O", [KEY_P] = "P",
523 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
524 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
525 [KEY_A] = "A", [KEY_S] = "S",
526 [KEY_D] = "D", [KEY_F] = "F",
527 [KEY_G] = "G", [KEY_H] = "H",
528 [KEY_J] = "J", [KEY_K] = "K",
529 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
530 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
531 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
532 [KEY_Z] = "Z", [KEY_X] = "X",
533 [KEY_C] = "C", [KEY_V] = "V",
534 [KEY_B] = "B", [KEY_N] = "N",
535 [KEY_M] = "M", [KEY_COMMA] = "Comma",
536 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
537 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
538 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
539 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
540 [KEY_F2] = "F2", [KEY_F3] = "F3",
541 [KEY_F4] = "F4", [KEY_F5] = "F5",
542 [KEY_F6] = "F6", [KEY_F7] = "F7",
543 [KEY_F8] = "F8", [KEY_F9] = "F9",
544 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
545 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
546 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
547 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
548 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
549 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
550 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
551 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
552 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
553 [KEY_F11] = "F11", [KEY_F12] = "F12",
554 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
555 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
556 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
557 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
558 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
559 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
560 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
561 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
562 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
563 [KEY_END] = "End", [KEY_DOWN] = "Down",
564 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
565 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
566 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
567 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
568 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
569 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
570 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
571 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
572 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
573 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
574 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
575 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
576 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
577 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
578 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
579 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
580 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
581 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
582 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
583 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
584 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
585 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
586 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
587 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
588 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
589 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
590 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
591 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
592 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
593 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
594 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
595 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
596 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
597 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
598 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
599 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
600 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
601 [KEY_F14] = "F14", [KEY_F15] = "F15",
602 [KEY_F16] = "F16", [KEY_F17] = "F17",
603 [KEY_F18] = "F18", [KEY_F19] = "F19",
604 [KEY_F20] = "F20", [KEY_F21] = "F21",
605 [KEY_F22] = "F22", [KEY_F23] = "F23",
606 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
607 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
608 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
609 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
610 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
611 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
612 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
613 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
614 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
615 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
616 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
617 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
618 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
619 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
620 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
621 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
622 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
623 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
624 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
625 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
626 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
627 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
628 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
629 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
630 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
631 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
632 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
633 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
634 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
635 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
636 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
637 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
638 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
639 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
640 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
641 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
642 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
643 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
644 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
645 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
646 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
647 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
648 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
649 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
650 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
651 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
652 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
653 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
654 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
655 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
656 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
657 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
658 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
659 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
660 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
661 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
662 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
663 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
664 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
665 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
666 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
667 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
668 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
669 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
670 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
671 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
672 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
673 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
674 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
675 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
676 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
677 [KEY_LAST] = "Last", [KEY_AB] = "AB",
678 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
679 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
680 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
681 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
682 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
683 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
684 [KEY_DEL_LINE] = "DeleteLine",
685 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
686 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
687 [KEY_DOCUMENTS] = "Documents",
688 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
689 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
690 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
691 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
692 [KEY_FN_S] = "Fn+S",
693 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
694 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
695 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
696 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
697 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
698 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
699 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
700 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
701 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
702 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
703};
704
705static char *relatives[REL_MAX + 1] = {
706 [REL_X] = "X", [REL_Y] = "Y",
707 [REL_Z] = "Z", [REL_RX] = "Rx",
708 [REL_RY] = "Ry", [REL_RZ] = "Rz",
709 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
710 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
711};
712
713static char *absolutes[ABS_MAX + 1] = {
714 [ABS_X] = "X", [ABS_Y] = "Y",
715 [ABS_Z] = "Z", [ABS_RX] = "Rx",
716 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
717 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
718 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
719 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
720 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
721 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
722 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
723 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
724 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
725 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
726 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
727};
728
729static char *misc[MSC_MAX + 1] = {
730 [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled",
731 [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData"
732};
733
734static char *leds[LED_MAX + 1] = {
735 [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock",
736 [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
737 [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
738 [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
739 [LED_MISC] = "Misc",
740};
741
742static char *repeats[REP_MAX + 1] = {
743 [REP_DELAY] = "Delay", [REP_PERIOD] = "Period"
744};
745
746static char *sounds[SND_MAX + 1] = {
747 [SND_CLICK] = "Click", [SND_BELL] = "Bell",
748 [SND_TONE] = "Tone"
749};
750
751static char **names[EV_MAX + 1] = {
752 [EV_SYN] = syncs, [EV_KEY] = keys,
753 [EV_REL] = relatives, [EV_ABS] = absolutes,
754 [EV_MSC] = misc, [EV_LED] = leds,
755 [EV_SND] = sounds, [EV_REP] = repeats,
756};
757
758void hid_resolv_event(__u8 type, __u16 code) {
759
760 printk("%s.%s", events[type] ? events[type] : "?",
761 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
762}
763EXPORT_SYMBOL_GPL(hid_resolv_event);
764
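The new hid-debug.c exports hid_resolv_usage(), hid_dump_field(), hid_dump_device() and hid_resolv_event() so that individual HID drivers can reuse them. A hypothetical caller (not part of the patch) that walks one report and pretty-prints its fields, which is only meaningful when CONFIG_HID_DEBUG=y since the file is not built otherwise:

#include <linux/hid.h>
#include <linux/hid-debug.h>

/* Hypothetical caller (not part of the patch): dump every field of one
 * report at an indentation depth of two spaces. */
static void example_dump_report(struct hid_report *report)
{
	unsigned int k;

	for (k = 0; k < report->maxfield; k++)
		hid_dump_field(report->field[k], 2);
}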
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c7a6833f6821..25d180a24fc4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -31,9 +31,8 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33 33
34#undef DEBUG
35
36#include <linux/hid.h> 34#include <linux/hid.h>
35#include <linux/hid-debug.h>
37 36
38static int hid_pb_fnmode = 1; 37static int hid_pb_fnmode = 1;
39module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644); 38module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644);
@@ -252,9 +251,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
252 251
253 field->hidinput = hidinput; 252 field->hidinput = hidinput;
254 253
255#ifdef DEBUG 254#ifdef CONFIG_HID_DEBUG
256 printk(KERN_DEBUG "Mapping: "); 255 printk(KERN_DEBUG "Mapping: ");
257 resolv_usage(usage->hid); 256 hid_resolv_usage(usage->hid);
258 printk(" ---> "); 257 printk(" ---> ");
259#endif 258#endif
260 259
@@ -682,14 +681,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
682 field->dpad = usage->code; 681 field->dpad = usage->code;
683 } 682 }
684 683
685#ifdef DEBUG 684 hid_resolv_event(usage->type, usage->code);
686 resolv_event(usage->type, usage->code); 685#ifdef CONFIG_HID_DEBUG
687 printk("\n"); 686 printk("\n");
688#endif 687#endif
689 return; 688 return;
690 689
691ignore: 690ignore:
692#ifdef DEBUG 691#ifdef CONFIG_HID_DEBUG
693 printk("IGNORED\n"); 692 printk("IGNORED\n");
694#endif 693#endif
695 return; 694 return;
@@ -804,6 +803,18 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
804} 803}
805EXPORT_SYMBOL_GPL(hidinput_find_field); 804EXPORT_SYMBOL_GPL(hidinput_find_field);
806 805
806static int hidinput_open(struct input_dev *dev)
807{
808 struct hid_device *hid = dev->private;
809 return hid->hid_open(hid);
810}
811
812static void hidinput_close(struct input_dev *dev)
813{
814 struct hid_device *hid = dev->private;
815 hid->hid_close(hid);
816}
817
807/* 818/*
808 * Register the input device; print a message. 819 * Register the input device; print a message.
809 * Configure the input layer interface 820 * Configure the input layer interface
@@ -816,6 +827,7 @@ int hidinput_connect(struct hid_device *hid)
816 struct hid_input *hidinput = NULL; 827 struct hid_input *hidinput = NULL;
817 struct input_dev *input_dev; 828 struct input_dev *input_dev;
818 int i, j, k; 829 int i, j, k;
830 int max_report_type = HID_OUTPUT_REPORT;
819 831
820 INIT_LIST_HEAD(&hid->inputs); 832 INIT_LIST_HEAD(&hid->inputs);
821 833
@@ -828,7 +840,10 @@ int hidinput_connect(struct hid_device *hid)
828 if (i == hid->maxcollection) 840 if (i == hid->maxcollection)
829 return -1; 841 return -1;
830 842
831 for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) 843 if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
844 max_report_type = HID_INPUT_REPORT;
845
846 for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
832 list_for_each_entry(report, &hid->report_enum[k].report_list, list) { 847 list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
833 848
834 if (!report->maxfield) 849 if (!report->maxfield)
@@ -846,8 +861,8 @@ int hidinput_connect(struct hid_device *hid)
846 861
847 input_dev->private = hid; 862 input_dev->private = hid;
848 input_dev->event = hid->hidinput_input_event; 863 input_dev->event = hid->hidinput_input_event;
849 input_dev->open = hid->hidinput_open; 864 input_dev->open = hidinput_open;
850 input_dev->close = hid->hidinput_close; 865 input_dev->close = hidinput_close;
851 866
852 input_dev->name = hid->name; 867 input_dev->name = hid->name;
853 input_dev->phys = hid->phys; 868 input_dev->phys = hid->phys;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index af939796750d..d2bb5a9a303f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -360,8 +360,7 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
360 if (event == NETEVENT_NEIGH_UPDATE) { 360 if (event == NETEVENT_NEIGH_UPDATE) {
361 struct neighbour *neigh = ctx; 361 struct neighbour *neigh = ctx;
362 362
363 if (neigh->dev->type == ARPHRD_INFINIBAND && 363 if (neigh->nud_state & NUD_VALID) {
364 (neigh->nud_state & NUD_VALID)) {
365 set_timeout(jiffies); 364 set_timeout(jiffies);
366 } 365 }
367 } 366 }
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed141ebd1c8..13efd4170349 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -642,7 +642,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
643} 643}
644 644
645static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, 645static void build_smp_wc(struct ib_qp *qp,
646 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
646 struct ib_wc *wc) 647 struct ib_wc *wc)
647{ 648{
648 memset(wc, 0, sizeof *wc); 649 memset(wc, 0, sizeof *wc);
@@ -652,7 +653,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
652 wc->pkey_index = pkey_index; 653 wc->pkey_index = pkey_index;
653 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 654 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
654 wc->src_qp = IB_QP0; 655 wc->src_qp = IB_QP0;
655 wc->qp_num = IB_QP0; 656 wc->qp = qp;
656 wc->slid = slid; 657 wc->slid = slid;
657 wc->sl = 0; 658 wc->sl = 0;
658 wc->dlid_path_bits = 0; 659 wc->dlid_path_bits = 0;
@@ -713,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
713 goto out; 714 goto out;
714 } 715 }
715 716
716 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid), 717 build_smp_wc(mad_agent_priv->agent.qp,
718 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
717 send_wr->wr.ud.pkey_index, 719 send_wr->wr.ud.pkey_index,
718 send_wr->wr.ud.port_num, &mad_wc); 720 send_wr->wr.ud.port_num, &mad_wc);
719 721
@@ -2355,7 +2357,8 @@ static void local_completions(struct work_struct *work)
2355 * Defined behavior is to complete response 2357 * Defined behavior is to complete response
2356 * before request 2358 * before request
2357 */ 2359 */
2358 build_smp_wc((unsigned long) local->mad_send_wr, 2360 build_smp_wc(recv_mad_agent->agent.qp,
2361 (unsigned long) local->mad_send_wr,
2359 be16_to_cpu(IB_LID_PERMISSIVE), 2362 be16_to_cpu(IB_LID_PERMISSIVE),
2360 0, recv_mad_agent->agent.port_num, &wc); 2363 0, recv_mad_agent->agent.port_num, &wc);
2361 2364
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 743247ec065e..df1efbc10882 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -933,7 +933,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
933 resp->wc[i].vendor_err = wc[i].vendor_err; 933 resp->wc[i].vendor_err = wc[i].vendor_err;
934 resp->wc[i].byte_len = wc[i].byte_len; 934 resp->wc[i].byte_len = wc[i].byte_len;
935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data; 935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
936 resp->wc[i].qp_num = wc[i].qp_num; 936 resp->wc[i].qp_num = wc[i].qp->qp_num;
937 resp->wc[i].src_qp = wc[i].src_qp; 937 resp->wc[i].src_qp = wc[i].src_qp;
938 resp->wc[i].wc_flags = wc[i].wc_flags; 938 resp->wc[i].wc_flags = wc[i].wc_flags;
939 resp->wc[i].pkey_index = wc[i].pkey_index; 939 resp->wc[i].pkey_index = wc[i].pkey_index;
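These InfiniBand hunks follow a core change to struct ib_wc: a completion now carries a struct ib_qp pointer (wc->qp) instead of a bare qp_num, so consumers read the number through the pointer, as the uverbs hunk above does. A minimal sketch of a consumer, assuming this era's rdma/ib_verbs.h definitions; the handler itself is hypothetical, and since the ehca hunk below sets wc->qp to NULL, the pointer is not assumed valid everywhere:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical completion consumer under the new ib_wc layout. */
static void example_handle_wc(struct ib_wc *wc)
{
	/* ehca (below) reports wc->qp as NULL, so guard the dereference. */
	u32 qpn = wc->qp ? wc->qp->qp_num : 0;

	printk(KERN_DEBUG "completion on QP %u, status %d\n", qpn, wc->status);
}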
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 05c9154d46f4..5175c99ee586 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -153,7 +153,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
153 153
154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce)); 154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
155 entry->wr_id = ce->hdr.context; 155 entry->wr_id = ce->hdr.context;
156 entry->qp_num = ce->handle; 156 entry->qp = &qp->ibqp;
157 entry->wc_flags = 0; 157 entry->wc_flags = 0;
158 entry->slid = 0; 158 entry->slid = 0;
159 entry->sl = 0; 159 entry->sl = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1c722032319c..cf95ee474b0f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -119,13 +119,14 @@ struct ehca_qp {
119 struct ipz_qp_handle ipz_qp_handle; 119 struct ipz_qp_handle ipz_qp_handle;
120 struct ehca_pfqp pf; 120 struct ehca_pfqp pf;
121 struct ib_qp_init_attr init_attr; 121 struct ib_qp_init_attr init_attr;
122 u64 uspace_squeue;
123 u64 uspace_rqueue;
124 u64 uspace_fwh;
125 struct ehca_cq *send_cq; 122 struct ehca_cq *send_cq;
126 struct ehca_cq *recv_cq; 123 struct ehca_cq *recv_cq;
127 unsigned int sqerr_purgeflag; 124 unsigned int sqerr_purgeflag;
128 struct hlist_node list_entries; 125 struct hlist_node list_entries;
126 /* mmap counter for resources mapped into user space */
127 u32 mm_count_squeue;
128 u32 mm_count_rqueue;
129 u32 mm_count_galpa;
129}; 130};
130 131
131/* must be power of 2 */ 132/* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
142 struct ipz_cq_handle ipz_cq_handle; 143 struct ipz_cq_handle ipz_cq_handle;
143 struct ehca_pfcq pf; 144 struct ehca_pfcq pf;
144 spinlock_t cb_lock; 145 spinlock_t cb_lock;
145 u64 uspace_queue;
146 u64 uspace_fwh;
147 struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; 146 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
148 struct list_head entry; 147 struct list_head entry;
149 u32 nr_callbacks; 148 u32 nr_callbacks;
150 spinlock_t task_lock; 149 spinlock_t task_lock;
151 u32 ownpid; 150 u32 ownpid;
151 /* mmap counter for resources mapped into user space */
152 u32 mm_count_queue;
153 u32 mm_count_galpa;
152}; 154};
153 155
154enum ehca_mr_flag { 156enum ehca_mr_flag {
@@ -248,20 +250,6 @@ struct ehca_ucontext {
248 struct ib_ucontext ib_ucontext; 250 struct ib_ucontext ib_ucontext;
249}; 251};
250 252
251struct ehca_module *ehca_module_new(void);
252
253int ehca_module_delete(struct ehca_module *me);
254
255int ehca_eq_ctor(struct ehca_eq *eq);
256
257int ehca_eq_dtor(struct ehca_eq *eq);
258
259struct ehca_shca *ehca_shca_new(void);
260
261int ehca_shca_delete(struct ehca_shca *me);
262
263struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
264
265int ehca_init_pd_cache(void); 253int ehca_init_pd_cache(void);
266void ehca_cleanup_pd_cache(void); 254void ehca_cleanup_pd_cache(void);
267int ehca_init_cq_cache(void); 255int ehca_init_cq_cache(void);
@@ -283,7 +271,6 @@ extern int ehca_port_act_time;
283extern int ehca_use_hp_mr; 271extern int ehca_use_hp_mr;
284 272
285struct ipzu_queue_resp { 273struct ipzu_queue_resp {
286 u64 queue; /* points to first queue entry */
287 u32 qe_size; /* queue entry size */ 274 u32 qe_size; /* queue entry size */
288 u32 act_nr_of_sg; 275 u32 act_nr_of_sg;
289 u32 queue_length; /* queue length allocated in bytes */ 276 u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +283,6 @@ struct ehca_create_cq_resp {
296 u32 cq_number; 283 u32 cq_number;
297 u32 token; 284 u32 token;
298 struct ipzu_queue_resp ipz_queue; 285 struct ipzu_queue_resp ipz_queue;
299 struct h_galpas galpas;
300}; 286};
301 287
302struct ehca_create_qp_resp { 288struct ehca_create_qp_resp {
@@ -309,7 +295,6 @@ struct ehca_create_qp_resp {
309 u32 dummy; /* padding for 8 byte alignment */ 295 u32 dummy; /* padding for 8 byte alignment */
310 struct ipzu_queue_resp ipz_squeue; 296 struct ipzu_queue_resp ipz_squeue;
311 struct ipzu_queue_resp ipz_rqueue; 297 struct ipzu_queue_resp ipz_rqueue;
312 struct h_galpas galpas;
313}; 298};
314 299
315struct ehca_alloc_cq_parms { 300struct ehca_alloc_cq_parms {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6074c897f51c..9291a86ca053 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
267 if (context) { 267 if (context) {
268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue; 268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
269 struct ehca_create_cq_resp resp; 269 struct ehca_create_cq_resp resp;
270 struct vm_area_struct *vma;
271 memset(&resp, 0, sizeof(resp)); 270 memset(&resp, 0, sizeof(resp));
272 resp.cq_number = my_cq->cq_number; 271 resp.cq_number = my_cq->cq_number;
273 resp.token = my_cq->token; 272 resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
276 resp.ipz_queue.queue_length = ipz_queue->queue_length; 275 resp.ipz_queue.queue_length = ipz_queue->queue_length;
277 resp.ipz_queue.pagesize = ipz_queue->pagesize; 276 resp.ipz_queue.pagesize = ipz_queue->pagesize;
278 resp.ipz_queue.toggle_state = ipz_queue->toggle_state; 277 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
279 ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
280 ipz_queue->queue_length,
281 (void**)&resp.ipz_queue.queue,
282 &vma);
283 if (ret) {
284 ehca_err(device, "Could not mmap queue pages");
285 cq = ERR_PTR(ret);
286 goto create_cq_exit4;
287 }
288 my_cq->uspace_queue = resp.ipz_queue.queue;
289 resp.galpas = my_cq->galpas;
290 ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
291 (void**)&resp.galpas.kernel.fw_handle,
292 &vma);
293 if (ret) {
294 ehca_err(device, "Could not mmap fw_handle");
295 cq = ERR_PTR(ret);
296 goto create_cq_exit5;
297 }
298 my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
299 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 278 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
300 ehca_err(device, "Copy to udata failed."); 279 ehca_err(device, "Copy to udata failed.");
301 goto create_cq_exit6; 280 goto create_cq_exit4;
302 } 281 }
303 } 282 }
304 283
305 return cq; 284 return cq;
306 285
307create_cq_exit6:
308 ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
309
310create_cq_exit5:
311 ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
312
313create_cq_exit4: 286create_cq_exit4:
314 ipz_queue_dtor(&my_cq->ipz_queue); 287 ipz_queue_dtor(&my_cq->ipz_queue);
315 288
@@ -333,7 +306,6 @@ create_cq_exit1:
333int ehca_destroy_cq(struct ib_cq *cq) 306int ehca_destroy_cq(struct ib_cq *cq)
334{ 307{
335 u64 h_ret; 308 u64 h_ret;
336 int ret;
337 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 309 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
338 int cq_num = my_cq->cq_number; 310 int cq_num = my_cq->cq_number;
339 struct ib_device *device = cq->device; 311 struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
343 u32 cur_pid = current->tgid; 315 u32 cur_pid = current->tgid;
344 unsigned long flags; 316 unsigned long flags;
345 317
318 if (cq->uobject) {
319 if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
320 ehca_err(device, "Resources still referenced in "
321 "user space cq_num=%x", my_cq->cq_number);
322 return -EINVAL;
323 }
324 if (my_cq->ownpid != cur_pid) {
325 ehca_err(device, "Invalid caller pid=%x ownpid=%x "
326 "cq_num=%x",
327 cur_pid, my_cq->ownpid, my_cq->cq_number);
328 return -EINVAL;
329 }
330 }
331
346 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 332 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
347 while (my_cq->nr_callbacks) { 333 while (my_cq->nr_callbacks) {
348 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 334 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
353 idr_remove(&ehca_cq_idr, my_cq->token); 339 idr_remove(&ehca_cq_idr, my_cq->token);
354 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 340 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
355 341
356 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
357 ehca_err(device, "Invalid caller pid=%x ownpid=%x",
358 cur_pid, my_cq->ownpid);
359 return -EINVAL;
360 }
361
362 /* un-mmap if vma alloc */
363 if (my_cq->uspace_queue ) {
364 ret = ehca_munmap(my_cq->uspace_queue,
365 my_cq->ipz_queue.queue_length);
366 if (ret)
367 ehca_err(device, "Could not munmap queue ehca_cq=%p "
368 "cq_num=%x", my_cq, cq_num);
369 ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
370 if (ret)
371 ehca_err(device, "Could not munmap fwh ehca_cq=%p "
372 "cq_num=%x", my_cq, cq_num);
373 }
374
375 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 342 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
376 if (h_ret == H_R_STATE) { 343 if (h_ret == H_R_STATE) {
377 /* cq in err: read err data and destroy it forcibly */ 344 /* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
400 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 367 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
401 u32 cur_pid = current->tgid; 368 u32 cur_pid = current->tgid;
402 369
403 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { 370 if (cq->uobject && my_cq->ownpid != cur_pid) {
404 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x", 371 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
405 cur_pid, my_cq->ownpid); 372 cur_pid, my_cq->ownpid);
406 return -EINVAL; 373 return -EINVAL;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index cd7789f0d08e..95fd59fb4528 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
171 171
172void ehca_poll_eqs(unsigned long data); 172void ehca_poll_eqs(unsigned long data);
173 173
174int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
175 struct vm_area_struct **vma);
176
177int ehca_mmap_register(u64 physical,void **mapped,
178 struct vm_area_struct **vma);
179
180int ehca_munmap(unsigned long addr, size_t len);
181
182#ifdef CONFIG_PPC_64K_PAGES 174#ifdef CONFIG_PPC_64K_PAGES
183void *ehca_alloc_fw_ctrlblock(gfp_t flags); 175void *ehca_alloc_fw_ctrlblock(gfp_t flags);
184void ehca_free_fw_ctrlblock(void *ptr); 176void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6574fbbaead5..1155bcf48212 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55MODULE_VERSION("SVNEHCA_0019"); 55MODULE_VERSION("SVNEHCA_0020");
56 56
57int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
58int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); 288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
289 shca->ib_device.owner = THIS_MODULE; 289 shca->ib_device.owner = THIS_MODULE;
290 290
291 shca->ib_device.uverbs_abi_ver = 5; 291 shca->ib_device.uverbs_abi_ver = 6;
292 shca->ib_device.uverbs_cmd_mask = 292 shca->ib_device.uverbs_cmd_mask =
293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
790 int ret; 790 int ret;
791 791
792 printk(KERN_INFO "eHCA Infiniband Device Driver " 792 printk(KERN_INFO "eHCA Infiniband Device Driver "
793 "(Rel.: SVNEHCA_0019)\n"); 793 "(Rel.: SVNEHCA_0020)\n");
794 idr_init(&ehca_qp_idr); 794 idr_init(&ehca_qp_idr);
795 idr_init(&ehca_cq_idr); 795 idr_init(&ehca_cq_idr);
796 spin_lock_init(&ehca_qp_idr_lock); 796 spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 34b85556d01e..95efef921f1d 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue; 637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue; 638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
639 struct ehca_create_qp_resp resp; 639 struct ehca_create_qp_resp resp;
640 struct vm_area_struct * vma;
641 memset(&resp, 0, sizeof(resp)); 640 memset(&resp, 0, sizeof(resp));
642 641
643 resp.qp_num = my_qp->real_qp_num; 642 resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
651 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length; 650 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
652 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize; 651 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
653 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state; 652 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
654 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
655 ipz_rqueue->queue_length,
656 (void**)&resp.ipz_rqueue.queue,
657 &vma);
658 if (ret) {
659 ehca_err(pd->device, "Could not mmap rqueue pages");
660 goto create_qp_exit3;
661 }
662 my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
663 /* squeue properties */ 653 /* squeue properties */
664 resp.ipz_squeue.qe_size = ipz_squeue->qe_size; 654 resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
665 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg; 655 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
666 resp.ipz_squeue.queue_length = ipz_squeue->queue_length; 656 resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
667 resp.ipz_squeue.pagesize = ipz_squeue->pagesize; 657 resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
668 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state; 658 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
669 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
670 ipz_squeue->queue_length,
671 (void**)&resp.ipz_squeue.queue,
672 &vma);
673 if (ret) {
674 ehca_err(pd->device, "Could not mmap squeue pages");
675 goto create_qp_exit4;
676 }
677 my_qp->uspace_squeue = resp.ipz_squeue.queue;
678 /* fw_handle */
679 resp.galpas = my_qp->galpas;
680 ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
681 (void**)&resp.galpas.kernel.fw_handle,
682 &vma);
683 if (ret) {
684 ehca_err(pd->device, "Could not mmap fw_handle");
685 goto create_qp_exit5;
686 }
687 my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
688
689 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 659 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
690 ehca_err(pd->device, "Copy to udata failed"); 660 ehca_err(pd->device, "Copy to udata failed");
691 ret = -EINVAL; 661 ret = -EINVAL;
692 goto create_qp_exit6; 662 goto create_qp_exit3;
693 } 663 }
694 } 664 }
695 665
696 return &my_qp->ib_qp; 666 return &my_qp->ib_qp;
697 667
698create_qp_exit6:
699 ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
700
701create_qp_exit5:
702 ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
703
704create_qp_exit4:
705 ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
706
707create_qp_exit3: 668create_qp_exit3:
708 ipz_queue_dtor(&my_qp->ipz_rqueue); 669 ipz_queue_dtor(&my_qp->ipz_rqueue);
709 ipz_queue_dtor(&my_qp->ipz_squeue); 670 ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
931 my_qp->qp_type == IB_QPT_SMI) && 892 my_qp->qp_type == IB_QPT_SMI) &&
932 statetrans == IB_QPST_SQE2RTS) { 893 statetrans == IB_QPST_SQE2RTS) {
933 /* mark next free wqe if kernel */ 894 /* mark next free wqe if kernel */
934 if (my_qp->uspace_squeue == 0) { 895 if (!ibqp->uobject) {
935 struct ehca_wqe *wqe; 896 struct ehca_wqe *wqe;
936 /* lock send queue */ 897 /* lock send queue */
937 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); 898 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,11 +1378,18 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1417 enum ib_qp_type qp_type; 1378 enum ib_qp_type qp_type;
1418 unsigned long flags; 1379 unsigned long flags;
1419 1380
1420 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && 1381 if (ibqp->uobject) {
1421 my_pd->ownpid != cur_pid) { 1382 if (my_qp->mm_count_galpa ||
1422 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x", 1383 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
1423 cur_pid, my_pd->ownpid); 1384 ehca_err(ibqp->device, "Resources still referenced in "
1424 return -EINVAL; 1385 "user space qp_num=%x", ibqp->qp_num);
1386 return -EINVAL;
1387 }
1388 if (my_pd->ownpid != cur_pid) {
1389 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
1390 cur_pid, my_pd->ownpid);
1391 return -EINVAL;
1392 }
1425 } 1393 }
1426 1394
1427 if (my_qp->send_cq) { 1395 if (my_qp->send_cq) {
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1439 idr_remove(&ehca_qp_idr, my_qp->token); 1407 idr_remove(&ehca_qp_idr, my_qp->token);
1440 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 1408 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1441 1409
1442 /* un-mmap if vma alloc */
1443 if (my_qp->uspace_rqueue) {
1444 ret = ehca_munmap(my_qp->uspace_rqueue,
1445 my_qp->ipz_rqueue.queue_length);
1446 if (ret)
1447 ehca_err(ibqp->device, "Could not munmap rqueue "
1448 "qp_num=%x", qp_num);
1449 ret = ehca_munmap(my_qp->uspace_squeue,
1450 my_qp->ipz_squeue.queue_length);
1451 if (ret)
1452 ehca_err(ibqp->device, "Could not munmap squeue "
1453 "qp_num=%x", qp_num);
1454 ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
1455 if (ret)
1456 ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
1457 qp_num);
1458 }
1459
1460 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 1410 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
1461 if (h_ret != H_SUCCESS) { 1411 if (h_ret != H_SUCCESS) {
1462 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx " 1412 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
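The hunks above drop the kernel-side mmap bookkeeping from the QP create/destroy paths: instead of tracking uspace_rqueue/uspace_squeue/uspace_fwh and munmapping them on destroy, the driver now relies on the mm_count_galpa/mm_count_rqueue/mm_count_squeue reference counts maintained by the reworked ehca_uverbs.c further below. A condensed sketch of that lifecycle, with identifiers taken from this patch and the surrounding declarations assumed:

	/* ehca_uverbs.c side: a successful mmap records the counter and bumps
	 * it; the vm_ops open/close hooks keep it in step with vma
	 * duplication and teardown. */
	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	/* ehca_qp.c side: destroy refuses to proceed while any user-space
	 * mapping is still live. */
	if (my_qp->mm_count_galpa || my_qp->mm_count_rqueue ||
	    my_qp->mm_count_squeue)
		return -EINVAL;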
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index b46bda1bf85d..08d3f892d9f3 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -579,7 +579,7 @@ poll_cq_one_read_cqe:
579 } else 579 } else
580 wc->status = IB_WC_SUCCESS; 580 wc->status = IB_WC_SUCCESS;
581 581
582 wc->qp_num = cqe->local_qp_number; 582 wc->qp = NULL;
583 wc->byte_len = cqe->nr_bytes_transferred; 583 wc->byte_len = cqe->nr_bytes_transferred;
584 wc->pkey_index = cqe->pkey_index; 584 wc->pkey_index = cqe->pkey_index;
585 wc->slid = cqe->rlid; 585 wc->slid = cqe->rlid;
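struct ib_wc now carries a QP pointer rather than a raw QP number; ehca fills in NULL here, while the ipath and mthca hunks below set it to &qp->ibqp. A minimal sketch of how a consumer that previously read wc->qp_num adapts (the NULL check is an assumption for providers such as ehca that leave the field unset):

	struct ib_wc wc;
	/* ... after ib_poll_cq(cq, 1, &wc) returns 1 ... */
	u32 qpn = wc.qp ? wc.qp->qp_num : 0;	/* was: wc.qp_num */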
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef2..73db920b6945 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
68 return 0; 68 return 0;
69} 69}
70 70
71struct page *ehca_nopage(struct vm_area_struct *vma, 71static void ehca_mm_open(struct vm_area_struct *vma)
72 unsigned long address, int *type)
73{ 72{
74 struct page *mypage = NULL; 73 u32 *count = (u32*)vma->vm_private_data;
75 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 74 if (!count) {
76 u32 idr_handle = fileoffset >> 32; 75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
77 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */ 76 vma->vm_start, vma->vm_end);
78 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 77 return;
79 u32 cur_pid = current->tgid; 78 }
80 unsigned long flags; 79 (*count)++;
81 struct ehca_cq *cq; 80 if (!(*count))
82 struct ehca_qp *qp; 81 ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
83 struct ehca_pd *pd; 82 vma->vm_start, vma->vm_end);
84 u64 offset; 83 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
85 void *vaddr; 84 vma->vm_start, vma->vm_end, *count);
85}
86 86
87 switch (q_type) { 87static void ehca_mm_close(struct vm_area_struct *vma)
88 case 1: /* CQ */ 88{
89 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 89 u32 *count = (u32*)vma->vm_private_data;
90 cq = idr_find(&ehca_cq_idr, idr_handle); 90 if (!count) {
91 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
92 vma->vm_start, vma->vm_end);
93 return;
94 }
95 (*count)--;
96 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
97 vma->vm_start, vma->vm_end, *count);
98}
92 99
93 /* make sure this mmap really belongs to the authorized user */ 100static struct vm_operations_struct vm_ops = {
94 if (!cq) { 101 .open = ehca_mm_open,
95 ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS"); 102 .close = ehca_mm_close,
96 return NOPAGE_SIGBUS; 103};
104
105static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
106 u32 *mm_count)
107{
108 int ret;
109 u64 vsize, physical;
110
111 vsize = vma->vm_end - vma->vm_start;
112 if (vsize != EHCA_PAGESIZE) {
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
114 return -EINVAL;
115 }
116
117 physical = galpas->user.fw_handle;
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
120 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
121 ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
122 vsize, vma->vm_page_prot);
123 if (unlikely(ret)) {
124 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
125 return -ENOMEM;
126 }
127
128 vma->vm_private_data = mm_count;
129 (*mm_count)++;
130 vma->vm_ops = &vm_ops;
131
132 return 0;
133}
134
135static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
136 u32 *mm_count)
137{
138 int ret;
139 u64 start, ofs;
140 struct page *page;
141
142 vma->vm_flags |= VM_RESERVED;
143 start = vma->vm_start;
144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
146 page = virt_to_page(virt_addr);
147 ret = vm_insert_page(vma, start, page);
148 if (unlikely(ret)) {
149 ehca_gen_err("vm_insert_page() failed rc=%x", ret);
150 return ret;
97 } 151 }
152 start += PAGE_SIZE;
153 }
154 vma->vm_private_data = mm_count;
155 (*mm_count)++;
156 vma->vm_ops = &vm_ops;
98 157
99 if (cq->ownpid != cur_pid) { 158 return 0;
159}
160
161static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
162 u32 rsrc_type)
163{
164 int ret;
165
166 switch (rsrc_type) {
167 case 1: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) {
100 ehca_err(cq->ib_cq.device, 171 ehca_err(cq->ib_cq.device,
101 "Invalid caller pid=%x ownpid=%x", 172 "ehca_mmap_fw() failed rc=%x cq_num=%x",
102 cur_pid, cq->ownpid); 173 ret, cq->cq_number);
103 return NOPAGE_SIGBUS; 174 return ret;
104 } 175 }
176 break;
105 177
106 if (rsrc_type == 2) { 178 case 2: /* cq queue_addr */
107 ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq); 179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
108 offset = address - vma->vm_start; 180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
109 vaddr = ipz_qeit_calc(&cq->ipz_queue, offset); 181 if (unlikely(ret)) {
110 ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p", 182 ehca_err(cq->ib_cq.device,
111 offset, vaddr); 183 "ehca_mmap_queue() failed rc=%x cq_num=%x",
112 mypage = virt_to_page(vaddr); 184 ret, cq->cq_number);
185 return ret;
113 } 186 }
114 break; 187 break;
115 188
116 case 2: /* QP */ 189 default:
117 spin_lock_irqsave(&ehca_qp_idr_lock, flags); 190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
118 qp = idr_find(&ehca_qp_idr, idr_handle); 191 rsrc_type, cq->cq_number);
119 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 192 return -EINVAL;
193 }
120 194
121 /* make sure this mmap really belongs to the authorized user */ 195 return 0;
122 if (!qp) { 196}
123 ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS"); 197
124 return NOPAGE_SIGBUS; 198static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
199 u32 rsrc_type)
200{
201 int ret;
202
203 switch (rsrc_type) {
204 case 1: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) {
208 ehca_err(qp->ib_qp.device,
209 "remap_pfn_range() failed ret=%x qp_num=%x",
210 ret, qp->ib_qp.qp_num);
211 return -ENOMEM;
125 } 212 }
213 break;
126 214
127 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd); 215 case 2: /* qp rqueue_addr */
128 if (pd->ownpid != cur_pid) { 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
217 qp->ib_qp.qp_num);
218 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
219 if (unlikely(ret)) {
129 ehca_err(qp->ib_qp.device, 220 ehca_err(qp->ib_qp.device,
130 "Invalid caller pid=%x ownpid=%x", 221 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
131 cur_pid, pd->ownpid); 222 ret, qp->ib_qp.qp_num);
132 return NOPAGE_SIGBUS; 223 return ret;
133 } 224 }
225 break;
134 226
135 if (rsrc_type == 2) { /* rqueue */ 227 case 3: /* qp squeue_addr */
136 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp); 228 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
137 offset = address - vma->vm_start; 229 qp->ib_qp.qp_num);
138 vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset); 230 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
139 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p", 231 if (unlikely(ret)) {
140 offset, vaddr); 232 ehca_err(qp->ib_qp.device,
141 mypage = virt_to_page(vaddr); 233 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
142 } else if (rsrc_type == 3) { /* squeue */ 234 ret, qp->ib_qp.qp_num);
143 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp); 235 return ret;
144 offset = address - vma->vm_start;
145 vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
146 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
147 offset, vaddr);
148 mypage = virt_to_page(vaddr);
149 } 236 }
150 break; 237 break;
151 238
152 default: 239 default:
153 ehca_gen_err("bad queue type %x", q_type); 240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
154 return NOPAGE_SIGBUS; 241 rsrc_type, qp->ib_qp.qp_num);
155 } 242 return -EINVAL;
156
157 if (!mypage) {
158 ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
159 return NOPAGE_SIGBUS;
160 } 243 }
161 get_page(mypage);
162 244
163 return mypage; 245 return 0;
164} 246}
165 247
166static struct vm_operations_struct ehcau_vm_ops = {
167 .nopage = ehca_nopage,
168};
169
170int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 248int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
171{ 249{
172 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 250 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
175 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 253 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
176 u32 cur_pid = current->tgid; 254 u32 cur_pid = current->tgid;
177 u32 ret; 255 u32 ret;
178 u64 vsize, physical;
179 unsigned long flags; 256 unsigned long flags;
180 struct ehca_cq *cq; 257 struct ehca_cq *cq;
181 struct ehca_qp *qp; 258 struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
201 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) 278 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
202 return -EINVAL; 279 return -EINVAL;
203 280
204 switch (rsrc_type) { 281 ret = ehca_mmap_cq(vma, cq, rsrc_type);
205 case 1: /* galpa fw handle */ 282 if (unlikely(ret)) {
206 ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq); 283 ehca_err(cq->ib_cq.device,
207 vma->vm_flags |= VM_RESERVED; 284 "ehca_mmap_cq() failed rc=%x cq_num=%x",
208 vsize = vma->vm_end - vma->vm_start; 285 ret, cq->cq_number);
209 if (vsize != EHCA_PAGESIZE) { 286 return ret;
210 ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
211 vma->vm_end - vma->vm_start);
212 return -EINVAL;
213 }
214
215 physical = cq->galpas.user.fw_handle;
216 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
217 vma->vm_flags |= VM_IO | VM_RESERVED;
218
219 ehca_dbg(cq->ib_cq.device,
220 "vsize=%lx physical=%lx", vsize, physical);
221 ret = remap_pfn_range(vma, vma->vm_start,
222 physical >> PAGE_SHIFT, vsize,
223 vma->vm_page_prot);
224 if (ret) {
225 ehca_err(cq->ib_cq.device,
226 "remap_pfn_range() failed ret=%x",
227 ret);
228 return -ENOMEM;
229 }
230 break;
231
232 case 2: /* cq queue_addr */
233 ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
234 vma->vm_flags |= VM_RESERVED;
235 vma->vm_ops = &ehcau_vm_ops;
236 break;
237
238 default:
239 ehca_err(cq->ib_cq.device, "bad resource type %x",
240 rsrc_type);
241 return -EINVAL;
242 } 287 }
243 break; 288 break;
244 289
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
262 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context) 307 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
263 return -EINVAL; 308 return -EINVAL;
264 309
265 switch (rsrc_type) { 310 ret = ehca_mmap_qp(vma, qp, rsrc_type);
266 case 1: /* galpa fw handle */ 311 if (unlikely(ret)) {
267 ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp); 312 ehca_err(qp->ib_qp.device,
268 vma->vm_flags |= VM_RESERVED; 313 "ehca_mmap_qp() failed rc=%x qp_num=%x",
269 vsize = vma->vm_end - vma->vm_start; 314 ret, qp->ib_qp.qp_num);
270 if (vsize != EHCA_PAGESIZE) { 315 return ret;
271 ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
272 vma->vm_end - vma->vm_start);
273 return -EINVAL;
274 }
275
276 physical = qp->galpas.user.fw_handle;
277 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
278 vma->vm_flags |= VM_IO | VM_RESERVED;
279
280 ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
281 vsize, physical);
282 ret = remap_pfn_range(vma, vma->vm_start,
283 physical >> PAGE_SHIFT, vsize,
284 vma->vm_page_prot);
285 if (ret) {
286 ehca_err(qp->ib_qp.device,
287 "remap_pfn_range() failed ret=%x",
288 ret);
289 return -ENOMEM;
290 }
291 break;
292
293 case 2: /* qp rqueue_addr */
294 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
295 vma->vm_flags |= VM_RESERVED;
296 vma->vm_ops = &ehcau_vm_ops;
297 break;
298
299 case 3: /* qp squeue_addr */
300 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
301 vma->vm_flags |= VM_RESERVED;
302 vma->vm_ops = &ehcau_vm_ops;
303 break;
304
305 default:
306 ehca_err(qp->ib_qp.device, "bad resource type %x",
307 rsrc_type);
308 return -EINVAL;
309 } 316 }
310 break; 317 break;
311 318
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
316 323
317 return 0; 324 return 0;
318} 325}
319
320int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
321 struct vm_area_struct **vma)
322{
323 down_write(&current->mm->mmap_sem);
324 *mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
325 MAP_SHARED | MAP_ANONYMOUS,
326 foffset);
327 up_write(&current->mm->mmap_sem);
328 if (!(*mapped)) {
329 ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
330 foffset, length);
331 return -EINVAL;
332 }
333
334 *vma = find_vma(current->mm, (u64)*mapped);
335 if (!(*vma)) {
336 down_write(&current->mm->mmap_sem);
337 do_munmap(current->mm, 0, length);
338 up_write(&current->mm->mmap_sem);
339 ehca_gen_err("couldn't find vma queue=%p", *mapped);
340 return -EINVAL;
341 }
342 (*vma)->vm_flags |= VM_RESERVED;
343 (*vma)->vm_ops = &ehcau_vm_ops;
344
345 return 0;
346}
347
348int ehca_mmap_register(u64 physical, void **mapped,
349 struct vm_area_struct **vma)
350{
351 int ret;
352 unsigned long vsize;
353 /* ehca hw supports only 4k page */
354 ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
355 if (ret) {
356 ehca_gen_err("could'nt mmap physical=%lx", physical);
357 return ret;
358 }
359
360 (*vma)->vm_flags |= VM_RESERVED;
361 vsize = (*vma)->vm_end - (*vma)->vm_start;
362 if (vsize != EHCA_PAGESIZE) {
363 ehca_gen_err("invalid vsize=%lx",
364 (*vma)->vm_end - (*vma)->vm_start);
365 return -EINVAL;
366 }
367
368 (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
369 (*vma)->vm_flags |= VM_IO | VM_RESERVED;
370
371 ret = remap_pfn_range((*vma), (*vma)->vm_start,
372 physical >> PAGE_SHIFT, vsize,
373 (*vma)->vm_page_prot);
374 if (ret) {
375 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
376 return -ENOMEM;
377 }
378
379 return 0;
380
381}
382
383int ehca_munmap(unsigned long addr, size_t len) {
384 int ret = 0;
385 struct mm_struct *mm = current->mm;
386 if (mm) {
387 down_write(&mm->mmap_sem);
388 ret = do_munmap(mm, addr, len);
389 up_write(&mm->mmap_sem);
390 }
391 return ret;
392}
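With the nopage handler and the do_mmap()/do_munmap() helpers gone, userspace maps each resource directly through the uverbs device fd; ehca_mmap() decodes the idr token from bits 63..32 of the offset, the queue type (CQ/QP) from bits 31..28 and the resource type (fw handle, rqueue, squeue) from bits 27..24, matching the fileoffset parsing above. A rough user-space sketch (uverbs_fd, token and rq_length are placeholders, not defined by this patch):

	#include <sys/mman.h>
	#include <stdio.h>

	/* map the receive queue of the QP identified by 'token' */
	off_t off = ((off_t)token << 32) | (2 << 28) /* QP */ | (2 << 24) /* rqueue */;
	void *rq = mmap(NULL, rq_length, PROT_READ | PROT_WRITE,
			MAP_SHARED, uverbs_fd, off);
	if (rq == MAP_FAILED)
		perror("mmap rqueue");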
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 46c1c89bf6ae..64f07b19349f 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -379,7 +379,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
379 wc.vendor_err = 0; 379 wc.vendor_err = 0;
380 wc.byte_len = 0; 380 wc.byte_len = 0;
381 wc.imm_data = 0; 381 wc.imm_data = 0;
382 wc.qp_num = qp->ibqp.qp_num; 382 wc.qp = &qp->ibqp;
383 wc.src_qp = 0; 383 wc.src_qp = 0;
384 wc.wc_flags = 0; 384 wc.wc_flags = 0;
385 wc.pkey_index = 0; 385 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index ce6038743c5c..5ff20cb04494 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -702,7 +702,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
703 wc->vendor_err = 0; 703 wc->vendor_err = 0;
704 wc->byte_len = 0; 704 wc->byte_len = 0;
705 wc->qp_num = qp->ibqp.qp_num; 705 wc->qp = &qp->ibqp;
706 wc->src_qp = qp->remote_qpn; 706 wc->src_qp = qp->remote_qpn;
707 wc->pkey_index = 0; 707 wc->pkey_index = 0;
708 wc->slid = qp->remote_ah_attr.dlid; 708 wc->slid = qp->remote_ah_attr.dlid;
@@ -836,7 +836,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
837 wc.vendor_err = 0; 837 wc.vendor_err = 0;
838 wc.byte_len = wqe->length; 838 wc.byte_len = wqe->length;
839 wc.qp_num = qp->ibqp.qp_num; 839 wc.qp = &qp->ibqp;
840 wc.src_qp = qp->remote_qpn; 840 wc.src_qp = qp->remote_qpn;
841 wc.pkey_index = 0; 841 wc.pkey_index = 0;
842 wc.slid = qp->remote_ah_attr.dlid; 842 wc.slid = qp->remote_ah_attr.dlid;
@@ -951,7 +951,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
952 wc.vendor_err = 0; 952 wc.vendor_err = 0;
953 wc.byte_len = 0; 953 wc.byte_len = 0;
954 wc.qp_num = qp->ibqp.qp_num; 954 wc.qp = &qp->ibqp;
955 wc.src_qp = qp->remote_qpn; 955 wc.src_qp = qp->remote_qpn;
956 wc.pkey_index = 0; 956 wc.pkey_index = 0;
957 wc.slid = qp->remote_ah_attr.dlid; 957 wc.slid = qp->remote_ah_attr.dlid;
@@ -1511,7 +1511,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1511 wc.status = IB_WC_SUCCESS; 1511 wc.status = IB_WC_SUCCESS;
1512 wc.opcode = IB_WC_RECV; 1512 wc.opcode = IB_WC_RECV;
1513 wc.vendor_err = 0; 1513 wc.vendor_err = 0;
1514 wc.qp_num = qp->ibqp.qp_num; 1514 wc.qp = &qp->ibqp;
1515 wc.src_qp = qp->remote_qpn; 1515 wc.src_qp = qp->remote_qpn;
1516 wc.pkey_index = 0; 1516 wc.pkey_index = 0;
1517 wc.slid = qp->remote_ah_attr.dlid; 1517 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f7530512045d..e86cb171872e 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -137,7 +137,7 @@ bad_lkey:
137 wc.vendor_err = 0; 137 wc.vendor_err = 0;
138 wc.byte_len = 0; 138 wc.byte_len = 0;
139 wc.imm_data = 0; 139 wc.imm_data = 0;
140 wc.qp_num = qp->ibqp.qp_num; 140 wc.qp = &qp->ibqp;
141 wc.src_qp = 0; 141 wc.src_qp = 0;
142 wc.wc_flags = 0; 142 wc.wc_flags = 0;
143 wc.pkey_index = 0; 143 wc.pkey_index = 0;
@@ -336,7 +336,7 @@ again:
336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
337 wc.vendor_err = 0; 337 wc.vendor_err = 0;
338 wc.byte_len = 0; 338 wc.byte_len = 0;
339 wc.qp_num = sqp->ibqp.qp_num; 339 wc.qp = &sqp->ibqp;
340 wc.src_qp = sqp->remote_qpn; 340 wc.src_qp = sqp->remote_qpn;
341 wc.pkey_index = 0; 341 wc.pkey_index = 0;
342 wc.slid = sqp->remote_ah_attr.dlid; 342 wc.slid = sqp->remote_ah_attr.dlid;
@@ -426,7 +426,7 @@ again:
426 wc.status = IB_WC_SUCCESS; 426 wc.status = IB_WC_SUCCESS;
427 wc.vendor_err = 0; 427 wc.vendor_err = 0;
428 wc.byte_len = wqe->length; 428 wc.byte_len = wqe->length;
429 wc.qp_num = qp->ibqp.qp_num; 429 wc.qp = &qp->ibqp;
430 wc.src_qp = qp->remote_qpn; 430 wc.src_qp = qp->remote_qpn;
431 /* XXX do we know which pkey matched? Only needed for GSI. */ 431 /* XXX do we know which pkey matched? Only needed for GSI. */
432 wc.pkey_index = 0; 432 wc.pkey_index = 0;
@@ -447,7 +447,7 @@ send_comp:
447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
448 wc.vendor_err = 0; 448 wc.vendor_err = 0;
449 wc.byte_len = wqe->length; 449 wc.byte_len = wqe->length;
450 wc.qp_num = sqp->ibqp.qp_num; 450 wc.qp = &sqp->ibqp;
451 wc.src_qp = 0; 451 wc.src_qp = 0;
452 wc.pkey_index = 0; 452 wc.pkey_index = 0;
453 wc.slid = 0; 453 wc.slid = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index e636cfd67a82..325d6634ff53 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -49,7 +49,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
50 wc->vendor_err = 0; 50 wc->vendor_err = 0;
51 wc->byte_len = wqe->length; 51 wc->byte_len = wqe->length;
52 wc->qp_num = qp->ibqp.qp_num; 52 wc->qp = &qp->ibqp;
53 wc->src_qp = qp->remote_qpn; 53 wc->src_qp = qp->remote_qpn;
54 wc->pkey_index = 0; 54 wc->pkey_index = 0;
55 wc->slid = qp->remote_ah_attr.dlid; 55 wc->slid = qp->remote_ah_attr.dlid;
@@ -411,7 +411,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
411 wc.status = IB_WC_SUCCESS; 411 wc.status = IB_WC_SUCCESS;
412 wc.opcode = IB_WC_RECV; 412 wc.opcode = IB_WC_RECV;
413 wc.vendor_err = 0; 413 wc.vendor_err = 0;
414 wc.qp_num = qp->ibqp.qp_num; 414 wc.qp = &qp->ibqp;
415 wc.src_qp = qp->remote_qpn; 415 wc.src_qp = qp->remote_qpn;
416 wc.pkey_index = 0; 416 wc.pkey_index = 0;
417 wc.slid = qp->remote_ah_attr.dlid; 417 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 49f1102af8b3..9a3e54664ee4 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -66,7 +66,7 @@ bad_lkey:
66 wc.vendor_err = 0; 66 wc.vendor_err = 0;
67 wc.byte_len = 0; 67 wc.byte_len = 0;
68 wc.imm_data = 0; 68 wc.imm_data = 0;
69 wc.qp_num = qp->ibqp.qp_num; 69 wc.qp = &qp->ibqp;
70 wc.src_qp = 0; 70 wc.src_qp = 0;
71 wc.wc_flags = 0; 71 wc.wc_flags = 0;
72 wc.pkey_index = 0; 72 wc.pkey_index = 0;
@@ -255,7 +255,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
255 wc->status = IB_WC_SUCCESS; 255 wc->status = IB_WC_SUCCESS;
256 wc->opcode = IB_WC_RECV; 256 wc->opcode = IB_WC_RECV;
257 wc->vendor_err = 0; 257 wc->vendor_err = 0;
258 wc->qp_num = qp->ibqp.qp_num; 258 wc->qp = &qp->ibqp;
259 wc->src_qp = sqp->ibqp.qp_num; 259 wc->src_qp = sqp->ibqp.qp_num;
260 /* XXX do we know which pkey matched? Only needed for GSI. */ 260 /* XXX do we know which pkey matched? Only needed for GSI. */
261 wc->pkey_index = 0; 261 wc->pkey_index = 0;
@@ -474,7 +474,7 @@ done:
474 wc.vendor_err = 0; 474 wc.vendor_err = 0;
475 wc.opcode = IB_WC_SEND; 475 wc.opcode = IB_WC_SEND;
476 wc.byte_len = len; 476 wc.byte_len = len;
477 wc.qp_num = qp->ibqp.qp_num; 477 wc.qp = &qp->ibqp;
478 wc.src_qp = 0; 478 wc.src_qp = 0;
479 wc.wc_flags = 0; 479 wc.wc_flags = 0;
480 /* XXX initialize other fields? */ 480 /* XXX initialize other fields? */
@@ -651,7 +651,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
651 wc.status = IB_WC_SUCCESS; 651 wc.status = IB_WC_SUCCESS;
652 wc.opcode = IB_WC_RECV; 652 wc.opcode = IB_WC_RECV;
653 wc.vendor_err = 0; 653 wc.vendor_err = 0;
654 wc.qp_num = qp->ibqp.qp_num; 654 wc.qp = &qp->ibqp;
655 wc.src_qp = src_qp; 655 wc.src_qp = src_qp;
656 /* XXX do we know which pkey matched? Only needed for GSI. */ 656 /* XXX do we know which pkey matched? Only needed for GSI. */
657 wc.pkey_index = 0; 657 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 768df7265b81..968d1519761c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1854,7 +1854,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1854 1854
1855 memset(inbox + 256, 0, 256); 1855 memset(inbox + 256, 0, 256);
1856 1856
1857 MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1857 MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
1859 1859
1860 val = in_wc->sl << 4; 1860 val = in_wc->sl << 4;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1159c8a0f2c5..efd79ef109a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -534,7 +534,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
534 } 534 }
535 } 535 }
536 536
537 entry->qp_num = (*cur_qp)->qpn; 537 entry->qp = &(*cur_qp)->ibqp;
538 538
539 if (is_send) { 539 if (is_send) {
540 wq = &(*cur_qp)->sq; 540 wq = &(*cur_qp)->sq;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 72611fd15103..5e8ac577f0ad 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -548,6 +548,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
548 target->tx_head = 0; 548 target->tx_head = 0;
549 target->tx_tail = 0; 549 target->tx_tail = 0;
550 550
551 target->qp_in_error = 0;
551 ret = srp_connect_target(target); 552 ret = srp_connect_target(target);
552 if (ret) 553 if (ret)
553 goto err; 554 goto err;
@@ -878,6 +879,7 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
878 printk(KERN_ERR PFX "failed %s status %d\n", 879 printk(KERN_ERR PFX "failed %s status %d\n",
879 wc.wr_id & SRP_OP_RECV ? "receive" : "send", 880 wc.wr_id & SRP_OP_RECV ? "receive" : "send",
880 wc.status); 881 wc.status);
882 target->qp_in_error = 1;
881 break; 883 break;
882 } 884 }
883 885
@@ -1337,6 +1339,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
1337 1339
1338 printk(KERN_ERR "SRP abort called\n"); 1340 printk(KERN_ERR "SRP abort called\n");
1339 1341
1342 if (target->qp_in_error)
1343 return FAILED;
1340 if (srp_find_req(target, scmnd, &req)) 1344 if (srp_find_req(target, scmnd, &req))
1341 return FAILED; 1345 return FAILED;
1342 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) 1346 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
@@ -1365,6 +1369,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1365 1369
1366 printk(KERN_ERR "SRP reset_device called\n"); 1370 printk(KERN_ERR "SRP reset_device called\n");
1367 1371
1372 if (target->qp_in_error)
1373 return FAILED;
1368 if (srp_find_req(target, scmnd, &req)) 1374 if (srp_find_req(target, scmnd, &req))
1369 return FAILED; 1375 return FAILED;
1370 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) 1376 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
@@ -1801,6 +1807,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
1801 goto err_free; 1807 goto err_free;
1802 } 1808 }
1803 1809
1810 target->qp_in_error = 0;
1804 ret = srp_connect_target(target); 1811 ret = srp_connect_target(target);
1805 if (ret) { 1812 if (ret) {
1806 printk(KERN_ERR PFX "Connection failed\n"); 1813 printk(KERN_ERR PFX "Connection failed\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index c21772317b86..2f3319c719a5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -158,6 +158,7 @@ struct srp_target_port {
158 struct completion done; 158 struct completion done;
159 int status; 159 int status;
160 enum srp_target_state state; 160 enum srp_target_state state;
161 int qp_in_error;
161}; 162};
162 163
163struct srp_iu { 164struct srp_iu {
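The new qp_in_error flag is cleared before every (re)connect and set as soon as a completion comes back with a non-success status; srp_abort() and srp_reset_device() then fail fast instead of issuing task management on a broken QP. Condensed from the hunks above (surrounding function bodies elided):

	/* completion handler: remember that the QP is broken */
	if (wc.status) {
		target->qp_in_error = 1;
		break;
	}

	/* error handlers: refuse to touch a broken QP */
	if (target->qp_in_error)
		return FAILED;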
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 00db31c314e0..89bba277da5f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,6 +69,25 @@ config TIFM_7XX1
69 To compile this driver as a module, choose M here: the module will 69 To compile this driver as a module, choose M here: the module will
70 be called tifm_7xx1. 70 be called tifm_7xx1.
71 71
72config ASUS_LAPTOP
73 tristate "Asus Laptop Extras (EXPERIMENTAL)"
74 depends on X86
75 depends on ACPI
76 depends on EXPERIMENTAL && !ACPI_ASUS
77 depends on LEDS_CLASS
78 depends on BACKLIGHT_CLASS_DEVICE
79 ---help---
80 This is the new Linux driver for Asus laptops. It may also support some
81 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
82 standard ACPI events that go through /proc/acpi/events. It also adds
83 support for video output switching, LCD backlight control, Bluetooth and
84 Wlan control, and most importantly, allows you to blink those fancy LEDs.
85
86 For more information and a userspace daemon for handling the extra
87 buttons see <http://acpi4asus.sf.net/>.
88
89 If you have an ACPI-compatible ASUS laptop, say Y or M here.
90
72config MSI_LAPTOP 91config MSI_LAPTOP
73 tristate "MSI Laptop Extras" 92 tristate "MSI Laptop Extras"
74 depends on X86 93 depends on X86
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c9e98ab021c5..35da53c409c0 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,6 +6,7 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
6obj-$(CONFIG_IBM_ASM) += ibmasm/ 6obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
9obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
9obj-$(CONFIG_LKDTM) += lkdtm.o 10obj-$(CONFIG_LKDTM) += lkdtm.o
10obj-$(CONFIG_TIFM_CORE) += tifm_core.o 11obj-$(CONFIG_TIFM_CORE) += tifm_core.o
11obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 12obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
new file mode 100644
index 000000000000..861c39935f99
--- /dev/null
+++ b/drivers/misc/asus-laptop.c
@@ -0,0 +1,1165 @@
1/*
2 * asus-laptop.c - Asus Laptop Support
3 *
4 *
5 * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
6 * Copyright (C) 2006 Corentin Chary
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 *
23 * The development page for this driver is located at
24 * http://sourceforge.net/projects/acpi4asus/
25 *
26 * Credits:
27 * Pontus Fuchs - Helper functions, cleanup
28 * Johann Wiesner - Small compile fixes
29 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
30 * Eric Burghard - LED display support for W1N
31 * Josh Green - Light Sens support
 32 * Thomas Tuttle - His first patch for LED support was very helpful
33 *
34 */
35
36#include <linux/autoconf.h>
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/init.h>
40#include <linux/types.h>
41#include <linux/err.h>
42#include <linux/proc_fs.h>
43#include <linux/backlight.h>
44#include <linux/fb.h>
45#include <linux/leds.h>
46#include <linux/platform_device.h>
47#include <acpi/acpi_drivers.h>
48#include <acpi/acpi_bus.h>
49#include <asm/uaccess.h>
50
51#define ASUS_LAPTOP_VERSION "0.40"
52
53#define ASUS_HOTK_NAME "Asus Laptop Support"
54#define ASUS_HOTK_CLASS "hotkey"
55#define ASUS_HOTK_DEVICE_NAME "Hotkey"
56#define ASUS_HOTK_HID "ATK0100"
57#define ASUS_HOTK_FILE "asus-laptop"
58#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
59
60/*
61 * Some events we use, same for all Asus
62 */
63#define ATKD_BR_UP 0x10
64#define ATKD_BR_DOWN 0x20
65#define ATKD_LCD_ON 0x33
66#define ATKD_LCD_OFF 0x34
67
68/*
69 * Known bits returned by \_SB.ATKD.HWRS
70 */
71#define WL_HWRS 0x80
72#define BT_HWRS 0x100
73
74/*
75 * Flags for hotk status
76 * WL_ON and BT_ON are also used for wireless_status()
77 */
78#define WL_ON 0x01 //internal Wifi
79#define BT_ON 0x02 //internal Bluetooth
80#define MLED_ON 0x04 //mail LED
81#define TLED_ON 0x08 //touchpad LED
82#define RLED_ON 0x10 //Record LED
83#define PLED_ON 0x20 //Phone LED
84#define LCD_ON 0x40 //LCD backlight
85
86#define ASUS_LOG ASUS_HOTK_FILE ": "
87#define ASUS_ERR KERN_ERR ASUS_LOG
88#define ASUS_WARNING KERN_WARNING ASUS_LOG
89#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
90#define ASUS_INFO KERN_INFO ASUS_LOG
91#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
92
93MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
94MODULE_DESCRIPTION(ASUS_HOTK_NAME);
95MODULE_LICENSE("GPL");
96
97#define ASUS_HANDLE(object, paths...) \
98 static acpi_handle object##_handle = NULL; \
99 static char *object##_paths[] = { paths }
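/*
 * Sketch for illustration: ASUS_HANDLE(object, paths...) declares a handle
 * and its candidate ACPI paths.  For example,
 *	ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
 * expands to roughly
 *	static acpi_handle mled_set_handle = NULL;
 *	static char *mled_set_paths[] = { "\\_SB.ATKD.MLED" };
 * ASUS_HANDLE_INIT(), defined further below, then walks the path list with
 * acpi_get_handle() until one of them resolves.
 */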
100
101/* LED */
102ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
103ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED");
104ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */
105ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */
106
107/* LEDD */
108ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
109
110/* Bluetooth and WLAN
 111 * WLED and BLED are not handled like the other xLEDs, because in some DSDTs
112 * they also control the WLAN/Bluetooth device.
113 */
114ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED");
115ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED");
116ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */
117
118/* Brightness */
119ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV");
120ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV");
121
122/* Backlight */
123ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
124 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
125 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
126 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
127 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
128 "\\_SB.PCI0.PX40.Q10", /* S1x */
129 "\\Q10"); /* A2x, L2D, L3D, M2E */
130
131/* Display */
132ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
133ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
134 M6A M6V VX-1 V6J V6V W3Z */
135 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
136 S5A M5A z33A W1Jc W2V */
137 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
138 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
139 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
140 "\\_SB.PCI0.VGA.GETD", /* Z96F */
141 "\\ACTD", /* A2D */
142 "\\ADVG", /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
143 "\\DNXT", /* P30 */
144 "\\INFB", /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
145 "\\SSTE"); /* A3F A6F A3N A3L M6N W3N W6A */
146
147ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
148ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
149
150/*
151 * This is the main structure, we can use it to store anything interesting
152 * about the hotk device
153 */
154struct asus_hotk {
155 char *name; //laptop name
156 struct acpi_device *device; //the device we are in
157 acpi_handle handle; //the handle of the hotk device
158 char status; //status of the hotk, for LEDs, ...
159 u32 ledd_status; //status of the LED display
160 u8 light_level; //light sensor level
161 u8 light_switch; //light sensor switch value
162 u16 event_count[128]; //count for each event TODO make this better
163};
164
165/*
 166 * This header is made available to allow proper configuration given the model,
 167 * revision number, etc.  This info cannot go in struct asus_hotk because it is
 168 * available before the hotk itself
169 */
170static struct acpi_table_header *asus_info;
171
172/* The actual device the driver binds to */
173static struct asus_hotk *hotk;
174
175/*
176 * The hotkey driver declaration
177 */
178static int asus_hotk_add(struct acpi_device *device);
179static int asus_hotk_remove(struct acpi_device *device, int type);
180static struct acpi_driver asus_hotk_driver = {
181 .name = ASUS_HOTK_NAME,
182 .class = ASUS_HOTK_CLASS,
183 .ids = ASUS_HOTK_HID,
184 .ops = {
185 .add = asus_hotk_add,
186 .remove = asus_hotk_remove,
187 },
188};
189
190/* The backlight device /sys/class/backlight */
191static struct backlight_device *asus_backlight_device;
192
193/*
194 * The backlight class declaration
195 */
196static int read_brightness(struct backlight_device *bd);
197static int update_bl_status(struct backlight_device *bd);
198static struct backlight_properties asusbl_data = {
199 .owner = THIS_MODULE,
200 .get_brightness = read_brightness,
201 .update_status = update_bl_status,
202 .max_brightness = 15,
203};
204
 205/* These functions actually update the LEDs, and are called from a
206 * workqueue. By doing this as separate work rather than when the LED
207 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
208 * potentially bad time, such as a timer interrupt. */
209static struct workqueue_struct *led_workqueue;
210
211#define ASUS_LED(object, ledname) \
212 static void object##_led_set(struct led_classdev *led_cdev, \
213 enum led_brightness value); \
214 static void object##_led_update(struct work_struct *ignored); \
215 static int object##_led_wk; \
216 DECLARE_WORK(object##_led_work, object##_led_update); \
217 static struct led_classdev object##_led = { \
218 .name = "asus:" ledname, \
219 .brightness_set = object##_led_set, \
220 }
221
222ASUS_LED(mled, "mail");
223ASUS_LED(tled, "touchpad");
224ASUS_LED(rled, "record");
225ASUS_LED(pled, "phone");
226
227/*
 228 * This function evaluates an ACPI method, given an int as parameter. The
 229 * method is searched within the scope of the handle, which can be NULL. The
 230 * output of the method is written to output, which can also be NULL.
 231 *
 232 * Returns 1 if the write is successful, 0 otherwise.
233 */
234static int write_acpi_int(acpi_handle handle, const char *method, int val,
235 struct acpi_buffer *output)
236{
237 struct acpi_object_list params; //list of input parameters (an int here)
238 union acpi_object in_obj; //the only param we use
239 acpi_status status;
240
241 params.count = 1;
242 params.pointer = &in_obj;
243 in_obj.type = ACPI_TYPE_INTEGER;
244 in_obj.integer.value = val;
245
246 status = acpi_evaluate_object(handle, (char *)method, &params, output);
247 return (status == AE_OK);
248}
249
250static int read_acpi_int(acpi_handle handle, const char *method, int *val,
251 struct acpi_object_list *params)
252{
253 struct acpi_buffer output;
254 union acpi_object out_obj;
255 acpi_status status;
256
257 output.length = sizeof(out_obj);
258 output.pointer = &out_obj;
259
260 status = acpi_evaluate_object(handle, (char *)method, params, &output);
261 *val = out_obj.integer.value;
262 return (status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER);
263}
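/*
 * Sketch for illustration: typical call sites later in this file either pass
 * a method name relative to the hotk handle, or a handle that already points
 * at the method together with a NULL name, e.g.:
 *
 *	int temp;
 *	if (read_acpi_int(hotk->handle, "SFUN", &temp, NULL))
 *		... ;
 *	write_acpi_int(brightness_set_handle, NULL, value, NULL);
 *
 * Both helpers return 1 on success and 0 on failure, so callers test the
 * return value directly in if() conditions.
 */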
264
265static int read_wireless_status(int mask)
266{
267 int status;
268
269 if (!wireless_status_handle)
270 return (hotk->status & mask) ? 1 : 0;
271
272 if (read_acpi_int(wireless_status_handle, NULL, &status, NULL)) {
273 return (status & mask) ? 1 : 0;
274 } else
275 printk(ASUS_WARNING "Error reading Wireless status\n");
276
277 return (hotk->status & mask) ? 1 : 0;
278}
279
280/* Generic LED functions */
281static int read_status(int mask)
282{
283 /* There is a special method for both wireless devices */
284 if (mask == BT_ON || mask == WL_ON)
285 return read_wireless_status(mask);
286
287 return (hotk->status & mask) ? 1 : 0;
288}
289
290static void write_status(acpi_handle handle, int out, int mask, int invert)
291{
292 hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
293
294 if (invert) /* invert target value */
295 out = !out & 0x1;
296
297 if (handle && !write_acpi_int(handle, NULL, out, NULL))
298 printk(ASUS_WARNING " write failed\n");
299}
300
301/* /sys/class/led handlers */
302#define ASUS_LED_HANDLER(object, mask, invert) \
303 static void object##_led_set(struct led_classdev *led_cdev, \
304 enum led_brightness value) \
305 { \
306 object##_led_wk = value; \
307 queue_work(led_workqueue, &object##_led_work); \
308 } \
309 static void object##_led_update(struct work_struct *ignored) \
310 { \
311 int value = object##_led_wk; \
312 write_status(object##_set_handle, value, (mask), (invert)); \
313 }
314
315ASUS_LED_HANDLER(mled, MLED_ON, 1);
316ASUS_LED_HANDLER(pled, PLED_ON, 0);
317ASUS_LED_HANDLER(rled, RLED_ON, 0);
318ASUS_LED_HANDLER(tled, TLED_ON, 0);
319
320static int get_lcd_state(void)
321{
322 return read_status(LCD_ON);
323}
324
325static int set_lcd_state(int value)
326{
327 int lcd = 0;
328 acpi_status status = 0;
329
330 lcd = value ? 1 : 0;
331
332 if (lcd == get_lcd_state())
333 return 0;
334
335 if (lcd_switch_handle) {
336 status = acpi_evaluate_object(lcd_switch_handle,
337 NULL, NULL, NULL);
338
339 if (ACPI_FAILURE(status))
340 printk(ASUS_WARNING "Error switching LCD\n");
341 }
342
343 write_status(NULL, lcd, LCD_ON, 0);
344 return 0;
345}
346
347static void lcd_blank(int blank)
348{
349 struct backlight_device *bd = asus_backlight_device;
350
351 if (bd) {
352 down(&bd->sem);
353 if (likely(bd->props)) {
354 bd->props->power = blank;
355 if (likely(bd->props->update_status))
356 bd->props->update_status(bd);
357 }
358 up(&bd->sem);
359 }
360}
361
362static int read_brightness(struct backlight_device *bd)
363{
364 int value;
365
366 if (!read_acpi_int(brightness_get_handle, NULL, &value, NULL))
367 printk(ASUS_WARNING "Error reading brightness\n");
368
369 return value;
370}
371
372static int set_brightness(struct backlight_device *bd, int value)
373{
374 int ret = 0;
375
376 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
377 /* 0 <= value <= 15 */
378
379 if (!write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
380 printk(ASUS_WARNING "Error changing brightness\n");
381 ret = -EIO;
382 }
383
384 return ret;
385}
386
387static int update_bl_status(struct backlight_device *bd)
388{
389 int rv;
390 int value = bd->props->brightness;
391
392 rv = set_brightness(bd, value);
393 if (rv)
394 return rv;
395
396 value = (bd->props->power == FB_BLANK_UNBLANK) ? 1 : 0;
397 return set_lcd_state(value);
398}
399
400/*
401 * Platform device handlers
402 */
403
404/*
 405 * We write our info into page; we begin at offset off and cannot write more
 406 * than count bytes. We set eof to 1 if we handle those two values. We return
 407 * the number of bytes written into page.
408 */
409static ssize_t show_infos(struct device *dev,
410 struct device_attribute *attr, char *page)
411{
412 int len = 0;
413 int temp;
414 char buf[16]; //enough for all info
415 /*
 416 * We use the easy way: we don't care about off and count, so we don't set
 417 * eof to 1
418 */
419
420 len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n");
421 len += sprintf(page + len, "Model reference : %s\n", hotk->name);
422 /*
423 * The SFUN method probably allows the original driver to get the list
424 * of features supported by a given model. For now, 0x0100 or 0x0800
425 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
426 * The significance of others is yet to be found.
427 */
428 if (read_acpi_int(hotk->handle, "SFUN", &temp, NULL))
429 len +=
430 sprintf(page + len, "SFUN value : 0x%04x\n", temp);
431 /*
432 * Another value for userspace: the ASYM method returns 0x02 for
433 * battery low and 0x04 for battery critical, its readings tend to be
434 * more accurate than those provided by _BST.
435 * Note: since not all the laptops provide this method, errors are
436 * silently ignored.
437 */
438 if (read_acpi_int(hotk->handle, "ASYM", &temp, NULL))
439 len +=
440 sprintf(page + len, "ASYM value : 0x%04x\n", temp);
441 if (asus_info) {
442 snprintf(buf, 16, "%d", asus_info->length);
443 len += sprintf(page + len, "DSDT length : %s\n", buf);
444 snprintf(buf, 16, "%d", asus_info->checksum);
445 len += sprintf(page + len, "DSDT checksum : %s\n", buf);
446 snprintf(buf, 16, "%d", asus_info->revision);
447 len += sprintf(page + len, "DSDT revision : %s\n", buf);
448 snprintf(buf, 7, "%s", asus_info->oem_id);
449 len += sprintf(page + len, "OEM id : %s\n", buf);
450 snprintf(buf, 9, "%s", asus_info->oem_table_id);
451 len += sprintf(page + len, "OEM table id : %s\n", buf);
452 snprintf(buf, 16, "%x", asus_info->oem_revision);
453 len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
454 snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
455 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
456 snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
457 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
458 }
459
460 return len;
461}
462
463static int parse_arg(const char *buf, unsigned long count, int *val)
464{
465 if (!count)
466 return 0;
467 if (count > 31)
468 return -EINVAL;
469 if (sscanf(buf, "%i", val) != 1)
470 return -EINVAL;
471 return count;
472}
473
474static ssize_t store_status(const char *buf, size_t count,
475 acpi_handle handle, int mask, int invert)
476{
477 int rv, value;
478 int out = 0;
479
480 rv = parse_arg(buf, count, &value);
481 if (rv > 0)
482 out = value ? 1 : 0;
483
484 write_status(handle, out, mask, invert);
485
486 return rv;
487}
488
489/*
490 * LEDD display
491 */
492static ssize_t show_ledd(struct device *dev,
493 struct device_attribute *attr, char *buf)
494{
495 return sprintf(buf, "0x%08x\n", hotk->ledd_status);
496}
497
498static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
499 const char *buf, size_t count)
500{
501 int rv, value;
502
503 rv = parse_arg(buf, count, &value);
504 if (rv > 0) {
505 if (!write_acpi_int(ledd_set_handle, NULL, value, NULL))
506 printk(ASUS_WARNING "LED display write failed\n");
507 else
508 hotk->ledd_status = (u32) value;
509 }
510 return rv;
511}
512
513/*
514 * WLAN
515 */
516static ssize_t show_wlan(struct device *dev,
517 struct device_attribute *attr, char *buf)
518{
519 return sprintf(buf, "%d\n", read_status(WL_ON));
520}
521
522static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
523 const char *buf, size_t count)
524{
525 return store_status(buf, count, wl_switch_handle, WL_ON, 0);
526}
527
528/*
529 * Bluetooth
530 */
531static ssize_t show_bluetooth(struct device *dev,
532 struct device_attribute *attr, char *buf)
533{
534 return sprintf(buf, "%d\n", read_status(BT_ON));
535}
536
537static ssize_t store_bluetooth(struct device *dev,
538 struct device_attribute *attr, const char *buf,
539 size_t count)
540{
541 return store_status(buf, count, bt_switch_handle, BT_ON, 0);
542}
543
544/*
545 * Display
546 */
547static void set_display(int value)
548{
549 /* no sanity check needed for now */
550 if (!write_acpi_int(display_set_handle, NULL, value, NULL))
551 printk(ASUS_WARNING "Error setting display\n");
552 return;
553}
554
555static int read_display(void)
556{
557 int value = 0;
558
 559 /* In most cases we know how to set the display, but sometimes
 560 we can't read it */
561 if (display_get_handle) {
562 if (!read_acpi_int(display_get_handle, NULL, &value, NULL))
563 printk(ASUS_WARNING "Error reading display status\n");
564 }
565
566 value &= 0x0F; /* needed for some models, shouldn't hurt others */
567
568 return value;
569}
570
571/*
572 * Now, *this* one could be more user-friendly, but so far, no-one has
573 * complained. The significance of bits is the same as in store_disp()
574 */
575static ssize_t show_disp(struct device *dev,
576 struct device_attribute *attr, char *buf)
577{
578 return sprintf(buf, "%d\n", read_display());
579}
580
581/*
582 * Experimental support for display switching. As of now: 1 should activate
583 * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI.
584 * Any combination (bitwise) of these will suffice. I never actually tested 4
585 * displays hooked up simultaneously, so be warned. See the acpi4asus README
586 * for more info.
587 */
588static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 int rv, value;
592
593 rv = parse_arg(buf, count, &value);
594 if (rv > 0)
595 set_display(value);
596 return rv;
597}
598
599/*
600 * Light Sens
601 */
602static void set_light_sens_switch(int value)
603{
604 if (!write_acpi_int(ls_switch_handle, NULL, value, NULL))
605 printk(ASUS_WARNING "Error setting light sensor switch\n");
606 hotk->light_switch = value;
607}
608
609static ssize_t show_lssw(struct device *dev,
610 struct device_attribute *attr, char *buf)
611{
612 return sprintf(buf, "%d\n", hotk->light_switch);
613}
614
615static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
616 const char *buf, size_t count)
617{
618 int rv, value;
619
620 rv = parse_arg(buf, count, &value);
621 if (rv > 0)
622 set_light_sens_switch(value ? 1 : 0);
623
624 return rv;
625}
626
627static void set_light_sens_level(int value)
628{
629 if (!write_acpi_int(ls_level_handle, NULL, value, NULL))
630 printk(ASUS_WARNING "Error setting light sensor level\n");
631 hotk->light_level = value;
632}
633
634static ssize_t show_lslvl(struct device *dev,
635 struct device_attribute *attr, char *buf)
636{
637 return sprintf(buf, "%d\n", hotk->light_level);
638}
639
640static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
641 const char *buf, size_t count)
642{
643 int rv, value;
644
645 rv = parse_arg(buf, count, &value);
646 if (rv > 0) {
647 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
648 /* 0 <= value <= 15 */
649 set_light_sens_level(value);
650 }
651
652 return rv;
653}
654
655static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
656{
657 /* TODO Find a better way to handle events count. */
658 if (!hotk)
659 return;
660
661 /*
662 * We need to tell the backlight device when the backlight power is
663 * switched
664 */
665 if (event == ATKD_LCD_ON) {
666 write_status(NULL, 1, LCD_ON, 0);
667 lcd_blank(FB_BLANK_UNBLANK);
668 } else if (event == ATKD_LCD_OFF) {
669 write_status(NULL, 0, LCD_ON, 0);
670 lcd_blank(FB_BLANK_POWERDOWN);
671 }
672
673 acpi_bus_generate_event(hotk->device, event,
674 hotk->event_count[event % 128]++);
675
676 return;
677}
678
679#define ASUS_CREATE_DEVICE_ATTR(_name) \
680 struct device_attribute dev_attr_##_name = { \
681 .attr = { \
682 .name = __stringify(_name), \
683 .mode = 0, \
684 .owner = THIS_MODULE }, \
685 .show = NULL, \
686 .store = NULL, \
687 }
688
689#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \
690 do { \
691 dev_attr_##_name.attr.mode = _mode; \
692 dev_attr_##_name.show = _show; \
693 dev_attr_##_name.store = _store; \
694 } while(0)
695
696static ASUS_CREATE_DEVICE_ATTR(infos);
697static ASUS_CREATE_DEVICE_ATTR(wlan);
698static ASUS_CREATE_DEVICE_ATTR(bluetooth);
699static ASUS_CREATE_DEVICE_ATTR(display);
700static ASUS_CREATE_DEVICE_ATTR(ledd);
701static ASUS_CREATE_DEVICE_ATTR(ls_switch);
702static ASUS_CREATE_DEVICE_ATTR(ls_level);
703
704static struct attribute *asuspf_attributes[] = {
705 &dev_attr_infos.attr,
706 &dev_attr_wlan.attr,
707 &dev_attr_bluetooth.attr,
708 &dev_attr_display.attr,
709 &dev_attr_ledd.attr,
710 &dev_attr_ls_switch.attr,
711 &dev_attr_ls_level.attr,
712 NULL
713};
714
715static struct attribute_group asuspf_attribute_group = {
716 .attrs = asuspf_attributes
717};
718
719static struct platform_driver asuspf_driver = {
720 .driver = {
721 .name = ASUS_HOTK_FILE,
722 .owner = THIS_MODULE,
723 }
724};
725
726static struct platform_device *asuspf_device;
727
728static void asus_hotk_add_fs(void)
729{
730 ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL);
731
732 if (wl_switch_handle)
733 ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
734
735 if (bt_switch_handle)
736 ASUS_SET_DEVICE_ATTR(bluetooth, 0644,
737 show_bluetooth, store_bluetooth);
738
739 if (display_set_handle && display_get_handle)
740 ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp);
741 else if (display_set_handle)
742 ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
743
744 if (ledd_set_handle)
745 ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd);
746
747 if (ls_switch_handle && ls_level_handle) {
748 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
749 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
750 }
751}
752
753static int asus_handle_init(char *name, acpi_handle * handle,
754 char **paths, int num_paths)
755{
756 int i;
757 acpi_status status;
758
759 for (i = 0; i < num_paths; i++) {
760 status = acpi_get_handle(NULL, paths[i], handle);
761 if (ACPI_SUCCESS(status))
762 return 0;
763 }
764
765 *handle = NULL;
766 return -ENODEV;
767}
768
769#define ASUS_HANDLE_INIT(object) \
770 asus_handle_init(#object, &object##_handle, object##_paths, \
771 ARRAY_SIZE(object##_paths))
772
773/*
774 * This function is used to initialize the hotk with right values. In this
775 * method, we can make all the detection we want, and modify the hotk struct
776 */
777static int asus_hotk_get_info(void)
778{
779 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
780 union acpi_object *model = NULL;
781 int bsts_result, hwrs_result;
782 char *string = NULL;
783 acpi_status status;
784
785 /*
786 * Get DSDT headers early enough to allow for differentiating between
787 * models, but late enough to allow acpi_bus_register_driver() to fail
788 * before doing anything ACPI-specific. Should we encounter a machine,
789 * which needs special handling (i.e. its hotkey device has a different
790 * HID), this bit will be moved. A global variable asus_info contains
791 * the DSDT header.
792 */
793 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
794 if (ACPI_FAILURE(status))
795 printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
796
 797 /* So far, for all ASUS models, we have to write 0 to INIT */
798 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
799 printk(ASUS_ERR "Hotkey initialization failed\n");
800 return -ENODEV;
801 }
802
803 /* This needs to be called for some laptops to init properly */
804 if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result, NULL))
805 printk(ASUS_WARNING "Error calling BSTS\n");
806 else if (bsts_result)
807 printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
808 bsts_result);
809
810 /*
811 * Try to match the object returned by INIT to the specific model.
 812 * Handle every possible object (or the lack thereof) the DSDT
813 * writers might throw at us. When in trouble, we pass NULL to
814 * asus_model_match() and try something completely different.
815 */
816 if (buffer.pointer) {
817 model = buffer.pointer;
818 switch (model->type) {
819 case ACPI_TYPE_STRING:
820 string = model->string.pointer;
821 break;
822 case ACPI_TYPE_BUFFER:
823 string = model->buffer.pointer;
824 break;
825 default:
826 string = "";
827 break;
828 }
829 }
830 hotk->name = kstrdup(string, GFP_KERNEL);
831 if (!hotk->name)
832 return -ENOMEM;
833
834 if (*string)
835 printk(ASUS_NOTICE " %s model detected\n", string);
836
837 ASUS_HANDLE_INIT(mled_set);
838 ASUS_HANDLE_INIT(tled_set);
839 ASUS_HANDLE_INIT(rled_set);
840 ASUS_HANDLE_INIT(pled_set);
841
842 ASUS_HANDLE_INIT(ledd_set);
843
844 /*
 845 * The HWRS method returns information about the hardware.
 846 * 0x80 bit is for WLAN, 0x100 for Bluetooth.
 847 * The significance of others is yet to be found.
 848 * If we don't find the method, we assume the devices are present.
849 */
850 if (!read_acpi_int(hotk->handle, "HRWS", &hwrs_result, NULL))
851 hwrs_result = WL_HWRS | BT_HWRS;
852
853 if (hwrs_result & WL_HWRS)
854 ASUS_HANDLE_INIT(wl_switch);
855 if (hwrs_result & BT_HWRS)
856 ASUS_HANDLE_INIT(bt_switch);
857
858 ASUS_HANDLE_INIT(wireless_status);
859
860 ASUS_HANDLE_INIT(brightness_set);
861 ASUS_HANDLE_INIT(brightness_get);
862
863 ASUS_HANDLE_INIT(lcd_switch);
864
865 ASUS_HANDLE_INIT(display_set);
866 ASUS_HANDLE_INIT(display_get);
867
 868 /* There are a lot of models with "ALSL", but only a few have
 869 a real light sensor, so we need to check it. */
870 if (ASUS_HANDLE_INIT(ls_switch))
871 ASUS_HANDLE_INIT(ls_level);
872
873 kfree(model);
874
875 return AE_OK;
876}
877
878static int asus_hotk_check(void)
879{
880 int result = 0;
881
882 result = acpi_bus_get_status(hotk->device);
883 if (result)
884 return result;
885
886 if (hotk->device->status.present) {
887 result = asus_hotk_get_info();
888 } else {
889 printk(ASUS_ERR "Hotkey device not present, aborting\n");
890 return -EINVAL;
891 }
892
893 return result;
894}
895
896static int asus_hotk_found;
897
898static int asus_hotk_add(struct acpi_device *device)
899{
900 acpi_status status = AE_OK;
901 int result;
902
903 if (!device)
904 return -EINVAL;
905
906 printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
907 ASUS_LAPTOP_VERSION);
908
909 hotk = kmalloc(sizeof(struct asus_hotk), GFP_KERNEL);
910 if (!hotk)
911 return -ENOMEM;
912 memset(hotk, 0, sizeof(struct asus_hotk));
913
914 hotk->handle = device->handle;
915 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
916 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
917 acpi_driver_data(device) = hotk;
918 hotk->device = device;
919
920 result = asus_hotk_check();
921 if (result)
922 goto end;
923
924 asus_hotk_add_fs();
925
926 /*
927 * Install the notify handler; it will receive the hotk as a parameter, so
928 * other data can be added to the hotk struct later.
929 */
930 status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
931 asus_hotk_notify, hotk);
932 if (ACPI_FAILURE(status))
933 printk(ASUS_ERR "Error installing notify handler\n");
934
935 asus_hotk_found = 1;
936
937 /* WLED and BLED are on by default */
938 write_status(bt_switch_handle, 1, BT_ON, 0);
939 write_status(wl_switch_handle, 1, WL_ON, 0);
940
941 /* LCD Backlight is on by default */
942 write_status(NULL, 1, LCD_ON, 0);
943
944 /* LED display is off by default */
945 hotk->ledd_status = 0xFFF;
946
947 /* Set initial values of light sensor and level */
948 hotk->light_switch = 1; /* Default to light sensor disabled */
949 hotk->light_level = 0; /* light sensor sensitivity level */
950
951 if (ls_switch_handle)
952 set_light_sens_switch(hotk->light_switch);
953
954 if (ls_level_handle)
955 set_light_sens_level(hotk->light_level);
956
957 end:
958 if (result) {
959 kfree(hotk->name);
960 kfree(hotk);
961 }
962
963 return result;
964}
965
966static int asus_hotk_remove(struct acpi_device *device, int type)
967{
968 acpi_status status = 0;
969
970 if (!device || !acpi_driver_data(device))
971 return -EINVAL;
972
973 status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
974 asus_hotk_notify);
975 if (ACPI_FAILURE(status))
976 printk(ASUS_ERR "Error removing notify handler\n");
977
978 kfree(hotk->name);
979 kfree(hotk);
980
981 return 0;
982}
983
984static void asus_backlight_exit(void)
985{
986 if (asus_backlight_device)
987 backlight_device_unregister(asus_backlight_device);
988}
989
990#define ASUS_LED_UNREGISTER(object) \
991 if(object##_led.class_dev \
992 && !IS_ERR(object##_led.class_dev)) \
993 led_classdev_unregister(&object##_led)
994
995static void asus_led_exit(void)
996{
997 ASUS_LED_UNREGISTER(mled);
998 ASUS_LED_UNREGISTER(tled);
999 ASUS_LED_UNREGISTER(pled);
1000 ASUS_LED_UNREGISTER(rled);
1001
1002 destroy_workqueue(led_workqueue);
1003}
1004
1005static void __exit asus_laptop_exit(void)
1006{
1007 asus_backlight_exit();
1008 asus_led_exit();
1009
1010 acpi_bus_unregister_driver(&asus_hotk_driver);
1011 sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
1012 platform_device_unregister(asuspf_device);
1013 platform_driver_unregister(&asuspf_driver);
1014}
1015
1016static int asus_backlight_init(struct device *dev)
1017{
1018 struct backlight_device *bd;
1019
1020 if (brightness_set_handle && lcd_switch_handle) {
1021 bd = backlight_device_register(ASUS_HOTK_FILE, dev,
1022 NULL, &asusbl_data);
1023 if (IS_ERR(bd)) {
1024 printk(ASUS_ERR
1025 "Could not register asus backlight device\n");
1026 asus_backlight_device = NULL;
1027 return PTR_ERR(bd);
1028 }
1029
1030 asus_backlight_device = bd;
1031
1032 down(&bd->sem);
1033 if (likely(bd->props)) {
1034 bd->props->brightness = read_brightness(NULL);
1035 bd->props->power = FB_BLANK_UNBLANK;
1036 if (likely(bd->props->update_status))
1037 bd->props->update_status(bd);
1038 }
1039 up(&bd->sem);
1040 }
1041 return 0;
1042}
1043
1044static int asus_led_register(acpi_handle handle,
1045 struct led_classdev *ldev, struct device *dev)
1046{
1047 if (!handle)
1048 return 0;
1049
1050 return led_classdev_register(dev, ldev);
1051}
1052
1053#define ASUS_LED_REGISTER(object, device) \
1054 asus_led_register(object##_set_handle, &object##_led, device)
1055
1056static int asus_led_init(struct device *dev)
1057{
1058 int rv;
1059
1060 rv = ASUS_LED_REGISTER(mled, dev);
1061 if (rv)
1062 return rv;
1063
1064 rv = ASUS_LED_REGISTER(tled, dev);
1065 if (rv)
1066 return rv;
1067
1068 rv = ASUS_LED_REGISTER(rled, dev);
1069 if (rv)
1070 return rv;
1071
1072 rv = ASUS_LED_REGISTER(pled, dev);
1073 if (rv)
1074 return rv;
1075
1076 led_workqueue = create_singlethread_workqueue("led_workqueue");
1077 if (!led_workqueue)
1078 return -ENOMEM;
1079
1080 return 0;
1081}
1082
1083static int __init asus_laptop_init(void)
1084{
1085 struct device *dev;
1086 int result;
1087
1088 if (acpi_disabled)
1089 return -ENODEV;
1090
1091 if (!acpi_specific_hotkey_enabled) {
1092 printk(ASUS_ERR "Using generic hotkey driver\n");
1093 return -ENODEV;
1094 }
1095
1096 result = acpi_bus_register_driver(&asus_hotk_driver);
1097 if (result < 0)
1098 return result;
1099
1100 /*
1101 * This is a bit of a kludge. We only want this module loaded
1102 * for ASUS systems, but there's currently no way to probe the
1103 * ACPI namespace for ASUS HIDs. So we just return failure if
1104 * we didn't find one, which will cause the module to be
1105 * unloaded.
1106 */
1107 if (!asus_hotk_found) {
1108 acpi_bus_unregister_driver(&asus_hotk_driver);
1109 return -ENODEV;
1110 }
1111
1112 dev = acpi_get_physical_device(hotk->device->handle);
1113
1114 result = asus_backlight_init(dev);
1115 if (result)
1116 goto fail_backlight;
1117
1118 result = asus_led_init(dev);
1119 if (result)
1120 goto fail_led;
1121
1122 /* Register platform stuff */
1123 result = platform_driver_register(&asuspf_driver);
1124 if (result)
1125 goto fail_platform_driver;
1126
1127 asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
1128 if (!asuspf_device) {
1129 result = -ENOMEM;
1130 goto fail_platform_device1;
1131 }
1132
1133 result = platform_device_add(asuspf_device);
1134 if (result)
1135 goto fail_platform_device2;
1136
1137 result = sysfs_create_group(&asuspf_device->dev.kobj,
1138 &asuspf_attribute_group);
1139 if (result)
1140 goto fail_sysfs;
1141
1142 return 0;
1143
1144 fail_sysfs:
1145 platform_device_del(asuspf_device);
1146
1147 fail_platform_device2:
1148 platform_device_put(asuspf_device);
1149
1150 fail_platform_device1:
1151 platform_driver_unregister(&asuspf_driver);
1152
1153 fail_platform_driver:
1154 asus_led_exit();
1155
1156 fail_led:
1157 asus_backlight_exit();
1158
1159 fail_backlight:
1160
1161 return result;
1162}
1163
1164module_init(asus_laptop_init);
1165module_exit(asus_laptop_exit);
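
The asus_handle_init()/ASUS_HANDLE_INIT() pair near the top of this file resolves each optional feature to the first ACPI path that actually exists on the running model, leaving the handle NULL otherwise. The following is a minimal standalone sketch of that token-pasting pattern; the acpi_get_handle() stand-in (fake_get_handle), the HANDLE_INIT macro and the example pled_set paths are illustrative only, not the driver's real tables.

#include <stdio.h>
#include <string.h>

typedef void *acpi_handle;

/* stand-in for acpi_get_handle(): pretends only \_SB.PLED exists */
static int fake_get_handle(const char *path, acpi_handle *out)
{
	if (strcmp(path, "\\_SB.PLED") == 0) {
		*out = (void *)path;
		return 0;
	}
	return -1;
}

/* mirrors asus_handle_init(): try each candidate path in turn */
static int handle_init(const char *name, acpi_handle *handle,
		       const char **paths, int num_paths)
{
	(void)name;			/* kept to mirror the driver's signature */
	for (int i = 0; i < num_paths; i++)
		if (!fake_get_handle(paths[i], handle))
			return 0;
	*handle = NULL;
	return -1;			/* the driver returns -ENODEV here */
}

/* same token pasting as ASUS_HANDLE_INIT(): builds <object>_handle/_paths */
#define HANDLE_INIT(object) \
	handle_init(#object, &object##_handle, object##_paths, \
		    (int)(sizeof(object##_paths) / sizeof(object##_paths[0])))

static const char *pled_set_paths[] = { "\\_SB.PLED", "\\PLED" };	/* example paths */
static acpi_handle pled_set_handle;

int main(void)
{
	if (!HANDLE_INIT(pled_set))
		printf("pled_set resolved to a handle\n");
	else
		printf("pled_set not present on this model\n");
	return 0;
}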
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 2ab7add78f94..e21e490fedb0 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -11,66 +11,25 @@
11 11
12#include <linux/tifm.h> 12#include <linux/tifm.h>
13#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
14#include <linux/freezer.h>
14 15
15#define DRIVER_NAME "tifm_7xx1" 16#define DRIVER_NAME "tifm_7xx1"
16#define DRIVER_VERSION "0.6" 17#define DRIVER_VERSION "0.7"
17 18
18static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) 19static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
19{ 20{
20 int cnt;
21 unsigned long flags;
22
23 spin_lock_irqsave(&fm->lock, flags);
24 if (!fm->inhibit_new_cards) {
25 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
26 if (fm->sockets[cnt] == sock) {
27 fm->remove_mask |= (1 << cnt);
28 queue_work(fm->wq, &fm->media_remover);
29 break;
30 }
31 }
32 }
33 spin_unlock_irqrestore(&fm->lock, flags);
34}
35
36static void tifm_7xx1_remove_media(struct work_struct *work)
37{
38 struct tifm_adapter *fm =
39 container_of(work, struct tifm_adapter, media_remover);
40 unsigned long flags; 21 unsigned long flags;
41 int cnt;
42 struct tifm_dev *sock;
43 22
44 if (!class_device_get(&fm->cdev))
45 return;
46 spin_lock_irqsave(&fm->lock, flags); 23 spin_lock_irqsave(&fm->lock, flags);
47 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 24 fm->socket_change_set |= 1 << sock->socket_id;
48 if (fm->sockets[cnt] && (fm->remove_mask & (1 << cnt))) { 25 wake_up_all(&fm->change_set_notify);
49 printk(KERN_INFO DRIVER_NAME
50 ": demand removing card from socket %d\n", cnt);
51 sock = fm->sockets[cnt];
52 fm->sockets[cnt] = NULL;
53 fm->remove_mask &= ~(1 << cnt);
54
55 writel(0x0e00, sock->addr + SOCK_CONTROL);
56
57 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
58 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
59 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
60 fm->addr + FM_SET_INTERRUPT_ENABLE);
61
62 spin_unlock_irqrestore(&fm->lock, flags);
63 device_unregister(&sock->dev);
64 spin_lock_irqsave(&fm->lock, flags);
65 }
66 }
67 spin_unlock_irqrestore(&fm->lock, flags); 26 spin_unlock_irqrestore(&fm->lock, flags);
68 class_device_put(&fm->cdev);
69} 27}
70 28
71static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) 29static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
72{ 30{
73 struct tifm_adapter *fm = dev_id; 31 struct tifm_adapter *fm = dev_id;
32 struct tifm_dev *sock;
74 unsigned int irq_status; 33 unsigned int irq_status;
75 unsigned int sock_irq_status, cnt; 34 unsigned int sock_irq_status, cnt;
76 35
@@ -84,42 +43,32 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
84 if (irq_status & TIFM_IRQ_ENABLE) { 43 if (irq_status & TIFM_IRQ_ENABLE) {
85 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 44 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
86 45
87 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 46 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
88 sock_irq_status = (irq_status >> cnt) & 47 sock = fm->sockets[cnt];
89 (TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK); 48 sock_irq_status = (irq_status >> cnt)
90 49 & (TIFM_IRQ_FIFOMASK(1)
91 if (fm->sockets[cnt]) { 50 | TIFM_IRQ_CARDMASK(1));
92 if (sock_irq_status &&
93 fm->sockets[cnt]->signal_irq)
94 sock_irq_status = fm->sockets[cnt]->
95 signal_irq(fm->sockets[cnt],
96 sock_irq_status);
97 51
98 if (irq_status & (1 << cnt)) 52 if (sock && sock_irq_status)
99 fm->remove_mask |= 1 << cnt; 53 sock->signal_irq(sock, sock_irq_status);
100 } else {
101 if (irq_status & (1 << cnt))
102 fm->insert_mask |= 1 << cnt;
103 }
104 } 54 }
55
56 fm->socket_change_set |= irq_status
57 & ((1 << fm->num_sockets) - 1);
105 } 58 }
106 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); 59 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
107 60
108 if (!fm->inhibit_new_cards) { 61 if (!fm->socket_change_set)
109 if (!fm->remove_mask && !fm->insert_mask) { 62 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
110 writel(TIFM_IRQ_ENABLE, 63 else
111 fm->addr + FM_SET_INTERRUPT_ENABLE); 64 wake_up_all(&fm->change_set_notify);
112 } else {
113 queue_work(fm->wq, &fm->media_remover);
114 queue_work(fm->wq, &fm->media_inserter);
115 }
116 }
117 65
118 spin_unlock(&fm->lock); 66 spin_unlock(&fm->lock);
119 return IRQ_HANDLED; 67 return IRQ_HANDLED;
120} 68}
121 69
122static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is_x2) 70static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr,
71 int is_x2)
123{ 72{
124 unsigned int s_state; 73 unsigned int s_state;
125 int cnt; 74 int cnt;
@@ -127,8 +76,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
127 writel(0x0e00, sock_addr + SOCK_CONTROL); 76 writel(0x0e00, sock_addr + SOCK_CONTROL);
128 77
129 for (cnt = 0; cnt < 100; cnt++) { 78 for (cnt = 0; cnt < 100; cnt++) {
130 if (!(TIFM_SOCK_STATE_POWERED & 79 if (!(TIFM_SOCK_STATE_POWERED
131 readl(sock_addr + SOCK_PRESENT_STATE))) 80 & readl(sock_addr + SOCK_PRESENT_STATE)))
132 break; 81 break;
133 msleep(10); 82 msleep(10);
134 } 83 }
@@ -151,8 +100,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
151 } 100 }
152 101
153 for (cnt = 0; cnt < 100; cnt++) { 102 for (cnt = 0; cnt < 100; cnt++) {
154 if ((TIFM_SOCK_STATE_POWERED & 103 if ((TIFM_SOCK_STATE_POWERED
155 readl(sock_addr + SOCK_PRESENT_STATE))) 104 & readl(sock_addr + SOCK_PRESENT_STATE)))
156 break; 105 break;
157 msleep(10); 106 msleep(10);
158 } 107 }
@@ -170,130 +119,209 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
170 return base_addr + ((sock_num + 1) << 10); 119 return base_addr + ((sock_num + 1) << 10);
171} 120}
172 121
173static void tifm_7xx1_insert_media(struct work_struct *work) 122static int tifm_7xx1_switch_media(void *data)
174{ 123{
175 struct tifm_adapter *fm = 124 struct tifm_adapter *fm = data;
176 container_of(work, struct tifm_adapter, media_inserter);
177 unsigned long flags; 125 unsigned long flags;
178 tifm_media_id media_id; 126 tifm_media_id media_id;
179 char *card_name = "xx"; 127 char *card_name = "xx";
180 int cnt, ok_to_register; 128 int cnt, rc;
181 unsigned int insert_mask; 129 struct tifm_dev *sock;
182 struct tifm_dev *new_sock = NULL; 130 unsigned int socket_change_set;
183 131
184 if (!class_device_get(&fm->cdev)) 132 while (1) {
185 return; 133 rc = wait_event_interruptible(fm->change_set_notify,
186 spin_lock_irqsave(&fm->lock, flags); 134 fm->socket_change_set);
187 insert_mask = fm->insert_mask; 135 if (rc == -ERESTARTSYS)
188 fm->insert_mask = 0; 136 try_to_freeze();
189 if (fm->inhibit_new_cards) { 137
138 spin_lock_irqsave(&fm->lock, flags);
139 socket_change_set = fm->socket_change_set;
140 fm->socket_change_set = 0;
141
142 dev_dbg(fm->dev, "checking media set %x\n",
143 socket_change_set);
144
145 if (kthread_should_stop())
146 socket_change_set = (1 << fm->num_sockets) - 1;
190 spin_unlock_irqrestore(&fm->lock, flags); 147 spin_unlock_irqrestore(&fm->lock, flags);
191 class_device_put(&fm->cdev);
192 return;
193 }
194 spin_unlock_irqrestore(&fm->lock, flags);
195 148
196 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 149 if (!socket_change_set)
197 if (!(insert_mask & (1 << cnt)))
198 continue; 150 continue;
199 151
200 media_id = tifm_7xx1_toggle_sock_power(tifm_7xx1_sock_addr(fm->addr, cnt), 152 spin_lock_irqsave(&fm->lock, flags);
201 fm->max_sockets == 2); 153 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
202 if (media_id) { 154 if (!(socket_change_set & (1 << cnt)))
203 ok_to_register = 0; 155 continue;
204 new_sock = tifm_alloc_device(fm, cnt); 156 sock = fm->sockets[cnt];
205 if (new_sock) { 157 if (sock) {
206 new_sock->addr = tifm_7xx1_sock_addr(fm->addr,
207 cnt);
208 new_sock->media_id = media_id;
209 switch (media_id) {
210 case 1:
211 card_name = "xd";
212 break;
213 case 2:
214 card_name = "ms";
215 break;
216 case 3:
217 card_name = "sd";
218 break;
219 default:
220 break;
221 }
222 snprintf(new_sock->dev.bus_id, BUS_ID_SIZE,
223 "tifm_%s%u:%u", card_name, fm->id, cnt);
224 printk(KERN_INFO DRIVER_NAME 158 printk(KERN_INFO DRIVER_NAME
225 ": %s card detected in socket %d\n", 159 ": demand removing card from socket %d\n",
226 card_name, cnt); 160 cnt);
161 fm->sockets[cnt] = NULL;
162 spin_unlock_irqrestore(&fm->lock, flags);
163 device_unregister(&sock->dev);
227 spin_lock_irqsave(&fm->lock, flags); 164 spin_lock_irqsave(&fm->lock, flags);
228 if (!fm->sockets[cnt]) { 165 writel(0x0e00,
229 fm->sockets[cnt] = new_sock; 166 tifm_7xx1_sock_addr(fm->addr, cnt)
230 ok_to_register = 1; 167 + SOCK_CONTROL);
168 }
169 if (kthread_should_stop())
170 continue;
171
172 spin_unlock_irqrestore(&fm->lock, flags);
173 media_id = tifm_7xx1_toggle_sock_power(
174 tifm_7xx1_sock_addr(fm->addr, cnt),
175 fm->num_sockets == 2);
176 if (media_id) {
177 sock = tifm_alloc_device(fm);
178 if (sock) {
179 sock->addr = tifm_7xx1_sock_addr(fm->addr,
180 cnt);
181 sock->media_id = media_id;
182 sock->socket_id = cnt;
183 switch (media_id) {
184 case 1:
185 card_name = "xd";
186 break;
187 case 2:
188 card_name = "ms";
189 break;
190 case 3:
191 card_name = "sd";
192 break;
193 default:
194 tifm_free_device(&sock->dev);
195 spin_lock_irqsave(&fm->lock, flags);
196 continue;
197 }
198 snprintf(sock->dev.bus_id, BUS_ID_SIZE,
199 "tifm_%s%u:%u", card_name,
200 fm->id, cnt);
201 printk(KERN_INFO DRIVER_NAME
202 ": %s card detected in socket %d\n",
203 card_name, cnt);
204 if (!device_register(&sock->dev)) {
205 spin_lock_irqsave(&fm->lock, flags);
206 if (!fm->sockets[cnt]) {
207 fm->sockets[cnt] = sock;
208 sock = NULL;
209 }
210 spin_unlock_irqrestore(&fm->lock, flags);
211 }
212 if (sock)
213 tifm_free_device(&sock->dev);
231 } 214 }
215 spin_lock_irqsave(&fm->lock, flags);
216 }
217 }
218
219 if (!kthread_should_stop()) {
220 writel(TIFM_IRQ_FIFOMASK(socket_change_set)
221 | TIFM_IRQ_CARDMASK(socket_change_set),
222 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
223 writel(TIFM_IRQ_FIFOMASK(socket_change_set)
224 | TIFM_IRQ_CARDMASK(socket_change_set),
225 fm->addr + FM_SET_INTERRUPT_ENABLE);
226 writel(TIFM_IRQ_ENABLE,
227 fm->addr + FM_SET_INTERRUPT_ENABLE);
228 spin_unlock_irqrestore(&fm->lock, flags);
229 } else {
230 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
231 if (fm->sockets[cnt])
232 fm->socket_change_set |= 1 << cnt;
233 }
234 if (!fm->socket_change_set) {
235 spin_unlock_irqrestore(&fm->lock, flags);
236 return 0;
237 } else {
232 spin_unlock_irqrestore(&fm->lock, flags); 238 spin_unlock_irqrestore(&fm->lock, flags);
233 if (!ok_to_register ||
234 device_register(&new_sock->dev)) {
235 spin_lock_irqsave(&fm->lock, flags);
236 fm->sockets[cnt] = NULL;
237 spin_unlock_irqrestore(&fm->lock,
238 flags);
239 tifm_free_device(&new_sock->dev);
240 }
241 } 239 }
242 } 240 }
243 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
244 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
245 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
246 fm->addr + FM_SET_INTERRUPT_ENABLE);
247 } 241 }
248 242 return 0;
249 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
250 class_device_put(&fm->cdev);
251} 243}
252 244
245#ifdef CONFIG_PM
246
253static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) 247static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
254{ 248{
255 struct tifm_adapter *fm = pci_get_drvdata(dev); 249 dev_dbg(&dev->dev, "suspending host\n");
256 unsigned long flags;
257 250
258 spin_lock_irqsave(&fm->lock, flags); 251 pci_save_state(dev);
259 fm->inhibit_new_cards = 1; 252 pci_enable_wake(dev, pci_choose_state(dev, state), 0);
260 fm->remove_mask = 0xf; 253 pci_disable_device(dev);
261 fm->insert_mask = 0; 254 pci_set_power_state(dev, pci_choose_state(dev, state));
262 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
263 spin_unlock_irqrestore(&fm->lock, flags);
264 flush_workqueue(fm->wq);
265
266 tifm_7xx1_remove_media(&fm->media_remover);
267
268 pci_set_power_state(dev, PCI_D3hot);
269 pci_disable_device(dev);
270 pci_save_state(dev);
271 return 0; 255 return 0;
272} 256}
273 257
274static int tifm_7xx1_resume(struct pci_dev *dev) 258static int tifm_7xx1_resume(struct pci_dev *dev)
275{ 259{
276 struct tifm_adapter *fm = pci_get_drvdata(dev); 260 struct tifm_adapter *fm = pci_get_drvdata(dev);
261 int cnt, rc;
277 unsigned long flags; 262 unsigned long flags;
263 tifm_media_id new_ids[fm->num_sockets];
278 264
265 pci_set_power_state(dev, PCI_D0);
279 pci_restore_state(dev); 266 pci_restore_state(dev);
280 pci_enable_device(dev); 267 rc = pci_enable_device(dev);
281 pci_set_power_state(dev, PCI_D0); 268 if (rc)
282 pci_set_master(dev); 269 return rc;
270 pci_set_master(dev);
283 271
272 dev_dbg(&dev->dev, "resuming host\n");
273
274 for (cnt = 0; cnt < fm->num_sockets; cnt++)
275 new_ids[cnt] = tifm_7xx1_toggle_sock_power(
276 tifm_7xx1_sock_addr(fm->addr, cnt),
277 fm->num_sockets == 2);
284 spin_lock_irqsave(&fm->lock, flags); 278 spin_lock_irqsave(&fm->lock, flags);
285 fm->inhibit_new_cards = 0; 279 fm->socket_change_set = 0;
286 writel(TIFM_IRQ_SETALL, fm->addr + FM_INTERRUPT_STATUS); 280 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
287 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 281 if (fm->sockets[cnt]) {
288 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, 282 if (fm->sockets[cnt]->media_id == new_ids[cnt])
289 fm->addr + FM_SET_INTERRUPT_ENABLE); 283 fm->socket_change_set |= 1 << cnt;
290 fm->insert_mask = 0xf; 284
285 fm->sockets[cnt]->media_id = new_ids[cnt];
286 }
287 }
288
289 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
290 fm->addr + FM_SET_INTERRUPT_ENABLE);
291 if (!fm->socket_change_set) {
292 spin_unlock_irqrestore(&fm->lock, flags);
293 return 0;
294 } else {
295 fm->socket_change_set = 0;
296 spin_unlock_irqrestore(&fm->lock, flags);
297 }
298
299 wait_event_timeout(fm->change_set_notify, fm->socket_change_set, HZ);
300
301 spin_lock_irqsave(&fm->lock, flags);
302 writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
303 | TIFM_IRQ_CARDMASK(fm->socket_change_set),
304 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
305 writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
306 | TIFM_IRQ_CARDMASK(fm->socket_change_set),
307 fm->addr + FM_SET_INTERRUPT_ENABLE);
308 writel(TIFM_IRQ_ENABLE,
309 fm->addr + FM_SET_INTERRUPT_ENABLE);
310 fm->socket_change_set = 0;
311
291 spin_unlock_irqrestore(&fm->lock, flags); 312 spin_unlock_irqrestore(&fm->lock, flags);
292 return 0; 313 return 0;
293} 314}
294 315
316#else
317
318#define tifm_7xx1_suspend NULL
319#define tifm_7xx1_resume NULL
320
321#endif /* CONFIG_PM */
322
295static int tifm_7xx1_probe(struct pci_dev *dev, 323static int tifm_7xx1_probe(struct pci_dev *dev,
296 const struct pci_device_id *dev_id) 324 const struct pci_device_id *dev_id)
297{ 325{
298 struct tifm_adapter *fm; 326 struct tifm_adapter *fm;
299 int pci_dev_busy = 0; 327 int pci_dev_busy = 0;
@@ -324,19 +352,18 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
324 } 352 }
325 353
326 fm->dev = &dev->dev; 354 fm->dev = &dev->dev;
327 fm->max_sockets = (dev->device == 0x803B) ? 2 : 4; 355 fm->num_sockets = (dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM)
328 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->max_sockets, 356 ? 4 : 2;
329 GFP_KERNEL); 357 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->num_sockets,
358 GFP_KERNEL);
330 if (!fm->sockets) 359 if (!fm->sockets)
331 goto err_out_free; 360 goto err_out_free;
332 361
333 INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
334 INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
335 fm->eject = tifm_7xx1_eject; 362 fm->eject = tifm_7xx1_eject;
336 pci_set_drvdata(dev, fm); 363 pci_set_drvdata(dev, fm);
337 364
338 fm->addr = ioremap(pci_resource_start(dev, 0), 365 fm->addr = ioremap(pci_resource_start(dev, 0),
339 pci_resource_len(dev, 0)); 366 pci_resource_len(dev, 0));
340 if (!fm->addr) 367 if (!fm->addr)
341 goto err_out_free; 368 goto err_out_free;
342 369
@@ -344,16 +371,15 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
344 if (rc) 371 if (rc)
345 goto err_out_unmap; 372 goto err_out_unmap;
346 373
347 rc = tifm_add_adapter(fm); 374 init_waitqueue_head(&fm->change_set_notify);
375 rc = tifm_add_adapter(fm, tifm_7xx1_switch_media);
348 if (rc) 376 if (rc)
349 goto err_out_irq; 377 goto err_out_irq;
350 378
351 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 379 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
352 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, 380 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
353 fm->addr + FM_SET_INTERRUPT_ENABLE); 381 fm->addr + FM_SET_INTERRUPT_ENABLE);
354 382 wake_up_process(fm->media_switcher);
355 fm->insert_mask = 0xf;
356
357 return 0; 383 return 0;
358 384
359err_out_irq: 385err_out_irq:
@@ -377,19 +403,15 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
377 struct tifm_adapter *fm = pci_get_drvdata(dev); 403 struct tifm_adapter *fm = pci_get_drvdata(dev);
378 unsigned long flags; 404 unsigned long flags;
379 405
406 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
407 mmiowb();
408 free_irq(dev->irq, fm);
409
380 spin_lock_irqsave(&fm->lock, flags); 410 spin_lock_irqsave(&fm->lock, flags);
381 fm->inhibit_new_cards = 1; 411 fm->socket_change_set = (1 << fm->num_sockets) - 1;
382 fm->remove_mask = 0xf;
383 fm->insert_mask = 0;
384 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
385 spin_unlock_irqrestore(&fm->lock, flags); 412 spin_unlock_irqrestore(&fm->lock, flags);
386 413
387 flush_workqueue(fm->wq); 414 kthread_stop(fm->media_switcher);
388
389 tifm_7xx1_remove_media(&fm->media_remover);
390
391 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
392 free_irq(dev->irq, fm);
393 415
394 tifm_remove_adapter(fm); 416 tifm_remove_adapter(fm);
395 417
@@ -404,10 +426,12 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
404} 426}
405 427
406static struct pci_device_id tifm_7xx1_pci_tbl [] = { 428static struct pci_device_id tifm_7xx1_pci_tbl [] = {
407 { PCI_VENDOR_ID_TI, 0x8033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 429 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
408 0 }, /* xx21 - the one I have */ 430 PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
409 { PCI_VENDOR_ID_TI, 0x803B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 431 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
410 0 }, /* xx12 - should be also supported */ 432 PCI_ANY_ID, 0, 0, 0 },
433 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
434 PCI_ANY_ID, 0, 0, 0 },
411 { } 435 { }
412}; 436};
413 437
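
The rewritten tifm_7xx1 code drops the insert/remove work items in favour of a single socket_change_set bitmask: the interrupt handler ORs in one bit per socket that reported an event and wakes the media-switcher thread, which grabs and clears the set under fm->lock before re-probing each flagged socket. Below is a standalone sketch of just that bookkeeping, with no real interrupts, locking or kthreads; the adapter struct and function names are simplified stand-ins.

#include <stdio.h>

struct adapter {
	unsigned int num_sockets;
	unsigned int socket_change_set;	/* one bit per socket needing service */
};

/* interrupt side: note which sockets reported card/FIFO events */
static void isr_note_events(struct adapter *fm, unsigned int irq_status)
{
	fm->socket_change_set |= irq_status & ((1u << fm->num_sockets) - 1);
	/* real driver: wake_up_all(&fm->change_set_notify) */
}

/* thread side: grab and clear the pending set, then service each socket */
static void switch_media_once(struct adapter *fm)
{
	unsigned int set = fm->socket_change_set;	/* real driver: under fm->lock */
	fm->socket_change_set = 0;

	for (unsigned int cnt = 0; cnt < fm->num_sockets; cnt++)
		if (set & (1u << cnt))
			printf("socket %u: re-detect media\n", cnt);
}

int main(void)
{
	struct adapter fm = { .num_sockets = 4, .socket_change_set = 0 };

	isr_note_events(&fm, 0x5);	/* sockets 0 and 2 changed */
	switch_media_once(&fm);
	return 0;
}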
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index d61df5c3ac36..6b10ebe9d936 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -14,7 +14,7 @@
14#include <linux/idr.h> 14#include <linux/idr.h>
15 15
16#define DRIVER_NAME "tifm_core" 16#define DRIVER_NAME "tifm_core"
17#define DRIVER_VERSION "0.6" 17#define DRIVER_VERSION "0.7"
18 18
19static DEFINE_IDR(tifm_adapter_idr); 19static DEFINE_IDR(tifm_adapter_idr);
20static DEFINE_SPINLOCK(tifm_adapter_lock); 20static DEFINE_SPINLOCK(tifm_adapter_lock);
@@ -60,10 +60,41 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp,
60 return 0; 60 return 0;
61} 61}
62 62
63#ifdef CONFIG_PM
64
65static int tifm_device_suspend(struct device *dev, pm_message_t state)
66{
67 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
68 struct tifm_driver *drv = fm_dev->drv;
69
70 if (drv && drv->suspend)
71 return drv->suspend(fm_dev, state);
72 return 0;
73}
74
75static int tifm_device_resume(struct device *dev)
76{
77 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
78 struct tifm_driver *drv = fm_dev->drv;
79
80 if (drv && drv->resume)
81 return drv->resume(fm_dev);
82 return 0;
83}
84
85#else
86
87#define tifm_device_suspend NULL
88#define tifm_device_resume NULL
89
90#endif /* CONFIG_PM */
91
63static struct bus_type tifm_bus_type = { 92static struct bus_type tifm_bus_type = {
64 .name = "tifm", 93 .name = "tifm",
65 .match = tifm_match, 94 .match = tifm_match,
66 .uevent = tifm_uevent, 95 .uevent = tifm_uevent,
96 .suspend = tifm_device_suspend,
97 .resume = tifm_device_resume
67}; 98};
68 99
69static void tifm_free(struct class_device *cdev) 100static void tifm_free(struct class_device *cdev)
@@ -71,8 +102,6 @@ static void tifm_free(struct class_device *cdev)
71 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev); 102 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev);
72 103
73 kfree(fm->sockets); 104 kfree(fm->sockets);
74 if (fm->wq)
75 destroy_workqueue(fm->wq);
76 kfree(fm); 105 kfree(fm);
77} 106}
78 107
@@ -101,7 +130,8 @@ void tifm_free_adapter(struct tifm_adapter *fm)
101} 130}
102EXPORT_SYMBOL(tifm_free_adapter); 131EXPORT_SYMBOL(tifm_free_adapter);
103 132
104int tifm_add_adapter(struct tifm_adapter *fm) 133int tifm_add_adapter(struct tifm_adapter *fm,
134 int (*mediathreadfn)(void *data))
105{ 135{
106 int rc; 136 int rc;
107 137
@@ -113,10 +143,10 @@ int tifm_add_adapter(struct tifm_adapter *fm)
113 spin_unlock(&tifm_adapter_lock); 143 spin_unlock(&tifm_adapter_lock);
114 if (!rc) { 144 if (!rc) {
115 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); 145 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id);
116 strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN); 146 fm->media_switcher = kthread_create(mediathreadfn,
147 fm, "tifm/%u", fm->id);
117 148
118 fm->wq = create_singlethread_workqueue(fm->wq_name); 149 if (!IS_ERR(fm->media_switcher))
119 if (fm->wq)
120 return class_device_add(&fm->cdev); 150 return class_device_add(&fm->cdev);
121 151
122 spin_lock(&tifm_adapter_lock); 152 spin_lock(&tifm_adapter_lock);
@@ -141,27 +171,27 @@ EXPORT_SYMBOL(tifm_remove_adapter);
141void tifm_free_device(struct device *dev) 171void tifm_free_device(struct device *dev)
142{ 172{
143 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); 173 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
144 if (fm_dev->wq)
145 destroy_workqueue(fm_dev->wq);
146 kfree(fm_dev); 174 kfree(fm_dev);
147} 175}
148EXPORT_SYMBOL(tifm_free_device); 176EXPORT_SYMBOL(tifm_free_device);
149 177
150struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id) 178static void tifm_dummy_signal_irq(struct tifm_dev *sock,
179 unsigned int sock_irq_status)
180{
181 return;
182}
183
184struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm)
151{ 185{
152 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); 186 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
153 187
154 if (dev) { 188 if (dev) {
155 spin_lock_init(&dev->lock); 189 spin_lock_init(&dev->lock);
156 snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id); 190
157 dev->wq = create_singlethread_workqueue(dev->wq_name);
158 if (!dev->wq) {
159 kfree(dev);
160 return NULL;
161 }
162 dev->dev.parent = fm->dev; 191 dev->dev.parent = fm->dev;
163 dev->dev.bus = &tifm_bus_type; 192 dev->dev.bus = &tifm_bus_type;
164 dev->dev.release = tifm_free_device; 193 dev->dev.release = tifm_free_device;
194 dev->signal_irq = tifm_dummy_signal_irq;
165 } 195 }
166 return dev; 196 return dev;
167} 197}
@@ -219,6 +249,7 @@ static int tifm_device_remove(struct device *dev)
219 struct tifm_driver *drv = fm_dev->drv; 249 struct tifm_driver *drv = fm_dev->drv;
220 250
221 if (drv) { 251 if (drv) {
252 fm_dev->signal_irq = tifm_dummy_signal_irq;
222 if (drv->remove) 253 if (drv->remove)
223 drv->remove(fm_dev); 254 drv->remove(fm_dev);
224 fm_dev->drv = NULL; 255 fm_dev->drv = NULL;
@@ -233,6 +264,8 @@ int tifm_register_driver(struct tifm_driver *drv)
233 drv->driver.bus = &tifm_bus_type; 264 drv->driver.bus = &tifm_bus_type;
234 drv->driver.probe = tifm_device_probe; 265 drv->driver.probe = tifm_device_probe;
235 drv->driver.remove = tifm_device_remove; 266 drv->driver.remove = tifm_device_remove;
267 drv->driver.suspend = tifm_device_suspend;
268 drv->driver.resume = tifm_device_resume;
236 269
237 return driver_register(&drv->driver); 270 return driver_register(&drv->driver);
238} 271}
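
With the bus-level suspend/resume methods added above, a tifm client driver only needs to fill in the optional suspend/resume members of its tifm_driver; tifm_device_suspend() and tifm_device_resume() forward the call when the pointer is set and report success otherwise. The following standalone model shows that dispatch; the struct layout is a simplified stand-in (the real callbacks also take the tifm_dev and, for suspend, a pm_message_t).

#include <stdio.h>

struct dev;				/* opaque device, stand-in */

struct drv {
	int (*suspend)(struct dev *d);	/* optional */
	int (*resume)(struct dev *d);	/* optional */
};

/* bus-level forwarding: a missing callback is treated as success */
static int bus_suspend(struct dev *d, const struct drv *drv)
{
	if (drv && drv->suspend)
		return drv->suspend(d);
	return 0;
}

static int card_suspend(struct dev *d)
{
	(void)d;
	printf("client driver saving card state\n");
	return 0;
}

int main(void)
{
	struct drv with_hook = { .suspend = card_suspend };
	struct drv without_hook = { 0 };

	printf("with hook: %d\n", bus_suspend(NULL, &with_hook));
	printf("without hook: %d\n", bus_suspend(NULL, &without_hook));
	return 0;
}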
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index aa152f31851e..2ce50f38e3c7 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -823,6 +823,9 @@ static int __init at91_mci_probe(struct platform_device *pdev)
823 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 823 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
824 mmc->caps = MMC_CAP_BYTEBLOCK; 824 mmc->caps = MMC_CAP_BYTEBLOCK;
825 825
826 mmc->max_blk_size = 4095;
827 mmc->max_blk_count = mmc->max_req_size;
828
826 host = mmc_priv(mmc); 829 host = mmc_priv(mmc);
827 host->mmc = mmc; 830 host->mmc = mmc;
828 host->buffer = NULL; 831 host->buffer = NULL;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 800527cf40d5..b834be261ab7 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
152 ? 1 : 0; 152 ? 1 : 0;
153} 153}
154 154
155static inline int au1xmmc_card_readonly(struct au1xmmc_host *host) 155static int au1xmmc_card_readonly(struct mmc_host *mmc)
156{ 156{
157 struct au1xmmc_host *host = mmc_priv(mmc);
157 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) 158 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
158 ? 1 : 0; 159 ? 1 : 0;
159} 160}
@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
193 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); 194 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
194 195
195 switch (mmc_resp_type(cmd)) { 196 switch (mmc_resp_type(cmd)) {
197 case MMC_RSP_NONE:
198 break;
196 case MMC_RSP_R1: 199 case MMC_RSP_R1:
197 mmccmd |= SD_CMD_RT_1; 200 mmccmd |= SD_CMD_RT_1;
198 break; 201 break;
@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
205 case MMC_RSP_R3: 208 case MMC_RSP_R3:
206 mmccmd |= SD_CMD_RT_3; 209 mmccmd |= SD_CMD_RT_3;
207 break; 210 break;
211 default:
212 printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
213 mmc_resp_type(cmd));
214 return MMC_ERR_INVALID;
208 } 215 }
209 216
210 switch(cmd->opcode) { 217 switch(cmd->opcode) {
@@ -878,6 +885,7 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host)
878static const struct mmc_host_ops au1xmmc_ops = { 885static const struct mmc_host_ops au1xmmc_ops = {
879 .request = au1xmmc_request, 886 .request = au1xmmc_request,
880 .set_ios = au1xmmc_set_ios, 887 .set_ios = au1xmmc_set_ios,
888 .get_ro = au1xmmc_card_readonly,
881}; 889};
882 890
883static int __devinit au1xmmc_probe(struct platform_device *pdev) 891static int __devinit au1xmmc_probe(struct platform_device *pdev)
@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
914 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 922 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
915 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 923 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
916 924
925 mmc->max_blk_size = 2048;
926 mmc->max_blk_count = 512;
927
917 mmc->ocr_avail = AU1XMMC_OCR; 928 mmc->ocr_avail = AU1XMMC_OCR;
918 929
919 host = mmc_priv(mmc); 930 host = mmc_priv(mmc);
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index bfb9ff693208..b060d4bfba29 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -958,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
958 /* MMC core transfer sizes tunable parameters */ 958 /* MMC core transfer sizes tunable parameters */
959 mmc->max_hw_segs = 64; 959 mmc->max_hw_segs = 64;
960 mmc->max_phys_segs = 64; 960 mmc->max_phys_segs = 64;
961 mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */
962 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ 961 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
962 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
963 mmc->max_blk_size = 2048;
964 mmc->max_blk_count = 65535;
963 965
964 host = mmc_priv(mmc); 966 host = mmc_priv(mmc);
965 host->mmc = mmc; 967 host->mmc = mmc;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6f2a282e2b97..5046a1661342 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
103 mmc_hostname(host), mrq->cmd->opcode, 103 mmc_hostname(host), mrq->cmd->opcode,
104 mrq->cmd->arg, mrq->cmd->flags); 104 mrq->cmd->arg, mrq->cmd->flags);
105 105
106 WARN_ON(host->card_busy == NULL); 106 WARN_ON(!host->claimed);
107 107
108 mrq->cmd->error = 0; 108 mrq->cmd->error = 0;
109 mrq->cmd->mrq = mrq; 109 mrq->cmd->mrq = mrq;
110 if (mrq->data) { 110 if (mrq->data) {
111 BUG_ON(mrq->data->blksz > host->max_blk_size);
112 BUG_ON(mrq->data->blocks > host->max_blk_count);
113 BUG_ON(mrq->data->blocks * mrq->data->blksz >
114 host->max_req_size);
115
111 mrq->cmd->data = mrq->data; 116 mrq->cmd->data = mrq->data;
112 mrq->data->error = 0; 117 mrq->data->error = 0;
113 mrq->data->mrq = mrq; 118 mrq->data->mrq = mrq;
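
mmc_start_request() now checks every data request against the three new host limits: the block size may not exceed max_blk_size, the block count may not exceed max_blk_count, and their product may not exceed max_req_size. A standalone sketch of that validation follows, using the defaults mmc_alloc_host() installs for simple PIO hosts and assuming a 4096-byte PAGE_CACHE_SIZE for the example.

#include <stdbool.h>
#include <stdio.h>

/* defaults from mmc_alloc_host(); PAGE_CACHE_SIZE assumed to be 4096 here */
#define MAX_BLK_SIZE   512u
#define MAX_BLK_COUNT  (4096u / 512u)
#define MAX_REQ_SIZE   4096u

static bool request_fits(unsigned int blksz, unsigned int blocks)
{
	return blksz <= MAX_BLK_SIZE &&
	       blocks <= MAX_BLK_COUNT &&
	       blksz * blocks <= MAX_REQ_SIZE;
}

int main(void)
{
	printf("512 x 8 blocks: %s\n", request_fits(512, 8) ? "ok" : "too big");
	printf("512 x 9 blocks: %s\n", request_fits(512, 9) ? "ok" : "too big");
	return 0;
}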
@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
157{ 162{
158 struct mmc_request mrq; 163 struct mmc_request mrq;
159 164
160 BUG_ON(host->card_busy == NULL); 165 BUG_ON(!host->claimed);
161 166
162 memset(&mrq, 0, sizeof(struct mmc_request)); 167 memset(&mrq, 0, sizeof(struct mmc_request));
163 168
@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
195 200
196 int i, err; 201 int i, err;
197 202
198 BUG_ON(host->card_busy == NULL); 203 BUG_ON(!host->claimed);
199 BUG_ON(retries < 0); 204 BUG_ON(retries < 0);
200 205
201 err = MMC_ERR_INVALID; 206 err = MMC_ERR_INVALID;
@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
289 else 294 else
290 limit_us = 100000; 295 limit_us = 100000;
291 296
292 if (timeout_us > limit_us) { 297 /*
298 * SDHC cards always use these fixed values.
299 */
300 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
293 data->timeout_ns = limit_us * 1000; 301 data->timeout_ns = limit_us * 1000;
294 data->timeout_clks = 0; 302 data->timeout_clks = 0;
295 } 303 }
@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
320 spin_lock_irqsave(&host->lock, flags); 328 spin_lock_irqsave(&host->lock, flags);
321 while (1) { 329 while (1) {
322 set_current_state(TASK_UNINTERRUPTIBLE); 330 set_current_state(TASK_UNINTERRUPTIBLE);
323 if (host->card_busy == NULL) 331 if (!host->claimed)
324 break; 332 break;
325 spin_unlock_irqrestore(&host->lock, flags); 333 spin_unlock_irqrestore(&host->lock, flags);
326 schedule(); 334 schedule();
327 spin_lock_irqsave(&host->lock, flags); 335 spin_lock_irqsave(&host->lock, flags);
328 } 336 }
329 set_current_state(TASK_RUNNING); 337 set_current_state(TASK_RUNNING);
330 host->card_busy = card; 338 host->claimed = 1;
331 spin_unlock_irqrestore(&host->lock, flags); 339 spin_unlock_irqrestore(&host->lock, flags);
332 remove_wait_queue(&host->wq, &wait); 340 remove_wait_queue(&host->wq, &wait);
333 341
@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host)
353{ 361{
354 unsigned long flags; 362 unsigned long flags;
355 363
356 BUG_ON(host->card_busy == NULL); 364 BUG_ON(!host->claimed);
357 365
358 spin_lock_irqsave(&host->lock, flags); 366 spin_lock_irqsave(&host->lock, flags);
359 host->card_busy = NULL; 367 host->claimed = 0;
360 spin_unlock_irqrestore(&host->lock, flags); 368 spin_unlock_irqrestore(&host->lock, flags);
361 369
362 wake_up(&host->wq); 370 wake_up(&host->wq);
@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
372 mmc_hostname(host), ios->clock, ios->bus_mode, 380 mmc_hostname(host), ios->clock, ios->bus_mode,
373 ios->power_mode, ios->chip_select, ios->vdd, 381 ios->power_mode, ios->chip_select, ios->vdd,
374 ios->bus_width); 382 ios->bus_width);
375 383
376 host->ops->set_ios(host, ios); 384 host->ops->set_ios(host, ios);
377} 385}
378 386
@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
381 int err; 389 int err;
382 struct mmc_command cmd; 390 struct mmc_command cmd;
383 391
384 BUG_ON(host->card_busy == NULL); 392 BUG_ON(!host->claimed);
385 393
386 if (host->card_selected == card) 394 if (host->card_selected == card)
387 return MMC_ERR_NONE; 395 return MMC_ERR_NONE;
@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card)
588 596
589 if (mmc_card_sd(card)) { 597 if (mmc_card_sd(card)) {
590 csd_struct = UNSTUFF_BITS(resp, 126, 2); 598 csd_struct = UNSTUFF_BITS(resp, 126, 2);
591 if (csd_struct != 0) { 599
600 switch (csd_struct) {
601 case 0:
602 m = UNSTUFF_BITS(resp, 115, 4);
603 e = UNSTUFF_BITS(resp, 112, 3);
604 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
605 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
606
607 m = UNSTUFF_BITS(resp, 99, 4);
608 e = UNSTUFF_BITS(resp, 96, 3);
609 csd->max_dtr = tran_exp[e] * tran_mant[m];
610 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
611
612 e = UNSTUFF_BITS(resp, 47, 3);
613 m = UNSTUFF_BITS(resp, 62, 12);
614 csd->capacity = (1 + m) << (e + 2);
615
616 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
617 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
618 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
619 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
620 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
621 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
622 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
623 break;
624 case 1:
625 /*
626 * This is a block-addressed SDHC card. Most
627 * interesting fields are unused and have fixed
628 * values. To avoid getting tripped by buggy cards,
629 * we assume those fixed values ourselves.
630 */
631 mmc_card_set_blockaddr(card);
632
633 csd->tacc_ns = 0; /* Unused */
634 csd->tacc_clks = 0; /* Unused */
635
636 m = UNSTUFF_BITS(resp, 99, 4);
637 e = UNSTUFF_BITS(resp, 96, 3);
638 csd->max_dtr = tran_exp[e] * tran_mant[m];
639 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
640
641 m = UNSTUFF_BITS(resp, 48, 22);
642 csd->capacity = (1 + m) << 10;
643
644 csd->read_blkbits = 9;
645 csd->read_partial = 0;
646 csd->write_misalign = 0;
647 csd->read_misalign = 0;
648 csd->r2w_factor = 4; /* Unused */
649 csd->write_blkbits = 9;
650 csd->write_partial = 0;
651 break;
652 default:
592 printk("%s: unrecognised CSD structure version %d\n", 653 printk("%s: unrecognised CSD structure version %d\n",
593 mmc_hostname(card->host), csd_struct); 654 mmc_hostname(card->host), csd_struct);
594 mmc_card_set_bad(card); 655 mmc_card_set_bad(card);
595 return; 656 return;
596 } 657 }
597
598 m = UNSTUFF_BITS(resp, 115, 4);
599 e = UNSTUFF_BITS(resp, 112, 3);
600 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
601 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
602
603 m = UNSTUFF_BITS(resp, 99, 4);
604 e = UNSTUFF_BITS(resp, 96, 3);
605 csd->max_dtr = tran_exp[e] * tran_mant[m];
606 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
607
608 e = UNSTUFF_BITS(resp, 47, 3);
609 m = UNSTUFF_BITS(resp, 62, 12);
610 csd->capacity = (1 + m) << (e + 2);
611
612 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
613 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
614 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
615 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
616 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
617 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
618 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
619 } else { 658 } else {
620 /* 659 /*
621 * We only understand CSD structure v1.1 and v1.2. 660 * We only understand CSD structure v1.1 and v1.2.
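
In the new CSD structure version 1 (SDHC) branch, capacity is derived from the 22-bit C_SIZE field alone: the card holds (C_SIZE + 1) * 1024 blocks of 512 bytes, which is what csd->capacity = (1 + m) << 10 together with the fixed read_blkbits of 9 expresses. A quick standalone check of that arithmetic, with an example C_SIZE value:

#include <stdio.h>

int main(void)
{
	unsigned int c_size = 4095;				/* example 22-bit C_SIZE */
	unsigned long long blocks = (1ull + c_size) << 10;	/* 512-byte blocks */
	unsigned long long bytes = blocks << 9;			/* read_blkbits == 9 */

	printf("C_SIZE=%u -> %llu blocks, %llu bytes (%.2f GiB)\n",
	       c_size, blocks, bytes,
	       (double)bytes / (1024.0 * 1024.0 * 1024.0));
	return 0;
}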
@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
848 return err; 887 return err;
849} 888}
850 889
890static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2)
891{
892 struct mmc_command cmd;
893 int err, sd2;
894 static const u8 test_pattern = 0xAA;
895
896 /*
897 * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
898 * before SD_APP_OP_COND. This command will harmlessly fail for
899 * SD 1.0 cards.
900 */
901 cmd.opcode = SD_SEND_IF_COND;
902 cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
903 cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR;
904
905 err = mmc_wait_for_cmd(host, &cmd, 0);
906 if (err == MMC_ERR_NONE) {
907 if ((cmd.resp[0] & 0xFF) == test_pattern) {
908 sd2 = 1;
909 } else {
910 sd2 = 0;
911 err = MMC_ERR_FAILED;
912 }
913 } else {
914 /*
915 * Treat errors as SD 1.0 card.
916 */
917 sd2 = 0;
918 err = MMC_ERR_NONE;
919 }
920 if (rsd2)
921 *rsd2 = sd2;
922 return err;
923}
924
851/* 925/*
852 * Discover cards by requesting their CID. If this command 926 * Discover cards by requesting their CID. If this command
853 * times out, it is not an error; there are no further cards 927 * times out, it is not an error; there are no further cards
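
mmc_send_if_cond() builds the SD_SEND_IF_COND (CMD8) argument from two pieces: a voltage-supplied flag derived from the host OCR (any bit set in the 2.7-3.6 V window 0xFF8000 puts a 1 in bits 8-11) and a check pattern in bits 0-7 that an SD 2.0 card must echo back in its R7 response. A standalone sketch of that encoding and the echo test, using an example OCR value:

#include <stdio.h>

int main(void)
{
	unsigned int ocr = 0x00300000;		/* example: host supports 3.2-3.4 V */
	unsigned int test_pattern = 0xAA;

	/* same construction as the driver: VHS in bits 8-11, pattern in bits 0-7 */
	unsigned int arg = (((ocr & 0xFF8000) != 0) << 8) | test_pattern;

	/* a 2.0 card echoes the pattern in the low byte of its R7 response */
	unsigned int resp = arg;		/* pretend the card echoed it */
	int sd2 = (resp & 0xFF) == test_pattern;

	printf("CMD8 arg = 0x%03X, card is SD 2.0: %s\n", arg, sd2 ? "yes" : "no");
	return 0;
}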
@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1018 mmc_wait_for_req(host, &mrq); 1092 mmc_wait_for_req(host, &mrq);
1019 1093
1020 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1094 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
1021 mmc_card_set_dead(card); 1095 printk("%s: unable to read EXT_CSD, performance "
1096 "might suffer.\n", mmc_hostname(card->host));
1022 continue; 1097 continue;
1023 } 1098 }
1024 1099
@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1034 printk("%s: card is mmc v4 but doesn't support " 1109 printk("%s: card is mmc v4 but doesn't support "
1035 "any high-speed modes.\n", 1110 "any high-speed modes.\n",
1036 mmc_hostname(card->host)); 1111 mmc_hostname(card->host));
1037 mmc_card_set_bad(card);
1038 continue; 1112 continue;
1039 } 1113 }
1040 1114
@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1215 mmc_wait_for_req(host, &mrq); 1289 mmc_wait_for_req(host, &mrq);
1216 1290
1217 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1291 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
1218 mmc_card_set_dead(card); 1292 printk("%s: unable to read switch capabilities, "
1293 "performance might suffer.\n",
1294 mmc_hostname(card->host));
1219 continue; 1295 continue;
1220 } 1296 }
1221 1297
@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1247 1323
1248 mmc_wait_for_req(host, &mrq); 1324 mmc_wait_for_req(host, &mrq);
1249 1325
1250 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1326 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE ||
1251 mmc_card_set_dead(card); 1327 (status[16] & 0xF) != 1) {
1252 continue;
1253 }
1254
1255 if ((status[16] & 0xF) != 1) {
1256 printk(KERN_WARNING "%s: Problem switching card " 1328 printk(KERN_WARNING "%s: Problem switching card "
1257 "into high-speed mode!\n", 1329 "into high-speed mode!\n",
1258 mmc_hostname(host)); 1330 mmc_hostname(host));
@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host)
1334 mmc_power_up(host); 1406 mmc_power_up(host);
1335 mmc_idle_cards(host); 1407 mmc_idle_cards(host);
1336 1408
1409 err = mmc_send_if_cond(host, host->ocr_avail, NULL);
1410 if (err != MMC_ERR_NONE) {
1411 return;
1412 }
1337 err = mmc_send_app_op_cond(host, 0, &ocr); 1413 err = mmc_send_app_op_cond(host, 0, &ocr);
1338 1414
1339 /* 1415 /*
@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host)
1386 * all get the idea that they should be ready for CMD2. 1462 * all get the idea that they should be ready for CMD2.
1387 * (My SanDisk card seems to need this.) 1463 * (My SanDisk card seems to need this.)
1388 */ 1464 */
1389 if (host->mode == MMC_MODE_SD) 1465 if (host->mode == MMC_MODE_SD) {
1390 mmc_send_app_op_cond(host, host->ocr, NULL); 1466 int err, sd2;
1391 else 1467 err = mmc_send_if_cond(host, host->ocr, &sd2);
1468 if (err == MMC_ERR_NONE) {
1469 /*
1470 * If SD_SEND_IF_COND indicates an SD 2.0
1471 * compliant card, we should set bit 30
1472 * of the ocr to indicate that we can handle
1473 * block-addressed SDHC cards.
1474 */
1475 mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL);
1476 }
1477 } else {
1392 mmc_send_op_cond(host, host->ocr, NULL); 1478 mmc_send_op_cond(host, host->ocr, NULL);
1479 }
1393 1480
1394 mmc_discover_cards(host); 1481 mmc_discover_cards(host);
1395 1482
@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
1519 */ 1606 */
1520 host->max_hw_segs = 1; 1607 host->max_hw_segs = 1;
1521 host->max_phys_segs = 1; 1608 host->max_phys_segs = 1;
1522 host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
1523 host->max_seg_size = PAGE_CACHE_SIZE; 1609 host->max_seg_size = PAGE_CACHE_SIZE;
1610
1611 host->max_req_size = PAGE_CACHE_SIZE;
1612 host->max_blk_size = 512;
1613 host->max_blk_count = PAGE_CACHE_SIZE / 512;
1524 } 1614 }
1525 1615
1526 return host; 1616 return host;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 87713572293f..05ba8ace70e7 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
237 brq.mrq.cmd = &brq.cmd; 237 brq.mrq.cmd = &brq.cmd;
238 brq.mrq.data = &brq.data; 238 brq.mrq.data = &brq.data;
239 239
240 brq.cmd.arg = req->sector << 9; 240 brq.cmd.arg = req->sector;
241 if (!mmc_card_blockaddr(card))
242 brq.cmd.arg <<= 9;
241 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 243 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
242 brq.data.blksz = 1 << md->block_bits; 244 brq.data.blksz = 1 << md->block_bits;
243 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
244 brq.stop.opcode = MMC_STOP_TRANSMISSION; 245 brq.stop.opcode = MMC_STOP_TRANSMISSION;
245 brq.stop.arg = 0; 246 brq.stop.arg = 0;
246 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 247 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
248 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
249 if (brq.data.blocks > card->host->max_blk_count)
250 brq.data.blocks = card->host->max_blk_count;
247 251
248 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ); 252 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
249 253
@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
375 spin_unlock_irq(&md->lock); 379 spin_unlock_irq(&md->lock);
376 } 380 }
377 381
382flush_queue:
383
378 mmc_card_release_host(card); 384 mmc_card_release_host(card);
379 385
380flush_queue:
381 spin_lock_irq(&md->lock); 386 spin_lock_irq(&md->lock);
382 while (ret) { 387 while (ret) {
383 ret = end_that_request_chunk(req, 0, 388 ret = end_that_request_chunk(req, 0,
@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
494 struct mmc_command cmd; 499 struct mmc_command cmd;
495 int err; 500 int err;
496 501
502 /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
503 if (mmc_card_blockaddr(card))
504 return 0;
505
497 mmc_card_claim_host(card); 506 mmc_card_claim_host(card);
498 cmd.opcode = MMC_SET_BLOCKLEN; 507 cmd.opcode = MMC_SET_BLOCKLEN;
499 cmd.arg = 1 << md->block_bits; 508 cmd.arg = 1 << md->block_bits;
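
The mmc_block change above is where SDHC block addressing becomes visible: standard cards take a byte offset in the read/write command argument (sector << 9), while block-addressed cards take the sector number directly, which is also why MMC_SET_BLOCKLEN is skipped for them. A small standalone illustration, with an example sector:

#include <stdio.h>

static unsigned int rw_cmd_arg(unsigned int sector, int blockaddr_card)
{
	/* byte-addressed cards want an offset in bytes, SDHC wants the sector */
	return blockaddr_card ? sector : sector << 9;
}

int main(void)
{
	unsigned int sector = 2048;	/* example: 1 MiB into the card */

	printf("standard card arg: %u\n", rw_cmd_arg(sector, 0));
	printf("SDHC card arg:     %u\n", rw_cmd_arg(sector, 1));
	return 0;
}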
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 3e35a43819fb..c27e42645cdb 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
147 147
148 blk_queue_prep_rq(mq->queue, mmc_prep_request); 148 blk_queue_prep_rq(mq->queue, mmc_prep_request);
149 blk_queue_bounce_limit(mq->queue, limit); 149 blk_queue_bounce_limit(mq->queue, limit);
150 blk_queue_max_sectors(mq->queue, host->max_sectors); 150 blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); 151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
153 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 153 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index e334acd045bc..d32698b02d7f 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
199 memset(card, 0, sizeof(struct mmc_card)); 199 memset(card, 0, sizeof(struct mmc_card));
200 card->host = host; 200 card->host = host;
201 device_initialize(&card->dev); 201 device_initialize(&card->dev);
202 card->dev.parent = mmc_dev(host); 202 card->dev.parent = mmc_classdev(host);
203 card->dev.bus = &mmc_bus_type; 203 card->dev.bus = &mmc_bus_type;
204 card->dev.release = mmc_release_card; 204 card->dev.release = mmc_release_card;
205} 205}
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index ccfe6561be24..5941dd951e82 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -524,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id)
524 /* 524 /*
525 * Since we only have a 16-bit data length register, we must 525 * Since we only have a 16-bit data length register, we must
526 * ensure that we don't exceed 2^16-1 bytes in a single request. 526 * ensure that we don't exceed 2^16-1 bytes in a single request.
527 * Choose 64 (512-byte) sectors as the limit.
528 */ 527 */
529 mmc->max_sectors = 64; 528 mmc->max_req_size = 65535;
530 529
531 /* 530 /*
532 * Set the maximum segment size. Since we aren't doing DMA 531 * Set the maximum segment size. Since we aren't doing DMA
533 * (yet) we are only limited by the data length register. 532 * (yet) we are only limited by the data length register.
534 */ 533 */
535 mmc->max_seg_size = mmc->max_sectors << 9; 534 mmc->max_seg_size = mmc->max_req_size;
535
536 /*
537 * Block size can be up to 2048 bytes, but must be a power of two.
538 */
539 mmc->max_blk_size = 2048;
540
541 /*
542 * No limit on the number of blocks transferred.
543 */
544 mmc->max_blk_count = mmc->max_req_size;
536 545
537 spin_lock_init(&host->lock); 546 spin_lock_init(&host->lock);
538 547
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index d30540b27614..1e96a2f65022 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -1099,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1099 */ 1099 */
1100 mmc->max_phys_segs = 32; 1100 mmc->max_phys_segs = 32;
1101 mmc->max_hw_segs = 32; 1101 mmc->max_hw_segs = 32;
1102 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ 1102 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1103 mmc->max_seg_size = mmc->max_sectors * 512; 1103 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1104 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1105 mmc->max_seg_size = mmc->max_req_size;
1104 1106
1105 if (host->power_pin >= 0) { 1107 if (host->power_pin >= 0) {
1106 if ((ret = omap_request_gpio(host->power_pin)) != 0) { 1108 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index 6073d998b11f..9774fc68b61a 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev)
450 */ 450 */
451 mmc->max_seg_size = PAGE_SIZE; 451 mmc->max_seg_size = PAGE_SIZE;
452 452
453 /*
454 * Block length register is 10 bits.
455 */
456 mmc->max_blk_size = 1023;
457
458 /*
459 * Block count register is 16 bits.
460 */
461 mmc->max_blk_count = 65535;
462
453 host = mmc_priv(mmc); 463 host = mmc_priv(mmc);
454 host->mmc = mmc; 464 host->mmc = mmc;
455 host->dma = -1; 465 host->dma = -1;
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index c2d13d7e9911..4bf1fea5e2c4 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0;
37#define SDHCI_QUIRK_FORCE_DMA (1<<1) 37#define SDHCI_QUIRK_FORCE_DMA (1<<1)
38/* Controller doesn't like some resets when there is no card inserted. */ 38/* Controller doesn't like some resets when there is no card inserted. */
39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) 39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
40#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
40 41
41static const struct pci_device_id pci_ids[] __devinitdata = { 42static const struct pci_device_id pci_ids[] __devinitdata = {
42 { 43 {
@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
65 .driver_data = SDHCI_QUIRK_FORCE_DMA, 66 .driver_data = SDHCI_QUIRK_FORCE_DMA,
66 }, 67 },
67 68
69 {
70 .vendor = PCI_VENDOR_ID_ENE,
71 .device = PCI_DEVICE_ID_ENE_CB712_SD,
72 .subvendor = PCI_ANY_ID,
73 .subdevice = PCI_ANY_ID,
74 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE,
75 },
76
68 { /* Generic SD host controller */ 77 { /* Generic SD host controller */
69 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 78 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
70 }, 79 },
@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
197 * * 206 * *
198\*****************************************************************************/ 207\*****************************************************************************/
199 208
200static inline char* sdhci_kmap_sg(struct sdhci_host* host) 209static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
201{ 210{
202 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ); 211 return page_address(host->cur_sg->page) + host->cur_sg->offset;
203 return host->mapped_sg + host->cur_sg->offset;
204}
205
206static inline void sdhci_kunmap_sg(struct sdhci_host* host)
207{
208 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
209} 212}
210 213
211static inline int sdhci_next_sg(struct sdhci_host* host) 214static inline int sdhci_next_sg(struct sdhci_host* host)
@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
240 chunk_remain = 0; 243 chunk_remain = 0;
241 data = 0; 244 data = 0;
242 245
243 buffer = sdhci_kmap_sg(host) + host->offset; 246 buffer = sdhci_sg_to_buffer(host) + host->offset;
244 247
245 while (blksize) { 248 while (blksize) {
246 if (chunk_remain == 0) { 249 if (chunk_remain == 0) {
@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
264 } 267 }
265 268
266 if (host->remain == 0) { 269 if (host->remain == 0) {
267 sdhci_kunmap_sg(host);
268 if (sdhci_next_sg(host) == 0) { 270 if (sdhci_next_sg(host) == 0) {
269 BUG_ON(blksize != 0); 271 BUG_ON(blksize != 0);
270 return; 272 return;
271 } 273 }
272 buffer = sdhci_kmap_sg(host); 274 buffer = sdhci_sg_to_buffer(host);
273 } 275 }
274 } 276 }
275
276 sdhci_kunmap_sg(host);
277} 277}
278 278
279static void sdhci_write_block_pio(struct sdhci_host *host) 279static void sdhci_write_block_pio(struct sdhci_host *host)
@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
290 data = 0; 290 data = 0;
291 291
292 bytes = 0; 292 bytes = 0;
293 buffer = sdhci_kmap_sg(host) + host->offset; 293 buffer = sdhci_sg_to_buffer(host) + host->offset;
294 294
295 while (blksize) { 295 while (blksize) {
296 size = min(host->size, host->remain); 296 size = min(host->size, host->remain);
@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
314 } 314 }
315 315
316 if (host->remain == 0) { 316 if (host->remain == 0) {
317 sdhci_kunmap_sg(host);
318 if (sdhci_next_sg(host) == 0) { 317 if (sdhci_next_sg(host) == 0) {
319 BUG_ON(blksize != 0); 318 BUG_ON(blksize != 0);
320 return; 319 return;
321 } 320 }
322 buffer = sdhci_kmap_sg(host); 321 buffer = sdhci_sg_to_buffer(host);
323 } 322 }
324 } 323 }
325
326 sdhci_kunmap_sg(host);
327} 324}
328 325
329static void sdhci_transfer_pio(struct sdhci_host *host) 326static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
372 369
373 /* Sanity checks */ 370 /* Sanity checks */
374 BUG_ON(data->blksz * data->blocks > 524288); 371 BUG_ON(data->blksz * data->blocks > 524288);
375 BUG_ON(data->blksz > host->max_block); 372 BUG_ON(data->blksz > host->mmc->max_blk_size);
376 BUG_ON(data->blocks > 65535); 373 BUG_ON(data->blocks > 65535);
377 374
378 /* timeout in us */ 375 /* timeout in us */
@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
674 if (host->power == power) 671 if (host->power == power)
675 return; 672 return;
676 673
677 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 674 if (power == (unsigned short)-1) {
678 675 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
679 if (power == (unsigned short)-1)
680 goto out; 676 goto out;
677 }
678
679 /*
680 * Spec says that we should clear the power reg before setting
681 * a new value. Some controllers don't seem to like this though.
682 */
683 if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
684 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
681 685
682 pwr = SDHCI_POWER_ON; 686 pwr = SDHCI_POWER_ON;
683 687
@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev)
1109 1113
1110 pci_set_power_state(pdev, PCI_D0); 1114 pci_set_power_state(pdev, PCI_D0);
1111 pci_restore_state(pdev); 1115 pci_restore_state(pdev);
1112 pci_enable_device(pdev); 1116 ret = pci_enable_device(pdev);
1117 if (ret)
1118 return ret;
1113 1119
1114 for (i = 0;i < chip->num_slots;i++) { 1120 for (i = 0;i < chip->num_slots;i++) {
1115 if (!chip->hosts[i]) 1121 if (!chip->hosts[i])
@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1274 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1280 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1275 host->timeout_clk *= 1000; 1281 host->timeout_clk *= 1000;
1276 1282
1277 host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1278 if (host->max_block >= 3) {
1279 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1280 host->slot_descr);
1281 ret = -ENODEV;
1282 goto unmap;
1283 }
1284 host->max_block = 512 << host->max_block;
1285
1286 /* 1283 /*
1287 * Set host parameters. 1284 * Set host parameters.
1288 */ 1285 */
@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1294 mmc->ocr_avail = 0; 1291 mmc->ocr_avail = 0;
1295 if (caps & SDHCI_CAN_VDD_330) 1292 if (caps & SDHCI_CAN_VDD_330)
1296 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1293 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1297 else if (caps & SDHCI_CAN_VDD_300) 1294 if (caps & SDHCI_CAN_VDD_300)
1298 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1295 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1299 else if (caps & SDHCI_CAN_VDD_180) 1296 if (caps & SDHCI_CAN_VDD_180)
1300 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; 1297 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
1301 1298
1302 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { 1299 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1326 1323
1327 /* 1324 /*
1328 * Maximum number of sectors in one transfer. Limited by DMA boundary 1325 * Maximum number of sectors in one transfer. Limited by DMA boundary
1329 * size (512KiB), which means (512 KiB/512=) 1024 entries. 1326 * size (512KiB).
1330 */ 1327 */
1331 mmc->max_sectors = 1024; 1328 mmc->max_req_size = 524288;
1332 1329
1333 /* 1330 /*
1334 * Maximum segment size. Could be one segment with the maximum number 1331 * Maximum segment size. Could be one segment with the maximum number
1335 * of sectors. 1332 * of bytes.
1333 */
1334 mmc->max_seg_size = mmc->max_req_size;
1335
1336 /*
1337 * Maximum block size. This varies from controller to controller and
1338 * is specified in the capabilities register.
1339 */
1340 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1341 if (mmc->max_blk_size >= 3) {
1342 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1343 host->slot_descr);
1344 ret = -ENODEV;
1345 goto unmap;
1346 }
1347 mmc->max_blk_size = 512 << mmc->max_blk_size;
1348
1349 /*
1350 * Maximum block count.
1336 */ 1351 */
1337 mmc->max_seg_size = mmc->max_sectors * 512; 1352 mmc->max_blk_count = 65535;
1338 1353
1339 /* 1354 /*
1340 * Init tasklets. 1355 * Init tasklets.
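
In the sdhci hunks above, the block-size limit moves from the private host->max_block field into mmc->max_blk_size, keeping the capabilities-register encoding (512 << n, with n >= 3 rejected as invalid), the voltage ranges are now accumulated instead of being mutually exclusive, and the new SINGLE_POWER_WRITE quirk lets ENE CB712 parts skip the clear-before-write on the power register. A hedged sketch of the capability decode; the mask and shift values below are assumptions mirroring the driver's SDHCI_MAX_BLOCK_MASK/SHIFT, not quoted from sdhci.h.

        #include <stdio.h>

        #define MAX_BLOCK_MASK   0x00030000     /* assumed caps-field layout */
        #define MAX_BLOCK_SHIFT  16

        /* Returns the block-size limit, or 0 for the reserved encoding. */
        static unsigned int decode_max_blk_size(unsigned int caps)
        {
                unsigned int n = (caps & MAX_BLOCK_MASK) >> MAX_BLOCK_SHIFT;

                if (n >= 3)
                        return 0;       /* the probe above fails with -ENODEV here */
                return 512u << n;       /* 512, 1024 or 2048 bytes */
        }

        int main(void)
        {
                printf("%u\n", decode_max_blk_size(0x00020000));  /* 2048 */
                return 0;
        }
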
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h
index f9d1a0a6f03a..e324f0a623dc 100644
--- a/drivers/mmc/sdhci.h
+++ b/drivers/mmc/sdhci.h
@@ -174,7 +174,6 @@ struct sdhci_host {
174 174
175 unsigned int max_clk; /* Max possible freq (MHz) */ 175 unsigned int max_clk; /* Max possible freq (MHz) */
176 unsigned int timeout_clk; /* Timeout freq (KHz) */ 176 unsigned int timeout_clk; /* Timeout freq (KHz) */
177 unsigned int max_block; /* Max block size (bytes) */
178 177
179 unsigned int clock; /* Current clock (MHz) */ 178 unsigned int clock; /* Current clock (MHz) */
180 unsigned short power; /* Current voltage */ 179 unsigned short power; /* Current voltage */
@@ -184,7 +183,6 @@ struct sdhci_host {
184 struct mmc_data *data; /* Current data request */ 183 struct mmc_data *data; /* Current data request */
185 184
186 struct scatterlist *cur_sg; /* We're working on this */ 185 struct scatterlist *cur_sg; /* We're working on this */
187 char *mapped_sg; /* This is where it's mapped */
188 int num_sg; /* Entries left */ 186 int num_sg; /* Entries left */
189 int offset; /* Offset into current sg */ 187 int offset; /* Offset into current sg */
190 int remain; /* Bytes left in current */ 188 int remain; /* Bytes left in current */
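
Both sdhci and (further down) wbsd drop their kmap_atomic()/kunmap_atomic() bookkeeping, which is why the mapped_sg fields disappear from the host structures; the scatterlist is read through page_address() arithmetic instead, which is only safe while the pages handed to these hosts stay out of highmem, an assumption made here rather than enforced by the snippet. A small sketch of the resulting access pattern, with copy_from_sg() as an illustrative helper name:

        #include <linux/mm.h>
        #include <linux/scatterlist.h>
        #include <linux/string.h>

        /* Sketch of the shortcut adopted above: no atomic kmap, just pointer
         * arithmetic on the page's permanent kernel mapping (lowmem only). */
        static void copy_from_sg(struct scatterlist *sg, void *dst, size_t len)
        {
                char *src = page_address(sg->page) + sg->offset;

                if (len > sg->length)
                        len = sg->length;
                memcpy(dst, src, len);
        }
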
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index fa4a52886b97..e65f8a0a9349 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -17,7 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19#define DRIVER_NAME "tifm_sd" 19#define DRIVER_NAME "tifm_sd"
20#define DRIVER_VERSION "0.6" 20#define DRIVER_VERSION "0.7"
21 21
22static int no_dma = 0; 22static int no_dma = 0;
23static int fixed_timeout = 0; 23static int fixed_timeout = 0;
@@ -79,7 +79,6 @@ typedef enum {
79 79
80enum { 80enum {
81 FIFO_RDY = 0x0001, /* hardware dependent value */ 81 FIFO_RDY = 0x0001, /* hardware dependent value */
82 HOST_REG = 0x0002,
83 EJECT = 0x0004, 82 EJECT = 0x0004,
84 EJECT_DONE = 0x0008, 83 EJECT_DONE = 0x0008,
85 CARD_BUSY = 0x0010, 84 CARD_BUSY = 0x0010,
@@ -95,46 +94,53 @@ struct tifm_sd {
95 card_state_t state; 94 card_state_t state;
96 unsigned int clk_freq; 95 unsigned int clk_freq;
97 unsigned int clk_div; 96 unsigned int clk_div;
98 unsigned long timeout_jiffies; // software timeout - 2 sec 97 unsigned long timeout_jiffies;
99 98
99 struct tasklet_struct finish_tasklet;
100 struct timer_list timer;
100 struct mmc_request *req; 101 struct mmc_request *req;
101 struct work_struct cmd_handler; 102 wait_queue_head_t notify;
102 struct delayed_work abort_handler;
103 wait_queue_head_t can_eject;
104 103
105 size_t written_blocks; 104 size_t written_blocks;
106 char *buffer;
107 size_t buffer_size; 105 size_t buffer_size;
108 size_t buffer_pos; 106 size_t buffer_pos;
109 107
110}; 108};
111 109
110static char* tifm_sd_data_buffer(struct mmc_data *data)
111{
112 return page_address(data->sg->page) + data->sg->offset;
113}
114
112static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, 115static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
113 unsigned int host_status) 116 unsigned int host_status)
114{ 117{
115 struct mmc_command *cmd = host->req->cmd; 118 struct mmc_command *cmd = host->req->cmd;
116 unsigned int t_val = 0, cnt = 0; 119 unsigned int t_val = 0, cnt = 0;
120 char *buffer;
117 121
118 if (host_status & TIFM_MMCSD_BRS) { 122 if (host_status & TIFM_MMCSD_BRS) {
119 /* in non-dma rx mode BRS fires when fifo is still not empty */ 123 /* in non-dma rx mode BRS fires when fifo is still not empty */
120 if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) { 124 if (no_dma && (cmd->data->flags & MMC_DATA_READ)) {
125 buffer = tifm_sd_data_buffer(host->req->data);
121 while (host->buffer_size > host->buffer_pos) { 126 while (host->buffer_size > host->buffer_pos) {
122 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 127 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
123 host->buffer[host->buffer_pos++] = t_val & 0xff; 128 buffer[host->buffer_pos++] = t_val & 0xff;
124 host->buffer[host->buffer_pos++] = 129 buffer[host->buffer_pos++] =
125 (t_val >> 8) & 0xff; 130 (t_val >> 8) & 0xff;
126 } 131 }
127 } 132 }
128 return 1; 133 return 1;
129 } else if (host->buffer) { 134 } else if (no_dma) {
135 buffer = tifm_sd_data_buffer(host->req->data);
130 if ((cmd->data->flags & MMC_DATA_READ) && 136 if ((cmd->data->flags & MMC_DATA_READ) &&
131 (host_status & TIFM_MMCSD_AF)) { 137 (host_status & TIFM_MMCSD_AF)) {
132 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 138 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
133 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 139 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
134 if (host->buffer_size > host->buffer_pos) { 140 if (host->buffer_size > host->buffer_pos) {
135 host->buffer[host->buffer_pos++] = 141 buffer[host->buffer_pos++] =
136 t_val & 0xff; 142 t_val & 0xff;
137 host->buffer[host->buffer_pos++] = 143 buffer[host->buffer_pos++] =
138 (t_val >> 8) & 0xff; 144 (t_val >> 8) & 0xff;
139 } 145 }
140 } 146 }
@@ -142,11 +148,12 @@ static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
142 && (host_status & TIFM_MMCSD_AE)) { 148 && (host_status & TIFM_MMCSD_AE)) {
143 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 149 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
144 if (host->buffer_size > host->buffer_pos) { 150 if (host->buffer_size > host->buffer_pos) {
145 t_val = host->buffer[host->buffer_pos++] & 0x00ff; 151 t_val = buffer[host->buffer_pos++]
146 t_val |= ((host->buffer[host->buffer_pos++]) << 8) 152 & 0x00ff;
147 & 0xff00; 153 t_val |= ((buffer[host->buffer_pos++])
154 << 8) & 0xff00;
148 writel(t_val, 155 writel(t_val,
149 sock->addr + SOCK_MMCSD_DATA); 156 sock->addr + SOCK_MMCSD_DATA);
150 } 157 }
151 } 158 }
152 } 159 }
@@ -206,7 +213,7 @@ static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
206 cmd_mask |= TIFM_MMCSD_READ; 213 cmd_mask |= TIFM_MMCSD_READ;
207 214
208 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", 215 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
209 cmd->opcode, cmd->arg, cmd_mask); 216 cmd->opcode, cmd->arg, cmd_mask);
210 217
211 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); 218 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
212 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); 219 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
@@ -239,65 +246,78 @@ change_state:
239 tifm_sd_fetch_resp(cmd, sock); 246 tifm_sd_fetch_resp(cmd, sock);
240 if (cmd->data) { 247 if (cmd->data) {
241 host->state = BRS; 248 host->state = BRS;
242 } else 249 } else {
243 host->state = READY; 250 host->state = READY;
251 }
244 goto change_state; 252 goto change_state;
245 } 253 }
246 break; 254 break;
247 case BRS: 255 case BRS:
248 if (tifm_sd_transfer_data(sock, host, host_status)) { 256 if (tifm_sd_transfer_data(sock, host, host_status)) {
249 if (!host->req->stop) { 257 if (cmd->data->flags & MMC_DATA_WRITE) {
250 if (cmd->data->flags & MMC_DATA_WRITE) { 258 host->state = CARD;
251 host->state = CARD; 259 } else {
260 if (no_dma) {
261 if (host->req->stop) {
262 tifm_sd_exec(host, host->req->stop);
263 host->state = SCMD;
264 } else {
265 host->state = READY;
266 }
252 } else { 267 } else {
253 host->state = 268 host->state = FIFO;
254 host->buffer ? READY : FIFO;
255 } 269 }
256 goto change_state;
257 } 270 }
258 tifm_sd_exec(host, host->req->stop); 271 goto change_state;
259 host->state = SCMD;
260 } 272 }
261 break; 273 break;
262 case SCMD: 274 case SCMD:
263 if (host_status & TIFM_MMCSD_EOC) { 275 if (host_status & TIFM_MMCSD_EOC) {
264 tifm_sd_fetch_resp(host->req->stop, sock); 276 tifm_sd_fetch_resp(host->req->stop, sock);
265 if (cmd->error) { 277 host->state = READY;
266 host->state = READY;
267 } else if (cmd->data->flags & MMC_DATA_WRITE) {
268 host->state = CARD;
269 } else {
270 host->state = host->buffer ? READY : FIFO;
271 }
272 goto change_state; 278 goto change_state;
273 } 279 }
274 break; 280 break;
275 case CARD: 281 case CARD:
282 dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n",
283 host->written_blocks);
276 if (!(host->flags & CARD_BUSY) 284 if (!(host->flags & CARD_BUSY)
277 && (host->written_blocks == cmd->data->blocks)) { 285 && (host->written_blocks == cmd->data->blocks)) {
278 host->state = host->buffer ? READY : FIFO; 286 if (no_dma) {
287 if (host->req->stop) {
288 tifm_sd_exec(host, host->req->stop);
289 host->state = SCMD;
290 } else {
291 host->state = READY;
292 }
293 } else {
294 host->state = FIFO;
295 }
279 goto change_state; 296 goto change_state;
280 } 297 }
281 break; 298 break;
282 case FIFO: 299 case FIFO:
283 if (host->flags & FIFO_RDY) { 300 if (host->flags & FIFO_RDY) {
284 host->state = READY;
285 host->flags &= ~FIFO_RDY; 301 host->flags &= ~FIFO_RDY;
302 if (host->req->stop) {
303 tifm_sd_exec(host, host->req->stop);
304 host->state = SCMD;
305 } else {
306 host->state = READY;
307 }
286 goto change_state; 308 goto change_state;
287 } 309 }
288 break; 310 break;
289 case READY: 311 case READY:
290 queue_work(sock->wq, &host->cmd_handler); 312 tasklet_schedule(&host->finish_tasklet);
291 return; 313 return;
292 } 314 }
293 315
294 queue_delayed_work(sock->wq, &host->abort_handler,
295 host->timeout_jiffies);
296} 316}
297 317
298/* Called from interrupt handler */ 318/* Called from interrupt handler */
299static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, 319static void tifm_sd_signal_irq(struct tifm_dev *sock,
300 unsigned int sock_irq_status) 320 unsigned int sock_irq_status)
301{ 321{
302 struct tifm_sd *host; 322 struct tifm_sd *host;
303 unsigned int host_status = 0, fifo_status = 0; 323 unsigned int host_status = 0, fifo_status = 0;
@@ -305,7 +325,6 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
305 325
306 spin_lock(&sock->lock); 326 spin_lock(&sock->lock);
307 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); 327 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
308 cancel_delayed_work(&host->abort_handler);
309 328
310 if (sock_irq_status & FIFO_EVENT) { 329 if (sock_irq_status & FIFO_EVENT) {
311 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); 330 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
@@ -318,19 +337,17 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
318 host_status = readl(sock->addr + SOCK_MMCSD_STATUS); 337 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
319 writel(host_status, sock->addr + SOCK_MMCSD_STATUS); 338 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
320 339
321 if (!(host->flags & HOST_REG))
322 queue_work(sock->wq, &host->cmd_handler);
323 if (!host->req) 340 if (!host->req)
324 goto done; 341 goto done;
325 342
326 if (host_status & TIFM_MMCSD_ERRMASK) { 343 if (host_status & TIFM_MMCSD_ERRMASK) {
327 if (host_status & TIFM_MMCSD_CERR) 344 if (host_status & TIFM_MMCSD_CERR)
328 error_code = MMC_ERR_FAILED; 345 error_code = MMC_ERR_FAILED;
329 else if (host_status & 346 else if (host_status
330 (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) 347 & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO))
331 error_code = MMC_ERR_TIMEOUT; 348 error_code = MMC_ERR_TIMEOUT;
332 else if (host_status & 349 else if (host_status
333 (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) 350 & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC))
334 error_code = MMC_ERR_BADCRC; 351 error_code = MMC_ERR_BADCRC;
335 352
336 writel(TIFM_FIFO_INT_SETALL, 353 writel(TIFM_FIFO_INT_SETALL,
@@ -340,12 +357,11 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
340 if (host->req->stop) { 357 if (host->req->stop) {
341 if (host->state == SCMD) { 358 if (host->state == SCMD) {
342 host->req->stop->error = error_code; 359 host->req->stop->error = error_code;
343 } else if(host->state == BRS) { 360 } else if (host->state == BRS
361 || host->state == CARD
362 || host->state == FIFO) {
344 host->req->cmd->error = error_code; 363 host->req->cmd->error = error_code;
345 tifm_sd_exec(host, host->req->stop); 364 tifm_sd_exec(host, host->req->stop);
346 queue_delayed_work(sock->wq,
347 &host->abort_handler,
348 host->timeout_jiffies);
349 host->state = SCMD; 365 host->state = SCMD;
350 goto done; 366 goto done;
351 } else { 367 } else {
@@ -359,8 +375,8 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
359 375
360 if (host_status & TIFM_MMCSD_CB) 376 if (host_status & TIFM_MMCSD_CB)
361 host->flags |= CARD_BUSY; 377 host->flags |= CARD_BUSY;
362 if ((host_status & TIFM_MMCSD_EOFB) && 378 if ((host_status & TIFM_MMCSD_EOFB)
363 (host->flags & CARD_BUSY)) { 379 && (host->flags & CARD_BUSY)) {
364 host->written_blocks++; 380 host->written_blocks++;
365 host->flags &= ~CARD_BUSY; 381 host->flags &= ~CARD_BUSY;
366 } 382 }
@@ -370,22 +386,22 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
370 tifm_sd_process_cmd(sock, host, host_status); 386 tifm_sd_process_cmd(sock, host, host_status);
371done: 387done:
372 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", 388 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n",
373 host_status, fifo_status); 389 host_status, fifo_status);
374 spin_unlock(&sock->lock); 390 spin_unlock(&sock->lock);
375 return sock_irq_status;
376} 391}
377 392
378static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) 393static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd)
379{ 394{
380 struct tifm_dev *sock = card->dev; 395 struct tifm_dev *sock = host->dev;
381 unsigned int dest_cnt; 396 unsigned int dest_cnt;
382 397
383 /* DMA style IO */ 398 /* DMA style IO */
384 399 dev_dbg(&sock->dev, "setting dma for %d blocks\n",
400 cmd->data->blocks);
385 writel(TIFM_FIFO_INT_SETALL, 401 writel(TIFM_FIFO_INT_SETALL,
386 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 402 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
387 writel(ilog2(cmd->data->blksz) - 2, 403 writel(ilog2(cmd->data->blksz) - 2,
388 sock->addr + SOCK_FIFO_PAGE_SIZE); 404 sock->addr + SOCK_FIFO_PAGE_SIZE);
389 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); 405 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL);
390 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); 406 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
391 407
@@ -399,7 +415,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
399 if (cmd->data->flags & MMC_DATA_WRITE) { 415 if (cmd->data->flags & MMC_DATA_WRITE) {
400 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 416 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
401 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, 417 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN,
402 sock->addr + SOCK_DMA_CONTROL); 418 sock->addr + SOCK_DMA_CONTROL);
403 } else { 419 } else {
404 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 420 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
405 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); 421 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL);
@@ -407,7 +423,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
407} 423}
408 424
409static void tifm_sd_set_data_timeout(struct tifm_sd *host, 425static void tifm_sd_set_data_timeout(struct tifm_sd *host,
410 struct mmc_data *data) 426 struct mmc_data *data)
411{ 427{
412 struct tifm_dev *sock = host->dev; 428 struct tifm_dev *sock = host->dev;
413 unsigned int data_timeout = data->timeout_clks; 429 unsigned int data_timeout = data->timeout_clks;
@@ -416,22 +432,21 @@ static void tifm_sd_set_data_timeout(struct tifm_sd *host,
416 return; 432 return;
417 433
418 data_timeout += data->timeout_ns / 434 data_timeout += data->timeout_ns /
419 ((1000000000 / host->clk_freq) * host->clk_div); 435 ((1000000000UL / host->clk_freq) * host->clk_div);
420 data_timeout *= 10; // call it fudge factor for now
421 436
422 if (data_timeout < 0xffff) { 437 if (data_timeout < 0xffff) {
423 writel((~TIFM_MMCSD_DPE) &
424 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
425 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
426 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 438 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
439 writel((~TIFM_MMCSD_DPE)
440 & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
441 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
427 } else { 442 } else {
428 writel(TIFM_MMCSD_DPE |
429 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
430 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
431 data_timeout = (data_timeout >> 10) + 1; 443 data_timeout = (data_timeout >> 10) + 1;
432 if(data_timeout > 0xffff) 444 if (data_timeout > 0xffff)
433 data_timeout = 0; /* set to unlimited */ 445 data_timeout = 0; /* set to unlimited */
434 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 446 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
447 writel(TIFM_MMCSD_DPE
448 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
449 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
435 } 450 }
436} 451}
437 452
@@ -474,11 +489,10 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
474 } 489 }
475 490
476 host->req = mrq; 491 host->req = mrq;
492 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
477 host->state = CMD; 493 host->state = CMD;
478 queue_delayed_work(sock->wq, &host->abort_handler,
479 host->timeout_jiffies);
480 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 494 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
481 sock->addr + SOCK_CONTROL); 495 sock->addr + SOCK_CONTROL);
482 tifm_sd_exec(host, mrq->cmd); 496 tifm_sd_exec(host, mrq->cmd);
483 spin_unlock_irqrestore(&sock->lock, flags); 497 spin_unlock_irqrestore(&sock->lock, flags);
484 return; 498 return;
@@ -493,9 +507,9 @@ err_out:
493 mmc_request_done(mmc, mrq); 507 mmc_request_done(mmc, mrq);
494} 508}
495 509
496static void tifm_sd_end_cmd(struct work_struct *work) 510static void tifm_sd_end_cmd(unsigned long data)
497{ 511{
498 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 512 struct tifm_sd *host = (struct tifm_sd*)data;
499 struct tifm_dev *sock = host->dev; 513 struct tifm_dev *sock = host->dev;
500 struct mmc_host *mmc = tifm_get_drvdata(sock); 514 struct mmc_host *mmc = tifm_get_drvdata(sock);
501 struct mmc_request *mrq; 515 struct mmc_request *mrq;
@@ -504,6 +518,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
504 518
505 spin_lock_irqsave(&sock->lock, flags); 519 spin_lock_irqsave(&sock->lock, flags);
506 520
521 del_timer(&host->timer);
507 mrq = host->req; 522 mrq = host->req;
508 host->req = NULL; 523 host->req = NULL;
509 host->state = IDLE; 524 host->state = IDLE;
@@ -517,8 +532,8 @@ static void tifm_sd_end_cmd(struct work_struct *work)
517 r_data = mrq->cmd->data; 532 r_data = mrq->cmd->data;
518 if (r_data) { 533 if (r_data) {
519 if (r_data->flags & MMC_DATA_WRITE) { 534 if (r_data->flags & MMC_DATA_WRITE) {
520 r_data->bytes_xfered = host->written_blocks * 535 r_data->bytes_xfered = host->written_blocks
521 r_data->blksz; 536 * r_data->blksz;
522 } else { 537 } else {
523 r_data->bytes_xfered = r_data->blocks - 538 r_data->bytes_xfered = r_data->blocks -
524 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 539 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -532,7 +547,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
532 } 547 }
533 548
534 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 549 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
535 sock->addr + SOCK_CONTROL); 550 sock->addr + SOCK_CONTROL);
536 551
537 spin_unlock_irqrestore(&sock->lock, flags); 552 spin_unlock_irqrestore(&sock->lock, flags);
538 mmc_request_done(mmc, mrq); 553 mmc_request_done(mmc, mrq);
@@ -544,15 +559,6 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
544 struct tifm_dev *sock = host->dev; 559 struct tifm_dev *sock = host->dev;
545 unsigned long flags; 560 unsigned long flags;
546 struct mmc_data *r_data = mrq->cmd->data; 561 struct mmc_data *r_data = mrq->cmd->data;
547 char *t_buffer = NULL;
548
549 if (r_data) {
550 t_buffer = kmap(r_data->sg->page);
551 if (!t_buffer) {
552 printk(KERN_ERR DRIVER_NAME ": kmap failed\n");
553 goto err_out;
554 }
555 }
556 562
557 spin_lock_irqsave(&sock->lock, flags); 563 spin_lock_irqsave(&sock->lock, flags);
558 if (host->flags & EJECT) { 564 if (host->flags & EJECT) {
@@ -569,15 +575,14 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
569 if (r_data) { 575 if (r_data) {
570 tifm_sd_set_data_timeout(host, r_data); 576 tifm_sd_set_data_timeout(host, r_data);
571 577
572 host->buffer = t_buffer + r_data->sg->offset; 578 host->buffer_size = mrq->cmd->data->blocks
573 host->buffer_size = mrq->cmd->data->blocks * 579 * mrq->cmd->data->blksz;
574 mrq->cmd->data->blksz;
575 580
576 writel(TIFM_MMCSD_BUFINT | 581 writel(TIFM_MMCSD_BUFINT
577 readl(sock->addr + SOCK_MMCSD_INT_ENABLE), 582 | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
578 sock->addr + SOCK_MMCSD_INT_ENABLE); 583 sock->addr + SOCK_MMCSD_INT_ENABLE);
579 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | 584 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
580 (TIFM_MMCSD_FIFO_SIZE - 1), 585 | (TIFM_MMCSD_FIFO_SIZE - 1),
581 sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 586 sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
582 587
583 host->written_blocks = 0; 588 host->written_blocks = 0;
@@ -588,26 +593,22 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
588 } 593 }
589 594
590 host->req = mrq; 595 host->req = mrq;
596 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
591 host->state = CMD; 597 host->state = CMD;
592 queue_delayed_work(sock->wq, &host->abort_handler,
593 host->timeout_jiffies);
594 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 598 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
595 sock->addr + SOCK_CONTROL); 599 sock->addr + SOCK_CONTROL);
596 tifm_sd_exec(host, mrq->cmd); 600 tifm_sd_exec(host, mrq->cmd);
597 spin_unlock_irqrestore(&sock->lock, flags); 601 spin_unlock_irqrestore(&sock->lock, flags);
598 return; 602 return;
599 603
600err_out: 604err_out:
601 if (t_buffer)
602 kunmap(r_data->sg->page);
603
604 mrq->cmd->error = MMC_ERR_TIMEOUT; 605 mrq->cmd->error = MMC_ERR_TIMEOUT;
605 mmc_request_done(mmc, mrq); 606 mmc_request_done(mmc, mrq);
606} 607}
607 608
608static void tifm_sd_end_cmd_nodma(struct work_struct *work) 609static void tifm_sd_end_cmd_nodma(unsigned long data)
609{ 610{
610 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 611 struct tifm_sd *host = (struct tifm_sd*)data;
611 struct tifm_dev *sock = host->dev; 612 struct tifm_dev *sock = host->dev;
612 struct mmc_host *mmc = tifm_get_drvdata(sock); 613 struct mmc_host *mmc = tifm_get_drvdata(sock);
613 struct mmc_request *mrq; 614 struct mmc_request *mrq;
@@ -616,6 +617,7 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
616 617
617 spin_lock_irqsave(&sock->lock, flags); 618 spin_lock_irqsave(&sock->lock, flags);
618 619
620 del_timer(&host->timer);
619 mrq = host->req; 621 mrq = host->req;
620 host->req = NULL; 622 host->req = NULL;
621 host->state = IDLE; 623 host->state = IDLE;
@@ -633,8 +635,8 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
633 sock->addr + SOCK_MMCSD_INT_ENABLE); 635 sock->addr + SOCK_MMCSD_INT_ENABLE);
634 636
635 if (r_data->flags & MMC_DATA_WRITE) { 637 if (r_data->flags & MMC_DATA_WRITE) {
636 r_data->bytes_xfered = host->written_blocks * 638 r_data->bytes_xfered = host->written_blocks
637 r_data->blksz; 639 * r_data->blksz;
638 } else { 640 } else {
639 r_data->bytes_xfered = r_data->blocks - 641 r_data->bytes_xfered = r_data->blocks -
640 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 642 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -642,29 +644,44 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
642 r_data->bytes_xfered += r_data->blksz - 644 r_data->bytes_xfered += r_data->blksz -
643 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; 645 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
644 } 646 }
645 host->buffer = NULL;
646 host->buffer_pos = 0; 647 host->buffer_pos = 0;
647 host->buffer_size = 0; 648 host->buffer_size = 0;
648 } 649 }
649 650
650 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 651 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
651 sock->addr + SOCK_CONTROL); 652 sock->addr + SOCK_CONTROL);
652 653
653 spin_unlock_irqrestore(&sock->lock, flags); 654 spin_unlock_irqrestore(&sock->lock, flags);
654 655
655 if (r_data)
656 kunmap(r_data->sg->page);
657
658 mmc_request_done(mmc, mrq); 656 mmc_request_done(mmc, mrq);
659} 657}
660 658
661static void tifm_sd_abort(struct work_struct *work) 659static void tifm_sd_terminate(struct tifm_sd *host)
660{
661 struct tifm_dev *sock = host->dev;
662 unsigned long flags;
663
664 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
665 mmiowb();
666 spin_lock_irqsave(&sock->lock, flags);
667 host->flags |= EJECT;
668 if (host->req) {
669 writel(TIFM_FIFO_INT_SETALL,
670 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
671 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
672 tasklet_schedule(&host->finish_tasklet);
673 }
674 spin_unlock_irqrestore(&sock->lock, flags);
675}
676
677static void tifm_sd_abort(unsigned long data)
662{ 678{
663 struct tifm_sd *host = 679 struct tifm_sd *host = (struct tifm_sd*)data;
664 container_of(work, struct tifm_sd, abort_handler.work);
665 680
666 printk(KERN_ERR DRIVER_NAME 681 printk(KERN_ERR DRIVER_NAME
667 ": card failed to respond for a long period of time"); 682 ": card failed to respond for a long period of time");
683
684 tifm_sd_terminate(host);
668 tifm_eject(host->dev); 685 tifm_eject(host->dev);
669} 686}
670 687
@@ -683,9 +700,9 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
683 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), 700 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
684 sock->addr + SOCK_MMCSD_CONFIG); 701 sock->addr + SOCK_MMCSD_CONFIG);
685 } else { 702 } else {
686 writel((~TIFM_MMCSD_4BBUS) & 703 writel((~TIFM_MMCSD_4BBUS)
687 readl(sock->addr + SOCK_MMCSD_CONFIG), 704 & readl(sock->addr + SOCK_MMCSD_CONFIG),
688 sock->addr + SOCK_MMCSD_CONFIG); 705 sock->addr + SOCK_MMCSD_CONFIG);
689 } 706 }
690 707
691 if (ios->clock) { 708 if (ios->clock) {
@@ -704,23 +721,24 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
704 if ((20000000 / clk_div1) > (24000000 / clk_div2)) { 721 if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
705 host->clk_freq = 20000000; 722 host->clk_freq = 20000000;
706 host->clk_div = clk_div1; 723 host->clk_div = clk_div1;
707 writel((~TIFM_CTRL_FAST_CLK) & 724 writel((~TIFM_CTRL_FAST_CLK)
708 readl(sock->addr + SOCK_CONTROL), 725 & readl(sock->addr + SOCK_CONTROL),
709 sock->addr + SOCK_CONTROL); 726 sock->addr + SOCK_CONTROL);
710 } else { 727 } else {
711 host->clk_freq = 24000000; 728 host->clk_freq = 24000000;
712 host->clk_div = clk_div2; 729 host->clk_div = clk_div2;
713 writel(TIFM_CTRL_FAST_CLK | 730 writel(TIFM_CTRL_FAST_CLK
714 readl(sock->addr + SOCK_CONTROL), 731 | readl(sock->addr + SOCK_CONTROL),
715 sock->addr + SOCK_CONTROL); 732 sock->addr + SOCK_CONTROL);
716 } 733 }
717 } else { 734 } else {
718 host->clk_div = 0; 735 host->clk_div = 0;
719 } 736 }
720 host->clk_div &= TIFM_MMCSD_CLKMASK; 737 host->clk_div &= TIFM_MMCSD_CLKMASK;
721 writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & 738 writel(host->clk_div
722 readl(sock->addr + SOCK_MMCSD_CONFIG)), 739 | ((~TIFM_MMCSD_CLKMASK)
723 sock->addr + SOCK_MMCSD_CONFIG); 740 & readl(sock->addr + SOCK_MMCSD_CONFIG)),
741 sock->addr + SOCK_MMCSD_CONFIG);
724 742
725 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 743 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
726 host->flags |= OPENDRAIN; 744 host->flags |= OPENDRAIN;
@@ -734,7 +752,7 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
734 // allow removal. 752 // allow removal.
735 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { 753 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) {
736 host->flags |= EJECT_DONE; 754 host->flags |= EJECT_DONE;
737 wake_up_all(&host->can_eject); 755 wake_up_all(&host->notify);
738 } 756 }
739 757
740 spin_unlock_irqrestore(&sock->lock, flags); 758 spin_unlock_irqrestore(&sock->lock, flags);
@@ -762,20 +780,67 @@ static struct mmc_host_ops tifm_sd_ops = {
762 .get_ro = tifm_sd_ro 780 .get_ro = tifm_sd_ro
763}; 781};
764 782
765static void tifm_sd_register_host(struct work_struct *work) 783static int tifm_sd_initialize_host(struct tifm_sd *host)
766{ 784{
767 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 785 int rc;
786 unsigned int host_status = 0;
768 struct tifm_dev *sock = host->dev; 787 struct tifm_dev *sock = host->dev;
769 struct mmc_host *mmc = tifm_get_drvdata(sock);
770 unsigned long flags;
771 788
772 spin_lock_irqsave(&sock->lock, flags); 789 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
773 host->flags |= HOST_REG; 790 mmiowb();
774 PREPARE_WORK(&host->cmd_handler, 791 host->clk_div = 61;
775 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); 792 host->clk_freq = 20000000;
776 spin_unlock_irqrestore(&sock->lock, flags); 793 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
777 dev_dbg(&sock->dev, "adding host\n"); 794 writel(host->clk_div | TIFM_MMCSD_POWER,
778 mmc_add_host(mmc); 795 sock->addr + SOCK_MMCSD_CONFIG);
796
797 /* wait up to 0.51 sec for reset */
798 for (rc = 2; rc <= 256; rc <<= 1) {
799 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
800 rc = 0;
801 break;
802 }
803 msleep(rc);
804 }
805
806 if (rc) {
807 printk(KERN_ERR DRIVER_NAME
808 ": controller failed to reset\n");
809 return -ENODEV;
810 }
811
812 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
813 writel(host->clk_div | TIFM_MMCSD_POWER,
814 sock->addr + SOCK_MMCSD_CONFIG);
815 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
816
817 // command timeout fixed to 64 clocks for now
818 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
819 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
820
821 /* INAB should take much less than reset */
822 for (rc = 1; rc <= 16; rc <<= 1) {
823 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
824 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
825 if (!(host_status & TIFM_MMCSD_ERRMASK)
826 && (host_status & TIFM_MMCSD_EOC)) {
827 rc = 0;
828 break;
829 }
830 msleep(rc);
831 }
832
833 if (rc) {
834 printk(KERN_ERR DRIVER_NAME
835 ": card not ready - probe failed on initialization\n");
836 return -ENODEV;
837 }
838
839 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
840 sock->addr + SOCK_MMCSD_INT_ENABLE);
841 mmiowb();
842
843 return 0;
779} 844}
780 845
781static int tifm_sd_probe(struct tifm_dev *sock) 846static int tifm_sd_probe(struct tifm_dev *sock)
@@ -784,8 +849,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
784 struct tifm_sd *host; 849 struct tifm_sd *host;
785 int rc = -EIO; 850 int rc = -EIO;
786 851
787 if (!(TIFM_SOCK_STATE_OCCUPIED & 852 if (!(TIFM_SOCK_STATE_OCCUPIED
788 readl(sock->addr + SOCK_PRESENT_STATE))) { 853 & readl(sock->addr + SOCK_PRESENT_STATE))) {
789 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); 854 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n");
790 return rc; 855 return rc;
791 } 856 }
@@ -795,109 +860,99 @@ static int tifm_sd_probe(struct tifm_dev *sock)
795 return -ENOMEM; 860 return -ENOMEM;
796 861
797 host = mmc_priv(mmc); 862 host = mmc_priv(mmc);
798 host->dev = sock;
799 host->clk_div = 61;
800 init_waitqueue_head(&host->can_eject);
801 INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
802 INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
803
804 tifm_set_drvdata(sock, mmc); 863 tifm_set_drvdata(sock, mmc);
805 sock->signal_irq = tifm_sd_signal_irq; 864 host->dev = sock;
806
807 host->clk_freq = 20000000;
808 host->timeout_jiffies = msecs_to_jiffies(1000); 865 host->timeout_jiffies = msecs_to_jiffies(1000);
809 866
867 init_waitqueue_head(&host->notify);
868 tasklet_init(&host->finish_tasklet,
869 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
870 (unsigned long)host);
871 setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
872
810 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; 873 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request;
811 mmc->ops = &tifm_sd_ops; 874 mmc->ops = &tifm_sd_ops;
812 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 875 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
813 mmc->caps = MMC_CAP_4_BIT_DATA; 876 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
814 mmc->f_min = 20000000 / 60; 877 mmc->f_min = 20000000 / 60;
815 mmc->f_max = 24000000; 878 mmc->f_max = 24000000;
816 mmc->max_hw_segs = 1; 879 mmc->max_hw_segs = 1;
817 mmc->max_phys_segs = 1; 880 mmc->max_phys_segs = 1;
818 mmc->max_sectors = 127; 881 // limited by DMA counter - it's safer to stick with
819 mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length 882 // block counter has 11 bits though
820 883 mmc->max_blk_count = 256;
821 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); 884 // 2k maximum hw block length
822 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); 885 mmc->max_blk_size = 2048;
823 writel(host->clk_div | TIFM_MMCSD_POWER, 886 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
824 sock->addr + SOCK_MMCSD_CONFIG); 887 mmc->max_seg_size = mmc->max_req_size;
888 sock->signal_irq = tifm_sd_signal_irq;
889 rc = tifm_sd_initialize_host(host);
825 890
826 for (rc = 0; rc < 50; rc++) { 891 if (!rc)
827 /* Wait for reset ack */ 892 rc = mmc_add_host(mmc);
828 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { 893 if (rc)
829 rc = 0; 894 goto out_free_mmc;
830 break;
831 }
832 msleep(10);
833 }
834 895
835 if (rc) { 896 return 0;
836 printk(KERN_ERR DRIVER_NAME 897out_free_mmc:
837 ": card not ready - probe failed\n"); 898 mmc_free_host(mmc);
838 mmc_free_host(mmc); 899 return rc;
839 return -ENODEV; 900}
840 }
841 901
842 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); 902static void tifm_sd_remove(struct tifm_dev *sock)
843 writel(host->clk_div | TIFM_MMCSD_POWER, 903{
844 sock->addr + SOCK_MMCSD_CONFIG); 904 struct mmc_host *mmc = tifm_get_drvdata(sock);
845 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 905 struct tifm_sd *host = mmc_priv(mmc);
846 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
847 sock->addr + SOCK_MMCSD_INT_ENABLE);
848 906
849 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now 907 del_timer_sync(&host->timer);
850 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); 908 tifm_sd_terminate(host);
851 writel(host->clk_div | TIFM_MMCSD_POWER, 909 wait_event_timeout(host->notify, host->flags & EJECT_DONE,
852 sock->addr + SOCK_MMCSD_CONFIG); 910 host->timeout_jiffies);
911 tasklet_kill(&host->finish_tasklet);
912 mmc_remove_host(mmc);
853 913
854 queue_delayed_work(sock->wq, &host->abort_handler, 914 /* The meaning of the bit majority in this constant is unknown. */
855 host->timeout_jiffies); 915 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
916 sock->addr + SOCK_CONTROL);
856 917
857 return 0; 918 tifm_set_drvdata(sock, NULL);
919 mmc_free_host(mmc);
858} 920}
859 921
860static int tifm_sd_host_is_down(struct tifm_dev *sock) 922#ifdef CONFIG_PM
923
924static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
861{ 925{
862 struct mmc_host *mmc = tifm_get_drvdata(sock); 926 struct mmc_host *mmc = tifm_get_drvdata(sock);
863 struct tifm_sd *host = mmc_priv(mmc); 927 int rc;
864 unsigned long flags;
865 int rc = 0;
866 928
867 spin_lock_irqsave(&sock->lock, flags); 929 rc = mmc_suspend_host(mmc, state);
868 rc = (host->flags & EJECT_DONE); 930 /* The meaning of the bit majority in this constant is unknown. */
869 spin_unlock_irqrestore(&sock->lock, flags); 931 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
932 sock->addr + SOCK_CONTROL);
870 return rc; 933 return rc;
871} 934}
872 935
873static void tifm_sd_remove(struct tifm_dev *sock) 936static int tifm_sd_resume(struct tifm_dev *sock)
874{ 937{
875 struct mmc_host *mmc = tifm_get_drvdata(sock); 938 struct mmc_host *mmc = tifm_get_drvdata(sock);
876 struct tifm_sd *host = mmc_priv(mmc); 939 struct tifm_sd *host = mmc_priv(mmc);
877 unsigned long flags;
878 940
879 spin_lock_irqsave(&sock->lock, flags); 941 if (sock->media_id != FM_SD
880 host->flags |= EJECT; 942 || tifm_sd_initialize_host(host)) {
881 if (host->req) 943 tifm_eject(sock);
882 queue_work(sock->wq, &host->cmd_handler); 944 return 0;
883 spin_unlock_irqrestore(&sock->lock, flags); 945 } else {
884 wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock), 946 return mmc_resume_host(mmc);
885 host->timeout_jiffies); 947 }
948}
886 949
887 if (host->flags & HOST_REG) 950#else
888 mmc_remove_host(mmc);
889 951
890 /* The meaning of the bit majority in this constant is unknown. */ 952#define tifm_sd_suspend NULL
891 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), 953#define tifm_sd_resume NULL
892 sock->addr + SOCK_CONTROL);
893 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
894 writel(TIFM_FIFO_INT_SETALL,
895 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
896 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
897 954
898 tifm_set_drvdata(sock, NULL); 955#endif /* CONFIG_PM */
899 mmc_free_host(mmc);
900}
901 956
902static tifm_media_id tifm_sd_id_tbl[] = { 957static tifm_media_id tifm_sd_id_tbl[] = {
903 FM_SD, 0 958 FM_SD, 0
@@ -910,7 +965,9 @@ static struct tifm_driver tifm_sd_driver = {
910 }, 965 },
911 .id_table = tifm_sd_id_tbl, 966 .id_table = tifm_sd_id_tbl,
912 .probe = tifm_sd_probe, 967 .probe = tifm_sd_probe,
913 .remove = tifm_sd_remove 968 .remove = tifm_sd_remove,
969 .suspend = tifm_sd_suspend,
970 .resume = tifm_sd_resume
914}; 971};
915 972
916static int __init tifm_sd_init(void) 973static int __init tifm_sd_init(void)
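
The tifm_sd rework above replaces the per-socket workqueue with a completion tasklet plus an abort timer (armed with mod_timer() per request, cancelled in the completion path, firing tifm_sd_abort() on a stuck card) and folds controller bring-up into tifm_sd_initialize_host(). Its doubling msleep() loop sleeps 2+4+...+256 = 510 ms in the worst case, which is where the "0.51 sec" comment comes from. A minimal sketch of that back-off poll; ready() is an illustrative stand-in for the register test, not driver code.

        #include <linux/delay.h>
        #include <linux/errno.h>

        /* Check, then sleep 2, 4, ... 256 ms: at most 510 ms of total sleep. */
        static int poll_until_ready(int (*ready)(void *), void *ctx)
        {
                unsigned int delay;

                for (delay = 2; delay <= 256; delay <<= 1) {
                        if (ready(ctx))
                                return 0;
                        msleep(delay);
                }
                return -ENODEV;
        }
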
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 7a282672f8e9..a44d8777ab9f 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver 2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 * 3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. 4 * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
272 return host->num_sg; 272 return host->num_sg;
273} 273}
274 274
275static inline char *wbsd_kmap_sg(struct wbsd_host *host) 275static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
276{ 276{
277 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + 277 return page_address(host->cur_sg->page) + host->cur_sg->offset;
278 host->cur_sg->offset;
279 return host->mapped_sg;
280}
281
282static inline void wbsd_kunmap_sg(struct wbsd_host *host)
283{
284 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
285} 278}
286 279
287static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) 280static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
302 * we do not transfer too much. 295 * we do not transfer too much.
303 */ 296 */
304 for (i = 0; i < len; i++) { 297 for (i = 0; i < len; i++) {
305 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 298 sgbuf = page_address(sg[i].page) + sg[i].offset;
306 if (size < sg[i].length) 299 if (size < sg[i].length)
307 memcpy(dmabuf, sgbuf, size); 300 memcpy(dmabuf, sgbuf, size);
308 else 301 else
309 memcpy(dmabuf, sgbuf, sg[i].length); 302 memcpy(dmabuf, sgbuf, sg[i].length);
310 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
311 dmabuf += sg[i].length; 303 dmabuf += sg[i].length;
312 304
313 if (size < sg[i].length) 305 if (size < sg[i].length)
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
347 * we do not transfer too much. 339 * we do not transfer too much.
348 */ 340 */
349 for (i = 0; i < len; i++) { 341 for (i = 0; i < len; i++) {
350 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 342 sgbuf = page_address(sg[i].page) + sg[i].offset;
351 if (size < sg[i].length) 343 if (size < sg[i].length)
352 memcpy(sgbuf, dmabuf, size); 344 memcpy(sgbuf, dmabuf, size);
353 else 345 else
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
497 if (data->bytes_xfered == host->size) 489 if (data->bytes_xfered == host->size)
498 return; 490 return;
499 491
500 buffer = wbsd_kmap_sg(host) + host->offset; 492 buffer = wbsd_sg_to_buffer(host) + host->offset;
501 493
502 /* 494 /*
503 * Drain the fifo. This has a tendency to loop longer 495 * Drain the fifo. This has a tendency to loop longer
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
526 /* 518 /*
527 * Transfer done? 519 * Transfer done?
528 */ 520 */
529 if (data->bytes_xfered == host->size) { 521 if (data->bytes_xfered == host->size)
530 wbsd_kunmap_sg(host);
531 return; 522 return;
532 }
533 523
534 /* 524 /*
535 * End of scatter list entry? 525 * End of scatter list entry?
536 */ 526 */
537 if (host->remain == 0) { 527 if (host->remain == 0) {
538 wbsd_kunmap_sg(host);
539
540 /* 528 /*
541 * Get next entry. Check if last. 529 * Get next entry. Check if last.
542 */ 530 */
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
554 return; 542 return;
555 } 543 }
556 544
557 buffer = wbsd_kmap_sg(host); 545 buffer = wbsd_sg_to_buffer(host);
558 } 546 }
559 } 547 }
560 } 548 }
561 549
562 wbsd_kunmap_sg(host);
563
564 /* 550 /*
565 * This is a very dirty hack to solve a 551 * This is a very dirty hack to solve a
566 * hardware problem. The chip doesn't trigger 552 * hardware problem. The chip doesn't trigger
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
583 if (data->bytes_xfered == host->size) 569 if (data->bytes_xfered == host->size)
584 return; 570 return;
585 571
586 buffer = wbsd_kmap_sg(host) + host->offset; 572 buffer = wbsd_sg_to_buffer(host) + host->offset;
587 573
588 /* 574 /*
589 * Fill the fifo. This has a tendency to loop longer 575 * Fill the fifo. This has a tendency to loop longer
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
612 /* 598 /*
613 * Transfer done? 599 * Transfer done?
614 */ 600 */
615 if (data->bytes_xfered == host->size) { 601 if (data->bytes_xfered == host->size)
616 wbsd_kunmap_sg(host);
617 return; 602 return;
618 }
619 603
620 /* 604 /*
621 * End of scatter list entry? 605 * End of scatter list entry?
622 */ 606 */
623 if (host->remain == 0) { 607 if (host->remain == 0) {
624 wbsd_kunmap_sg(host);
625
626 /* 608 /*
627 * Get next entry. Check if last. 609 * Get next entry. Check if last.
628 */ 610 */
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
640 return; 622 return;
641 } 623 }
642 624
643 buffer = wbsd_kmap_sg(host); 625 buffer = wbsd_sg_to_buffer(host);
644 } 626 }
645 } 627 }
646 } 628 }
647 629
648 wbsd_kunmap_sg(host);
649
650 /* 630 /*
651 * The controller stops sending interrupts for 631 * The controller stops sending interrupts for
652 * 'FIFO empty' under certain conditions. So we 632 * 'FIFO empty' under certain conditions. So we
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
910 */ 890 */
911 if (cmd->data && (cmd->error == MMC_ERR_NONE)) { 891 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
912 /* 892 /*
893 * The hardware is so delightfully stupid that it has a list
894 * of "data" commands. If a command isn't on this list, it'll
895 * just go back to the idle state and won't send any data
896 * interrupts.
897 */
898 switch (cmd->opcode) {
899 case 11:
900 case 17:
901 case 18:
902 case 20:
903 case 24:
904 case 25:
905 case 26:
906 case 27:
907 case 30:
908 case 42:
909 case 56:
910 break;
911
912 /* ACMDs. We don't keep track of state, so we just treat them
913 * like any other command. */
914 case 51:
915 break;
916
917 default:
918#ifdef CONFIG_MMC_DEBUG
919 printk(KERN_WARNING "%s: Data command %d is not "
920 "supported by this controller.\n",
921 mmc_hostname(host->mmc), cmd->opcode);
922#endif
923 cmd->data->error = MMC_ERR_INVALID;
924
925 if (cmd->data->stop)
926 wbsd_send_command(host, cmd->data->stop);
927
928 goto done;
929 };
930
931 /*
913 * Dirty fix for hardware bug. 932 * Dirty fix for hardware bug.
914 */ 933 */
915 if (host->dma == -1) 934 if (host->dma == -1)
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1343 mmc->max_phys_segs = 128; 1362 mmc->max_phys_segs = 128;
1344 1363
1345 /* 1364 /*
1346 * Maximum number of sectors in one transfer. Also limited by 64kB 1365 * Maximum request size. Also limited by 64KiB buffer.
1347 * buffer.
1348 */ 1366 */
1349 mmc->max_sectors = 128; 1367 mmc->max_req_size = 65536;
1350 1368
1351 /* 1369 /*
1352 * Maximum segment size. Could be one segment with the maximum number 1370 * Maximum segment size. Could be one segment with the maximum number
1353 * of segments. 1371 * of bytes.
1372 */
1373 mmc->max_seg_size = mmc->max_req_size;
1374
1375 /*
1376 * Maximum block size. We have 12 bits (= 4095) but have to subtract
1377 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
1378 */
1379 mmc->max_blk_size = 4087;
1380
1381 /*
1382 * Maximum block count. There is no real limit so the maximum
1383 * request size will be the only restriction.
1354 */ 1384 */
1355 mmc->max_seg_size = mmc->max_sectors * 512; 1385 mmc->max_blk_count = mmc->max_req_size;
1356 1386
1357 dev_set_drvdata(dev, mmc); 1387 dev_set_drvdata(dev, mmc);
1358 1388
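
Two of the wbsd additions above are worth spelling out: the opcode whitelist exists because the controller only raises data interrupts for commands it recognises as data commands, and the 4087-byte block limit follows from the 12-bit size field having to cover the CRC16 of each data line as well as the payload. Reading the "4*2" in the comment as four DAT lines times two CRC bytes is an interpretation, not a datasheet quote; a worked example of the arithmetic:

        #include <stdio.h>

        int main(void)
        {
                unsigned int reg_max   = (1u << 12) - 1;  /* 12-bit size field: 4095 */
                unsigned int crc_bytes = 4 * 2;           /* CRC16 per DAT line, 4-bit bus */

                printf("max_blk_size = %u\n", reg_max - crc_bytes);  /* 4087 */
                return 0;
        }
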
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
index 6072993f01e3..d06718b0e2ab 100644
--- a/drivers/mmc/wbsd.h
+++ b/drivers/mmc/wbsd.h
@@ -154,7 +154,6 @@ struct wbsd_host
154 154
155 struct scatterlist* cur_sg; /* Current SG entry */ 155 struct scatterlist* cur_sg; /* Current SG entry */
156 unsigned int num_sg; /* Number of entries left */ 156 unsigned int num_sg; /* Number of entries left */
157 void* mapped_sg; /* vaddr of mapped sg */
158 157
159 unsigned int offset; /* Offset into current entry */ 158 unsigned int offset; /* Offset into current entry */
160 unsigned int remain; /* Data left in curren entry */ 159 unsigned int remain; /* Data left in curren entry */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index bd1faebf61a0..fca978fb158e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -773,13 +773,13 @@ static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
773 goto out; 773 goto out;
774 774
775 table = obj->buffer.pointer; 775 table = obj->buffer.pointer;
776 switch (((acpi_table_entry_header *)table)->type) { 776 switch (((struct acpi_subtable_header *)table)->type) {
777 case ACPI_MADT_IOSAPIC: 777 case ACPI_MADT_TYPE_IO_SAPIC:
778 *gsi_base = ((struct acpi_table_iosapic *)table)->global_irq_base; 778 *gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base;
779 result = 0; 779 result = 0;
780 break; 780 break;
781 case ACPI_MADT_IOAPIC: 781 case ACPI_MADT_TYPE_IO_APIC:
782 *gsi_base = ((struct acpi_table_ioapic *)table)->global_irq_base; 782 *gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base;
783 result = 0; 783 result = 0;
784 break; 784 break;
785 default: 785 default:
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 5d188c558386..78cf0711d1fa 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -28,6 +28,8 @@
28#include <asm/sn/sn_feature_sets.h> 28#include <asm/sn/sn_feature_sets.h>
29#include <asm/sn/sn_sal.h> 29#include <asm/sn/sn_sal.h>
30#include <asm/sn/types.h> 30#include <asm/sn/types.h>
31#include <linux/acpi.h>
32#include <asm/sn/acpi.h>
31 33
32#include "../pci.h" 34#include "../pci.h"
33 35
@@ -35,14 +37,17 @@ MODULE_LICENSE("GPL");
35MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)"); 37MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
36MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver"); 38MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
37 39
38#define PCIIO_ASIC_TYPE_TIOCA 4 40
41/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
39#define PCI_SLOT_ALREADY_UP 2 /* slot already up */ 42#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
40#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */ 43#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
41#define PCI_L1_ERR 7 /* L1 console command error */ 44#define PCI_L1_ERR 7 /* L1 console command error */
42#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */ 45#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
46
47
48#define PCIIO_ASIC_TYPE_TIOCA 4
43#define PCI_L1_QSIZE 128 /* our L1 message buffer size */ 49#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
44#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */ 50#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
45#define SGI_HOTPLUG_PROM_REV 0x0430 /* Min. required PROM version */
46#define SN_SLOT_NAME_SIZE 33 /* size of name string */ 51#define SN_SLOT_NAME_SIZE 33 /* size of name string */
47 52
48/* internal list head */ 53/* internal list head */
@@ -227,7 +232,7 @@ static void sn_bus_free_data(struct pci_dev *dev)
227} 232}
228 233
229static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot, 234static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
230 int device_num) 235 int device_num, char **ssdt)
231{ 236{
232 struct slot *slot = bss_hotplug_slot->private; 237 struct slot *slot = bss_hotplug_slot->private;
233 struct pcibus_info *pcibus_info; 238 struct pcibus_info *pcibus_info;
@@ -240,7 +245,8 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
240 * Power-on and initialize the slot in the SN 245 * Power-on and initialize the slot in the SN
241 * PCI infrastructure. 246 * PCI infrastructure.
242 */ 247 */
243 rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp); 248 rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
249
244 250
245 if (rc == PCI_SLOT_ALREADY_UP) { 251 if (rc == PCI_SLOT_ALREADY_UP) {
246 dev_dbg(slot->pci_bus->self, "is already active\n"); 252 dev_dbg(slot->pci_bus->self, "is already active\n");
@@ -335,6 +341,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
335 int func, num_funcs; 341 int func, num_funcs;
336 int new_ppb = 0; 342 int new_ppb = 0;
337 int rc; 343 int rc;
344 char *ssdt = NULL;
338 void pcibios_fixup_device_resources(struct pci_dev *); 345 void pcibios_fixup_device_resources(struct pci_dev *);
339 346
340 /* Serialize the Linux PCI infrastructure */ 347 /* Serialize the Linux PCI infrastructure */
@@ -342,14 +349,29 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
342 349
343 /* 350 /*
344 * Power-on and initialize the slot in the SN 351 * Power-on and initialize the slot in the SN
345 * PCI infrastructure. 352 * PCI infrastructure. Also, retrieve the ACPI SSDT
353 * table for the slot (if ACPI capable PROM).
346 */ 354 */
347 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); 355 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
348 if (rc) { 356 if (rc) {
349 mutex_unlock(&sn_hotplug_mutex); 357 mutex_unlock(&sn_hotplug_mutex);
350 return rc; 358 return rc;
351 } 359 }
352 360
361 if (ssdt)
362 ssdt = __va(ssdt);
363 /* Add the new SSDT for the slot to the ACPI namespace */
364 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
365 acpi_status ret;
366
367 ret = acpi_load_table((struct acpi_table_header *)ssdt);
368 if (ACPI_FAILURE(ret)) {
369 printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
370 __FUNCTION__, ret);
371 /* try to continue on */
372 }
373 }
374
353 num_funcs = pci_scan_slot(slot->pci_bus, 375 num_funcs = pci_scan_slot(slot->pci_bus,
354 PCI_DEVFN(slot->device_num + 1, 0)); 376 PCI_DEVFN(slot->device_num + 1, 0));
355 if (!num_funcs) { 377 if (!num_funcs) {
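The new ssdt argument hands back the slot's SSDT from the PROM as a physical address, which is why enable_slot() converts it with __va() before giving it to ACPICA. A condensed sketch of that step follows; the helper name is made up, and it relies only on the acpi_load_table(struct acpi_table_header *) prototype the patch itself uses (the patch logs the failure and continues, as does the sketch):

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <asm/page.h>

/* Load a slot SSDT returned by the PROM (physical address) into ACPI. */
static void sn_hotplug_load_ssdt(char *ssdt_phys)
{
	acpi_status status;

	if (!ssdt_phys)
		return;
	/* The PROM returns a physical address; map it before use. */
	status = acpi_load_table((struct acpi_table_header *)__va(ssdt_phys));
	if (ACPI_FAILURE(status))
		printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
		       __FUNCTION__, status);	/* log and continue */
}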
@@ -374,7 +396,10 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
374 * pdi_host_pcidev_info). 396 * pdi_host_pcidev_info).
375 */ 397 */
376 pcibios_fixup_device_resources(dev); 398 pcibios_fixup_device_resources(dev);
377 sn_pci_fixup_slot(dev); 399 if (SN_ACPI_BASE_SUPPORT())
400 sn_acpi_slot_fixup(dev);
401 else
402 sn_io_slot_fixup(dev);
378 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 403 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
379 unsigned char sec_bus; 404 unsigned char sec_bus;
380 pci_read_config_byte(dev, PCI_SECONDARY_BUS, 405 pci_read_config_byte(dev, PCI_SECONDARY_BUS,
@@ -388,6 +413,63 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
388 } 413 }
389 } 414 }
390 415
416 /*
417 * Add the slot's devices to the ACPI infrastructure */
418 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
419 unsigned long adr;
420 struct acpi_device *pdevice;
421 struct acpi_device *device;
422 acpi_handle phandle;
423 acpi_handle chandle = NULL;
424 acpi_handle rethandle;
425 acpi_status ret;
426
427 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
428
429 if (acpi_bus_get_device(phandle, &pdevice)) {
430 dev_dbg(slot->pci_bus->self,
431 "no parent device, assuming NULL\n");
432 pdevice = NULL;
433 }
434
435 /*
436 * Walk the rootbus node's immediate children looking for
437 * the slot's device node(s). There can be more than
438 * one for multifunction devices.
439 */
440 for (;;) {
441 rethandle = NULL;
442 ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
443 phandle, chandle,
444 &rethandle);
445
446 if (ret == AE_NOT_FOUND || rethandle == NULL)
447 break;
448
449 chandle = rethandle;
450
451 ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
452 NULL, &adr);
453
454 if (ACPI_SUCCESS(ret) &&
455 (adr>>16) == (slot->device_num + 1)) {
456
457 ret = acpi_bus_add(&device, pdevice, chandle,
458 ACPI_BUS_TYPE_DEVICE);
459 if (ACPI_FAILURE(ret)) {
460 printk(KERN_ERR "%s: acpi_bus_add "
461 "failed (0x%x) for slot %d "
462 "func %d\n", __FUNCTION__,
463 ret, (int)(adr>>16),
464 (int)(adr&0xffff));
465 /* try to continue on */
466 } else {
467 acpi_bus_start(device);
468 }
469 }
470 }
471 }
472
391 /* Call the driver for the new device */ 473 /* Call the driver for the new device */
392 pci_bus_add_devices(slot->pci_bus); 474 pci_bus_add_devices(slot->pci_bus);
393 /* Call the drivers for the new devices subordinate to PPB */ 475 /* Call the drivers for the new devices subordinate to PPB */
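The walk added above is the usual way to locate a slot's device node(s) under the root-bus handle: iterate the immediate ACPI_TYPE_DEVICE children with acpi_get_next_object(), evaluate _ADR on each, and compare the upper 16 bits (the PCI device number) with the slot. A stripped-down sketch of just the matching step; the helper name is invented, the integer type follows the acpi_evaluate_integer() prototype used in the hunk, and unlike the patch it stops at the first match instead of continuing for multifunction devices:

#include <linux/acpi.h>

/* Find the child ACPI handle whose _ADR device number equals 'devnum'. */
static acpi_handle find_slot_handle(acpi_handle parent, unsigned int devnum)
{
	acpi_handle child = NULL;
	acpi_handle next;
	unsigned long adr;
	acpi_status status;

	for (;;) {
		next = NULL;
		status = acpi_get_next_object(ACPI_TYPE_DEVICE, parent,
					      child, &next);
		if (status == AE_NOT_FOUND || next == NULL)
			break;
		child = next;

		status = acpi_evaluate_integer(child, METHOD_NAME__ADR,
					       NULL, &adr);
		/* For PCI, _ADR is (device << 16) | function. */
		if (ACPI_SUCCESS(status) && (adr >> 16) == devnum)
			return child;
	}
	return NULL;
}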
@@ -412,6 +494,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
412 struct pci_dev *dev; 494 struct pci_dev *dev;
413 int func; 495 int func;
414 int rc; 496 int rc;
497 acpi_owner_id ssdt_id = 0;
415 498
416 /* Acquire update access to the bus */ 499 /* Acquire update access to the bus */
417 mutex_lock(&sn_hotplug_mutex); 500 mutex_lock(&sn_hotplug_mutex);
@@ -422,6 +505,52 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
422 if (rc) 505 if (rc)
423 goto leaving; 506 goto leaving;
424 507
508 /* free the ACPI resources for the slot */
509 if (SN_ACPI_BASE_SUPPORT() &&
510 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
511 unsigned long adr;
512 struct acpi_device *device;
513 acpi_handle phandle;
514 acpi_handle chandle = NULL;
515 acpi_handle rethandle;
516 acpi_status ret;
517
518 /* Get the rootbus node pointer */
519 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
520
521 /*
522 * Walk the rootbus node's immediate children looking for
523 * the slot's device node(s). There can be more than
524 * one for multifunction devices.
525 */
526 for (;;) {
527 rethandle = NULL;
528 ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
529 phandle, chandle,
530 &rethandle);
531
532 if (ret == AE_NOT_FOUND || rethandle == NULL)
533 break;
534
535 chandle = rethandle;
536
537 ret = acpi_evaluate_integer(chandle,
538 METHOD_NAME__ADR,
539 NULL, &adr);
540 if (ACPI_SUCCESS(ret) &&
541 (adr>>16) == (slot->device_num + 1)) {
542 /* retain the owner id */
543 acpi_get_id(chandle, &ssdt_id);
544
545 ret = acpi_bus_get_device(chandle,
546 &device);
547 if (ACPI_SUCCESS(ret))
548 acpi_bus_trim(device, 1);
549 }
550 }
551
552 }
553
425 /* Free the SN resources assigned to the Linux device.*/ 554 /* Free the SN resources assigned to the Linux device.*/
426 for (func = 0; func < 8; func++) { 555 for (func = 0; func < 8; func++) {
427 dev = pci_get_slot(slot->pci_bus, 556 dev = pci_get_slot(slot->pci_bus,
@@ -434,6 +563,18 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
434 } 563 }
435 } 564 }
436 565
566 /* Remove the SSDT for the slot from the ACPI namespace */
567 if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
568 acpi_status ret;
569 ret = acpi_unload_table_id(ssdt_id);
570 if (ACPI_FAILURE(ret)) {
571 printk(KERN_ERR "%s: acpi_unload_table_id "
572 "failed (0x%x) for id %d\n",
573 __FUNCTION__, ret, ssdt_id);
574 /* try to continue on */
575 }
576 }
577
437 /* free the collected sysdata pointers */ 578 /* free the collected sysdata pointers */
438 sn_bus_free_sysdata(); 579 sn_bus_free_sysdata();
439 580
diff --git a/drivers/pnp/pnpacpi/Kconfig b/drivers/pnp/pnpacpi/Kconfig
index b1854171b963..ad27e5e0101f 100644
--- a/drivers/pnp/pnpacpi/Kconfig
+++ b/drivers/pnp/pnpacpi/Kconfig
@@ -2,8 +2,8 @@
2# Plug and Play ACPI configuration 2# Plug and Play ACPI configuration
3# 3#
4config PNPACPI 4config PNPACPI
5 bool "Plug and Play ACPI support (EXPERIMENTAL)" 5 bool "Plug and Play ACPI support"
6 depends on PNP && ACPI && EXPERIMENTAL 6 depends on PNP && ACPI
7 default y 7 default y
8 ---help--- 8 ---help---
9 Linux uses the PNPACPI to autodetect built-in 9 Linux uses the PNPACPI to autodetect built-in
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index d42015c382af..2065e74bb63f 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Some code is based on pnpbios_core.c 4 * Some code is based on pnpbios_core.c
5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com> 5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com>
6 * 6 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
7 */ 8 */
8 9
9#include <linux/pnp.h> 10#include <linux/pnp.h>
@@ -21,18 +22,21 @@ static const struct pnp_device_id pnp_dev_table[] = {
21 { "", 0 } 22 { "", 0 }
22}; 23};
23 24
24static void reserve_ioport_range(char *pnpid, int start, int end) 25static void reserve_range(char *pnpid, int start, int end, int port)
25{ 26{
26 struct resource *res; 27 struct resource *res;
27 char *regionid; 28 char *regionid;
28 29
29 regionid = kmalloc(16, GFP_KERNEL); 30 regionid = kmalloc(16, GFP_KERNEL);
30 if ( regionid == NULL ) 31 if (regionid == NULL)
31 return; 32 return;
32 snprintf(regionid, 16, "pnp %s", pnpid); 33 snprintf(regionid, 16, "pnp %s", pnpid);
33 res = request_region(start,end-start+1,regionid); 34 if (port)
34 if ( res == NULL ) 35 res = request_region(start,end-start+1,regionid);
35 kfree( regionid ); 36 else
37 res = request_mem_region(start,end-start+1,regionid);
38 if (res == NULL)
39 kfree(regionid);
36 else 40 else
37 res->flags &= ~IORESOURCE_BUSY; 41 res->flags &= ~IORESOURCE_BUSY;
38 /* 42 /*
@@ -41,26 +45,20 @@ static void reserve_ioport_range(char *pnpid, int start, int end)
41 * have double reservations. 45 * have double reservations.
42 */ 46 */
43 printk(KERN_INFO 47 printk(KERN_INFO
44 "pnp: %s: ioport range 0x%x-0x%x %s reserved\n", 48 "pnp: %s: %s range 0x%x-0x%x %s reserved\n",
45 pnpid, start, end, 49 pnpid, port ? "ioport" : "iomem", start, end,
46 NULL != res ? "has been" : "could not be" 50 NULL != res ? "has been" : "could not be");
47 );
48
49 return;
50} 51}
51 52
52static void reserve_resources_of_dev( struct pnp_dev *dev ) 53static void reserve_resources_of_dev(struct pnp_dev *dev)
53{ 54{
54 int i; 55 int i;
55 56
56 for (i=0;i<PNP_MAX_PORT;i++) { 57 for (i = 0; i < PNP_MAX_PORT; i++) {
57 if (!pnp_port_valid(dev, i)) 58 if (!pnp_port_valid(dev, i))
58 /* end of resources */
59 continue; 59 continue;
60 if (pnp_port_start(dev, i) == 0) 60 if (pnp_port_start(dev, i) == 0)
61 /* disabled */ 61 continue; /* disabled */
62 /* Do nothing */
63 continue;
64 if (pnp_port_start(dev, i) < 0x100) 62 if (pnp_port_start(dev, i) < 0x100)
65 /* 63 /*
66 * Below 0x100 is only standard PC hardware 64 * Below 0x100 is only standard PC hardware
@@ -72,14 +70,18 @@ static void reserve_resources_of_dev( struct pnp_dev *dev )
72 */ 70 */
73 continue; 71 continue;
74 if (pnp_port_end(dev, i) < pnp_port_start(dev, i)) 72 if (pnp_port_end(dev, i) < pnp_port_start(dev, i))
75 /* invalid endpoint */ 73 continue; /* invalid */
76 /* Do nothing */ 74
75 reserve_range(dev->dev.bus_id, pnp_port_start(dev, i),
76 pnp_port_end(dev, i), 1);
77 }
78
79 for (i = 0; i < PNP_MAX_MEM; i++) {
80 if (!pnp_mem_valid(dev, i))
77 continue; 81 continue;
78 reserve_ioport_range( 82
79 dev->dev.bus_id, 83 reserve_range(dev->dev.bus_id, pnp_mem_start(dev, i),
80 pnp_port_start(dev, i), 84 pnp_mem_end(dev, i), 0);
81 pnp_port_end(dev, i)
82 );
83 } 85 }
84 86
85 return; 87 return;
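The rewrite above folds the old reserve_ioport_range() and a new memory variant into one helper: the port flag selects request_region() versus request_mem_region(), and the IORESOURCE_BUSY bit is cleared afterwards so a driver that later claims the range does not collide with this placeholder reservation. A generic sketch of that pattern with illustrative names, not taken from the driver:

#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/kernel.h>

/* Reserve [start, end] as an I/O-port or memory region, non-exclusively. */
static void reserve_pnp_range(const char *id, unsigned long start,
			      unsigned long end, int port)
{
	struct resource *res;
	char *name;

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		return;
	snprintf(name, 16, "pnp %s", id);
	if (port)
		res = request_region(start, end - start + 1, name);
	else
		res = request_mem_region(start, end - start + 1, name);
	if (!res)
		kfree(name);		/* reservation failed, drop the label */
	else
		res->flags &= ~IORESOURCE_BUSY;	/* let a driver claim it later */
}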
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index ae89b9b88743..165af398fdea 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -103,14 +103,8 @@ config CCW_CONSOLE
103 depends on TN3215_CONSOLE || TN3270_CONSOLE 103 depends on TN3215_CONSOLE || TN3270_CONSOLE
104 default y 104 default y
105 105
106config SCLP
107 bool "Support for SCLP"
108 help
109 Include support for the SCLP interface to the service element.
110
111config SCLP_TTY 106config SCLP_TTY
112 bool "Support for SCLP line mode terminal" 107 bool "Support for SCLP line mode terminal"
113 depends on SCLP
114 help 108 help
115 Include support for IBM SCLP line-mode terminals. 109 Include support for IBM SCLP line-mode terminals.
116 110
@@ -123,7 +117,6 @@ config SCLP_CONSOLE
123 117
124config SCLP_VT220_TTY 118config SCLP_VT220_TTY
125 bool "Support for SCLP VT220-compatible terminal" 119 bool "Support for SCLP VT220-compatible terminal"
126 depends on SCLP
127 help 120 help
128 Include support for an IBM SCLP VT220-compatible terminal. 121 Include support for an IBM SCLP VT220-compatible terminal.
129 122
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE
136 129
137config SCLP_CPI 130config SCLP_CPI
138 tristate "Control-Program Identification" 131 tristate "Control-Program Identification"
139 depends on SCLP
140 help 132 help
141 This option enables the hardware console interface for system 133 This option enables the hardware console interface for system
142 identification. This is commonly used for workload management and 134 identification. This is commonly used for workload management and
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 9803c9352d78..5a888704a8d0 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6
5obj-y += s390mach.o sysinfo.o s390_rdev.o 7obj-y += s390mach.o sysinfo.o s390_rdev.o
6obj-y += cio/ block/ char/ crypto/ net/ scsi/ 8obj-y += cio/ block/ char/ crypto/ net/ scsi/
7 9
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 492b68bcd7cc..eb5dc62f0d9c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,7 @@
37 */ 37 */
38debug_info_t *dasd_debug_area; 38debug_info_t *dasd_debug_area;
39struct dasd_discipline *dasd_diag_discipline_pointer; 39struct dasd_discipline *dasd_diag_discipline_pointer;
40void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
40 41
41MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
42MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device);
51static void dasd_setup_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_device * device);
52static void dasd_free_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_device * device);
53static void dasd_flush_request_queue(struct dasd_device *); 54static void dasd_flush_request_queue(struct dasd_device *);
54static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
55static int dasd_flush_ccw_queue(struct dasd_device *, int); 55static int dasd_flush_ccw_queue(struct dasd_device *, int);
56static void dasd_tasklet(struct dasd_device *); 56static void dasd_tasklet(struct dasd_device *);
57static void do_kick_device(struct work_struct *); 57static void do_kick_device(struct work_struct *);
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
483/* 483/*
484 * Add profiling information for cqr before execution. 484 * Add profiling information for cqr before execution.
485 */ 485 */
486static inline void 486static void
487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
488 struct request *req) 488 struct request *req)
489{ 489{
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
505/* 505/*
506 * Add profiling information for cqr after execution. 506 * Add profiling information for cqr after execution.
507 */ 507 */
508static inline void 508static void
509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
510 struct request *req) 510 struct request *req)
511{ 511{
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1022 irb->scsw.cstat == 0 && 1022 irb->scsw.cstat == 0 &&
1023 !irb->esw.esw0.erw.cons) 1023 !irb->esw.esw0.erw.cons)
1024 era = dasd_era_none; 1024 era = dasd_era_none;
1025 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
1026 era = dasd_era_fatal; /* don't recover this request */
1027 else if (irb->esw.esw0.erw.cons) 1025 else if (irb->esw.esw0.erw.cons)
1028 era = device->discipline->examine_error(cqr, irb); 1026 era = device->discipline->examine_error(cqr, irb);
1029 else 1027 else
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1104/* 1102/*
1105 * Process ccw request queue. 1103 * Process ccw request queue.
1106 */ 1104 */
1107static inline void 1105static void
1108__dasd_process_ccw_queue(struct dasd_device * device, 1106__dasd_process_ccw_queue(struct dasd_device * device,
1109 struct list_head *final_queue) 1107 struct list_head *final_queue)
1110{ 1108{
@@ -1127,7 +1125,9 @@ restart:
1127 cqr->status = DASD_CQR_FAILED; 1125 cqr->status = DASD_CQR_FAILED;
1128 cqr->stopclk = get_clock(); 1126 cqr->stopclk = get_clock();
1129 } else { 1127 } else {
1130 if (cqr->irb.esw.esw0.erw.cons) { 1128 if (cqr->irb.esw.esw0.erw.cons &&
1129 test_bit(DASD_CQR_FLAGS_USE_ERP,
1130 &cqr->flags)) {
1131 erp_fn = device->discipline-> 1131 erp_fn = device->discipline->
1132 erp_action(cqr); 1132 erp_action(cqr);
1133 erp_fn(cqr); 1133 erp_fn(cqr);
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1181/* 1181/*
1182 * Fetch requests from the block device queue. 1182 * Fetch requests from the block device queue.
1183 */ 1183 */
1184static inline void 1184static void
1185__dasd_process_blk_queue(struct dasd_device * device) 1185__dasd_process_blk_queue(struct dasd_device * device)
1186{ 1186{
1187 request_queue_t *queue; 1187 request_queue_t *queue;
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device)
1232 if (IS_ERR(cqr)) { 1232 if (IS_ERR(cqr)) {
1233 if (PTR_ERR(cqr) == -ENOMEM) 1233 if (PTR_ERR(cqr) == -ENOMEM)
1234 break; /* terminate request queue loop */ 1234 break; /* terminate request queue loop */
1235 if (PTR_ERR(cqr) == -EAGAIN) {
1236 /*
1237 * The current request cannot be build right
1238 * now, we have to try later. If this request
1239 * is the head-of-queue we stop the device
1240 * for 1/2 second.
1241 */
1242 if (!list_empty(&device->ccw_queue))
1243 break;
1244 device->stopped |= DASD_STOPPED_PENDING;
1245 dasd_set_timer(device, HZ/2);
1246 break;
1247 }
1235 DBF_DEV_EVENT(DBF_ERR, device, 1248 DBF_DEV_EVENT(DBF_ERR, device,
1236 "CCW creation failed (rc=%ld) " 1249 "CCW creation failed (rc=%ld) "
1237 "on request %p", 1250 "on request %p",
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device)
1254 * Take a look at the first request on the ccw queue and check 1267 * Take a look at the first request on the ccw queue and check
1255 * if it reached its expire time. If so, terminate the IO. 1268 * if it reached its expire time. If so, terminate the IO.
1256 */ 1269 */
1257static inline void 1270static void
1258__dasd_check_expire(struct dasd_device * device) 1271__dasd_check_expire(struct dasd_device * device)
1259{ 1272{
1260 struct dasd_ccw_req *cqr; 1273 struct dasd_ccw_req *cqr;
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device)
1285 * Take a look at the first request on the ccw queue and check 1298 * Take a look at the first request on the ccw queue and check
1286 * if it needs to be started. 1299 * if it needs to be started.
1287 */ 1300 */
1288static inline void 1301static void
1289__dasd_start_head(struct dasd_device * device) 1302__dasd_start_head(struct dasd_device * device)
1290{ 1303{
1291 struct dasd_ccw_req *cqr; 1304 struct dasd_ccw_req *cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4d01040c2c63..8b9d68f6e016 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
170 /* log the erp chain if fatal error occurred */ 170 /* log the erp chain if fatal error occurred */
171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { 171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
172 dasd_log_sense(cqr, irb); 172 dasd_log_sense(cqr, irb);
173 dasd_log_ccw(cqr, 0, irb->scsw.cpa);
174 } 173 }
175 174
176 return era; 175 return era;
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2640 2639
2641 struct dasd_ccw_req *erp = NULL; 2640 struct dasd_ccw_req *erp = NULL;
2642 struct dasd_device *device = cqr->device; 2641 struct dasd_device *device = cqr->device;
2643 __u32 cpa = cqr->irb.scsw.cpa;
2644 struct dasd_ccw_req *temp_erp = NULL; 2642 struct dasd_ccw_req *temp_erp = NULL;
2645 2643
2646 if (device->features & DASD_FEATURE_ERPLOG) { 2644 if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2706 } 2704 }
2707 } 2705 }
2708 2706
2709 if (erp->status == DASD_CQR_FAILED)
2710 dasd_log_ccw(erp, 1, cpa);
2711
2712 /* enqueue added ERP request */ 2707 /* enqueue added ERP request */
2713 if (erp->status == DASD_CQR_FILLED) { 2708 if (erp->status == DASD_CQR_FILLED) {
2714 erp->status = DASD_CQR_QUEUED; 2709 erp->status = DASD_CQR_QUEUED;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 5943266152f5..ed70852cc915 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup);
136/* 136/*
137 * Read a device busid/devno from a string. 137 * Read a device busid/devno from a string.
138 */ 138 */
139static inline int 139static int
140dasd_busid(char **str, int *id0, int *id1, int *devno) 140dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 141{
142 int val, old_style; 142 int val, old_style;
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
182 * only one: "ro" for read-only devices. The default feature set 182 * only one: "ro" for read-only devices. The default feature set
183 * is empty (value 0). 183 * is empty (value 0).
184 */ 184 */
185static inline int 185static int
186dasd_feature_list(char *str, char **endp) 186dasd_feature_list(char *str, char **endp)
187{ 187{
188 int features, len, rc; 188 int features, len, rc;
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) {
341 return ERR_PTR(-EINVAL); 341 return ERR_PTR(-EINVAL);
342} 342}
343 343
344static inline char * 344static char *
345dasd_parse_next_element( char *parsestring ) { 345dasd_parse_next_element( char *parsestring ) {
346 char * residual_str; 346 char * residual_str;
347 residual_str = dasd_parse_keyword(parsestring); 347 residual_str = dasd_parse_keyword(parsestring);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 53db58a68617..ab782bb46ac1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
43#define DIAG_MAX_RETRIES 32 43#define DIAG_MAX_RETRIES 32
44#define DIAG_TIMEOUT 50 * HZ 44#define DIAG_TIMEOUT 50 * HZ
45 45
46struct dasd_discipline dasd_diag_discipline; 46static struct dasd_discipline dasd_diag_discipline;
47 47
48struct dasd_diag_private { 48struct dasd_diag_private {
49 struct dasd_diag_characteristics rdc_data; 49 struct dasd_diag_characteristics rdc_data;
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd)
90 * block offset. On success, return zero and set end_block to contain the 90 * block offset. On success, return zero and set end_block to contain the
91 * number of blocks on the device minus the specified offset. Return non-zero 91 * number of blocks on the device minus the specified offset. Return non-zero
92 * otherwise. */ 92 * otherwise. */
93static __inline__ int 93static inline int
94mdsk_init_io(struct dasd_device *device, unsigned int blocksize, 94mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
95 blocknum_t offset, blocknum_t *end_block) 95 blocknum_t offset, blocknum_t *end_block)
96{ 96{
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
117 117
118/* Remove block I/O environment for device. Return zero on success, non-zero 118/* Remove block I/O environment for device. Return zero on success, non-zero
119 * otherwise. */ 119 * otherwise. */
120static __inline__ int 120static inline int
121mdsk_term_io(struct dasd_device * device) 121mdsk_term_io(struct dasd_device * device)
122{ 122{
123 struct dasd_diag_private *private; 123 struct dasd_diag_private *private;
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
576 "dump sense not available for DIAG data"); 576 "dump sense not available for DIAG data");
577} 577}
578 578
579struct dasd_discipline dasd_diag_discipline = { 579static struct dasd_discipline dasd_diag_discipline = {
580 .owner = THIS_MODULE, 580 .owner = THIS_MODULE,
581 .name = "DIAG", 581 .name = "DIAG",
582 .ebcname = "DIAG", 582 .ebcname = "DIAG",
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fdaa471e845f..cecab2274a6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2)
134 return (d1 + (d2 - 1)) / d2; 134 return (d1 + (d2 - 1)) / d2;
135} 135}
136 136
137static inline int 137static unsigned int
138bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
139{
140 unsigned int fl1, fl2, int1, int2;
141 int bpr;
142
143 switch (rdc->formula) {
144 case 0x01:
145 fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
146 fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
147 ECKD_F1(rdc));
148 bpr = fl1 + fl2;
149 break;
150 case 0x02:
151 int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
152 int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
153 fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
154 ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
155 ECKD_F1(rdc));
156 fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
157 ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
158 ECKD_F1(rdc));
159 bpr = fl1 + fl2;
160 break;
161 default:
162 bpr = 0;
163 break;
164 }
165 return bpr;
166}
167
168static inline unsigned int
169bytes_per_track(struct dasd_eckd_characteristics *rdc)
170{
171 return *(unsigned int *) (rdc->byte_per_track) >> 8;
172}
173
174static inline unsigned int
175recs_per_track(struct dasd_eckd_characteristics * rdc, 138recs_per_track(struct dasd_eckd_characteristics * rdc,
176 unsigned int kl, unsigned int dl) 139 unsigned int kl, unsigned int dl)
177{ 140{
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
204 return 0; 167 return 0;
205} 168}
206 169
207static inline void 170static int
208check_XRC (struct ccw1 *de_ccw, 171check_XRC (struct ccw1 *de_ccw,
209 struct DE_eckd_data *data, 172 struct DE_eckd_data *data,
210 struct dasd_device *device) 173 struct dasd_device *device)
211{ 174{
212 struct dasd_eckd_private *private; 175 struct dasd_eckd_private *private;
176 int rc;
213 177
214 private = (struct dasd_eckd_private *) device->private; 178 private = (struct dasd_eckd_private *) device->private;
179 if (!private->rdc_data.facilities.XRC_supported)
180 return 0;
215 181
216 /* switch on System Time Stamp - needed for XRC Support */ 182 /* switch on System Time Stamp - needed for XRC Support */
217 if (private->rdc_data.facilities.XRC_supported) { 183 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
218 184 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
219 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
220 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
221
222 data->ep_sys_time = get_clock ();
223
224 de_ccw->count = sizeof (struct DE_eckd_data);
225 de_ccw->flags |= CCW_FLAG_SLI;
226 }
227 185
228 return; 186 rc = get_sync_clock(&data->ep_sys_time);
187 /* Ignore return code if sync clock is switched off. */
188 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0;
229 190
230} /* end check_XRC */ 191 de_ccw->count = sizeof (struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc;
194}
231 195
232static inline void 196static int
233define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, 197define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
234 int totrk, int cmd, struct dasd_device * device) 198 int totrk, int cmd, struct dasd_device * device)
235{ 199{
236 struct dasd_eckd_private *private; 200 struct dasd_eckd_private *private;
237 struct ch_t geo, beg, end; 201 struct ch_t geo, beg, end;
202 int rc = 0;
238 203
239 private = (struct dasd_eckd_private *) device->private; 204 private = (struct dasd_eckd_private *) device->private;
240 205
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
263 case DASD_ECKD_CCW_WRITE_KD_MT: 228 case DASD_ECKD_CCW_WRITE_KD_MT:
264 data->mask.perm = 0x02; 229 data->mask.perm = 0x02;
265 data->attributes.operation = private->attrib.operation; 230 data->attributes.operation = private->attrib.operation;
266 check_XRC (ccw, data, device); 231 rc = check_XRC (ccw, data, device);
267 break; 232 break;
268 case DASD_ECKD_CCW_WRITE_CKD: 233 case DASD_ECKD_CCW_WRITE_CKD:
269 case DASD_ECKD_CCW_WRITE_CKD_MT: 234 case DASD_ECKD_CCW_WRITE_CKD_MT:
270 data->attributes.operation = DASD_BYPASS_CACHE; 235 data->attributes.operation = DASD_BYPASS_CACHE;
271 check_XRC (ccw, data, device); 236 rc = check_XRC (ccw, data, device);
272 break; 237 break;
273 case DASD_ECKD_CCW_ERASE: 238 case DASD_ECKD_CCW_ERASE:
274 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 239 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
276 data->mask.perm = 0x3; 241 data->mask.perm = 0x3;
277 data->mask.auth = 0x1; 242 data->mask.auth = 0x1;
278 data->attributes.operation = DASD_BYPASS_CACHE; 243 data->attributes.operation = DASD_BYPASS_CACHE;
279 check_XRC (ccw, data, device); 244 rc = check_XRC (ccw, data, device);
280 break; 245 break;
281 default: 246 default:
282 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); 247 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
312 data->beg_ext.head = beg.head; 277 data->beg_ext.head = beg.head;
313 data->end_ext.cyl = end.cyl; 278 data->end_ext.cyl = end.cyl;
314 data->end_ext.head = end.head; 279 data->end_ext.head = end.head;
280 return rc;
315} 281}
316 282
317static inline void 283static void
318locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 284locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
319 int rec_on_trk, int no_rec, int cmd, 285 int rec_on_trk, int no_rec, int cmd,
320 struct dasd_device * device, int reclen) 286 struct dasd_device * device, int reclen)
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
548/* 514/*
549 * Build CP for Perform Subsystem Function - SSC. 515 * Build CP for Perform Subsystem Function - SSC.
550 */ 516 */
551struct dasd_ccw_req * 517static struct dasd_ccw_req *
552dasd_eckd_build_psf_ssc(struct dasd_device *device) 518dasd_eckd_build_psf_ssc(struct dasd_device *device)
553{ 519{
554 struct dasd_ccw_req *cqr; 520 struct dasd_ccw_req *cqr;
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1200 return cqr; 1166 return cqr;
1201 ccw = cqr->cpaddr; 1167 ccw = cqr->cpaddr;
1202 /* First ccw is define extent. */ 1168 /* First ccw is define extent. */
1203 define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); 1169 if (define_extent(ccw++, cqr->data, first_trk,
1170 last_trk, cmd, device) == -EAGAIN) {
1171 /* Clock not in sync and XRC is enabled. Try again later. */
1172 dasd_sfree_request(cqr, device);
1173 return ERR_PTR(-EAGAIN);
1174 }
1204 /* Build locate_record+read/write/ccws. */ 1175 /* Build locate_record+read/write/ccws. */
1205 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); 1176 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1206 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1177 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device)
1380 cqr->device = device; 1351 cqr->device = device;
1381 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1352 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1382 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1383 cqr->retries = 0; 1354 cqr->retries = 2; /* set retry counter to enable basic ERP */
1384 cqr->expires = 2 * HZ; 1355 cqr->expires = 2 * HZ;
1385 cqr->buildclk = get_clock(); 1356 cqr->buildclk = get_clock();
1386 cqr->status = DASD_CQR_FILLED; 1357 cqr->status = DASD_CQR_FILLED;
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1420 cqr->device = device; 1391 cqr->device = device;
1421 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1392 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1422 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1393 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1423 cqr->retries = 0; 1394 cqr->retries = 2; /* set retry counter to enable basic ERP */
1424 cqr->expires = 2 * HZ; 1395 cqr->expires = 2 * HZ;
1425 cqr->buildclk = get_clock(); 1396 cqr->buildclk = get_clock();
1426 cqr->status = DASD_CQR_FILLED; 1397 cqr->status = DASD_CQR_FILLED;
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1459 cqr->device = device; 1430 cqr->device = device;
1460 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1431 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1461 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1432 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1462 cqr->retries = 0; 1433 cqr->retries = 2; /* set retry counter to enable basic ERP */
1463 cqr->expires = 2 * HZ; 1434 cqr->expires = 2 * HZ;
1464 cqr->buildclk = get_clock(); 1435 cqr->buildclk = get_clock();
1465 cqr->status = DASD_CQR_FILLED; 1436 cqr->status = DASD_CQR_FILLED;
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
1609 * Dump the range of CCWs into 'page' buffer 1580 * Dump the range of CCWs into 'page' buffer
1610 * and return number of printed chars. 1581 * and return number of printed chars.
1611 */ 1582 */
1612static inline int 1583static int
1613dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 1584dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1614{ 1585{
1615 int len, count; 1586 int len, count;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index e0bf30ebb215..6cedc914077e 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = {
658 .owner = THIS_MODULE, 658 .owner = THIS_MODULE,
659}; 659};
660 660
661static struct miscdevice dasd_eer_dev = { 661static struct miscdevice *dasd_eer_dev = NULL;
662 .minor = MISC_DYNAMIC_MINOR,
663 .name = "dasd_eer",
664 .fops = &dasd_eer_fops,
665};
666 662
667int __init dasd_eer_init(void) 663int __init dasd_eer_init(void)
668{ 664{
669 int rc; 665 int rc;
670 666
671 rc = misc_register(&dasd_eer_dev); 667 dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
668 if (!dasd_eer_dev)
669 return -ENOMEM;
670
671 dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
672 dasd_eer_dev->name = "dasd_eer";
673 dasd_eer_dev->fops = &dasd_eer_fops;
674
675 rc = misc_register(dasd_eer_dev);
672 if (rc) { 676 if (rc) {
677 kfree(dasd_eer_dev);
678 dasd_eer_dev = NULL;
673 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " 679 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
674 "register misc device"); 680 "register misc device");
675 return rc; 681 return rc;
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void)
680 686
681void dasd_eer_exit(void) 687void dasd_eer_exit(void)
682{ 688{
683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0); 689 if (dasd_eer_dev) {
690 WARN_ON(misc_deregister(dasd_eer_dev) != 0);
691 kfree(dasd_eer_dev);
692 dasd_eer_dev = NULL;
693 }
684} 694}
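dasd_eer now allocates its miscdevice at init time instead of keeping a static definition, so the structure only exists while the device is registered and is freed again on deregistration or on a failed misc_register(). A self-contained sketch of that allocate/register/unregister pattern with placeholder names:

#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>

static struct file_operations example_fops;	/* placeholder fops */
static struct miscdevice *example_misc;

static int __init example_init(void)
{
	int rc;

	example_misc = kzalloc(sizeof(*example_misc), GFP_KERNEL);
	if (!example_misc)
		return -ENOMEM;
	example_misc->minor = MISC_DYNAMIC_MINOR;
	example_misc->name = "example";
	example_misc->fops = &example_fops;
	rc = misc_register(example_misc);
	if (rc) {
		/* registration failed: release the structure again */
		kfree(example_misc);
		example_misc = NULL;
	}
	return rc;
}

static void example_exit(void)
{
	if (example_misc) {
		misc_deregister(example_misc);
		kfree(example_misc);
		example_misc = NULL;
	}
}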
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 58a65097922b..caa5d91420f8 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
152 152
153} /* end default_erp_postaction */ 153} /* end default_erp_postaction */
154 154
155/*
156 * Print the hex dump of the memory used by a request. This includes
157 * all error recovery ccws that have been chained in from of the
158 * real request.
159 */
160static inline void
161hex_dump_memory(struct dasd_device *device, void *data, int len)
162{
163 int *pint;
164
165 pint = (int *) data;
166 while (len > 0) {
167 DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
168 pint, pint[0], pint[1], pint[2], pint[3]);
169 pint += 4;
170 len -= 16;
171 }
172}
173
174void 155void
175dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) 156dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
176{ 157{
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
182 device->discipline->dump_sense(device, cqr, irb); 163 device->discipline->dump_sense(device, cqr, irb);
183} 164}
184 165
185void
186dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
187{
188 struct dasd_device *device;
189 struct dasd_ccw_req *lcqr;
190 struct ccw1 *ccw;
191 int cplength;
192
193 device = cqr->device;
194 /* log the channel program */
195 for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
196 DEV_MESSAGE(KERN_ERR, device,
197 "(%s) ERP chain report for req: %p",
198 caller == 0 ? "EXAMINE" : "ACTION", lcqr);
199 hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
200
201 cplength = 1;
202 ccw = lcqr->cpaddr;
203 while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
204 cplength++;
205
206 if (cplength > 40) { /* log only parts of the CP */
207 DEV_MESSAGE(KERN_ERR, device, "%s",
208 "Start of channel program:");
209 hex_dump_memory(device, lcqr->cpaddr,
210 40*sizeof(struct ccw1));
211
212 DEV_MESSAGE(KERN_ERR, device, "%s",
213 "End of channel program:");
214 hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
215 10*sizeof(struct ccw1));
216 } else { /* log the whole CP */
217 DEV_MESSAGE(KERN_ERR, device, "%s",
218 "Channel program (complete):");
219 hex_dump_memory(device, lcqr->cpaddr,
220 cplength*sizeof(struct ccw1));
221 }
222
223 if (lcqr != cqr)
224 continue;
225
226 /*
227 * Log bytes arround failed CCW but only if we did
228 * not log the whole CP of the CCW is outside the
229 * logged CP.
230 */
231 if (cplength > 40 ||
232 ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
233 (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
234
235 DEV_MESSAGE(KERN_ERR, device,
236 "Failed CCW (%p) (area):",
237 (void *) (long) cpa);
238 hex_dump_memory(device, cqr->cpaddr - 10,
239 20*sizeof(struct ccw1));
240 }
241 }
242
243} /* end log_erp_chain */
244
245EXPORT_SYMBOL(dasd_default_erp_action); 166EXPORT_SYMBOL(dasd_default_erp_action);
246EXPORT_SYMBOL(dasd_default_erp_postaction); 167EXPORT_SYMBOL(dasd_default_erp_postaction);
247EXPORT_SYMBOL(dasd_alloc_erp_request); 168EXPORT_SYMBOL(dasd_alloc_erp_request);
248EXPORT_SYMBOL(dasd_free_erp_request); 169EXPORT_SYMBOL(dasd_free_erp_request);
249EXPORT_SYMBOL(dasd_log_sense); 170EXPORT_SYMBOL(dasd_log_sense);
250EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index b857fd5893fd..be0909e39226 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = {
75 .notify = dasd_generic_notify, 75 .notify = dasd_generic_notify,
76}; 76};
77 77
78static inline void 78static void
79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, 79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
80 int blksize, int beg, int nr) 80 int blksize, int beg, int nr)
81{ 81{
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
95 data->ext_end = nr - 1; 95 data->ext_end = nr - 1;
96} 96}
97 97
98static inline void 98static void
99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, 99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
100 int block_nr, int block_ct) 100 int block_nr, int block_ct)
101{ 101{
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d163632101d2..47ba4462708d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device)
147 */ 147 */
148 memset(&bpart, 0, sizeof(struct blkpg_partition)); 148 memset(&bpart, 0, sizeof(struct blkpg_partition));
149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); 149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
150 barg.data = (void __user *) &bpart; 150 barg.data = (void __force __user *) &bpart;
151 barg.op = BLKPG_DEL_PARTITION; 151 barg.op = BLKPG_DEL_PARTITION;
152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) 152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fb725e3b08fe..a2cc69e11410 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
559 struct dasd_device *); 559 struct dasd_device *);
560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
561void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 561void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
562void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
563 562
564/* externals in dasd_3370_erp.c */ 563/* externals in dasd_3370_erp.c */
565dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); 564dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bfa010f6dab2..8b7e11815d70 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL;
28static struct proc_dir_entry *dasd_devices_entry = NULL; 28static struct proc_dir_entry *dasd_devices_entry = NULL;
29static struct proc_dir_entry *dasd_statistics_entry = NULL; 29static struct proc_dir_entry *dasd_statistics_entry = NULL;
30 30
31static inline char * 31static char *
32dasd_get_user_string(const char __user *user_buf, size_t user_len) 32dasd_get_user_string(const char __user *user_buf, size_t user_len)
33{ 33{
34 char *buffer; 34 char *buffer;
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = {
154 .release = seq_release, 154 .release = seq_release,
155}; 155};
156 156
157static inline int 157static int
158dasd_calc_metrics(char *page, char **start, off_t off, 158dasd_calc_metrics(char *page, char **start, off_t off,
159 int count, int *eof, int len) 159 int count, int *eof, int len)
160{ 160{
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off,
167 return len; 167 return len;
168} 168}
169 169
170static inline char * 170static char *
171dasd_statistics_array(char *str, int *array, int shift) 171dasd_statistics_array(char *str, unsigned int *array, int shift)
172{ 172{
173 int i; 173 int i;
174 174
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index be9b05347b4f..1340451ea408 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev)
102 * device needs to be enqueued before the semaphore is 102 * device needs to be enqueued before the semaphore is
103 * freed. 103 * freed.
104 */ 104 */
105static inline int 105static int
106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) 106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
107{ 107{
108 int minor, found; 108 int minor, found;
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
230 SEGMENT_SHARED); 230 SEGMENT_SHARED);
231 if (rc < 0) { 231 if (rc < 0) {
232 BUG_ON(rc == -EINVAL); 232 BUG_ON(rc == -EINVAL);
233 if (rc == -EIO || rc == -ENOENT) 233 if (rc != -EAGAIN)
234 goto removeseg; 234 goto removeseg;
235 } else { 235 } else {
236 dev_info->is_shared = 1; 236 dev_info->is_shared = 1;
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
253 SEGMENT_EXCLUSIVE); 253 SEGMENT_EXCLUSIVE);
254 if (rc < 0) { 254 if (rc < 0) {
255 BUG_ON(rc == -EINVAL); 255 BUG_ON(rc == -EINVAL);
256 if (rc == -EIO || rc == -ENOENT) 256 if (rc != -EAGAIN)
257 goto removeseg; 257 goto removeseg;
258 } else { 258 } else {
259 dev_info->is_shared = 0; 259 dev_info->is_shared = 0;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c3e97b4fc186..293e667b50f2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,7 +2,8 @@
2# S/390 character devices 2# S/390 character devices
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o
6 7
7obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
8obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o
11 12
12obj-$(CONFIG_TN3215) += con3215.o 13obj-$(CONFIG_TN3215) += con3215.o
13 14
14obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o 15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o 16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o 17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 25b5d7a66417..9a328f14a641 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = {
1121 * 3215 tty registration code called from tty_init(). 1121 * 3215 tty registration code called from tty_init().
1122 * Most kernel services (incl. kmalloc) are available at this poimt. 1122 * Most kernel services (incl. kmalloc) are available at this poimt.
1123 */ 1123 */
1124int __init 1124static int __init
1125tty3215_init(void) 1125tty3215_init(void)
1126{ 1126{
1127 struct tty_driver *driver; 1127 struct tty_driver *driver;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 7566be890688..8e7f2d7633d6 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *);
69/* 69/*
70 * Setup timeout for a device. On timeout trigger an update. 70 * Setup timeout for a device. On timeout trigger an update.
71 */ 71 */
72void 72static void con3270_set_timer(struct con3270 *cp, int expires)
73con3270_set_timer(struct con3270 *cp, int expires)
74{ 73{
75 if (expires == 0) { 74 if (expires == 0) {
76 if (timer_pending(&cp->timer)) 75 if (timer_pending(&cp->timer))
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 17027d918cf7..564baca01b7c 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -5,6 +5,8 @@
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/keyboard.h> 6#include <linux/keyboard.h>
7#include <linux/kd.h> 7#include <linux/kd.h>
8#include <linux/kbd_kern.h>
9#include <linux/kbd_diacr.h>
8 10
9u_short plain_map[NR_KEYS] = { 11u_short plain_map[NR_KEYS] = {
10 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 12 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0893d306ae80..e1a746269c4c 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -23,7 +23,7 @@
23#include "raw3270.h" 23#include "raw3270.h"
24#include "ctrlchar.h" 24#include "ctrlchar.h"
25 25
26struct raw3270_fn fs3270_fn; 26static struct raw3270_fn fs3270_fn;
27 27
28struct fs3270 { 28struct fs3270 {
29 struct raw3270_view view; 29 struct raw3270_view view;
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view)
401} 401}
402 402
403/* View to a 3270 device. Can be console, tty or fullscreen. */ 403/* View to a 3270 device. Can be console, tty or fullscreen. */
404struct raw3270_fn fs3270_fn = { 404static struct raw3270_fn fs3270_fn = {
405 .activate = fs3270_activate, 405 .activate = fs3270_activate,
406 .deactivate = fs3270_deactivate, 406 .deactivate = fs3270_deactivate,
407 .intv = (void *) fs3270_irq, 407 .intv = (void *) fs3270_irq,
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 3e86fd1756e5..f62f9a4e8950 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
148 } 148 }
149} 149}
150 150
151#if 0
151/* 152/*
152 * Generate ebcdic -> ascii translation table from kbd_data. 153 * Generate ebcdic -> ascii translation table from kbd_data.
153 */ 154 */
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
173 } 174 }
174 } 175 }
175} 176}
177#endif
176 178
177/* 179/*
178 * We have a combining character DIACR here, followed by the character CH. 180 * We have a combining character DIACR here, followed by the character CH.
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index cdb24f528112..9e451acc6491 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
67 return -EINVAL; 67 return -EINVAL;
68} 68}
69 69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, 70static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr) 71 struct monwrite_hdr *monhdr)
72{ 72{
73 struct mon_buf *entry, *next; 73 struct mon_buf *entry, *next;
74 74
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7a84014f2037..8facd14adb7c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31 31
32struct class *class3270; 32static struct class *class3270;
33 33
34/* The main 3270 data structure. */ 34/* The main 3270 data structure. */
35struct raw3270 { 35struct raw3270 {
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
86/* 86/*
87 * Encode array for 12 bit 3270 addresses. 87 * Encode array for 12 bit 3270 addresses.
88 */ 88 */
89unsigned char raw3270_ebcgraf[64] = { 89static unsigned char raw3270_ebcgraf[64] = {
90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..f171de3b0b11 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
59/* Internal state: is a request active at the sclp? */ 59/* Internal state: is a request active at the sclp? */
60static volatile enum sclp_running_state_t { 60static volatile enum sclp_running_state_t {
61 sclp_running_state_idle, 61 sclp_running_state_idle,
62 sclp_running_state_running 62 sclp_running_state_running,
63 sclp_running_state_reset_pending
63} sclp_running_state = sclp_running_state_idle; 64} sclp_running_state = sclp_running_state_idle;
64 65
65/* Internal state: is a read request pending? */ 66/* Internal state: is a read request pending? */
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t {
88 89
89/* Timeout intervals in seconds.*/ 90/* Timeout intervals in seconds.*/
90#define SCLP_BUSY_INTERVAL 10 91#define SCLP_BUSY_INTERVAL 10
91#define SCLP_RETRY_INTERVAL 15 92#define SCLP_RETRY_INTERVAL 30
92 93
93static void sclp_process_queue(void); 94static void sclp_process_queue(void);
94static int sclp_init_mask(int calculate); 95static int sclp_init_mask(int calculate);
95static int sclp_init(void); 96static int sclp_init(void);
96 97
97/* Perform service call. Return 0 on success, non-zero otherwise. */ 98/* Perform service call. Return 0 on success, non-zero otherwise. */
98static int 99int
99service_call(sclp_cmdw_t command, void *sccb) 100sclp_service_call(sclp_cmdw_t command, void *sccb)
100{ 101{
101 int cc; 102 int cc;
102 103
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
113 return 0; 114 return 0;
114} 115}
115 116
116/* Request timeout handler. Restart the request queue. If DATA is non-zero, 117static inline void __sclp_make_read_req(void);
117 * force restart of running request. */ 118
118static void 119static void
119sclp_request_timeout(unsigned long data) 120__sclp_queue_read_req(void)
120{ 121{
121 unsigned long flags; 122 if (sclp_reading_state == sclp_reading_state_idle) {
122 123 sclp_reading_state = sclp_reading_state_reading;
123 if (data) { 124 __sclp_make_read_req();
124 spin_lock_irqsave(&sclp_lock, flags); 125 /* Add request to head of queue */
125 sclp_running_state = sclp_running_state_idle; 126 list_add(&sclp_read_req.list, &sclp_req_queue);
126 spin_unlock_irqrestore(&sclp_lock, flags);
127 } 127 }
128 sclp_process_queue();
129} 128}
130 129
131/* Set up request retry timer. Called while sclp_lock is locked. */ 130/* Set up request retry timer. Called while sclp_lock is locked. */
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
140 add_timer(&sclp_request_timer); 139 add_timer(&sclp_request_timer);
141} 140}
142 141
142/* Request timeout handler. Restart the request queue. If DATA is non-zero,
143 * force restart of running request. */
144static void
145sclp_request_timeout(unsigned long data)
146{
147 unsigned long flags;
148
149 spin_lock_irqsave(&sclp_lock, flags);
150 if (data) {
151 if (sclp_running_state == sclp_running_state_running) {
152 /* Break running state and queue NOP read event request
153 * to get a defined interface state. */
154 __sclp_queue_read_req();
155 sclp_running_state = sclp_running_state_idle;
156 }
157 } else {
158 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
159 sclp_request_timeout, 0);
160 }
161 spin_unlock_irqrestore(&sclp_lock, flags);
162 sclp_process_queue();
163}
164
143/* Try to start a request. Return zero if the request was successfully 165/* Try to start a request. Return zero if the request was successfully
144 * started or if it will be started at a later time. Return non-zero otherwise. 166 * started or if it will be started at a later time. Return non-zero otherwise.
145 * Called while sclp_lock is locked. */ 167 * Called while sclp_lock is locked. */
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req)
151 if (sclp_running_state != sclp_running_state_idle) 173 if (sclp_running_state != sclp_running_state_idle)
152 return 0; 174 return 0;
153 del_timer(&sclp_request_timer); 175 del_timer(&sclp_request_timer);
154 rc = service_call(req->command, req->sccb); 176 rc = sclp_service_call(req->command, req->sccb);
155 req->start_count++; 177 req->start_count++;
156 178
157 if (rc == 0) { 179 if (rc == 0) {
@@ -191,7 +213,15 @@ sclp_process_queue(void)
191 rc = __sclp_start_request(req); 213 rc = __sclp_start_request(req);
192 if (rc == 0) 214 if (rc == 0)
193 break; 215 break;
194 /* Request failed. */ 216 /* Request failed */
217 if (req->start_count > 1) {
218 /* Cannot abort already submitted request - could still
219 * be active at the SCLP */
220 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
221 sclp_request_timeout, 0);
222 break;
223 }
224 /* Post-processing for aborted request */
195 list_del(&req->list); 225 list_del(&req->list);
196 if (req->callback) { 226 if (req->callback) {
197 spin_unlock_irqrestore(&sclp_lock, flags); 227 spin_unlock_irqrestore(&sclp_lock, flags);
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
221 list_add_tail(&req->list, &sclp_req_queue); 251 list_add_tail(&req->list, &sclp_req_queue);
222 rc = 0; 252 rc = 0;
223 /* Start if request is first in list */ 253 /* Start if request is first in list */
224 if (req->list.prev == &sclp_req_queue) { 254 if (sclp_running_state == sclp_running_state_idle &&
255 req->list.prev == &sclp_req_queue) {
225 rc = __sclp_start_request(req); 256 rc = __sclp_start_request(req);
226 if (rc) 257 if (rc)
227 list_del(&req->list); 258 list_del(&req->list);
@@ -294,7 +325,7 @@ __sclp_make_read_req(void)
294 sccb = (struct sccb_header *) sclp_read_sccb; 325 sccb = (struct sccb_header *) sclp_read_sccb;
295 clear_page(sccb); 326 clear_page(sccb);
296 memset(&sclp_read_req, 0, sizeof(struct sclp_req)); 327 memset(&sclp_read_req, 0, sizeof(struct sclp_req));
297 sclp_read_req.command = SCLP_CMDW_READDATA; 328 sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
298 sclp_read_req.status = SCLP_REQ_QUEUED; 329 sclp_read_req.status = SCLP_REQ_QUEUED;
299 sclp_read_req.start_count = 0; 330 sclp_read_req.start_count = 0;
300 sclp_read_req.callback = sclp_read_cb; 331 sclp_read_req.callback = sclp_read_cb;
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
334 finished_sccb = S390_lowcore.ext_params & 0xfffffff8; 365 finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
335 evbuf_pending = S390_lowcore.ext_params & 0x3; 366 evbuf_pending = S390_lowcore.ext_params & 0x3;
336 if (finished_sccb) { 367 if (finished_sccb) {
368 del_timer(&sclp_request_timer);
369 sclp_running_state = sclp_running_state_reset_pending;
337 req = __sclp_find_req(finished_sccb); 370 req = __sclp_find_req(finished_sccb);
338 if (req) { 371 if (req) {
339 /* Request post-processing */ 372 /* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
348 sclp_running_state = sclp_running_state_idle; 381 sclp_running_state = sclp_running_state_idle;
349 } 382 }
350 if (evbuf_pending && sclp_receive_mask != 0 && 383 if (evbuf_pending && sclp_receive_mask != 0 &&
351 sclp_reading_state == sclp_reading_state_idle && 384 sclp_activation_state == sclp_activation_state_active)
352 sclp_activation_state == sclp_activation_state_active ) { 385 __sclp_queue_read_req();
353 sclp_reading_state = sclp_reading_state_reading;
354 __sclp_make_read_req();
355 /* Add request to head of queue */
356 list_add(&sclp_read_req.list, &sclp_req_queue);
357 }
358 spin_unlock(&sclp_lock); 386 spin_unlock(&sclp_lock);
359 sclp_process_queue(); 387 sclp_process_queue();
360} 388}
@@ -374,6 +402,7 @@ sclp_sync_wait(void)
374 unsigned long flags; 402 unsigned long flags;
375 unsigned long cr0, cr0_sync; 403 unsigned long cr0, cr0_sync;
376 u64 timeout; 404 u64 timeout;
405 int irq_context;
377 406
378 /* We'll be disabling timer interrupts, so we need a custom timeout 407 /* We'll be disabling timer interrupts, so we need a custom timeout
379 * mechanism */ 408 * mechanism */
@@ -386,7 +415,9 @@ sclp_sync_wait(void)
386 } 415 }
387 local_irq_save(flags); 416 local_irq_save(flags);
388 /* Prevent bottom half from executing once we force interrupts open */ 417 /* Prevent bottom half from executing once we force interrupts open */
389 local_bh_disable(); 418 irq_context = in_interrupt();
419 if (!irq_context)
420 local_bh_disable();
390 /* Enable service-signal interruption, disable timer interrupts */ 421 /* Enable service-signal interruption, disable timer interrupts */
391 trace_hardirqs_on(); 422 trace_hardirqs_on();
392 __ctl_store(cr0, 0, 0); 423 __ctl_store(cr0, 0, 0);
@@ -402,19 +433,19 @@ sclp_sync_wait(void)
402 get_clock() > timeout && 433 get_clock() > timeout &&
403 del_timer(&sclp_request_timer)) 434 del_timer(&sclp_request_timer))
404 sclp_request_timer.function(sclp_request_timer.data); 435 sclp_request_timer.function(sclp_request_timer.data);
405 barrier();
406 cpu_relax(); 436 cpu_relax();
407 } 437 }
408 local_irq_disable(); 438 local_irq_disable();
409 __ctl_load(cr0, 0, 0); 439 __ctl_load(cr0, 0, 0);
410 _local_bh_enable(); 440 if (!irq_context)
441 _local_bh_enable();
411 local_irq_restore(flags); 442 local_irq_restore(flags);
412} 443}
413 444
414EXPORT_SYMBOL(sclp_sync_wait); 445EXPORT_SYMBOL(sclp_sync_wait);
415 446
416/* Dispatch changes in send and receive mask to registered listeners. */ 447/* Dispatch changes in send and receive mask to registered listeners. */
417static inline void 448static void
418sclp_dispatch_state_change(void) 449sclp_dispatch_state_change(void)
419{ 450{
420 struct list_head *l; 451 struct list_head *l;
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
597 sccb = (struct init_sccb *) sclp_init_sccb; 628 sccb = (struct init_sccb *) sclp_init_sccb;
598 clear_page(sccb); 629 clear_page(sccb);
599 memset(&sclp_init_req, 0, sizeof(struct sclp_req)); 630 memset(&sclp_init_req, 0, sizeof(struct sclp_req));
600 sclp_init_req.command = SCLP_CMDW_WRITEMASK; 631 sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
601 sclp_init_req.status = SCLP_REQ_FILLED; 632 sclp_init_req.status = SCLP_REQ_FILLED;
602 sclp_init_req.start_count = 0; 633 sclp_init_req.start_count = 0;
603 sclp_init_req.callback = NULL; 634 sclp_init_req.callback = NULL;
@@ -800,7 +831,7 @@ sclp_check_interface(void)
800 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { 831 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
801 __sclp_make_init_req(0, 0); 832 __sclp_make_init_req(0, 0);
802 sccb = (struct init_sccb *) sclp_init_req.sccb; 833 sccb = (struct init_sccb *) sclp_init_req.sccb;
803 rc = service_call(sclp_init_req.command, sccb); 834 rc = sclp_service_call(sclp_init_req.command, sccb);
804 if (rc == -EIO) 835 if (rc == -EIO)
805 break; 836 break;
806 sclp_init_req.status = SCLP_REQ_RUNNING; 837 sclp_init_req.status = SCLP_REQ_RUNNING;
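
The hunks above change the retry behaviour in sclp_process_queue(): a failed request that has already been started more than once is no longer aborted, since it could still be active at the SCLP; instead the busy-interval timer is armed and queue processing stops. A minimal user-space sketch of that decision rule, with illustrative names only (not part of the patch):

/* Sketch of the retry policy added to sclp_process_queue(): a request whose
 * start_count is already above 1 is left on the queue and retried later,
 * everything else is aborted and post-processed. */
#include <stdio.h>

enum queue_action { ACTION_RETRY_LATER, ACTION_ABORT };

static enum queue_action failed_request_action(int start_count)
{
	/* start_count > 1: the request was submitted before this attempt and
	 * cannot safely be aborted - arm the busy timer instead. */
	return (start_count > 1) ? ACTION_RETRY_LATER : ACTION_ABORT;
}

int main(void)
{
	printf("first failure:  %s\n",
	       failed_request_action(1) == ACTION_ABORT ? "abort" : "retry later");
	printf("second failure: %s\n",
	       failed_request_action(2) == ACTION_ABORT ? "abort" : "retry later");
	return 0;
}
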
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 2c71d6ee7b5b..7d29ab45a6ed 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -12,7 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/list.h> 14#include <linux/list.h>
15 15#include <asm/sclp.h>
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17 17
18/* maximum number of pages concerning our own memory management */ 18/* maximum number of pages concerning our own memory management */
@@ -49,9 +49,11 @@
49 49
50typedef unsigned int sclp_cmdw_t; 50typedef unsigned int sclp_cmdw_t;
51 51
52#define SCLP_CMDW_READDATA 0x00770005 52#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
53#define SCLP_CMDW_WRITEDATA 0x00760005 53#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
54#define SCLP_CMDW_WRITEMASK 0x00780005 54#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
55#define SCLP_CMDW_READ_SCP_INFO 0x00020001
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
55 57
56#define GDS_ID_MDSMU 0x1310 58#define GDS_ID_MDSMU 0x1310
57#define GDS_ID_MDSRouteInfo 0x1311 59#define GDS_ID_MDSRouteInfo 0x1311
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t;
66 68
67typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
68 70
69struct sccb_header {
70 u16 length;
71 u8 function_code;
72 u8 control_mask[3];
73 u16 response_code;
74} __attribute__((packed));
75
76struct gds_subvector { 71struct gds_subvector {
77 u8 length; 72 u8 length;
78 u8 key; 73 u8 key;
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg);
131int sclp_remove_processed(struct sccb_header *sccb); 126int sclp_remove_processed(struct sccb_header *sccb);
132int sclp_deactivate(void); 127int sclp_deactivate(void);
133int sclp_reactivate(void); 128int sclp_reactivate(void);
129int sclp_service_call(sclp_cmdw_t command, void *sccb);
134 130
135/* useful inlines */ 131/* useful inlines */
136 132
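
The command-word defines are only renamed in this header; the values stay the same, so request setup code across the SCLP drivers keeps issuing identical commands. A small stand-alone check of that, copying both the old and the new values from the hunk above (sketch, not part of the patch):

#include <stdio.h>

/* old names */
#define SCLP_CMDW_READDATA		0x00770005
#define SCLP_CMDW_WRITEDATA		0x00760005
#define SCLP_CMDW_WRITEMASK		0x00780005
/* new names */
#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005

_Static_assert(SCLP_CMDW_READ_EVENT_DATA == SCLP_CMDW_READDATA, "value changed");
_Static_assert(SCLP_CMDW_WRITE_EVENT_DATA == SCLP_CMDW_WRITEDATA, "value changed");
_Static_assert(SCLP_CMDW_WRITE_EVENT_MASK == SCLP_CMDW_WRITEMASK, "value changed");

int main(void)
{
	printf("SCLP command words are unchanged by the rename\n");
	return 0;
}
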
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 86864f641716..ead1043d788e 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); 66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
67} 67}
68 68
69static inline void 69static void
70sclp_conbuf_emit(void) 70sclp_conbuf_emit(void)
71{ 71{
72 struct sclp_buffer* buffer; 72 struct sclp_buffer* buffer;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 4f873ae148b7..65aa2c85737f 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -169,7 +169,7 @@ cpi_prepare_req(void)
169 } 169 }
170 170
171 /* prepare request data structure presented to SCLP driver */ 171 /* prepare request data structure presented to SCLP driver */
172 req->command = SCLP_CMDW_WRITEDATA; 172 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
173 req->sccb = sccb; 173 req->sccb = sccb;
174 req->status = SCLP_REQ_FILLED; 174 req->status = SCLP_REQ_FILLED;
175 req->callback = cpi_callback; 175 req->callback = cpi_callback;
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
new file mode 100644
index 000000000000..7bcbe643b087
--- /dev/null
+++ b/drivers/s390/char/sclp_info.c
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/sclp_info.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <asm/sclp.h>
12#include "sclp.h"
13
14struct sclp_readinfo_sccb s390_readinfo_sccb;
15
16void __init sclp_readinfo_early(void)
17{
18 sclp_cmdw_t command;
19 struct sccb_header *sccb;
20 int ret;
21
22 __ctl_set_bit(0, 9); /* enable service signal subclass mask */
23
24 sccb = &s390_readinfo_sccb.header;
25 command = SCLP_CMDW_READ_SCP_INFO_FORCED;
26 while (1) {
27 u16 response;
28
29 memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb));
30 sccb->length = sizeof(s390_readinfo_sccb);
31 sccb->control_mask[2] = 0x80;
32
33 ret = sclp_service_call(command, &s390_readinfo_sccb);
34
35 if (ret == -EIO)
36 goto out;
37 if (ret == -EBUSY)
38 continue;
39
40 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
41 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
42 local_irq_disable();
43 barrier();
44
45 response = sccb->response_code;
46
47 if (response == 0x10)
48 break;
49
50 if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO)
51 break;
52
53 command = SCLP_CMDW_READ_SCP_INFO;
54 }
55out:
56 __ctl_clear_bit(0, 9); /* disable service signal subclass mask */
57}
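
The loop in sclp_readinfo_early() above first issues the forced read-SCP-info command and falls back to the plain command when the SCLP answers with response code 0x1f0; response 0x10 means success and -EBUSY simply retries the same command. A pure-logic, user-space restatement of the fallback decision (names illustrative, not part of the patch):

#include <stdio.h>

#define CMD_READ_SCP_INFO		0x00020001
#define CMD_READ_SCP_INFO_FORCED	0x00120001

/* Returns the next command to issue, or 0 when the loop should stop
 * (the -EBUSY retry path is omitted for brevity). */
static unsigned int next_command(unsigned int cmd, unsigned short response)
{
	if (response == 0x10)		/* normal completion */
		return 0;
	if (response == 0x1f0 && cmd == CMD_READ_SCP_INFO_FORCED)
		return CMD_READ_SCP_INFO;	/* fall back once */
	return 0;			/* any other code: give up */
}

int main(void)
{
	printf("forced, rc 0x1f0 -> next 0x%08x\n",
	       next_command(CMD_READ_SCP_INFO_FORCED, 0x1f0));
	printf("plain,  rc 0x10  -> next 0x%08x\n",
	       next_command(CMD_READ_SCP_INFO, 0x10));
	return 0;
}
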
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 0c92d3909cca..2486783ea58e 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EvTyp_PMsgCmd;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITEDATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
464 buffer->request.status = SCLP_REQ_FILLED; 464 buffer->request.status = SCLP_REQ_FILLED;
465 buffer->request.callback = sclp_writedata_callback; 465 buffer->request.callback = sclp_writedata_callback;
466 buffer->request.callback_data = buffer; 466 buffer->request.callback_data = buffer;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2d173e5c8a09..90536f60bf50 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = {
721 .ioctl = sclp_tty_ioctl, 721 .ioctl = sclp_tty_ioctl,
722}; 722};
723 723
724int __init 724static int __init
725sclp_tty_init(void) 725sclp_tty_init(void)
726{ 726{
727 struct tty_driver *driver; 727 struct tty_driver *driver;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 723bf4191bfe..544f137d70d7 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
207 request->sclp_req.status = SCLP_REQ_FAILED; 207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO; 208 return -EIO;
209 } 209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA; 210 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
211 request->sclp_req.status = SCLP_REQ_FILLED; 211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback; 212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request; 213 request->sclp_req.callback_data = (void *) request;
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = {
669/* 669/*
670 * Register driver with SCLP and Linux and initialize internal tty structures. 670 * Register driver with SCLP and Linux and initialize internal tty structures.
671 */ 671 */
672int __init 672static int __init
673sclp_vt220_tty_init(void) 673sclp_vt220_tty_init(void)
674{ 674{
675 struct tty_driver *driver; 675 struct tty_driver *driver;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c9f1c4c8bb13..bb4ff537729d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
3 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -99,7 +99,11 @@ enum tape_op {
99 TO_DIS, /* Tape display */ 99 TO_DIS, /* Tape display */
100 TO_ASSIGN, /* Assign tape to channel path */ 100 TO_ASSIGN, /* Assign tape to channel path */
101 TO_UNASSIGN, /* Unassign tape from channel path */ 101 TO_UNASSIGN, /* Unassign tape from channel path */
102 TO_SIZE /* #entries in tape_op_t */ 102 TO_CRYPT_ON, /* Enable encryption */
 103 TO_CRYPT_OFF, /* Disable encryption */
104 TO_KEKL_SET, /* Set KEK label */
105 TO_KEKL_QUERY, /* Query KEK label */
106 TO_SIZE, /* #entries in tape_op_t */
103}; 107};
104 108
105/* Forward declaration */ 109/* Forward declaration */
@@ -112,6 +116,7 @@ enum tape_request_status {
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */ 116 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */ 117 TAPE_REQUEST_DONE, /* request is completed. */
114 TAPE_REQUEST_CANCEL, /* request should be canceled. */ 118 TAPE_REQUEST_CANCEL, /* request should be canceled. */
119 TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
115}; 120};
116 121
117/* Tape CCW request */ 122/* Tape CCW request */
@@ -164,10 +169,11 @@ struct tape_discipline {
164 * The discipline irq function either returns an error code (<0) which 169 * The discipline irq function either returns an error code (<0) which
165 * means that the request has failed with an error or one of the following: 170 * means that the request has failed with an error or one of the following:
166 */ 171 */
167#define TAPE_IO_SUCCESS 0 /* request successful */ 172#define TAPE_IO_SUCCESS 0 /* request successful */
168#define TAPE_IO_PENDING 1 /* request still running */ 173#define TAPE_IO_PENDING 1 /* request still running */
169#define TAPE_IO_RETRY 2 /* retry to current request */ 174#define TAPE_IO_RETRY 2 /* retry to current request */
170#define TAPE_IO_STOP 3 /* stop the running request */ 175#define TAPE_IO_STOP 3 /* stop the running request */
176#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
171 177
172/* Char Frontend Data */ 178/* Char Frontend Data */
173struct tape_char_data { 179struct tape_char_data {
@@ -242,6 +248,10 @@ struct tape_device {
242 248
243 /* Function to start or stop the next request later. */ 249 /* Function to start or stop the next request later. */
244 struct delayed_work tape_dnr; 250 struct delayed_work tape_dnr;
251
252 /* Timer for long busy */
253 struct timer_list lb_timeout;
254
245}; 255};
246 256
247/* Externals from tape_core.c */ 257/* Externals from tape_core.c */
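
TO_SIZE stays the final tape_op entry because it sizes tables indexed by the opcode, such as the tape_op_verbose name table that the tape_core.c hunk further below extends for the four new operations. A self-contained sketch of that pattern using only the new entries (illustrative, not the full enum):

#include <stdio.h>

enum tape_op_sketch {
	TO_CRYPT_ON,	/* Enable encryption */
	TO_CRYPT_OFF,	/* Disable encryption */
	TO_KEKL_SET,	/* Set KEK label */
	TO_KEKL_QUERY,	/* Query KEK label */
	TO_SIZE		/* number of entries - must stay last */
};

/* verbose names mirror the tape_op_verbose additions in tape_core.c */
static const char *verbose[TO_SIZE] = {
	[TO_CRYPT_ON]	= "CON",
	[TO_CRYPT_OFF]	= "COF",
	[TO_KEKL_SET]	= "KLS",
	[TO_KEKL_QUERY]	= "KLQ",
};

int main(void)
{
	for (int op = 0; op < TO_SIZE; op++)
		printf("%d -> %s\n", op, verbose[op]);
	return 0;
}
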
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 9df912f63188..50f5edab83d7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.c 2 * drivers/s390/char/tape_3590.c
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <asm/ebcdic.h>
14 15
15#define TAPE_DBF_AREA tape_3590_dbf 16#define TAPE_DBF_AREA tape_3590_dbf
16 17
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
30 * - Read Device (buffered) log: BRA 31 * - Read Device (buffered) log: BRA
31 * - Read Library log: BRA 32 * - Read Library log: BRA
32 * - Swap Devices: BRA 33 * - Swap Devices: BRA
33 * - Long Busy: BRA 34 * - Long Busy: implemented
34 * - Special Intercept: BRA 35 * - Special Intercept: BRA
35 * - Read Alternate: implemented 36 * - Read Alternate: implemented
36 *******************************************************************/ 37 *******************************************************************/
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
94 [0xae] = "Subsystem environmental alert", 95 [0xae] = "Subsystem environmental alert",
95}; 96};
96 97
98static int crypt_supported(struct tape_device *device)
99{
100 return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
101}
102
103static int crypt_enabled(struct tape_device *device)
104{
105 return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
106}
107
108static void ext_to_int_kekl(struct tape390_kekl *in,
109 struct tape3592_kekl *out)
110{
111 int i;
112
113 memset(out, 0, sizeof(*out));
114 if (in->type == TAPE390_KEKL_TYPE_HASH)
115 out->flags |= 0x40;
116 if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
117 out->flags |= 0x80;
118 strncpy(out->label, in->label, 64);
119 for (i = strlen(in->label); i < sizeof(out->label); i++)
120 out->label[i] = ' ';
121 ASCEBC(out->label, sizeof(out->label));
122}
123
124static void int_to_ext_kekl(struct tape3592_kekl *in,
125 struct tape390_kekl *out)
126{
127 memset(out, 0, sizeof(*out));
128 if(in->flags & 0x40)
129 out->type = TAPE390_KEKL_TYPE_HASH;
130 else
131 out->type = TAPE390_KEKL_TYPE_LABEL;
132 if(in->flags & 0x80)
133 out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
134 else
135 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
136 memcpy(out->label, in->label, sizeof(in->label));
137 EBCASC(out->label, sizeof(in->label));
138 strstrip(out->label);
139}
140
141static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
142 struct tape390_kekl_pair *out)
143{
144 if (in->count == 0) {
145 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
146 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
147 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
148 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
149 } else if (in->count == 1) {
150 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
151 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
152 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
153 } else if (in->count == 2) {
154 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
155 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
156 } else {
157 printk("Invalid KEKL number: %d\n", in->count);
158 BUG();
159 }
160}
161
162static int check_ext_kekl(struct tape390_kekl *kekl)
163{
164 if (kekl->type == TAPE390_KEKL_TYPE_NONE)
165 goto invalid;
166 if (kekl->type > TAPE390_KEKL_TYPE_HASH)
167 goto invalid;
168 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
169 goto invalid;
170 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
171 goto invalid;
172 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
173 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
174 goto invalid;
175
176 return 0;
177invalid:
178 return -EINVAL;
179}
180
181static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
182{
183 if (check_ext_kekl(&kekls->kekl[0]))
184 goto invalid;
185 if (check_ext_kekl(&kekls->kekl[1]))
186 goto invalid;
187
188 return 0;
189invalid:
190 return -EINVAL;
191}
192
193/*
194 * Query KEKLs
195 */
196static int tape_3592_kekl_query(struct tape_device *device,
197 struct tape390_kekl_pair *ext_kekls)
198{
199 struct tape_request *request;
200 struct tape3592_kekl_query_order *order;
201 struct tape3592_kekl_query_data *int_kekls;
202 int rc;
203
204 DBF_EVENT(6, "tape3592_kekl_query\n");
205 int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
206 if (!int_kekls)
207 return -ENOMEM;
208 request = tape_alloc_request(2, sizeof(*order));
209 if (IS_ERR(request)) {
210 rc = PTR_ERR(request);
211 goto fail_malloc;
212 }
213 order = request->cpdata;
214 memset(order,0,sizeof(*order));
215 order->code = 0xe2;
216 order->max_count = 2;
217 request->op = TO_KEKL_QUERY;
218 tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
219 tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
220 int_kekls);
221 rc = tape_do_io(device, request);
222 if (rc)
223 goto fail_request;
224 int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
225
226 rc = 0;
227fail_request:
228 tape_free_request(request);
229fail_malloc:
230 kfree(int_kekls);
231 return rc;
232}
233
234/*
235 * IOCTL: Query KEKLs
236 */
237static int tape_3592_ioctl_kekl_query(struct tape_device *device,
238 unsigned long arg)
239{
240 int rc;
241 struct tape390_kekl_pair *ext_kekls;
242
243 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
244 if (!crypt_supported(device))
245 return -ENOSYS;
246 if (!crypt_enabled(device))
247 return -EUNATCH;
248 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
249 if (!ext_kekls)
250 return -ENOMEM;
251 rc = tape_3592_kekl_query(device, ext_kekls);
252 if (rc != 0)
253 goto fail;
254 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
255 rc = -EFAULT;
256 goto fail;
257 }
258 rc = 0;
259fail:
260 kfree(ext_kekls);
261 return rc;
262}
263
264static int tape_3590_mttell(struct tape_device *device, int mt_count);
265
266/*
267 * Set KEKLs
268 */
269static int tape_3592_kekl_set(struct tape_device *device,
270 struct tape390_kekl_pair *ext_kekls)
271{
272 struct tape_request *request;
273 struct tape3592_kekl_set_order *order;
274
275 DBF_EVENT(6, "tape3592_kekl_set\n");
276 if (check_ext_kekl_pair(ext_kekls)) {
277 DBF_EVENT(6, "invalid kekls\n");
278 return -EINVAL;
279 }
280 if (tape_3590_mttell(device, 0) != 0)
281 return -EBADSLT;
282 request = tape_alloc_request(1, sizeof(*order));
283 if (IS_ERR(request))
284 return PTR_ERR(request);
285 order = request->cpdata;
286 memset(order, 0, sizeof(*order));
287 order->code = 0xe3;
288 order->kekls.count = 2;
289 ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
290 ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
291 request->op = TO_KEKL_SET;
292 tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
293
294 return tape_do_io_free(device, request);
295}
296
297/*
298 * IOCTL: Set KEKLs
299 */
300static int tape_3592_ioctl_kekl_set(struct tape_device *device,
301 unsigned long arg)
302{
303 int rc;
304 struct tape390_kekl_pair *ext_kekls;
305
306 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
307 if (!crypt_supported(device))
308 return -ENOSYS;
309 if (!crypt_enabled(device))
310 return -EUNATCH;
311 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
312 if (!ext_kekls)
313 return -ENOMEM;
314 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
315 rc = -EFAULT;
316 goto out;
317 }
318 rc = tape_3592_kekl_set(device, ext_kekls);
319out:
320 kfree(ext_kekls);
321 return rc;
322}
323
324/*
325 * Enable encryption
326 */
327static int tape_3592_enable_crypt(struct tape_device *device)
328{
329 struct tape_request *request;
330 char *data;
331
332 DBF_EVENT(6, "tape_3592_enable_crypt\n");
333 if (!crypt_supported(device))
334 return -ENOSYS;
335 request = tape_alloc_request(2, 72);
336 if (IS_ERR(request))
337 return PTR_ERR(request);
338 data = request->cpdata;
339 memset(data,0,72);
340
341 data[0] = 0x05;
342 data[36 + 0] = 0x03;
343 data[36 + 1] = 0x03;
344 data[36 + 4] = 0x40;
345 data[36 + 6] = 0x01;
346 data[36 + 14] = 0x2f;
347 data[36 + 18] = 0xc3;
348 data[36 + 35] = 0x72;
349 request->op = TO_CRYPT_ON;
350 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
351 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
352 return tape_do_io_free(device, request);
353}
354
355/*
356 * Disable encryption
357 */
358static int tape_3592_disable_crypt(struct tape_device *device)
359{
360 struct tape_request *request;
361 char *data;
362
363 DBF_EVENT(6, "tape_3592_disable_crypt\n");
364 if (!crypt_supported(device))
365 return -ENOSYS;
366 request = tape_alloc_request(2, 72);
367 if (IS_ERR(request))
368 return PTR_ERR(request);
369 data = request->cpdata;
370 memset(data,0,72);
371
372 data[0] = 0x05;
373 data[36 + 0] = 0x03;
374 data[36 + 1] = 0x03;
375 data[36 + 35] = 0x32;
376
377 request->op = TO_CRYPT_OFF;
378 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
379 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
380
381 return tape_do_io_free(device, request);
382}
383
384/*
385 * IOCTL: Set encryption status
386 */
387static int tape_3592_ioctl_crypt_set(struct tape_device *device,
388 unsigned long arg)
389{
390 struct tape390_crypt_info info;
391
392 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
393 if (!crypt_supported(device))
394 return -ENOSYS;
395 if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
396 return -EFAULT;
397 if (info.status & ~TAPE390_CRYPT_ON_MASK)
398 return -EINVAL;
399 if (info.status & TAPE390_CRYPT_ON_MASK)
400 return tape_3592_enable_crypt(device);
401 else
402 return tape_3592_disable_crypt(device);
403}
404
405static int tape_3590_sense_medium(struct tape_device *device);
406
407/*
408 * IOCTL: Query encryption status
409 */
410static int tape_3592_ioctl_crypt_query(struct tape_device *device,
411 unsigned long arg)
412{
413 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
414 if (!crypt_supported(device))
415 return -ENOSYS;
416 tape_3590_sense_medium(device);
417 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
418 sizeof(TAPE_3590_CRYPT_INFO(device))))
419 return -EFAULT;
420 else
421 return 0;
422}
423
97/* 424/*
98 * 3590 IOCTL Overload 425 * 3590 IOCTL Overload
99 */ 426 */
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
109 436
110 return tape_std_display(device, &disp); 437 return tape_std_display(device, &disp);
111 } 438 }
439 case TAPE390_KEKL_SET:
440 return tape_3592_ioctl_kekl_set(device, arg);
441 case TAPE390_KEKL_QUERY:
442 return tape_3592_ioctl_kekl_query(device, arg);
443 case TAPE390_CRYPT_SET:
444 return tape_3592_ioctl_crypt_set(device, arg);
445 case TAPE390_CRYPT_QUERY:
446 return tape_3592_ioctl_crypt_query(device, arg);
112 default: 447 default:
113 return -EINVAL; /* no additional ioctls */ 448 return -EINVAL; /* no additional ioctls */
114 } 449 }
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work)
248 case TO_READ_ATTMSG: 583 case TO_READ_ATTMSG:
249 tape_3590_read_attmsg(p->device); 584 tape_3590_read_attmsg(p->device);
250 break; 585 break;
586 case TO_CRYPT_ON:
587 tape_3592_enable_crypt(p->device);
588 break;
589 case TO_CRYPT_OFF:
590 tape_3592_disable_crypt(p->device);
591 break;
251 default: 592 default:
252 DBF_EVENT(3, "T3590: work handler undefined for " 593 DBF_EVENT(3, "T3590: work handler undefined for "
253 "operation 0x%02x\n", p->op); 594 "operation 0x%02x\n", p->op);
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
365} 706}
366#endif 707#endif
367 708
709static void tape_3590_med_state_set(struct tape_device *device,
710 struct tape_3590_med_sense *sense)
711{
712 struct tape390_crypt_info *c_info;
713
714 c_info = &TAPE_3590_CRYPT_INFO(device);
715
716 if (sense->masst == MSENSE_UNASSOCIATED) {
717 tape_med_state_set(device, MS_UNLOADED);
718 TAPE_3590_CRYPT_INFO(device).medium_status = 0;
719 return;
720 }
721 if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
722 PRINT_ERR("Unknown medium state: %x\n", sense->masst);
723 return;
724 }
725 tape_med_state_set(device, MS_LOADED);
726 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
727 if (sense->flags & MSENSE_CRYPT_MASK) {
728 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
729 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
730 } else {
731 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
732 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
733 }
734}
735
368/* 736/*
369 * The done handler is called at device/channel end and wakes up the sleeping 737 * The done handler is called at device/channel end and wakes up the sleeping
370 * process 738 * process
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
372static int 740static int
373tape_3590_done(struct tape_device *device, struct tape_request *request) 741tape_3590_done(struct tape_device *device, struct tape_request *request)
374{ 742{
375 struct tape_3590_med_sense *sense; 743 struct tape_3590_disc_data *disc_data;
376 744
377 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 745 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
746 disc_data = device->discdata;
378 747
379 switch (request->op) { 748 switch (request->op) {
380 case TO_BSB: 749 case TO_BSB:
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
394 break; 763 break;
395 case TO_RUN: 764 case TO_RUN:
396 tape_med_state_set(device, MS_UNLOADED); 765 tape_med_state_set(device, MS_UNLOADED);
766 tape_3590_schedule_work(device, TO_CRYPT_OFF);
397 break; 767 break;
398 case TO_MSEN: 768 case TO_MSEN:
399 sense = (struct tape_3590_med_sense *) request->cpdata; 769 tape_3590_med_state_set(device, request->cpdata);
400 if (sense->masst == MSENSE_UNASSOCIATED) 770 break;
401 tape_med_state_set(device, MS_UNLOADED); 771 case TO_CRYPT_ON:
402 if (sense->masst == MSENSE_ASSOCIATED_MOUNT) 772 TAPE_3590_CRYPT_INFO(device).status
403 tape_med_state_set(device, MS_LOADED); 773 |= TAPE390_CRYPT_ON_MASK;
774 *(device->modeset_byte) |= 0x03;
775 break;
776 case TO_CRYPT_OFF:
777 TAPE_3590_CRYPT_INFO(device).status
778 &= ~TAPE390_CRYPT_ON_MASK;
779 *(device->modeset_byte) &= ~0x03;
404 break; 780 break;
405 case TO_RBI: /* RBI seems to succeed even without medium loaded. */ 781 case TO_RBI: /* RBI seems to succeed even without medium loaded. */
406 case TO_NOP: /* Same to NOP. */ 782 case TO_NOP: /* Same to NOP. */
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
409 case TO_DIS: 785 case TO_DIS:
410 case TO_ASSIGN: 786 case TO_ASSIGN:
411 case TO_UNASSIGN: 787 case TO_UNASSIGN:
412 break;
413 case TO_SIZE: 788 case TO_SIZE:
789 case TO_KEKL_SET:
790 case TO_KEKL_QUERY:
414 break; 791 break;
415 } 792 }
416 return TAPE_IO_SUCCESS; 793 return TAPE_IO_SUCCESS;
@@ -540,10 +917,8 @@ static int
540tape_3590_erp_long_busy(struct tape_device *device, 917tape_3590_erp_long_busy(struct tape_device *device,
541 struct tape_request *request, struct irb *irb) 918 struct tape_request *request, struct irb *irb)
542{ 919{
543 /* FIXME: how about WAITING for a minute ? */ 920 DBF_EVENT(6, "Device is busy\n");
544 PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", 921 return TAPE_IO_LONG_BUSY;
545 device->cdev->dev.bus_id);
546 return tape_3590_erp_basic(device, request, irb, -EBUSY);
547} 922}
548 923
549/* 924/*
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
951 device->cdev->dev.bus_id, sense->mc); 1326 device->cdev->dev.bus_id, sense->mc);
952} 1327}
953 1328
1329static int tape_3590_crypt_error(struct tape_device *device,
1330 struct tape_request *request, struct irb *irb)
1331{
1332 u8 cu_rc, ekm_rc1;
1333 u16 ekm_rc2;
1334 u32 drv_rc;
1335 char *bus_id, *sense;
1336
1337 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1338 bus_id = device->cdev->dev.bus_id;
1339 cu_rc = sense[0];
1340 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1341 ekm_rc1 = sense[9];
1342 ekm_rc2 = *((u16*) &sense[10]);
1343 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1344 /* key not defined on EKM */
1345 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
1346 if ((cu_rc == 1) || (cu_rc == 2))
1347 /* No connection to EKM */
1348 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1349
1350 PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id);
1351 PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc,
1352 drv_rc, ekm_rc1, ekm_rc2);
1353
1354 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1355}
1356
954/* 1357/*
955 * 3590 error Recovery routine: 1358 * 3590 error Recovery routine:
956 * If possible, it tries to recover from the error. If this is not possible, 1359 * If possible, it tries to recover from the error. If this is not possible,
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
979 1382
980 sense = (struct tape_3590_sense *) irb->ecw; 1383 sense = (struct tape_3590_sense *) irb->ecw;
981 1384
1385 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
1386
982 /* 1387 /*
983 * First check all RC-QRCs where we want to do something special 1388 * First check all RC-QRCs where we want to do something special
984 * - "break": basic error recovery is done 1389 * - "break": basic error recovery is done
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
999 case 0x2231: 1404 case 0x2231:
1000 tape_3590_print_era_msg(device, irb); 1405 tape_3590_print_era_msg(device, irb);
1001 return tape_3590_erp_special_interrupt(device, request, irb); 1406 return tape_3590_erp_special_interrupt(device, request, irb);
1407 case 0x2240:
1408 return tape_3590_crypt_error(device, request, irb);
1002 1409
1003 case 0x3010: 1410 case 0x3010:
1004 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", 1411 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1020 DBF_EVENT(2, "(%08x): Rewind Unload complete\n", 1427 DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
1021 device->cdev_id); 1428 device->cdev_id);
1022 tape_med_state_set(device, MS_UNLOADED); 1429 tape_med_state_set(device, MS_UNLOADED);
1430 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1023 return tape_3590_erp_basic(device, request, irb, 0); 1431 return tape_3590_erp_basic(device, request, irb, 0);
1024 1432
1025 case 0x4010: 1433 case 0x4010:
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1030 PRINT_WARN("(%s): Tape operation when medium not loaded\n", 1438 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1031 device->cdev->dev.bus_id); 1439 device->cdev->dev.bus_id);
1032 tape_med_state_set(device, MS_UNLOADED); 1440 tape_med_state_set(device, MS_UNLOADED);
1441 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1033 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1442 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1034 case 0x4012: /* Device Long Busy */ 1443 case 0x4012: /* Device Long Busy */
1444 /* XXX: Also use long busy handling here? */
1445 DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
1035 tape_3590_print_era_msg(device, irb); 1446 tape_3590_print_era_msg(device, irb);
1447 return tape_3590_erp_basic(device, request, irb, -EBUSY);
1448 case 0x4014:
1449 DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
1036 return tape_3590_erp_long_busy(device, request, irb); 1450 return tape_3590_erp_long_busy(device, request, irb);
1037 1451
1038 case 0x5010: 1452 case 0x5010:
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1064 case 0x5120: 1478 case 0x5120:
1065 case 0x1120: 1479 case 0x1120:
1066 tape_med_state_set(device, MS_UNLOADED); 1480 tape_med_state_set(device, MS_UNLOADED);
1481 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1067 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1482 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1068 1483
1069 case 0x6020: 1484 case 0x6020:
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device)
1142{ 1557{
1143 int rc; 1558 int rc;
1144 struct tape_3590_disc_data *data; 1559 struct tape_3590_disc_data *data;
1560 char *rdc_data;
1145 1561
1146 DBF_EVENT(6, "3590 device setup\n"); 1562 DBF_EVENT(6, "3590 device setup\n");
1147 data = kmalloc(sizeof(struct tape_3590_disc_data), 1563 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
1148 GFP_KERNEL | GFP_DMA);
1149 if (data == NULL) 1564 if (data == NULL)
1150 return -ENOMEM; 1565 return -ENOMEM;
1151 data->read_back_op = READ_PREVIOUS; 1566 data->read_back_op = READ_PREVIOUS;
1152 device->discdata = data; 1567 device->discdata = data;
1153 1568
1154 if ((rc = tape_std_assign(device)) == 0) { 1569 rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
1155 /* Try to find out if medium is loaded */ 1570 if (!rdc_data) {
1156 if ((rc = tape_3590_sense_medium(device)) != 0) 1571 rc = -ENOMEM;
1157 DBF_LH(3, "3590 medium sense returned %d\n", rc); 1572 goto fail_kmalloc;
1573 }
1574 rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64);
1575 if (rc) {
1576 DBF_LH(3, "Read device characteristics failed!\n");
1577 goto fail_kmalloc;
1578 }
1579 rc = tape_std_assign(device);
1580 if (rc)
1581 goto fail_rdc_data;
1582 if (rdc_data[31] == 0x13) {
1583 PRINT_INFO("Device has crypto support\n");
1584 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1585 tape_3592_disable_crypt(device);
1586 } else {
1587 DBF_EVENT(6, "Device has NO crypto support\n");
1158 } 1588 }
1589 /* Try to find out if medium is loaded */
1590 rc = tape_3590_sense_medium(device);
1591 if (rc) {
1592 DBF_LH(3, "3590 medium sense returned %d\n", rc);
1593 goto fail_rdc_data;
1594 }
1595 return 0;
1159 1596
1597fail_rdc_data:
1598 kfree(rdc_data);
1599fail_kmalloc:
1600 kfree(data);
1160 return rc; 1601 return rc;
1161} 1602}
1162 1603
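
A hypothetical user-space sketch of exercising one of the new encryption ioctls added above; the header location, the device node name and the exact layout of struct tape390_crypt_info are assumptions, since the exported definitions are not part of the hunks shown here:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>	/* assumed location of the TAPE390_* definitions */

int main(void)
{
	struct tape390_crypt_info info;	/* status field referenced in the hunks */
	int fd = open("/dev/ntibm0", O_RDONLY);	/* device node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, TAPE390_CRYPT_QUERY, &info) == 0)
		printf("crypt status word: 0x%x\n", info.status);
	close(fd);
	return 0;
}
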
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index cf274b9445a6..aa5138807af1 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.h 2 * drivers/s390/char/tape_3590.h
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -38,16 +38,22 @@
38#define MSENSE_UNASSOCIATED 0x00 38#define MSENSE_UNASSOCIATED 0x00
39#define MSENSE_ASSOCIATED_MOUNT 0x01 39#define MSENSE_ASSOCIATED_MOUNT 0x01
40#define MSENSE_ASSOCIATED_UMOUNT 0x02 40#define MSENSE_ASSOCIATED_UMOUNT 0x02
41#define MSENSE_CRYPT_MASK 0x00000010
41 42
42#define TAPE_3590_MAX_MSG 0xb0 43#define TAPE_3590_MAX_MSG 0xb0
43 44
44/* Datatypes */ 45/* Datatypes */
45 46
46struct tape_3590_disc_data { 47struct tape_3590_disc_data {
47 unsigned char modeset_byte; 48 struct tape390_crypt_info crypt_info;
48 int read_back_op; 49 int read_back_op;
49}; 50};
50 51
52#define TAPE_3590_CRYPT_INFO(device) \
53 ((struct tape_3590_disc_data*)(device->discdata))->crypt_info
54#define TAPE_3590_READ_BACK_OP(device) \
55 ((struct tape_3590_disc_data*)(device->discdata))->read_back_op
56
51struct tape_3590_sense { 57struct tape_3590_sense {
52 58
53 unsigned int command_rej:1; 59 unsigned int command_rej:1;
@@ -118,7 +124,48 @@ struct tape_3590_sense {
118struct tape_3590_med_sense { 124struct tape_3590_med_sense {
119 unsigned int macst:4; 125 unsigned int macst:4;
120 unsigned int masst:4; 126 unsigned int masst:4;
121 char pad[127]; 127 char pad1[7];
128 unsigned int flags;
129 char pad2[116];
130} __attribute__ ((packed));
131
132/* Datastructures for 3592 encryption support */
133
134struct tape3592_kekl {
135 __u8 flags;
136 char label[64];
137} __attribute__ ((packed));
138
139struct tape3592_kekl_pair {
140 __u8 count;
141 struct tape3592_kekl kekl[2];
142} __attribute__ ((packed));
143
144struct tape3592_kekl_query_data {
145 __u16 len;
146 __u8 fmt;
147 __u8 mc;
148 __u32 id;
149 __u8 flags;
150 struct tape3592_kekl_pair kekls;
151 char reserved[116];
152} __attribute__ ((packed));
153
154struct tape3592_kekl_query_order {
155 __u8 code;
156 __u8 flags;
157 char reserved1[2];
158 __u8 max_count;
159 char reserved2[35];
160} __attribute__ ((packed));
161
162struct tape3592_kekl_set_order {
163 __u8 code;
164 __u8 flags;
165 char reserved1[2];
166 __u8 op;
167 struct tape3592_kekl_pair kekls;
168 char reserved2[120];
122} __attribute__ ((packed)); 169} __attribute__ ((packed));
123 170
124#endif /* _TAPE_3590_H */ 171#endif /* _TAPE_3590_H */
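
Splitting the old pad[127] into pad1[7], a 32-bit flags word and pad2[116] keeps the packed medium-sense structure at the same overall size: 7 + 4 + 116 = 127 bytes after the leading bit-field byte. A stand-alone layout check of that arithmetic (sketch; field names copied from the hunk above):

#include <stdio.h>

struct med_sense_old {
	unsigned int macst:4;
	unsigned int masst:4;
	char pad[127];
} __attribute__ ((packed));

struct med_sense_new {
	unsigned int macst:4;
	unsigned int masst:4;
	char pad1[7];
	unsigned int flags;
	char pad2[116];
} __attribute__ ((packed));

_Static_assert(sizeof(struct med_sense_old) == sizeof(struct med_sense_new),
	       "medium sense layout size changed");

int main(void)
{
	printf("medium sense size: %zu bytes\n", sizeof(struct med_sense_new));
	return 0;
}
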
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index c8a89b3b87d4..dd0ecaed592e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
73/* 73/*
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static inline void 76static void
77tapeblock_end_request(struct request *req, int uptodate) 77tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
108/* 108/*
109 * Feed the tape device CCW queue with requests supplied in a list. 109 * Feed the tape device CCW queue with requests supplied in a list.
110 */ 110 */
111static inline int 111static int
112tapeblock_start_request(struct tape_device *device, struct request *req) 112tapeblock_start_request(struct tape_device *device, struct request *req)
113{ 113{
114 struct tape_request * ccw_req; 114 struct tape_request * ccw_req;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31198c8f2718..9faea04e11e9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -3,7 +3,7 @@
3 * character device frontend for tape device driver 3 * character device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device)
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
92/* 92static int
93 * Terminate write command (we write two TMs and skip backward over last)
94 * This ensures that the tape is always correctly terminated.
95 * When the user writes afterwards a new file, he will overwrite the
96 * second TM and therefore one TM will remain to separate the
97 * two files on the tape...
98 */
99static inline void
100tapechar_terminate_write(struct tape_device *device)
101{
102 if (tape_mtop(device, MTWEOF, 1) == 0 &&
103 tape_mtop(device, MTWEOF, 1) == 0)
104 tape_mtop(device, MTBSR, 1);
105}
106
107static inline int
108tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 93tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
109{ 94{
110 struct idal_buffer *new; 95 struct idal_buffer *new;
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
137/* 122/*
138 * Tape device read function 123 * Tape device read function
139 */ 124 */
140ssize_t 125static ssize_t
141tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 126tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
142{ 127{
143 struct tape_device *device; 128 struct tape_device *device;
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
201/* 186/*
202 * Tape device write function 187 * Tape device write function
203 */ 188 */
204ssize_t 189static ssize_t
205tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) 190tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
206{ 191{
207 struct tape_device *device; 192 struct tape_device *device;
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
291/* 276/*
292 * Character frontend tape device open function. 277 * Character frontend tape device open function.
293 */ 278 */
294int 279static int
295tapechar_open (struct inode *inode, struct file *filp) 280tapechar_open (struct inode *inode, struct file *filp)
296{ 281{
297 struct tape_device *device; 282 struct tape_device *device;
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp)
326 * Character frontend tape device release function. 311 * Character frontend tape device release function.
327 */ 312 */
328 313
329int 314static int
330tapechar_release(struct inode *inode, struct file *filp) 315tapechar_release(struct inode *inode, struct file *filp)
331{ 316{
332 struct tape_device *device; 317 struct tape_device *device;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c6c2e918b990..e2a8a1a04bab 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -26,9 +26,11 @@
26#include "tape_std.h" 26#include "tape_std.h"
27 27
28#define PRINTK_HEADER "TAPE_CORE: " 28#define PRINTK_HEADER "TAPE_CORE: "
29#define LONG_BUSY_TIMEOUT 180 /* seconds */
29 30
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void tape_delayed_next_request(struct work_struct *); 32static void tape_delayed_next_request(struct work_struct *);
33static void tape_long_busy_timeout(unsigned long data);
32 34
33/* 35/*
34 * One list to contain all tape devices of all disciplines, so 36 * One list to contain all tape devices of all disciplines, so
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] =
69 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", 71 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
70 [TO_READ_ATTMSG] = "RAT", 72 [TO_READ_ATTMSG] = "RAT",
71 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", 73 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
72 [TO_UNASSIGN] = "UAS" 74 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
75 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
76 [TO_KEKL_QUERY] = "KLQ",
73}; 77};
74 78
75static inline int 79static int
76busid_to_int(char *bus_id) 80busid_to_int(char *bus_id)
77{ 81{
78 int dec; 82 int dec;
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
252/* 256/*
253 * Stop running ccw. Has to be called with the device lock held. 257 * Stop running ccw. Has to be called with the device lock held.
254 */ 258 */
255static inline int 259static int
256__tape_cancel_io(struct tape_device *device, struct tape_request *request) 260__tape_cancel_io(struct tape_device *device, struct tape_request *request)
257{ 261{
258 int retries; 262 int retries;
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device,
346 return -EINVAL; 350 return -EINVAL;
347 } 351 }
348 352
353 init_timer(&device->lb_timeout);
354 device->lb_timeout.function = tape_long_busy_timeout;
355
349 /* Let the discipline have a go at the device. */ 356 /* Let the discipline have a go at the device. */
350 device->discipline = discipline; 357 device->discipline = discipline;
351 if (!try_module_get(discipline->owner)) { 358 if (!try_module_get(discipline->owner)) {
@@ -385,7 +392,7 @@ out:
385 return rc; 392 return rc;
386} 393}
387 394
388static inline void 395static void
389tape_cleanup_device(struct tape_device *device) 396tape_cleanup_device(struct tape_device *device)
390{ 397{
391 tapeblock_cleanup_device(device); 398 tapeblock_cleanup_device(device);
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev)
563 return ret; 570 return ret;
564} 571}
565 572
566static inline void 573static void
567__tape_discard_requests(struct tape_device *device) 574__tape_discard_requests(struct tape_device *device)
568{ 575{
569 struct tape_request * request; 576 struct tape_request * request;
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request)
703 kfree(request); 710 kfree(request);
704} 711}
705 712
706static inline int 713static int
707__tape_start_io(struct tape_device *device, struct tape_request *request) 714__tape_start_io(struct tape_device *device, struct tape_request *request)
708{ 715{
709 int rc; 716 int rc;
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
733 return rc; 740 return rc;
734} 741}
735 742
736static inline void 743static void
737__tape_start_next_request(struct tape_device *device) 744__tape_start_next_request(struct tape_device *device)
738{ 745{
739 struct list_head *l, *n; 746 struct list_head *l, *n;
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work)
801 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 808 spin_unlock_irq(get_ccwdev_lock(device->cdev));
802} 809}
803 810
804static inline void 811static void tape_long_busy_timeout(unsigned long data)
812{
813 struct tape_request *request;
814 struct tape_device *device;
815
816 device = (struct tape_device *) data;
817 spin_lock_irq(get_ccwdev_lock(device->cdev));
818 request = list_entry(device->req_queue.next, struct tape_request, list);
819 if (request->status != TAPE_REQUEST_LONG_BUSY)
820 BUG();
821 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
822 __tape_start_next_request(device);
823 device->lb_timeout.data = (unsigned long) tape_put_device(device);
824 spin_unlock_irq(get_ccwdev_lock(device->cdev));
825}
826
827static void
805__tape_end_request( 828__tape_end_request(
806 struct tape_device * device, 829 struct tape_device * device,
807 struct tape_request * request, 830 struct tape_request * request,
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
878 * and starts it if the tape is idle. Has to be called with 901 * and starts it if the tape is idle. Has to be called with
879 * the device lock held. 902 * the device lock held.
880 */ 903 */
881static inline int 904static int
882__tape_start_request(struct tape_device *device, struct tape_request *request) 905__tape_start_request(struct tape_device *device, struct tape_request *request)
883{ 906{
884 int rc; 907 int rc;
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 /* May be an unsolicited irq */ 1117 /* May be an unsolicited irq */
1095 if(request != NULL) 1118 if(request != NULL)
1096 request->rescnt = irb->scsw.count; 1119 request->rescnt = irb->scsw.count;
1097 1120 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
1121 !list_empty(&device->req_queue)) {
1122 /* Not Ready to Ready after long busy ? */
1123 struct tape_request *req;
1124 req = list_entry(device->req_queue.next,
1125 struct tape_request, list);
1126 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1127 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1128 if (del_timer(&device->lb_timeout)) {
1129 device->lb_timeout.data = (unsigned long)
1130 tape_put_device(device);
1131 __tape_start_next_request(device);
1132 }
1133 return;
1134 }
1135 }
1098 if (irb->scsw.dstat != 0x0c) { 1136 if (irb->scsw.dstat != 0x0c) {
1099 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1137 /* Set the 'ONLINE' flag depending on sense byte 1 */
1100 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1138 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1142 break; 1180 break;
1143 case TAPE_IO_PENDING: 1181 case TAPE_IO_PENDING:
1144 break; 1182 break;
1183 case TAPE_IO_LONG_BUSY:
1184 device->lb_timeout.data =
1185 (unsigned long)tape_get_device_reference(device);
1186 device->lb_timeout.expires = jiffies +
1187 LONG_BUSY_TIMEOUT * HZ;
1188 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
1189 add_timer(&device->lb_timeout);
1190 request->status = TAPE_REQUEST_LONG_BUSY;
1191 break;
1145 case TAPE_IO_RETRY: 1192 case TAPE_IO_RETRY:
1146 rc = __tape_start_io(device, request); 1193 rc = __tape_start_io(device, request);
1147 if (rc) 1194 if (rc)
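
The tape_core.c hunks above park a request whose irq handler reports TAPE_IO_LONG_BUSY, arm lb_timeout for LONG_BUSY_TIMEOUT seconds and restart the queue either when the timer fires or when a not-ready-to-ready interrupt cancels the timer first. A pure-logic, user-space sketch of that state flow (illustrative names, no kernel APIs):

#include <stdio.h>
#include <stdbool.h>

#define LONG_BUSY_TIMEOUT 180	/* seconds, as defined in the patch */

enum req_status { REQ_QUEUED, REQ_LONG_BUSY, REQ_IN_IO };

struct sketch_req {
	enum req_status status;
	bool timer_pending;
};

static void io_returned_long_busy(struct sketch_req *req)
{
	req->status = REQ_LONG_BUSY;	/* request->status = TAPE_REQUEST_LONG_BUSY */
	req->timer_pending = true;	/* add_timer(jiffies + LONG_BUSY_TIMEOUT * HZ) */
}

static void ready_interrupt_or_timeout(struct sketch_req *req)
{
	if (req->status != REQ_LONG_BUSY)
		return;
	req->timer_pending = false;	/* del_timer() succeeded or the timer fired */
	req->status = REQ_IN_IO;	/* __tape_start_next_request() */
}

int main(void)
{
	struct sketch_req req = { REQ_QUEUED, false };

	io_returned_long_busy(&req);
	printf("after long busy: status=%d timer=%d\n", req.status, req.timer_pending);
	ready_interrupt_or_timeout(&req);
	printf("after ready irq: status=%d timer=%d\n", req.status, req.timer_pending);
	return 0;
}
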
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 09844621edc0..bc33068b9ce2 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -36,7 +36,7 @@
36struct tty_driver *tty3270_driver; 36struct tty_driver *tty3270_driver;
37static int tty3270_max_index; 37static int tty3270_max_index;
38 38
39struct raw3270_fn tty3270_fn; 39static struct raw3270_fn tty3270_fn;
40 40
41struct tty3270_cell { 41struct tty3270_cell {
42 unsigned char character; 42 unsigned char character;
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *);
119/* 119/*
120 * Setup timeout for a device. On timeout trigger an update. 120 * Setup timeout for a device. On timeout trigger an update.
121 */ 121 */
122void 122static void tty3270_set_timer(struct tty3270 *tp, int expires)
123tty3270_set_timer(struct tty3270 *tp, int expires)
124{ 123{
125 if (expires == 0) { 124 if (expires == 0) {
126 if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 125 if (timer_pending(&tp->timer) && del_timer(&tp->timer))
@@ -841,7 +840,7 @@ tty3270_del_views(void)
841 } 840 }
842} 841}
843 842
844struct raw3270_fn tty3270_fn = { 843static struct raw3270_fn tty3270_fn = {
845 .activate = tty3270_activate, 844 .activate = tty3270_activate,
846 .deactivate = tty3270_deactivate, 845 .deactivate = tty3270_deactivate,
847 .intv = (void *) tty3270_irq, 846 .intv = (void *) tty3270_irq,
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = {
1754 .set_termios = tty3270_set_termios 1753 .set_termios = tty3270_set_termios
1755}; 1754};
1756 1755
1757void 1756static void tty3270_notifier(int index, int active)
1758tty3270_notifier(int index, int active)
1759{ 1757{
1760 if (active) 1758 if (active)
1761 tty_register_device(tty3270_driver, index, NULL); 1759 tty_register_device(tty3270_driver, index, NULL);
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active)
1767 * 3270 tty registration code called from tty_init(). 1765 * 3270 tty registration code called from tty_init().
1768 * Most kernel services (incl. kmalloc) are available at this point. 1766 * Most kernel services (incl. kmalloc) are available at this point.
1769 */ 1767 */
1770int __init 1768static int __init tty3270_init(void)
1771tty3270_init(void)
1772{ 1769{
1773 struct tty_driver *driver; 1770 struct tty_driver *driver;
1774 int ret; 1771 int ret;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6cb23040954b..4f894dc2373b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -128,9 +128,8 @@ static iucv_interrupt_ops_t vmlogrdr_iucvops = {
128 .MessagePending = vmlogrdr_iucv_MessagePending, 128 .MessagePending = vmlogrdr_iucv_MessagePending,
129}; 129};
130 130
131 131static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); 132static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134 133
135/* 134/*
136 * pointer to system service private structure 135 * pointer to system service private structure
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 12c2d6b746e6..aa65df4dfced 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action;
43 * Function: blacklist_range 43 * Function: blacklist_range
44 * (Un-)blacklist the devices from-to 44 * (Un-)blacklist the devices from-to
45 */ 45 */
46static inline void 46static void
47blacklist_range (range_action action, unsigned int from, unsigned int to, 47blacklist_range (range_action action, unsigned int from, unsigned int to,
48 unsigned int ssid) 48 unsigned int ssid)
49{ 49{
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
69 * Get devno/busid from given string. 69 * Get devno/busid from given string.
70 * Shamelessly grabbed from dasd_devmap.c. 70 * Shamelessly grabbed from dasd_devmap.c.
71 */ 71 */
72static inline int 72static int
73blacklist_busid(char **str, int *id0, int *ssid, int *devno) 73blacklist_busid(char **str, int *id0, int *ssid, int *devno)
74{ 74{
75 int val, old_style; 75 int val, old_style;
@@ -123,10 +123,10 @@ confused:
123 return 1; 123 return 1;
124} 124}
125 125
126static inline int 126static int
127blacklist_parse_parameters (char *str, range_action action) 127blacklist_parse_parameters (char *str, range_action action)
128{ 128{
129 unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; 129 int from, to, from_id0, to_id0, from_ssid, to_ssid;
130 130
131 while (*str != 0 && *str != '\n') { 131 while (*str != 0 && *str != '\n') {
132 range_action ra = action; 132 range_action ra = action;
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno)
227 * Function: blacklist_parse_proc_parameters 227 * Function: blacklist_parse_proc_parameters
228 * parse the stuff which is piped to /proc/cio_ignore 228 * parse the stuff which is piped to /proc/cio_ignore
229 */ 229 */
230static inline void 230static void
231blacklist_parse_proc_parameters (char *buf) 231blacklist_parse_proc_parameters (char *buf)
232{ 232{
233 if (strncmp (buf, "free ", 5) == 0) { 233 if (strncmp (buf, "free ", 5) == 0) {
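Most hunks in this file, and in the cio/qdio files further down, apply the same mechanical cleanup, sketched here with a made-up helper(): file-local functions keep static for internal linkage but drop the explicit inline hint, leaving the inlining decision to the compiler.

/* before:  static inline int helper(int x) { return x * 2; }  */
static int helper(int x)	/* after: plain 'static'; gcc inlines where profitable */
{
	return x * 2;
}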
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 38954f5cd14c..d48e3ca4752c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
53 53
54static struct bus_type ccwgroup_bus_type; 54static struct bus_type ccwgroup_bus_type;
55 55
56static inline void 56static void
57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
58{ 58{
59 int i; 59 int i;
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev)
104 kfree(gdev); 104 kfree(gdev);
105} 105}
106 106
107static inline int 107static int
108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
109{ 109{
110 char str[8]; 110 char str[8];
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev)
424 return 0; 424 return 0;
425} 425}
426 426
427static inline struct ccwgroup_device * 427static struct ccwgroup_device *
428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) 428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
429{ 429{
430 struct ccwgroup_device *gdev; 430 struct ccwgroup_device *gdev;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cbab8d2ce5cf..6f05a44e3817 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
93 u16 sch; /* subchannel */ 93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */ 94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */ 95 u16 fla[8]; /* full link addresses 0-7 */
96 } *ssd_area; 96 } __attribute__ ((packed)) *ssd_area;
97 97
98 ssd_area = page; 98 ssd_area = page;
99 99
@@ -277,7 +277,7 @@ out_unreg:
277 return 0; 277 return 0;
278} 278}
279 279
280static inline void 280static void
281s390_set_chpid_offline( __u8 chpid) 281s390_set_chpid_offline( __u8 chpid)
282{ 282{
283 char dbf_txt[15]; 283 char dbf_txt[15];
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
338 return 0x80 >> chp; 338 return 0x80 >> chp;
339} 339}
340 340
341static inline int 341static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 342s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 343{
344 struct schib schib; 344 struct schib schib;
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
444 u32 andesc[28]; 444 u32 andesc[28];
445 /* incident-specific information */ 445 /* incident-specific information */
446 u32 isinfo[28]; 446 u32 isinfo[28];
447 } *lir; 447 } __attribute__ ((packed)) *lir;
448 448
449 lir = data; 449 lir = data;
450 if (!(lir->iq&0x80)) 450 if (!(lir->iq&0x80))
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data)
461 return (u16) (lir->indesc[0]&0x000000ff); 461 return (u16) (lir->indesc[0]&0x000000ff);
462} 462}
463 463
464int 464struct chsc_sei_area {
465chsc_process_crw(void) 465 struct chsc_header request;
466 u32 reserved1;
467 u32 reserved2;
468 u32 reserved3;
469 struct chsc_header response;
470 u32 reserved4;
471 u8 flags;
472 u8 vf; /* validity flags */
473 u8 rs; /* reporting source */
474 u8 cc; /* content code */
475 u16 fla; /* full link address */
476 u16 rsid; /* reporting source id */
477 u32 reserved5;
478 u32 reserved6;
479 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
480 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed));
482
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{
485 int chpid;
486
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4)
490 return 0;
491 chpid = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else
495 s390_set_chpid_offline(chpid);
496
497 return 0;
498}
499
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
466{ 501{
467 int chpid, ret;
468 struct res_acc_data res_data; 502 struct res_acc_data res_data;
469 struct { 503 struct device *dev;
470 struct chsc_header request; 504 int status;
471 u32 reserved1; 505 int rc;
472 u32 reserved2; 506
473 u32 reserved3; 507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
474 struct chsc_header response; 508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
475 u32 reserved4; 509 if (sei_area->rs != 4)
476 u8 flags; 510 return 0;
477 u8 vf; /* validity flags */ 511 /* allocate a new channel path structure, if needed */
478 u8 rs; /* reporting source */ 512 status = get_chp_status(sei_area->rsid);
479 u8 cc; /* content code */ 513 if (status < 0)
480 u16 fla; /* full link address */ 514 new_channel_path(sei_area->rsid);
481 u16 rsid; /* reporting source id */ 515 else if (!status)
482 u32 reserved5; 516 return 0;
483 u32 reserved6; 517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
484 u32 ccdf[96]; /* content-code dependent field */ 518 memset(&res_data, 0, sizeof(struct res_acc_data));
485 /* ccdf has to be big enough for a link-incident record */ 519 res_data.chp = to_channelpath(dev);
486 } *sei_area; 520 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0)
523 /* full link address */
524 res_data.fla_mask = 0xffff;
525 else
526 /* link address */
527 res_data.fla_mask = 0xff00;
528 }
529 rc = s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533}
534
535static int chsc_process_sei(struct chsc_sei_area *sei_area)
536{
537 int rc;
538
539 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40)
541 CIO_CRW_EVENT(2, "chsc: event overflow\n");
542 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) {
545 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area);
547 break;
 548 case 2: /* i/o resource accessibility */
549 rc = chsc_process_sei_res_acc(sei_area);
550 break;
551 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc);
554 break;
555 }
556
557 return rc;
558}
559
560int chsc_process_crw(void)
561{
562 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
487 565
488 if (!sei_page) 566 if (!sei_page)
489 return 0; 567 return 0;
490 /* 568 /* Access to sei_page is serialized through machine check handler
491 * build the chsc request block for store event information 569 * thread, so no need for locking. */
492 * and do the call
493 * This function is only called by the machine check handler thread,
494 * so we don't need locking for the sei_page.
495 */
496 sei_area = sei_page; 570 sei_area = sei_page;
497 571
498 CIO_TRACE_EVENT( 2, "prcss"); 572 CIO_TRACE_EVENT( 2, "prcss");
499 ret = 0; 573 ret = 0;
500 do { 574 do {
501 int ccode, status;
502 struct device *dev;
503 memset(sei_area, 0, sizeof(*sei_area)); 575 memset(sei_area, 0, sizeof(*sei_area));
504 memset(&res_data, 0, sizeof(struct res_acc_data));
505 sei_area->request.length = 0x0010; 576 sei_area->request.length = 0x0010;
506 sei_area->request.code = 0x000e; 577 sei_area->request.code = 0x000e;
578 if (chsc(sei_area))
579 break;
507 580
508 ccode = chsc(sei_area); 581 if (sei_area->response.code == 0x0001) {
509 if (ccode > 0) 582 CIO_CRW_EVENT(4, "chsc: sei successful\n");
510 return 0; 583 rc = chsc_process_sei(sei_area);
511 584 if (rc)
512 switch (sei_area->response.code) { 585 ret = rc;
513 /* for debug purposes, check for problems */ 586 } else {
514 case 0x0001: 587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
515 CIO_CRW_EVENT(4, "chsc_process_crw: event information "
516 "successfully stored\n");
517 break; /* everything ok */
518 case 0x0002:
519 CIO_CRW_EVENT(2,
520 "chsc_process_crw: invalid command!\n");
521 return 0;
522 case 0x0003:
523 CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
524 "request block!\n");
525 return 0;
526 case 0x0005:
527 CIO_CRW_EVENT(2, "chsc_process_crw: no event "
528 "information stored\n");
529 return 0;
530 default:
531 CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
532 sei_area->response.code); 588 sei_area->response.code);
533 return 0; 589 ret = 0;
534 }
535
536 /* Check if we might have lost some information. */
537 if (sei_area->flags & 0x40)
538 CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
539 "has been lost due to overflow!\n");
540
541 if (sei_area->rs != 4) {
542 CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
543 "(%04X) isn't a chpid!\n",
544 sei_area->rsid);
545 continue;
546 }
547
548 /* which kind of information was stored? */
549 switch (sei_area->cc) {
550 case 1: /* link incident*/
551 CIO_CRW_EVENT(4, "chsc_process_crw: "
552 "channel subsystem reports link incident,"
553 " reporting source is chpid %x\n",
554 sei_area->rsid);
555 chpid = __get_chpid_from_lir(sei_area->ccdf);
556 if (chpid < 0)
557 CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
558 __FUNCTION__);
559 else
560 s390_set_chpid_offline(chpid);
561 break;
562
563 case 2: /* i/o resource accessibility */
564 CIO_CRW_EVENT(4, "chsc_process_crw: "
565 "channel subsystem reports some I/O "
566 "devices may have become accessible\n");
567 pr_debug("Data received after sei: \n");
568 pr_debug("Validity flags: %x\n", sei_area->vf);
569
570 /* allocate a new channel path structure, if needed */
571 status = get_chp_status(sei_area->rsid);
572 if (status < 0)
573 new_channel_path(sei_area->rsid);
574 else if (!status)
575 break;
576 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
577 res_data.chp = to_channelpath(dev);
578 pr_debug("chpid: %x", sei_area->rsid);
579 if ((sei_area->vf & 0xc0) != 0) {
580 res_data.fla = sei_area->fla;
581 if ((sei_area->vf & 0xc0) == 0xc0) {
582 pr_debug(" full link addr: %x",
583 sei_area->fla);
584 res_data.fla_mask = 0xffff;
585 } else {
586 pr_debug(" link addr: %x",
587 sei_area->fla);
588 res_data.fla_mask = 0xff00;
589 }
590 }
591 ret = s390_process_res_acc(&res_data);
592 pr_debug("\n\n");
593 put_device(dev);
594 break;
595
596 default: /* other stuff */
597 CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
598 sei_area->cc);
599 break; 590 break;
600 } 591 }
601 } while (sei_area->flags & 0x80); 592 } while (sei_area->flags & 0x80);
593
602 return ret; 594 return ret;
603} 595}
604 596
605static inline int 597static int
606__chp_add_new_sch(struct subchannel_id schid) 598__chp_add_new_sch(struct subchannel_id schid)
607{ 599{
608 struct schib schib; 600 struct schib schib;
609 int ret; 601 int ret;
610 602
611 if (stsch(schid, &schib)) 603 if (stsch_err(schid, &schib))
612 /* We're through */ 604 /* We're through */
613 return need_rescan ? -EAGAIN : -ENXIO; 605 return need_rescan ? -EAGAIN : -ENXIO;
614 606
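The rewrite above splits one large switch inside chsc_process_crw() into per-content-code helpers plus a small dispatcher (chsc_process_sei). A self-contained, hedged userspace sketch of that dispatch pattern — the event layout and codes below are illustrative stand-ins, not the real struct chsc_sei_area:

/* Userspace sketch of "one helper per content code plus a dispatcher". */
#include <stdio.h>

struct sei_event {		/* illustrative stand-in for struct chsc_sei_area */
	unsigned char flags;
	unsigned char cc;	/* content code */
	unsigned short rsid;	/* reporting source id */
};

static int handle_link_incident(struct sei_event *ev)
{
	printf("link incident from rsid %04x\n", (unsigned int)ev->rsid);
	return 0;
}

static int handle_res_acc(struct sei_event *ev)
{
	printf("resource accessibility event from rsid %04x\n",
	       (unsigned int)ev->rsid);
	return 0;
}

static int process_event(struct sei_event *ev)
{
	if (ev->flags & 0x40)
		fprintf(stderr, "event overflow, information may be lost\n");
	switch (ev->cc) {
	case 1:
		return handle_link_incident(ev);
	case 2:
		return handle_res_acc(ev);
	default:
		fprintf(stderr, "unhandled content code %d\n", ev->cc);
		return 0;
	}
}

int main(void)
{
	struct sei_event ev = { .flags = 0, .cc = 2, .rsid = 0x10 };
	return process_event(&ev);
}

Keeping the loop in chsc_process_crw() limited to issuing the CHSC and checking the response code, while the content-code handling lives in small helpers, is what shrinks the hunk from 154 to 146 lines despite adding two functions.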
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on)
709 return chp_add(chpid); 701 return chp_add(chpid);
710} 702}
711 703
712static inline int check_for_io_on_path(struct subchannel *sch, int index) 704static int check_for_io_on_path(struct subchannel *sch, int index)
713{ 705{
714 int cc; 706 int cc;
715 707
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch)
741 sch->driver->termination(&sch->dev); 733 sch->driver->termination(&sch->dev);
742} 734}
743 735
744static inline void 736static void
745__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) 737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
746{ 738{
747 int chp, old_lpm; 739 int chp, old_lpm;
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = {
967static void 959static void
968chsc_remove_chp_cmg_attr(struct channel_path *chp) 960chsc_remove_chp_cmg_attr(struct channel_path *chp)
969{ 961{
970 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); 962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
971 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); 963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
972} 964}
973 965
974static int 966static int
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp)
976{ 968{
977 int ret; 969 int ret;
978 970
979 ret = sysfs_create_bin_file(&chp->dev.kobj, 971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
980 &chp_measurement_chars_attr);
981 if (ret) 972 if (ret)
982 return ret; 973 return ret;
983 ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); 974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
984 if (ret) 975 if (ret)
985 sysfs_remove_bin_file(&chp->dev.kobj, 976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
986 &chp_measurement_chars_attr);
987 return ret; 977 return ret;
988} 978}
989 979
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1042 u32 : 4; 1032 u32 : 4;
1043 u32 fmt : 4; 1033 u32 fmt : 4;
1044 u32 : 16; 1034 u32 : 16;
1045 } *secm_area; 1035 } __attribute__ ((packed)) *secm_area;
1046 int ret, ccode; 1036 int ret, ccode;
1047 1037
1048 secm_area = page; 1038 secm_area = page;
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid,
1253 struct chsc_header response; 1243 struct chsc_header response;
1254 u32 zeroes2; 1244 u32 zeroes2;
1255 struct channel_path_desc desc; 1245 struct channel_path_desc desc;
1256 } *scpd_area; 1246 } __attribute__ ((packed)) *scpd_area;
1257 1247
1258 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1248 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1259 if (!scpd_area) 1249 if (!scpd_area)
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1350 u32 cmg : 8; 1340 u32 cmg : 8;
1351 u32 zeroes3; 1341 u32 zeroes3;
1352 u32 data[NR_MEASUREMENT_CHARS]; 1342 u32 data[NR_MEASUREMENT_CHARS];
1353 } *scmc_area; 1343 } __attribute__ ((packed)) *scmc_area;
1354 1344
1355 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1345 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1356 if (!scmc_area) 1346 if (!scmc_area)
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code)
1517 u32 reserved5:4; 1507 u32 reserved5:4;
1518 u32 format2:4; 1508 u32 format2:4;
1519 u32 reserved6:24; 1509 u32 reserved6:24;
1520 } *sda_area; 1510 } __attribute__ ((packed)) *sda_area;
1521 1511
1522 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); 1512 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1523 if (!sda_area) 1513 if (!sda_area)
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void)
1569 u32 reserved4; 1559 u32 reserved4;
1570 u32 general_char[510]; 1560 u32 general_char[510];
1571 u32 chsc_char[518]; 1561 u32 chsc_char[518];
1572 } *scsc_area; 1562 } __attribute__ ((packed)) *scsc_area;
1573 1563
1574 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1564 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1575 if (!scsc_area) { 1565 if (!scsc_area) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index a259245780ae..0fb2b024208f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -10,17 +10,17 @@
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
13}; 13} __attribute__ ((packed));
14 14
15#define NR_MEASUREMENT_CHARS 5 15#define NR_MEASUREMENT_CHARS 5
16struct cmg_chars { 16struct cmg_chars {
17 u32 values[NR_MEASUREMENT_CHARS]; 17 u32 values[NR_MEASUREMENT_CHARS];
18}; 18} __attribute__ ((packed));
19 19
20#define NR_MEASUREMENT_ENTRIES 8 20#define NR_MEASUREMENT_ENTRIES 8
21struct cmg_entry { 21struct cmg_entry {
22 u32 values[NR_MEASUREMENT_ENTRIES]; 22 u32 values[NR_MEASUREMENT_ENTRIES];
23}; 23} __attribute__ ((packed));
24 24
25struct channel_path_desc { 25struct channel_path_desc {
26 u8 flags; 26 u8 flags;
@@ -31,7 +31,7 @@ struct channel_path_desc {
31 u8 zeroes; 31 u8 zeroes;
32 u8 chla; 32 u8 chla;
33 u8 chpp; 33 u8 chpp;
34}; 34} __attribute__ ((packed));
35 35
36struct channel_path { 36struct channel_path {
37 int id; 37 int id;
@@ -47,6 +47,9 @@ struct channel_path {
47extern void s390_process_css( void ); 47extern void s390_process_css( void );
48extern void chsc_validate_chpids(struct subchannel *); 48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int); 49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
50 53
51struct css_general_char { 54struct css_general_char {
52 u64 : 41; 55 u64 : 41;
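The __attribute__ ((packed)) annotations added throughout chsc.c and chsc.h pin the struct layout to the hardware-defined control-block format. A runnable userspace illustration of what the attribute changes — the fields here are made up for the demonstration:

/* Without 'packed' the compiler may insert padding, so the in-memory layout
 * no longer matches a byte-exact, hardware-defined block. */
#include <stdio.h>
#include <stdint.h>

struct unpacked_hdr {
	uint16_t length;
	uint16_t code;
	uint8_t  flags;
	uint32_t word;		/* may be preceded by 3 bytes of padding */
};

struct packed_hdr {
	uint16_t length;
	uint16_t code;
	uint8_t  flags;
	uint32_t word;		/* immediately follows 'flags' */
} __attribute__ ((packed));

int main(void)
{
	printf("unpacked: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct unpacked_hdr), sizeof(struct packed_hdr));
	return 0;
}

On common ABIs the unpacked variant reports 12 bytes and the packed one 9, which is why response areas that must mirror the CHSC block format carry the attribute.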
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ae1bf231d089..b3a56dc5f68a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch)
122 * Use tpi to get a pending interrupt, call the interrupt handler and 122 * Use tpi to get a pending interrupt, call the interrupt handler and
123 * return a pointer to the subchannel structure. 123 * return a pointer to the subchannel structure.
124 */ 124 */
125static inline int 125static int
126cio_tpi(void) 126cio_tpi(void)
127{ 127{
128 struct tpi_info *tpi_info; 128 struct tpi_info *tpi_info;
@@ -152,7 +152,7 @@ cio_tpi(void)
152 return 1; 152 return 1;
153} 153}
154 154
155static inline int 155static int
156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
157{ 157{
158 char dbf_text[15]; 158 char dbf_text[15];
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
585 * This device must not be known to Linux. So we simply 585 * This device must not be known to Linux. So we simply
586 * say that there is no device and return ENODEV. 586 * say that there is no device and return ENODEV.
587 */ 587 */
588 CIO_MSG_EVENT(0, "Blacklisted device detected " 588 CIO_MSG_EVENT(4, "Blacklisted device detected "
589 "at devno %04X, subchannel set %x\n", 589 "at devno %04X, subchannel set %x\n",
590 sch->schib.pmcw.dev, sch->schid.ssid); 590 sch->schib.pmcw.dev, sch->schid.ssid);
591 err = -ENODEV; 591 err = -ENODEV;
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs)
646 * Make sure that the i/o interrupt did not "overtake" 646 * Make sure that the i/o interrupt did not "overtake"
647 * the last HZ timer interrupt. 647 * the last HZ timer interrupt.
648 */ 648 */
649 account_ticks(); 649 account_ticks(S390_lowcore.int_clock);
650 /* 650 /*
651 * Get interrupt information from lowcore 651 * Get interrupt information from lowcore
652 */ 652 */
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void)
832} 832}
833 833
834#endif 834#endif
835static inline int 835static int
836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
837{ 837{
838 int retry, cc; 838 int retry, cc;
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
850 return -EBUSY; /* uhm... */ 850 return -EBUSY; /* uhm... */
851} 851}
852 852
853static inline int 853/* we can't use the normal udelay here, since it enables external interrupts */
854
855static void udelay_reset(unsigned long usecs)
856{
857 uint64_t start_cc, end_cc;
858
859 asm volatile ("STCK %0" : "=m" (start_cc));
860 do {
861 cpu_relax();
862 asm volatile ("STCK %0" : "=m" (end_cc));
863 } while (((end_cc - start_cc)/4096) < usecs);
864}
865
866static int
854__clear_subchannel_easy(struct subchannel_id schid) 867__clear_subchannel_easy(struct subchannel_id schid)
855{ 868{
856 int retry; 869 int retry;
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid)
865 if (schid_equal(&ti.schid, &schid)) 878 if (schid_equal(&ti.schid, &schid))
866 return 0; 879 return 0;
867 } 880 }
868 udelay(100); 881 udelay_reset(100);
869 } 882 }
870 return -EBUSY; 883 return -EBUSY;
871} 884}
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
882 int rc; 895 int rc;
883 896
884 pgm_check_occured = 0; 897 pgm_check_occured = 0;
885 s390_reset_pgm_handler = cio_reset_pgm_check_handler; 898 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
886 rc = stsch(schid, addr); 899 rc = stsch(schid, addr);
887 s390_reset_pgm_handler = NULL; 900 s390_base_pgm_handler_fn = NULL;
888 901
889 /* The program check handler could have changed pgm_check_occured */ 902 /* The program check handler could have changed pgm_check_occured. */
890 barrier(); 903 barrier();
891 904
892 if (pgm_check_occured) 905 if (pgm_check_occured)
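stsch_reset() above installs a temporary program-check handler around a single stsch() and then inspects a flag the handler may have set; the barrier() keeps the compiler from caching that flag across the call. A hedged userspace analogue of probing with a temporary fault handler, using sigsetjmp and SIGSEGV in place of a program check:

/* Try to read *addr; return 0 on success, -1 if the access faulted. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf probe_env;

static void probe_fault(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);	/* unwind out of the faulting access */
}

static int probe_read(const volatile int *addr, int *value)
{
	struct sigaction sa, old;
	int rc = 0;

	sa.sa_handler = probe_fault;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, &old);		/* install temporary handler */
	if (sigsetjmp(probe_env, 1) == 0)
		*value = *addr;			/* may fault */
	else
		rc = -1;
	sigaction(SIGSEGV, &old, NULL);		/* restore previous handler */
	return rc;
}

int main(void)
{
	int v;
	printf("probe NULL -> %d\n", probe_read(NULL, &v));
	return 0;
}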
@@ -944,7 +957,7 @@ static void css_reset(void)
944 /* Reset subchannels. */ 957 /* Reset subchannels. */
945 for_each_subchannel(__shutdown_subchannel_easy, NULL); 958 for_each_subchannel(__shutdown_subchannel_easy, NULL);
946 /* Reset channel paths. */ 959 /* Reset channel paths. */
947 s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; 960 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
948 /* Enable channel report machine checks. */ 961 /* Enable channel report machine checks. */
949 __ctl_set_bit(14, 28); 962 __ctl_set_bit(14, 28);
950 /* Temporarily reenable machine checks. */ 963 /* Temporarily reenable machine checks. */
@@ -969,7 +982,7 @@ static void css_reset(void)
969 local_mcck_disable(); 982 local_mcck_disable();
970 /* Disable channel report machine checks. */ 983 /* Disable channel report machine checks. */
971 __ctl_clear_bit(14, 28); 984 __ctl_clear_bit(14, 28);
972 s390_reset_mcck_handler = NULL; 985 s390_base_mcck_handler_fn = NULL;
973} 986}
974 987
975static struct reset_call css_reset_call = { 988static struct reset_call css_reset_call = {
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 828b2d334f0a..90b22faabbf7 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -519,8 +519,8 @@ struct cmb {
519/* insert a single device into the cmb_area list 519/* insert a single device into the cmb_area list
520 * called with cmb_area.lock held from alloc_cmb 520 * called with cmb_area.lock held from alloc_cmb
521 */ 521 */
522static inline int alloc_cmb_single (struct ccw_device *cdev, 522static int alloc_cmb_single(struct ccw_device *cdev,
523 struct cmb_data *cmb_data) 523 struct cmb_data *cmb_data)
524{ 524{
525 struct cmb *cmb; 525 struct cmb *cmb;
526 struct ccw_device_private *node; 526 struct ccw_device_private *node;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9d6c02446863..fe0ace7aece8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1];
30 30
31int css_characteristics_avail = 0; 31int css_characteristics_avail = 0;
32 32
33inline int 33int
34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
35{ 35{
36 struct subchannel_id schid; 36 struct subchannel_id schid;
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev)
108 } 108 }
109} 109}
110 110
111extern int css_get_ssd_info(struct subchannel *sch);
112
113
114int css_sch_device_register(struct subchannel *sch) 111int css_sch_device_register(struct subchannel *sch)
115{ 112{
116 int ret; 113 int ret;
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
187 return dev ? to_subchannel(dev) : NULL; 184 return dev ? to_subchannel(dev) : NULL;
188} 185}
189 186
190static inline int css_get_subchannel_status(struct subchannel *sch) 187static int css_get_subchannel_status(struct subchannel *sch)
191{ 188{
192 struct schib schib; 189 struct schib schib;
193 190
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
299 /* Will be done on the slow path. */ 296 /* Will be done on the slow path. */
300 return -EAGAIN; 297 return -EAGAIN;
301 } 298 }
302 if (stsch(schid, &schib) || !schib.pmcw.dnv) { 299 if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
303 /* Unusable - ignore. */ 300 /* Unusable - ignore. */
304 return 0; 301 return 0;
305 } 302 }
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused)
417 need_reprobe); 414 need_reprobe);
418} 415}
419 416
420DECLARE_WORK(css_reprobe_work, reprobe_all); 417static DECLARE_WORK(css_reprobe_work, reprobe_all);
421 418
422/* Schedule reprobing of all unregistered subchannels. */ 419/* Schedule reprobing of all unregistered subchannels. */
423void css_schedule_reprobe(void) 420void css_schedule_reprobe(void)
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
578 575
579static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); 576static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
580 577
581static inline int __init setup_css(int nr) 578static int __init setup_css(int nr)
582{ 579{
583 u32 tod_high; 580 u32 tod_high;
584 int ret; 581 int ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 3464c5b875c4..ca2bab932a8a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 144extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int);
147extern void css_reiterate_subchannels(void);
146 148
147#define __MAX_SUBCHANNEL 65535 149#define __MAX_SUBCHANNEL 65535
148#define __MAX_SSID 3 150#define __MAX_SSID 3
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 803579053c2f..e322111fb369 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type;
138 138
139static int io_subchannel_probe (struct subchannel *); 139static int io_subchannel_probe (struct subchannel *);
140static int io_subchannel_remove (struct subchannel *); 140static int io_subchannel_remove (struct subchannel *);
141void io_subchannel_irq (struct device *);
142static int io_subchannel_notify(struct device *, int); 141static int io_subchannel_notify(struct device *, int);
143static void io_subchannel_verify(struct device *); 142static void io_subchannel_verify(struct device *);
144static void io_subchannel_ioterm(struct device *); 143static void io_subchannel_ioterm(struct device *);
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
235 ssize_t ret = 0; 234 ssize_t ret = 0;
236 int chp; 235 int chp;
237 236
238 if (ssd) 237 for (chp = 0; chp < 8; chp++)
239 for (chp = 0; chp < 8; chp++) 238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
240 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
241 else
242 ret += sprintf (buf, "n/a");
243 ret += sprintf (buf+ret, "\n"); 239 ret += sprintf (buf+ret, "\n");
244 return min((ssize_t)PAGE_SIZE, ret); 240 return min((ssize_t)PAGE_SIZE, ret);
245} 241}
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = {
552 .attrs = ccwdev_attrs, 548 .attrs = ccwdev_attrs,
553}; 549};
554 550
555static inline int 551static int
556device_add_files (struct device *dev) 552device_add_files (struct device *dev)
557{ 553{
558 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
559} 555}
560 556
561static inline void 557static void
562device_remove_files(struct device *dev) 558device_remove_files(struct device *dev)
563{ 559{
564 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); 560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 29db6341d632..b66338b76579 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work;
74extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
75extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
76 76
77void io_subchannel_irq (struct device *pdev);
77void io_subchannel_recog_done(struct ccw_device *cdev); 78void io_subchannel_recog_done(struct ccw_device *cdev);
78 79
79int ccw_device_cancel_halt_clear(struct ccw_device *); 80int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *);
118/* qdio needs this. */ 119/* qdio needs this. */
119void ccw_device_set_timeout(struct ccw_device *, int); 120void ccw_device_set_timeout(struct ccw_device *, int);
120extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 121extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
122extern struct bus_type ccw_bus_type;
121 123
122/* Channel measurement facility related */ 124/* Channel measurement facility related */
123void retry_set_schib(struct ccw_device *cdev); 125void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index eed14572fc3b..51238e7555bb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
206 * been varied online on the SE so we have to find out by magic (i. e. driving 206 * been varied online on the SE so we have to find out by magic (i. e. driving
207 * the channel subsystem to device selection and updating our path masks). 207 * the channel subsystem to device selection and updating our path masks).
208 */ 208 */
209static inline void 209static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 210__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 211{
212 int mask, i; 212 int mask, i;
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
387 put_device (&cdev->dev); 387 put_device (&cdev->dev);
388} 388}
389 389
390static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) 390static int cmp_pgid(struct pgid *p1, struct pgid *p2)
391{ 391{
392 char *c1; 392 char *c1;
393 char *c2; 393 char *c2;
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
842call_handler_unsol: 842call_handler_unsol:
843 if (cdev->handler) 843 if (cdev->handler)
844 cdev->handler (cdev, 0, irb); 844 cdev->handler (cdev, 0, irb);
845 if (cdev->private->flags.doverify)
846 ccw_device_online_verify(cdev, 0);
845 return; 847 return;
846 } 848 }
847 /* Accumulate status and find out if a basic sense is needed. */ 849 /* Accumulate status and find out if a basic sense is needed. */
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
892/* 894/*
893 * Got an interrupt for a basic sense. 895 * Got an interrupt for a basic sense.
894 */ 896 */
895void 897static void
896ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) 898ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
897{ 899{
898 struct irb *irb; 900 struct irb *irb;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d269607336ec..d7b25b8f71d2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
302 wake_up(&cdev->private->wait_q); 302 wake_up(&cdev->private->wait_q);
303} 303}
304 304
305static inline int 305static int
306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) 306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
307{ 307{
308 int ret; 308 int ret;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index bdcf930f7beb..6b1caea622ea 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -25,7 +25,7 @@
25 * Check for any kind of channel or interface control check but don't 25 * Check for any kind of channel or interface control check but don't
26 * issue the message for the console device 26 * issue the message for the console device
27 */ 27 */
28static inline void 28static void
29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
30{ 30{
31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
72/* 72/*
73 * Copy valid bits from the extended control word to device irb. 73 * Copy valid bits from the extended control word to device irb.
74 */ 74 */
75static inline void 75static void
76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) 76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
77{ 77{
78 /* 78 /*
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
94/* 94/*
95 * Check if extended status word is valid. 95 * Check if extended status word is valid.
96 */ 96 */
97static inline int 97static int
98ccw_device_accumulate_esw_valid(struct irb *irb) 98ccw_device_accumulate_esw_valid(struct irb *irb)
99{ 99{
100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb)
109/* 109/*
110 * Copy valid bits from the extended status word to device irb. 110 * Copy valid bits from the extended status word to device irb.
111 */ 111 */
112static inline void 112static void
113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) 113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
114{ 114{
115 struct irb *cdev_irb; 115 struct irb *cdev_irb;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 6fd1940842eb..d726cd5777de 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL");
66/******************** HERE WE GO ***********************************/ 66/******************** HERE WE GO ***********************************/
67 67
68static const char version[] = "QDIO base support version 2"; 68static const char version[] = "QDIO base support version 2";
69extern struct bus_type ccw_bus_type;
70 69
71static int qdio_performance_stats = 0; 70static int qdio_performance_stats = 0;
72static int proc_perf_file_registration; 71static int proc_perf_file_registration;
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q)
138} 137}
139 138
140/*check ccq */ 139/*check ccq */
141static inline int 140static int
142qdio_check_ccq(struct qdio_q *q, unsigned int ccq) 141qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
143{ 142{
144 char dbf_text[15]; 143 char dbf_text[15];
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
153 return -EIO; 152 return -EIO;
154} 153}
155/* EQBS: extract buffer states */ 154/* EQBS: extract buffer states */
156static inline int 155static int
157qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 156qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
158 unsigned int *start, unsigned int *cnt) 157 unsigned int *start, unsigned int *cnt)
159{ 158{
@@ -188,7 +187,7 @@ again:
188} 187}
189 188
190/* SQBS: set buffer states */ 189/* SQBS: set buffer states */
191static inline int 190static int
192qdio_do_sqbs(struct qdio_q *q, unsigned char state, 191qdio_do_sqbs(struct qdio_q *q, unsigned char state,
193 unsigned int *start, unsigned int *cnt) 192 unsigned int *start, unsigned int *cnt)
194{ 193{
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns 314 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
316 * an access exception 315 * an access exception
317 */ 316 */
318static inline int 317static int
319qdio_siga_output(struct qdio_q *q) 318qdio_siga_output(struct qdio_q *q)
320{ 319{
321 int cc; 320 int cc;
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q)
349 return cc; 348 return cc;
350} 349}
351 350
352static inline int 351static int
353qdio_siga_input(struct qdio_q *q) 352qdio_siga_input(struct qdio_q *q)
354{ 353{
355 int cc; 354 int cc;
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void)
421 tasklet_hi_schedule(&tiqdio_tasklet); 420 tasklet_hi_schedule(&tiqdio_tasklet);
422} 421}
423 422
424static inline void 423static void
425qdio_mark_tiq(struct qdio_q *q) 424qdio_mark_tiq(struct qdio_q *q)
426{ 425{
427 unsigned long flags; 426 unsigned long flags;
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q)
471 tasklet_schedule(&q->tasklet); 470 tasklet_schedule(&q->tasklet);
472} 471}
473 472
474static inline int 473static int
475qdio_stop_polling(struct qdio_q *q) 474qdio_stop_polling(struct qdio_q *q)
476{ 475{
477#ifdef QDIO_USE_PROCESSING_STATE 476#ifdef QDIO_USE_PROCESSING_STATE
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q)
525 * sophisticated locking outside of unmark_q, so that we don't need to 524 * sophisticated locking outside of unmark_q, so that we don't need to
526 * disable the interrupts :-) 525 * disable the interrupts :-)
527*/ 526*/
528static inline void 527static void
529qdio_unmark_q(struct qdio_q *q) 528qdio_unmark_q(struct qdio_q *q)
530{ 529{
531 unsigned long flags; 530 unsigned long flags;
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
691 return q->first_to_check; 690 return q->first_to_check;
692} 691}
693 692
694static inline int 693static int
695qdio_get_outbound_buffer_frontier(struct qdio_q *q) 694qdio_get_outbound_buffer_frontier(struct qdio_q *q)
696{ 695{
697 struct qdio_irq *irq; 696 struct qdio_irq *irq;
@@ -774,7 +773,7 @@ out:
774} 773}
775 774
776/* all buffers are processed */ 775/* all buffers are processed */
777static inline int 776static int
778qdio_is_outbound_q_done(struct qdio_q *q) 777qdio_is_outbound_q_done(struct qdio_q *q)
779{ 778{
780 int no_used; 779 int no_used;
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q)
796 return (no_used==0); 795 return (no_used==0);
797} 796}
798 797
799static inline int 798static int
800qdio_has_outbound_q_moved(struct qdio_q *q) 799qdio_has_outbound_q_moved(struct qdio_q *q)
801{ 800{
802 int i; 801 int i;
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q)
816 } 815 }
817} 816}
818 817
819static inline void 818static void
820qdio_kick_outbound_q(struct qdio_q *q) 819qdio_kick_outbound_q(struct qdio_q *q)
821{ 820{
822 int result; 821 int result;
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q)
905 } 904 }
906} 905}
907 906
908static inline void 907static void
909qdio_kick_outbound_handler(struct qdio_q *q) 908qdio_kick_outbound_handler(struct qdio_q *q)
910{ 909{
911 int start, end, real_end, count; 910 int start, end, real_end, count;
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q)
942 q->error_status_flags=0; 941 q->error_status_flags=0;
943} 942}
944 943
945static inline void 944static void
946__qdio_outbound_processing(struct qdio_q *q) 945__qdio_outbound_processing(struct qdio_q *q)
947{ 946{
948 int siga_attempts; 947 int siga_attempts;
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q)
1002/************************* INBOUND ROUTINES *******************************/ 1001/************************* INBOUND ROUTINES *******************************/
1003 1002
1004 1003
1005static inline int 1004static int
1006qdio_get_inbound_buffer_frontier(struct qdio_q *q) 1005qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1007{ 1006{
1008 struct qdio_irq *irq; 1007 struct qdio_irq *irq;
@@ -1133,7 +1132,7 @@ out:
1133 return q->first_to_check; 1132 return q->first_to_check;
1134} 1133}
1135 1134
1136static inline int 1135static int
1137qdio_has_inbound_q_moved(struct qdio_q *q) 1136qdio_has_inbound_q_moved(struct qdio_q *q)
1138{ 1137{
1139 int i; 1138 int i;
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
1167} 1166}
1168 1167
1169/* means, no more buffers to be filled */ 1168/* means, no more buffers to be filled */
1170static inline int 1169static int
1171tiqdio_is_inbound_q_done(struct qdio_q *q) 1170tiqdio_is_inbound_q_done(struct qdio_q *q)
1172{ 1171{
1173 int no_used; 1172 int no_used;
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
1228 return 0; 1227 return 0;
1229} 1228}
1230 1229
1231static inline int 1230static int
1232qdio_is_inbound_q_done(struct qdio_q *q) 1231qdio_is_inbound_q_done(struct qdio_q *q)
1233{ 1232{
1234 int no_used; 1233 int no_used;
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q)
1296 } 1295 }
1297} 1296}
1298 1297
1299static inline void 1298static void
1300qdio_kick_inbound_handler(struct qdio_q *q) 1299qdio_kick_inbound_handler(struct qdio_q *q)
1301{ 1300{
1302 int count, start, end, real_end, i; 1301 int count, start, end, real_end, i;
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
1343 } 1342 }
1344} 1343}
1345 1344
1346static inline void 1345static void
1347__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) 1346__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1348{ 1347{
1349 struct qdio_irq *irq_ptr; 1348 struct qdio_irq *irq_ptr;
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q)
1442 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); 1441 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1443} 1442}
1444 1443
1445static inline void 1444static void
1446__qdio_inbound_processing(struct qdio_q *q) 1445__qdio_inbound_processing(struct qdio_q *q)
1447{ 1446{
1448 int q_laps=0; 1447 int q_laps=0;
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q)
1493/************************* MAIN ROUTINES *******************************/ 1492/************************* MAIN ROUTINES *******************************/
1494 1493
1495#ifdef QDIO_USE_PROCESSING_STATE 1494#ifdef QDIO_USE_PROCESSING_STATE
1496static inline int 1495static int
1497tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) 1496tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1498{ 1497{
1499 if (!q) { 1498 if (!q) {
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1545} 1544}
1546#endif /* QDIO_USE_PROCESSING_STATE */ 1545#endif /* QDIO_USE_PROCESSING_STATE */
1547 1546
1548static inline void 1547static void
1549tiqdio_inbound_checks(void) 1548tiqdio_inbound_checks(void)
1550{ 1549{
1551 struct qdio_q *q; 1550 struct qdio_q *q;
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1949 mb(); 1948 mb();
1950} 1949}
1951 1950
1952static inline void 1951static void
1953qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) 1952qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1954{ 1953{
1955 char dbf_text[15]; 1954 char dbf_text[15];
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1966 1965
1967} 1966}
1968 1967
1969static inline void 1968static void
1970qdio_handle_pci(struct qdio_irq *irq_ptr) 1969qdio_handle_pci(struct qdio_irq *irq_ptr)
1971{ 1970{
1972 int i; 1971 int i;
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
2002 2001
2003static void qdio_establish_handle_irq(struct ccw_device*, int, int); 2002static void qdio_establish_handle_irq(struct ccw_device*, int, int);
2004 2003
2005static inline void 2004static void
2006qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, 2005qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
2007 int cstat, int dstat) 2006 int cstat, int dstat)
2008{ 2007{
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2229 return cc; 2228 return cc;
2230} 2229}
2231 2230
2232static inline void 2231static void
2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, 2232qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token) 2233 unsigned long token)
2235{ 2234{
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev)
2740 return 0; 2739 return 0;
2741} 2740}
2742 2741
2743static inline void 2742static void
2744qdio_allocate_do_dbf(struct qdio_initialize *init_data) 2743qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2745{ 2744{
2746 char dbf_text[20]; /* if a printf printed out more than 8 chars */ 2745 char dbf_text[20]; /* if a printf printed out more than 8 chars */
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2773 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); 2772 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2774} 2773}
2775 2774
2776static inline void 2775static void
2777qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) 2776qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2778{ 2777{
2779 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; 2778 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2792 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; 2791 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2793} 2792}
2794 2793
2795static inline void 2794static void
2796qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, 2795qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2797 int j, int iqfmt) 2796 int j, int iqfmt)
2798{ 2797{
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2813} 2812}
2814 2813
2815 2814
2816static inline void 2815static void
2817qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) 2816qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2818{ 2817{
2819 int i; 2818 int i;
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2839 } 2838 }
2840} 2839}
2841 2840
2842static inline void 2841static void
2843qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) 2842qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2844{ 2843{
2845 int i; 2844 int i;
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2865 } 2864 }
2866} 2865}
2867 2866
2868static inline int 2867static int
2869qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, 2868qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2870 int dstat) 2869 int dstat)
2871{ 2870{
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data)
3014 return 0; 3013 return 0;
3015} 3014}
3016 3015
3017int qdio_fill_irq(struct qdio_initialize *init_data) 3016static int qdio_fill_irq(struct qdio_initialize *init_data)
3018{ 3017{
3019 int i; 3018 int i;
3020 char dbf_text[15]; 3019 char dbf_text[15];
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
3367} 3366}
3368 3367
3369/* buffers filled forwards again to make Rick happy */ 3368/* buffers filled forwards again to make Rick happy */
3370static inline void 3369static void
3371qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3370qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3372 unsigned int count, struct qdio_buffer *buffers) 3371 unsigned int count, struct qdio_buffer *buffers)
3373{ 3372{
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3386 } 3385 }
3387} 3386}
3388 3387
3389static inline void 3388static void
3390qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3389qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3391 unsigned int count, struct qdio_buffer *buffers) 3390 unsigned int count, struct qdio_buffer *buffers)
3392{ 3391{
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3407 } 3406 }
3408} 3407}
3409 3408
3410static inline void 3409static void
3411do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, 3410do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3412 unsigned int qidx, unsigned int count, 3411 unsigned int qidx, unsigned int count,
3413 struct qdio_buffer *buffers) 3412 struct qdio_buffer *buffers)
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3443 qdio_mark_q(q); 3442 qdio_mark_q(q);
3444} 3443}
3445 3444
3446static inline void 3445static void
3447do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, 3446do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3448 unsigned int qidx, unsigned int count, 3447 unsigned int qidx, unsigned int count,
3449 struct qdio_buffer *buffers) 3448 struct qdio_buffer *buffers)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 81b5899f4010..c7d1355237b6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev)
465 * Flush all requests from the request/pending queue of an AP device. 465 * Flush all requests from the request/pending queue of an AP device.
466 * @ap_dev: pointer to the AP device. 466 * @ap_dev: pointer to the AP device.
467 */ 467 */
468static inline void __ap_flush_queue(struct ap_device *ap_dev) 468static void __ap_flush_queue(struct ap_device *ap_dev)
469{ 469{
470 struct ap_message *ap_msg, *next; 470 struct ap_message *ap_msg, *next;
471 471
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
587/** 587/**
588 * Pick one of the 16 ap domains. 588 * Pick one of the 16 ap domains.
589 */ 589 */
590static inline int ap_select_domain(void) 590static int ap_select_domain(void)
591{ 591{
592 int queue_depth, device_type, count, max_count, best_domain; 592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j; 593 int rc, i, j;
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void)
825 * required, bit 2^1 is set if the poll timer needs to get armed 825 * required, bit 2^1 is set if the poll timer needs to get armed
826 * Returns 0 if the device is still present, -ENODEV if not. 826 * Returns 0 if the device is still present, -ENODEV if not.
827 */ 827 */
828static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 828static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
829{ 829{
830 struct ap_queue_status status; 830 struct ap_queue_status status;
831 struct ap_message *ap_msg; 831 struct ap_message *ap_msg;
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
872 * required, bit 2^1 is set if the poll timer needs to get armed 872 * required, bit 2^1 is set if the poll timer needs to get armed
873 * Returns 0 if the device is still present, -ENODEV if not. 873 * Returns 0 if the device is still present, -ENODEV if not.
874 */ 874 */
875static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 875static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
876{ 876{
877 struct ap_queue_status status; 877 struct ap_queue_status status;
878 struct ap_message *ap_msg; 878 struct ap_message *ap_msg;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1edc10a7a6f2..b9e59bc9435a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
791 return rc; 791 return rc;
792} 792}
793 793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 794static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg) 795 unsigned long arg)
796{ 796{
797 if (cmd == ICARSAMODEXPO) 797 if (cmd == ICARSAMODEXPO)
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = {
833 */ 833 */
834static struct proc_dir_entry *zcrypt_entry; 834static struct proc_dir_entry *zcrypt_entry;
835 835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, 836static int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len) 837 unsigned int len)
838{ 838{
839 int hl, i; 839 int hl, i;
840 840
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
845 return hl; 845 return hl;
846} 846}
847 847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, 848static int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len) 849 unsigned int len)
850{ 850{
851 int hl, inl, c, cx; 851 int hl, inl, c, cx;
852 852
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
865 return hl; 865 return hl;
866} 866}
867 867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr, 868static int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len) 869 unsigned char *addr, unsigned int len)
870{ 870{
871 int hl, inl, r, rx; 871 int hl, inl, r, rx;
872 872
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
885 return hl; 885 return hl;
886} 886}
887 887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, 888static int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len) 889 unsigned int *array, unsigned int len)
890{ 890{
891 int hl, r; 891 int hl, r;
892 892
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
943 zcrypt_qdepth_mask(workarea); 943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts", 944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES); 945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea); 946 zcrypt_perdev_reqcnt((int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts", 947 len += sprinthx4("Per-device successfully completed request counts",
948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
949 *eof = 1; 949 *eof = 1;
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 32e37014345c..818ffe05ac00 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
191 * 191 *
192 * Returns 0 on success or -EFAULT. 192 * Returns 0 on success or -EFAULT.
193 */ 193 */
194static inline int convert_type84(struct zcrypt_device *zdev, 194static int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply, 195 struct ap_message *reply,
196 char __user *outputdata, 196 char __user *outputdata,
197 unsigned int outputdatalength) 197 unsigned int outputdatalength)
198{ 198{
199 struct type84_hdr *t84h = reply->message; 199 struct type84_hdr *t84h = reply->message;
200 char *data; 200 char *data;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index b7153c1e15cd..252443b6bd1b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -709,7 +709,8 @@ out_free:
709 * PCIXCC/CEX2C device to the request distributor 709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer 710 * @xcRB: pointer to the send_cprb request buffer
711 */ 711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) 712static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
713 struct ica_xcRB *xcRB)
713{ 714{
714 struct ap_message ap_msg; 715 struct ap_message ap_msg;
715 struct response_type resp_type = { 716 struct response_type resp_type = {
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 95f4e105cb96..7809a79feec7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL");
121#define DEBUG 121#define DEBUG
122#endif 122#endif
123 123
124 char debug_buffer[255]; 124static char debug_buffer[255];
125/** 125/**
126 * Debug Facility Stuff 126 * Debug Facility Stuff
127 */ 127 */
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch );
223/* Functions */ 223/* Functions */
224static int add_claw_reads(struct net_device *dev, 224static int add_claw_reads(struct net_device *dev,
225 struct ccwbk* p_first, struct ccwbk* p_last); 225 struct ccwbk* p_first, struct ccwbk* p_last);
226static void inline ccw_check_return_code (struct ccw_device *cdev, 226static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
227 int return_code); 227static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
228static void inline ccw_check_unit_check (struct chbk * p_ch,
229 unsigned char sense );
230static int find_link(struct net_device *dev, char *host_name, char *ws_name ); 228static int find_link(struct net_device *dev, char *host_name, char *ws_name );
231static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); 229static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
232static int init_ccw_bk(struct net_device *dev); 230static int init_ccw_bk(struct net_device *dev);
233static void probe_error( struct ccwgroup_device *cgdev); 231static void probe_error( struct ccwgroup_device *cgdev);
234static struct net_device_stats *claw_stats(struct net_device *dev); 232static struct net_device_stats *claw_stats(struct net_device *dev);
235static int inline pages_to_order_of_mag(int num_of_pages); 233static int pages_to_order_of_mag(int num_of_pages);
236static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); 234static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
237#ifdef DEBUG 235#ifdef DEBUG
238static void dumpit (char *buf, int len); 236static void dumpit (char *buf, int len);
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch )
1310* of magnitude get_free_pages() has an upper order of 9 * 1308* of magnitude get_free_pages() has an upper order of 9 *
1311*--------------------------------------------------------------------*/ 1309*--------------------------------------------------------------------*/
1312 1310
1313static int inline 1311static int
1314pages_to_order_of_mag(int num_of_pages) 1312pages_to_order_of_mag(int num_of_pages)
1315{ 1313{
1316 int order_of_mag=1; /* assume 2 pages */ 1314 int order_of_mag=1; /* assume 2 pages */
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1482 * * 1480 * *
1483 *-------------------------------------------------------------------*/ 1481 *-------------------------------------------------------------------*/
1484 1482
1485static void inline 1483static void
1486ccw_check_return_code(struct ccw_device *cdev, int return_code) 1484ccw_check_return_code(struct ccw_device *cdev, int return_code)
1487{ 1485{
1488#ifdef FUNCTRACE 1486#ifdef FUNCTRACE
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1529* ccw_check_unit_check * 1527* ccw_check_unit_check *
1530*--------------------------------------------------------------------*/ 1528*--------------------------------------------------------------------*/
1531 1529
1532static void inline 1530static void
1533ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) 1531ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1534{ 1532{
1535 struct net_device *dev = p_ch->ndev; 1533 struct net_device *dev = p_ch->ndev;
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 03cc263fe0da..5a84fbbc6611 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset)
369 * @param ch The channel where this skb has been received. 369 * @param ch The channel where this skb has been received.
370 * @param pskb The received skb. 370 * @param pskb The received skb.
371 */ 371 */
372static __inline__ void 372static void
373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) 373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374{ 374{
375 struct net_device *dev = ch->netdev; 375 struct net_device *dev = ch->netdev;
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
512 * @param ch The channel, the error belongs to. 512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect. 513 * @param return_code The error code to inspect.
514 */ 514 */
515static void inline 515static void
516ccw_check_return_code(struct channel *ch, int return_code, char *msg) 516ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517{ 517{
518 DBF_TEXT(trace, 5, __FUNCTION__); 518 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg)
547 * @param ch The channel, the sense code belongs to. 547 * @param ch The channel, the sense code belongs to.
548 * @param sense The sense code to inspect. 548 * @param sense The sense code to inspect.
549 */ 549 */
550static void inline 550static void
551ccw_unit_check(struct channel *ch, unsigned char sense) 551ccw_unit_check(struct channel *ch, unsigned char sense)
552{ 552{
553 DBF_TEXT(trace, 5, __FUNCTION__); 553 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
603 } 603 }
604} 604}
605 605
606static __inline__ int 606static int
607ctc_checkalloc_buffer(struct channel *ch, int warn) 607ctc_checkalloc_buffer(struct channel *ch, int warn)
608{ 608{
609 DBF_TEXT(trace, 5, __FUNCTION__); 609 DBF_TEXT(trace, 5, __FUNCTION__);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index e965f03a7291..76728ae4b843 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = {
57 57
58static struct ccw_driver cu3088_driver; 58static struct ccw_driver cu3088_driver;
59 59
60struct device *cu3088_root_dev; 60static struct device *cu3088_root_dev;
61 61
62static ssize_t 62static ssize_t
63group_write(struct device_driver *drv, const char *buf, size_t count) 63group_write(struct device_driver *drv, const char *buf, size_t count)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index e5665b6743a1..b97dd15bdb9a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
828/** 828/**
829 * Emit buffer of a lan comand. 829 * Emit buffer of a lan comand.
830 */ 830 */
831void 831static void
832lcs_lancmd_timeout(unsigned long data) 832lcs_lancmd_timeout(unsigned long data)
833{ 833{
834 struct lcs_reply *reply, *list_reply, *r; 834 struct lcs_reply *reply, *list_reply, *r;
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1360 return 0; 1360 return 0;
1361} 1361}
1362 1362
1363void 1363static void
1364lcs_schedule_recovery(struct lcs_card *card) 1364lcs_schedule_recovery(struct lcs_card *card)
1365{ 1365{
1366 LCS_DBF_TEXT(2, trace, "startrec"); 1366 LCS_DBF_TEXT(2, trace, "startrec");
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
1990 1990
1991} 1991}
1992 1992
1993DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 1993static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
1994 1994
1995static ssize_t 1995static ssize_t
1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index d7d1cc0a5c8e..3346088f47e0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2053,7 +2053,7 @@ out_free_ndev:
2053 return ret; 2053 return ret;
2054} 2054}
2055 2055
2056DRIVER_ATTR(connection, 0200, NULL, conn_write); 2056static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2057 2057
2058static ssize_t 2058static ssize_t
2059remove_write (struct device_driver *drv, const char *buf, size_t count) 2059remove_write (struct device_driver *drv, const char *buf, size_t count)
@@ -2112,7 +2112,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2112 return -EINVAL; 2112 return -EINVAL;
2113} 2113}
2114 2114
2115DRIVER_ATTR(remove, 0200, NULL, remove_write); 2115static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2116 2116
2117static void 2117static void
2118netiucv_banner(void) 2118netiucv_banner(void)
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 6bb558a9a032..7c735e1fe063 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 49 return buffers_needed;
50} 50}
51 51
52static inline void 52static void
53qeth_eddp_free_context(struct qeth_eddp_context *ctx) 53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 54{
55 int i; 55 int i;
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 91 }
92} 92}
93 93
94static inline int 94static int
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
96 struct qeth_eddp_context *ctx) 96 struct qeth_eddp_context *ctx)
97{ 97{
@@ -196,7 +196,7 @@ out:
196 return flush_cnt; 196 return flush_cnt;
197} 197}
198 198
199static inline void 199static void
200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
201 struct qeth_eddp_data *eddp, int data_len) 201 struct qeth_eddp_data *eddp, int data_len)
202{ 202{
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
256 ctx->offset += eddp->thl; 256 ctx->offset += eddp->thl;
257} 257}
258 258
259static inline void 259static void
260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
261 __wsum *hcsum) 261 __wsum *hcsum)
262{ 262{
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
302 } 302 }
303} 303}
304 304
305static inline void 305static void
306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
307 struct qeth_eddp_data *eddp, int data_len, 307 struct qeth_eddp_data *eddp, int data_len,
308 __wsum hcsum) 308 __wsum hcsum)
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
350} 350}
351 351
352static inline __wsum 352static __wsum
353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
354{ 354{
355 __wsum phcsum; /* pseudo header checksum */ 355 __wsum phcsum; /* pseudo header checksum */
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
364} 364}
365 365
366static inline __wsum 366static __wsum
367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
368{ 368{
369 __be32 proto; 369 __be32 proto;
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
381 return phcsum; 381 return phcsum;
382} 382}
383 383
384static inline struct qeth_eddp_data * 384static struct qeth_eddp_data *
385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
386{ 386{
387 struct qeth_eddp_data *eddp; 387 struct qeth_eddp_data *eddp;
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
399 return eddp; 399 return eddp;
400} 400}
401 401
402static inline void 402static void
403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
404 struct qeth_eddp_data *eddp) 404 struct qeth_eddp_data *eddp)
405{ 405{
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
464 } 464 }
465} 465}
466 466
467static inline int 467static int
468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
469 struct sk_buff *skb, struct qeth_hdr *qhdr) 469 struct sk_buff *skb, struct qeth_hdr *qhdr)
470{ 470{
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
505 return 0; 505 return 0;
506} 506}
507 507
508static inline void 508static void
509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
510 int hdr_len) 510 int hdr_len)
511{ 511{
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
529 (skb_shinfo(skb)->gso_segs + 1); 529 (skb_shinfo(skb)->gso_segs + 1);
530} 530}
531 531
532static inline struct qeth_eddp_context * 532static struct qeth_eddp_context *
533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
534 int hdr_len) 534 int hdr_len)
535{ 535{
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
581 return ctx; 581 return ctx;
582} 582}
583 583
584static inline struct qeth_eddp_context * 584static struct qeth_eddp_context *
585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
586 struct qeth_hdr *qhdr) 586 struct qeth_hdr *qhdr)
587{ 587{
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
625 } 625 }
626 return NULL; 626 return NULL;
627} 627}
628
629
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index d2efa5ff125d..2257e45594b3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
651 return 0; 651 return 0;
652} 652}
653 653
654static inline int 654static int
655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, 655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
656 int same_type) 656 int same_type)
657{ 657{
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
795 return rc; 795 return rc;
796} 796}
797 797
798static inline void 798static void
799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) 799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
800{ 800{
801 struct qeth_ipaddr *addr, *tmp; 801 struct qeth_ipaddr *addr, *tmp;
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *);
882static void qeth_add_multicast_ipv6(struct qeth_card *); 882static void qeth_add_multicast_ipv6(struct qeth_card *);
883#endif 883#endif
884 884
885static inline int 885static int
886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) 886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
887{ 887{
888 unsigned long flags; 888 unsigned long flags;
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
920 wake_up(&card->wait_q); 920 wake_up(&card->wait_q);
921} 921}
922 922
923static inline int 923static int
924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
925{ 925{
926 unsigned long flags; 926 unsigned long flags;
@@ -1764,9 +1764,9 @@ out:
1764 qeth_release_buffer(channel,iob); 1764 qeth_release_buffer(channel,iob);
1765} 1765}
1766 1766
1767static inline void 1767static void
1768qeth_prepare_control_data(struct qeth_card *card, int len, 1768qeth_prepare_control_data(struct qeth_card *card, int len,
1769struct qeth_cmd_buffer *iob) 1769 struct qeth_cmd_buffer *iob)
1770{ 1770{
1771 qeth_setup_ccw(&card->write,iob->data,len); 1771 qeth_setup_ccw(&card->write,iob->data,len);
1772 iob->callback = qeth_release_buffer; 1772 iob->callback = qeth_release_buffer;
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2160 return 0; 2160 return 0;
2161} 2161}
2162 2162
2163static inline struct sk_buff * 2163static struct sk_buff *
2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) 2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2165{ 2165{
2166 struct sk_buff* skb; 2166 struct sk_buff* skb;
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2179 return skb; 2179 return skb;
2180} 2180}
2181 2181
2182static inline struct sk_buff * 2182static struct sk_buff *
2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, 2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2184 struct qdio_buffer_element **__element, int *__offset, 2184 struct qdio_buffer_element **__element, int *__offset,
2185 struct qeth_hdr **hdr) 2185 struct qeth_hdr **hdr)
@@ -2264,7 +2264,7 @@ no_mem:
2264 return NULL; 2264 return NULL;
2265} 2265}
2266 2266
2267static inline __be16 2267static __be16
2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev) 2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2269{ 2269{
2270 struct qeth_card *card; 2270 struct qeth_card *card;
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2297 return htons(ETH_P_802_2); 2297 return htons(ETH_P_802_2);
2298} 2298}
2299 2299
2300static inline void 2300static void
2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, 2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2302 struct qeth_hdr *hdr) 2302 struct qeth_hdr *hdr)
2303{ 2303{
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2351 fake_llc->ethertype = ETH_P_IP; 2351 fake_llc->ethertype = ETH_P_IP;
2352} 2352}
2353 2353
2354static inline void 2354static void
2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, 2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2356 struct qeth_hdr *hdr) 2356 struct qeth_hdr *hdr)
2357{ 2357{
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2421} 2421}
2422 2422
2423static inline __u16 2423static __u16
2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2425 struct qeth_hdr *hdr) 2425 struct qeth_hdr *hdr)
2426{ 2426{
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2476 return vlan_id; 2476 return vlan_id;
2477} 2477}
2478 2478
2479static inline void 2479static void
2480qeth_process_inbound_buffer(struct qeth_card *card, 2480qeth_process_inbound_buffer(struct qeth_card *card,
2481 struct qeth_qdio_buffer *buf, int index) 2481 struct qeth_qdio_buffer *buf, int index)
2482{ 2482{
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2528 } 2528 }
2529} 2529}
2530 2530
2531static inline struct qeth_buffer_pool_entry * 2531static struct qeth_buffer_pool_entry *
2532qeth_get_buffer_pool_entry(struct qeth_card *card) 2532qeth_get_buffer_pool_entry(struct qeth_card *card)
2533{ 2533{
2534 struct qeth_buffer_pool_entry *entry; 2534 struct qeth_buffer_pool_entry *entry;
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card)
2543 return NULL; 2543 return NULL;
2544} 2544}
2545 2545
2546static inline void 2546static void
2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) 2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2548{ 2548{
2549 struct qeth_buffer_pool_entry *pool_entry; 2549 struct qeth_buffer_pool_entry *pool_entry;
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2570 buf->state = QETH_QDIO_BUF_EMPTY; 2570 buf->state = QETH_QDIO_BUF_EMPTY;
2571} 2571}
2572 2572
2573static inline void 2573static void
2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2575 struct qeth_qdio_out_buffer *buf) 2575 struct qeth_qdio_out_buffer *buf)
2576{ 2576{
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2596} 2596}
2597 2597
2598static inline void 2598static void
2599qeth_queue_input_buffer(struct qeth_card *card, int index) 2599qeth_queue_input_buffer(struct qeth_card *card, int index)
2600{ 2600{
2601 struct qeth_qdio_q *queue = card->qdio.in_q; 2601 struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2699 card->perf_stats.inbound_start_time; 2699 card->perf_stats.inbound_start_time;
2700} 2700}
2701 2701
2702static inline int 2702static int
2703qeth_handle_send_error(struct qeth_card *card, 2703qeth_handle_send_error(struct qeth_card *card,
2704 struct qeth_qdio_out_buffer *buffer, 2704 struct qeth_qdio_out_buffer *buffer,
2705 unsigned int qdio_err, unsigned int siga_err) 2705 unsigned int qdio_err, unsigned int siga_err)
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2821 * Switched to packing state if the number of used buffers on a queue 2821 * Switched to packing state if the number of used buffers on a queue
2822 * reaches a certain limit. 2822 * reaches a certain limit.
2823 */ 2823 */
2824static inline void 2824static void
2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2826{ 2826{
2827 if (!queue->do_pack) { 2827 if (!queue->do_pack) {
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2842 * In that case 1 is returned to inform the caller. If no buffer 2842 * In that case 1 is returned to inform the caller. If no buffer
2843 * has to be flushed, zero is returned. 2843 * has to be flushed, zero is returned.
2844 */ 2844 */
2845static inline int 2845static int
2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2847{ 2847{
2848 struct qeth_qdio_out_buffer *buffer; 2848 struct qeth_qdio_out_buffer *buffer;
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2877 * Checks if there is a packing buffer and prepares it to be flushed. 2877 * Checks if there is a packing buffer and prepares it to be flushed.
2878 * In that case returns 1, otherwise zero. 2878 * In that case returns 1, otherwise zero.
2879 */ 2879 */
2880static inline int 2880static int
2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) 2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2882{ 2882{
2883 struct qeth_qdio_out_buffer *buffer; 2883 struct qeth_qdio_out_buffer *buffer;
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2894 return 0; 2894 return 0;
2895} 2895}
2896 2896
2897static inline void 2897static void
2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2899{ 2899{
2900 int index; 2900 int index;
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3594 } 3594 }
3595} 3595}
3596 3596
3597static inline int 3597static int
3598qeth_send_packet(struct qeth_card *, struct sk_buff *); 3598qeth_send_packet(struct qeth_card *, struct sk_buff *);
3599 3599
3600static int 3600static int
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev)
3759 return 0; 3759 return 0;
3760} 3760}
3761 3761
3762static inline int 3762static int
3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3764{ 3764{
3765 int cast_type = RTN_UNSPEC; 3765 int cast_type = RTN_UNSPEC;
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3806 return cast_type; 3806 return cast_type;
3807} 3807}
3808 3808
3809static inline int 3809static int
3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3811 int ipv, int cast_type) 3811 int ipv, int cast_type)
3812{ 3812{
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb)
3853 } 3853 }
3854} 3854}
3855 3855
3856static inline struct qeth_hdr * 3856static struct qeth_hdr *
3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) 3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3858{ 3858{
3859#ifdef CONFIG_QETH_VLAN 3859#ifdef CONFIG_QETH_VLAN
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); 3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
3883} 3883}
3884 3884
3885static inline void 3885static void
3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) 3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
3887{ 3887{
3888 if (orig_skb != new_skb) 3888 if (orig_skb != new_skb)
3889 dev_kfree_skb_any(new_skb); 3889 dev_kfree_skb_any(new_skb);
3890} 3890}
3891 3891
3892static inline struct sk_buff * 3892static struct sk_buff *
3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, 3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3894 struct qeth_hdr **hdr, int ipv) 3894 struct qeth_hdr **hdr, int ipv)
3895{ 3895{
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type)
3940 return ct | QETH_CAST_UNICAST; 3940 return ct | QETH_CAST_UNICAST;
3941} 3941}
3942 3942
3943static inline void 3943static void
3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, 3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3945 struct sk_buff *skb) 3945 struct sk_buff *skb)
3946{ 3946{
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3977 } 3977 }
3978} 3978}
3979 3979
3980static inline void 3980static void
3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3982 struct sk_buff *skb, int cast_type) 3982 struct sk_buff *skb, int cast_type)
3983{ 3983{
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4068 } 4068 }
4069} 4069}
4070 4070
4071static inline void 4071static void
4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4073 int is_tso, int *next_element_to_fill) 4073 int is_tso, int *next_element_to_fill)
4074{ 4074{
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4112 *next_element_to_fill = element; 4112 *next_element_to_fill = element;
4113} 4113}
4114 4114
4115static inline int 4115static int
4116qeth_fill_buffer(struct qeth_qdio_out_q *queue, 4116qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4117 struct qeth_qdio_out_buffer *buf, 4117 struct qeth_qdio_out_buffer *buf,
4118 struct sk_buff *skb) 4118 struct sk_buff *skb)
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4171 return flush_cnt; 4171 return flush_cnt;
4172} 4172}
4173 4173
4174static inline int 4174static int
4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4176 struct sk_buff *skb, struct qeth_hdr *hdr, 4176 struct sk_buff *skb, struct qeth_hdr *hdr,
4177 int elements_needed, 4177 int elements_needed,
@@ -4222,7 +4222,7 @@ out:
4222 return -EBUSY; 4222 return -EBUSY;
4223} 4223}
4224 4224
4225static inline int 4225static int
4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4227 struct sk_buff *skb, struct qeth_hdr *hdr, 4227 struct sk_buff *skb, struct qeth_hdr *hdr,
4228 int elements_needed, struct qeth_eddp_context *ctx) 4228 int elements_needed, struct qeth_eddp_context *ctx)
@@ -4328,7 +4328,7 @@ out:
4328 return rc; 4328 return rc;
4329} 4329}
4330 4330
4331static inline int 4331static int
4332qeth_get_elements_no(struct qeth_card *card, void *hdr, 4332qeth_get_elements_no(struct qeth_card *card, void *hdr,
4333 struct sk_buff *skb, int elems) 4333 struct sk_buff *skb, int elems)
4334{ 4334{
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
4349} 4349}
4350 4350
4351 4351
4352static inline int 4352static int
4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) 4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4354{ 4354{
4355 int ipv = 0; 4355 int ipv = 0;
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4536} 4536}
4537 4537
4538 4538
4539static inline const char * 4539static const char *
4540qeth_arp_get_error_cause(int *rc) 4540qeth_arp_get_error_cause(int *rc)
4541{ 4541{
4542 switch (*rc) { 4542 switch (*rc) {
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
4600static inline void 4600static void
4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4602 struct qeth_arp_query_data *qdata, 4602 struct qeth_arp_query_data *qdata,
4603 int entry_size, int uentry_size) 4603 int entry_size, int uentry_size)
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5214 spin_unlock_irqrestore(&card->vlanlock, flags); 5214 spin_unlock_irqrestore(&card->vlanlock, flags);
5215} 5215}
5216 5216
5217static inline void 5217static void
5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, 5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5219 unsigned short vid) 5219 unsigned short vid)
5220{ 5220{
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card)
5625 spin_unlock_irqrestore(&card->ip_lock, flags); 5625 spin_unlock_irqrestore(&card->ip_lock, flags);
5626} 5626}
5627 5627
5628static inline void 5628static void
5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) 5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5630{ 5630{
5631 struct qeth_ipaddr *ipm; 5631 struct qeth_ipaddr *ipm;
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card)
5711} 5711}
5712 5712
5713#ifdef CONFIG_QETH_IPV6 5713#ifdef CONFIG_QETH_IPV6
5714static inline void 5714static void
5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5716{ 5716{
5717 struct qeth_ipaddr *ipm; 5717 struct qeth_ipaddr *ipm;
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6022 6022
6023 return rc; 6023 return rc;
6024} 6024}
6025static inline void 6025static void
6026qeth_fill_netmask(u8 *netmask, unsigned int len) 6026qeth_fill_netmask(u8 *netmask, unsigned int len)
6027{ 6027{
6028 int i,j; 6028 int i,j;
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6626 return rc; 6626 return rc;
6627} 6627}
6628 6628
6629static inline int 6629static int
6630qeth_setadapter_hstr(struct qeth_card *card) 6630qeth_setadapter_hstr(struct qeth_card *card)
6631{ 6631{
6632 int rc; 6632 int rc;
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card,
6889 return rc; 6889 return rc;
6890} 6890}
6891 6891
6892static inline int 6892static int
6893qeth_start_ipa_arp_processing(struct qeth_card *card) 6893qeth_start_ipa_arp_processing(struct qeth_card *card)
6894{ 6894{
6895 int rc; 6895 int rc;
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7529 wake_up(&card->wait_q); 7529 wake_up(&card->wait_q);
7530} 7530}
7531 7531
7532static inline int 7532static int
7533qeth_threads_running(struct qeth_card *card, unsigned long threads) 7533qeth_threads_running(struct qeth_card *card, unsigned long threads)
7534{ 7534{
7535 unsigned long flags; 7535 unsigned long flags;
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8118 spin_unlock_irqrestore(&card->ip_lock, flags); 8118 spin_unlock_irqrestore(&card->ip_lock, flags);
8119} 8119}
8120 8120
8121static inline void 8121static void
8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8123{ 8123{
8124 int i, j; 8124 int i, j;
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 5836737ac58f..d518419cd0c6 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, 328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store); 329 qeth_dev_bufcnt_store);
330 330
331static inline ssize_t 331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, 332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf) 333 char *buf)
334{ 334{
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu
368 return qeth_dev_route_show(card, &card->options.route4, buf); 368 return qeth_dev_route_show(card, &card->options.route4, buf);
369} 369}
370 370
371static inline ssize_t 371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, 372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count) 373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{ 374{
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \
998 .store = _store, \ 998 .store = _store, \
999}; 999};
1000 1000
1001int 1001static int
1002qeth_check_layer2(struct qeth_card *card) 1002qeth_check_layer2(struct qeth_card *card)
1003{ 1003{
1004 if (card->options.layer2) 1004 if (card->options.layer2)
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1100 qeth_dev_ipato_invert4_show, 1100 qeth_dev_ipato_invert4_show,
1101 qeth_dev_ipato_invert4_store); 1101 qeth_dev_ipato_invert4_store);
1102 1102
1103static inline ssize_t 1103static ssize_t
1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, 1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1105 enum qeth_prot_versions proto) 1105 enum qeth_prot_versions proto)
1106{ 1106{
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char
1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); 1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1147} 1147}
1148 1148
1149static inline int 1149static int
1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, 1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1151 u8 *addr, int *mask_bits) 1151 u8 *addr, int *mask_bits)
1152{ 1152{
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181static inline ssize_t 1181static ssize_t
1182qeth_dev_ipato_add_store(const char *buf, size_t count, 1182qeth_dev_ipato_add_store(const char *buf, size_t count,
1183 struct qeth_card *card, enum qeth_prot_versions proto) 1183 struct qeth_card *card, enum qeth_prot_versions proto)
1184{ 1184{
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1223 qeth_dev_ipato_add4_show, 1223 qeth_dev_ipato_add4_show,
1224 qeth_dev_ipato_add4_store); 1224 qeth_dev_ipato_add4_store);
1225 1225
1226static inline ssize_t 1226static ssize_t
1227qeth_dev_ipato_del_store(const char *buf, size_t count, 1227qeth_dev_ipato_del_store(const char *buf, size_t count,
1228 struct qeth_card *card, enum qeth_prot_versions proto) 1228 struct qeth_card *card, enum qeth_prot_versions proto)
1229{ 1229{
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = {
1361 .attrs = (struct attribute **)qeth_ipato_device_attrs, 1361 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1362}; 1362};
1363 1363
1364static inline ssize_t 1364static ssize_t
1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, 1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1366 enum qeth_prot_versions proto) 1366 enum qeth_prot_versions proto)
1367{ 1367{
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char
1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); 1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1408} 1408}
1409 1409
1410static inline int 1410static int
1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, 1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1412 u8 *addr) 1412 u8 *addr)
1413{ 1413{
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1418 return 0; 1418 return 0;
1419} 1419}
1420 1420
1421static inline ssize_t 1421static ssize_t
1422qeth_dev_vipa_add_store(const char *buf, size_t count, 1422qeth_dev_vipa_add_store(const char *buf, size_t count,
1423 struct qeth_card *card, enum qeth_prot_versions proto) 1423 struct qeth_card *card, enum qeth_prot_versions proto)
1424{ 1424{
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1451 qeth_dev_vipa_add4_show, 1451 qeth_dev_vipa_add4_show,
1452 qeth_dev_vipa_add4_store); 1452 qeth_dev_vipa_add4_store);
1453 1453
1454static inline ssize_t 1454static ssize_t
1455qeth_dev_vipa_del_store(const char *buf, size_t count, 1455qeth_dev_vipa_del_store(const char *buf, size_t count,
1456 struct qeth_card *card, enum qeth_prot_versions proto) 1456 struct qeth_card *card, enum qeth_prot_versions proto)
1457{ 1457{
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = {
1542 .attrs = (struct attribute **)qeth_vipa_device_attrs, 1542 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1543}; 1543};
1544 1544
1545static inline ssize_t 1545static ssize_t
1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, 1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1547 enum qeth_prot_versions proto) 1547 enum qeth_prot_versions proto)
1548{ 1548{
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char
1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); 1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1589} 1589}
1590 1590
1591static inline int 1591static int
1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, 1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1593 u8 *addr) 1593 u8 *addr)
1594{ 1594{
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1599 return 0; 1599 return 0;
1600} 1600}
1601 1601
1602static inline ssize_t 1602static ssize_t
1603qeth_dev_rxip_add_store(const char *buf, size_t count, 1603qeth_dev_rxip_add_store(const char *buf, size_t count,
1604 struct qeth_card *card, enum qeth_prot_versions proto) 1604 struct qeth_card *card, enum qeth_prot_versions proto)
1605{ 1605{
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1632 qeth_dev_rxip_add4_show, 1632 qeth_dev_rxip_add4_show,
1633 qeth_dev_rxip_add4_store); 1633 qeth_dev_rxip_add4_store);
1634 1634
1635static inline ssize_t 1635static ssize_t
1636qeth_dev_rxip_del_store(const char *buf, size_t count, 1636qeth_dev_rxip_del_store(const char *buf, size_t count,
1637 struct qeth_card *card, enum qeth_prot_versions proto) 1637 struct qeth_card *card, enum qeth_prot_versions proto)
1638{ 1638{
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index e088b5e28711..806bb1a921eb 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,22 +13,18 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/device.h>
16#include <linux/kthread.h> 17#include <linux/kthread.h>
17 18#include <asm/etr.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
20#include "s390mach.h" 24#include "s390mach.h"
21 25
22static struct semaphore m_sem; 26static struct semaphore m_sem;
23 27
24extern int css_process_crw(int, int);
25extern int chsc_process_crw(void);
26extern int chp_process_crw(int, int);
27extern void css_reiterate_subchannels(void);
28
29extern struct workqueue_struct *slow_path_wq;
30extern struct work_struct slow_path_work;
31
32static NORET_TYPE void 28static NORET_TYPE void
33s390_handle_damage(char *msg) 29s390_handle_damage(char *msg)
34{ 30{
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs)
470 s390_handle_damage("unable to revalidate registers."); 466 s390_handle_damage("unable to revalidate registers.");
471 } 467 }
472 468
469 if (mci->cd) {
470 /* Timing facility damage */
471 s390_handle_damage("TOD clock damaged");
472 }
473
474 if (mci->ed && mci->ec) {
475 /* External damage */
476 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
477 etr_sync_check();
478 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
479 etr_switch_to_local();
480 }
481
473 if (mci->se) 482 if (mci->se)
474 /* Storage error uncorrected */ 483 /* Storage error uncorrected */
475 s390_handle_damage("received storage error uncorrected " 484 s390_handle_damage("received storage error uncorrected "
@@ -508,7 +517,7 @@ static int
508machine_check_init(void) 517machine_check_init(void)
509{ 518{
510 init_MUTEX_LOCKED(&m_sem); 519 init_MUTEX_LOCKED(&m_sem);
511 ctl_clear_bit(14, 25); /* disable external damage MCH */ 520 ctl_set_bit(14, 25); /* enable external damage MCH */
512 ctl_set_bit(14, 27); /* enable system recovery MCH */ 521 ctl_set_bit(14, 27); /* enable system recovery MCH */
513#ifdef CONFIG_MACHCHK_WARNING 522#ifdef CONFIG_MACHCHK_WARNING
514 ctl_set_bit(14, 24); /* enable warning MCH */ 523 ctl_set_bit(14, 24); /* enable warning MCH */
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init);
529static int __init 538static int __init
530machine_check_crw_init (void) 539machine_check_crw_init (void)
531{ 540{
532 kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); 541 struct task_struct *task;
542
543 task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
544 if (IS_ERR(task))
545 return PTR_ERR(task);
533 ctl_set_bit(14, 28); /* enable channel report MCH */ 546 ctl_set_bit(14, 28); /* enable channel report MCH */
534 return 0; 547 return 0;
535} 548}
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index 7abb42a09ae2..d3ca4281a494 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw )
102 return ccode; 102 return ccode;
103} 103}
104 104
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107
105#endif /* __s390mach */ 108#endif /* __s390mach */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 85093b71f9fa..39a885266790 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void);
47static void zfcp_ns_gid_pn_handler(unsigned long); 47static void zfcp_ns_gid_pn_handler(unsigned long);
48 48
49/* miscellaneous */ 49/* miscellaneous */
50static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); 50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static inline void zfcp_sg_list_free(struct zfcp_sg_list *); 51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, 52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t); 53 void __user *, size_t);
54static inline int zfcp_sg_list_copy_to_user(void __user *, 54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t); 55 struct zfcp_sg_list *, size_t);
56
57static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); 56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
58 57
59#define ZFCP_CFDC_IOC_MAGIC 0xDD 58#define ZFCP_CFDC_IOC_MAGIC 0xDD
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
605 * elements of the scatter-gather list. The maximum size of a single element 604 * elements of the scatter-gather list. The maximum size of a single element
606 * in the scatter-gather list is PAGE_SIZE. 605 * in the scatter-gather list is PAGE_SIZE.
607 */ 606 */
608static inline int 607static int
609zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) 608zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
610{ 609{
611 struct scatterlist *sg; 610 struct scatterlist *sg;
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
652 * Memory for each element in the scatter-gather list is freed. 651 * Memory for each element in the scatter-gather list is freed.
653 * Finally sg_list->sg is freed itself and sg_list->count is reset. 652 * Finally sg_list->sg is freed itself and sg_list->count is reset.
654 */ 653 */
655static inline void 654static void
656zfcp_sg_list_free(struct zfcp_sg_list *sg_list) 655zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
657{ 656{
658 struct scatterlist *sg; 657 struct scatterlist *sg;
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
697 * @size: number of bytes to be copied 696 * @size: number of bytes to be copied
698 * Return: 0 on success, -EFAULT if copy_from_user fails. 697 * Return: 0 on success, -EFAULT if copy_from_user fails.
699 */ 698 */
700static inline int 699static int
701zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, 700zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
702 void __user *user_buffer, 701 void __user *user_buffer,
703 size_t size) 702 size_t size)
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
735 * @size: number of bytes to be copied 734 * @size: number of bytes to be copied
736 * Return: 0 on success, -EFAULT if copy_to_user fails 735 * Return: 0 on success, -EFAULT if copy_to_user fails
737 */ 736 */
738static inline int 737static int
739zfcp_sg_list_copy_to_user(void __user *user_buffer, 738zfcp_sg_list_copy_to_user(void __user *user_buffer,
740 struct zfcp_sg_list *sg_list, 739 struct zfcp_sg_list *sg_list,
741 size_t size) 740 size_t size)
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1799 * @code: reason code 1798 * @code: reason code
1800 * @rc_table: table of reason codes and descriptions 1799 * @rc_table: table of reason codes and descriptions
1801 */ 1800 */
1802static inline const char * 1801static const char *
1803zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) 1802zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1804{ 1803{
1805 const char *descr = "unknown reason code"; 1804 const char *descr = "unknown reason code";
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt)
1847 * @rjt_par: reject parameter acc. to FC-PH/FC-FS 1846 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1848 * @rc_table: table of reason codes and descriptions 1847 * @rc_table: table of reason codes and descriptions
1849 */ 1848 */
1850static inline void 1849static void
1851zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, 1850zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1852 const struct zfcp_rc_entry *rc_table) 1851 const struct zfcp_rc_entry *rc_table)
1853{ 1852{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0aa3b1ac76af..d8191d115c14 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize,
31 31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
33 33
34static inline int 34static int
35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) 35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
36{ 36{
37 unsigned long long sec; 37 unsigned long long sec;
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label,
106 return len; 106 return len;
107} 107}
108 108
109static inline int 109static int
110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, 110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
111 debug_entry_t * entry, char *out_buf) 111 debug_entry_t * entry, char *out_buf)
112{ 112{
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
130 return len; 130 return len;
131} 131}
132 132
133inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) 133void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
134{ 134{
135 struct zfcp_adapter *adapter = fsf_req->adapter; 135 struct zfcp_adapter *adapter = fsf_req->adapter;
136 struct fsf_qtcb *qtcb = fsf_req->qtcb; 136 struct fsf_qtcb *qtcb = fsf_req->qtcb;
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
242} 242}
243 243
244inline void 244void
245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, 245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
246 struct fsf_status_read_buffer *status_buffer) 246 struct fsf_status_read_buffer *status_buffer)
247{ 247{
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
296} 296}
297 297
298inline void 298void
299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
300 unsigned int qdio_error, unsigned int siga_error, 300 unsigned int qdio_error, unsigned int siga_error,
301 int sbal_index, int sbal_count) 301 int sbal_index, int sbal_count)
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
317} 317}
318 318
319static inline int 319static int
320zfcp_hba_dbf_view_response(char *out_buf, 320zfcp_hba_dbf_view_response(char *out_buf,
321 struct zfcp_hba_dbf_record_response *rec) 321 struct zfcp_hba_dbf_record_response *rec)
322{ 322{
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf,
403 return len; 403 return len;
404} 404}
405 405
406static inline int 406static int
407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) 407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
408{ 408{
409 int len = 0; 409 int len = 0;
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
424 return len; 424 return len;
425} 425}
426 426
427static inline int 427static int
428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) 428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
429{ 429{
430 int len = 0; 430 int len = 0;
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
469 return len; 469 return len;
470} 470}
471 471
472struct debug_view zfcp_hba_dbf_view = { 472static struct debug_view zfcp_hba_dbf_view = {
473 "structured", 473 "structured",
474 NULL, 474 NULL,
475 &zfcp_dbf_view_header, 475 &zfcp_dbf_view_header,
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481inline void 481void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
520} 520}
521 521
522inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 522void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
523{ 523{
524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
525 struct zfcp_port *port = ct->port; 525 struct zfcp_port *port = ct->port;
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
531 ct->req->length); 531 ct->req->length);
532} 532}
533 533
534inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 534void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
535{ 535{
536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
537 struct zfcp_port *port = ct->port; 537 struct zfcp_port *port = ct->port;
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
543 ct->resp->length); 543 ct->resp->length);
544} 544}
545 545
546static inline void 546static void
547_zfcp_san_dbf_event_common_els(const char *tag, int level, 547_zfcp_san_dbf_event_common_els(const char *tag, int level,
548 struct zfcp_fsf_req *fsf_req, u32 s_id, 548 struct zfcp_fsf_req *fsf_req, u32 s_id,
549 u32 d_id, u8 ls_code, void *buffer, int buflen) 549 u32 d_id, u8 ls_code, void *buffer, int buflen)
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level,
585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
586} 586}
587 587
588inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) 588void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
589{ 589{
590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
591 591
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
597 els->req->length); 597 els->req->length);
598} 598}
599 599
600inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) 600void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
601{ 601{
602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
603 603
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
608 els->resp->length); 608 els->resp->length);
609} 609}
610 610
611inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) 611void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
612{ 612{
613 struct zfcp_adapter *adapter = fsf_req->adapter; 613 struct zfcp_adapter *adapter = fsf_req->adapter;
614 struct fsf_status_read_buffer *status_buffer = 614 struct fsf_status_read_buffer *status_buffer =
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
693 return len; 693 return len;
694} 694}
695 695
696struct debug_view zfcp_san_dbf_view = { 696static struct debug_view zfcp_san_dbf_view = {
697 "structured", 697 "structured",
698 NULL, 698 NULL,
699 &zfcp_dbf_view_header, 699 &zfcp_dbf_view_header,
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = {
702 NULL 702 NULL
703}; 703};
704 704
705static inline void 705static void
706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); 786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
787} 787}
788 788
789inline void 789void
790zfcp_scsi_dbf_event_result(const char *tag, int level, 790zfcp_scsi_dbf_event_result(const char *tag, int level,
791 struct zfcp_adapter *adapter, 791 struct zfcp_adapter *adapter,
792 struct scsi_cmnd *scsi_cmnd, 792 struct scsi_cmnd *scsi_cmnd,
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
796 adapter, scsi_cmnd, fsf_req, 0); 796 adapter, scsi_cmnd, fsf_req, 0);
797} 797}
798 798
799inline void 799void
800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
801 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
802 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
806 adapter, scsi_cmnd, new_fsf_req, old_req_id); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
807} 807}
808 808
809inline void 809void
810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
811 struct scsi_cmnd *scsi_cmnd) 811 struct scsi_cmnd *scsi_cmnd)
812{ 812{
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
884 return len; 884 return len;
885} 885}
886 886
887struct debug_view zfcp_scsi_dbf_view = { 887static struct debug_view zfcp_scsi_dbf_view = {
888 "structured", 888 "structured",
889 NULL, 889 NULL,
890 &zfcp_dbf_view_header, 890 &zfcp_dbf_view_header,
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c88babce9bca..88642dec080c 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
200 * returns: 0 - initiated action successfully 200 * returns: 0 - initiated action successfully
201 * <0 - failed to initiate action 201 * <0 - failed to initiate action
202 */ 202 */
203int 203static int
204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) 204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
205{ 205{
206 int retval; 206 int retval;
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
295 * zfcp_erp_adisc - send ADISC ELS command 295 * zfcp_erp_adisc - send ADISC ELS command
296 * @port: port structure 296 * @port: port structure
297 */ 297 */
298int 298static int
299zfcp_erp_adisc(struct zfcp_port *port) 299zfcp_erp_adisc(struct zfcp_port *port)
300{ 300{
301 struct zfcp_adapter *adapter = port->adapter; 301 struct zfcp_adapter *adapter = port->adapter;
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
380 * 380 *
381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered.
382 */ 382 */
383void 383static void
384zfcp_erp_adisc_handler(unsigned long data) 384zfcp_erp_adisc_handler(unsigned long data)
385{ 385{
386 struct zfcp_send_els *send_els; 386 struct zfcp_send_els *send_els;
@@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3141 break; 3141 break;
3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3143 if (result != ZFCP_ERP_SUCCEEDED) { 3143 if (result != ZFCP_ERP_SUCCEEDED) {
3144 struct zfcp_port *port;
3145 list_for_each_entry(port, &adapter->port_list_head, list) 3144 list_for_each_entry(port, &adapter->port_list_head, list)
3146 if (port->rport && 3145 if (port->rport &&
3147 !atomic_test_mask(ZFCP_STATUS_PORT_WKA, 3146 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b8794d77285d..cda0cc095ad1 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); 120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); 121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
122extern void set_host_byte(u32 *, char); 122extern void set_host_byte(int *, char);
123extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(int *, char);
124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
126 126
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 067f1519eb04..4b3ae3f22e78 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4563/* 4563/*
4564 * set qtcb pointer in fsf_req and initialize QTCB 4564 * set qtcb pointer in fsf_req and initialize QTCB
4565 */ 4565 */
4566static inline void 4566static void
4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4568{ 4568{
4569 if (likely(fsf_req->qtcb != NULL)) { 4569 if (likely(fsf_req->qtcb != NULL)) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbd9f48e863e..1e12a78e8edd 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -21,22 +21,22 @@
21 21
22#include "zfcp_ext.h" 22#include "zfcp_ext.h"
23 23
24static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); 24static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get 25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
26 (struct zfcp_qdio_queue *, int, int); 26 (struct zfcp_qdio_queue *, int, int);
27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp 27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
28 (struct zfcp_fsf_req *, int, int); 28 (struct zfcp_fsf_req *, int, int);
29static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain 29static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
30 (struct zfcp_fsf_req *, unsigned long); 30 (struct zfcp_fsf_req *, unsigned long);
31static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next 31static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
32 (struct zfcp_fsf_req *, unsigned long); 32 (struct zfcp_fsf_req *, unsigned long);
33static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); 33static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); 34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
35static inline void zfcp_qdio_sbale_fill 35static void zfcp_qdio_sbale_fill
36 (struct zfcp_fsf_req *, unsigned long, void *, int); 36 (struct zfcp_fsf_req *, unsigned long, void *, int);
37static inline int zfcp_qdio_sbals_from_segment 37static int zfcp_qdio_sbals_from_segment
38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); 38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
39static inline int zfcp_qdio_sbals_from_buffer 39static int zfcp_qdio_sbals_from_buffer
40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); 40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
41 41
42static qdio_handler_t zfcp_qdio_request_handler; 42static qdio_handler_t zfcp_qdio_request_handler;
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
201 * returns: error flag 201 * returns: error flag
202 * 202 *
203 */ 203 */
204static inline int 204static int
205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, 205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
206 unsigned int qdio_error, unsigned int siga_error, 206 unsigned int qdio_error, unsigned int siga_error,
207 int first_element, int elements_processed) 207 int first_element, int elements_processed)
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for 462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
463 * a struct zfcp_fsf_req 463 * a struct zfcp_fsf_req
464 */ 464 */
465inline volatile struct qdio_buffer_element * 465volatile struct qdio_buffer_element *
466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) 466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
467{ 467{
468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, 468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for 484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
485 * a struct zfcp_fsf_req 485 * a struct zfcp_fsf_req
486 */ 486 */
487inline volatile struct qdio_buffer_element * 487volatile struct qdio_buffer_element *
488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) 488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
489{ 489{
490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
499 * 499 *
500 * Note: We can assume at least one free SBAL in the request_queue when called. 500 * Note: We can assume at least one free SBAL in the request_queue when called.
501 */ 501 */
502static inline void 502static void
503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
504{ 504{
505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count); 505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
517 * 517 *
518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. 518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
519 */ 519 */
520static inline volatile struct qdio_buffer_element * 520static volatile struct qdio_buffer_element *
521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
522{ 522{
523 volatile struct qdio_buffer_element *sbale; 523 volatile struct qdio_buffer_element *sbale;
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
554/** 554/**
555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed 555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
556 */ 556 */
557static inline volatile struct qdio_buffer_element * 557static volatile struct qdio_buffer_element *
558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
559{ 559{
560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue 569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
570 * with zero from 570 * with zero from
571 */ 571 */
572static inline int 572static int
573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) 573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
574{ 574{
575 struct qdio_buffer **buf = queue->buffer; 575 struct qdio_buffer **buf = queue->buffer;
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
603 * zfcp_qdio_sbale_fill - set address and length in current SBALE 603 * zfcp_qdio_sbale_fill - set address and length in current SBALE
604 * on request_queue 604 * on request_queue
605 */ 605 */
606static inline void 606static void
607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
608 void *addr, int length) 608 void *addr, int length)
609{ 609{
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
624 * Alignment and length of the segment determine how many SBALEs are needed 624 * Alignment and length of the segment determine how many SBALEs are needed
625 * for the memory segment. 625 * for the memory segment.
626 */ 626 */
627static inline int 627static int
628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
629 void *start_addr, unsigned long total_length) 629 void *start_addr, unsigned long total_length)
630{ 630{
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
659 * @sg_count: number of elements in scatter-gather list 659 * @sg_count: number of elements in scatter-gather list
660 * @max_sbals: upper bound for number of SBALs to be used 660 * @max_sbals: upper bound for number of SBALs to be used
661 */ 661 */
662inline int 662int
663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
664 struct scatterlist *sg, int sg_count, int max_sbals) 664 struct scatterlist *sg, int sg_count, int max_sbals)
665{ 665{
@@ -707,7 +707,7 @@ out:
707 * @length: length of buffer 707 * @length: length of buffer
708 * @max_sbals: upper bound for number of SBALs to be used 708 * @max_sbals: upper bound for number of SBALs to be used
709 */ 709 */
710static inline int 710static int
711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
712 void *buffer, unsigned long length, int max_sbals) 712 void *buffer, unsigned long length, int max_sbals)
713{ 713{
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used 728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
729 * to fill SBALs 729 * to fill SBALs
730 */ 730 */
731inline int 731int
732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, 732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) 733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
734{ 734{
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 452d96f92a14..99db02062c3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
90 return fcp_sns_info_ptr; 90 return fcp_sns_info_ptr;
91} 91}
92 92
93fcp_dl_t * 93static fcp_dl_t *
94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) 94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
95{ 95{
96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
124 * regarding the specified byte 124 * regarding the specified byte
125 */ 125 */
126static inline void 126static inline void
127set_byte(u32 * result, char status, char pos) 127set_byte(int *result, char status, char pos)
128{ 128{
129 *result |= status << (pos * 8); 129 *result |= status << (pos * 8);
130} 130}
131 131
132void 132void
133set_host_byte(u32 * result, char status) 133set_host_byte(int *result, char status)
134{ 134{
135 set_byte(result, status, 2); 135 set_byte(result, status, 2);
136} 136}
137 137
138void 138void
139set_driver_byte(u32 * result, char status) 139set_driver_byte(int *result, char status)
140{ 140{
141 set_byte(result, status, 3); 141 set_byte(result, status, 3);
142} 142}
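
The two helpers above pack their status value into a fixed byte of the SCSI midlayer result word: set_byte() shifts by pos * 8, with the host byte at position 2 and the driver byte at position 3. A small stand-alone sketch of the same packing (user-space C; DID_ERROR and DRIVER_SENSE are the conventional SCSI midlayer constants, everything else here is illustrative):

#include <stdio.h>

/* Pack a status byte into byte position `pos` of a SCSI-style result word,
 * mirroring the set_byte() helper in zfcp_scsi.c. */
static void pack_byte(int *result, unsigned char status, int pos)
{
        *result |= status << (pos * 8);
}

int main(void)
{
        int result = 0;

        pack_byte(&result, 0x07, 2);   /* host byte: DID_ERROR     */
        pack_byte(&result, 0x08, 3);   /* driver byte: DRIVER_SENSE */
        printf("result = 0x%08x\n", result);   /* prints 0x08070000 */
        return 0;
}
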
@@ -280,7 +280,7 @@ out:
280 return retval; 280 return retval;
281} 281}
282 282
283void 283static void
284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) 284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
285{ 285{
286 struct completion *wait = (struct completion *) scpnt->SCp.ptr; 286 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
324 * returns: 0 - success, SCSI command enqueued 324 * returns: 0 - success, SCSI command enqueued
325 * !0 - failure 325 * !0 - failure
326 */ 326 */
327int 327static int
328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
329 void (*done) (struct scsi_cmnd *)) 329 void (*done) (struct scsi_cmnd *))
330{ 330{
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
380 * will handle late commands. (Usually, the normal completion of late 380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.) 381 * commands is ignored with respect to the running abort operation.)
382 */ 382 */
383int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 384{
385 struct Scsi_Host *scsi_host; 385 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 386 struct zfcp_adapter *adapter;
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
445 return retval; 445 return retval;
446} 446}
447 447
448int 448static int
449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
450{ 450{
451 int retval; 451 int retval;
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
541/** 541/**
542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
543 */ 543 */
544int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 544static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
545{ 545{
546 struct zfcp_unit *unit; 546 struct zfcp_unit *unit;
547 struct zfcp_adapter *adapter; 547 struct zfcp_adapter *adapter;
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 1e788e815ce7..090743d2f914 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -9,8 +9,14 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13 14
15/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h>
18#include <math-emu/single.h>
19
14struct sysinfo_1_1_1 { 20struct sysinfo_1_1_1 {
15 char reserved_0[32]; 21 char reserved_0[32];
16 char manufacturer[16]; 22 char manufacturer[16];
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
198 * if the higher order 8 bits are not zero. Printing 204 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no, 205 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer. 206 * always print the number as 32 bit unsigned integer.
201 * The user-space needs to know about the stange 207 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability. 208 * encoding of the alternate cpu capability.
203 */ 209 */
204 len += sprintf(page + len, "Capability: %u %u\n", 210 len += sprintf(page + len, "Capability: %u %u\n",
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void)
351 357
352__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
353 359
360/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */
363void s390_adjust_jiffies(void)
364{
365 struct sysinfo_1_2_2 *info;
366 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
367 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
368 FP_DECL_EX;
369 unsigned int capability;
370
371 info = (void *) get_zeroed_page(GFP_KERNEL);
372 if (!info)
373 return;
374
375 if (stsi(info, 1, 2, 2) != -ENOSYS) {
376 /*
377 * Major sigh. The cpu capability encoding is "special".
378 * If the first 9 bits of info->capability are 0 then it
379 * is a 32 bit unsigned integer in the range 0 .. 2^23.
380 * If the first 9 bits are != 0 then it is a 32 bit float.
381 * In addition a lower value indicates a proportionally
382 * higher cpu capacity. Bogomips are the other way round.
383 * To get to a halfway suitable number we divide 1e7
384 * by the cpu capability number. Yes, that means a floating
385 * point division .. math-emu here we come :-)
386 */
387 FP_UNPACK_SP(SA, &fmil);
388 if ((info->capability >> 23) == 0)
389 FP_FROM_INT_S(SB, info->capability, 32, int);
390 else
391 FP_UNPACK_SP(SB, &info->capability);
392 FP_DIV_S(SR, SA, SB);
393 FP_TO_INT_S(capability, SR, 32, 0);
394 } else
395 /*
396 * Really old machine without stsi block for basic
397 * cpu information. Report 42.0 bogomips.
398 */
399 capability = 42;
400 loops_per_jiffy = capability * (500000/HZ);
401 free_page((unsigned long) info);
402}
403
404/*
405 * calibrate the delay loop
406 */
407void __init calibrate_delay(void)
408{
409 s390_adjust_jiffies();
410 /* Print the good old Bogomips line .. */
411 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
412 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
413 (loops_per_jiffy/(5000/HZ)) % 100);
414}
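
The capability handling in s390_adjust_jiffies() is easier to follow without the math-emu plumbing. Below is a rough user-space equivalent of the same calculation using ordinary floating point, which the kernel cannot do (hence the soft-fp routines above); the HZ value and the helper name are illustrative:

#include <stdio.h>
#include <string.h>

#define HZ 100  /* illustrative; the real value depends on the kernel config */

/*
 * User-space rendition of the capability -> loops_per_jiffy calculation
 * in s390_adjust_jiffies(), with plain floating point instead of math-emu.
 */
static unsigned long capability_to_lpj(unsigned int capability)
{
        double cap;

        if ((capability >> 23) == 0) {
                /* top 9 bits zero: plain 32-bit unsigned integer */
                cap = (double)capability;
        } else {
                /* otherwise the bit pattern is an IEEE single-precision float */
                float f;

                memcpy(&f, &capability, sizeof(f));
                cap = f;
        }

        /* a lower capability value means a faster cpu, hence 1e7 / cap */
        return (unsigned long)(1e7 / cap) * (500000 / HZ);
}

int main(void)
{
        printf("capability 1000 -> loops_per_jiffy %lu\n",
               capability_to_lpj(1000));
        return 0;
}
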
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig
index c7d887540d8d..aa6a620c162f 100644
--- a/drivers/usb/input/Kconfig
+++ b/drivers/usb/input/Kconfig
@@ -69,6 +69,14 @@ config LOGITECH_FF
69 Note: if you say N here, this device will still be supported, but without 69 Note: if you say N here, this device will still be supported, but without
70 force feedback. 70 force feedback.
71 71
72config PANTHERLORD_FF
73 bool "PantherLord USB/PS2 2in1 Adapter support"
74 depends on HID_FF
75 select INPUT_FF_MEMLESS if USB_HID
76 help
77 Say Y here if you have a PantherLord USB/PS2 2in1 Adapter and want
78 to enable force feedback support for it.
79
72config THRUSTMASTER_FF 80config THRUSTMASTER_FF
73 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)" 81 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)"
74 depends on HID_FF && EXPERIMENTAL 82 depends on HID_FF && EXPERIMENTAL
diff --git a/drivers/usb/input/Makefile b/drivers/usb/input/Makefile
index 1a24b5bfa05f..a06024e5cd56 100644
--- a/drivers/usb/input/Makefile
+++ b/drivers/usb/input/Makefile
@@ -17,6 +17,9 @@ endif
17ifeq ($(CONFIG_LOGITECH_FF),y) 17ifeq ($(CONFIG_LOGITECH_FF),y)
18 usbhid-objs += hid-lgff.o 18 usbhid-objs += hid-lgff.o
19endif 19endif
20ifeq ($(CONFIG_PANTHERLORD_FF),y)
21 usbhid-objs += hid-plff.o
22endif
20ifeq ($(CONFIG_THRUSTMASTER_FF),y) 23ifeq ($(CONFIG_THRUSTMASTER_FF),y)
21 usbhid-objs += hid-tmff.o 24 usbhid-objs += hid-tmff.o
22endif 25endif
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index c6c9e72e5fd9..e07a30490726 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/hid.h> 36#include <linux/hid.h>
37#include <linux/hiddev.h> 37#include <linux/hiddev.h>
38#include <linux/hid-debug.h>
38#include "usbhid.h" 39#include "usbhid.h"
39 40
40/* 41/*
@@ -220,23 +221,6 @@ static void hid_irq_in(struct urb *urb)
220 } 221 }
221} 222}
222 223
223/*
224 * Find a report field with a specified HID usage.
225 */
226#if 0
227struct hid_field *hid_find_field_by_usage(struct hid_device *hid, __u32 wanted_usage, int type)
228{
229 struct hid_report *report;
230 int i;
231
232 list_for_each_entry(report, &hid->report_enum[type].report_list, list)
233 for (i = 0; i < report->maxfield; i++)
234 if (report->field[i]->logical == wanted_usage)
235 return report->field[i];
236 return NULL;
237}
238#endif /* 0 */
239
240static int hid_submit_out(struct hid_device *hid) 224static int hid_submit_out(struct hid_device *hid)
241{ 225{
242 struct hid_report *report; 226 struct hid_report *report;
@@ -501,7 +485,7 @@ static int hid_get_class_descriptor(struct usb_device *dev, int ifnum,
501{ 485{
502 int result, retries = 4; 486 int result, retries = 4;
503 487
504 memset(buf,0,size); // Make sure we parse really received data 488 memset(buf, 0, size);
505 489
506 do { 490 do {
507 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 491 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
@@ -528,18 +512,6 @@ void usbhid_close(struct hid_device *hid)
528 usb_kill_urb(usbhid->urbin); 512 usb_kill_urb(usbhid->urbin);
529} 513}
530 514
531static int hidinput_open(struct input_dev *dev)
532{
533 struct hid_device *hid = dev->private;
534 return usbhid_open(hid);
535}
536
537static void hidinput_close(struct input_dev *dev)
538{
539 struct hid_device *hid = dev->private;
540 usbhid_close(hid);
541}
542
543#define USB_VENDOR_ID_PANJIT 0x134c 515#define USB_VENDOR_ID_PANJIT 0x134c
544 516
545#define USB_VENDOR_ID_TURBOX 0x062a 517#define USB_VENDOR_ID_TURBOX 0x062a
@@ -770,6 +742,7 @@ void usbhid_init_reports(struct hid_device *hid)
770#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c 742#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
771#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 743#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
772#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 744#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
745#define USB_DEVICE_ID_APPLE_IR 0x8240
773 746
774#define USB_VENDOR_ID_CHERRY 0x046a 747#define USB_VENDOR_ID_CHERRY 0x046a
775#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 748#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
@@ -792,6 +765,9 @@ void usbhid_init_reports(struct hid_device *hid)
792#define USB_VENDOR_ID_IMATION 0x0718 765#define USB_VENDOR_ID_IMATION 0x0718
793#define USB_DEVICE_ID_DISC_STAKKA 0xd000 766#define USB_DEVICE_ID_DISC_STAKKA 0xd000
794 767
768#define USB_VENDOR_ID_PANTHERLORD 0x0810
769#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
770
795/* 771/*
796 * Alphabetically sorted blacklist by quirk type. 772 * Alphabetically sorted blacklist by quirk type.
797 */ 773 */
@@ -946,19 +922,21 @@ static const struct hid_blacklist {
946 922
947 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION }, 923 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
948 924
949 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 925 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
950 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN }, 926 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
951 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 927 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
952 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 928 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
953 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 929 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
954 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 930 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
955 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 931 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
956 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 932 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
957 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 933 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
958 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 934 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
959 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 935 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
960 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 936 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
961 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 937 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
938
939 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IR, HID_QUIRK_IGNORE },
962 940
963 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE }, 941 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
964 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE }, 942 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
@@ -969,6 +947,8 @@ static const struct hid_blacklist {
969 947
970 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS }, 948 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
971 949
950 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
951
972 { 0, 0 } 952 { 0, 0 }
973}; 953};
974 954
@@ -1064,6 +1044,11 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1064 if (quirks & HID_QUIRK_IGNORE) 1044 if (quirks & HID_QUIRK_IGNORE)
1065 return NULL; 1045 return NULL;
1066 1046
1047 if ((quirks & HID_QUIRK_IGNORE_MOUSE) &&
1048 (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE))
1049 return NULL;
1050
1051
1067 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && 1052 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
1068 (!interface->desc.bNumEndpoints || 1053 (!interface->desc.bNumEndpoints ||
1069 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { 1054 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
@@ -1235,8 +1220,8 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1235 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; 1220 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
1236 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); 1221 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
1237 hid->hidinput_input_event = usb_hidinput_input_event; 1222 hid->hidinput_input_event = usb_hidinput_input_event;
1238 hid->hidinput_open = hidinput_open; 1223 hid->hid_open = usbhid_open;
1239 hid->hidinput_close = hidinput_close; 1224 hid->hid_close = usbhid_close;
1240#ifdef CONFIG_USB_HIDDEV 1225#ifdef CONFIG_USB_HIDDEV
1241 hid->hiddev_hid_event = hiddev_hid_event; 1226 hid->hiddev_hid_event = hiddev_hid_event;
1242 hid->hiddev_report_event = hiddev_report_event; 1227 hid->hiddev_report_event = hiddev_report_event;
@@ -1315,11 +1300,7 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
1315 return -ENODEV; 1300 return -ENODEV;
1316 } 1301 }
1317 1302
1318 /* This only gets called when we are a single-input (most of the 1303 if ((hid->claimed & HID_CLAIMED_INPUT))
1319 * time). IOW, not a HID_QUIRK_MULTI_INPUT. The hid_ff_init() is
1320 * only useful in this case, and not for multi-input quirks. */
1321 if ((hid->claimed & HID_CLAIMED_INPUT) &&
1322 !(hid->quirks & HID_QUIRK_MULTI_INPUT))
1323 hid_ff_init(hid); 1304 hid_ff_init(hid);
1324 1305
1325 printk(KERN_INFO); 1306 printk(KERN_INFO);
diff --git a/drivers/usb/input/hid-ff.c b/drivers/usb/input/hid-ff.c
index 59ed65e7a621..5d145058a5cb 100644
--- a/drivers/usb/input/hid-ff.c
+++ b/drivers/usb/input/hid-ff.c
@@ -58,6 +58,9 @@ static struct hid_ff_initializer inits[] = {
58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */ 58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */
59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */ 59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */
60#endif 60#endif
61#ifdef CONFIG_PANTHERLORD_FF
62 { 0x810, 0x0001, hid_plff_init },
63#endif
61#ifdef CONFIG_THRUSTMASTER_FF 64#ifdef CONFIG_THRUSTMASTER_FF
62 { 0x44f, 0xb304, hid_tmff_init }, 65 { 0x44f, 0xb304, hid_tmff_init },
63#endif 66#endif
diff --git a/drivers/usb/input/hid-plff.c b/drivers/usb/input/hid-plff.c
new file mode 100644
index 000000000000..76d2e6e14db4
--- /dev/null
+++ b/drivers/usb/input/hid-plff.c
@@ -0,0 +1,129 @@
1/*
2 * Force feedback support for PantherLord USB/PS2 2in1 Adapter devices
3 *
4 * Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23
24/* #define DEBUG */
25
26#define debug(format, arg...) pr_debug("hid-plff: " format "\n" , ## arg)
27
28#include <linux/input.h>
29#include <linux/usb.h>
30#include <linux/hid.h>
31#include "usbhid.h"
32
33struct plff_device {
34 struct hid_report *report;
35};
36
37static int hid_plff_play(struct input_dev *dev, void *data,
38 struct ff_effect *effect)
39{
40 struct hid_device *hid = dev->private;
41 struct plff_device *plff = data;
42 int left, right;
43
44 left = effect->u.rumble.strong_magnitude;
45 right = effect->u.rumble.weak_magnitude;
46 debug("called with 0x%04x 0x%04x", left, right);
47
48 left = left * 0x7f / 0xffff;
49 right = right * 0x7f / 0xffff;
50
51 plff->report->field[0]->value[2] = left;
52 plff->report->field[0]->value[3] = right;
53 debug("running with 0x%02x 0x%02x", left, right);
54 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
55
56 return 0;
57}
58
59int hid_plff_init(struct hid_device *hid)
60{
61 struct plff_device *plff;
62 struct hid_report *report;
63 struct hid_input *hidinput;
64 struct list_head *report_list =
65 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
66 struct list_head *report_ptr = report_list;
67 struct input_dev *dev;
68 int error;
69
70 /* The device contains 2 output reports (one for each
71 HID_QUIRK_MULTI_INPUT device), both containing 1 field, which
72 contains 4 ff00.0002 usages and 4 16bit absolute values.
73
74 The 2 input reports also contain a field which contains
75 8 ff00.0001 usages and 8 boolean values. Their meaning is
76 currently unknown. */
77
78 if (list_empty(report_list)) {
79 printk(KERN_ERR "hid-plff: no output reports found\n");
80 return -ENODEV;
81 }
82
83 list_for_each_entry(hidinput, &hid->inputs, list) {
84
85 report_ptr = report_ptr->next;
86
87 if (report_ptr == report_list) {
88 printk(KERN_ERR "hid-plff: required output report is missing\n");
89 return -ENODEV;
90 }
91
92 report = list_entry(report_ptr, struct hid_report, list);
93 if (report->maxfield < 1) {
94 printk(KERN_ERR "hid-plff: no fields in the report\n");
95 return -ENODEV;
96 }
97
98 if (report->field[0]->report_count < 4) {
99 printk(KERN_ERR "hid-plff: not enough values in the field\n");
100 return -ENODEV;
101 }
102
103 plff = kzalloc(sizeof(struct plff_device), GFP_KERNEL);
104 if (!plff)
105 return -ENOMEM;
106
107 dev = hidinput->input;
108
109 set_bit(FF_RUMBLE, dev->ffbit);
110
111 error = input_ff_create_memless(dev, plff, hid_plff_play);
112 if (error) {
113 kfree(plff);
114 return error;
115 }
116
117 plff->report = report;
118 plff->report->field[0]->value[0] = 0x00;
119 plff->report->field[0]->value[1] = 0x00;
120 plff->report->field[0]->value[2] = 0x00;
121 plff->report->field[0]->value[3] = 0x00;
122 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
123 }
124
125 printk(KERN_INFO "hid-plff: Force feedback for PantherLord USB/PS2 "
126 "2in1 Adapters by Anssi Hannula <anssi.hannula@gmail.com>\n");
127
128 return 0;
129}
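
One detail of hid_plff_play() worth spelling out is the magnitude scaling: ff_effect rumble magnitudes are 16-bit values (0..0xffff), while the adapter's report fields take at most 0x7f, so the driver maps them with x * 0x7f / 0xffff. A quick stand-alone check of that mapping (illustrative only):

#include <stdio.h>

/* Scale a 16-bit force-feedback magnitude down to the 7-bit range used by
 * the PantherLord output report, as in hid_plff_play(). */
static int scale_magnitude(int magnitude)
{
        return magnitude * 0x7f / 0xffff;
}

int main(void)
{
        printf("0x0000 -> 0x%02x\n", scale_magnitude(0x0000)); /* 0x00 */
        printf("0x8000 -> 0x%02x\n", scale_magnitude(0x8000)); /* 0x3f */
        printf("0xffff -> 0x%02x\n", scale_magnitude(0xffff)); /* 0x7f */
        return 0;
}
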
diff --git a/drivers/video/output.c b/drivers/video/output.c
new file mode 100644
index 000000000000..1473f2c892d2
--- /dev/null
+++ b/drivers/video/output.c
@@ -0,0 +1,129 @@
1/*
2 * output.c - Display Output Switch driver
3 *
4 * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/module.h>
25#include <linux/video_output.h>
26#include <linux/err.h>
27#include <linux/ctype.h>
28
29
30MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
31MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
33
34static ssize_t video_output_show_state(struct class_device *dev,char *buf)
35{
36 ssize_t ret_size = 0;
37 struct output_device *od = to_output_device(dev);
38 if (od->props)
39 ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od));
40 return ret_size;
41}
42
43static ssize_t video_output_store_state(struct class_device *dev,
44 const char *buf,size_t count)
45{
46 char *endp;
47 struct output_device *od = to_output_device(dev);
48 int request_state = simple_strtoul(buf,&endp,0);
49 size_t size = endp - buf;
50
51 if (*endp && isspace(*endp))
52 size++;
53 if (size != count)
54 return -EINVAL;
55
56 if (od->props) {
57 od->request_state = request_state;
58 od->props->set_state(od);
59 }
60 return count;
61}
62
63static void video_output_class_release(struct class_device *dev)
64{
65 struct output_device *od = to_output_device(dev);
66 kfree(od);
67}
68
69static struct class_device_attribute video_output_attributes[] = {
70 __ATTR(state, 0644, video_output_show_state, video_output_store_state),
71 __ATTR_NULL,
72};
73
74static struct class video_output_class = {
75 .name = "video_output",
76 .release = video_output_class_release,
77 .class_dev_attrs = video_output_attributes,
78};
79
80struct output_device *video_output_register(const char *name,
81 struct device *dev,
82 void *devdata,
83 struct output_properties *op)
84{
85 struct output_device *new_dev;
86 int ret_code = 0;
87
88 new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL);
89 if (!new_dev) {
90 ret_code = -ENOMEM;
91 goto error_return;
92 }
93 new_dev->props = op;
94 new_dev->class_dev.class = &video_output_class;
95 new_dev->class_dev.dev = dev;
96 strlcpy(new_dev->class_dev.class_id,name,KOBJ_NAME_LEN);
97 class_set_devdata(&new_dev->class_dev,devdata);
98 ret_code = class_device_register(&new_dev->class_dev);
99 if (ret_code) {
100 kfree(new_dev);
101 goto error_return;
102 }
103 return new_dev;
104
105error_return:
106 return ERR_PTR(ret_code);
107}
108EXPORT_SYMBOL(video_output_register);
109
110void video_output_unregister(struct output_device *dev)
111{
112 if (!dev)
113 return;
114 class_device_unregister(&dev->class_dev);
115}
116EXPORT_SYMBOL(video_output_unregister);
117
118static void __exit video_output_class_exit(void)
119{
120 class_unregister(&video_output_class);
121}
122
123static int __init video_output_class_init(void)
124{
125 return class_register(&video_output_class);
126}
127
128postcore_initcall(video_output_class_init);
129module_exit(video_output_class_exit);
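
A driver that wants to expose an output through this new class would follow roughly the pattern below; video_output_register(), video_output_unregister() and the set_state/get_status members of struct output_properties come from the code above, while every demo_* name and the callback bodies are invented for illustration:

#include <linux/module.h>
#include <linux/err.h>
#include <linux/video_output.h>

static struct output_device *demo_od;

/* Callbacks reached through the class "state" sysfs attribute. */
static int demo_get_status(struct output_device *od)
{
        return 1;       /* pretend the output is always active */
}

static int demo_set_state(struct output_device *od)
{
        /* od->request_state holds the value written by user space */
        printk(KERN_INFO "demo output: requested state %d\n",
               (int)od->request_state);
        return 0;
}

static struct output_properties demo_props = {
        .set_state  = demo_set_state,
        .get_status = demo_get_status,
};

static int __init demo_init(void)
{
        demo_od = video_output_register("demo_output", NULL, NULL, &demo_props);
        return IS_ERR(demo_od) ? PTR_ERR(demo_od) : 0;
}

static void __exit demo_exit(void)
{
        video_output_unregister(demo_od);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Writing a value to /sys/class/video_output/demo_output/state then lands in demo_set_state(), and reading it reports demo_get_status() as a hex number.
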
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index d04d2f7448d9..85e3850bf2c9 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,8 @@
1Version 1.47 1Version 1.47
2------------ 2------------
3Fix oops in list_del during mount caused by unaligned string. 3Fix oops in list_del during mount caused by unaligned string.
4Seek to SEEK_END forces check for update of file size for non-cached
5files.
4 6
5Version 1.46 7Version 1.46
6------------ 8------------
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 10c90294cd18..93ef09971d2f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -511,7 +511,15 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
511{ 511{
512 /* origin == SEEK_END => we must revalidate the cached file length */ 512 /* origin == SEEK_END => we must revalidate the cached file length */
513 if (origin == SEEK_END) { 513 if (origin == SEEK_END) {
514 int retval = cifs_revalidate(file->f_path.dentry); 514 int retval;
515
516 /* some applications poll for the file length in this strange
517 way so we must seek to end on non-oplocked files by
518 setting the revalidate time to zero */
519 if(file->f_path.dentry->d_inode)
520 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
521
522 retval = cifs_revalidate(file->f_path.dentry);
515 if (retval < 0) 523 if (retval < 0)
516 return (loff_t)retval; 524 return (loff_t)retval;
517 } 525 }
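
The comment added above refers to applications that watch a growing file on a CIFS mount by repeatedly seeking to its end; with this change each such seek refetches the size from the server instead of trusting a cached value. A minimal sketch of that polling pattern (illustrative user-space code, not part of the patch):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(int argc, char **argv)
{
        int fd = open(argc > 1 ? argv[1] : "/mnt/cifs/logfile", O_RDONLY);
        off_t end;

        if (fd < 0)
                return 1;

        /* Each SEEK_END now forces the CIFS client to revalidate the size. */
        for (int i = 0; i < 5; i++) {
                end = lseek(fd, 0, SEEK_END);
                printf("current size: %lld bytes\n", (long long)end);
                sleep(1);
        }

        close(fd);
        return 0;
}
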
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8a49b2e77d37..e9dcf5ee29a2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1146,7 +1146,7 @@ static int cifs_writepages(struct address_space *mapping,
1146 pgoff_t end; 1146 pgoff_t end;
1147 pgoff_t index; 1147 pgoff_t index;
1148 int range_whole = 0; 1148 int range_whole = 0;
1149 struct kvec iov[32]; 1149 struct kvec * iov;
1150 int len; 1150 int len;
1151 int n_iov = 0; 1151 int n_iov = 0;
1152 pgoff_t next; 1152 pgoff_t next;
@@ -1171,15 +1171,21 @@ static int cifs_writepages(struct address_space *mapping,
1171 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server)) 1171 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1172 if(cifs_sb->tcon->ses->server->secMode & 1172 if(cifs_sb->tcon->ses->server->secMode &
1173 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 1173 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1174 if(!experimEnabled) 1174 if(!experimEnabled)
1175 return generic_writepages(mapping, wbc); 1175 return generic_writepages(mapping, wbc);
1176 1176
1177 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1178 if(iov == NULL)
1179 return generic_writepages(mapping, wbc);
1180
1181
1177 /* 1182 /*
1178 * BB: Is this meaningful for a non-block-device file system? 1183 * BB: Is this meaningful for a non-block-device file system?
1179 * If it is, we should test it again after we do I/O 1184 * If it is, we should test it again after we do I/O
1180 */ 1185 */
1181 if (wbc->nonblocking && bdi_write_congested(bdi)) { 1186 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1182 wbc->encountered_congestion = 1; 1187 wbc->encountered_congestion = 1;
1188 kfree(iov);
1183 return 0; 1189 return 0;
1184 } 1190 }
1185 1191
@@ -1345,7 +1351,7 @@ retry:
1345 mapping->writeback_index = index; 1351 mapping->writeback_index = index;
1346 1352
1347 FreeXid(xid); 1353 FreeXid(xid);
1348 1354 kfree(iov);
1349 return rc; 1355 return rc;
1350} 1356}
1351 1357
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 99dfb5337e31..782940be550f 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -156,9 +156,9 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
156 tmp_inode->i_atime = cnvrtDosUnixTm( 156 tmp_inode->i_atime = cnvrtDosUnixTm(
157 le16_to_cpu(pfindData->LastAccessDate), 157 le16_to_cpu(pfindData->LastAccessDate),
158 le16_to_cpu(pfindData->LastAccessTime)); 158 le16_to_cpu(pfindData->LastAccessTime));
159 tmp_inode->i_ctime = cnvrtDosUnixTm( 159 tmp_inode->i_ctime = cnvrtDosUnixTm(
160 le16_to_cpu(pfindData->LastWriteDate), 160 le16_to_cpu(pfindData->LastWriteDate),
161 le16_to_cpu(pfindData->LastWriteTime)); 161 le16_to_cpu(pfindData->LastWriteTime));
162 AdjustForTZ(cifs_sb->tcon, tmp_inode); 162 AdjustForTZ(cifs_sb->tcon, tmp_inode);
163 attr = le16_to_cpu(pfindData->Attributes); 163 attr = le16_to_cpu(pfindData->Attributes);
164 allocation_size = le32_to_cpu(pfindData->AllocationSize); 164 allocation_size = le32_to_cpu(pfindData->AllocationSize);
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 7a1b2b961ec8..1b1daf63f062 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -196,7 +196,7 @@ dohash(char *out, char *in, char *key, int forw)
196 char c[28]; 196 char c[28];
197 char d[28]; 197 char d[28];
198 char *cd; 198 char *cd;
199 char ki[16][48]; 199 char (*ki)[48];
200 char *pd1; 200 char *pd1;
201 char l[32], r[32]; 201 char l[32], r[32];
202 char *rl; 202 char *rl;
@@ -206,6 +206,12 @@ dohash(char *out, char *in, char *key, int forw)
206 if(pk1 == NULL) 206 if(pk1 == NULL)
207 return; 207 return;
208 208
209 ki = kmalloc(16*48, GFP_KERNEL);
210 if(ki == NULL) {
211 kfree(pk1);
212 return;
213 }
214
209 cd = pk1 + 56; 215 cd = pk1 + 56;
210 pd1= cd + 56; 216 pd1= cd + 56;
211 rl = pd1 + 64; 217 rl = pd1 + 64;
@@ -243,6 +249,7 @@ dohash(char *out, char *in, char *key, int forw)
243 er = kmalloc(48+48+32+32+32, GFP_KERNEL); 249 er = kmalloc(48+48+32+32+32, GFP_KERNEL);
244 if(er == NULL) { 250 if(er == NULL) {
245 kfree(pk1); 251 kfree(pk1);
252 kfree(ki);
246 return; 253 return;
247 } 254 }
248 erk = er+48; 255 erk = er+48;
@@ -290,6 +297,7 @@ dohash(char *out, char *in, char *key, int forw)
290 297
291 permute(out, rl, perm6, 64); 298 permute(out, rl, perm6, 64);
292 kfree(pk1); 299 kfree(pk1);
300 kfree(ki);
293} 301}
294 302
295static void 303static void
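
The char ki[16][48] to char (*ki)[48] change moves a 768-byte scratch table off the stack onto the heap without touching any of the ki[i][j] indexing that follows, since a pointer to arrays of 48 chars indexes exactly like the original two-dimensional array. A stand-alone illustration of that equivalence (names are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* Pointer to arrays of 48 chars: ki[i][j] works as with char ki[16][48],
         * but the 16*48 bytes live on the heap instead of the stack. */
        char (*ki)[48] = malloc(16 * 48);

        if (ki == NULL)
                return 1;

        memset(ki, 0, 16 * 48);
        ki[15][47] = 1;                 /* last element, still in bounds */
        printf("ki[15][47] = %d\n", ki[15][47]);

        free(ki);
        return 0;
}
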
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index b5654a284fef..6fa7b0d5c043 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -3,21 +3,21 @@ menu "Distributed Lock Manager"
3 3
4config DLM 4config DLM
5 tristate "Distributed Lock Manager (DLM)" 5 tristate "Distributed Lock Manager (DLM)"
6 depends on IPV6 || IPV6=n 6 depends on SYSFS && (IPV6 || IPV6=n)
7 select CONFIGFS_FS 7 select CONFIGFS_FS
8 select IP_SCTP if DLM_SCTP 8 select IP_SCTP if DLM_SCTP
9 help 9 help
10 A general purpose distributed lock manager for kernel or userspace 10 A general purpose distributed lock manager for kernel or userspace
11 applications. 11 applications.
12 12
13choice 13choice
14 prompt "Select DLM communications protocol" 14 prompt "Select DLM communications protocol"
15 depends on DLM 15 depends on DLM
16 default DLM_TCP 16 default DLM_TCP
17 help 17 help
 18 The DLM can use TCP or SCTP for its network communications. 18 The DLM can use TCP or SCTP for its network communications.
19 SCTP supports multi-homed operations whereas TCP doesn't. 19 SCTP supports multi-homed operations whereas TCP doesn't.
20 However, SCTP seems to have stability problems at the moment. 20 However, SCTP seems to have stability problems at the moment.
21 21
22config DLM_TCP 22config DLM_TCP
23 bool "TCP/IP" 23 bool "TCP/IP"
@@ -31,8 +31,8 @@ config DLM_DEBUG
31 bool "DLM debugging" 31 bool "DLM debugging"
32 depends on DLM 32 depends on DLM
33 help 33 help
34 Under the debugfs mount point, the name of each lockspace will 34 Under the debugfs mount point, the name of each lockspace will
35 appear as a file in the "dlm" directory. The output is the 35 appear as a file in the "dlm" directory. The output is the
36 list of resource and locks the local node knows about. 36 list of resource and locks the local node knows about.
37 37
38endmenu 38endmenu
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 88553054bbfa..8665c88e5af2 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -54,6 +54,11 @@ static struct config_item *make_node(struct config_group *, const char *);
54static void drop_node(struct config_group *, struct config_item *); 54static void drop_node(struct config_group *, struct config_item *);
55static void release_node(struct config_item *); 55static void release_node(struct config_item *);
56 56
57static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
58 char *buf);
59static ssize_t store_cluster(struct config_item *i,
60 struct configfs_attribute *a,
61 const char *buf, size_t len);
57static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 62static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
58 char *buf); 63 char *buf);
59static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a, 64static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
@@ -73,6 +78,101 @@ static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
73static ssize_t node_weight_read(struct node *nd, char *buf); 78static ssize_t node_weight_read(struct node *nd, char *buf);
74static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len); 79static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
75 80
81struct cluster {
82 struct config_group group;
83 unsigned int cl_tcp_port;
84 unsigned int cl_buffer_size;
85 unsigned int cl_rsbtbl_size;
86 unsigned int cl_lkbtbl_size;
87 unsigned int cl_dirtbl_size;
88 unsigned int cl_recover_timer;
89 unsigned int cl_toss_secs;
90 unsigned int cl_scan_secs;
91 unsigned int cl_log_debug;
92};
93
94enum {
95 CLUSTER_ATTR_TCP_PORT = 0,
96 CLUSTER_ATTR_BUFFER_SIZE,
97 CLUSTER_ATTR_RSBTBL_SIZE,
98 CLUSTER_ATTR_LKBTBL_SIZE,
99 CLUSTER_ATTR_DIRTBL_SIZE,
100 CLUSTER_ATTR_RECOVER_TIMER,
101 CLUSTER_ATTR_TOSS_SECS,
102 CLUSTER_ATTR_SCAN_SECS,
103 CLUSTER_ATTR_LOG_DEBUG,
104};
105
106struct cluster_attribute {
107 struct configfs_attribute attr;
108 ssize_t (*show)(struct cluster *, char *);
109 ssize_t (*store)(struct cluster *, const char *, size_t);
110};
111
112static ssize_t cluster_set(struct cluster *cl, unsigned int *cl_field,
113 unsigned int *info_field, int check_zero,
114 const char *buf, size_t len)
115{
116 unsigned int x;
117
118 if (!capable(CAP_SYS_ADMIN))
119 return -EACCES;
120
121 x = simple_strtoul(buf, NULL, 0);
122
123 if (check_zero && !x)
124 return -EINVAL;
125
126 *cl_field = x;
127 *info_field = x;
128
129 return len;
130}
131
132#define __CONFIGFS_ATTR(_name,_mode,_read,_write) { \
133 .attr = { .ca_name = __stringify(_name), \
134 .ca_mode = _mode, \
135 .ca_owner = THIS_MODULE }, \
136 .show = _read, \
137 .store = _write, \
138}
139
140#define CLUSTER_ATTR(name, check_zero) \
141static ssize_t name##_write(struct cluster *cl, const char *buf, size_t len) \
142{ \
143 return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name, \
144 check_zero, buf, len); \
145} \
146static ssize_t name##_read(struct cluster *cl, char *buf) \
147{ \
148 return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name); \
149} \
150static struct cluster_attribute cluster_attr_##name = \
151__CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
152
153CLUSTER_ATTR(tcp_port, 1);
154CLUSTER_ATTR(buffer_size, 1);
155CLUSTER_ATTR(rsbtbl_size, 1);
156CLUSTER_ATTR(lkbtbl_size, 1);
157CLUSTER_ATTR(dirtbl_size, 1);
158CLUSTER_ATTR(recover_timer, 1);
159CLUSTER_ATTR(toss_secs, 1);
160CLUSTER_ATTR(scan_secs, 1);
161CLUSTER_ATTR(log_debug, 0);
162
163static struct configfs_attribute *cluster_attrs[] = {
164 [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
165 [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
166 [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
167 [CLUSTER_ATTR_LKBTBL_SIZE] = &cluster_attr_lkbtbl_size.attr,
168 [CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr,
169 [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
170 [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
171 [CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr,
172 [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr,
173 NULL,
174};
175
76enum { 176enum {
77 COMM_ATTR_NODEID = 0, 177 COMM_ATTR_NODEID = 0,
78 COMM_ATTR_LOCAL, 178 COMM_ATTR_LOCAL,
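
To make the macro machinery above less opaque: each CLUSTER_ATTR(name, check_zero) invocation stamps out a read handler, a write handler and a cluster_attribute tying them together. The miniature below reproduces the pattern in user space under simplifying assumptions (no capability check, no dlm_config mirror, a plain return 0 where the kernel returns -EINVAL); all names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct cluster {
        unsigned int cl_tcp_port;
};

struct cluster_attribute {
        const char *name;
        size_t (*show)(struct cluster *, char *);
        size_t (*store)(struct cluster *, const char *, size_t);
};

#define CLUSTER_ATTR(field, check_zero)                                       \
static size_t field##_write(struct cluster *cl, const char *buf, size_t len) \
{                                                                             \
        unsigned int x = strtoul(buf, NULL, 0);                               \
        if ((check_zero) && !x)                                               \
                return 0; /* kernel version returns -EINVAL here */           \
        cl->cl_##field = x;                                                   \
        return len;                                                           \
}                                                                             \
static size_t field##_read(struct cluster *cl, char *buf)                     \
{                                                                             \
        return sprintf(buf, "%u\n", cl->cl_##field);                          \
}                                                                             \
static struct cluster_attribute cluster_attr_##field =                        \
        { #field, field##_read, field##_write }

CLUSTER_ATTR(tcp_port, 1);

int main(void)
{
        struct cluster cl = { .cl_tcp_port = 21064 };
        char buf[32];

        cluster_attr_tcp_port.store(&cl, "12345", 5);
        cluster_attr_tcp_port.show(&cl, buf);
        printf("%s = %s", cluster_attr_tcp_port.name, buf);
        return 0;
}
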
@@ -152,10 +252,6 @@ struct clusters {
152 struct configfs_subsystem subsys; 252 struct configfs_subsystem subsys;
153}; 253};
154 254
155struct cluster {
156 struct config_group group;
157};
158
159struct spaces { 255struct spaces {
160 struct config_group ss_group; 256 struct config_group ss_group;
161}; 257};
@@ -197,6 +293,8 @@ static struct configfs_group_operations clusters_ops = {
197 293
198static struct configfs_item_operations cluster_ops = { 294static struct configfs_item_operations cluster_ops = {
199 .release = release_cluster, 295 .release = release_cluster,
296 .show_attribute = show_cluster,
297 .store_attribute = store_cluster,
200}; 298};
201 299
202static struct configfs_group_operations spaces_ops = { 300static struct configfs_group_operations spaces_ops = {
@@ -237,6 +335,7 @@ static struct config_item_type clusters_type = {
237 335
238static struct config_item_type cluster_type = { 336static struct config_item_type cluster_type = {
239 .ct_item_ops = &cluster_ops, 337 .ct_item_ops = &cluster_ops,
338 .ct_attrs = cluster_attrs,
240 .ct_owner = THIS_MODULE, 339 .ct_owner = THIS_MODULE,
241}; 340};
242 341
@@ -317,6 +416,16 @@ static struct config_group *make_cluster(struct config_group *g,
317 cl->group.default_groups[1] = &cms->cs_group; 416 cl->group.default_groups[1] = &cms->cs_group;
318 cl->group.default_groups[2] = NULL; 417 cl->group.default_groups[2] = NULL;
319 418
419 cl->cl_tcp_port = dlm_config.ci_tcp_port;
420 cl->cl_buffer_size = dlm_config.ci_buffer_size;
421 cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
422 cl->cl_lkbtbl_size = dlm_config.ci_lkbtbl_size;
423 cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size;
424 cl->cl_recover_timer = dlm_config.ci_recover_timer;
425 cl->cl_toss_secs = dlm_config.ci_toss_secs;
426 cl->cl_scan_secs = dlm_config.ci_scan_secs;
427 cl->cl_log_debug = dlm_config.ci_log_debug;
428
320 space_list = &sps->ss_group; 429 space_list = &sps->ss_group;
321 comm_list = &cms->cs_group; 430 comm_list = &cms->cs_group;
322 return &cl->group; 431 return &cl->group;
@@ -509,6 +618,25 @@ void dlm_config_exit(void)
509 * Functions for user space to read/write attributes 618 * Functions for user space to read/write attributes
510 */ 619 */
511 620
621static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
622 char *buf)
623{
624 struct cluster *cl = to_cluster(i);
625 struct cluster_attribute *cla =
626 container_of(a, struct cluster_attribute, attr);
627 return cla->show ? cla->show(cl, buf) : 0;
628}
629
630static ssize_t store_cluster(struct config_item *i,
631 struct configfs_attribute *a,
632 const char *buf, size_t len)
633{
634 struct cluster *cl = to_cluster(i);
635 struct cluster_attribute *cla =
636 container_of(a, struct cluster_attribute, attr);
637 return cla->store ? cla->store(cl, buf, len) : -EINVAL;
638}
639
512static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 640static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
513 char *buf) 641 char *buf)
514{ 642{
@@ -775,15 +903,17 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
775#define DEFAULT_RECOVER_TIMER 5 903#define DEFAULT_RECOVER_TIMER 5
776#define DEFAULT_TOSS_SECS 10 904#define DEFAULT_TOSS_SECS 10
777#define DEFAULT_SCAN_SECS 5 905#define DEFAULT_SCAN_SECS 5
906#define DEFAULT_LOG_DEBUG 0
778 907
779struct dlm_config_info dlm_config = { 908struct dlm_config_info dlm_config = {
780 .tcp_port = DEFAULT_TCP_PORT, 909 .ci_tcp_port = DEFAULT_TCP_PORT,
781 .buffer_size = DEFAULT_BUFFER_SIZE, 910 .ci_buffer_size = DEFAULT_BUFFER_SIZE,
782 .rsbtbl_size = DEFAULT_RSBTBL_SIZE, 911 .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
783 .lkbtbl_size = DEFAULT_LKBTBL_SIZE, 912 .ci_lkbtbl_size = DEFAULT_LKBTBL_SIZE,
784 .dirtbl_size = DEFAULT_DIRTBL_SIZE, 913 .ci_dirtbl_size = DEFAULT_DIRTBL_SIZE,
785 .recover_timer = DEFAULT_RECOVER_TIMER, 914 .ci_recover_timer = DEFAULT_RECOVER_TIMER,
786 .toss_secs = DEFAULT_TOSS_SECS, 915 .ci_toss_secs = DEFAULT_TOSS_SECS,
787 .scan_secs = DEFAULT_SCAN_SECS 916 .ci_scan_secs = DEFAULT_SCAN_SECS,
917 .ci_log_debug = DEFAULT_LOG_DEBUG
788}; 918};
789 919
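With the defaults above, each knob also becomes a writable file once a cluster group exists in configfs. A minimal user-space sketch for turning on debug logging follows; the mount point /sys/kernel/config and the group name "cluster" are assumptions about how the cluster manager sets things up, not something this patch defines:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path; adjust to the actual configfs mount and group. */
	const char *path = "/sys/kernel/config/dlm/cluster/log_debug";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* cluster_set() parses the buffer with simple_strtoul() */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}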
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 9da7839958a9..1e978611a96e 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -17,14 +17,15 @@
17#define DLM_MAX_ADDR_COUNT 3 17#define DLM_MAX_ADDR_COUNT 3
18 18
19struct dlm_config_info { 19struct dlm_config_info {
20 int tcp_port; 20 int ci_tcp_port;
21 int buffer_size; 21 int ci_buffer_size;
22 int rsbtbl_size; 22 int ci_rsbtbl_size;
23 int lkbtbl_size; 23 int ci_lkbtbl_size;
24 int dirtbl_size; 24 int ci_dirtbl_size;
25 int recover_timer; 25 int ci_recover_timer;
26 int toss_secs; 26 int ci_toss_secs;
27 int scan_secs; 27 int ci_scan_secs;
28 int ci_log_debug;
28}; 29};
29 30
30extern struct dlm_config_info dlm_config; 31extern struct dlm_config_info dlm_config;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 1ee8195e6fc0..61d93201e1b2 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -41,6 +41,7 @@
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42 42
43#include <linux/dlm.h> 43#include <linux/dlm.h>
44#include "config.h"
44 45
45#define DLM_LOCKSPACE_LEN 64 46#define DLM_LOCKSPACE_LEN 64
46 47
@@ -69,12 +70,12 @@ struct dlm_mhandle;
69#define log_error(ls, fmt, args...) \ 70#define log_error(ls, fmt, args...) \
70 printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args) 71 printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
71 72
72#define DLM_LOG_DEBUG 73#define log_debug(ls, fmt, args...) \
73#ifdef DLM_LOG_DEBUG 74do { \
74#define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args) 75 if (dlm_config.ci_log_debug) \
75#else 76 printk(KERN_DEBUG "dlm: %s: " fmt "\n", \
76#define log_debug(ls, fmt, args...) 77 (ls)->ls_name , ##args); \
77#endif 78} while (0)
78 79
79#define DLM_ASSERT(x, do) \ 80#define DLM_ASSERT(x, do) \
80{ \ 81{ \
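log_debug() is now always compiled in and gated at run time by dlm_config.ci_log_debug instead of the old DLM_LOG_DEBUG build-time switch. A call such as log_debug(ls, "master copy exists %x", lkb->lkb_id) therefore expands to roughly the following (hand expansion, including the call site's semicolon, for illustration only):

do {
	if (dlm_config.ci_log_debug)
		printk(KERN_DEBUG "dlm: %s: " "master copy exists %x" "\n",
		       (ls)->ls_name , lkb->lkb_id);
} while (0);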
@@ -309,8 +310,8 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
309 310
310/* dlm_header is first element of all structs sent between nodes */ 311/* dlm_header is first element of all structs sent between nodes */
311 312
312#define DLM_HEADER_MAJOR 0x00020000 313#define DLM_HEADER_MAJOR 0x00030000
313#define DLM_HEADER_MINOR 0x00000001 314#define DLM_HEADER_MINOR 0x00000000
314 315
315#define DLM_MSG 1 316#define DLM_MSG 1
316#define DLM_RCOM 2 317#define DLM_RCOM 2
@@ -386,6 +387,8 @@ struct dlm_rcom {
386 uint32_t rc_type; /* DLM_RCOM_ */ 387 uint32_t rc_type; /* DLM_RCOM_ */
387 int rc_result; /* multi-purpose */ 388 int rc_result; /* multi-purpose */
388 uint64_t rc_id; /* match reply with request */ 389 uint64_t rc_id; /* match reply with request */
390 uint64_t rc_seq; /* sender's ls_recover_seq */
391 uint64_t rc_seq_reply; /* remote ls_recover_seq */
389 char rc_buf[0]; 392 char rc_buf[0];
390}; 393};
391 394
@@ -523,6 +526,7 @@ struct dlm_user_proc {
523 spinlock_t asts_spin; 526 spinlock_t asts_spin;
524 struct list_head locks; 527 struct list_head locks;
525 spinlock_t locks_spin; 528 spinlock_t locks_spin;
529 struct list_head unlocking;
526 wait_queue_head_t wait; 530 wait_queue_head_t wait;
527}; 531};
528 532
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 30878defaeb6..e725005fafd0 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -754,6 +754,11 @@ static void add_to_waiters(struct dlm_lkb *lkb, int mstype)
754 mutex_unlock(&ls->ls_waiters_mutex); 754 mutex_unlock(&ls->ls_waiters_mutex);
755} 755}
756 756
757/* We clear the RESEND flag because we might be taking an lkb off the waiters
758 list as part of process_requestqueue (e.g. a lookup that has an optimized
759 request reply on the requestqueue) between dlm_recover_waiters_pre() which
760 set RESEND and dlm_recover_waiters_post() */
761
757static int _remove_from_waiters(struct dlm_lkb *lkb) 762static int _remove_from_waiters(struct dlm_lkb *lkb)
758{ 763{
759 int error = 0; 764 int error = 0;
@@ -764,6 +769,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb)
764 goto out; 769 goto out;
765 } 770 }
766 lkb->lkb_wait_type = 0; 771 lkb->lkb_wait_type = 0;
772 lkb->lkb_flags &= ~DLM_IFL_RESEND;
767 list_del(&lkb->lkb_wait_reply); 773 list_del(&lkb->lkb_wait_reply);
768 unhold_lkb(lkb); 774 unhold_lkb(lkb);
769 out: 775 out:
@@ -810,7 +816,7 @@ static int shrink_bucket(struct dlm_ls *ls, int b)
810 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss, 816 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
811 res_hashchain) { 817 res_hashchain) {
812 if (!time_after_eq(jiffies, r->res_toss_time + 818 if (!time_after_eq(jiffies, r->res_toss_time +
813 dlm_config.toss_secs * HZ)) 819 dlm_config.ci_toss_secs * HZ))
814 continue; 820 continue;
815 found = 1; 821 found = 1;
816 break; 822 break;
@@ -2144,12 +2150,24 @@ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2144 if (lkb->lkb_astaddr) 2150 if (lkb->lkb_astaddr)
2145 ms->m_asts |= AST_COMP; 2151 ms->m_asts |= AST_COMP;
2146 2152
2147 if (ms->m_type == DLM_MSG_REQUEST || ms->m_type == DLM_MSG_LOOKUP) 2153 /* compare with switch in create_message; send_remove() doesn't
2148 memcpy(ms->m_extra, r->res_name, r->res_length); 2154 use send_args() */
2149 2155
2150 else if (lkb->lkb_lvbptr) 2156 switch (ms->m_type) {
2157 case DLM_MSG_REQUEST:
2158 case DLM_MSG_LOOKUP:
2159 memcpy(ms->m_extra, r->res_name, r->res_length);
2160 break;
2161 case DLM_MSG_CONVERT:
2162 case DLM_MSG_UNLOCK:
2163 case DLM_MSG_REQUEST_REPLY:
2164 case DLM_MSG_CONVERT_REPLY:
2165 case DLM_MSG_GRANT:
2166 if (!lkb->lkb_lvbptr)
2167 break;
2151 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); 2168 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2152 2169 break;
2170 }
2153} 2171}
2154 2172
2155static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype) 2173static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
@@ -2418,8 +2436,12 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2418 2436
2419 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb);); 2437 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
2420 2438
2421 if (receive_lvb(ls, lkb, ms)) 2439 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2422 return -ENOMEM; 2440 /* lkb was just created so there won't be an lvb yet */
2441 lkb->lkb_lvbptr = allocate_lvb(ls);
2442 if (!lkb->lkb_lvbptr)
2443 return -ENOMEM;
2444 }
2423 2445
2424 return 0; 2446 return 0;
2425} 2447}
@@ -3002,7 +3024,7 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
3002{ 3024{
3003 struct dlm_message *ms = (struct dlm_message *) hd; 3025 struct dlm_message *ms = (struct dlm_message *) hd;
3004 struct dlm_ls *ls; 3026 struct dlm_ls *ls;
3005 int error; 3027 int error = 0;
3006 3028
3007 if (!recovery) 3029 if (!recovery)
3008 dlm_message_in(ms); 3030 dlm_message_in(ms);
@@ -3119,7 +3141,7 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
3119 out: 3141 out:
3120 dlm_put_lockspace(ls); 3142 dlm_put_lockspace(ls);
3121 dlm_astd_wake(); 3143 dlm_astd_wake();
3122 return 0; 3144 return error;
3123} 3145}
3124 3146
3125 3147
@@ -3132,6 +3154,7 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3132 if (middle_conversion(lkb)) { 3154 if (middle_conversion(lkb)) {
3133 hold_lkb(lkb); 3155 hold_lkb(lkb);
3134 ls->ls_stub_ms.m_result = -EINPROGRESS; 3156 ls->ls_stub_ms.m_result = -EINPROGRESS;
3157 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3135 _remove_from_waiters(lkb); 3158 _remove_from_waiters(lkb);
3136 _receive_convert_reply(lkb, &ls->ls_stub_ms); 3159 _receive_convert_reply(lkb, &ls->ls_stub_ms);
3137 3160
@@ -3205,6 +3228,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
3205 case DLM_MSG_UNLOCK: 3228 case DLM_MSG_UNLOCK:
3206 hold_lkb(lkb); 3229 hold_lkb(lkb);
3207 ls->ls_stub_ms.m_result = -DLM_EUNLOCK; 3230 ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
3231 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3208 _remove_from_waiters(lkb); 3232 _remove_from_waiters(lkb);
3209 _receive_unlock_reply(lkb, &ls->ls_stub_ms); 3233 _receive_unlock_reply(lkb, &ls->ls_stub_ms);
3210 dlm_put_lkb(lkb); 3234 dlm_put_lkb(lkb);
@@ -3213,6 +3237,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
3213 case DLM_MSG_CANCEL: 3237 case DLM_MSG_CANCEL:
3214 hold_lkb(lkb); 3238 hold_lkb(lkb);
3215 ls->ls_stub_ms.m_result = -DLM_ECANCEL; 3239 ls->ls_stub_ms.m_result = -DLM_ECANCEL;
3240 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3216 _remove_from_waiters(lkb); 3241 _remove_from_waiters(lkb);
3217 _receive_cancel_reply(lkb, &ls->ls_stub_ms); 3242 _receive_cancel_reply(lkb, &ls->ls_stub_ms);
3218 dlm_put_lkb(lkb); 3243 dlm_put_lkb(lkb);
@@ -3571,6 +3596,14 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3571 lock_rsb(r); 3596 lock_rsb(r);
3572 3597
3573 switch (error) { 3598 switch (error) {
3599 case -EBADR:
3600 /* There's a chance the new master received our lock before

3601 dlm_recover_master_reply(), this wouldn't happen if we did
3602 a barrier between recover_masters and recover_locks. */
3603 log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
3604 (unsigned long)r, r->res_name);
3605 dlm_send_rcom_lock(r, lkb);
3606 goto out;
3574 case -EEXIST: 3607 case -EEXIST:
3575 log_debug(ls, "master copy exists %x", lkb->lkb_id); 3608 log_debug(ls, "master copy exists %x", lkb->lkb_id);
3576 /* fall through */ 3609 /* fall through */
@@ -3585,7 +3618,7 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3585 /* an ack for dlm_recover_locks() which waits for replies from 3618 /* an ack for dlm_recover_locks() which waits for replies from
3586 all the locks it sends to new masters */ 3619 all the locks it sends to new masters */
3587 dlm_recovered_lock(r); 3620 dlm_recovered_lock(r);
3588 3621 out:
3589 unlock_rsb(r); 3622 unlock_rsb(r);
3590 put_rsb(r); 3623 put_rsb(r);
3591 dlm_put_lkb(lkb); 3624 dlm_put_lkb(lkb);
@@ -3610,7 +3643,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
3610 } 3643 }
3611 3644
3612 if (flags & DLM_LKF_VALBLK) { 3645 if (flags & DLM_LKF_VALBLK) {
3613 ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL); 3646 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3614 if (!ua->lksb.sb_lvbptr) { 3647 if (!ua->lksb.sb_lvbptr) {
3615 kfree(ua); 3648 kfree(ua);
3616 __put_lkb(ls, lkb); 3649 __put_lkb(ls, lkb);
@@ -3679,7 +3712,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3679 ua = (struct dlm_user_args *)lkb->lkb_astparam; 3712 ua = (struct dlm_user_args *)lkb->lkb_astparam;
3680 3713
3681 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { 3714 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
3682 ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL); 3715 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3683 if (!ua->lksb.sb_lvbptr) { 3716 if (!ua->lksb.sb_lvbptr) {
3684 error = -ENOMEM; 3717 error = -ENOMEM;
3685 goto out_put; 3718 goto out_put;
@@ -3745,12 +3778,10 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3745 goto out_put; 3778 goto out_put;
3746 3779
3747 spin_lock(&ua->proc->locks_spin); 3780 spin_lock(&ua->proc->locks_spin);
3748 list_del_init(&lkb->lkb_ownqueue); 3781 /* dlm_user_add_ast() may have already taken lkb off the proc list */
3782 if (!list_empty(&lkb->lkb_ownqueue))
3783 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
3749 spin_unlock(&ua->proc->locks_spin); 3784 spin_unlock(&ua->proc->locks_spin);
3750
3751 /* this removes the reference for the proc->locks list added by
3752 dlm_user_request */
3753 unhold_lkb(lkb);
3754 out_put: 3785 out_put:
3755 dlm_put_lkb(lkb); 3786 dlm_put_lkb(lkb);
3756 out: 3787 out:
@@ -3790,9 +3821,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3790 /* this lkb was removed from the WAITING queue */ 3821 /* this lkb was removed from the WAITING queue */
3791 if (lkb->lkb_grmode == DLM_LOCK_IV) { 3822 if (lkb->lkb_grmode == DLM_LOCK_IV) {
3792 spin_lock(&ua->proc->locks_spin); 3823 spin_lock(&ua->proc->locks_spin);
3793 list_del_init(&lkb->lkb_ownqueue); 3824 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
3794 spin_unlock(&ua->proc->locks_spin); 3825 spin_unlock(&ua->proc->locks_spin);
3795 unhold_lkb(lkb);
3796 } 3826 }
3797 out_put: 3827 out_put:
3798 dlm_put_lkb(lkb); 3828 dlm_put_lkb(lkb);
@@ -3853,11 +3883,6 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3853 mutex_lock(&ls->ls_clear_proc_locks); 3883 mutex_lock(&ls->ls_clear_proc_locks);
3854 3884
3855 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) { 3885 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
3856 if (lkb->lkb_ast_type) {
3857 list_del(&lkb->lkb_astqueue);
3858 unhold_lkb(lkb);
3859 }
3860
3861 list_del_init(&lkb->lkb_ownqueue); 3886 list_del_init(&lkb->lkb_ownqueue);
3862 3887
3863 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) { 3888 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
@@ -3874,6 +3899,20 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3874 3899
3875 dlm_put_lkb(lkb); 3900 dlm_put_lkb(lkb);
3876 } 3901 }
3902
3903 /* in-progress unlocks */
3904 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
3905 list_del_init(&lkb->lkb_ownqueue);
3906 lkb->lkb_flags |= DLM_IFL_DEAD;
3907 dlm_put_lkb(lkb);
3908 }
3909
3910 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
3911 list_del(&lkb->lkb_astqueue);
3912 dlm_put_lkb(lkb);
3913 }
3914
3877 mutex_unlock(&ls->ls_clear_proc_locks); 3915 mutex_unlock(&ls->ls_clear_proc_locks);
3878 unlock_recovery(ls); 3916 unlock_recovery(ls);
3879} 3917}
3918
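One consequence of the new proc->unlocking list: it has to be initialized wherever the dlm_user_proc is allocated, a hunk that is not part of this excerpt. Presumably that is the usual one-liner alongside the initialization of the proc's existing lists (sketch of the assumed initialization, not text from the patch):

	INIT_LIST_HEAD(&proc->unlocking);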
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 59012b089e8d..f40817b53c6f 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -236,7 +236,7 @@ static int dlm_scand(void *data)
236 while (!kthread_should_stop()) { 236 while (!kthread_should_stop()) {
237 list_for_each_entry(ls, &lslist, ls_list) 237 list_for_each_entry(ls, &lslist, ls_list)
238 dlm_scan_rsbs(ls); 238 dlm_scan_rsbs(ls);
239 schedule_timeout_interruptible(dlm_config.scan_secs * HZ); 239 schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
240 } 240 }
241 return 0; 241 return 0;
242} 242}
@@ -422,7 +422,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
422 ls->ls_count = 0; 422 ls->ls_count = 0;
423 ls->ls_flags = 0; 423 ls->ls_flags = 0;
424 424
425 size = dlm_config.rsbtbl_size; 425 size = dlm_config.ci_rsbtbl_size;
426 ls->ls_rsbtbl_size = size; 426 ls->ls_rsbtbl_size = size;
427 427
428 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL); 428 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
@@ -434,7 +434,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
434 rwlock_init(&ls->ls_rsbtbl[i].lock); 434 rwlock_init(&ls->ls_rsbtbl[i].lock);
435 } 435 }
436 436
437 size = dlm_config.lkbtbl_size; 437 size = dlm_config.ci_lkbtbl_size;
438 ls->ls_lkbtbl_size = size; 438 ls->ls_lkbtbl_size = size;
439 439
440 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL); 440 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
@@ -446,7 +446,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
446 ls->ls_lkbtbl[i].counter = 1; 446 ls->ls_lkbtbl[i].counter = 1;
447 } 447 }
448 448
449 size = dlm_config.dirtbl_size; 449 size = dlm_config.ci_dirtbl_size;
450 ls->ls_dirtbl_size = size; 450 ls->ls_dirtbl_size = size;
451 451
452 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL); 452 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
@@ -489,7 +489,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
489 mutex_init(&ls->ls_requestqueue_mutex); 489 mutex_init(&ls->ls_requestqueue_mutex);
490 mutex_init(&ls->ls_clear_proc_locks); 490 mutex_init(&ls->ls_clear_proc_locks);
491 491
492 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL); 492 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
493 if (!ls->ls_recover_buf) 493 if (!ls->ls_recover_buf)
494 goto out_dirfree; 494 goto out_dirfree;
495 495
diff --git a/fs/dlm/lowcomms-sctp.c b/fs/dlm/lowcomms-sctp.c
index fe158d7a9285..dc83a9d979b5 100644
--- a/fs/dlm/lowcomms-sctp.c
+++ b/fs/dlm/lowcomms-sctp.c
@@ -72,6 +72,8 @@ struct nodeinfo {
72 struct list_head writequeue; /* outgoing writequeue_entries */ 72 struct list_head writequeue; /* outgoing writequeue_entries */
73 spinlock_t writequeue_lock; 73 spinlock_t writequeue_lock;
74 int nodeid; 74 int nodeid;
75 struct work_struct swork; /* Send workqueue */
76 struct work_struct lwork; /* Locking workqueue */
75}; 77};
76 78
77static DEFINE_IDR(nodeinfo_idr); 79static DEFINE_IDR(nodeinfo_idr);
@@ -96,6 +98,7 @@ struct connection {
96 atomic_t waiting_requests; 98 atomic_t waiting_requests;
97 struct cbuf cb; 99 struct cbuf cb;
98 int eagain_flag; 100 int eagain_flag;
101 struct work_struct work; /* Send workqueue */
99}; 102};
100 103
101/* An entry waiting to be sent */ 104/* An entry waiting to be sent */
@@ -137,19 +140,23 @@ static void cbuf_eat(struct cbuf *cb, int n)
137static LIST_HEAD(write_nodes); 140static LIST_HEAD(write_nodes);
138static DEFINE_SPINLOCK(write_nodes_lock); 141static DEFINE_SPINLOCK(write_nodes_lock);
139 142
143
140/* Maximum number of incoming messages to process before 144/* Maximum number of incoming messages to process before
141 * doing a schedule() 145 * doing a schedule()
142 */ 146 */
143#define MAX_RX_MSG_COUNT 25 147#define MAX_RX_MSG_COUNT 25
144 148
145/* Manage daemons */ 149/* Work queues */
146static struct task_struct *recv_task; 150static struct workqueue_struct *recv_workqueue;
147static struct task_struct *send_task; 151static struct workqueue_struct *send_workqueue;
148static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait); 152static struct workqueue_struct *lock_workqueue;
149 153
150/* The SCTP connection */ 154/* The SCTP connection */
151static struct connection sctp_con; 155static struct connection sctp_con;
152 156
157static void process_send_sockets(struct work_struct *work);
158static void process_recv_sockets(struct work_struct *work);
159static void process_lock_request(struct work_struct *work);
153 160
154static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr) 161static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
155{ 162{
@@ -222,6 +229,8 @@ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
222 spin_lock_init(&ni->lock); 229 spin_lock_init(&ni->lock);
223 INIT_LIST_HEAD(&ni->writequeue); 230 INIT_LIST_HEAD(&ni->writequeue);
224 spin_lock_init(&ni->writequeue_lock); 231 spin_lock_init(&ni->writequeue_lock);
232 INIT_WORK(&ni->lwork, process_lock_request);
233 INIT_WORK(&ni->swork, process_send_sockets);
225 ni->nodeid = nodeid; 234 ni->nodeid = nodeid;
226 235
227 if (nodeid > max_nodeid) 236 if (nodeid > max_nodeid)
@@ -249,11 +258,8 @@ static struct nodeinfo *assoc2nodeinfo(sctp_assoc_t assoc)
249/* Data or notification available on socket */ 258/* Data or notification available on socket */
250static void lowcomms_data_ready(struct sock *sk, int count_unused) 259static void lowcomms_data_ready(struct sock *sk, int count_unused)
251{ 260{
252 atomic_inc(&sctp_con.waiting_requests);
253 if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags)) 261 if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags))
254 return; 262 queue_work(recv_workqueue, &sctp_con.work);
255
256 wake_up_interruptible(&lowcomms_recv_wait);
257} 263}
258 264
259 265
@@ -361,10 +367,10 @@ static void init_failed(void)
361 spin_lock_bh(&write_nodes_lock); 367 spin_lock_bh(&write_nodes_lock);
362 list_add_tail(&ni->write_list, &write_nodes); 368 list_add_tail(&ni->write_list, &write_nodes);
363 spin_unlock_bh(&write_nodes_lock); 369 spin_unlock_bh(&write_nodes_lock);
370 queue_work(send_workqueue, &ni->swork);
364 } 371 }
365 } 372 }
366 } 373 }
367 wake_up_process(send_task);
368} 374}
369 375
370/* Something happened to an association */ 376/* Something happened to an association */
@@ -446,8 +452,8 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
446 spin_lock_bh(&write_nodes_lock); 452 spin_lock_bh(&write_nodes_lock);
447 list_add_tail(&ni->write_list, &write_nodes); 453 list_add_tail(&ni->write_list, &write_nodes);
448 spin_unlock_bh(&write_nodes_lock); 454 spin_unlock_bh(&write_nodes_lock);
455 queue_work(send_workqueue, &ni->swork);
449 } 456 }
450 wake_up_process(send_task);
451 } 457 }
452 break; 458 break;
453 459
@@ -580,8 +586,8 @@ static int receive_from_sock(void)
580 spin_lock_bh(&write_nodes_lock); 586 spin_lock_bh(&write_nodes_lock);
581 list_add_tail(&ni->write_list, &write_nodes); 587 list_add_tail(&ni->write_list, &write_nodes);
582 spin_unlock_bh(&write_nodes_lock); 588 spin_unlock_bh(&write_nodes_lock);
589 queue_work(send_workqueue, &ni->swork);
583 } 590 }
584 wake_up_process(send_task);
585 } 591 }
586 } 592 }
587 593
@@ -590,6 +596,7 @@ static int receive_from_sock(void)
590 return 0; 596 return 0;
591 597
592 cbuf_add(&sctp_con.cb, ret); 598 cbuf_add(&sctp_con.cb, ret);
599 // PJC: TODO: Add to node's workqueue....can we ??
593 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid), 600 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
594 page_address(sctp_con.rx_page), 601 page_address(sctp_con.rx_page),
595 sctp_con.cb.base, sctp_con.cb.len, 602 sctp_con.cb.base, sctp_con.cb.len,
@@ -635,7 +642,7 @@ static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
635 642
636 if (result < 0) 643 if (result < 0)
637 log_print("Can't bind to port %d addr number %d", 644 log_print("Can't bind to port %d addr number %d",
638 dlm_config.tcp_port, num); 645 dlm_config.ci_tcp_port, num);
639 646
640 return result; 647 return result;
641} 648}
@@ -711,7 +718,7 @@ static int init_sock(void)
711 /* Bind to all interfaces. */ 718 /* Bind to all interfaces. */
712 for (i = 0; i < dlm_local_count; i++) { 719 for (i = 0; i < dlm_local_count; i++) {
713 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr)); 720 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
714 make_sockaddr(&localaddr, dlm_config.tcp_port, &addr_len); 721 make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);
715 722
716 result = add_bind_addr(&localaddr, addr_len, num); 723 result = add_bind_addr(&localaddr, addr_len, num);
717 if (result) 724 if (result)
@@ -820,7 +827,8 @@ void dlm_lowcomms_commit_buffer(void *arg)
820 spin_lock_bh(&write_nodes_lock); 827 spin_lock_bh(&write_nodes_lock);
821 list_add_tail(&ni->write_list, &write_nodes); 828 list_add_tail(&ni->write_list, &write_nodes);
822 spin_unlock_bh(&write_nodes_lock); 829 spin_unlock_bh(&write_nodes_lock);
823 wake_up_process(send_task); 830
831 queue_work(send_workqueue, &ni->swork);
824 } 832 }
825 return; 833 return;
826 834
@@ -863,7 +871,7 @@ static void initiate_association(int nodeid)
863 return; 871 return;
864 } 872 }
865 873
866 make_sockaddr(&rem_addr, dlm_config.tcp_port, &addrlen); 874 make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);
867 875
868 outmessage.msg_name = &rem_addr; 876 outmessage.msg_name = &rem_addr;
869 outmessage.msg_namelen = addrlen; 877 outmessage.msg_namelen = addrlen;
@@ -1088,101 +1096,75 @@ int dlm_lowcomms_close(int nodeid)
1088 return 0; 1096 return 0;
1089} 1097}
1090 1098
1091static int write_list_empty(void) 1099// PJC: The work queue function for receiving.
1100static void process_recv_sockets(struct work_struct *work)
1092{ 1101{
1093 int status; 1102 if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
1094 1103 int ret;
1095 spin_lock_bh(&write_nodes_lock);
1096 status = list_empty(&write_nodes);
1097 spin_unlock_bh(&write_nodes_lock);
1098
1099 return status;
1100}
1101
1102static int dlm_recvd(void *data)
1103{
1104 DECLARE_WAITQUEUE(wait, current);
1105
1106 while (!kthread_should_stop()) {
1107 int count = 0; 1104 int count = 0;
1108 1105
1109 set_current_state(TASK_INTERRUPTIBLE); 1106 do {
1110 add_wait_queue(&lowcomms_recv_wait, &wait); 1107 ret = receive_from_sock();
1111 if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
1112 cond_resched();
1113 remove_wait_queue(&lowcomms_recv_wait, &wait);
1114 set_current_state(TASK_RUNNING);
1115
1116 if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
1117 int ret;
1118
1119 do {
1120 ret = receive_from_sock();
1121 1108
1122 /* Don't starve out everyone else */ 1109 /* Don't starve out everyone else */
1123 if (++count >= MAX_RX_MSG_COUNT) { 1110 if (++count >= MAX_RX_MSG_COUNT) {
1124 cond_resched(); 1111 cond_resched();
1125 count = 0; 1112 count = 0;
1126 } 1113 }
1127 } while (!kthread_should_stop() && ret >=0); 1114 } while (!kthread_should_stop() && ret >=0);
1128 }
1129 cond_resched();
1130 } 1115 }
1131 1116 cond_resched();
1132 return 0;
1133} 1117}
1134 1118
1135static int dlm_sendd(void *data) 1119// PJC: the work queue function for sending
1120static void process_send_sockets(struct work_struct *work)
1136{ 1121{
1137 DECLARE_WAITQUEUE(wait, current); 1122 if (sctp_con.eagain_flag) {
1138 1123 sctp_con.eagain_flag = 0;
1139 add_wait_queue(sctp_con.sock->sk->sk_sleep, &wait); 1124 refill_write_queue();
1140
1141 while (!kthread_should_stop()) {
1142 set_current_state(TASK_INTERRUPTIBLE);
1143 if (write_list_empty())
1144 cond_resched();
1145 set_current_state(TASK_RUNNING);
1146
1147 if (sctp_con.eagain_flag) {
1148 sctp_con.eagain_flag = 0;
1149 refill_write_queue();
1150 }
1151 process_output_queue();
1152 } 1125 }
1126 process_output_queue();
1127}
1153 1128
1154 remove_wait_queue(sctp_con.sock->sk->sk_sleep, &wait); 1129// PJC: Process lock requests from a particular node.
1155 1130// TODO: can we optimise this out on UP ??
1156 return 0; 1131static void process_lock_request(struct work_struct *work)
1132{
1157} 1133}
1158 1134
1159static void daemons_stop(void) 1135static void daemons_stop(void)
1160{ 1136{
1161 kthread_stop(recv_task); 1137 destroy_workqueue(recv_workqueue);
1162 kthread_stop(send_task); 1138 destroy_workqueue(send_workqueue);
1139 destroy_workqueue(lock_workqueue);
1163} 1140}
1164 1141
1165static int daemons_start(void) 1142static int daemons_start(void)
1166{ 1143{
1167 struct task_struct *p;
1168 int error; 1144 int error;
1145 recv_workqueue = create_workqueue("dlm_recv");
1146 error = IS_ERR(recv_workqueue);
1147 if (error) {
1148 log_print("can't start dlm_recv %d", error);
1149 return error;
1150 }
1169 1151
1170 p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); 1152 send_workqueue = create_singlethread_workqueue("dlm_send");
1171 error = IS_ERR(p); 1153 error = IS_ERR(send_workqueue);
1172 if (error) { 1154 if (error) {
1173 log_print("can't start dlm_recvd %d", error); 1155 log_print("can't start dlm_send %d", error);
1156 destroy_workqueue(recv_workqueue);
1174 return error; 1157 return error;
1175 } 1158 }
1176 recv_task = p;
1177 1159
1178 p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); 1160 lock_workqueue = create_workqueue("dlm_rlock");
1179 error = IS_ERR(p); 1161 error = IS_ERR(lock_workqueue);
1180 if (error) { 1162 if (error) {
1181 log_print("can't start dlm_sendd %d", error); 1163 log_print("can't start dlm_rlock %d", error);
1182 kthread_stop(recv_task); 1164 destroy_workqueue(send_workqueue);
1165 destroy_workqueue(recv_workqueue);
1183 return error; 1166 return error;
1184 } 1167 }
1185 send_task = p;
1186 1168
1187 return 0; 1169 return 0;
1188} 1170}
@@ -1194,6 +1176,8 @@ int dlm_lowcomms_start(void)
1194{ 1176{
1195 int error; 1177 int error;
1196 1178
1179 INIT_WORK(&sctp_con.work, process_recv_sockets);
1180
1197 error = init_sock(); 1181 error = init_sock();
1198 if (error) 1182 if (error)
1199 goto fail_sock; 1183 goto fail_sock;
@@ -1224,4 +1208,3 @@ void dlm_lowcomms_stop(void)
1224 for (i = 0; i < dlm_local_count; i++) 1208 for (i = 0; i < dlm_local_count; i++)
1225 kfree(dlm_local_addr[i]); 1209 kfree(dlm_local_addr[i]);
1226} 1210}
1227
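The dlm_recvd/dlm_sendd kthreads and their wait queues are replaced here with the stock workqueue pattern: a work_struct embedded in the object, INIT_WORK() to attach the handler, queue_work() from the event path, and create/destroy_workqueue() for setup and teardown. Reduced to its skeleton, the pattern looks like this (generic sketch with made-up names, not code from the patch):

#include <linux/workqueue.h>

struct peer {				/* stands in for struct nodeinfo */
	struct work_struct swork;	/* send work item */
};

static struct workqueue_struct *send_wq;

static void send_handler(struct work_struct *work)
{
	struct peer *p = container_of(work, struct peer, swork);

	/* drain p's write queue here; runs in process context */
	(void)p;
}

static int sender_start(void)
{
	send_wq = create_singlethread_workqueue("example_send");
	if (!send_wq)
		return -ENOMEM;
	return 0;
}

static void peer_init(struct peer *p)
{
	INIT_WORK(&p->swork, send_handler);
}

/* Called from a socket callback: cheap, only marks the work pending. */
static void peer_kick(struct peer *p)
{
	queue_work(send_wq, &p->swork);
}

static void sender_stop(void)
{
	destroy_workqueue(send_wq);	/* waits for queued work to finish */
}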
diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c
index 9be3a440c42a..f1efd17b2614 100644
--- a/fs/dlm/lowcomms-tcp.c
+++ b/fs/dlm/lowcomms-tcp.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -96,10 +96,7 @@ static bool cbuf_empty(struct cbuf *cb)
96struct connection { 96struct connection {
97 struct socket *sock; /* NULL if not connected */ 97 struct socket *sock; /* NULL if not connected */
98 uint32_t nodeid; /* So we know who we are in the list */ 98 uint32_t nodeid; /* So we know who we are in the list */
99 struct rw_semaphore sock_sem; /* Stop connect races */ 99 struct mutex sock_mutex;
100 struct list_head read_list; /* On this list when ready for reading */
101 struct list_head write_list; /* On this list when ready for writing */
102 struct list_head state_list; /* On this list when ready to connect */
103 unsigned long flags; /* bit 1,2 = We are on the read/write lists */ 100 unsigned long flags; /* bit 1,2 = We are on the read/write lists */
104#define CF_READ_PENDING 1 101#define CF_READ_PENDING 1
105#define CF_WRITE_PENDING 2 102#define CF_WRITE_PENDING 2
@@ -112,9 +109,10 @@ struct connection {
112 struct page *rx_page; 109 struct page *rx_page;
113 struct cbuf cb; 110 struct cbuf cb;
114 int retries; 111 int retries;
115 atomic_t waiting_requests;
116#define MAX_CONNECT_RETRIES 3 112#define MAX_CONNECT_RETRIES 3
117 struct connection *othercon; 113 struct connection *othercon;
114 struct work_struct rwork; /* Receive workqueue */
115 struct work_struct swork; /* Send workqueue */
118}; 116};
119#define sock2con(x) ((struct connection *)(x)->sk_user_data) 117#define sock2con(x) ((struct connection *)(x)->sk_user_data)
120 118
@@ -131,14 +129,9 @@ struct writequeue_entry {
131 129
132static struct sockaddr_storage dlm_local_addr; 130static struct sockaddr_storage dlm_local_addr;
133 131
134/* Manage daemons */ 132/* Work queues */
135static struct task_struct *recv_task; 133static struct workqueue_struct *recv_workqueue;
136static struct task_struct *send_task; 134static struct workqueue_struct *send_workqueue;
137
138static wait_queue_t lowcomms_send_waitq_head;
139static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq);
140static wait_queue_t lowcomms_recv_waitq_head;
141static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq);
142 135
143/* An array of pointers to connections, indexed by NODEID */ 136/* An array of pointers to connections, indexed by NODEID */
144static struct connection **connections; 137static struct connection **connections;
@@ -146,17 +139,8 @@ static DECLARE_MUTEX(connections_lock);
146static struct kmem_cache *con_cache; 139static struct kmem_cache *con_cache;
147static int conn_array_size; 140static int conn_array_size;
148 141
149/* List of sockets that have reads pending */ 142static void process_recv_sockets(struct work_struct *work);
150static LIST_HEAD(read_sockets); 143static void process_send_sockets(struct work_struct *work);
151static DEFINE_SPINLOCK(read_sockets_lock);
152
153/* List of sockets which have writes pending */
154static LIST_HEAD(write_sockets);
155static DEFINE_SPINLOCK(write_sockets_lock);
156
157/* List of sockets which have connects pending */
158static LIST_HEAD(state_sockets);
159static DEFINE_SPINLOCK(state_sockets_lock);
160 144
161static struct connection *nodeid2con(int nodeid, gfp_t allocation) 145static struct connection *nodeid2con(int nodeid, gfp_t allocation)
162{ 146{
@@ -186,9 +170,11 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
186 goto finish; 170 goto finish;
187 171
188 con->nodeid = nodeid; 172 con->nodeid = nodeid;
189 init_rwsem(&con->sock_sem); 173 mutex_init(&con->sock_mutex);
190 INIT_LIST_HEAD(&con->writequeue); 174 INIT_LIST_HEAD(&con->writequeue);
191 spin_lock_init(&con->writequeue_lock); 175 spin_lock_init(&con->writequeue_lock);
176 INIT_WORK(&con->swork, process_send_sockets);
177 INIT_WORK(&con->rwork, process_recv_sockets);
192 178
193 connections[nodeid] = con; 179 connections[nodeid] = con;
194 } 180 }
@@ -203,41 +189,22 @@ static void lowcomms_data_ready(struct sock *sk, int count_unused)
203{ 189{
204 struct connection *con = sock2con(sk); 190 struct connection *con = sock2con(sk);
205 191
206 atomic_inc(&con->waiting_requests); 192 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
207 if (test_and_set_bit(CF_READ_PENDING, &con->flags)) 193 queue_work(recv_workqueue, &con->rwork);
208 return;
209
210 spin_lock_bh(&read_sockets_lock);
211 list_add_tail(&con->read_list, &read_sockets);
212 spin_unlock_bh(&read_sockets_lock);
213
214 wake_up_interruptible(&lowcomms_recv_waitq);
215} 194}
216 195
217static void lowcomms_write_space(struct sock *sk) 196static void lowcomms_write_space(struct sock *sk)
218{ 197{
219 struct connection *con = sock2con(sk); 198 struct connection *con = sock2con(sk);
220 199
221 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags)) 200 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
222 return; 201 queue_work(send_workqueue, &con->swork);
223
224 spin_lock_bh(&write_sockets_lock);
225 list_add_tail(&con->write_list, &write_sockets);
226 spin_unlock_bh(&write_sockets_lock);
227
228 wake_up_interruptible(&lowcomms_send_waitq);
229} 202}
230 203
231static inline void lowcomms_connect_sock(struct connection *con) 204static inline void lowcomms_connect_sock(struct connection *con)
232{ 205{
233 if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags)) 206 if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
234 return; 207 queue_work(send_workqueue, &con->swork);
235
236 spin_lock_bh(&state_sockets_lock);
237 list_add_tail(&con->state_list, &state_sockets);
238 spin_unlock_bh(&state_sockets_lock);
239
240 wake_up_interruptible(&lowcomms_send_waitq);
241} 208}
242 209
243static void lowcomms_state_change(struct sock *sk) 210static void lowcomms_state_change(struct sock *sk)
@@ -279,7 +246,7 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
279/* Close a remote connection and tidy up */ 246/* Close a remote connection and tidy up */
280static void close_connection(struct connection *con, bool and_other) 247static void close_connection(struct connection *con, bool and_other)
281{ 248{
282 down_write(&con->sock_sem); 249 mutex_lock(&con->sock_mutex);
283 250
284 if (con->sock) { 251 if (con->sock) {
285 sock_release(con->sock); 252 sock_release(con->sock);
@@ -294,7 +261,7 @@ static void close_connection(struct connection *con, bool and_other)
294 con->rx_page = NULL; 261 con->rx_page = NULL;
295 } 262 }
296 con->retries = 0; 263 con->retries = 0;
297 up_write(&con->sock_sem); 264 mutex_unlock(&con->sock_mutex);
298} 265}
299 266
300/* Data received from remote end */ 267/* Data received from remote end */
@@ -308,10 +275,13 @@ static int receive_from_sock(struct connection *con)
308 int r; 275 int r;
309 int call_again_soon = 0; 276 int call_again_soon = 0;
310 277
311 down_read(&con->sock_sem); 278 mutex_lock(&con->sock_mutex);
279
280 if (con->sock == NULL) {
281 ret = -EAGAIN;
282 goto out_close;
283 }
312 284
313 if (con->sock == NULL)
314 goto out;
315 if (con->rx_page == NULL) { 285 if (con->rx_page == NULL) {
316 /* 286 /*
317 * This doesn't need to be atomic, but I think it should 287 * This doesn't need to be atomic, but I think it should
@@ -359,6 +329,9 @@ static int receive_from_sock(struct connection *con)
359 329
360 if (ret <= 0) 330 if (ret <= 0)
361 goto out_close; 331 goto out_close;
332 if (ret == -EAGAIN)
333 goto out_resched;
334
362 if (ret == len) 335 if (ret == len)
363 call_again_soon = 1; 336 call_again_soon = 1;
364 cbuf_add(&con->cb, ret); 337 cbuf_add(&con->cb, ret);
@@ -381,24 +354,26 @@ static int receive_from_sock(struct connection *con)
381 con->rx_page = NULL; 354 con->rx_page = NULL;
382 } 355 }
383 356
384out:
385 if (call_again_soon) 357 if (call_again_soon)
386 goto out_resched; 358 goto out_resched;
387 up_read(&con->sock_sem); 359 mutex_unlock(&con->sock_mutex);
388 return 0; 360 return 0;
389 361
390out_resched: 362out_resched:
391 lowcomms_data_ready(con->sock->sk, 0); 363 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
392 up_read(&con->sock_sem); 364 queue_work(recv_workqueue, &con->rwork);
393 cond_resched(); 365 mutex_unlock(&con->sock_mutex);
394 return 0; 366 return -EAGAIN;
395 367
396out_close: 368out_close:
397 up_read(&con->sock_sem); 369 mutex_unlock(&con->sock_mutex);
398 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) { 370 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
399 close_connection(con, false); 371 close_connection(con, false);
400 /* Reconnect when there is something to send */ 372 /* Reconnect when there is something to send */
401 } 373 }
374 /* Don't return success if we really got EOF */
375 if (ret == 0)
376 ret = -EAGAIN;
402 377
403 return ret; 378 return ret;
404} 379}
@@ -412,6 +387,7 @@ static int accept_from_sock(struct connection *con)
412 int len; 387 int len;
413 int nodeid; 388 int nodeid;
414 struct connection *newcon; 389 struct connection *newcon;
390 struct connection *addcon;
415 391
416 memset(&peeraddr, 0, sizeof(peeraddr)); 392 memset(&peeraddr, 0, sizeof(peeraddr));
417 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, 393 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
@@ -419,7 +395,7 @@ static int accept_from_sock(struct connection *con)
419 if (result < 0) 395 if (result < 0)
420 return -ENOMEM; 396 return -ENOMEM;
421 397
422 down_read(&con->sock_sem); 398 mutex_lock_nested(&con->sock_mutex, 0);
423 399
424 result = -ENOTCONN; 400 result = -ENOTCONN;
425 if (con->sock == NULL) 401 if (con->sock == NULL)
@@ -445,7 +421,7 @@ static int accept_from_sock(struct connection *con)
445 if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) { 421 if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
446 printk("dlm: connect from non cluster node\n"); 422 printk("dlm: connect from non cluster node\n");
447 sock_release(newsock); 423 sock_release(newsock);
448 up_read(&con->sock_sem); 424 mutex_unlock(&con->sock_mutex);
449 return -1; 425 return -1;
450 } 426 }
451 427
@@ -462,7 +438,7 @@ static int accept_from_sock(struct connection *con)
462 result = -ENOMEM; 438 result = -ENOMEM;
463 goto accept_err; 439 goto accept_err;
464 } 440 }
465 down_write(&newcon->sock_sem); 441 mutex_lock_nested(&newcon->sock_mutex, 1);
466 if (newcon->sock) { 442 if (newcon->sock) {
467 struct connection *othercon = newcon->othercon; 443 struct connection *othercon = newcon->othercon;
468 444
@@ -470,41 +446,45 @@ static int accept_from_sock(struct connection *con)
470 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL); 446 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
471 if (!othercon) { 447 if (!othercon) {
472 printk("dlm: failed to allocate incoming socket\n"); 448 printk("dlm: failed to allocate incoming socket\n");
473 up_write(&newcon->sock_sem); 449 mutex_unlock(&newcon->sock_mutex);
474 result = -ENOMEM; 450 result = -ENOMEM;
475 goto accept_err; 451 goto accept_err;
476 } 452 }
477 othercon->nodeid = nodeid; 453 othercon->nodeid = nodeid;
478 othercon->rx_action = receive_from_sock; 454 othercon->rx_action = receive_from_sock;
479 init_rwsem(&othercon->sock_sem); 455 mutex_init(&othercon->sock_mutex);
456 INIT_WORK(&othercon->swork, process_send_sockets);
457 INIT_WORK(&othercon->rwork, process_recv_sockets);
480 set_bit(CF_IS_OTHERCON, &othercon->flags); 458 set_bit(CF_IS_OTHERCON, &othercon->flags);
481 newcon->othercon = othercon; 459 newcon->othercon = othercon;
482 } 460 }
483 othercon->sock = newsock; 461 othercon->sock = newsock;
484 newsock->sk->sk_user_data = othercon; 462 newsock->sk->sk_user_data = othercon;
485 add_sock(newsock, othercon); 463 add_sock(newsock, othercon);
464 addcon = othercon;
486 } 465 }
487 else { 466 else {
488 newsock->sk->sk_user_data = newcon; 467 newsock->sk->sk_user_data = newcon;
489 newcon->rx_action = receive_from_sock; 468 newcon->rx_action = receive_from_sock;
490 add_sock(newsock, newcon); 469 add_sock(newsock, newcon);
491 470 addcon = newcon;
492 } 471 }
493 472
494 up_write(&newcon->sock_sem); 473 mutex_unlock(&newcon->sock_mutex);
495 474
496 /* 475 /*
497 * Add it to the active queue in case we got data 476 * Add it to the active queue in case we got data
 498 * between processing the accept and adding the socket 477 * between processing the accept and adding the socket
499 * to the read_sockets list 478 * to the read_sockets list
500 */ 479 */
501 lowcomms_data_ready(newsock->sk, 0); 480 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
502 up_read(&con->sock_sem); 481 queue_work(recv_workqueue, &addcon->rwork);
482 mutex_unlock(&con->sock_mutex);
503 483
504 return 0; 484 return 0;
505 485
506accept_err: 486accept_err:
507 up_read(&con->sock_sem); 487 mutex_unlock(&con->sock_mutex);
508 sock_release(newsock); 488 sock_release(newsock);
509 489
510 if (result != -EAGAIN) 490 if (result != -EAGAIN)
@@ -525,7 +505,7 @@ static void connect_to_sock(struct connection *con)
525 return; 505 return;
526 } 506 }
527 507
528 down_write(&con->sock_sem); 508 mutex_lock(&con->sock_mutex);
529 if (con->retries++ > MAX_CONNECT_RETRIES) 509 if (con->retries++ > MAX_CONNECT_RETRIES)
530 goto out; 510 goto out;
531 511
@@ -548,7 +528,7 @@ static void connect_to_sock(struct connection *con)
548 sock->sk->sk_user_data = con; 528 sock->sk->sk_user_data = con;
549 con->rx_action = receive_from_sock; 529 con->rx_action = receive_from_sock;
550 530
551 make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len); 531 make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
552 532
553 add_sock(sock, con); 533 add_sock(sock, con);
554 534
@@ -577,7 +557,7 @@ out_err:
577 result = 0; 557 result = 0;
578 } 558 }
579out: 559out:
580 up_write(&con->sock_sem); 560 mutex_unlock(&con->sock_mutex);
581 return; 561 return;
582} 562}
583 563
@@ -616,10 +596,10 @@ static struct socket *create_listen_sock(struct connection *con,
616 con->sock = sock; 596 con->sock = sock;
617 597
618 /* Bind to our port */ 598 /* Bind to our port */
619 make_sockaddr(saddr, dlm_config.tcp_port, &addr_len); 599 make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
620 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len); 600 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
621 if (result < 0) { 601 if (result < 0) {
622 printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port); 602 printk("dlm: Can't bind to port %d\n", dlm_config.ci_tcp_port);
623 sock_release(sock); 603 sock_release(sock);
624 sock = NULL; 604 sock = NULL;
625 con->sock = NULL; 605 con->sock = NULL;
@@ -638,7 +618,7 @@ static struct socket *create_listen_sock(struct connection *con,
638 618
639 result = sock->ops->listen(sock, 5); 619 result = sock->ops->listen(sock, 5);
640 if (result < 0) { 620 if (result < 0) {
641 printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port); 621 printk("dlm: Can't listen on port %d\n", dlm_config.ci_tcp_port);
642 sock_release(sock); 622 sock_release(sock);
643 sock = NULL; 623 sock = NULL;
644 goto create_out; 624 goto create_out;
@@ -709,6 +689,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len,
709 if (!con) 689 if (!con)
710 return NULL; 690 return NULL;
711 691
692 spin_lock(&con->writequeue_lock);
712 e = list_entry(con->writequeue.prev, struct writequeue_entry, list); 693 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
713 if ((&e->list == &con->writequeue) || 694 if ((&e->list == &con->writequeue) ||
714 (PAGE_CACHE_SIZE - e->end < len)) { 695 (PAGE_CACHE_SIZE - e->end < len)) {
@@ -747,6 +728,7 @@ void dlm_lowcomms_commit_buffer(void *mh)
747 struct connection *con = e->con; 728 struct connection *con = e->con;
748 int users; 729 int users;
749 730
731 spin_lock(&con->writequeue_lock);
750 users = --e->users; 732 users = --e->users;
751 if (users) 733 if (users)
752 goto out; 734 goto out;
@@ -754,12 +736,8 @@ void dlm_lowcomms_commit_buffer(void *mh)
754 kunmap(e->page); 736 kunmap(e->page);
755 spin_unlock(&con->writequeue_lock); 737 spin_unlock(&con->writequeue_lock);
756 738
757 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) { 739 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
758 spin_lock_bh(&write_sockets_lock); 740 queue_work(send_workqueue, &con->swork);
759 list_add_tail(&con->write_list, &write_sockets);
760 spin_unlock_bh(&write_sockets_lock);
761
762 wake_up_interruptible(&lowcomms_send_waitq);
763 } 741 }
764 return; 742 return;
765 743
@@ -783,7 +761,7 @@ static void send_to_sock(struct connection *con)
783 struct writequeue_entry *e; 761 struct writequeue_entry *e;
784 int len, offset; 762 int len, offset;
785 763
786 down_read(&con->sock_sem); 764 mutex_lock(&con->sock_mutex);
787 if (con->sock == NULL) 765 if (con->sock == NULL)
788 goto out_connect; 766 goto out_connect;
789 767
@@ -800,6 +778,7 @@ static void send_to_sock(struct connection *con)
800 offset = e->offset; 778 offset = e->offset;
801 BUG_ON(len == 0 && e->users == 0); 779 BUG_ON(len == 0 && e->users == 0);
802 spin_unlock(&con->writequeue_lock); 780 spin_unlock(&con->writequeue_lock);
781 kmap(e->page);
803 782
804 ret = 0; 783 ret = 0;
805 if (len) { 784 if (len) {
@@ -828,18 +807,18 @@ static void send_to_sock(struct connection *con)
828 } 807 }
829 spin_unlock(&con->writequeue_lock); 808 spin_unlock(&con->writequeue_lock);
830out: 809out:
831 up_read(&con->sock_sem); 810 mutex_unlock(&con->sock_mutex);
832 return; 811 return;
833 812
834send_error: 813send_error:
835 up_read(&con->sock_sem); 814 mutex_unlock(&con->sock_mutex);
836 close_connection(con, false); 815 close_connection(con, false);
837 lowcomms_connect_sock(con); 816 lowcomms_connect_sock(con);
838 return; 817 return;
839 818
840out_connect: 819out_connect:
841 up_read(&con->sock_sem); 820 mutex_unlock(&con->sock_mutex);
842 lowcomms_connect_sock(con); 821 connect_to_sock(con);
843 return; 822 return;
844} 823}
845 824
@@ -872,7 +851,6 @@ int dlm_lowcomms_close(int nodeid)
872 if (con) { 851 if (con) {
873 clean_one_writequeue(con); 852 clean_one_writequeue(con);
874 close_connection(con, true); 853 close_connection(con, true);
875 atomic_set(&con->waiting_requests, 0);
876 } 854 }
877 return 0; 855 return 0;
878 856
@@ -880,102 +858,29 @@ out:
880 return -1; 858 return -1;
881} 859}
882 860
883/* API send message call, may queue the request */
884/* N.B. This is the old interface - use the new one for new calls */
885int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation)
886{
887 struct writequeue_entry *e;
888 char *b;
889
890 e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b);
891 if (e) {
892 memcpy(b, buf, len);
893 dlm_lowcomms_commit_buffer(e);
894 return 0;
895 }
896 return -ENOBUFS;
897}
898
899/* Look for activity on active sockets */ 861/* Look for activity on active sockets */
900static void process_sockets(void) 862static void process_recv_sockets(struct work_struct *work)
901{ 863{
902 struct list_head *list; 864 struct connection *con = container_of(work, struct connection, rwork);
903 struct list_head *temp; 865 int err;
904 int count = 0;
905
906 spin_lock_bh(&read_sockets_lock);
907 list_for_each_safe(list, temp, &read_sockets) {
908 866
909 struct connection *con = 867 clear_bit(CF_READ_PENDING, &con->flags);
910 list_entry(list, struct connection, read_list); 868 do {
911 list_del(&con->read_list); 869 err = con->rx_action(con);
912 clear_bit(CF_READ_PENDING, &con->flags); 870 } while (!err);
913
914 spin_unlock_bh(&read_sockets_lock);
915
916 /* This can reach zero if we are processing requests
917 * as they come in.
918 */
919 if (atomic_read(&con->waiting_requests) == 0) {
920 spin_lock_bh(&read_sockets_lock);
921 continue;
922 }
923
924 do {
925 con->rx_action(con);
926
927 /* Don't starve out everyone else */
928 if (++count >= MAX_RX_MSG_COUNT) {
929 cond_resched();
930 count = 0;
931 }
932
933 } while (!atomic_dec_and_test(&con->waiting_requests) &&
934 !kthread_should_stop());
935
936 spin_lock_bh(&read_sockets_lock);
937 }
938 spin_unlock_bh(&read_sockets_lock);
939} 871}
940 872
941/* Try to send any messages that are pending
942 */
943static void process_output_queue(void)
944{
945 struct list_head *list;
946 struct list_head *temp;
947
948 spin_lock_bh(&write_sockets_lock);
949 list_for_each_safe(list, temp, &write_sockets) {
950 struct connection *con =
951 list_entry(list, struct connection, write_list);
952 clear_bit(CF_WRITE_PENDING, &con->flags);
953 list_del(&con->write_list);
954
955 spin_unlock_bh(&write_sockets_lock);
956 send_to_sock(con);
957 spin_lock_bh(&write_sockets_lock);
958 }
959 spin_unlock_bh(&write_sockets_lock);
960}
961 873
962static void process_state_queue(void) 874static void process_send_sockets(struct work_struct *work)
963{ 875{
964 struct list_head *list; 876 struct connection *con = container_of(work, struct connection, swork);
965 struct list_head *temp;
966
967 spin_lock_bh(&state_sockets_lock);
968 list_for_each_safe(list, temp, &state_sockets) {
969 struct connection *con =
970 list_entry(list, struct connection, state_list);
971 list_del(&con->state_list);
972 clear_bit(CF_CONNECT_PENDING, &con->flags);
973 spin_unlock_bh(&state_sockets_lock);
974 877
878 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
975 connect_to_sock(con); 879 connect_to_sock(con);
976 spin_lock_bh(&state_sockets_lock);
977 } 880 }
978 spin_unlock_bh(&state_sockets_lock); 881
882 clear_bit(CF_WRITE_PENDING, &con->flags);
883 send_to_sock(con);
979} 884}
980 885
981 886
@@ -992,109 +897,33 @@ static void clean_writequeues(void)
992 } 897 }
993} 898}
994 899
995static int read_list_empty(void) 900static void work_stop(void)
996{ 901{
997 int status; 902 destroy_workqueue(recv_workqueue);
998 903 destroy_workqueue(send_workqueue);
999 spin_lock_bh(&read_sockets_lock);
1000 status = list_empty(&read_sockets);
1001 spin_unlock_bh(&read_sockets_lock);
1002
1003 return status;
1004}
1005
1006/* DLM Transport comms receive daemon */
1007static int dlm_recvd(void *data)
1008{
1009 init_waitqueue_entry(&lowcomms_recv_waitq_head, current);
1010 add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head);
1011
1012 while (!kthread_should_stop()) {
1013 set_current_state(TASK_INTERRUPTIBLE);
1014 if (read_list_empty())
1015 cond_resched();
1016 set_current_state(TASK_RUNNING);
1017
1018 process_sockets();
1019 }
1020
1021 return 0;
1022} 904}
1023 905
1024static int write_and_state_lists_empty(void) 906static int work_start(void)
1025{ 907{
1026 int status;
1027
1028 spin_lock_bh(&write_sockets_lock);
1029 status = list_empty(&write_sockets);
1030 spin_unlock_bh(&write_sockets_lock);
1031
1032 spin_lock_bh(&state_sockets_lock);
1033 if (list_empty(&state_sockets) == 0)
1034 status = 0;
1035 spin_unlock_bh(&state_sockets_lock);
1036
1037 return status;
1038}
1039
1040/* DLM Transport send daemon */
1041static int dlm_sendd(void *data)
1042{
1043 init_waitqueue_entry(&lowcomms_send_waitq_head, current);
1044 add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head);
1045
1046 while (!kthread_should_stop()) {
1047 set_current_state(TASK_INTERRUPTIBLE);
1048 if (write_and_state_lists_empty())
1049 cond_resched();
1050 set_current_state(TASK_RUNNING);
1051
1052 process_state_queue();
1053 process_output_queue();
1054 }
1055
1056 return 0;
1057}
1058
1059static void daemons_stop(void)
1060{
1061 kthread_stop(recv_task);
1062 kthread_stop(send_task);
1063}
1064
1065static int daemons_start(void)
1066{
1067 struct task_struct *p;
1068 int error; 908 int error;
1069 909 recv_workqueue = create_workqueue("dlm_recv");
1070 p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); 910 error = IS_ERR(recv_workqueue);
1071 error = IS_ERR(p);
1072 if (error) { 911 if (error) {
1073 log_print("can't start dlm_recvd %d", error); 912 log_print("can't start dlm_recv %d", error);
1074 return error; 913 return error;
1075 } 914 }
1076 recv_task = p;
1077 915
1078 p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); 916 send_workqueue = create_singlethread_workqueue("dlm_send");
1079 error = IS_ERR(p); 917 error = IS_ERR(send_workqueue);
1080 if (error) { 918 if (error) {
1081 log_print("can't start dlm_sendd %d", error); 919 log_print("can't start dlm_send %d", error);
1082 kthread_stop(recv_task); 920 destroy_workqueue(recv_workqueue);
1083 return error; 921 return error;
1084 } 922 }
1085 send_task = p;
1086 923
1087 return 0; 924 return 0;
1088} 925}
1089 926
1090/*
1091 * Return the largest buffer size we can cope with.
1092 */
1093int lowcomms_max_buffer_size(void)
1094{
1095 return PAGE_CACHE_SIZE;
1096}
1097
1098void dlm_lowcomms_stop(void) 927void dlm_lowcomms_stop(void)
1099{ 928{
1100 int i; 929 int i;
@@ -1107,7 +936,7 @@ void dlm_lowcomms_stop(void)
1107 connections[i]->flags |= 0xFF; 936 connections[i]->flags |= 0xFF;
1108 } 937 }
1109 938
1110 daemons_stop(); 939 work_stop();
1111 clean_writequeues(); 940 clean_writequeues();
1112 941
1113 for (i = 0; i < conn_array_size; i++) { 942 for (i = 0; i < conn_array_size; i++) {
@@ -1159,7 +988,7 @@ int dlm_lowcomms_start(void)
1159 if (error) 988 if (error)
1160 goto fail_unlisten; 989 goto fail_unlisten;
1161 990
1162 error = daemons_start(); 991 error = work_start();
1163 if (error) 992 if (error)
1164 goto fail_unlisten; 993 goto fail_unlisten;
1165 994
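
The lowcomms hunks above drop the dlm_recvd/dlm_sendd kernel threads, their global read/write/state lists and the hand-rolled waitqueue polling, and queue per-connection work on two workqueues instead (dlm_recv and dlm_send). A minimal sketch of that workqueue pattern follows; demo_wq, demo_work and demo_handler are illustrative names, not anything from the patch, and the sketch checks for NULL since create_*workqueue() returns NULL rather than an ERR_PTR on failure.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

/* Runs in process context on the workqueue's own kernel thread. */
static void demo_handler(struct work_struct *work)
{
	printk(KERN_INFO "demo work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_handler);
	queue_work(demo_wq, &demo_work);	/* roughly what lowcomms now does per socket event */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);		/* let queued items finish */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
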
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index c9b1c3d535f4..a5126e0c68a6 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -82,7 +82,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
82 if (msglen < sizeof(struct dlm_header)) 82 if (msglen < sizeof(struct dlm_header))
83 break; 83 break;
84 err = -E2BIG; 84 err = -E2BIG;
85 if (msglen > dlm_config.buffer_size) { 85 if (msglen > dlm_config.ci_buffer_size) {
86 log_print("message size %d from %d too big, buf len %d", 86 log_print("message size %d from %d too big, buf len %d",
87 msglen, nodeid, len); 87 msglen, nodeid, len);
88 break; 88 break;
@@ -103,7 +103,7 @@ int dlm_process_incoming_buffer(int nodeid, const void *base,
103 103
104 if (msglen > sizeof(__tmp) && 104 if (msglen > sizeof(__tmp) &&
105 msg == (struct dlm_header *) __tmp) { 105 msg == (struct dlm_header *) __tmp) {
106 msg = kmalloc(dlm_config.buffer_size, GFP_KERNEL); 106 msg = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
107 if (msg == NULL) 107 if (msg == NULL)
108 return ret; 108 return ret;
109 } 109 }
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 4cc31be9cd9d..6bfbd6153809 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -56,6 +56,10 @@ static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
56 56
57 rc->rc_type = type; 57 rc->rc_type = type;
58 58
59 spin_lock(&ls->ls_recover_lock);
60 rc->rc_seq = ls->ls_recover_seq;
61 spin_unlock(&ls->ls_recover_lock);
62
59 *mh_ret = mh; 63 *mh_ret = mh;
60 *rc_ret = rc; 64 *rc_ret = rc;
61 return 0; 65 return 0;
@@ -78,8 +82,17 @@ static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
78 rf->rf_lsflags = ls->ls_exflags; 82 rf->rf_lsflags = ls->ls_exflags;
79} 83}
80 84
81static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid) 85static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
82{ 86{
87 struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
88
89 if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
90 log_error(ls, "version mismatch: %x nodeid %d: %x",
91 DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid,
92 rc->rc_header.h_version);
93 return -EINVAL;
94 }
95
83 if (rf->rf_lvblen != ls->ls_lvblen || 96 if (rf->rf_lvblen != ls->ls_lvblen ||
84 rf->rf_lsflags != ls->ls_exflags) { 97 rf->rf_lsflags != ls->ls_exflags) {
85 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", 98 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
@@ -125,7 +138,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
125 goto out; 138 goto out;
126 139
127 allow_sync_reply(ls, &rc->rc_id); 140 allow_sync_reply(ls, &rc->rc_id);
128 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); 141 memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);
129 142
130 send_rcom(ls, mh, rc); 143 send_rcom(ls, mh, rc);
131 144
@@ -141,8 +154,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
141 log_debug(ls, "remote node %d not ready", nodeid); 154 log_debug(ls, "remote node %d not ready", nodeid);
142 rc->rc_result = 0; 155 rc->rc_result = 0;
143 } else 156 } else
144 error = check_config(ls, (struct rcom_config *) rc->rc_buf, 157 error = check_config(ls, rc, nodeid);
145 nodeid);
146 /* the caller looks at rc_result for the remote recovery status */ 158 /* the caller looks at rc_result for the remote recovery status */
147 out: 159 out:
148 return error; 160 return error;
@@ -159,6 +171,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
159 if (error) 171 if (error)
160 return; 172 return;
161 rc->rc_id = rc_in->rc_id; 173 rc->rc_id = rc_in->rc_id;
174 rc->rc_seq_reply = rc_in->rc_seq;
162 rc->rc_result = dlm_recover_status(ls); 175 rc->rc_result = dlm_recover_status(ls);
163 make_config(ls, (struct rcom_config *) rc->rc_buf); 176 make_config(ls, (struct rcom_config *) rc->rc_buf);
164 177
@@ -200,7 +213,7 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
200 if (nodeid == dlm_our_nodeid()) { 213 if (nodeid == dlm_our_nodeid()) {
201 dlm_copy_master_names(ls, last_name, last_len, 214 dlm_copy_master_names(ls, last_name, last_len,
202 ls->ls_recover_buf + len, 215 ls->ls_recover_buf + len,
203 dlm_config.buffer_size - len, nodeid); 216 dlm_config.ci_buffer_size - len, nodeid);
204 goto out; 217 goto out;
205 } 218 }
206 219
@@ -210,7 +223,7 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
210 memcpy(rc->rc_buf, last_name, last_len); 223 memcpy(rc->rc_buf, last_name, last_len);
211 224
212 allow_sync_reply(ls, &rc->rc_id); 225 allow_sync_reply(ls, &rc->rc_id);
213 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size); 226 memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size);
214 227
215 send_rcom(ls, mh, rc); 228 send_rcom(ls, mh, rc);
216 229
@@ -224,30 +237,17 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
224{ 237{
225 struct dlm_rcom *rc; 238 struct dlm_rcom *rc;
226 struct dlm_mhandle *mh; 239 struct dlm_mhandle *mh;
227 int error, inlen, outlen; 240 int error, inlen, outlen, nodeid;
228 int nodeid = rc_in->rc_header.h_nodeid;
229 uint32_t status = dlm_recover_status(ls);
230
231 /*
232 * We can't run dlm_dir_rebuild_send (which uses ls_nodes) while
233 * dlm_recoverd is running ls_nodes_reconfig (which changes ls_nodes).
234 * It could only happen in rare cases where we get a late NAMES
235 * message from a previous instance of recovery.
236 */
237
238 if (!(status & DLM_RS_NODES)) {
239 log_debug(ls, "ignoring RCOM_NAMES from %u", nodeid);
240 return;
241 }
242 241
243 nodeid = rc_in->rc_header.h_nodeid; 242 nodeid = rc_in->rc_header.h_nodeid;
244 inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom); 243 inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
245 outlen = dlm_config.buffer_size - sizeof(struct dlm_rcom); 244 outlen = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom);
246 245
247 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh); 246 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
248 if (error) 247 if (error)
249 return; 248 return;
250 rc->rc_id = rc_in->rc_id; 249 rc->rc_id = rc_in->rc_id;
250 rc->rc_seq_reply = rc_in->rc_seq;
251 251
252 dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen, 252 dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
253 nodeid); 253 nodeid);
@@ -294,6 +294,7 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
294 ret_nodeid = error; 294 ret_nodeid = error;
295 rc->rc_result = ret_nodeid; 295 rc->rc_result = ret_nodeid;
296 rc->rc_id = rc_in->rc_id; 296 rc->rc_id = rc_in->rc_id;
297 rc->rc_seq_reply = rc_in->rc_seq;
297 298
298 send_rcom(ls, mh, rc); 299 send_rcom(ls, mh, rc);
299} 300}
@@ -375,20 +376,13 @@ static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
375 376
376 memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock)); 377 memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
377 rc->rc_id = rc_in->rc_id; 378 rc->rc_id = rc_in->rc_id;
379 rc->rc_seq_reply = rc_in->rc_seq;
378 380
379 send_rcom(ls, mh, rc); 381 send_rcom(ls, mh, rc);
380} 382}
381 383
382static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) 384static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
383{ 385{
384 uint32_t status = dlm_recover_status(ls);
385
386 if (!(status & DLM_RS_DIR)) {
387 log_debug(ls, "ignoring RCOM_LOCK_REPLY from %u",
388 rc_in->rc_header.h_nodeid);
389 return;
390 }
391
392 dlm_recover_process_copy(ls, rc_in); 386 dlm_recover_process_copy(ls, rc_in);
393} 387}
394 388
@@ -415,6 +409,7 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
415 409
416 rc->rc_type = DLM_RCOM_STATUS_REPLY; 410 rc->rc_type = DLM_RCOM_STATUS_REPLY;
417 rc->rc_id = rc_in->rc_id; 411 rc->rc_id = rc_in->rc_id;
412 rc->rc_seq_reply = rc_in->rc_seq;
418 rc->rc_result = -ESRCH; 413 rc->rc_result = -ESRCH;
419 414
420 rf = (struct rcom_config *) rc->rc_buf; 415 rf = (struct rcom_config *) rc->rc_buf;
@@ -426,6 +421,31 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
426 return 0; 421 return 0;
427} 422}
428 423
424static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
425{
426 uint64_t seq;
427 int rv = 0;
428
429 switch (rc->rc_type) {
430 case DLM_RCOM_STATUS_REPLY:
431 case DLM_RCOM_NAMES_REPLY:
432 case DLM_RCOM_LOOKUP_REPLY:
433 case DLM_RCOM_LOCK_REPLY:
434 spin_lock(&ls->ls_recover_lock);
435 seq = ls->ls_recover_seq;
436 spin_unlock(&ls->ls_recover_lock);
437 if (rc->rc_seq_reply != seq) {
438 log_debug(ls, "ignoring old reply %x from %d "
439 "seq_reply %llx expect %llx",
440 rc->rc_type, rc->rc_header.h_nodeid,
441 (unsigned long long)rc->rc_seq_reply,
442 (unsigned long long)seq);
443 rv = 1;
444 }
445 }
446 return rv;
447}
448
429/* Called by dlm_recvd; corresponds to dlm_receive_message() but special 449/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
430 recovery-only comms are sent through here. */ 450 recovery-only comms are sent through here. */
431 451
@@ -449,11 +469,14 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
449 } 469 }
450 470
451 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { 471 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
452 log_error(ls, "ignoring recovery message %x from %d", 472 log_debug(ls, "ignoring recovery message %x from %d",
453 rc->rc_type, nodeid); 473 rc->rc_type, nodeid);
454 goto out; 474 goto out;
455 } 475 }
456 476
477 if (is_old_reply(ls, rc))
478 goto out;
479
457 if (nodeid != rc->rc_header.h_nodeid) { 480 if (nodeid != rc->rc_header.h_nodeid) {
458 log_error(ls, "bad rcom nodeid %d from %d", 481 log_error(ls, "bad rcom nodeid %d from %d",
459 rc->rc_header.h_nodeid, nodeid); 482 rc->rc_header.h_nodeid, nodeid);
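
The rcom.c changes above add a recovery generation check: create_rcom() stamps each outgoing message with the lockspace's current ls_recover_seq (rc_seq), every reply echoes that value back in rc_seq_reply, and is_old_reply() drops replies whose echoed sequence no longer matches, replacing the DLM_RS_NODES/DLM_RS_DIR status tests that were removed. A stand-alone sketch of the same idea, using hypothetical demo_* names rather than the dlm structures:

#include <stdint.h>
#include <stdio.h>

struct demo_reply {
	uint64_t seq_reply;	/* copied from the request's seq by the responder */
};

/* Nonzero means the reply belongs to an earlier recovery generation. */
static int demo_is_old_reply(uint64_t current_seq, const struct demo_reply *r)
{
	return r->seq_reply != current_seq;
}

int main(void)
{
	uint64_t recover_seq = 42;	/* bumped each time recovery restarts */
	struct demo_reply stale = { .seq_reply = 41 };
	struct demo_reply fresh = { .seq_reply = 42 };

	printf("stale ignored: %d, fresh ignored: %d\n",
	       demo_is_old_reply(recover_seq, &stale),
	       demo_is_old_reply(recover_seq, &fresh));
	return 0;
}
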
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index cf9f6831bab5..c2cc7694cd16 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -44,7 +44,7 @@
44static void dlm_wait_timer_fn(unsigned long data) 44static void dlm_wait_timer_fn(unsigned long data)
45{ 45{
46 struct dlm_ls *ls = (struct dlm_ls *) data; 46 struct dlm_ls *ls = (struct dlm_ls *) data;
47 mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ)); 47 mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
48 wake_up(&ls->ls_wait_general); 48 wake_up(&ls->ls_wait_general);
49} 49}
50 50
@@ -55,7 +55,7 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
55 init_timer(&ls->ls_timer); 55 init_timer(&ls->ls_timer);
56 ls->ls_timer.function = dlm_wait_timer_fn; 56 ls->ls_timer.function = dlm_wait_timer_fn;
57 ls->ls_timer.data = (long) ls; 57 ls->ls_timer.data = (long) ls;
58 ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ); 58 ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
59 add_timer(&ls->ls_timer); 59 add_timer(&ls->ls_timer);
60 60
61 wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls)); 61 wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
@@ -397,7 +397,9 @@ int dlm_recover_masters(struct dlm_ls *ls)
397 397
398 if (dlm_no_directory(ls)) 398 if (dlm_no_directory(ls))
399 count += recover_master_static(r); 399 count += recover_master_static(r);
400 else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) { 400 else if (!is_master(r) &&
401 (dlm_is_removed(ls, r->res_nodeid) ||
402 rsb_flag(r, RSB_NEW_MASTER))) {
401 recover_master(r); 403 recover_master(r);
402 count++; 404 count++;
403 } 405 }
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 650536aa5139..3cb636d60249 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -77,7 +77,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
77 77
78 error = dlm_recover_members(ls, rv, &neg); 78 error = dlm_recover_members(ls, rv, &neg);
79 if (error) { 79 if (error) {
80 log_error(ls, "recover_members failed %d", error); 80 log_debug(ls, "recover_members failed %d", error);
81 goto fail; 81 goto fail;
82 } 82 }
83 start = jiffies; 83 start = jiffies;
@@ -89,7 +89,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
89 89
90 error = dlm_recover_directory(ls); 90 error = dlm_recover_directory(ls);
91 if (error) { 91 if (error) {
92 log_error(ls, "recover_directory failed %d", error); 92 log_debug(ls, "recover_directory failed %d", error);
93 goto fail; 93 goto fail;
94 } 94 }
95 95
@@ -99,7 +99,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
99 99
100 error = dlm_recover_directory_wait(ls); 100 error = dlm_recover_directory_wait(ls);
101 if (error) { 101 if (error) {
102 log_error(ls, "recover_directory_wait failed %d", error); 102 log_debug(ls, "recover_directory_wait failed %d", error);
103 goto fail; 103 goto fail;
104 } 104 }
105 105
@@ -129,7 +129,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
129 129
130 error = dlm_recover_masters(ls); 130 error = dlm_recover_masters(ls);
131 if (error) { 131 if (error) {
132 log_error(ls, "recover_masters failed %d", error); 132 log_debug(ls, "recover_masters failed %d", error);
133 goto fail; 133 goto fail;
134 } 134 }
135 135
@@ -139,13 +139,13 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
139 139
140 error = dlm_recover_locks(ls); 140 error = dlm_recover_locks(ls);
141 if (error) { 141 if (error) {
142 log_error(ls, "recover_locks failed %d", error); 142 log_debug(ls, "recover_locks failed %d", error);
143 goto fail; 143 goto fail;
144 } 144 }
145 145
146 error = dlm_recover_locks_wait(ls); 146 error = dlm_recover_locks_wait(ls);
147 if (error) { 147 if (error) {
148 log_error(ls, "recover_locks_wait failed %d", error); 148 log_debug(ls, "recover_locks_wait failed %d", error);
149 goto fail; 149 goto fail;
150 } 150 }
151 151
@@ -166,7 +166,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
166 166
167 error = dlm_recover_locks_wait(ls); 167 error = dlm_recover_locks_wait(ls);
168 if (error) { 168 if (error) {
169 log_error(ls, "recover_locks_wait failed %d", error); 169 log_debug(ls, "recover_locks_wait failed %d", error);
170 goto fail; 170 goto fail;
171 } 171 }
172 } 172 }
@@ -184,7 +184,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
184 dlm_set_recover_status(ls, DLM_RS_DONE); 184 dlm_set_recover_status(ls, DLM_RS_DONE);
185 error = dlm_recover_done_wait(ls); 185 error = dlm_recover_done_wait(ls);
186 if (error) { 186 if (error) {
187 log_error(ls, "recover_done_wait failed %d", error); 187 log_debug(ls, "recover_done_wait failed %d", error);
188 goto fail; 188 goto fail;
189 } 189 }
190 190
@@ -192,19 +192,19 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
192 192
193 error = enable_locking(ls, rv->seq); 193 error = enable_locking(ls, rv->seq);
194 if (error) { 194 if (error) {
195 log_error(ls, "enable_locking failed %d", error); 195 log_debug(ls, "enable_locking failed %d", error);
196 goto fail; 196 goto fail;
197 } 197 }
198 198
199 error = dlm_process_requestqueue(ls); 199 error = dlm_process_requestqueue(ls);
200 if (error) { 200 if (error) {
201 log_error(ls, "process_requestqueue failed %d", error); 201 log_debug(ls, "process_requestqueue failed %d", error);
202 goto fail; 202 goto fail;
203 } 203 }
204 204
205 error = dlm_recover_waiters_post(ls); 205 error = dlm_recover_waiters_post(ls);
206 if (error) { 206 if (error) {
207 log_error(ls, "recover_waiters_post failed %d", error); 207 log_debug(ls, "recover_waiters_post failed %d", error);
208 goto fail; 208 goto fail;
209 } 209 }
210 210
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index c37e93e4f2df..d378b7fe2a1e 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -180,6 +180,14 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
180 ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue)) 180 ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
181 remove_ownqueue = 1; 181 remove_ownqueue = 1;
182 182
183 /* unlocks or cancels of waiting requests need to be removed from the
184 proc's unlocking list, again there must be a better way... */
185
186 if (ua->lksb.sb_status == -DLM_EUNLOCK ||
187 (ua->lksb.sb_status == -DLM_ECANCEL &&
188 lkb->lkb_grmode == DLM_LOCK_IV))
189 remove_ownqueue = 1;
190
183 /* We want to copy the lvb to userspace when the completion 191 /* We want to copy the lvb to userspace when the completion
184 ast is read if the status is 0, the lock has an lvb and 192 ast is read if the status is 0, the lock has an lvb and
185 lvb_ops says we should. We could probably have set_lvb_lock() 193 lvb_ops says we should. We could probably have set_lvb_lock()
@@ -523,6 +531,7 @@ static int device_open(struct inode *inode, struct file *file)
523 proc->lockspace = ls->ls_local_handle; 531 proc->lockspace = ls->ls_local_handle;
524 INIT_LIST_HEAD(&proc->asts); 532 INIT_LIST_HEAD(&proc->asts);
525 INIT_LIST_HEAD(&proc->locks); 533 INIT_LIST_HEAD(&proc->locks);
534 INIT_LIST_HEAD(&proc->unlocking);
526 spin_lock_init(&proc->asts_spin); 535 spin_lock_init(&proc->asts_spin);
527 spin_lock_init(&proc->locks_spin); 536 spin_lock_init(&proc->locks_spin);
528 init_waitqueue_head(&proc->wait); 537 init_waitqueue_head(&proc->wait);
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
index 767197db9944..963889cf6740 100644
--- a/fs/dlm/util.c
+++ b/fs/dlm/util.c
@@ -134,6 +134,8 @@ void dlm_rcom_out(struct dlm_rcom *rc)
134 rc->rc_type = cpu_to_le32(rc->rc_type); 134 rc->rc_type = cpu_to_le32(rc->rc_type);
135 rc->rc_result = cpu_to_le32(rc->rc_result); 135 rc->rc_result = cpu_to_le32(rc->rc_result);
136 rc->rc_id = cpu_to_le64(rc->rc_id); 136 rc->rc_id = cpu_to_le64(rc->rc_id);
137 rc->rc_seq = cpu_to_le64(rc->rc_seq);
138 rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply);
137 139
138 if (type == DLM_RCOM_LOCK) 140 if (type == DLM_RCOM_LOCK)
139 rcom_lock_out((struct rcom_lock *) rc->rc_buf); 141 rcom_lock_out((struct rcom_lock *) rc->rc_buf);
@@ -151,6 +153,8 @@ void dlm_rcom_in(struct dlm_rcom *rc)
151 rc->rc_type = le32_to_cpu(rc->rc_type); 153 rc->rc_type = le32_to_cpu(rc->rc_type);
152 rc->rc_result = le32_to_cpu(rc->rc_result); 154 rc->rc_result = le32_to_cpu(rc->rc_result);
153 rc->rc_id = le64_to_cpu(rc->rc_id); 155 rc->rc_id = le64_to_cpu(rc->rc_id);
156 rc->rc_seq = le64_to_cpu(rc->rc_seq);
157 rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
154 158
155 if (rc->rc_type == DLM_RCOM_LOCK) 159 if (rc->rc_type == DLM_RCOM_LOCK)
156 rcom_lock_in((struct rcom_lock *) rc->rc_buf); 160 rcom_lock_in((struct rcom_lock *) rc->rc_buf);
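
Because rcom messages cross the wire in little-endian byte order, dlm_rcom_out() and dlm_rcom_in() above gain matching cpu_to_le64()/le64_to_cpu() conversions for the new rc_seq and rc_seq_reply fields; the two routines have to stay mirror images of each other or mixed-endian clusters break silently. A minimal sketch of that convention on a hypothetical struct demo_wire (not the dlm_rcom layout):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire {
	__u64 seq;
	__u64 seq_reply;
};

/* host -> wire, run just before the buffer is handed to the transport */
static void demo_wire_out(struct demo_wire *w)
{
	w->seq = cpu_to_le64(w->seq);
	w->seq_reply = cpu_to_le64(w->seq_reply);
}

/* wire -> host, run as soon as the buffer is received */
static void demo_wire_in(struct demo_wire *w)
{
	w->seq = le64_to_cpu(w->seq);
	w->seq_reply = le64_to_cpu(w->seq_reply);
}
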
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 6a2ffa2db14f..de8e64c03f73 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,44 +4,43 @@ config GFS2_FS
4 select FS_POSIX_ACL 4 select FS_POSIX_ACL
5 select CRC32 5 select CRC32
6 help 6 help
7 A cluster filesystem. 7 A cluster filesystem.
8 8
9 Allows a cluster of computers to simultaneously use a block device 9 Allows a cluster of computers to simultaneously use a block device
10 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads 10 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads
11 and writes to the block device like a local filesystem, but also uses 11 and writes to the block device like a local filesystem, but also uses
 12 a lock module to allow the computers to coordinate their I/O so 12 a lock module to allow the computers to coordinate their I/O so
13 filesystem consistency is maintained. One of the nifty features of 13 filesystem consistency is maintained. One of the nifty features of
14 GFS is perfect consistency -- changes made to the filesystem on one 14 GFS is perfect consistency -- changes made to the filesystem on one
15 machine show up immediately on all other machines in the cluster. 15 machine show up immediately on all other machines in the cluster.
16 16
17 To use the GFS2 filesystem, you will need to enable one or more of 17 To use the GFS2 filesystem, you will need to enable one or more of
18 the below locking modules. Documentation and utilities for GFS2 can 18 the below locking modules. Documentation and utilities for GFS2 can
19 be found here: http://sources.redhat.com/cluster 19 be found here: http://sources.redhat.com/cluster
20 20
21config GFS2_FS_LOCKING_NOLOCK 21config GFS2_FS_LOCKING_NOLOCK
22 tristate "GFS2 \"nolock\" locking module" 22 tristate "GFS2 \"nolock\" locking module"
23 depends on GFS2_FS 23 depends on GFS2_FS
24 help 24 help
25 Single node locking module for GFS2. 25 Single node locking module for GFS2.
26 26
27 Use this module if you want to use GFS2 on a single node without 27 Use this module if you want to use GFS2 on a single node without
28 its clustering features. You can still take advantage of the 28 its clustering features. You can still take advantage of the
29 large file support, and upgrade to running a full cluster later on 29 large file support, and upgrade to running a full cluster later on
30 if required. 30 if required.
31 31
32 If you will only be using GFS2 in cluster mode, you do not need this 32 If you will only be using GFS2 in cluster mode, you do not need this
33 module. 33 module.
34 34
35config GFS2_FS_LOCKING_DLM 35config GFS2_FS_LOCKING_DLM
36 tristate "GFS2 DLM locking module" 36 tristate "GFS2 DLM locking module"
37 depends on GFS2_FS && NET && INET && (IPV6 || IPV6=n) 37 depends on GFS2_FS && SYSFS && NET && INET && (IPV6 || IPV6=n)
38 select IP_SCTP if DLM_SCTP 38 select IP_SCTP if DLM_SCTP
39 select CONFIGFS_FS 39 select CONFIGFS_FS
40 select DLM 40 select DLM
41 help 41 help
42 Multiple node locking module for GFS2 42 Multiple node locking module for GFS2
43
44 Most users of GFS2 will require this module. It provides the locking
45 interface between GFS2 and the DLM, which is required to use GFS2
46 in a cluster environment.
47 43
44 Most users of GFS2 will require this module. It provides the locking
45 interface between GFS2 and the DLM, which is required to use GFS2
46 in a cluster environment.
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 8240c1ff94f4..113f6c9110c7 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -773,7 +773,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
773 gfs2_free_data(ip, bstart, blen); 773 gfs2_free_data(ip, bstart, blen);
774 } 774 }
775 775
776 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 776 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
777 777
778 gfs2_dinode_out(ip, dibh->b_data); 778 gfs2_dinode_out(ip, dibh->b_data);
779 779
@@ -848,7 +848,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
848 } 848 }
849 849
850 ip->i_di.di_size = size; 850 ip->i_di.di_size = size;
851 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 851 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
852 852
853 error = gfs2_meta_inode_buffer(ip, &dibh); 853 error = gfs2_meta_inode_buffer(ip, &dibh);
854 if (error) 854 if (error)
@@ -963,7 +963,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
963 963
964 if (gfs2_is_stuffed(ip)) { 964 if (gfs2_is_stuffed(ip)) {
965 ip->i_di.di_size = size; 965 ip->i_di.di_size = size;
966 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 966 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
967 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 967 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
968 gfs2_dinode_out(ip, dibh->b_data); 968 gfs2_dinode_out(ip, dibh->b_data);
969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); 969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
@@ -975,7 +975,7 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
975 975
976 if (!error) { 976 if (!error) {
977 ip->i_di.di_size = size; 977 ip->i_di.di_size = size;
978 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 978 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; 979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
980 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 980 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
981 gfs2_dinode_out(ip, dibh->b_data); 981 gfs2_dinode_out(ip, dibh->b_data);
@@ -1048,7 +1048,7 @@ static int trunc_end(struct gfs2_inode *ip)
1048 ip->i_num.no_addr; 1048 ip->i_num.no_addr;
1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1050 } 1050 }
1051 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 1051 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; 1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
1053 1053
1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
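
The timestamp updates in bmap.c (and in the dir.c and eattr.c hunks below) switch from writing only tv_sec with get_seconds() to assigning CURRENT_TIME_SEC, a whole struct timespec whose tv_nsec is zero, so a stale nanosecond value from an earlier timestamp can no longer linger in the inode. A tiny sketch of the new form, with a hypothetical demo_touch() helper:

#include <linux/fs.h>
#include <linux/time.h>

/* Assigns the full timespec (seconds now, nanoseconds zero) in one go. */
static void demo_touch(struct inode *inode)
{
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
}
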
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 0fdcb7713cd9..c93ca8f361b5 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -131,7 +131,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); 131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
132 if (ip->i_di.di_size < offset + size) 132 if (ip->i_di.di_size < offset + size)
133 ip->i_di.di_size = offset + size; 133 ip->i_di.di_size = offset + size;
134 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 134 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
135 gfs2_dinode_out(ip, dibh->b_data); 135 gfs2_dinode_out(ip, dibh->b_data);
136 136
137 brelse(dibh); 137 brelse(dibh);
@@ -229,7 +229,7 @@ out:
229 229
230 if (ip->i_di.di_size < offset + copied) 230 if (ip->i_di.di_size < offset + copied)
231 ip->i_di.di_size = offset + copied; 231 ip->i_di.di_size = offset + copied;
232 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 232 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
233 233
234 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 234 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
235 gfs2_dinode_out(ip, dibh->b_data); 235 gfs2_dinode_out(ip, dibh->b_data);
@@ -1198,12 +1198,11 @@ static int compare_dents(const void *a, const void *b)
1198 */ 1198 */
1199 1199
1200static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, 1200static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1201 void *opaque, gfs2_filldir_t filldir, 1201 void *opaque, filldir_t filldir,
1202 const struct gfs2_dirent **darr, u32 entries, 1202 const struct gfs2_dirent **darr, u32 entries,
1203 int *copied) 1203 int *copied)
1204{ 1204{
1205 const struct gfs2_dirent *dent, *dent_next; 1205 const struct gfs2_dirent *dent, *dent_next;
1206 struct gfs2_inum_host inum;
1207 u64 off, off_next; 1206 u64 off, off_next;
1208 unsigned int x, y; 1207 unsigned int x, y;
1209 int run = 0; 1208 int run = 0;
@@ -1240,11 +1239,9 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1240 *offset = off; 1239 *offset = off;
1241 } 1240 }
1242 1241
1243 gfs2_inum_in(&inum, (char *)&dent->de_inum);
1244
1245 error = filldir(opaque, (const char *)(dent + 1), 1242 error = filldir(opaque, (const char *)(dent + 1),
1246 be16_to_cpu(dent->de_name_len), 1243 be16_to_cpu(dent->de_name_len),
1247 off, &inum, 1244 off, be64_to_cpu(dent->de_inum.no_addr),
1248 be16_to_cpu(dent->de_type)); 1245 be16_to_cpu(dent->de_type));
1249 if (error) 1246 if (error)
1250 return 1; 1247 return 1;
@@ -1262,8 +1259,8 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1262} 1259}
1263 1260
1264static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, 1261static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
1265 gfs2_filldir_t filldir, int *copied, 1262 filldir_t filldir, int *copied, unsigned *depth,
1266 unsigned *depth, u64 leaf_no) 1263 u64 leaf_no)
1267{ 1264{
1268 struct gfs2_inode *ip = GFS2_I(inode); 1265 struct gfs2_inode *ip = GFS2_I(inode);
1269 struct buffer_head *bh; 1266 struct buffer_head *bh;
@@ -1343,7 +1340,7 @@ out:
1343 */ 1340 */
1344 1341
1345static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, 1342static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1346 gfs2_filldir_t filldir) 1343 filldir_t filldir)
1347{ 1344{
1348 struct gfs2_inode *dip = GFS2_I(inode); 1345 struct gfs2_inode *dip = GFS2_I(inode);
1349 struct gfs2_sbd *sdp = GFS2_SB(inode); 1346 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -1402,7 +1399,7 @@ out:
1402} 1399}
1403 1400
1404int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque, 1401int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
1405 gfs2_filldir_t filldir) 1402 filldir_t filldir)
1406{ 1403{
1407 struct gfs2_inode *dip = GFS2_I(inode); 1404 struct gfs2_inode *dip = GFS2_I(inode);
1408 struct dirent_gather g; 1405 struct dirent_gather g;
@@ -1568,7 +1565,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1568 break; 1565 break;
1569 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1566 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1570 ip->i_di.di_entries++; 1567 ip->i_di.di_entries++;
1571 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds(); 1568 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1572 gfs2_dinode_out(ip, bh->b_data); 1569 gfs2_dinode_out(ip, bh->b_data);
1573 brelse(bh); 1570 brelse(bh);
1574 error = 0; 1571 error = 0;
@@ -1654,7 +1651,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1654 gfs2_consist_inode(dip); 1651 gfs2_consist_inode(dip);
1655 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1652 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1656 dip->i_di.di_entries--; 1653 dip->i_di.di_entries--;
1657 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); 1654 dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME_SEC;
1658 gfs2_dinode_out(dip, bh->b_data); 1655 gfs2_dinode_out(dip, bh->b_data);
1659 brelse(bh); 1656 brelse(bh);
1660 mark_inode_dirty(&dip->i_inode); 1657 mark_inode_dirty(&dip->i_inode);
@@ -1702,7 +1699,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1702 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1699 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1703 } 1700 }
1704 1701
1705 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds(); 1702 dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME_SEC;
1706 gfs2_dinode_out(dip, bh->b_data); 1703 gfs2_dinode_out(dip, bh->b_data);
1707 brelse(bh); 1704 brelse(bh);
1708 return 0; 1705 return 0;
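
The readdir path in dir.c above is converted from the private gfs2_filldir_t callback, which passed a struct gfs2_inum_host, to the generic VFS filldir_t, handing the inode number straight from the on-disk dirent via be64_to_cpu(). That lets gfs2_dir_read() be driven by the same callbacks the VFS passes to ->readdir(), which is why the typedef disappears from dir.h below. A sketch of a trivial callback matching the filldir_t signature of this kernel generation; struct dirent_counter and count_entries are illustrative only:

#include <linux/fs.h>

struct dirent_counter {
	unsigned int count;
};

/* filldir_t contract: return 0 to keep going, nonzero once the buffer is full. */
static int count_entries(void *opaque, const char *name, int namelen,
			 loff_t offset, u64 ino, unsigned int d_type)
{
	struct dirent_counter *dc = opaque;

	dc->count++;
	return 0;
}
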
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index b21b33668a5b..48fe89046bba 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -16,30 +16,13 @@ struct inode;
16struct gfs2_inode; 16struct gfs2_inode;
17struct gfs2_inum; 17struct gfs2_inum;
18 18
19/**
20 * gfs2_filldir_t - Report a directory entry to the caller of gfs2_dir_read()
21 * @opaque: opaque data used by the function
22 * @name: the name of the directory entry
23 * @length: the length of the name
24 * @offset: the entry's offset in the directory
25 * @inum: the inode number the entry points to
26 * @type: the type of inode the entry points to
27 *
28 * Returns: 0 on success, 1 if buffer full
29 */
30
31typedef int (*gfs2_filldir_t) (void *opaque,
32 const char *name, unsigned int length,
33 u64 offset,
34 struct gfs2_inum_host *inum, unsigned int type);
35
36int gfs2_dir_search(struct inode *dir, const struct qstr *filename, 19int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
37 struct gfs2_inum_host *inum, unsigned int *type); 20 struct gfs2_inum_host *inum, unsigned int *type);
38int gfs2_dir_add(struct inode *inode, const struct qstr *filename, 21int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
39 const struct gfs2_inum_host *inum, unsigned int type); 22 const struct gfs2_inum_host *inum, unsigned int type);
40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); 23int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque, 24int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
42 gfs2_filldir_t filldir); 25 filldir_t filldir);
43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 26int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
44 struct gfs2_inum_host *new_inum, unsigned int new_type); 27 struct gfs2_inum_host *new_inum, unsigned int new_type);
45 28
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index ebebbdcd7057..0c83c7f4dda8 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -301,7 +301,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
301 301
302 error = gfs2_meta_inode_buffer(ip, &dibh); 302 error = gfs2_meta_inode_buffer(ip, &dibh);
303 if (!error) { 303 if (!error) {
304 ip->i_inode.i_ctime.tv_sec = get_seconds(); 304 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
305 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 305 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
306 gfs2_dinode_out(ip, dibh->b_data); 306 gfs2_dinode_out(ip, dibh->b_data);
307 brelse(dibh); 307 brelse(dibh);
@@ -718,7 +718,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
718 (er->er_mode & S_IFMT)); 718 (er->er_mode & S_IFMT));
719 ip->i_inode.i_mode = er->er_mode; 719 ip->i_inode.i_mode = er->er_mode;
720 } 720 }
721 ip->i_inode.i_ctime.tv_sec = get_seconds(); 721 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
722 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 722 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
723 gfs2_dinode_out(ip, dibh->b_data); 723 gfs2_dinode_out(ip, dibh->b_data);
724 brelse(dibh); 724 brelse(dibh);
@@ -853,7 +853,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT)); 853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
854 ip->i_inode.i_mode = er->er_mode; 854 ip->i_inode.i_mode = er->er_mode;
855 } 855 }
856 ip->i_inode.i_ctime.tv_sec = get_seconds(); 856 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
857 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
858 gfs2_dinode_out(ip, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
859 brelse(dibh); 859 brelse(dibh);
@@ -1134,7 +1134,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1134 1134
1135 error = gfs2_meta_inode_buffer(ip, &dibh); 1135 error = gfs2_meta_inode_buffer(ip, &dibh);
1136 if (!error) { 1136 if (!error) {
1137 ip->i_inode.i_ctime.tv_sec = get_seconds(); 1137 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1139 gfs2_dinode_out(ip, dibh->b_data); 1139 gfs2_dinode_out(ip, dibh->b_data);
1140 brelse(dibh); 1140 brelse(dibh);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 438146904b58..6618c1190252 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,6 +19,8 @@
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/lm_interface.h> 21#include <linux/lm_interface.h>
22#include <linux/wait.h>
23#include <linux/rwsem.h>
22#include <asm/uaccess.h> 24#include <asm/uaccess.h>
23 25
24#include "gfs2.h" 26#include "gfs2.h"
@@ -33,11 +35,6 @@
33#include "super.h" 35#include "super.h"
34#include "util.h" 36#include "util.h"
35 37
36struct greedy {
37 struct gfs2_holder gr_gh;
38 struct delayed_work gr_work;
39};
40
41struct gfs2_gl_hash_bucket { 38struct gfs2_gl_hash_bucket {
42 struct hlist_head hb_list; 39 struct hlist_head hb_list;
43}; 40};
@@ -47,6 +44,9 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
47static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); 44static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
48static int dump_glock(struct gfs2_glock *gl); 45static int dump_glock(struct gfs2_glock *gl);
49static int dump_inode(struct gfs2_inode *ip); 46static int dump_inode(struct gfs2_inode *ip);
47static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
48static void gfs2_glock_drop_th(struct gfs2_glock *gl);
49static DECLARE_RWSEM(gfs2_umount_flush_sem);
50 50
51#define GFS2_GL_HASH_SHIFT 15 51#define GFS2_GL_HASH_SHIFT 15
52#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) 52#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
@@ -213,30 +213,6 @@ out:
213} 213}
214 214
215/** 215/**
216 * queue_empty - check to see if a glock's queue is empty
217 * @gl: the glock
218 * @head: the head of the queue to check
219 *
220 * This function protects the list in the event that a process already
221 * has a holder on the list and is adding a second holder for itself.
222 * The glmutex lock is what generally prevents processes from working
223 * on the same glock at once, but the special case of adding a second
224 * holder for yourself ("recursive" locking) doesn't involve locking
225 * glmutex, making the spin lock necessary.
226 *
227 * Returns: 1 if the queue is empty
228 */
229
230static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
231{
232 int empty;
233 spin_lock(&gl->gl_spin);
234 empty = list_empty(head);
235 spin_unlock(&gl->gl_spin);
236 return empty;
237}
238
239/**
240 * search_bucket() - Find struct gfs2_glock by lock number 216 * search_bucket() - Find struct gfs2_glock by lock number
241 * @bucket: the bucket to search 217 * @bucket: the bucket to search
242 * @name: The lock name 218 * @name: The lock name
@@ -395,11 +371,6 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
395 gh->gh_flags = flags; 371 gh->gh_flags = flags;
396 gh->gh_error = 0; 372 gh->gh_error = 0;
397 gh->gh_iflags = 0; 373 gh->gh_iflags = 0;
398 init_completion(&gh->gh_wait);
399
400 if (gh->gh_state == LM_ST_EXCLUSIVE)
401 gh->gh_flags |= GL_LOCAL_EXCL;
402
403 gfs2_glock_hold(gl); 374 gfs2_glock_hold(gl);
404} 375}
405 376
@@ -417,9 +388,6 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
417{ 388{
418 gh->gh_state = state; 389 gh->gh_state = state;
419 gh->gh_flags = flags; 390 gh->gh_flags = flags;
420 if (gh->gh_state == LM_ST_EXCLUSIVE)
421 gh->gh_flags |= GL_LOCAL_EXCL;
422
423 gh->gh_iflags &= 1 << HIF_ALLOCED; 391 gh->gh_iflags &= 1 << HIF_ALLOCED;
424 gh->gh_ip = (unsigned long)__builtin_return_address(0); 392 gh->gh_ip = (unsigned long)__builtin_return_address(0);
425} 393}
@@ -479,6 +447,29 @@ static void gfs2_holder_put(struct gfs2_holder *gh)
479 kfree(gh); 447 kfree(gh);
480} 448}
481 449
450static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
451{
452 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
453 gfs2_holder_put(gh);
454 return;
455 }
456 clear_bit(HIF_WAIT, &gh->gh_iflags);
457 smp_mb();
458 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
459}
460
461static int holder_wait(void *word)
462{
463 schedule();
464 return 0;
465}
466
467static void wait_on_holder(struct gfs2_holder *gh)
468{
469 might_sleep();
470 wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
471}
472
482/** 473/**
483 * rq_mutex - process a mutex request in the queue 474 * rq_mutex - process a mutex request in the queue
484 * @gh: the glock holder 475 * @gh: the glock holder
@@ -493,7 +484,9 @@ static int rq_mutex(struct gfs2_holder *gh)
493 list_del_init(&gh->gh_list); 484 list_del_init(&gh->gh_list);
494 /* gh->gh_error never examined. */ 485 /* gh->gh_error never examined. */
495 set_bit(GLF_LOCK, &gl->gl_flags); 486 set_bit(GLF_LOCK, &gl->gl_flags);
496 complete(&gh->gh_wait); 487 clear_bit(HIF_WAIT, &gh->gh_iflags);
488 smp_mb();
489 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
497 490
498 return 1; 491 return 1;
499} 492}
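
The glock.c hunks in this region replace the per-holder struct completion (gh_wait) with a HIF_WAIT flag driven by wait_on_bit()/wake_up_bit(): the waiter sets the bit before queueing, the waker clears it, issues a memory barrier and wakes the shared bit waitqueue. A minimal sketch of that pattern; demo_flags, DEMO_PENDING and the demo_* helpers are stand-ins, not the gfs2 names:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>

#define DEMO_PENDING 0			/* illustrative bit number */

static unsigned long demo_flags;

/* producer side: mark the request pending before handing it off */
static void demo_submit(void)
{
	if (test_and_set_bit(DEMO_PENDING, &demo_flags))
		BUG();			/* mirrors the patch's "already pending" check */
}

static int demo_bit_wait(void *word)
{
	schedule();			/* same action the patch uses: just reschedule */
	return 0;
}

/* waiter side: sleeps until DEMO_PENDING is cleared */
static void demo_wait(void)
{
	might_sleep();
	wait_on_bit(&demo_flags, DEMO_PENDING, demo_bit_wait,
		    TASK_UNINTERRUPTIBLE);
}

/* waker side: clear, then barrier, then wake, in that order */
static void demo_complete(void)
{
	clear_bit(DEMO_PENDING, &demo_flags);
	smp_mb();
	wake_up_bit(&demo_flags, DEMO_PENDING);
}
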
@@ -511,7 +504,6 @@ static int rq_promote(struct gfs2_holder *gh)
511{ 504{
512 struct gfs2_glock *gl = gh->gh_gl; 505 struct gfs2_glock *gl = gh->gh_gl;
513 struct gfs2_sbd *sdp = gl->gl_sbd; 506 struct gfs2_sbd *sdp = gl->gl_sbd;
514 const struct gfs2_glock_operations *glops = gl->gl_ops;
515 507
516 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { 508 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
517 if (list_empty(&gl->gl_holders)) { 509 if (list_empty(&gl->gl_holders)) {
@@ -526,7 +518,7 @@ static int rq_promote(struct gfs2_holder *gh)
526 gfs2_reclaim_glock(sdp); 518 gfs2_reclaim_glock(sdp);
527 } 519 }
528 520
529 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); 521 gfs2_glock_xmote_th(gh);
530 spin_lock(&gl->gl_spin); 522 spin_lock(&gl->gl_spin);
531 } 523 }
532 return 1; 524 return 1;
@@ -537,11 +529,11 @@ static int rq_promote(struct gfs2_holder *gh)
537 set_bit(GLF_LOCK, &gl->gl_flags); 529 set_bit(GLF_LOCK, &gl->gl_flags);
538 } else { 530 } else {
539 struct gfs2_holder *next_gh; 531 struct gfs2_holder *next_gh;
540 if (gh->gh_flags & GL_LOCAL_EXCL) 532 if (gh->gh_state == LM_ST_EXCLUSIVE)
541 return 1; 533 return 1;
542 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, 534 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
543 gh_list); 535 gh_list);
544 if (next_gh->gh_flags & GL_LOCAL_EXCL) 536 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
545 return 1; 537 return 1;
546 } 538 }
547 539
@@ -549,7 +541,7 @@ static int rq_promote(struct gfs2_holder *gh)
549 gh->gh_error = 0; 541 gh->gh_error = 0;
550 set_bit(HIF_HOLDER, &gh->gh_iflags); 542 set_bit(HIF_HOLDER, &gh->gh_iflags);
551 543
552 complete(&gh->gh_wait); 544 gfs2_holder_dispose_or_wake(gh);
553 545
554 return 0; 546 return 0;
555} 547}
@@ -564,7 +556,6 @@ static int rq_promote(struct gfs2_holder *gh)
564static int rq_demote(struct gfs2_holder *gh) 556static int rq_demote(struct gfs2_holder *gh)
565{ 557{
566 struct gfs2_glock *gl = gh->gh_gl; 558 struct gfs2_glock *gl = gh->gh_gl;
567 const struct gfs2_glock_operations *glops = gl->gl_ops;
568 559
569 if (!list_empty(&gl->gl_holders)) 560 if (!list_empty(&gl->gl_holders))
570 return 1; 561 return 1;
@@ -573,10 +564,7 @@ static int rq_demote(struct gfs2_holder *gh)
573 list_del_init(&gh->gh_list); 564 list_del_init(&gh->gh_list);
574 gh->gh_error = 0; 565 gh->gh_error = 0;
575 spin_unlock(&gl->gl_spin); 566 spin_unlock(&gl->gl_spin);
576 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 567 gfs2_holder_dispose_or_wake(gh);
577 gfs2_holder_put(gh);
578 else
579 complete(&gh->gh_wait);
580 spin_lock(&gl->gl_spin); 568 spin_lock(&gl->gl_spin);
581 } else { 569 } else {
582 gl->gl_req_gh = gh; 570 gl->gl_req_gh = gh;
@@ -585,9 +573,9 @@ static int rq_demote(struct gfs2_holder *gh)
585 573
586 if (gh->gh_state == LM_ST_UNLOCKED || 574 if (gh->gh_state == LM_ST_UNLOCKED ||
587 gl->gl_state != LM_ST_EXCLUSIVE) 575 gl->gl_state != LM_ST_EXCLUSIVE)
588 glops->go_drop_th(gl); 576 gfs2_glock_drop_th(gl);
589 else 577 else
590 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags); 578 gfs2_glock_xmote_th(gh);
591 579
592 spin_lock(&gl->gl_spin); 580 spin_lock(&gl->gl_spin);
593 } 581 }
@@ -596,30 +584,6 @@ static int rq_demote(struct gfs2_holder *gh)
596} 584}
597 585
598/** 586/**
599 * rq_greedy - process a queued request to drop greedy status
600 * @gh: the glock holder
601 *
602 * Returns: 1 if the queue is blocked
603 */
604
605static int rq_greedy(struct gfs2_holder *gh)
606{
607 struct gfs2_glock *gl = gh->gh_gl;
608
609 list_del_init(&gh->gh_list);
610 /* gh->gh_error never examined. */
611 clear_bit(GLF_GREEDY, &gl->gl_flags);
612 spin_unlock(&gl->gl_spin);
613
614 gfs2_holder_uninit(gh);
615 kfree(container_of(gh, struct greedy, gr_gh));
616
617 spin_lock(&gl->gl_spin);
618
619 return 0;
620}
621
622/**
623 * run_queue - process holder structures on a glock 587 * run_queue - process holder structures on a glock
624 * @gl: the glock 588 * @gl: the glock
625 * 589 *
@@ -649,8 +613,6 @@ static void run_queue(struct gfs2_glock *gl)
649 613
650 if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) 614 if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
651 blocked = rq_demote(gh); 615 blocked = rq_demote(gh);
652 else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
653 blocked = rq_greedy(gh);
654 else 616 else
655 gfs2_assert_warn(gl->gl_sbd, 0); 617 gfs2_assert_warn(gl->gl_sbd, 0);
656 618
@@ -684,6 +646,8 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
684 646
685 gfs2_holder_init(gl, 0, 0, &gh); 647 gfs2_holder_init(gl, 0, 0, &gh);
686 set_bit(HIF_MUTEX, &gh.gh_iflags); 648 set_bit(HIF_MUTEX, &gh.gh_iflags);
649 if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
650 BUG();
687 651
688 spin_lock(&gl->gl_spin); 652 spin_lock(&gl->gl_spin);
689 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 653 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
@@ -691,11 +655,13 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
691 } else { 655 } else {
692 gl->gl_owner = current; 656 gl->gl_owner = current;
693 gl->gl_ip = (unsigned long)__builtin_return_address(0); 657 gl->gl_ip = (unsigned long)__builtin_return_address(0);
694 complete(&gh.gh_wait); 658 clear_bit(HIF_WAIT, &gh.gh_iflags);
659 smp_mb();
660 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
695 } 661 }
696 spin_unlock(&gl->gl_spin); 662 spin_unlock(&gl->gl_spin);
697 663
698 wait_for_completion(&gh.gh_wait); 664 wait_on_holder(&gh);
699 gfs2_holder_uninit(&gh); 665 gfs2_holder_uninit(&gh);
700} 666}
701 667
@@ -774,6 +740,7 @@ restart:
774 return; 740 return;
775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags); 741 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
776 set_bit(HIF_DEALLOC, &new_gh->gh_iflags); 742 set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
743 set_bit(HIF_WAIT, &new_gh->gh_iflags);
777 744
778 goto restart; 745 goto restart;
779 } 746 }
@@ -825,7 +792,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
825 int op_done = 1; 792 int op_done = 1;
826 793
827 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 794 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
828 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 795 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
829 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); 796 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
830 797
831 state_change(gl, ret & LM_OUT_ST_MASK); 798 state_change(gl, ret & LM_OUT_ST_MASK);
@@ -908,12 +875,8 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
908 875
909 gfs2_glock_put(gl); 876 gfs2_glock_put(gl);
910 877
911 if (gh) { 878 if (gh)
912 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 879 gfs2_holder_dispose_or_wake(gh);
913 gfs2_holder_put(gh);
914 else
915 complete(&gh->gh_wait);
916 }
917} 880}
918 881
919/** 882/**
@@ -924,23 +887,26 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
924 * 887 *
925 */ 888 */
926 889
927void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags) 890void gfs2_glock_xmote_th(struct gfs2_holder *gh)
928{ 891{
892 struct gfs2_glock *gl = gh->gh_gl;
929 struct gfs2_sbd *sdp = gl->gl_sbd; 893 struct gfs2_sbd *sdp = gl->gl_sbd;
894 int flags = gh->gh_flags;
895 unsigned state = gh->gh_state;
930 const struct gfs2_glock_operations *glops = gl->gl_ops; 896 const struct gfs2_glock_operations *glops = gl->gl_ops;
931 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | 897 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
932 LM_FLAG_NOEXP | LM_FLAG_ANY | 898 LM_FLAG_NOEXP | LM_FLAG_ANY |
933 LM_FLAG_PRIORITY); 899 LM_FLAG_PRIORITY);
934 unsigned int lck_ret; 900 unsigned int lck_ret;
935 901
902 if (glops->go_xmote_th)
903 glops->go_xmote_th(gl);
904
936 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 905 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
937 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 906 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
938 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); 907 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
939 gfs2_assert_warn(sdp, state != gl->gl_state); 908 gfs2_assert_warn(sdp, state != gl->gl_state);
940 909
941 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
942 glops->go_sync(gl);
943
944 gfs2_glock_hold(gl); 910 gfs2_glock_hold(gl);
945 gl->gl_req_bh = xmote_bh; 911 gl->gl_req_bh = xmote_bh;
946 912
@@ -971,10 +937,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
971 const struct gfs2_glock_operations *glops = gl->gl_ops; 937 const struct gfs2_glock_operations *glops = gl->gl_ops;
972 struct gfs2_holder *gh = gl->gl_req_gh; 938 struct gfs2_holder *gh = gl->gl_req_gh;
973 939
974 clear_bit(GLF_PREFETCH, &gl->gl_flags);
975
976 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 940 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
977 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 941 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
978 gfs2_assert_warn(sdp, !ret); 942 gfs2_assert_warn(sdp, !ret);
979 943
980 state_change(gl, LM_ST_UNLOCKED); 944 state_change(gl, LM_ST_UNLOCKED);
@@ -1001,12 +965,8 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1001 965
1002 gfs2_glock_put(gl); 966 gfs2_glock_put(gl);
1003 967
1004 if (gh) { 968 if (gh)
1005 if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) 969 gfs2_holder_dispose_or_wake(gh);
1006 gfs2_holder_put(gh);
1007 else
1008 complete(&gh->gh_wait);
1009 }
1010} 970}
1011 971
1012/** 972/**
@@ -1015,19 +975,19 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1015 * 975 *
1016 */ 976 */
1017 977
1018void gfs2_glock_drop_th(struct gfs2_glock *gl) 978static void gfs2_glock_drop_th(struct gfs2_glock *gl)
1019{ 979{
1020 struct gfs2_sbd *sdp = gl->gl_sbd; 980 struct gfs2_sbd *sdp = gl->gl_sbd;
1021 const struct gfs2_glock_operations *glops = gl->gl_ops; 981 const struct gfs2_glock_operations *glops = gl->gl_ops;
1022 unsigned int ret; 982 unsigned int ret;
1023 983
984 if (glops->go_drop_th)
985 glops->go_drop_th(gl);
986
1024 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); 987 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1025 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders)); 988 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
1026 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); 989 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1027 990
1028 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1029 glops->go_sync(gl);
1030
1031 gfs2_glock_hold(gl); 991 gfs2_glock_hold(gl);
1032 gl->gl_req_bh = drop_bh; 992 gl->gl_req_bh = drop_bh;
1033 993
@@ -1107,8 +1067,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
1107 if (gh->gh_flags & LM_FLAG_PRIORITY) 1067 if (gh->gh_flags & LM_FLAG_PRIORITY)
1108 do_cancels(gh); 1068 do_cancels(gh);
1109 1069
1110 wait_for_completion(&gh->gh_wait); 1070 wait_on_holder(gh);
1111
1112 if (gh->gh_error) 1071 if (gh->gh_error)
1113 return gh->gh_error; 1072 return gh->gh_error;
1114 1073
@@ -1164,6 +1123,8 @@ static void add_to_queue(struct gfs2_holder *gh)
1164 struct gfs2_holder *existing; 1123 struct gfs2_holder *existing;
1165 1124
1166 BUG_ON(!gh->gh_owner); 1125 BUG_ON(!gh->gh_owner);
1126 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1127 BUG();
1167 1128
1168 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner); 1129 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1169 if (existing) { 1130 if (existing) {
@@ -1227,8 +1188,6 @@ restart:
1227 } 1188 }
1228 } 1189 }
1229 1190
1230 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1231
1232 return error; 1191 return error;
1233} 1192}
1234 1193
@@ -1321,98 +1280,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
1321} 1280}
1322 1281
1323/** 1282/**
1324 * gfs2_glock_prefetch - Try to prefetch a glock
1325 * @gl: the glock
1326 * @state: the state to prefetch in
1327 * @flags: flags passed to go_xmote_th()
1328 *
1329 */
1330
1331static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1332 int flags)
1333{
1334 const struct gfs2_glock_operations *glops = gl->gl_ops;
1335
1336 spin_lock(&gl->gl_spin);
1337
1338 if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
1339 !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
1340 !list_empty(&gl->gl_waiters3) ||
1341 relaxed_state_ok(gl->gl_state, state, flags)) {
1342 spin_unlock(&gl->gl_spin);
1343 return;
1344 }
1345
1346 set_bit(GLF_PREFETCH, &gl->gl_flags);
1347 set_bit(GLF_LOCK, &gl->gl_flags);
1348 spin_unlock(&gl->gl_spin);
1349
1350 glops->go_xmote_th(gl, state, flags);
1351}
1352
1353static void greedy_work(struct work_struct *work)
1354{
1355 struct greedy *gr = container_of(work, struct greedy, gr_work.work);
1356 struct gfs2_holder *gh = &gr->gr_gh;
1357 struct gfs2_glock *gl = gh->gh_gl;
1358 const struct gfs2_glock_operations *glops = gl->gl_ops;
1359
1360 clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1361
1362 if (glops->go_greedy)
1363 glops->go_greedy(gl);
1364
1365 spin_lock(&gl->gl_spin);
1366
1367 if (list_empty(&gl->gl_waiters2)) {
1368 clear_bit(GLF_GREEDY, &gl->gl_flags);
1369 spin_unlock(&gl->gl_spin);
1370 gfs2_holder_uninit(gh);
1371 kfree(gr);
1372 } else {
1373 gfs2_glock_hold(gl);
1374 list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1375 run_queue(gl);
1376 spin_unlock(&gl->gl_spin);
1377 gfs2_glock_put(gl);
1378 }
1379}
1380
1381/**
1382 * gfs2_glock_be_greedy -
1383 * @gl:
1384 * @time:
1385 *
1386 * Returns: 0 if go_greedy will be called, 1 otherwise
1387 */
1388
1389int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1390{
1391 struct greedy *gr;
1392 struct gfs2_holder *gh;
1393
1394 if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
1395 test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1396 return 1;
1397
1398 gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1399 if (!gr) {
1400 clear_bit(GLF_GREEDY, &gl->gl_flags);
1401 return 1;
1402 }
1403 gh = &gr->gr_gh;
1404
1405 gfs2_holder_init(gl, 0, 0, gh);
1406 set_bit(HIF_GREEDY, &gh->gh_iflags);
1407 INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
1408
1409 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1410 schedule_delayed_work(&gr->gr_work, time);
1411
1412 return 0;
1413}
1414
1415/**
1416 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1283 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1417 * @gh: the holder structure 1284 * @gh: the holder structure
1418 * 1285 *
@@ -1470,10 +1337,7 @@ static int glock_compare(const void *arg_a, const void *arg_b)
1470 return 1; 1337 return 1;
1471 if (a->ln_number < b->ln_number) 1338 if (a->ln_number < b->ln_number)
1472 return -1; 1339 return -1;
1473 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE) 1340 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1474 return 1;
1475 if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
1476 return 1;
1477 return 0; 1341 return 0;
1478} 1342}
1479 1343
@@ -1618,34 +1482,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1618} 1482}
1619 1483
1620/** 1484/**
1621 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1622 * @sdp: the filesystem
1623 * @number: the lock number
1624 * @glops: the glock operations for the type of glock
1625 * @state: the state to acquire the glock in
 1626 * @flags: modifier flags for the acquisition
1627 *
1628 * Returns: errno
1629 */
1630
1631void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
1632 const struct gfs2_glock_operations *glops,
1633 unsigned int state, int flags)
1634{
1635 struct gfs2_glock *gl;
1636 int error;
1637
1638 if (atomic_read(&sdp->sd_reclaim_count) <
1639 gfs2_tune_get(sdp, gt_reclaim_limit)) {
1640 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1641 if (!error) {
1642 gfs2_glock_prefetch(gl, state, flags);
1643 gfs2_glock_put(gl);
1644 }
1645 }
1646}
1647
1648/**
1649 * gfs2_lvb_hold - attach a LVB from a glock 1485 * gfs2_lvb_hold - attach a LVB from a glock
1650 * @gl: The glock in question 1486 * @gl: The glock in question
1651 * 1487 *
@@ -1703,8 +1539,6 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1703 if (!gl) 1539 if (!gl)
1704 return; 1540 return;
1705 1541
1706 if (gl->gl_ops->go_callback)
1707 gl->gl_ops->go_callback(gl, state);
1708 handle_callback(gl, state); 1542 handle_callback(gl, state);
1709 1543
1710 spin_lock(&gl->gl_spin); 1544 spin_lock(&gl->gl_spin);
@@ -1746,12 +1580,14 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1746 struct lm_async_cb *async = data; 1580 struct lm_async_cb *async = data;
1747 struct gfs2_glock *gl; 1581 struct gfs2_glock *gl;
1748 1582
1583 down_read(&gfs2_umount_flush_sem);
1749 gl = gfs2_glock_find(sdp, &async->lc_name); 1584 gl = gfs2_glock_find(sdp, &async->lc_name);
1750 if (gfs2_assert_warn(sdp, gl)) 1585 if (gfs2_assert_warn(sdp, gl))
1751 return; 1586 return;
1752 if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) 1587 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1753 gl->gl_req_bh(gl, async->lc_ret); 1588 gl->gl_req_bh(gl, async->lc_ret);
1754 gfs2_glock_put(gl); 1589 gfs2_glock_put(gl);
1590 up_read(&gfs2_umount_flush_sem);
1755 return; 1591 return;
1756 } 1592 }
1757 1593
@@ -1781,15 +1617,11 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1781 1617
1782static int demote_ok(struct gfs2_glock *gl) 1618static int demote_ok(struct gfs2_glock *gl)
1783{ 1619{
1784 struct gfs2_sbd *sdp = gl->gl_sbd;
1785 const struct gfs2_glock_operations *glops = gl->gl_ops; 1620 const struct gfs2_glock_operations *glops = gl->gl_ops;
1786 int demote = 1; 1621 int demote = 1;
1787 1622
1788 if (test_bit(GLF_STICKY, &gl->gl_flags)) 1623 if (test_bit(GLF_STICKY, &gl->gl_flags))
1789 demote = 0; 1624 demote = 0;
1790 else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
1791 demote = time_after_eq(jiffies, gl->gl_stamp +
1792 gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
1793 else if (glops->go_demote_ok) 1625 else if (glops->go_demote_ok)
1794 demote = glops->go_demote_ok(gl); 1626 demote = glops->go_demote_ok(gl);
1795 1627
@@ -1845,7 +1677,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1845 atomic_inc(&sdp->sd_reclaimed); 1677 atomic_inc(&sdp->sd_reclaimed);
1846 1678
1847 if (gfs2_glmutex_trylock(gl)) { 1679 if (gfs2_glmutex_trylock(gl)) {
1848 if (queue_empty(gl, &gl->gl_holders) && 1680 if (list_empty(&gl->gl_holders) &&
1849 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1681 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1850 handle_callback(gl, LM_ST_UNLOCKED); 1682 handle_callback(gl, LM_ST_UNLOCKED);
1851 gfs2_glmutex_unlock(gl); 1683 gfs2_glmutex_unlock(gl);
@@ -1909,7 +1741,7 @@ static void scan_glock(struct gfs2_glock *gl)
1909 return; 1741 return;
1910 1742
1911 if (gfs2_glmutex_trylock(gl)) { 1743 if (gfs2_glmutex_trylock(gl)) {
1912 if (queue_empty(gl, &gl->gl_holders) && 1744 if (list_empty(&gl->gl_holders) &&
1913 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1745 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1914 goto out_schedule; 1746 goto out_schedule;
1915 gfs2_glmutex_unlock(gl); 1747 gfs2_glmutex_unlock(gl);
@@ -1958,7 +1790,7 @@ static void clear_glock(struct gfs2_glock *gl)
1958 } 1790 }
1959 1791
1960 if (gfs2_glmutex_trylock(gl)) { 1792 if (gfs2_glmutex_trylock(gl)) {
1961 if (queue_empty(gl, &gl->gl_holders) && 1793 if (list_empty(&gl->gl_holders) &&
1962 gl->gl_state != LM_ST_UNLOCKED) 1794 gl->gl_state != LM_ST_UNLOCKED)
1963 handle_callback(gl, LM_ST_UNLOCKED); 1795 handle_callback(gl, LM_ST_UNLOCKED);
1964 gfs2_glmutex_unlock(gl); 1796 gfs2_glmutex_unlock(gl);
@@ -2000,7 +1832,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2000 t = jiffies; 1832 t = jiffies;
2001 } 1833 }
2002 1834
1835 down_write(&gfs2_umount_flush_sem);
2003 invalidate_inodes(sdp->sd_vfs); 1836 invalidate_inodes(sdp->sd_vfs);
1837 up_write(&gfs2_umount_flush_sem);
2004 msleep(10); 1838 msleep(10);
2005 } 1839 }
2006} 1840}
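
Note: the glock.c hunks above drop the greedy/prefetch machinery, bracket the lock-module callback and the umount-time invalidate_inodes() call with gfs2_umount_flush_sem, and reduce glock_compare() -- the comparison used when several holders are queued at once -- to ordering on the lock fields only, with a BUG_ON() for duplicate entries. The point of the sort is that every path taking multiple glocks acquires them in one global order. The stand-alone sketch below models that ordering idea only; the struct and the type-before-number comparison are stand-ins rather than the real gfs2 definitions (the hunk shows only the number comparison and the BUG_ON). It compiles with any C99 compiler.

/*
 * Stand-alone sketch (not gfs2 code): sorting a holder array gives every
 * caller the same global acquisition order for multiple locks, which is
 * what lets the shared/exclusive tie-breakers above be removed.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_holder {
        unsigned int type;              /* stands in for the glock type */
        unsigned long long number;      /* stands in for gl_name.ln_number */
};

static int demo_compare(const void *arg_a, const void *arg_b)
{
        const struct demo_holder *a = arg_a;
        const struct demo_holder *b = arg_b;

        if (a->type != b->type)
                return a->type < b->type ? -1 : 1;
        if (a->number > b->number)
                return 1;
        if (a->number < b->number)
                return -1;
        return 0;
}

int main(void)
{
        struct demo_holder ghs[] = { { 2, 7341 }, { 2, 18 }, { 5, 7341 } };
        size_t n = sizeof(ghs) / sizeof(ghs[0]);

        qsort(ghs, n, sizeof(ghs[0]), demo_compare);
        for (size_t i = 0; i < n; i++)
                printf("type %u, number %llu\n", ghs[i].type, ghs[i].number);
        return 0;
}
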
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index fb39108fc05c..f50e40ceca43 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -20,7 +20,6 @@
20#define LM_FLAG_ANY 0x00000008 20#define LM_FLAG_ANY 0x00000008
21#define LM_FLAG_PRIORITY 0x00000010 */ 21#define LM_FLAG_PRIORITY 0x00000010 */
22 22
23#define GL_LOCAL_EXCL 0x00000020
24#define GL_ASYNC 0x00000040 23#define GL_ASYNC 0x00000040
25#define GL_EXACT 0x00000080 24#define GL_EXACT 0x00000080
26#define GL_SKIP 0x00000100 25#define GL_SKIP 0x00000100
@@ -83,17 +82,11 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
83void gfs2_holder_reinit(unsigned int state, unsigned flags, 82void gfs2_holder_reinit(unsigned int state, unsigned flags,
84 struct gfs2_holder *gh); 83 struct gfs2_holder *gh);
85void gfs2_holder_uninit(struct gfs2_holder *gh); 84void gfs2_holder_uninit(struct gfs2_holder *gh);
86
87void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags);
88void gfs2_glock_drop_th(struct gfs2_glock *gl);
89
90int gfs2_glock_nq(struct gfs2_holder *gh); 85int gfs2_glock_nq(struct gfs2_holder *gh);
91int gfs2_glock_poll(struct gfs2_holder *gh); 86int gfs2_glock_poll(struct gfs2_holder *gh);
92int gfs2_glock_wait(struct gfs2_holder *gh); 87int gfs2_glock_wait(struct gfs2_holder *gh);
93void gfs2_glock_dq(struct gfs2_holder *gh); 88void gfs2_glock_dq(struct gfs2_holder *gh);
94 89
95int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time);
96
97void gfs2_glock_dq_uninit(struct gfs2_holder *gh); 90void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
98int gfs2_glock_nq_num(struct gfs2_sbd *sdp, 91int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
99 u64 number, const struct gfs2_glock_operations *glops, 92 u64 number, const struct gfs2_glock_operations *glops,
@@ -103,10 +96,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
103void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs); 96void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
104void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); 97void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
105 98
106void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
107 const struct gfs2_glock_operations *glops,
108 unsigned int state, int flags);
109
110/** 99/**
111 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock 100
112 * @gl: the glock 101 * @gl: the glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index b068d10bcb6e..c4b0391b7aa2 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -117,12 +117,14 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
117 117
118static void meta_go_sync(struct gfs2_glock *gl) 118static void meta_go_sync(struct gfs2_glock *gl)
119{ 119{
120 if (gl->gl_state != LM_ST_EXCLUSIVE)
121 return;
122
120 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { 123 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
121 gfs2_log_flush(gl->gl_sbd, gl); 124 gfs2_log_flush(gl->gl_sbd, gl);
122 gfs2_meta_sync(gl); 125 gfs2_meta_sync(gl);
123 gfs2_ail_empty_gl(gl); 126 gfs2_ail_empty_gl(gl);
124 } 127 }
125
126} 128}
127 129
128/** 130/**
@@ -142,6 +144,37 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
142} 144}
143 145
144/** 146/**
147 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
148 * @gl: the glock protecting the inode
149 *
150 */
151
152static void inode_go_sync(struct gfs2_glock *gl)
153{
154 struct gfs2_inode *ip = gl->gl_object;
155
156 if (ip && !S_ISREG(ip->i_inode.i_mode))
157 ip = NULL;
158
159 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
160 gfs2_log_flush(gl->gl_sbd, gl);
161 if (ip)
162 filemap_fdatawrite(ip->i_inode.i_mapping);
163 gfs2_meta_sync(gl);
164 if (ip) {
165 struct address_space *mapping = ip->i_inode.i_mapping;
166 int error = filemap_fdatawait(mapping);
167 if (error == -ENOSPC)
168 set_bit(AS_ENOSPC, &mapping->flags);
169 else if (error)
170 set_bit(AS_EIO, &mapping->flags);
171 }
172 clear_bit(GLF_DIRTY, &gl->gl_flags);
173 gfs2_ail_empty_gl(gl);
174 }
175}
176
177/**
145 * inode_go_xmote_th - promote/demote a glock 178 * inode_go_xmote_th - promote/demote a glock
146 * @gl: the glock 179 * @gl: the glock
147 * @state: the requested state 180 * @state: the requested state
@@ -149,12 +182,12 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
149 * 182 *
150 */ 183 */
151 184
152static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state, 185static void inode_go_xmote_th(struct gfs2_glock *gl)
153 int flags)
154{ 186{
155 if (gl->gl_state != LM_ST_UNLOCKED) 187 if (gl->gl_state != LM_ST_UNLOCKED)
156 gfs2_pte_inval(gl); 188 gfs2_pte_inval(gl);
157 gfs2_glock_xmote_th(gl, state, flags); 189 if (gl->gl_state == LM_ST_EXCLUSIVE)
190 inode_go_sync(gl);
158} 191}
159 192
160/** 193/**
@@ -189,38 +222,8 @@ static void inode_go_xmote_bh(struct gfs2_glock *gl)
189static void inode_go_drop_th(struct gfs2_glock *gl) 222static void inode_go_drop_th(struct gfs2_glock *gl)
190{ 223{
191 gfs2_pte_inval(gl); 224 gfs2_pte_inval(gl);
192 gfs2_glock_drop_th(gl); 225 if (gl->gl_state == LM_ST_EXCLUSIVE)
193} 226 inode_go_sync(gl);
194
195/**
196 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
197 * @gl: the glock protecting the inode
198 *
199 */
200
201static void inode_go_sync(struct gfs2_glock *gl)
202{
203 struct gfs2_inode *ip = gl->gl_object;
204
205 if (ip && !S_ISREG(ip->i_inode.i_mode))
206 ip = NULL;
207
208 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
209 gfs2_log_flush(gl->gl_sbd, gl);
210 if (ip)
211 filemap_fdatawrite(ip->i_inode.i_mapping);
212 gfs2_meta_sync(gl);
213 if (ip) {
214 struct address_space *mapping = ip->i_inode.i_mapping;
215 int error = filemap_fdatawait(mapping);
216 if (error == -ENOSPC)
217 set_bit(AS_ENOSPC, &mapping->flags);
218 else if (error)
219 set_bit(AS_EIO, &mapping->flags);
220 }
221 clear_bit(GLF_DIRTY, &gl->gl_flags);
222 gfs2_ail_empty_gl(gl);
223 }
224} 227}
225 228
226/** 229/**
@@ -295,7 +298,7 @@ static int inode_go_lock(struct gfs2_holder *gh)
295 298
296 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && 299 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
297 (gl->gl_state == LM_ST_EXCLUSIVE) && 300 (gl->gl_state == LM_ST_EXCLUSIVE) &&
298 (gh->gh_flags & GL_LOCAL_EXCL)) 301 (gh->gh_state == LM_ST_EXCLUSIVE))
299 error = gfs2_truncatei_resume(ip); 302 error = gfs2_truncatei_resume(ip);
300 303
301 return error; 304 return error;
@@ -319,39 +322,6 @@ static void inode_go_unlock(struct gfs2_holder *gh)
319} 322}
320 323
321/** 324/**
322 * inode_greedy -
323 * @gl: the glock
324 *
325 */
326
327static void inode_greedy(struct gfs2_glock *gl)
328{
329 struct gfs2_sbd *sdp = gl->gl_sbd;
330 struct gfs2_inode *ip = gl->gl_object;
331 unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
332 unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
333 unsigned int new_time;
334
335 spin_lock(&ip->i_spin);
336
337 if (time_after(ip->i_last_pfault + quantum, jiffies)) {
338 new_time = ip->i_greedy + quantum;
339 if (new_time > max)
340 new_time = max;
341 } else {
342 new_time = ip->i_greedy - quantum;
343 if (!new_time || new_time > max)
344 new_time = 1;
345 }
346
347 ip->i_greedy = new_time;
348
349 spin_unlock(&ip->i_spin);
350
351 iput(&ip->i_inode);
352}
353
354/**
355 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock 325 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
356 * @gl: the glock 326 * @gl: the glock
357 * 327 *
@@ -398,8 +368,7 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
398 * 368 *
399 */ 369 */
400 370
401static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state, 371static void trans_go_xmote_th(struct gfs2_glock *gl)
402 int flags)
403{ 372{
404 struct gfs2_sbd *sdp = gl->gl_sbd; 373 struct gfs2_sbd *sdp = gl->gl_sbd;
405 374
@@ -408,8 +377,6 @@ static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
408 gfs2_meta_syncfs(sdp); 377 gfs2_meta_syncfs(sdp);
409 gfs2_log_shutdown(sdp); 378 gfs2_log_shutdown(sdp);
410 } 379 }
411
412 gfs2_glock_xmote_th(gl, state, flags);
413} 380}
414 381
415/** 382/**
@@ -461,8 +428,6 @@ static void trans_go_drop_th(struct gfs2_glock *gl)
461 gfs2_meta_syncfs(sdp); 428 gfs2_meta_syncfs(sdp);
462 gfs2_log_shutdown(sdp); 429 gfs2_log_shutdown(sdp);
463 } 430 }
464
465 gfs2_glock_drop_th(gl);
466} 431}
467 432
468/** 433/**
@@ -478,8 +443,8 @@ static int quota_go_demote_ok(struct gfs2_glock *gl)
478} 443}
479 444
480const struct gfs2_glock_operations gfs2_meta_glops = { 445const struct gfs2_glock_operations gfs2_meta_glops = {
481 .go_xmote_th = gfs2_glock_xmote_th, 446 .go_xmote_th = meta_go_sync,
482 .go_drop_th = gfs2_glock_drop_th, 447 .go_drop_th = meta_go_sync,
483 .go_type = LM_TYPE_META, 448 .go_type = LM_TYPE_META,
484}; 449};
485 450
@@ -487,19 +452,14 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
487 .go_xmote_th = inode_go_xmote_th, 452 .go_xmote_th = inode_go_xmote_th,
488 .go_xmote_bh = inode_go_xmote_bh, 453 .go_xmote_bh = inode_go_xmote_bh,
489 .go_drop_th = inode_go_drop_th, 454 .go_drop_th = inode_go_drop_th,
490 .go_sync = inode_go_sync,
491 .go_inval = inode_go_inval, 455 .go_inval = inode_go_inval,
492 .go_demote_ok = inode_go_demote_ok, 456 .go_demote_ok = inode_go_demote_ok,
493 .go_lock = inode_go_lock, 457 .go_lock = inode_go_lock,
494 .go_unlock = inode_go_unlock, 458 .go_unlock = inode_go_unlock,
495 .go_greedy = inode_greedy,
496 .go_type = LM_TYPE_INODE, 459 .go_type = LM_TYPE_INODE,
497}; 460};
498 461
499const struct gfs2_glock_operations gfs2_rgrp_glops = { 462const struct gfs2_glock_operations gfs2_rgrp_glops = {
500 .go_xmote_th = gfs2_glock_xmote_th,
501 .go_drop_th = gfs2_glock_drop_th,
502 .go_sync = meta_go_sync,
503 .go_inval = meta_go_inval, 463 .go_inval = meta_go_inval,
504 .go_demote_ok = rgrp_go_demote_ok, 464 .go_demote_ok = rgrp_go_demote_ok,
505 .go_lock = rgrp_go_lock, 465 .go_lock = rgrp_go_lock,
@@ -515,33 +475,23 @@ const struct gfs2_glock_operations gfs2_trans_glops = {
515}; 475};
516 476
517const struct gfs2_glock_operations gfs2_iopen_glops = { 477const struct gfs2_glock_operations gfs2_iopen_glops = {
518 .go_xmote_th = gfs2_glock_xmote_th,
519 .go_drop_th = gfs2_glock_drop_th,
520 .go_type = LM_TYPE_IOPEN, 478 .go_type = LM_TYPE_IOPEN,
521}; 479};
522 480
523const struct gfs2_glock_operations gfs2_flock_glops = { 481const struct gfs2_glock_operations gfs2_flock_glops = {
524 .go_xmote_th = gfs2_glock_xmote_th,
525 .go_drop_th = gfs2_glock_drop_th,
526 .go_type = LM_TYPE_FLOCK, 482 .go_type = LM_TYPE_FLOCK,
527}; 483};
528 484
529const struct gfs2_glock_operations gfs2_nondisk_glops = { 485const struct gfs2_glock_operations gfs2_nondisk_glops = {
530 .go_xmote_th = gfs2_glock_xmote_th,
531 .go_drop_th = gfs2_glock_drop_th,
532 .go_type = LM_TYPE_NONDISK, 486 .go_type = LM_TYPE_NONDISK,
533}; 487};
534 488
535const struct gfs2_glock_operations gfs2_quota_glops = { 489const struct gfs2_glock_operations gfs2_quota_glops = {
536 .go_xmote_th = gfs2_glock_xmote_th,
537 .go_drop_th = gfs2_glock_drop_th,
538 .go_demote_ok = quota_go_demote_ok, 490 .go_demote_ok = quota_go_demote_ok,
539 .go_type = LM_TYPE_QUOTA, 491 .go_type = LM_TYPE_QUOTA,
540}; 492};
541 493
542const struct gfs2_glock_operations gfs2_journal_glops = { 494const struct gfs2_glock_operations gfs2_journal_glops = {
543 .go_xmote_th = gfs2_glock_xmote_th,
544 .go_drop_th = gfs2_glock_drop_th,
545 .go_type = LM_TYPE_JOURNAL, 495 .go_type = LM_TYPE_JOURNAL,
546}; 496};
547 497
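
Note: with the sync work folded into inode_go_xmote_th()/inode_go_drop_th() and the boilerplate gfs2_glock_xmote_th/gfs2_glock_drop_th entries deleted, several operation tables (iopen, flock, nondisk, quota, journal) are left with NULL callbacks, so the glock core presumably has to guard each invocation. The fragment below is a stand-alone illustration of that optional-callback table pattern, not code from this patch.

/* Stand-alone illustration (not gfs2 code): once a per-type operations
 * table may leave callbacks unset, callers test before invoking them. */
#include <stdio.h>

struct demo_ops {
        void (*go_xmote_th)(void);      /* may be NULL, like the iopen/flock tables */
};

static void meta_sync(void) { printf("sync before state change\n"); }

static const struct demo_ops demo_meta_ops  = { .go_xmote_th = meta_sync };
static const struct demo_ops demo_iopen_ops = { .go_xmote_th = NULL };

static void run_xmote(const struct demo_ops *ops)
{
        if (ops->go_xmote_th)
                ops->go_xmote_th();
}

int main(void)
{
        run_xmote(&demo_meta_ops);
        run_xmote(&demo_iopen_ops);     /* silently skipped */
        return 0;
}
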
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 734421edae85..12c80fd28db5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -101,17 +101,14 @@ struct gfs2_bufdata {
101}; 101};
102 102
103struct gfs2_glock_operations { 103struct gfs2_glock_operations {
104 void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags); 104 void (*go_xmote_th) (struct gfs2_glock *gl);
105 void (*go_xmote_bh) (struct gfs2_glock *gl); 105 void (*go_xmote_bh) (struct gfs2_glock *gl);
106 void (*go_drop_th) (struct gfs2_glock *gl); 106 void (*go_drop_th) (struct gfs2_glock *gl);
107 void (*go_drop_bh) (struct gfs2_glock *gl); 107 void (*go_drop_bh) (struct gfs2_glock *gl);
108 void (*go_sync) (struct gfs2_glock *gl);
109 void (*go_inval) (struct gfs2_glock *gl, int flags); 108 void (*go_inval) (struct gfs2_glock *gl, int flags);
110 int (*go_demote_ok) (struct gfs2_glock *gl); 109 int (*go_demote_ok) (struct gfs2_glock *gl);
111 int (*go_lock) (struct gfs2_holder *gh); 110 int (*go_lock) (struct gfs2_holder *gh);
112 void (*go_unlock) (struct gfs2_holder *gh); 111 void (*go_unlock) (struct gfs2_holder *gh);
113 void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
114 void (*go_greedy) (struct gfs2_glock *gl);
115 const int go_type; 112 const int go_type;
116}; 113};
117 114
@@ -120,7 +117,6 @@ enum {
120 HIF_MUTEX = 0, 117 HIF_MUTEX = 0,
121 HIF_PROMOTE = 1, 118 HIF_PROMOTE = 1,
122 HIF_DEMOTE = 2, 119 HIF_DEMOTE = 2,
123 HIF_GREEDY = 3,
124 120
125 /* States */ 121 /* States */
126 HIF_ALLOCED = 4, 122 HIF_ALLOCED = 4,
@@ -128,6 +124,7 @@ enum {
128 HIF_HOLDER = 6, 124 HIF_HOLDER = 6,
129 HIF_FIRST = 7, 125 HIF_FIRST = 7,
130 HIF_ABORTED = 9, 126 HIF_ABORTED = 9,
127 HIF_WAIT = 10,
131}; 128};
132 129
133struct gfs2_holder { 130struct gfs2_holder {
@@ -140,17 +137,14 @@ struct gfs2_holder {
140 137
141 int gh_error; 138 int gh_error;
142 unsigned long gh_iflags; 139 unsigned long gh_iflags;
143 struct completion gh_wait;
144 unsigned long gh_ip; 140 unsigned long gh_ip;
145}; 141};
146 142
147enum { 143enum {
148 GLF_LOCK = 1, 144 GLF_LOCK = 1,
149 GLF_STICKY = 2, 145 GLF_STICKY = 2,
150 GLF_PREFETCH = 3,
151 GLF_DIRTY = 5, 146 GLF_DIRTY = 5,
152 GLF_SKIP_WAITERS2 = 6, 147 GLF_SKIP_WAITERS2 = 6,
153 GLF_GREEDY = 7,
154}; 148};
155 149
156struct gfs2_glock { 150struct gfs2_glock {
@@ -167,7 +161,7 @@ struct gfs2_glock {
167 unsigned long gl_ip; 161 unsigned long gl_ip;
168 struct list_head gl_holders; 162 struct list_head gl_holders;
169 struct list_head gl_waiters1; /* HIF_MUTEX */ 163 struct list_head gl_waiters1; /* HIF_MUTEX */
170 struct list_head gl_waiters2; /* HIF_DEMOTE, HIF_GREEDY */ 164 struct list_head gl_waiters2; /* HIF_DEMOTE */
171 struct list_head gl_waiters3; /* HIF_PROMOTE */ 165 struct list_head gl_waiters3; /* HIF_PROMOTE */
172 166
173 const struct gfs2_glock_operations *gl_ops; 167 const struct gfs2_glock_operations *gl_ops;
@@ -236,7 +230,6 @@ struct gfs2_inode {
236 230
237 spinlock_t i_spin; 231 spinlock_t i_spin;
238 struct rw_semaphore i_rw_mutex; 232 struct rw_semaphore i_rw_mutex;
239 unsigned int i_greedy;
240 unsigned long i_last_pfault; 233 unsigned long i_last_pfault;
241 234
242 struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT]; 235 struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
@@ -418,17 +411,12 @@ struct gfs2_tune {
418 unsigned int gt_atime_quantum; /* Min secs between atime updates */ 411 unsigned int gt_atime_quantum; /* Min secs between atime updates */
419 unsigned int gt_new_files_jdata; 412 unsigned int gt_new_files_jdata;
420 unsigned int gt_new_files_directio; 413 unsigned int gt_new_files_directio;
421 unsigned int gt_max_atomic_write; /* Split big writes into this size */
422 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ 414 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
423 unsigned int gt_lockdump_size; 415 unsigned int gt_lockdump_size;
424 unsigned int gt_stall_secs; /* Detects trouble! */ 416 unsigned int gt_stall_secs; /* Detects trouble! */
425 unsigned int gt_complain_secs; 417 unsigned int gt_complain_secs;
426 unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */ 418 unsigned int gt_reclaim_limit; /* Max num of glocks in reclaim list */
427 unsigned int gt_entries_per_readdir; 419 unsigned int gt_entries_per_readdir;
428 unsigned int gt_prefetch_secs; /* Usage window for prefetched glocks */
429 unsigned int gt_greedy_default;
430 unsigned int gt_greedy_quantum;
431 unsigned int gt_greedy_max;
432 unsigned int gt_statfs_quantum; 420 unsigned int gt_statfs_quantum;
433 unsigned int gt_statfs_slow; 421 unsigned int gt_statfs_slow;
434}; 422};
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d122074c45e1..0d6831a40565 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -287,10 +287,8 @@ out:
287 * 287 *
288 * Returns: errno 288 * Returns: errno
289 */ 289 */
290
291int gfs2_change_nlink(struct gfs2_inode *ip, int diff) 290int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
292{ 291{
293 struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
294 struct buffer_head *dibh; 292 struct buffer_head *dibh;
295 u32 nlink; 293 u32 nlink;
296 int error; 294 int error;
@@ -315,42 +313,34 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
315 else 313 else
316 drop_nlink(&ip->i_inode); 314 drop_nlink(&ip->i_inode);
317 315
318 ip->i_inode.i_ctime.tv_sec = get_seconds(); 316 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
319 317
320 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 318 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
321 gfs2_dinode_out(ip, dibh->b_data); 319 gfs2_dinode_out(ip, dibh->b_data);
322 brelse(dibh); 320 brelse(dibh);
323 mark_inode_dirty(&ip->i_inode); 321 mark_inode_dirty(&ip->i_inode);
324 322
325 if (ip->i_inode.i_nlink == 0) { 323 if (ip->i_inode.i_nlink == 0)
326 struct gfs2_rgrpd *rgd;
327 struct gfs2_holder ri_gh, rg_gh;
328
329 error = gfs2_rindex_hold(sdp, &ri_gh);
330 if (error)
331 goto out;
332 error = -EIO;
333 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
334 if (!rgd)
335 goto out_norgrp;
336 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
337 if (error)
338 goto out_norgrp;
339
340 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ 324 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
341 gfs2_glock_dq_uninit(&rg_gh); 325
342out_norgrp:
343 gfs2_glock_dq_uninit(&ri_gh);
344 }
345out:
346 return error; 326 return error;
347} 327}
348 328
349struct inode *gfs2_lookup_simple(struct inode *dip, const char *name) 329struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
350{ 330{
351 struct qstr qstr; 331 struct qstr qstr;
332 struct inode *inode;
352 gfs2_str2qstr(&qstr, name); 333 gfs2_str2qstr(&qstr, name);
353 return gfs2_lookupi(dip, &qstr, 1, NULL); 334 inode = gfs2_lookupi(dip, &qstr, 1, NULL);
335 /* gfs2_lookupi has inconsistent callers: vfs
336 * related routines expect NULL for no entry found,
337 * gfs2_lookup_simple callers expect ENOENT
338 * and do not check for NULL.
339 */
340 if (inode == NULL)
341 return ERR_PTR(-ENOENT);
342 else
343 return inode;
354} 344}
355 345
356 346
@@ -361,8 +351,10 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
361 * @is_root: If 1, ignore the caller's permissions 351 * @is_root: If 1, ignore the caller's permissions
362 * @i_gh: An uninitialized holder for the new inode glock 352 * @i_gh: An uninitialized holder for the new inode glock
363 * 353 *
364 * There will always be a vnode (Linux VFS inode) for the d_gh inode unless 354 * This can be called via the VFS filldir function when NFS is doing
365 * @is_root is true. 355 * a readdirplus and the inode which its intending to stat isn't
356 * already in cache. In this case we must not take the directory glock
357 * again, since the readdir call will have already taken that lock.
366 * 358 *
367 * Returns: errno 359 * Returns: errno
368 */ 360 */
@@ -375,8 +367,9 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
375 struct gfs2_holder d_gh; 367 struct gfs2_holder d_gh;
376 struct gfs2_inum_host inum; 368 struct gfs2_inum_host inum;
377 unsigned int type; 369 unsigned int type;
378 int error = 0; 370 int error;
379 struct inode *inode = NULL; 371 struct inode *inode = NULL;
372 int unlock = 0;
380 373
381 if (!name->len || name->len > GFS2_FNAMESIZE) 374 if (!name->len || name->len > GFS2_FNAMESIZE)
382 return ERR_PTR(-ENAMETOOLONG); 375 return ERR_PTR(-ENAMETOOLONG);
@@ -388,9 +381,12 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
388 return dir; 381 return dir;
389 } 382 }
390 383
391 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 384 if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
392 if (error) 385 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
393 return ERR_PTR(error); 386 if (error)
387 return ERR_PTR(error);
388 unlock = 1;
389 }
394 390
395 if (!is_root) { 391 if (!is_root) {
396 error = permission(dir, MAY_EXEC, NULL); 392 error = permission(dir, MAY_EXEC, NULL);
@@ -405,10 +401,11 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
405 inode = gfs2_inode_lookup(sb, &inum, type); 401 inode = gfs2_inode_lookup(sb, &inum, type);
406 402
407out: 403out:
408 gfs2_glock_dq_uninit(&d_gh); 404 if (unlock)
405 gfs2_glock_dq_uninit(&d_gh);
409 if (error == -ENOENT) 406 if (error == -ENOENT)
410 return NULL; 407 return NULL;
411 return inode; 408 return inode ? inode : ERR_PTR(error);
412} 409}
413 410
414static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino) 411static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
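
Note: gfs2_lookup_simple() now converts a NULL return from gfs2_lookupi() into ERR_PTR(-ENOENT), since, per the new comment, its callers test only for error pointers. The stand-alone model below mirrors the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention with local macros so the calling pattern can be seen in isolation; it is an illustration, not kernel code.

/* Stand-alone model (not kernel code) of the convention the new
 * gfs2_lookup_simple() follows: "not found" becomes an encoded error
 * pointer, so callers need only one error check. */
#include <errno.h>
#include <stdio.h>

#define DEMO_ERR_PTR(err)  ((void *)(long)(err))
#define DEMO_IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)
#define DEMO_PTR_ERR(ptr)  ((long)(ptr))

static void *demo_lookup(int found)
{
        static int object;

        if (!found)
                return DEMO_ERR_PTR(-ENOENT);   /* instead of NULL */
        return &object;
}

int main(void)
{
        void *p = demo_lookup(0);

        if (DEMO_IS_ERR(p))
                printf("lookup failed: %ld\n", DEMO_PTR_ERR(p));
        return 0;
}
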
diff --git a/fs/gfs2/lm.c b/fs/gfs2/lm.c
index effe4a337c1d..e30673dd37e0 100644
--- a/fs/gfs2/lm.c
+++ b/fs/gfs2/lm.c
@@ -104,15 +104,9 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
104 vprintk(fmt, args); 104 vprintk(fmt, args);
105 va_end(args); 105 va_end(args);
106 106
107 fs_err(sdp, "about to withdraw from the cluster\n"); 107 fs_err(sdp, "about to withdraw this file system\n");
108 BUG_ON(sdp->sd_args.ar_debug); 108 BUG_ON(sdp->sd_args.ar_debug);
109 109
110
111 fs_err(sdp, "waiting for outstanding I/O\n");
112
113 /* FIXME: suspend dm device so oustanding bio's complete
114 and all further io requests fail */
115
116 fs_err(sdp, "telling LM to withdraw\n"); 110 fs_err(sdp, "telling LM to withdraw\n");
117 gfs2_withdraw_lockproto(&sdp->sd_lockstruct); 111 gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
118 fs_err(sdp, "withdrawn\n"); 112 fs_err(sdp, "withdrawn\n");
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index 33af707a4d3f..a87c7bf3c568 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -36,7 +36,7 @@
36 36
37#define GDLM_STRNAME_BYTES 24 37#define GDLM_STRNAME_BYTES 24
38#define GDLM_LVB_SIZE 32 38#define GDLM_LVB_SIZE 32
39#define GDLM_DROP_COUNT 50000 39#define GDLM_DROP_COUNT 200000
40#define GDLM_DROP_PERIOD 60 40#define GDLM_DROP_PERIOD 60
41#define GDLM_NAME_LEN 128 41#define GDLM_NAME_LEN 128
42 42
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
index 2194b1d5b5ec..a0e7eda643ed 100644
--- a/fs/gfs2/locking/dlm/main.c
+++ b/fs/gfs2/locking/dlm/main.c
@@ -11,9 +11,6 @@
11 11
12#include "lock_dlm.h" 12#include "lock_dlm.h"
13 13
14extern int gdlm_drop_count;
15extern int gdlm_drop_period;
16
17extern struct lm_lockops gdlm_ops; 14extern struct lm_lockops gdlm_ops;
18 15
19static int __init init_lock_dlm(void) 16static int __init init_lock_dlm(void)
@@ -40,9 +37,6 @@ static int __init init_lock_dlm(void)
40 return error; 37 return error;
41 } 38 }
42 39
43 gdlm_drop_count = GDLM_DROP_COUNT;
44 gdlm_drop_period = GDLM_DROP_PERIOD;
45
46 printk(KERN_INFO 40 printk(KERN_INFO
47 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__); 41 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
48 return 0; 42 return 0;
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index cdd1694e889b..1d8faa3da8af 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -9,8 +9,6 @@
9 9
10#include "lock_dlm.h" 10#include "lock_dlm.h"
11 11
12int gdlm_drop_count;
13int gdlm_drop_period;
14const struct lm_lockops gdlm_ops; 12const struct lm_lockops gdlm_ops;
15 13
16 14
@@ -24,8 +22,8 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
24 if (!ls) 22 if (!ls)
25 return NULL; 23 return NULL;
26 24
27 ls->drop_locks_count = gdlm_drop_count; 25 ls->drop_locks_count = GDLM_DROP_COUNT;
28 ls->drop_locks_period = gdlm_drop_period; 26 ls->drop_locks_period = GDLM_DROP_PERIOD;
29 ls->fscb = cb; 27 ls->fscb = cb;
30 ls->sdp = sdp; 28 ls->sdp = sdp;
31 ls->fsflags = flags; 29 ls->fsflags = flags;
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
index 29ae06f94944..4746b884662d 100644
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -116,6 +116,17 @@ static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
116 return sprintf(buf, "%d\n", ls->recover_jid_status); 116 return sprintf(buf, "%d\n", ls->recover_jid_status);
117} 117}
118 118
119static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
120{
121 return sprintf(buf, "%d\n", ls->drop_locks_count);
122}
123
124static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
125{
126 ls->drop_locks_count = simple_strtol(buf, NULL, 0);
127 return len;
128}
129
119struct gdlm_attr { 130struct gdlm_attr {
120 struct attribute attr; 131 struct attribute attr;
121 ssize_t (*show)(struct gdlm_ls *, char *); 132 ssize_t (*show)(struct gdlm_ls *, char *);
@@ -135,6 +146,7 @@ GDLM_ATTR(first_done, 0444, first_done_show, NULL);
135GDLM_ATTR(recover, 0644, recover_show, recover_store); 146GDLM_ATTR(recover, 0644, recover_show, recover_store);
136GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); 147GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
137GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); 148GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
149GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store);
138 150
139static struct attribute *gdlm_attrs[] = { 151static struct attribute *gdlm_attrs[] = {
140 &gdlm_attr_proto_name.attr, 152 &gdlm_attr_proto_name.attr,
@@ -147,6 +159,7 @@ static struct attribute *gdlm_attrs[] = {
147 &gdlm_attr_recover.attr, 159 &gdlm_attr_recover.attr,
148 &gdlm_attr_recover_done.attr, 160 &gdlm_attr_recover_done.attr,
149 &gdlm_attr_recover_status.attr, 161 &gdlm_attr_recover_status.attr,
162 &gdlm_attr_drop_count.attr,
150 NULL, 163 NULL,
151}; 164};
152 165
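
Note: the lock_dlm changes raise the default GDLM_DROP_COUNT from 50000 to 200000, drop the old module-wide gdlm_drop_count/gdlm_drop_period variables, and expose drop_locks_count through a new writable drop_count sysfs attribute whose store routine uses simple_strtol(). A user-space sketch of reading and adjusting it follows; the exact sysfs path is an assumption based on where the lock-module kobject is normally registered, so adjust it for your mount.

/* User-space sketch; the attribute path below is assumed, not taken from
 * this patch -- check your system for the lock_module directory. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/fs/gfs2/mycluster:myfs/lock_module/drop_count";
        char buf[32];
        FILE *f;

        f = fopen(path, "r");
        if (!f) {
                perror("open drop_count");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("current drop_count: %s", buf);
        fclose(f);

        f = fopen(path, "w");           /* needs root */
        if (f) {
                fputs("100000\n", f);   /* parsed by the store routine */
                fclose(f);
        }
        return 0;
}
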
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4d7f94d8c7bd..16bb4b4561ae 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -69,13 +69,16 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
69 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 69 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
70 struct gfs2_trans *tr; 70 struct gfs2_trans *tr;
71 71
72 if (!list_empty(&bd->bd_list_tr)) 72 gfs2_log_lock(sdp);
73 if (!list_empty(&bd->bd_list_tr)) {
74 gfs2_log_unlock(sdp);
73 return; 75 return;
74 76 }
75 tr = current->journal_info; 77 tr = current->journal_info;
76 tr->tr_touched = 1; 78 tr->tr_touched = 1;
77 tr->tr_num_buf++; 79 tr->tr_num_buf++;
78 list_add(&bd->bd_list_tr, &tr->tr_list_buf); 80 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
81 gfs2_log_unlock(sdp);
79 82
80 if (!list_empty(&le->le_list)) 83 if (!list_empty(&le->le_list))
81 return; 84 return;
@@ -84,7 +87,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
84 87
85 gfs2_meta_check(sdp, bd->bd_bh); 88 gfs2_meta_check(sdp, bd->bd_bh);
86 gfs2_pin(sdp, bd->bd_bh); 89 gfs2_pin(sdp, bd->bd_bh);
87
88 gfs2_log_lock(sdp); 90 gfs2_log_lock(sdp);
89 sdp->sd_log_num_buf++; 91 sdp->sd_log_num_buf++;
90 list_add(&le->le_list, &sdp->sd_log_le_buf); 92 list_add(&le->le_list, &sdp->sd_log_le_buf);
@@ -98,11 +100,13 @@ static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
98 struct list_head *head = &tr->tr_list_buf; 100 struct list_head *head = &tr->tr_list_buf;
99 struct gfs2_bufdata *bd; 101 struct gfs2_bufdata *bd;
100 102
103 gfs2_log_lock(sdp);
101 while (!list_empty(head)) { 104 while (!list_empty(head)) {
102 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr); 105 bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
103 list_del_init(&bd->bd_list_tr); 106 list_del_init(&bd->bd_list_tr);
104 tr->tr_num_buf--; 107 tr->tr_num_buf--;
105 } 108 }
109 gfs2_log_unlock(sdp);
106 gfs2_assert_warn(sdp, !tr->tr_num_buf); 110 gfs2_assert_warn(sdp, !tr->tr_num_buf);
107} 111}
108 112
@@ -462,13 +466,17 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
462 struct address_space *mapping = bd->bd_bh->b_page->mapping; 466 struct address_space *mapping = bd->bd_bh->b_page->mapping;
463 struct gfs2_inode *ip = GFS2_I(mapping->host); 467 struct gfs2_inode *ip = GFS2_I(mapping->host);
464 468
469 gfs2_log_lock(sdp);
465 tr->tr_touched = 1; 470 tr->tr_touched = 1;
466 if (list_empty(&bd->bd_list_tr) && 471 if (list_empty(&bd->bd_list_tr) &&
467 (ip->i_di.di_flags & GFS2_DIF_JDATA)) { 472 (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
468 tr->tr_num_buf++; 473 tr->tr_num_buf++;
469 list_add(&bd->bd_list_tr, &tr->tr_list_buf); 474 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
475 gfs2_log_unlock(sdp);
470 gfs2_pin(sdp, bd->bd_bh); 476 gfs2_pin(sdp, bd->bd_bh);
471 tr->tr_num_buf_new++; 477 tr->tr_num_buf_new++;
478 } else {
479 gfs2_log_unlock(sdp);
472 } 480 }
473 gfs2_trans_add_gl(bd->bd_gl); 481 gfs2_trans_add_gl(bd->bd_gl);
474 gfs2_log_lock(sdp); 482 gfs2_log_lock(sdp);
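
Note: the lops.c hunks widen the gfs2_log_lock() coverage so that the list_empty() test on bd_list_tr and the matching list_add() happen under a single lock acquisition, instead of testing outside the lock and inserting later. The stand-alone sketch below shows the check-then-insert pattern the hunks enforce; it is a model of the idea only -- the real lock is a spinlock inside the superblock, not a pthread mutex.

/* Stand-alone sketch (not gfs2 code): the emptiness test and the insertion
 * sit under one lock, so two contexts cannot both pass the test and queue
 * the same element twice. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static int on_list;      /* stands in for !list_empty(&bd->bd_list_tr) */
static int insertions;

static void *add_to_transaction(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&log_lock);
        if (!on_list) {          /* test ...                        */
                on_list = 1;     /* ... and insert under one lock   */
                insertions++;
        }
        pthread_mutex_unlock(&log_lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, add_to_transaction, NULL);
        pthread_create(&b, NULL, add_to_transaction, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("insertions: %d (always 1 with the lock held across the test)\n",
               insertions);
        return 0;
}
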
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index d8d69a72a10d..56e33590b656 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -16,6 +16,7 @@
16#include <linux/pagevec.h> 16#include <linux/pagevec.h>
17#include <linux/mpage.h> 17#include <linux/mpage.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/writeback.h>
19#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
20#include <linux/lm_interface.h> 21#include <linux/lm_interface.h>
21 22
@@ -157,6 +158,32 @@ out_ignore:
157} 158}
158 159
159/** 160/**
161 * gfs2_writepages - Write a bunch of dirty pages back to disk
162 * @mapping: The mapping to write
163 * @wbc: Write-back control
164 *
165 * For journaled files and/or ordered writes this just falls back to the
166 * kernel's default writepages path for now. We will probably want to change
167 * that eventually (i.e. when we look at allocate on flush).
168 *
169 * For the data=writeback case though we can already ignore buffer heads
170 * and write whole extents at once. This is a big reduction in the
171 * number of I/O requests we send and the bmap calls we make in this case.
172 */
173static int gfs2_writepages(struct address_space *mapping,
174 struct writeback_control *wbc)
175{
176 struct inode *inode = mapping->host;
177 struct gfs2_inode *ip = GFS2_I(inode);
178 struct gfs2_sbd *sdp = GFS2_SB(inode);
179
180 if (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK && !gfs2_is_jdata(ip))
181 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
182
183 return generic_writepages(mapping, wbc);
184}
185
186/**
160 * stuffed_readpage - Fill in a Linux page with stuffed file data 187 * stuffed_readpage - Fill in a Linux page with stuffed file data
161 * @ip: the inode 188 * @ip: the inode
162 * @page: the page 189 * @page: the page
@@ -256,7 +283,7 @@ out_unlock:
256 * the page lock and the glock) and return having done no I/O. Its 283 * the page lock and the glock) and return having done no I/O. Its
257 * obviously not something we'd want to do on too regular a basis. 284 * obviously not something we'd want to do on too regular a basis.
258 * Any I/O we ignore at this time will be done via readpage later. 285 * Any I/O we ignore at this time will be done via readpage later.
259 * 2. We have to handle stuffed files here too. 286 * 2. We don't handle stuffed files here we let readpage do the honours.
260 * 3. mpage_readpages() does most of the heavy lifting in the common case. 287 * 3. mpage_readpages() does most of the heavy lifting in the common case.
261 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places. 288 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
262 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as 289 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
@@ -269,8 +296,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
269 struct gfs2_inode *ip = GFS2_I(inode); 296 struct gfs2_inode *ip = GFS2_I(inode);
270 struct gfs2_sbd *sdp = GFS2_SB(inode); 297 struct gfs2_sbd *sdp = GFS2_SB(inode);
271 struct gfs2_holder gh; 298 struct gfs2_holder gh;
272 unsigned page_idx; 299 int ret = 0;
273 int ret;
274 int do_unlock = 0; 300 int do_unlock = 0;
275 301
276 if (likely(file != &gfs2_internal_file_sentinel)) { 302 if (likely(file != &gfs2_internal_file_sentinel)) {
@@ -289,29 +315,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
289 goto out_unlock; 315 goto out_unlock;
290 } 316 }
291skip_lock: 317skip_lock:
292 if (gfs2_is_stuffed(ip)) { 318 if (!gfs2_is_stuffed(ip))
293 struct pagevec lru_pvec;
294 pagevec_init(&lru_pvec, 0);
295 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
296 struct page *page = list_entry(pages->prev, struct page, lru);
297 prefetchw(&page->flags);
298 list_del(&page->lru);
299 if (!add_to_page_cache(page, mapping,
300 page->index, GFP_KERNEL)) {
301 ret = stuffed_readpage(ip, page);
302 unlock_page(page);
303 if (!pagevec_add(&lru_pvec, page))
304 __pagevec_lru_add(&lru_pvec);
305 } else {
306 page_cache_release(page);
307 }
308 }
309 pagevec_lru_add(&lru_pvec);
310 ret = 0;
311 } else {
312 /* What we really want to do .... */
313 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block); 319 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
314 }
315 320
316 if (do_unlock) { 321 if (do_unlock) {
317 gfs2_glock_dq_m(1, &gh); 322 gfs2_glock_dq_m(1, &gh);
@@ -356,8 +361,10 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
356 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh); 361 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
357 error = gfs2_glock_nq_atime(&ip->i_gh); 362 error = gfs2_glock_nq_atime(&ip->i_gh);
358 if (unlikely(error)) { 363 if (unlikely(error)) {
359 if (error == GLR_TRYFAILED) 364 if (error == GLR_TRYFAILED) {
365 unlock_page(page);
360 error = AOP_TRUNCATED_PAGE; 366 error = AOP_TRUNCATED_PAGE;
367 }
361 goto out_uninit; 368 goto out_uninit;
362 } 369 }
363 370
@@ -594,6 +601,36 @@ static void gfs2_invalidatepage(struct page *page, unsigned long offset)
594 return; 601 return;
595} 602}
596 603
604/**
605 * gfs2_ok_for_dio - check that dio is valid on this file
606 * @ip: The inode
607 * @rw: READ or WRITE
608 * @offset: The offset at which we are reading or writing
609 *
610 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
611 * 1 (to accept the i/o request)
612 */
613static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
614{
615 /*
616 * Should we return an error here? I can't see that O_DIRECT for
617 * a journaled file makes any sense. For now we'll silently fall
618 * back to buffered I/O, likewise we do the same for stuffed
619 * files since they are (a) small and (b) unaligned.
620 */
621 if (gfs2_is_jdata(ip))
622 return 0;
623
624 if (gfs2_is_stuffed(ip))
625 return 0;
626
627 if (offset > i_size_read(&ip->i_inode))
628 return 0;
629 return 1;
630}
631
632
633
597static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, 634static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
598 const struct iovec *iov, loff_t offset, 635 const struct iovec *iov, loff_t offset,
599 unsigned long nr_segs) 636 unsigned long nr_segs)
@@ -604,42 +641,28 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
604 struct gfs2_holder gh; 641 struct gfs2_holder gh;
605 int rv; 642 int rv;
606 643
607 if (rw == READ)
608 mutex_lock(&inode->i_mutex);
609 /* 644 /*
610 * Shared lock, even if its a write, since we do no allocation 645 * Deferred lock, even if its a write, since we do no allocation
611 * on this path. All we need change is atime. 646 * on this path. All we need change is atime, and this lock mode
647 * ensures that other nodes have flushed their buffered read caches
648 * (i.e. their page cache entries for this inode). We do not,
649 * unfortunately have the option of only flushing a range like
650 * the VFS does.
612 */ 651 */
613 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 652 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
614 rv = gfs2_glock_nq_atime(&gh); 653 rv = gfs2_glock_nq_atime(&gh);
615 if (rv) 654 if (rv)
616 goto out; 655 return rv;
617 656 rv = gfs2_ok_for_dio(ip, rw, offset);
618 if (offset > i_size_read(inode)) 657 if (rv != 1)
619 goto out; 658 goto out; /* dio not valid, fall back to buffered i/o */
620 659
621 /* 660 rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
622 * Should we return an error here? I can't see that O_DIRECT for 661 iov, offset, nr_segs,
623 * a journaled file makes any sense. For now we'll silently fall 662 gfs2_get_block_direct, NULL);
624 * back to buffered I/O, likewise we do the same for stuffed
625 * files since they are (a) small and (b) unaligned.
626 */
627 if (gfs2_is_jdata(ip))
628 goto out;
629
630 if (gfs2_is_stuffed(ip))
631 goto out;
632
633 rv = blockdev_direct_IO_own_locking(rw, iocb, inode,
634 inode->i_sb->s_bdev,
635 iov, offset, nr_segs,
636 gfs2_get_block_direct, NULL);
637out: 663out:
638 gfs2_glock_dq_m(1, &gh); 664 gfs2_glock_dq_m(1, &gh);
639 gfs2_holder_uninit(&gh); 665 gfs2_holder_uninit(&gh);
640 if (rw == READ)
641 mutex_unlock(&inode->i_mutex);
642
643 return rv; 666 return rv;
644} 667}
645 668
@@ -763,6 +786,7 @@ out:
763 786
764const struct address_space_operations gfs2_file_aops = { 787const struct address_space_operations gfs2_file_aops = {
765 .writepage = gfs2_writepage, 788 .writepage = gfs2_writepage,
789 .writepages = gfs2_writepages,
766 .readpage = gfs2_readpage, 790 .readpage = gfs2_readpage,
767 .readpages = gfs2_readpages, 791 .readpages = gfs2_readpages,
768 .sync_page = block_sync_page, 792 .sync_page = block_sync_page,
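
Note: in ops_address.c the direct-I/O path now takes the inode glock in LM_ST_DEFERRED (instead of LM_ST_SHARED plus i_mutex for reads), moves the validity checks into gfs2_ok_for_dio(), and switches to blockdev_direct_IO_no_locking(); journaled and stuffed files quietly fall back to buffered I/O. From user space nothing changes beyond the usual O_DIRECT rules. The sketch below is a minimal aligned read; the 4096-byte alignment is assumed rather than queried from the underlying device.

/* User-space sketch of an O_DIRECT read against a GFS2 file. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        void *buf;
        int fd;
        ssize_t n;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY | O_DIRECT);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (posix_memalign(&buf, 4096, 4096)) {
                close(fd);
                return 1;
        }
        n = read(fd, buf, 4096);        /* the kernel falls back to buffered
                                         * I/O for stuffed or jdata files */
        printf("read %zd bytes\n", n);
        free(buf);
        close(fd);
        return 0;
}
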
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index d355899585d8..9187eb174b43 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -46,6 +46,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
46 struct gfs2_inum_host inum; 46 struct gfs2_inum_host inum;
47 unsigned int type; 47 unsigned int type;
48 int error; 48 int error;
49 int had_lock=0;
49 50
50 if (inode && is_bad_inode(inode)) 51 if (inode && is_bad_inode(inode))
51 goto invalid; 52 goto invalid;
@@ -53,9 +54,12 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
53 if (sdp->sd_args.ar_localcaching) 54 if (sdp->sd_args.ar_localcaching)
54 goto valid; 55 goto valid;
55 56
56 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 57 had_lock = gfs2_glock_is_locked_by_me(dip->i_gl);
57 if (error) 58 if (!had_lock) {
58 goto fail; 59 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
60 if (error)
61 goto fail;
62 }
59 63
60 error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type); 64 error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type);
61 switch (error) { 65 switch (error) {
@@ -82,13 +86,15 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
82 } 86 }
83 87
84valid_gunlock: 88valid_gunlock:
85 gfs2_glock_dq_uninit(&d_gh); 89 if (!had_lock)
90 gfs2_glock_dq_uninit(&d_gh);
86valid: 91valid:
87 dput(parent); 92 dput(parent);
88 return 1; 93 return 1;
89 94
90invalid_gunlock: 95invalid_gunlock:
91 gfs2_glock_dq_uninit(&d_gh); 96 if (!had_lock)
97 gfs2_glock_dq_uninit(&d_gh);
92invalid: 98invalid:
93 if (inode && S_ISDIR(inode->i_mode)) { 99 if (inode && S_ISDIR(inode->i_mode)) {
94 if (have_submounts(dentry)) 100 if (have_submounts(dentry))
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b4e7b8775315..4855e8cca622 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -22,6 +22,7 @@
22#include "glock.h" 22#include "glock.h"
23#include "glops.h" 23#include "glops.h"
24#include "inode.h" 24#include "inode.h"
25#include "ops_dentry.h"
25#include "ops_export.h" 26#include "ops_export.h"
26#include "rgrp.h" 27#include "rgrp.h"
27#include "util.h" 28#include "util.h"
@@ -112,13 +113,12 @@ struct get_name_filldir {
112 char *name; 113 char *name;
113}; 114};
114 115
115static int get_name_filldir(void *opaque, const char *name, unsigned int length, 116static int get_name_filldir(void *opaque, const char *name, int length,
116 u64 offset, struct gfs2_inum_host *inum, 117 loff_t offset, u64 inum, unsigned int type)
117 unsigned int type)
118{ 118{
119 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque; 119 struct get_name_filldir *gnfd = opaque;
120 120
121 if (!gfs2_inum_equal(inum, &gnfd->inum)) 121 if (inum != gnfd->inum.no_addr)
122 return 0; 122 return 0;
123 123
124 memcpy(gnfd->name, name, length); 124 memcpy(gnfd->name, name, length);
@@ -189,6 +189,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
189 return ERR_PTR(-ENOMEM); 189 return ERR_PTR(-ENOMEM);
190 } 190 }
191 191
192 dentry->d_op = &gfs2_dops;
192 return dentry; 193 return dentry;
193} 194}
194 195
@@ -215,8 +216,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
215 } 216 }
216 217
217 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, 218 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops,
218 LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL, 219 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
219 &i_gh);
220 if (error) 220 if (error)
221 return ERR_PTR(error); 221 return ERR_PTR(error);
222 222
@@ -269,6 +269,7 @@ out_inode:
269 return ERR_PTR(-ENOMEM); 269 return ERR_PTR(-ENOMEM);
270 } 270 }
271 271
272 dentry->d_op = &gfs2_dops;
272 return dentry; 273 return dentry;
273 274
274fail_rgd: 275fail_rgd:
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index faa07e4b97d0..c996aa739a05 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -43,15 +43,6 @@
43#include "util.h" 43#include "util.h"
44#include "eaops.h" 44#include "eaops.h"
45 45
46/* For regular, non-NFS */
47struct filldir_reg {
48 struct gfs2_sbd *fdr_sbd;
49 int fdr_prefetch;
50
51 filldir_t fdr_filldir;
52 void *fdr_opaque;
53};
54
55/* 46/*
56 * Most fields left uninitialised to catch anybody who tries to 47 * Most fields left uninitialised to catch anybody who tries to
57 * use them. f_flags set to prevent file_accessed() from touching 48 * use them. f_flags set to prevent file_accessed() from touching
@@ -128,41 +119,6 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
128} 119}
129 120
130/** 121/**
131 * filldir_func - Report a directory entry to the caller of gfs2_dir_read()
132 * @opaque: opaque data used by the function
133 * @name: the name of the directory entry
134 * @length: the length of the name
135 * @offset: the entry's offset in the directory
136 * @inum: the inode number the entry points to
137 * @type: the type of inode the entry points to
138 *
139 * Returns: 0 on success, 1 if buffer full
140 */
141
142static int filldir_func(void *opaque, const char *name, unsigned int length,
143 u64 offset, struct gfs2_inum_host *inum,
144 unsigned int type)
145{
146 struct filldir_reg *fdr = (struct filldir_reg *)opaque;
147 struct gfs2_sbd *sdp = fdr->fdr_sbd;
148 int error;
149
150 error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
151 inum->no_addr, type);
152 if (error)
153 return 1;
154
155 if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
156 gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_inode_glops,
157 LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
158 gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_iopen_glops,
159 LM_ST_SHARED, LM_FLAG_TRY);
160 }
161
162 return 0;
163}
164
165/**
166 * gfs2_readdir - Read directory entries from a directory 122 * gfs2_readdir - Read directory entries from a directory
167 * @file: The directory to read from 123 * @file: The directory to read from
168 * @dirent: Buffer for dirents 124 * @dirent: Buffer for dirents
@@ -175,16 +131,10 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
175{ 131{
176 struct inode *dir = file->f_mapping->host; 132 struct inode *dir = file->f_mapping->host;
177 struct gfs2_inode *dip = GFS2_I(dir); 133 struct gfs2_inode *dip = GFS2_I(dir);
178 struct filldir_reg fdr;
179 struct gfs2_holder d_gh; 134 struct gfs2_holder d_gh;
180 u64 offset = file->f_pos; 135 u64 offset = file->f_pos;
181 int error; 136 int error;
182 137
183 fdr.fdr_sbd = GFS2_SB(dir);
184 fdr.fdr_prefetch = 1;
185 fdr.fdr_filldir = filldir;
186 fdr.fdr_opaque = dirent;
187
188 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh); 138 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
189 error = gfs2_glock_nq_atime(&d_gh); 139 error = gfs2_glock_nq_atime(&d_gh);
190 if (error) { 140 if (error) {
@@ -192,7 +142,7 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
192 return error; 142 return error;
193 } 143 }
194 144
195 error = gfs2_dir_read(dir, &offset, &fdr, filldir_func); 145 error = gfs2_dir_read(dir, &offset, dirent, filldir);
196 146
197 gfs2_glock_dq_uninit(&d_gh); 147 gfs2_glock_dq_uninit(&d_gh);
198 148
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 636dda4c7d38..f40a84807d75 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -264,13 +264,23 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
264 struct gfs2_inode *dip = GFS2_I(dir); 264 struct gfs2_inode *dip = GFS2_I(dir);
265 struct gfs2_sbd *sdp = GFS2_SB(dir); 265 struct gfs2_sbd *sdp = GFS2_SB(dir);
266 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 266 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
267 struct gfs2_holder ghs[2]; 267 struct gfs2_holder ghs[3];
268 struct gfs2_rgrpd *rgd;
269 struct gfs2_holder ri_gh;
268 int error; 270 int error;
269 271
272 error = gfs2_rindex_hold(sdp, &ri_gh);
273 if (error)
274 return error;
275
270 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 276 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
271 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 277 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
272 278
273 error = gfs2_glock_nq_m(2, ghs); 279 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
280 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
281
282
283 error = gfs2_glock_nq_m(3, ghs);
274 if (error) 284 if (error)
275 goto out; 285 goto out;
276 286
@@ -291,10 +301,12 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
291out_end_trans: 301out_end_trans:
292 gfs2_trans_end(sdp); 302 gfs2_trans_end(sdp);
293out_gunlock: 303out_gunlock:
294 gfs2_glock_dq_m(2, ghs); 304 gfs2_glock_dq_m(3, ghs);
295out: 305out:
296 gfs2_holder_uninit(ghs); 306 gfs2_holder_uninit(ghs);
297 gfs2_holder_uninit(ghs + 1); 307 gfs2_holder_uninit(ghs + 1);
308 gfs2_holder_uninit(ghs + 2);
309 gfs2_glock_dq_uninit(&ri_gh);
298 return error; 310 return error;
299} 311}
300 312
@@ -449,13 +461,22 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
449 struct gfs2_inode *dip = GFS2_I(dir); 461 struct gfs2_inode *dip = GFS2_I(dir);
450 struct gfs2_sbd *sdp = GFS2_SB(dir); 462 struct gfs2_sbd *sdp = GFS2_SB(dir);
451 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 463 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
452 struct gfs2_holder ghs[2]; 464 struct gfs2_holder ghs[3];
465 struct gfs2_rgrpd *rgd;
466 struct gfs2_holder ri_gh;
453 int error; 467 int error;
454 468
469
470 error = gfs2_rindex_hold(sdp, &ri_gh);
471 if (error)
472 return error;
455 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 473 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
456 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 474 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
457 475
458 error = gfs2_glock_nq_m(2, ghs); 476 rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
477 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
478
479 error = gfs2_glock_nq_m(3, ghs);
459 if (error) 480 if (error)
460 goto out; 481 goto out;
461 482
@@ -483,10 +504,12 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
483 gfs2_trans_end(sdp); 504 gfs2_trans_end(sdp);
484 505
485out_gunlock: 506out_gunlock:
486 gfs2_glock_dq_m(2, ghs); 507 gfs2_glock_dq_m(3, ghs);
487out: 508out:
488 gfs2_holder_uninit(ghs); 509 gfs2_holder_uninit(ghs);
489 gfs2_holder_uninit(ghs + 1); 510 gfs2_holder_uninit(ghs + 1);
511 gfs2_holder_uninit(ghs + 2);
512 gfs2_glock_dq_uninit(&ri_gh);
490 return error; 513 return error;
491} 514}
492 515
@@ -547,7 +570,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
547 struct gfs2_inode *ip = GFS2_I(odentry->d_inode); 570 struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
548 struct gfs2_inode *nip = NULL; 571 struct gfs2_inode *nip = NULL;
549 struct gfs2_sbd *sdp = GFS2_SB(odir); 572 struct gfs2_sbd *sdp = GFS2_SB(odir);
550 struct gfs2_holder ghs[4], r_gh; 573 struct gfs2_holder ghs[5], r_gh;
574 struct gfs2_rgrpd *nrgd;
551 unsigned int num_gh; 575 unsigned int num_gh;
552 int dir_rename = 0; 576 int dir_rename = 0;
553 int alloc_required; 577 int alloc_required;
@@ -587,6 +611,13 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
587 if (nip) { 611 if (nip) {
588 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); 612 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
589 num_gh++; 613 num_gh++;
614 /* grab the resource lock for unlink flag twiddling
615 * this is the case of the target file already existing
616 * so we unlink before doing the rename
617 */
618 nrgd = gfs2_blk2rgrpd(sdp, nip->i_num.no_addr);
619 if (nrgd)
620 gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++);
590 } 621 }
591 622
592 error = gfs2_glock_nq_m(num_gh, ghs); 623 error = gfs2_glock_nq_m(num_gh, ghs);
@@ -684,12 +715,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
684 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + 715 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
685 al->al_rgd->rd_ri.ri_length + 716 al->al_rgd->rd_ri.ri_length +
686 4 * RES_DINODE + 4 * RES_LEAF + 717 4 * RES_DINODE + 4 * RES_LEAF +
687 RES_STATFS + RES_QUOTA, 0); 718 RES_STATFS + RES_QUOTA + 4, 0);
688 if (error) 719 if (error)
689 goto out_ipreserv; 720 goto out_ipreserv;
690 } else { 721 } else {
691 error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 722 error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
692 5 * RES_LEAF, 0); 723 5 * RES_LEAF + 4, 0);
693 if (error) 724 if (error)
694 goto out_gunlock; 725 goto out_gunlock;
695 } 726 }
@@ -728,7 +759,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
728 error = gfs2_meta_inode_buffer(ip, &dibh); 759 error = gfs2_meta_inode_buffer(ip, &dibh);
729 if (error) 760 if (error)
730 goto out_end_trans; 761 goto out_end_trans;
731 ip->i_inode.i_ctime.tv_sec = get_seconds(); 762 ip->i_inode.i_ctime = CURRENT_TIME_SEC;
732 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 763 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
733 gfs2_dinode_out(ip, dibh->b_data); 764 gfs2_dinode_out(ip, dibh->b_data);
734 brelse(dibh); 765 brelse(dibh);
@@ -1018,7 +1049,7 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1018 } 1049 }
1019 1050
1020 generic_fillattr(inode, stat); 1051 generic_fillattr(inode, stat);
1021 if (unlock); 1052 if (unlock)
1022 gfs2_glock_dq_uninit(&gh); 1053 gfs2_glock_dq_uninit(&gh);
1023 1054
1024 return 0; 1055 return 0;
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 7685b46f934b..47369d011214 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -173,6 +173,9 @@ static void gfs2_write_super_lockfs(struct super_block *sb)
173 struct gfs2_sbd *sdp = sb->s_fs_info; 173 struct gfs2_sbd *sdp = sb->s_fs_info;
174 int error; 174 int error;
175 175
176 if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
177 return;
178
176 for (;;) { 179 for (;;) {
177 error = gfs2_freeze_fs(sdp); 180 error = gfs2_freeze_fs(sdp);
178 if (!error) 181 if (!error)
@@ -426,6 +429,12 @@ static void gfs2_delete_inode(struct inode *inode)
426 } 429 }
427 430
428 error = gfs2_dinode_dealloc(ip); 431 error = gfs2_dinode_dealloc(ip);
432 /*
433 * Must do this before unlock to avoid trying to write back
434 * potentially dirty data now that inode no longer exists
435 * on disk.
436 */
437 truncate_inode_pages(&inode->i_data, 0);
429 438
430out_unlock: 439out_unlock:
431 gfs2_glock_dq(&ip->i_iopen_gh); 440 gfs2_glock_dq(&ip->i_iopen_gh);
@@ -443,14 +452,12 @@ out:
443 452
444static struct inode *gfs2_alloc_inode(struct super_block *sb) 453static struct inode *gfs2_alloc_inode(struct super_block *sb)
445{ 454{
446 struct gfs2_sbd *sdp = sb->s_fs_info;
447 struct gfs2_inode *ip; 455 struct gfs2_inode *ip;
448 456
449 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL); 457 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
450 if (ip) { 458 if (ip) {
451 ip->i_flags = 0; 459 ip->i_flags = 0;
452 ip->i_gl = NULL; 460 ip->i_gl = NULL;
453 ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
454 ip->i_last_pfault = jiffies; 461 ip->i_last_pfault = jiffies;
455 } 462 }
456 return &ip->i_inode; 463 return &ip->i_inode;
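
The new comment in gfs2_delete_inode spells out an ordering rule: cached pages are dropped while the exclusive glock is still held, so nothing can try to write them back once the dinode has been deallocated on disk. A rough userspace sketch of that rule, with a plain mutex standing in for the glock and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;
static char page_cache[16] = "dirty data";
static int on_disk = 1;

static void delete_object(void)
{
	pthread_mutex_lock(&glock);
	on_disk = 0;				/* dinode_dealloc step */
	memset(page_cache, 0, sizeof(page_cache));	/* truncate_inode_pages step:
						 * discard cached data before the
						 * lock is released */
	pthread_mutex_unlock(&glock);		/* writers run only now; nothing
						 * is left to write back */
}

int main(void)
{
	delete_object();
	printf("on_disk=%d cache=\"%s\"\n", on_disk, page_cache);
	return 0;
}
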
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 45a5f11fc39a..14b380fb0602 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -28,34 +28,13 @@
28#include "trans.h" 28#include "trans.h"
29#include "util.h" 29#include "util.h"
30 30
31static void pfault_be_greedy(struct gfs2_inode *ip)
32{
33 unsigned int time;
34
35 spin_lock(&ip->i_spin);
36 time = ip->i_greedy;
37 ip->i_last_pfault = jiffies;
38 spin_unlock(&ip->i_spin);
39
40 igrab(&ip->i_inode);
41 if (gfs2_glock_be_greedy(ip->i_gl, time))
42 iput(&ip->i_inode);
43}
44
45static struct page *gfs2_private_nopage(struct vm_area_struct *area, 31static struct page *gfs2_private_nopage(struct vm_area_struct *area,
46 unsigned long address, int *type) 32 unsigned long address, int *type)
47{ 33{
48 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host); 34 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);
49 struct page *result;
50 35
51 set_bit(GIF_PAGED, &ip->i_flags); 36 set_bit(GIF_PAGED, &ip->i_flags);
52 37 return filemap_nopage(area, address, type);
53 result = filemap_nopage(area, address, type);
54
55 if (result && result != NOPAGE_OOM)
56 pfault_be_greedy(ip);
57
58 return result;
59} 38}
60 39
61static int alloc_page_backing(struct gfs2_inode *ip, struct page *page) 40static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
@@ -167,7 +146,6 @@ static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
167 set_page_dirty(result); 146 set_page_dirty(result);
168 } 147 }
169 148
170 pfault_be_greedy(ip);
171out: 149out:
172 gfs2_glock_dq_uninit(&i_gh); 150 gfs2_glock_dq_uninit(&i_gh);
173 151
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 43a24f2e5905..70f424fcf1cd 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -71,17 +71,12 @@ void gfs2_tune_init(struct gfs2_tune *gt)
71 gt->gt_atime_quantum = 3600; 71 gt->gt_atime_quantum = 3600;
72 gt->gt_new_files_jdata = 0; 72 gt->gt_new_files_jdata = 0;
73 gt->gt_new_files_directio = 0; 73 gt->gt_new_files_directio = 0;
74 gt->gt_max_atomic_write = 4 << 20;
75 gt->gt_max_readahead = 1 << 18; 74 gt->gt_max_readahead = 1 << 18;
76 gt->gt_lockdump_size = 131072; 75 gt->gt_lockdump_size = 131072;
77 gt->gt_stall_secs = 600; 76 gt->gt_stall_secs = 600;
78 gt->gt_complain_secs = 10; 77 gt->gt_complain_secs = 10;
79 gt->gt_reclaim_limit = 5000; 78 gt->gt_reclaim_limit = 5000;
80 gt->gt_entries_per_readdir = 32; 79 gt->gt_entries_per_readdir = 32;
81 gt->gt_prefetch_secs = 10;
82 gt->gt_greedy_default = HZ / 10;
83 gt->gt_greedy_quantum = HZ / 40;
84 gt->gt_greedy_max = HZ / 4;
85 gt->gt_statfs_quantum = 30; 80 gt->gt_statfs_quantum = 30;
86 gt->gt_statfs_slow = 0; 81 gt->gt_statfs_slow = 0;
87} 82}
@@ -359,8 +354,7 @@ int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
359 mutex_lock(&sdp->sd_jindex_mutex); 354 mutex_lock(&sdp->sd_jindex_mutex);
360 355
361 for (;;) { 356 for (;;) {
362 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 357 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
363 GL_LOCAL_EXCL, ji_gh);
364 if (error) 358 if (error)
365 break; 359 break;
366 360
@@ -529,8 +523,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
529 struct gfs2_log_header_host head; 523 struct gfs2_log_header_host head;
530 int error; 524 int error;
531 525
532 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 526 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
533 GL_LOCAL_EXCL, &t_gh);
534 if (error) 527 if (error)
535 return error; 528 return error;
536 529
@@ -583,9 +576,8 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
583 gfs2_quota_sync(sdp); 576 gfs2_quota_sync(sdp);
584 gfs2_statfs_sync(sdp); 577 gfs2_statfs_sync(sdp);
585 578
586 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 579 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
587 GL_LOCAL_EXCL | GL_NOCACHE, 580 &t_gh);
588 &t_gh);
589 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) 581 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
590 return error; 582 return error;
591 583
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 983eaf1e06be..d01f9f0fda26 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -436,17 +436,12 @@ TUNE_ATTR(atime_quantum, 0);
436TUNE_ATTR(max_readahead, 0); 436TUNE_ATTR(max_readahead, 0);
437TUNE_ATTR(complain_secs, 0); 437TUNE_ATTR(complain_secs, 0);
438TUNE_ATTR(reclaim_limit, 0); 438TUNE_ATTR(reclaim_limit, 0);
439TUNE_ATTR(prefetch_secs, 0);
440TUNE_ATTR(statfs_slow, 0); 439TUNE_ATTR(statfs_slow, 0);
441TUNE_ATTR(new_files_jdata, 0); 440TUNE_ATTR(new_files_jdata, 0);
442TUNE_ATTR(new_files_directio, 0); 441TUNE_ATTR(new_files_directio, 0);
443TUNE_ATTR(quota_simul_sync, 1); 442TUNE_ATTR(quota_simul_sync, 1);
444TUNE_ATTR(quota_cache_secs, 1); 443TUNE_ATTR(quota_cache_secs, 1);
445TUNE_ATTR(max_atomic_write, 1);
446TUNE_ATTR(stall_secs, 1); 444TUNE_ATTR(stall_secs, 1);
447TUNE_ATTR(greedy_default, 1);
448TUNE_ATTR(greedy_quantum, 1);
449TUNE_ATTR(greedy_max, 1);
450TUNE_ATTR(statfs_quantum, 1); 445TUNE_ATTR(statfs_quantum, 1);
451TUNE_ATTR_DAEMON(scand_secs, scand_process); 446TUNE_ATTR_DAEMON(scand_secs, scand_process);
452TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process); 447TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
@@ -465,15 +460,10 @@ static struct attribute *tune_attrs[] = {
465 &tune_attr_max_readahead.attr, 460 &tune_attr_max_readahead.attr,
466 &tune_attr_complain_secs.attr, 461 &tune_attr_complain_secs.attr,
467 &tune_attr_reclaim_limit.attr, 462 &tune_attr_reclaim_limit.attr,
468 &tune_attr_prefetch_secs.attr,
469 &tune_attr_statfs_slow.attr, 463 &tune_attr_statfs_slow.attr,
470 &tune_attr_quota_simul_sync.attr, 464 &tune_attr_quota_simul_sync.attr,
471 &tune_attr_quota_cache_secs.attr, 465 &tune_attr_quota_cache_secs.attr,
472 &tune_attr_max_atomic_write.attr,
473 &tune_attr_stall_secs.attr, 466 &tune_attr_stall_secs.attr,
474 &tune_attr_greedy_default.attr,
475 &tune_attr_greedy_quantum.attr,
476 &tune_attr_greedy_max.attr,
477 &tune_attr_statfs_quantum.attr, 467 &tune_attr_statfs_quantum.attr,
478 &tune_attr_scand_secs.attr, 468 &tune_attr_scand_secs.attr,
479 &tune_attr_recoverd_secs.attr, 469 &tune_attr_recoverd_secs.attr,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index f5719117edfe..e285022f006c 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -182,9 +182,9 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
182 * Take appropriate lock on inode 182 * Take appropriate lock on inode
183 */ 183 */
184 if (create) 184 if (create)
185 IWRITE_LOCK(ip); 185 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
186 else 186 else
187 IREAD_LOCK(ip); 187 IREAD_LOCK(ip, RDWRLOCK_NORMAL);
188 188
189 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && 189 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) &&
190 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && 190 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) &&
@@ -359,7 +359,7 @@ void jfs_truncate(struct inode *ip)
359 359
360 nobh_truncate_page(ip->i_mapping, ip->i_size); 360 nobh_truncate_page(ip->i_mapping, ip->i_size);
361 361
362 IWRITE_LOCK(ip); 362 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
363 jfs_truncate_nolock(ip, ip->i_size); 363 jfs_truncate_nolock(ip, ip->i_size);
364 IWRITE_UNLOCK(ip); 364 IWRITE_UNLOCK(ip);
365} 365}
diff --git a/fs/jfs/jfs_debug.h b/fs/jfs/jfs_debug.h
index ddffbbd4d955..7378798f0b21 100644
--- a/fs/jfs/jfs_debug.h
+++ b/fs/jfs/jfs_debug.h
@@ -39,10 +39,6 @@ extern void jfs_proc_clean(void);
39/* 39/*
40 * assert with traditional printf/panic 40 * assert with traditional printf/panic
41 */ 41 */
42#ifdef CONFIG_KERNEL_ASSERTS
43/* kgdb stuff */
44#define assert(p) KERNEL_ASSERT(#p, p)
45#else
46#define assert(p) do { \ 42#define assert(p) do { \
47 if (!(p)) { \ 43 if (!(p)) { \
48 printk(KERN_CRIT "BUG at %s:%d assert(%s)\n", \ 44 printk(KERN_CRIT "BUG at %s:%d assert(%s)\n", \
@@ -50,7 +46,6 @@ extern void jfs_proc_clean(void);
50 BUG(); \ 46 BUG(); \
51 } \ 47 } \
52} while (0) 48} while (0)
53#endif
54 49
55/* 50/*
56 * debug ON 51 * debug ON
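
With the kgdb branch removed, assert() in jfs_debug.h is simply printk-plus-BUG() wrapped in do { ... } while (0), which keeps the macro a single statement so it can sit under an if/else without surprises. A userspace equivalent of that macro shape (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

#define jfs_assert(p) do {						\
	if (!(p)) {							\
		fprintf(stderr, "BUG at %s:%d assert(%s)\n",		\
			__FILE__, __LINE__, #p);			\
		abort();						\
	}								\
} while (0)

int main(void)
{
	int blocks = 8;

	if (blocks > 0)
		jfs_assert(blocks <= 16);	/* expands to one statement, so the
						 * else below still binds correctly */
	else
		puts("no blocks");

	puts("assert passed");
	return 0;
}
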
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 23546c8fd48b..82b0544bd76d 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -337,7 +337,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
337 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 337 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
338 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 338 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
339 339
340 IREAD_LOCK(ipbmap); 340 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
341 341
342 /* block to be freed better be within the mapsize. */ 342 /* block to be freed better be within the mapsize. */
343 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) { 343 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
@@ -733,7 +733,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
733 * allocation group size, try to allocate anywhere. 733 * allocation group size, try to allocate anywhere.
734 */ 734 */
735 if (l2nb > bmp->db_agl2size) { 735 if (l2nb > bmp->db_agl2size) {
736 IWRITE_LOCK(ipbmap); 736 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
737 737
738 rc = dbAllocAny(bmp, nblocks, l2nb, results); 738 rc = dbAllocAny(bmp, nblocks, l2nb, results);
739 739
@@ -774,7 +774,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
774 * the hint using a tiered strategy. 774 * the hint using a tiered strategy.
775 */ 775 */
776 if (nblocks <= BPERDMAP) { 776 if (nblocks <= BPERDMAP) {
777 IREAD_LOCK(ipbmap); 777 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
778 778
779 /* get the buffer for the dmap containing the hint. 779 /* get the buffer for the dmap containing the hint.
780 */ 780 */
@@ -844,7 +844,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
844 /* try to satisfy the allocation request with blocks within 844 /* try to satisfy the allocation request with blocks within
845 * the same allocation group as the hint. 845 * the same allocation group as the hint.
846 */ 846 */
847 IWRITE_LOCK(ipbmap); 847 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
848 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC) 848 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
849 goto write_unlock; 849 goto write_unlock;
850 850
@@ -856,7 +856,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
856 * Let dbNextAG recommend a preferred allocation group 856 * Let dbNextAG recommend a preferred allocation group
857 */ 857 */
858 agno = dbNextAG(ipbmap); 858 agno = dbNextAG(ipbmap);
859 IWRITE_LOCK(ipbmap); 859 IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);
860 860
861 /* Try to allocate within this allocation group. if that fails, try to 861 /* Try to allocate within this allocation group. if that fails, try to
862 * allocate anywhere in the map. 862 * allocate anywhere in the map.
@@ -900,7 +900,7 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
900 s64 lblkno; 900 s64 lblkno;
901 struct metapage *mp; 901 struct metapage *mp;
902 902
903 IREAD_LOCK(ipbmap); 903 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
904 904
905 /* 905 /*
906 * validate extent request: 906 * validate extent request:
@@ -1050,7 +1050,7 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
1050 */ 1050 */
1051 extblkno = lastblkno + 1; 1051 extblkno = lastblkno + 1;
1052 1052
1053 IREAD_LOCK(ipbmap); 1053 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
1054 1054
1055 /* better be within the file system */ 1055 /* better be within the file system */
1056 bmp = sbi->bmap; 1056 bmp = sbi->bmap;
@@ -3116,7 +3116,7 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
3116 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; 3116 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
3117 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; 3117 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
3118 3118
3119 IREAD_LOCK(ipbmap); 3119 IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);
3120 3120
3121 /* block to be allocated better be within the mapsize. */ 3121 /* block to be allocated better be within the mapsize. */
3122 ASSERT(nblocks <= bmp->db_mapsize - blkno); 3122 ASSERT(nblocks <= bmp->db_mapsize - blkno);
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 53f63b47a6d3..aa5124b643b1 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -331,7 +331,7 @@ int diRead(struct inode *ip)
331 331
332 /* read the iag */ 332 /* read the iag */
333 imap = JFS_IP(ipimap)->i_imap; 333 imap = JFS_IP(ipimap)->i_imap;
334 IREAD_LOCK(ipimap); 334 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
335 rc = diIAGRead(imap, iagno, &mp); 335 rc = diIAGRead(imap, iagno, &mp);
336 IREAD_UNLOCK(ipimap); 336 IREAD_UNLOCK(ipimap);
337 if (rc) { 337 if (rc) {
@@ -920,7 +920,7 @@ int diFree(struct inode *ip)
920 /* Obtain read lock in imap inode. Don't release it until we have 920 /* Obtain read lock in imap inode. Don't release it until we have
921 * read all of the IAG's that we are going to. 921 * read all of the IAG's that we are going to.
922 */ 922 */
923 IREAD_LOCK(ipimap); 923 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
924 924
925 /* read the iag. 925 /* read the iag.
926 */ 926 */
@@ -1415,7 +1415,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
1415 AG_LOCK(imap, agno); 1415 AG_LOCK(imap, agno);
1416 1416
1417 /* Get read lock on imap inode */ 1417 /* Get read lock on imap inode */
1418 IREAD_LOCK(ipimap); 1418 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
1419 1419
1420 /* get the iag number and read the iag */ 1420 /* get the iag number and read the iag */
1421 iagno = INOTOIAG(inum); 1421 iagno = INOTOIAG(inum);
@@ -1808,7 +1808,7 @@ static int diAllocIno(struct inomap * imap, int agno, struct inode *ip)
1808 return -ENOSPC; 1808 return -ENOSPC;
1809 1809
1810 /* obtain read lock on imap inode */ 1810 /* obtain read lock on imap inode */
1811 IREAD_LOCK(imap->im_ipimap); 1811 IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
1812 1812
1813 /* read the iag at the head of the list. 1813 /* read the iag at the head of the list.
1814 */ 1814 */
@@ -1946,7 +1946,7 @@ static int diAllocExt(struct inomap * imap, int agno, struct inode *ip)
1946 } else { 1946 } else {
1947 /* read the iag. 1947 /* read the iag.
1948 */ 1948 */
1949 IREAD_LOCK(imap->im_ipimap); 1949 IREAD_LOCK(imap->im_ipimap, RDWRLOCK_IMAP);
1950 if ((rc = diIAGRead(imap, iagno, &mp))) { 1950 if ((rc = diIAGRead(imap, iagno, &mp))) {
1951 IREAD_UNLOCK(imap->im_ipimap); 1951 IREAD_UNLOCK(imap->im_ipimap);
1952 jfs_error(ip->i_sb, "diAllocExt: error reading iag"); 1952 jfs_error(ip->i_sb, "diAllocExt: error reading iag");
@@ -2509,7 +2509,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2509 */ 2509 */
2510 2510
2511 /* acquire inode map lock */ 2511 /* acquire inode map lock */
2512 IWRITE_LOCK(ipimap); 2512 IWRITE_LOCK(ipimap, RDWRLOCK_IMAP);
2513 2513
2514 if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) { 2514 if (ipimap->i_size >> L2PSIZE != imap->im_nextiag + 1) {
2515 IWRITE_UNLOCK(ipimap); 2515 IWRITE_UNLOCK(ipimap);
@@ -2648,7 +2648,7 @@ diNewIAG(struct inomap * imap, int *iagnop, int agno, struct metapage ** mpp)
2648 } 2648 }
2649 2649
2650 /* obtain read lock on map */ 2650 /* obtain read lock on map */
2651 IREAD_LOCK(ipimap); 2651 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
2652 2652
2653 /* read the iag */ 2653 /* read the iag */
2654 if ((rc = diIAGRead(imap, iagno, &mp))) { 2654 if ((rc = diIAGRead(imap, iagno, &mp))) {
@@ -2779,7 +2779,7 @@ diUpdatePMap(struct inode *ipimap,
2779 return -EIO; 2779 return -EIO;
2780 } 2780 }
2781 /* read the iag */ 2781 /* read the iag */
2782 IREAD_LOCK(ipimap); 2782 IREAD_LOCK(ipimap, RDWRLOCK_IMAP);
2783 rc = diIAGRead(imap, iagno, &mp); 2783 rc = diIAGRead(imap, iagno, &mp);
2784 IREAD_UNLOCK(ipimap); 2784 IREAD_UNLOCK(ipimap);
2785 if (rc) 2785 if (rc)
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 94005584445a..8f453eff3c83 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -109,9 +109,11 @@ struct jfs_inode_info {
109 109
110#define JFS_ACL_NOT_CACHED ((void *)-1) 110#define JFS_ACL_NOT_CACHED ((void *)-1)
111 111
112#define IREAD_LOCK(ip) down_read(&JFS_IP(ip)->rdwrlock) 112#define IREAD_LOCK(ip, subclass) \
113 down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
113#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock) 114#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock)
114#define IWRITE_LOCK(ip) down_write(&JFS_IP(ip)->rdwrlock) 115#define IWRITE_LOCK(ip, subclass) \
116 down_write_nested(&JFS_IP(ip)->rdwrlock, subclass)
115#define IWRITE_UNLOCK(ip) up_write(&JFS_IP(ip)->rdwrlock) 117#define IWRITE_UNLOCK(ip) up_write(&JFS_IP(ip)->rdwrlock)
116 118
117/* 119/*
@@ -127,6 +129,29 @@ enum cflags {
127 COMMIT_Synclist, /* metadata pages on group commit synclist */ 129 COMMIT_Synclist, /* metadata pages on group commit synclist */
128}; 130};
129 131
132/*
133 * commit_mutex nesting subclasses:
134 */
135enum commit_mutex_class
136{
137 COMMIT_MUTEX_PARENT,
138 COMMIT_MUTEX_CHILD,
139 COMMIT_MUTEX_SECOND_PARENT, /* Renaming */
140 COMMIT_MUTEX_VICTIM /* Inode being unlinked due to rename */
141};
142
143/*
144 * rdwrlock subclasses:
145 * The dmap inode may be locked while a normal inode or the imap inode are
146 * locked.
147 */
148enum rdwrlock_class
149{
150 RDWRLOCK_NORMAL,
151 RDWRLOCK_IMAP,
152 RDWRLOCK_DMAP
153};
154
130#define set_cflag(flag, ip) set_bit(flag, &(JFS_IP(ip)->cflag)) 155#define set_cflag(flag, ip) set_bit(flag, &(JFS_IP(ip)->cflag))
131#define clear_cflag(flag, ip) clear_bit(flag, &(JFS_IP(ip)->cflag)) 156#define clear_cflag(flag, ip) clear_bit(flag, &(JFS_IP(ip)->cflag))
132#define test_cflag(flag, ip) test_bit(flag, &(JFS_IP(ip)->cflag)) 157#define test_cflag(flag, ip) test_bit(flag, &(JFS_IP(ip)->cflag))
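
The reworked IREAD_LOCK/IWRITE_LOCK macros take a lockdep subclass because JFS legitimately holds a regular inode's rdwrlock while also taking the imap or dmap inode's rdwrlock; the subclass, passed to down_read_nested()/down_write_nested(), tells lockdep the nesting is intentional rather than recursive locking. A userspace analogy of that nesting, with pthread rwlocks standing in for the kernel rwsem and hypothetical names:

#include <pthread.h>
#include <stdio.h>

enum rdwr_class { CLASS_NORMAL, CLASS_IMAP, CLASS_DMAP };	/* mirrors rdwrlock_class */

struct jinode { pthread_rwlock_t rdwrlock; };

static void iread_lock(struct jinode *ip, enum rdwr_class subclass)
{
	(void)subclass;			/* advisory only here; consumed by lockdep in-kernel */
	pthread_rwlock_rdlock(&ip->rdwrlock);
}

int main(void)
{
	struct jinode file, ipbmap;

	pthread_rwlock_init(&file.rdwrlock, NULL);
	pthread_rwlock_init(&ipbmap.rdwrlock, NULL);

	iread_lock(&file, CLASS_NORMAL);	/* e.g. jfs_get_block() on a data file */
	iread_lock(&ipbmap, CLASS_DMAP);	/* e.g. dbAlloc() while the file is locked */
	puts("file rdwrlock and dmap rdwrlock held together");

	pthread_rwlock_unlock(&ipbmap.rdwrlock);
	pthread_rwlock_unlock(&file.rdwrlock);
	return 0;
}
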
diff --git a/fs/jfs/jfs_lock.h b/fs/jfs/jfs_lock.h
index 7d78e83d7c40..df48ece4b7a3 100644
--- a/fs/jfs/jfs_lock.h
+++ b/fs/jfs/jfs_lock.h
@@ -42,7 +42,7 @@ do { \
42 if (cond) \ 42 if (cond) \
43 break; \ 43 break; \
44 unlock_cmd; \ 44 unlock_cmd; \
45 schedule(); \ 45 io_schedule(); \
46 lock_cmd; \ 46 lock_cmd; \
47 } \ 47 } \
48 current->state = TASK_RUNNING; \ 48 current->state = TASK_RUNNING; \
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index ceaf03b94935..58deae007507 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -56,7 +56,7 @@ static inline void __lock_metapage(struct metapage *mp)
56 set_current_state(TASK_UNINTERRUPTIBLE); 56 set_current_state(TASK_UNINTERRUPTIBLE);
57 if (metapage_locked(mp)) { 57 if (metapage_locked(mp)) {
58 unlock_page(mp->page); 58 unlock_page(mp->page);
59 schedule(); 59 io_schedule();
60 lock_page(mp->page); 60 lock_page(mp->page);
61 } 61 }
62 } while (trylock_metapage(mp)); 62 } while (trylock_metapage(mp));
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index d558e51b0df8..6988a1082f58 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -135,7 +135,7 @@ static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
135 add_wait_queue(event, &wait); 135 add_wait_queue(event, &wait);
136 set_current_state(TASK_UNINTERRUPTIBLE); 136 set_current_state(TASK_UNINTERRUPTIBLE);
137 TXN_UNLOCK(); 137 TXN_UNLOCK();
138 schedule(); 138 io_schedule();
139 current->state = TASK_RUNNING; 139 current->state = TASK_RUNNING;
140 remove_wait_queue(event, &wait); 140 remove_wait_queue(event, &wait);
141} 141}
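
The schedule() -> io_schedule() conversions above leave each wait loop untouched; only the sleep primitive changes, so the time spent waiting on metadata I/O is accounted as iowait rather than idle. The surrounding shape is "drop the outer lock, sleep, retake it, retry the trylock"; a userspace sketch of that retry shape, with hypothetical names and sched_yield() standing in for the scheduler call:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* page lock analogue */
static pthread_mutex_t meta = PTHREAD_MUTEX_INITIALIZER;	/* metapage lock analogue */

static void lock_meta_with_outer_held(void)
{
	while (pthread_mutex_trylock(&meta) != 0) {
		pthread_mutex_unlock(&outer);	/* never sleep holding the outer lock */
		sched_yield();			/* kernel: io_schedule() */
		pthread_mutex_lock(&outer);
	}
}

int main(void)
{
	pthread_mutex_lock(&outer);
	lock_meta_with_outer_held();
	puts("both locks held");
	pthread_mutex_unlock(&meta);
	pthread_mutex_unlock(&outer);
	return 0;
}
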
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index e98eb03e5310..acc97c46d8a4 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -757,6 +757,11 @@ static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp,
757 nsplit = 0; 757 nsplit = 0;
758 758
759 /* push (bn, index) of the parent page/entry */ 759 /* push (bn, index) of the parent page/entry */
760 if (BT_STACK_FULL(btstack)) {
761 jfs_error(ip->i_sb, "stack overrun in xtSearch!");
762 XT_PUTPAGE(mp);
763 return -EIO;
764 }
760 BT_PUSH(btstack, bn, index); 765 BT_PUSH(btstack, bn, index);
761 766
762 /* get the child page block number */ 767 /* get the child page block number */
@@ -3915,6 +3920,11 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
3915 */ 3920 */
3916 getChild: 3921 getChild:
3917 /* save current parent entry for the child page */ 3922 /* save current parent entry for the child page */
3923 if (BT_STACK_FULL(&btstack)) {
3924 jfs_error(ip->i_sb, "stack overrun in xtTruncate!");
3925 XT_PUTPAGE(mp);
3926 return -EIO;
3927 }
3918 BT_PUSH(&btstack, bn, index); 3928 BT_PUSH(&btstack, bn, index);
3919 3929
3920 /* get child page */ 3930 /* get child page */
@@ -4112,6 +4122,11 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
4112 */ 4122 */
4113 getChild: 4123 getChild:
4114 /* save current parent entry for the child page */ 4124 /* save current parent entry for the child page */
4125 if (BT_STACK_FULL(&btstack)) {
4126 jfs_error(ip->i_sb, "stack overrun in xtTruncate_pmap!");
4127 XT_PUTPAGE(mp);
4128 return -EIO;
4129 }
4115 BT_PUSH(&btstack, bn, index); 4130 BT_PUSH(&btstack, bn, index);
4116 4131
4117 /* get child page */ 4132 /* get child page */
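
All three xtree hunks add the same guard: test BT_STACK_FULL() before BT_PUSH() and fail with -EIO instead of overrunning the fixed-size traversal stack on a corrupt or unexpectedly deep tree. A self-contained sketch of a bounded push with that failure mode (hypothetical layout, not the real btstack):

#include <stdio.h>

#define STACK_DEPTH	8
#define EIO		5

struct btframe { long bn; int index; };

struct btstack {
	struct btframe frames[STACK_DEPTH];
	int top;				/* frames in use */
};

static int bt_push(struct btstack *s, long bn, int index)
{
	if (s->top >= STACK_DEPTH)		/* BT_STACK_FULL() equivalent */
		return -EIO;			/* give up instead of overrunning */
	s->frames[s->top].bn = bn;
	s->frames[s->top].index = index;
	s->top++;
	return 0;
}

int main(void)
{
	struct btstack s = { .top = 0 };
	int rc = 0;
	long bn;

	for (bn = 0; bn < 10 && rc == 0; bn++)
		rc = bt_push(&s, bn, 0);
	printf("pushed %d frames, rc=%d\n", s.top, rc);	/* pushed 8 frames, rc=-5 */
	return 0;
}
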
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a6a8c16c872c..7ab47561b68d 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -104,8 +104,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode,
104 104
105 tid = txBegin(dip->i_sb, 0); 105 tid = txBegin(dip->i_sb, 0);
106 106
107 mutex_lock(&JFS_IP(dip)->commit_mutex); 107 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
108 mutex_lock(&JFS_IP(ip)->commit_mutex); 108 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
109 109
110 rc = jfs_init_acl(tid, ip, dip); 110 rc = jfs_init_acl(tid, ip, dip);
111 if (rc) 111 if (rc)
@@ -238,8 +238,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode)
238 238
239 tid = txBegin(dip->i_sb, 0); 239 tid = txBegin(dip->i_sb, 0);
240 240
241 mutex_lock(&JFS_IP(dip)->commit_mutex); 241 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
242 mutex_lock(&JFS_IP(ip)->commit_mutex); 242 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
243 243
244 rc = jfs_init_acl(tid, ip, dip); 244 rc = jfs_init_acl(tid, ip, dip);
245 if (rc) 245 if (rc)
@@ -365,8 +365,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
365 365
366 tid = txBegin(dip->i_sb, 0); 366 tid = txBegin(dip->i_sb, 0);
367 367
368 mutex_lock(&JFS_IP(dip)->commit_mutex); 368 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
369 mutex_lock(&JFS_IP(ip)->commit_mutex); 369 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
370 370
371 iplist[0] = dip; 371 iplist[0] = dip;
372 iplist[1] = ip; 372 iplist[1] = ip;
@@ -483,12 +483,12 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
483 if ((rc = get_UCSname(&dname, dentry))) 483 if ((rc = get_UCSname(&dname, dentry)))
484 goto out; 484 goto out;
485 485
486 IWRITE_LOCK(ip); 486 IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
487 487
488 tid = txBegin(dip->i_sb, 0); 488 tid = txBegin(dip->i_sb, 0);
489 489
490 mutex_lock(&JFS_IP(dip)->commit_mutex); 490 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
491 mutex_lock(&JFS_IP(ip)->commit_mutex); 491 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
492 492
493 iplist[0] = dip; 493 iplist[0] = dip;
494 iplist[1] = ip; 494 iplist[1] = ip;
@@ -802,8 +802,8 @@ static int jfs_link(struct dentry *old_dentry,
802 802
803 tid = txBegin(ip->i_sb, 0); 803 tid = txBegin(ip->i_sb, 0);
804 804
805 mutex_lock(&JFS_IP(dir)->commit_mutex); 805 mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
806 mutex_lock(&JFS_IP(ip)->commit_mutex); 806 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
807 807
808 /* 808 /*
809 * scan parent directory for entry/freespace 809 * scan parent directory for entry/freespace
@@ -913,8 +913,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
913 913
914 tid = txBegin(dip->i_sb, 0); 914 tid = txBegin(dip->i_sb, 0);
915 915
916 mutex_lock(&JFS_IP(dip)->commit_mutex); 916 mutex_lock_nested(&JFS_IP(dip)->commit_mutex, COMMIT_MUTEX_PARENT);
917 mutex_lock(&JFS_IP(ip)->commit_mutex); 917 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
918 918
919 rc = jfs_init_security(tid, ip, dip); 919 rc = jfs_init_security(tid, ip, dip);
920 if (rc) 920 if (rc)
@@ -1127,7 +1127,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1127 goto out3; 1127 goto out3;
1128 } 1128 }
1129 } else if (new_ip) { 1129 } else if (new_ip) {
1130 IWRITE_LOCK(new_ip); 1130 IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
1131 /* Init inode for quota operations. */ 1131 /* Init inode for quota operations. */
1132 DQUOT_INIT(new_ip); 1132 DQUOT_INIT(new_ip);
1133 } 1133 }
@@ -1137,13 +1137,21 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1137 */ 1137 */
1138 tid = txBegin(new_dir->i_sb, 0); 1138 tid = txBegin(new_dir->i_sb, 0);
1139 1139
1140 mutex_lock(&JFS_IP(new_dir)->commit_mutex); 1140 /*
1141 mutex_lock(&JFS_IP(old_ip)->commit_mutex); 1141 * How do we know the locking is safe from deadlocks?
1142 * The vfs does the hard part for us. Any time we are taking nested
1143 * commit_mutexes, the vfs already has i_mutex held on the parent.
1144 * Here, the vfs has already taken i_mutex on both old_dir and new_dir.
1145 */
1146 mutex_lock_nested(&JFS_IP(new_dir)->commit_mutex, COMMIT_MUTEX_PARENT);
1147 mutex_lock_nested(&JFS_IP(old_ip)->commit_mutex, COMMIT_MUTEX_CHILD);
1142 if (old_dir != new_dir) 1148 if (old_dir != new_dir)
1143 mutex_lock(&JFS_IP(old_dir)->commit_mutex); 1149 mutex_lock_nested(&JFS_IP(old_dir)->commit_mutex,
1150 COMMIT_MUTEX_SECOND_PARENT);
1144 1151
1145 if (new_ip) { 1152 if (new_ip) {
1146 mutex_lock(&JFS_IP(new_ip)->commit_mutex); 1153 mutex_lock_nested(&JFS_IP(new_ip)->commit_mutex,
1154 COMMIT_MUTEX_VICTIM);
1147 /* 1155 /*
1148 * Change existing directory entry to new inode number 1156 * Change existing directory entry to new inode number
1149 */ 1157 */
@@ -1357,8 +1365,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
1357 1365
1358 tid = txBegin(dir->i_sb, 0); 1366 tid = txBegin(dir->i_sb, 0);
1359 1367
1360 mutex_lock(&JFS_IP(dir)->commit_mutex); 1368 mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
1361 mutex_lock(&JFS_IP(ip)->commit_mutex); 1369 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD);
1362 1370
1363 rc = jfs_init_acl(tid, ip, dir); 1371 rc = jfs_init_acl(tid, ip, dir);
1364 if (rc) 1372 if (rc)
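
jfs_rename() above can end up holding four commit_mutexes at once; the new comment explains why that is deadlock-free (the VFS already holds i_mutex on both parent directories), and the subclasses exist only so lockdep accepts the nesting, which is always taken in the same order. A userspace rendering of that fixed order, with plain pthread mutexes in place of mutex_lock_nested() and hypothetical names:

#include <pthread.h>
#include <stdio.h>

enum { PARENT, CHILD, SECOND_PARENT, VICTIM };	/* mirrors enum commit_mutex_class */

struct jnode { pthread_mutex_t commit_mutex; };

static void lock_nested(struct jnode *n, int subclass)
{
	(void)subclass;		/* kernel: mutex_lock_nested(&n->commit_mutex, subclass) */
	pthread_mutex_lock(&n->commit_mutex);
}

int main(void)
{
	struct jnode new_dir = { PTHREAD_MUTEX_INITIALIZER };
	struct jnode old_ip = { PTHREAD_MUTEX_INITIALIZER };
	struct jnode old_dir = { PTHREAD_MUTEX_INITIALIZER };
	struct jnode new_ip = { PTHREAD_MUTEX_INITIALIZER };

	lock_nested(&new_dir, PARENT);		/* always first */
	lock_nested(&old_ip, CHILD);
	lock_nested(&old_dir, SECOND_PARENT);	/* only when old_dir != new_dir */
	lock_nested(&new_ip, VICTIM);		/* only when the target exists */
	puts("all four taken in the documented order");

	pthread_mutex_unlock(&new_ip.commit_mutex);
	pthread_mutex_unlock(&old_dir.commit_mutex);
	pthread_mutex_unlock(&old_ip.commit_mutex);
	pthread_mutex_unlock(&new_dir.commit_mutex);
	return 0;
}
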
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index e1216364d191..d026b4f27757 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -306,8 +306,8 @@ int ocfs2_journal_dirty_data(handle_t *handle,
306 * for the dinode, one for the new block. */ 306 * for the dinode, one for the new block. */
307#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) 307#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
308 308
309/* file update (nlink, etc) + dir entry block */ 309/* file update (nlink, etc) + directory mtime/ctime + dir entry block */
310#define OCFS2_LINK_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) 310#define OCFS2_LINK_CREDITS (2*OCFS2_INODE_UPDATE_CREDITS + 1)
311 311
312/* inode + dir inode (if we unlink a dir), + dir entry block + orphan 312/* inode + dir inode (if we unlink a dir), + dir entry block + orphan
313 * dir inode link */ 313 * dir inode link */
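
The OCFS2 hunk doubles the inode term in OCFS2_LINK_CREDITS because a link operation now journals two inode updates (the inode whose nlink changes and the directory whose mtime/ctime change) plus the directory entry block. For the arithmetic only, and assuming OCFS2_INODE_UPDATE_CREDITS is 1 (its real definition lives elsewhere in journal.h and is not shown in this hunk):

#include <stdio.h>

#define OCFS2_INODE_UPDATE_CREDITS	1	/* assumed value, for illustration only */
#define OLD_LINK_CREDITS	(OCFS2_INODE_UPDATE_CREDITS + 1)
#define NEW_LINK_CREDITS	(2 * OCFS2_INODE_UPDATE_CREDITS + 1)

int main(void)
{
	printf("old=%d new=%d\n", OLD_LINK_CREDITS, NEW_LINK_CREDITS);	/* old=2 new=3 */
	return 0;
}
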
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index ebc1f697615a..422f29c06c77 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,7 +63,7 @@
63 63
64/* Current ACPICA subsystem version in YYYYMMDD format */ 64/* Current ACPICA subsystem version in YYYYMMDD format */
65 65
66#define ACPI_CA_VERSION 0x20060707 66#define ACPI_CA_VERSION 0x20070126
67 67
68/* 68/*
69 * OS name, used for the _OS object. The _OS object is essentially obsolete, 69 * OS name, used for the _OS object. The _OS object is essentially obsolete,
@@ -115,6 +115,10 @@
115 115
116#define ACPI_NUM_OWNERID_MASKS 8 116#define ACPI_NUM_OWNERID_MASKS 8
117 117
118/* Size of the root table array is increased by this increment */
119
120#define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
121
118/****************************************************************************** 122/******************************************************************************
119 * 123 *
120 * ACPI Specification constants (Do not change unless the specification changes) 124 * ACPI Specification constants (Do not change unless the specification changes)
@@ -152,6 +156,11 @@
152#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */ 156#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
153#define ACPI_PATH_SEPARATOR '.' 157#define ACPI_PATH_SEPARATOR '.'
154 158
159/* Sizes for ACPI table headers */
160
161#define ACPI_OEM_ID_SIZE 6
162#define ACPI_OEM_TABLE_ID_SIZE 8
163
155/* Constants used in searching for the RSDP in low memory */ 164/* Constants used in searching for the RSDP in low memory */
156 165
157#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */ 166#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */
diff --git a/include/acpi/acdebug.h b/include/acpi/acdebug.h
index d8167095caf3..d626bb1d2973 100644
--- a/include/acpi/acdebug.h
+++ b/include/acpi/acdebug.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -159,6 +159,10 @@ void
159acpi_db_create_execution_threads(char *num_threads_arg, 159acpi_db_create_execution_threads(char *num_threads_arg,
160 char *num_loops_arg, char *method_name_arg); 160 char *num_loops_arg, char *method_name_arg);
161 161
162#ifdef ACPI_DBG_TRACK_ALLOCATIONS
163u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
164#endif
165
162/* 166/*
163 * dbfileio - Debugger file I/O commands 167 * dbfileio - Debugger file I/O commands
164 */ 168 */
@@ -214,4 +218,6 @@ void acpi_db_prep_namestring(char *name);
214 218
215struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name); 219struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name);
216 220
221void acpi_db_uint32_to_hex_string(u32 value, char *buffer);
222
217#endif /* __ACDEBUG_H__ */ 223#endif /* __ACDEBUG_H__ */
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h
index 9a7d6921f534..389d772c7d5b 100644
--- a/include/acpi/acdisasm.h
+++ b/include/acpi/acdisasm.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -97,9 +97,11 @@ typedef const struct acpi_dmtable_info {
97#define ACPI_DMT_CHKSUM 20 97#define ACPI_DMT_CHKSUM 20
98#define ACPI_DMT_SPACEID 21 98#define ACPI_DMT_SPACEID 21
99#define ACPI_DMT_GAS 22 99#define ACPI_DMT_GAS 22
100#define ACPI_DMT_MADT 23 100#define ACPI_DMT_DMAR 23
101#define ACPI_DMT_SRAT 24 101#define ACPI_DMT_MADT 24
102#define ACPI_DMT_EXIT 25 102#define ACPI_DMT_SRAT 25
103#define ACPI_DMT_EXIT 26
104#define ACPI_DMT_SIG 27
103 105
104typedef 106typedef
105void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table); 107void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table);
@@ -108,6 +110,7 @@ struct acpi_dmtable_data {
108 char *signature; 110 char *signature;
109 struct acpi_dmtable_info *table_info; 111 struct acpi_dmtable_info *table_info;
110 ACPI_TABLE_HANDLER table_handler; 112 ACPI_TABLE_HANDLER table_handler;
113 char *name;
111}; 114};
112 115
113struct acpi_op_walk_info { 116struct acpi_op_walk_info {
@@ -139,7 +142,9 @@ extern const char *acpi_gbl_match_ops[];
139 142
140extern struct acpi_dmtable_info acpi_dm_table_info_asf0[]; 143extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
141extern struct acpi_dmtable_info acpi_dm_table_info_asf1[]; 144extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
145extern struct acpi_dmtable_info acpi_dm_table_info_asf1a[];
142extern struct acpi_dmtable_info acpi_dm_table_info_asf2[]; 146extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
147extern struct acpi_dmtable_info acpi_dm_table_info_asf2a[];
143extern struct acpi_dmtable_info acpi_dm_table_info_asf3[]; 148extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
144extern struct acpi_dmtable_info acpi_dm_table_info_asf4[]; 149extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
145extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[]; 150extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
@@ -147,6 +152,11 @@ extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
147extern struct acpi_dmtable_info acpi_dm_table_info_cpep[]; 152extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
148extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[]; 153extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
149extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[]; 154extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
155extern struct acpi_dmtable_info acpi_dm_table_info_dmar[];
156extern struct acpi_dmtable_info acpi_dm_table_info_dmar_hdr[];
157extern struct acpi_dmtable_info acpi_dm_table_info_dmar_scope[];
158extern struct acpi_dmtable_info acpi_dm_table_info_dmar0[];
159extern struct acpi_dmtable_info acpi_dm_table_info_dmar1[];
150extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[]; 160extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
151extern struct acpi_dmtable_info acpi_dm_table_info_facs[]; 161extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
152extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[]; 162extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
@@ -201,6 +211,8 @@ void acpi_dm_dump_asf(struct acpi_table_header *table);
201 211
202void acpi_dm_dump_cpep(struct acpi_table_header *table); 212void acpi_dm_dump_cpep(struct acpi_table_header *table);
203 213
214void acpi_dm_dump_dmar(struct acpi_table_header *table);
215
204void acpi_dm_dump_fadt(struct acpi_table_header *table); 216void acpi_dm_dump_fadt(struct acpi_table_header *table);
205 217
206void acpi_dm_dump_srat(struct acpi_table_header *table); 218void acpi_dm_dump_srat(struct acpi_table_header *table);
@@ -314,7 +326,7 @@ acpi_dm_resource_template(struct acpi_op_walk_info *info,
314 union acpi_parse_object *op, 326 union acpi_parse_object *op,
315 u8 * byte_data, u32 byte_count); 327 u8 * byte_data, u32 byte_count);
316 328
317u8 acpi_dm_is_resource_template(union acpi_parse_object *op); 329acpi_status acpi_dm_is_resource_template(union acpi_parse_object *op);
318 330
319void acpi_dm_indent(u32 level); 331void acpi_dm_indent(u32 level);
320 332
diff --git a/include/acpi/acdispat.h b/include/acpi/acdispat.h
index a22fe9cf8493..cb8d2868c8ac 100644
--- a/include/acpi/acdispat.h
+++ b/include/acpi/acdispat.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -210,7 +210,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state);
210 * dsinit 210 * dsinit
211 */ 211 */
212acpi_status 212acpi_status
213acpi_ds_initialize_objects(struct acpi_table_desc *table_desc, 213acpi_ds_initialize_objects(acpi_native_uint table_index,
214 struct acpi_namespace_node *start_node); 214 struct acpi_namespace_node *start_node);
215 215
216/* 216/*
diff --git a/include/acpi/acevents.h b/include/acpi/acevents.h
index 234142828e1a..d23cdf326808 100644
--- a/include/acpi/acevents.h
+++ b/include/acpi/acevents.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 797ca1ea5214..b73f18a48785 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -178,8 +178,10 @@
178#define AE_CTRL_BREAK (acpi_status) (0x0009 | AE_CODE_CONTROL) 178#define AE_CTRL_BREAK (acpi_status) (0x0009 | AE_CODE_CONTROL)
179#define AE_CTRL_CONTINUE (acpi_status) (0x000A | AE_CODE_CONTROL) 179#define AE_CTRL_CONTINUE (acpi_status) (0x000A | AE_CODE_CONTROL)
180#define AE_CTRL_SKIP (acpi_status) (0x000B | AE_CODE_CONTROL) 180#define AE_CTRL_SKIP (acpi_status) (0x000B | AE_CODE_CONTROL)
181#define AE_CTRL_PARSE_CONTINUE (acpi_status) (0x000C | AE_CODE_CONTROL)
182#define AE_CTRL_PARSE_PENDING (acpi_status) (0x000D | AE_CODE_CONTROL)
181 183
182#define AE_CODE_CTRL_MAX 0x000B 184#define AE_CODE_CTRL_MAX 0x000D
183 185
184#ifdef DEFINE_ACPI_GLOBALS 186#ifdef DEFINE_ACPI_GLOBALS
185 187
@@ -291,7 +293,9 @@ char const *acpi_gbl_exception_names_ctrl[] = {
291 "AE_CTRL_TRANSFER", 293 "AE_CTRL_TRANSFER",
292 "AE_CTRL_BREAK", 294 "AE_CTRL_BREAK",
293 "AE_CTRL_CONTINUE", 295 "AE_CTRL_CONTINUE",
294 "AE_CTRL_SKIP" 296 "AE_CTRL_SKIP",
297 "AE_CTRL_PARSE_CONTINUE",
298 "AE_CTRL_PARSE_PENDING"
295}; 299};
296 300
297#endif /* ACPI GLOBALS */ 301#endif /* ACPI GLOBALS */
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h
index 06972e6637de..24c3f05ab367 100644
--- a/include/acpi/acglobal.h
+++ b/include/acpi/acglobal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -58,37 +58,6 @@
58#define ACPI_INIT_GLOBAL(a,b) a 58#define ACPI_INIT_GLOBAL(a,b) a
59#endif 59#endif
60 60
61/*
62 * Keep local copies of these FADT-based registers. NOTE: These globals
63 * are first in this file for alignment reasons on 64-bit systems.
64 */
65ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
66ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
67
68/*****************************************************************************
69 *
70 * Debug support
71 *
72 ****************************************************************************/
73
74/* Runtime configuration of debug print levels */
75
76extern u32 acpi_dbg_level;
77extern u32 acpi_dbg_layer;
78
79/* Procedure nesting level for debug output */
80
81extern u32 acpi_gbl_nesting_level;
82
83/* Support for dynamic control method tracing mechanism */
84
85ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
86ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
87ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
88ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
89ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
90ACPI_EXTERN u32 acpi_gbl_trace_flags;
91
92/***************************************************************************** 61/*****************************************************************************
93 * 62 *
94 * Runtime configuration (static defaults that can be overriden at runtime) 63 * Runtime configuration (static defaults that can be overriden at runtime)
@@ -135,52 +104,62 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
135 104
136/***************************************************************************** 105/*****************************************************************************
137 * 106 *
107 * Debug support
108 *
109 ****************************************************************************/
110
111/* Runtime configuration of debug print levels */
112
113extern u32 acpi_dbg_level;
114extern u32 acpi_dbg_layer;
115
116/* Procedure nesting level for debug output */
117
118extern u32 acpi_gbl_nesting_level;
119
120/* Event counters */
121
122ACPI_EXTERN u32 acpi_gpe_count;
123
124/* Support for dynamic control method tracing mechanism */
125
126ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
127ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
128ACPI_EXTERN acpi_name acpi_gbl_trace_method_name;
129ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
130ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
131ACPI_EXTERN u32 acpi_gbl_trace_flags;
132
133/*****************************************************************************
134 *
138 * ACPI Table globals 135 * ACPI Table globals
139 * 136 *
140 ****************************************************************************/ 137 ****************************************************************************/
141 138
142/* 139/*
143 * Table pointers. 140 * acpi_gbl_root_table_list is the master list of ACPI tables found in the
144 * Although these pointers are somewhat redundant with the global acpi_table, 141 * RSDT/XSDT.
145 * they are convenient because they are typed pointers.
146 * 142 *
147 * These tables are single-table only; meaning that there can be at most one 143 * acpi_gbl_FADT is a local copy of the FADT, converted to a common format.
148 * of each in the system. Each global points to the actual table.
149 */
150ACPI_EXTERN u32 acpi_gbl_table_flags;
151ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
152ACPI_EXTERN struct rsdp_descriptor *acpi_gbl_RSDP;
153ACPI_EXTERN struct xsdt_descriptor *acpi_gbl_XSDT;
154ACPI_EXTERN struct fadt_descriptor *acpi_gbl_FADT;
155ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
156ACPI_EXTERN struct facs_descriptor *acpi_gbl_FACS;
157ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
158/*
159 * Since there may be multiple SSDTs and PSDTs, a single pointer is not
160 * sufficient; Therefore, there isn't one!
161 */ 144 */
145ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
146ACPI_EXTERN struct acpi_table_fadt acpi_gbl_FADT;
147extern acpi_native_uint acpi_gbl_permanent_mmap;
162 148
163/* The root table can be either an RSDT or an XSDT */ 149/* These addresses are calculated from FADT address values */
164 150
165ACPI_EXTERN u8 acpi_gbl_root_table_type; 151ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
166#define ACPI_TABLE_TYPE_RSDT 'R' 152ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
167#define ACPI_TABLE_TYPE_XSDT 'X'
168 153
169/* 154/*
170 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths: 155 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
171 * If we are executing a method that exists in a 32-bit ACPI table, 156 * determined by the revision of the DSDT: If the DSDT revision is less than
172 * use only the lower 32 bits of the (internal) 64-bit Integer. 157 * 2, use only the lower 32 bits of the internal 64-bit Integer.
173 */ 158 */
174ACPI_EXTERN u8 acpi_gbl_integer_bit_width; 159ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
175ACPI_EXTERN u8 acpi_gbl_integer_byte_width; 160ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
176ACPI_EXTERN u8 acpi_gbl_integer_nybble_width; 161ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
177 162
178/*
179 * ACPI Table info arrays
180 */
181extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
182extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
183
184/***************************************************************************** 163/*****************************************************************************
185 * 164 *
186 * Mutual exclusion within ACPICA subsystem 165 * Mutual exclusion within ACPICA subsystem
@@ -188,7 +167,7 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
188 ****************************************************************************/ 167 ****************************************************************************/
189 168
190/* 169/*
191 * Predefined mutex objects. This array contains the 170 * Predefined mutex objects. This array contains the
192 * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. 171 * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
193 * (The table maps local handles to the real OS handles) 172 * (The table maps local handles to the real OS handles)
194 */ 173 */
@@ -197,6 +176,7 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
197/* 176/*
198 * Global lock semaphore works in conjunction with the actual HW global lock 177 * Global lock semaphore works in conjunction with the actual HW global lock
199 */ 178 */
179ACPI_EXTERN acpi_mutex acpi_gbl_global_lock_mutex;
200ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore; 180ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
201 181
202/* 182/*
@@ -220,6 +200,7 @@ ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE regis
220 200
221ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; 201ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
222ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; 202ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
203ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
223#endif 204#endif
224 205
225/* Object caches */ 206/* Object caches */
@@ -240,7 +221,6 @@ ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
240 221
241/* Misc */ 222/* Misc */
242 223
243ACPI_EXTERN u32 acpi_gbl_global_lock_thread_count;
244ACPI_EXTERN u32 acpi_gbl_original_mode; 224ACPI_EXTERN u32 acpi_gbl_original_mode;
245ACPI_EXTERN u32 acpi_gbl_rsdp_original_location; 225ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
246ACPI_EXTERN u32 acpi_gbl_ns_lookup_count; 226ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
@@ -260,12 +240,19 @@ ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
260 240
261extern u8 acpi_gbl_shutdown; 241extern u8 acpi_gbl_shutdown;
262extern u32 acpi_gbl_startup_flags; 242extern u32 acpi_gbl_startup_flags;
263extern const u8 acpi_gbl_decode_to8bit[8];
264extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT]; 243extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
265extern const char *acpi_gbl_highest_dstate_names[4]; 244extern const char *acpi_gbl_highest_dstate_names[4];
266extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES]; 245extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
267extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; 246extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
268 247
248/* Exception codes */
249
250extern char const *acpi_gbl_exception_names_env[];
251extern char const *acpi_gbl_exception_names_pgm[];
252extern char const *acpi_gbl_exception_names_tbl[];
253extern char const *acpi_gbl_exception_names_aml[];
254extern char const *acpi_gbl_exception_names_ctrl[];
255
269/***************************************************************************** 256/*****************************************************************************
270 * 257 *
271 * Namespace globals 258 * Namespace globals
diff --git a/include/acpi/achware.h b/include/acpi/achware.h
index 29b60a8c0593..9df275cf7bc1 100644
--- a/include/acpi/achware.h
+++ b/include/acpi/achware.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,8 +61,6 @@
61/* 61/*
62 * hwacpi - high level functions 62 * hwacpi - high level functions
63 */ 63 */
64acpi_status acpi_hw_initialize(void);
65
66acpi_status acpi_hw_set_mode(u32 mode); 64acpi_status acpi_hw_set_mode(u32 mode);
67 65
68u32 acpi_hw_get_mode(void); 66u32 acpi_hw_get_mode(void);
@@ -84,7 +82,7 @@ acpi_hw_low_level_read(u32 width,
84acpi_status 82acpi_status
85acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg); 83acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address *reg);
86 84
87acpi_status acpi_hw_clear_acpi_status(u32 flags); 85acpi_status acpi_hw_clear_acpi_status(void);
88 86
89/* 87/*
90 * hwgpe - GPE support 88 * hwgpe - GPE support
diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h
index 91586d0d5bb5..ce7c9d653910 100644
--- a/include/acpi/acinterp.h
+++ b/include/acpi/acinterp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -277,12 +277,6 @@ acpi_status acpi_ex_system_do_suspend(acpi_integer time);
277 277
278acpi_status acpi_ex_system_do_stall(u32 time); 278acpi_status acpi_ex_system_do_stall(u32 time);
279 279
280acpi_status
281acpi_ex_system_acquire_mutex(union acpi_operand_object *time,
282 union acpi_operand_object *obj_desc);
283
284acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc);
285
286acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc); 280acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc);
287 281
288acpi_status 282acpi_status
@@ -451,10 +445,14 @@ acpi_ex_copy_integer_to_buffer_field(union acpi_operand_object *source_desc,
451/* 445/*
452 * exutils - interpreter/scanner utilities 446 * exutils - interpreter/scanner utilities
453 */ 447 */
454acpi_status acpi_ex_enter_interpreter(void); 448void acpi_ex_enter_interpreter(void);
455 449
456void acpi_ex_exit_interpreter(void); 450void acpi_ex_exit_interpreter(void);
457 451
452void acpi_ex_reacquire_interpreter(void);
453
454void acpi_ex_relinquish_interpreter(void);
455
458void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc); 456void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
459 457
460u8 acpi_ex_acquire_global_lock(u32 rule); 458u8 acpi_ex_acquire_global_lock(u32 rule);
diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h
index 063c4b54290f..6f83ddbed3af 100644
--- a/include/acpi/aclocal.h
+++ b/include/acpi/aclocal.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -80,8 +80,8 @@ union acpi_parse_object;
80 * table below also! 80 * table below also!
81 */ 81 */
82#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */ 82#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
83#define ACPI_MTX_TABLES 1 /* Data for ACPI tables */ 83#define ACPI_MTX_NAMESPACE 1 /* ACPI Namespace */
84#define ACPI_MTX_NAMESPACE 2 /* ACPI Namespace */ 84#define ACPI_MTX_TABLES 2 /* Data for ACPI tables */
85#define ACPI_MTX_EVENTS 3 /* Data for ACPI events */ 85#define ACPI_MTX_EVENTS 3 /* Data for ACPI events */
86#define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */ 86#define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */
87#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */ 87#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */
@@ -162,7 +162,7 @@ struct acpi_mutex_info {
162typedef enum { 162typedef enum {
163 ACPI_IMODE_LOAD_PASS1 = 0x01, 163 ACPI_IMODE_LOAD_PASS1 = 0x01,
164 ACPI_IMODE_LOAD_PASS2 = 0x02, 164 ACPI_IMODE_LOAD_PASS2 = 0x02,
165 ACPI_IMODE_EXECUTE = 0x0E 165 ACPI_IMODE_EXECUTE = 0x03
166} acpi_interpreter_mode; 166} acpi_interpreter_mode;
167 167
168union acpi_name_union { 168union acpi_name_union {
@@ -204,7 +204,7 @@ struct acpi_namespace_node {
204/* Namespace Node flags */ 204/* Namespace Node flags */
205 205
206#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */ 206#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
207#define ANOBJ_RESERVED 0x02 /* Available for future use */ 207#define ANOBJ_TEMPORARY 0x02 /* Node is created by a method and is temporary */
208#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */ 208#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
209#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */ 209#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
210#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */ 210#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
@@ -219,25 +219,42 @@ struct acpi_namespace_node {
219 * ACPI Table Descriptor. One per ACPI table 219 * ACPI Table Descriptor. One per ACPI table
220 */ 220 */
221struct acpi_table_desc { 221struct acpi_table_desc {
222 struct acpi_table_desc *prev; 222 acpi_physical_address address;
223 struct acpi_table_desc *next;
224 struct acpi_table_desc *installed_desc;
225 struct acpi_table_header *pointer; 223 struct acpi_table_header *pointer;
226 u8 *aml_start; 224 u32 length; /* Length fixed at 32 bits */
227 u64 physical_address; 225 union acpi_name_union signature;
228 acpi_size length;
229 u32 aml_length;
230 acpi_owner_id owner_id; 226 acpi_owner_id owner_id;
231 u8 type; 227 u8 flags;
232 u8 allocation;
233 u8 loaded_into_namespace;
234}; 228};
235 229
236struct acpi_table_list { 230/* Flags for above */
237 struct acpi_table_desc *next; 231
232#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
233#define ACPI_TABLE_ORIGIN_MAPPED (1)
234#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
235#define ACPI_TABLE_ORIGIN_MASK (3)
236#define ACPI_TABLE_IS_LOADED (4)
237
238/* One internal RSDT for table management */
239
240struct acpi_internal_rsdt {
241 struct acpi_table_desc *tables;
238 u32 count; 242 u32 count;
243 u32 size;
244 u8 flags;
239}; 245};
240 246
247/* Flags for above */
248
249#define ACPI_ROOT_ORIGIN_UNKNOWN (0) /* ~ORIGIN_ALLOCATED */
250#define ACPI_ROOT_ORIGIN_ALLOCATED (1)
251#define ACPI_ROOT_ALLOW_RESIZE (2)
252
253/* Predefined (fixed) table indexes */
254
255#define ACPI_TABLE_INDEX_DSDT (0)
256#define ACPI_TABLE_INDEX_FACS (1)
257
241struct acpi_find_context { 258struct acpi_find_context {
242 char *search_for; 259 char *search_for;
243 acpi_handle *list; 260 acpi_handle *list;
@@ -350,7 +367,7 @@ struct acpi_gpe_event_info {
350 union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */ 367 union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */
351 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ 368 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
352 u8 flags; /* Misc info about this GPE */ 369 u8 flags; /* Misc info about this GPE */
353 u8 register_bit; /* This GPE bit within the register */ 370 u8 gpe_number; /* This GPE */
354}; 371};
355 372
356/* Information about a GPE register pair, one per each status/enable pair in an array */ 373/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -855,12 +872,30 @@ struct acpi_bit_register_info {
855 ****************************************************************************/ 872 ****************************************************************************/
856 873
857struct acpi_db_method_info { 874struct acpi_db_method_info {
858 acpi_handle thread_gate; 875 acpi_handle main_thread_gate;
876 acpi_handle thread_complete_gate;
877 u32 *threads;
878 u32 num_threads;
879 u32 num_created;
880 u32 num_completed;
881
859 char *name; 882 char *name;
860 char **args;
861 u32 flags; 883 u32 flags;
862 u32 num_loops; 884 u32 num_loops;
863 char pathname[128]; 885 char pathname[128];
886 char **args;
887
888 /*
889 * Arguments to be passed to the method for the command.
890 * Threads -
891 * the number of threads, the ID of the current thread, and
892 * the index of the current thread among all created threads.
893 */
894 char init_args;
895 char *arguments[4];
896 char num_threads_str[11];
897 char id_of_thread_str[11];
898 char index_of_thread_str[11];
864}; 899};
865 900
866struct acpi_integrity_info { 901struct acpi_integrity_info {
@@ -919,6 +954,8 @@ struct acpi_memory_list {
919 954
920 u32 total_allocated; 955 u32 total_allocated;
921 u32 total_freed; 956 u32 total_freed;
957 u32 max_occupied;
958 u32 total_size;
922 u32 current_total_size; 959 u32 current_total_size;
923 u32 requests; 960 u32 requests;
924 u32 hits; 961 u32 hits;
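The rewritten table descriptor above replaces the doubly linked list of struct acpi_table_desc entries with a flat array managed through the internal RSDT, and folds both the mapping origin and the loaded state into a single flags byte. A minimal standalone sketch of decoding such a flags byte, using the ACPI_TABLE_ORIGIN_* values from this hunk (the helper names and standard C types are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the hunk above */
#define ACPI_TABLE_ORIGIN_UNKNOWN   (0)
#define ACPI_TABLE_ORIGIN_MAPPED    (1)
#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
#define ACPI_TABLE_ORIGIN_MASK      (3)
#define ACPI_TABLE_IS_LOADED        (4)

/* Illustrative helper: report how a table descriptor's memory was obtained */
static const char *table_origin(uint8_t flags)
{
	switch (flags & ACPI_TABLE_ORIGIN_MASK) {
	case ACPI_TABLE_ORIGIN_MAPPED:
		return "mapped";
	case ACPI_TABLE_ORIGIN_ALLOCATED:
		return "allocated";
	default:
		return "unknown";
	}
}

int main(void)
{
	uint8_t flags = ACPI_TABLE_ORIGIN_MAPPED | ACPI_TABLE_IS_LOADED;

	printf("origin=%s loaded=%d\n", table_origin(flags),
	       (flags & ACPI_TABLE_IS_LOADED) != 0);
	return 0;
}

The two origin bits are masked out first; ACPI_TABLE_IS_LOADED sits above the mask and can be tested independently.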
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index 192fa095a515..8948a6461834 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -55,25 +55,12 @@
55#define ACPI_SET_BIT(target,bit) ((target) |= (bit)) 55#define ACPI_SET_BIT(target,bit) ((target) |= (bit))
56#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) 56#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
57#define ACPI_MIN(a,b) (((a)<(b))?(a):(b)) 57#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
58#define ACPI_MAX(a,b) (((a)>(b))?(a):(b))
58 59
59/* Size calculation */ 60/* Size calculation */
60 61
61#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0])) 62#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
62 63
63#if ACPI_MACHINE_WIDTH == 16
64
65/*
66 * For 16-bit addresses, we have to assume that the upper 32 bits
67 * (out of 64) are zero.
68 */
69#define ACPI_LODWORD(l) ((u32)(l))
70#define ACPI_HIDWORD(l) ((u32)(0))
71
72#define ACPI_GET_ADDRESS(a) ((a).lo)
73#define ACPI_STORE_ADDRESS(a,b) {(a).hi=0;(a).lo=(u32)(b);}
74#define ACPI_VALID_ADDRESS(a) ((a).hi | (a).lo)
75
76#else
77#ifdef ACPI_NO_INTEGER64_SUPPORT 64#ifdef ACPI_NO_INTEGER64_SUPPORT
78/* 65/*
79 * acpi_integer is 32-bits, no 64-bit support on this platform 66 * acpi_integer is 32-bits, no 64-bit support on this platform
@@ -81,10 +68,6 @@
81#define ACPI_LODWORD(l) ((u32)(l)) 68#define ACPI_LODWORD(l) ((u32)(l))
82#define ACPI_HIDWORD(l) ((u32)(0)) 69#define ACPI_HIDWORD(l) ((u32)(0))
83 70
84#define ACPI_GET_ADDRESS(a) (a)
85#define ACPI_STORE_ADDRESS(a,b) ((a)=(b))
86#define ACPI_VALID_ADDRESS(a) (a)
87
88#else 71#else
89 72
90/* 73/*
@@ -92,11 +75,6 @@
92 */ 75 */
93#define ACPI_LODWORD(l) ((u32)(u64)(l)) 76#define ACPI_LODWORD(l) ((u32)(u64)(l))
94#define ACPI_HIDWORD(l) ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi)) 77#define ACPI_HIDWORD(l) ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))
95
96#define ACPI_GET_ADDRESS(a) (a)
97#define ACPI_STORE_ADDRESS(a,b) ((a)=(acpi_physical_address)(b))
98#define ACPI_VALID_ADDRESS(a) (a)
99#endif
100#endif 78#endif
101 79
102/* 80/*
@@ -134,15 +112,8 @@
134#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i) 112#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i)
135#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL) 113#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL)
136#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL) 114#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
137
138#if ACPI_MACHINE_WIDTH == 16
139#define ACPI_STORE_POINTER(d,s) ACPI_MOVE_32_TO_32(d,s)
140#define ACPI_PHYSADDR_TO_PTR(i) (void *)(i)
141#define ACPI_PTR_TO_PHYSADDR(i) (u32) ACPI_CAST_PTR (u8,(i))
142#else
143#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) 115#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
144#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) 116#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
145#endif
146 117
147#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED 118#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
148#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b))) 119#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b)))
@@ -223,28 +194,6 @@
223 194
224/* The hardware supports unaligned transfers, just do the little-endian move */ 195/* The hardware supports unaligned transfers, just do the little-endian move */
225 196
226#if ACPI_MACHINE_WIDTH == 16
227
228/* No 64-bit integers */
229/* 16-bit source, 16/32/64 destination */
230
231#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
232#define ACPI_MOVE_16_TO_32(d,s) *(u32 *)(void *)(d) = *(u16 *)(void *)(s)
233#define ACPI_MOVE_16_TO_64(d,s) ACPI_MOVE_16_TO_32(d,s)
234
235/* 32-bit source, 16/32/64 destination */
236
237#define ACPI_MOVE_32_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
238#define ACPI_MOVE_32_TO_32(d,s) *(u32 *)(void *)(d) = *(u32 *)(void *)(s)
239#define ACPI_MOVE_32_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s)
240
241/* 64-bit source, 16/32/64 destination */
242
243#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
244#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
245#define ACPI_MOVE_64_TO_64(d,s) ACPI_MOVE_32_TO_32(d,s)
246
247#else
248/* 16-bit source, 16/32/64 destination */ 197/* 16-bit source, 16/32/64 destination */
249 198
250#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s) 199#define ACPI_MOVE_16_TO_16(d,s) *(u16 *)(void *)(d) = *(u16 *)(void *)(s)
@@ -262,7 +211,6 @@
262#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */ 211#define ACPI_MOVE_64_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s) /* Truncate to 16 */
263#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */ 212#define ACPI_MOVE_64_TO_32(d,s) ACPI_MOVE_32_TO_32(d,s) /* Truncate to 32 */
264#define ACPI_MOVE_64_TO_64(d,s) *(u64 *)(void *)(d) = *(u64 *)(void *)(s) 213#define ACPI_MOVE_64_TO_64(d,s) *(u64 *)(void *)(d) = *(u64 *)(void *)(s)
265#endif
266 214
267#else 215#else
268/* 216/*
@@ -307,10 +255,7 @@
307 255
308/* Macros based on machine integer width */ 256/* Macros based on machine integer width */
309 257
310#if ACPI_MACHINE_WIDTH == 16 258#if ACPI_MACHINE_WIDTH == 32
311#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_16_TO_16(d,s)
312
313#elif ACPI_MACHINE_WIDTH == 32
314#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_32_TO_16(d,s) 259#define ACPI_MOVE_SIZE_TO_16(d,s) ACPI_MOVE_32_TO_16(d,s)
315 260
316#elif ACPI_MACHINE_WIDTH == 64 261#elif ACPI_MACHINE_WIDTH == 64
@@ -695,16 +640,6 @@
695#define ACPI_DEBUGGER_EXEC(a) 640#define ACPI_DEBUGGER_EXEC(a)
696#endif 641#endif
697 642
698/*
699 * For 16-bit code, we want to shrink some things even though
700 * we are using ACPI_DEBUG_OUTPUT to get the debug output
701 */
702#if ACPI_MACHINE_WIDTH == 16
703#undef ACPI_DEBUG_ONLY_MEMBERS
704#undef _VERBOSE_STRUCTURES
705#define ACPI_DEBUG_ONLY_MEMBERS(a)
706#endif
707
708#ifdef ACPI_DEBUG_OUTPUT 643#ifdef ACPI_DEBUG_OUTPUT
709/* 644/*
710 * 1) Set name to blanks 645 * 1) Set name to blanks
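With the 16-bit ACPI_MACHINE_WIDTH branches removed, the surviving ACPI_MOVE_* definitions in the unaligned-transfer case reduce to direct casted stores. A small standalone illustration of that pattern, with uint32_t/uint64_t standing in for the ACPICA u32/u64 typedefs (not part of the patch):

#include <inttypes.h>
#include <stdio.h>

/* Same shape as the kept ACPI_MOVE_32_TO_32/ACPI_MOVE_64_TO_64 macros:
 * a direct store through a cast, which is valid when the hardware
 * tolerates unaligned accesses. */
#define MOVE_32_TO_32(d, s) (*(uint32_t *)(void *)(d) = *(uint32_t *)(void *)(s))
#define MOVE_64_TO_64(d, s) (*(uint64_t *)(void *)(d) = *(uint64_t *)(void *)(s))

int main(void)
{
	uint32_t src32 = 0x12345678, dst32 = 0;
	uint64_t src64 = 0x1122334455667788ULL, dst64 = 0;

	MOVE_32_TO_32(&dst32, &src32);
	MOVE_64_TO_64(&dst64, &src64);

	printf("%08" PRIx32 " %016" PRIx64 "\n", dst32, dst64);
	return 0;
}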
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index b67da3636899..34bfae8a05f3 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h
index 83b52f9f899a..535b7e1c41bc 100644
--- a/include/acpi/acnamesp.h
+++ b/include/acpi/acnamesp.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -65,9 +65,13 @@
65#define ACPI_NS_ERROR_IF_FOUND 0x08 65#define ACPI_NS_ERROR_IF_FOUND 0x08
66#define ACPI_NS_PREFIX_IS_SCOPE 0x10 66#define ACPI_NS_PREFIX_IS_SCOPE 0x10
67#define ACPI_NS_EXTERNAL 0x20 67#define ACPI_NS_EXTERNAL 0x20
68#define ACPI_NS_TEMPORARY 0x40
68 69
69#define ACPI_NS_WALK_UNLOCK TRUE 70/* Flags for acpi_ns_walk_namespace */
70#define ACPI_NS_WALK_NO_UNLOCK FALSE 71
72#define ACPI_NS_WALK_NO_UNLOCK 0
73#define ACPI_NS_WALK_UNLOCK 0x01
74#define ACPI_NS_WALK_TEMP_NODES 0x02
71 75
72/* 76/*
73 * nsinit - Namespace initialization 77 * nsinit - Namespace initialization
@@ -82,7 +86,7 @@ acpi_status acpi_ns_initialize_devices(void);
82acpi_status acpi_ns_load_namespace(void); 86acpi_status acpi_ns_load_namespace(void);
83 87
84acpi_status 88acpi_status
85acpi_ns_load_table(struct acpi_table_desc *table_desc, 89acpi_ns_load_table(acpi_native_uint table_index,
86 struct acpi_namespace_node *node); 90 struct acpi_namespace_node *node);
87 91
88/* 92/*
@@ -92,7 +96,7 @@ acpi_status
92acpi_ns_walk_namespace(acpi_object_type type, 96acpi_ns_walk_namespace(acpi_object_type type,
93 acpi_handle start_object, 97 acpi_handle start_object,
94 u32 max_depth, 98 u32 max_depth,
95 u8 unlock_before_callback, 99 u32 flags,
96 acpi_walk_callback user_function, 100 acpi_walk_callback user_function,
97 void *context, void **return_value); 101 void *context, void **return_value);
98 102
@@ -106,11 +110,12 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
106 * nsparse - table parsing 110 * nsparse - table parsing
107 */ 111 */
108acpi_status 112acpi_status
109acpi_ns_parse_table(struct acpi_table_desc *table_desc, 113acpi_ns_parse_table(acpi_native_uint table_index,
110 struct acpi_namespace_node *scope); 114 struct acpi_namespace_node *start_node);
111 115
112acpi_status 116acpi_status
113acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc); 117acpi_ns_one_complete_parse(acpi_native_uint pass_number,
118 acpi_native_uint table_index);
114 119
115/* 120/*
116 * nsaccess - Top-level namespace access 121 * nsaccess - Top-level namespace access
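acpi_ns_walk_namespace() now takes a flags word instead of a single unlock boolean, so a caller can combine ACPI_NS_WALK_UNLOCK with ACPI_NS_WALK_TEMP_NODES. A sketch of such a caller inside the ACPICA core, assuming the usual acpi_walk_callback prototype of (handle, nesting level, context, return value) and that acnamesp.h is in scope; the function names are illustrative only:

static acpi_status count_nodes(acpi_handle obj_handle, u32 nesting_level,
			       void *context, void **return_value)
{
	(*(u32 *)context)++;	/* context points at a running node counter */
	return AE_OK;
}

static acpi_status count_all_nodes(u32 *count)
{
	*count = 0;

	/* Unlock the namespace around each callback and also visit the
	 * temporary nodes created by executing control methods. */
	return acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
				      ACPI_UINT32_MAX,
				      ACPI_NS_WALK_UNLOCK | ACPI_NS_WALK_TEMP_NODES,
				      count_nodes, count, NULL);
}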
diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h
index 8fdee31119f3..04e9735a6742 100644
--- a/include/acpi/acobject.h
+++ b/include/acpi/acobject.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,15 @@
52 * to the interpreter, and to keep track of the various handlers such as 52 * to the interpreter, and to keep track of the various handlers such as
53 * address space handlers and notify handlers. The object is a constant 53 * address space handlers and notify handlers. The object is a constant
54 * size in order to allow it to be cached and reused. 54 * size in order to allow it to be cached and reused.
55 *
56 * Note: The object is optimized to be aligned and will not work if it is
57 * byte-packed.
55 */ 58 */
59#if ACPI_MACHINE_WIDTH == 64
60#pragma pack(8)
61#else
62#pragma pack(4)
63#endif
56 64
57/******************************************************************************* 65/*******************************************************************************
58 * 66 *
@@ -101,7 +109,8 @@ struct acpi_object_common {
101ACPI_OBJECT_COMMON_HEADER}; 109ACPI_OBJECT_COMMON_HEADER};
102 110
103struct acpi_object_integer { 111struct acpi_object_integer {
104 ACPI_OBJECT_COMMON_HEADER acpi_integer value; 112 ACPI_OBJECT_COMMON_HEADER u8 fill[3]; /* Prevent warning on some compilers */
113 acpi_integer value;
105}; 114};
106 115
107/* 116/*
@@ -203,7 +212,9 @@ struct acpi_object_power_resource {
203}; 212};
204 213
205struct acpi_object_processor { 214struct acpi_object_processor {
206 ACPI_OBJECT_COMMON_HEADER u8 proc_id; 215 ACPI_OBJECT_COMMON_HEADER
216 /* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
217 u8 proc_id;
207 u8 length; 218 u8 length;
208 ACPI_COMMON_NOTIFY_INFO acpi_io_address address; 219 ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
209}; 220};
@@ -406,4 +417,6 @@ union acpi_descriptor {
406 union acpi_parse_object op; 417 union acpi_parse_object op;
407}; 418};
408 419
420#pragma pack()
421
409#endif /* _ACOBJECT_H */ 422#endif /* _ACOBJECT_H */
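The object union is now kept naturally aligned through #pragma pack keyed off ACPI_MACHINE_WIDTH instead of being byte-packed. A generic standalone illustration of that pack-by-width pattern (the structure here is an example, not one of the ACPICA objects):

#include <stdio.h>

#define MACHINE_WIDTH 64	/* stand-in for ACPI_MACHINE_WIDTH */

#if MACHINE_WIDTH == 64
#pragma pack(8)
#else
#pragma pack(4)
#endif

struct example_object {
	unsigned char descriptor_type;	/* small header fields ... */
	unsigned char type;
	unsigned short reference_count;
	unsigned long long value;	/* ... followed by a wide member */
};

#pragma pack()

int main(void)
{
	/* With pack(8) on a 64-bit build the wide member keeps its natural
	 * alignment instead of being byte-packed against the header. */
	printf("sizeof(struct example_object) = %zu\n",
	       sizeof(struct example_object));
	return 0;
}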
diff --git a/include/acpi/acopcode.h b/include/acpi/acopcode.h
index 7659a46bc432..e6f76a280a94 100644
--- a/include/acpi/acopcode.h
+++ b/include/acpi/acopcode.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -257,7 +257,7 @@
257#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE 257#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE
258#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER) 258#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER)
259#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE 259#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE
260#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION_OR_FIELD,ARGI_TARGETREF) 260#define ARGI_LOAD_OP ARGI_LIST2 (ARGI_REGION_OR_BUFFER,ARGI_TARGETREF)
261#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_ANYTYPE) 261#define ARGI_LOAD_TABLE_OP ARGI_LIST6 (ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_STRING, ARGI_ANYTYPE)
262#define ARGI_LOCAL0 ARG_NONE 262#define ARGI_LOCAL0 ARG_NONE
263#define ARGI_LOCAL1 ARG_NONE 263#define ARGI_LOCAL1 ARG_NONE
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 8d5039d0b430..7812267b577f 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acparser.h b/include/acpi/acparser.h
index 9d49d3c41cd9..85c358e21014 100644
--- a/include/acpi/acparser.h
+++ b/include/acpi/acparser.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index b9a39d1009bd..2e5f00d3ea0d 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index fdd10953b2b6..0d9f984a60a1 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -59,7 +59,6 @@ acpi_evaluate_reference(acpi_handle handle,
59 59
60#define ACPI_BUS_FILE_ROOT "acpi" 60#define ACPI_BUS_FILE_ROOT "acpi"
61extern struct proc_dir_entry *acpi_root_dir; 61extern struct proc_dir_entry *acpi_root_dir;
62extern struct fadt_descriptor acpi_fadt;
63 62
64enum acpi_bus_removal_type { 63enum acpi_bus_removal_type {
65 ACPI_BUS_REMOVAL_NORMAL = 0, 64 ACPI_BUS_REMOVAL_NORMAL = 0,
@@ -92,13 +91,12 @@ typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
92typedef int (*acpi_op_lock) (struct acpi_device * device, int type); 91typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
93typedef int (*acpi_op_start) (struct acpi_device * device); 92typedef int (*acpi_op_start) (struct acpi_device * device);
94typedef int (*acpi_op_stop) (struct acpi_device * device, int type); 93typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
95typedef int (*acpi_op_suspend) (struct acpi_device * device, int state); 94typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state);
96typedef int (*acpi_op_resume) (struct acpi_device * device, int state); 95typedef int (*acpi_op_resume) (struct acpi_device * device);
97typedef int (*acpi_op_scan) (struct acpi_device * device); 96typedef int (*acpi_op_scan) (struct acpi_device * device);
98typedef int (*acpi_op_bind) (struct acpi_device * device); 97typedef int (*acpi_op_bind) (struct acpi_device * device);
99typedef int (*acpi_op_unbind) (struct acpi_device * device); 98typedef int (*acpi_op_unbind) (struct acpi_device * device);
100typedef int (*acpi_op_match) (struct acpi_device * device, 99typedef int (*acpi_op_shutdown) (struct acpi_device * device);
101 struct acpi_driver * driver);
102 100
103struct acpi_bus_ops { 101struct acpi_bus_ops {
104 u32 acpi_op_add:1; 102 u32 acpi_op_add:1;
@@ -111,7 +109,7 @@ struct acpi_bus_ops {
111 u32 acpi_op_scan:1; 109 u32 acpi_op_scan:1;
112 u32 acpi_op_bind:1; 110 u32 acpi_op_bind:1;
113 u32 acpi_op_unbind:1; 111 u32 acpi_op_unbind:1;
114 u32 acpi_op_match:1; 112 u32 acpi_op_shutdown:1;
115 u32 reserved:21; 113 u32 reserved:21;
116}; 114};
117 115
@@ -126,16 +124,16 @@ struct acpi_device_ops {
126 acpi_op_scan scan; 124 acpi_op_scan scan;
127 acpi_op_bind bind; 125 acpi_op_bind bind;
128 acpi_op_unbind unbind; 126 acpi_op_unbind unbind;
129 acpi_op_match match; 127 acpi_op_shutdown shutdown;
130}; 128};
131 129
132struct acpi_driver { 130struct acpi_driver {
133 struct list_head node;
134 char name[80]; 131 char name[80];
135 char class[80]; 132 char class[80];
136 atomic_t references;
137 char *ids; /* Supported Hardware IDs */ 133 char *ids; /* Supported Hardware IDs */
138 struct acpi_device_ops ops; 134 struct acpi_device_ops ops;
135 struct device_driver drv;
136 struct module *owner;
139}; 137};
140 138
141/* 139/*
@@ -185,7 +183,7 @@ struct acpi_device_dir {
185 183
186typedef char acpi_bus_id[5]; 184typedef char acpi_bus_id[5];
187typedef unsigned long acpi_bus_address; 185typedef unsigned long acpi_bus_address;
188typedef char acpi_hardware_id[9]; 186typedef char acpi_hardware_id[15];
189typedef char acpi_unique_id[9]; 187typedef char acpi_unique_id[9];
190typedef char acpi_device_name[40]; 188typedef char acpi_device_name[40];
191typedef char acpi_device_class[20]; 189typedef char acpi_device_class[20];
@@ -296,11 +294,14 @@ struct acpi_device {
296 struct acpi_device_ops ops; 294 struct acpi_device_ops ops;
297 struct acpi_driver *driver; 295 struct acpi_driver *driver;
298 void *driver_data; 296 void *driver_data;
299 struct kobject kobj;
300 struct device dev; 297 struct device dev;
298 struct acpi_bus_ops bus_ops; /* workaround for the different hotplug code path */
299 enum acpi_bus_removal_type removal_type; /* indicates the type of removal */
301}; 300};
302 301
303#define acpi_driver_data(d) ((d)->driver_data) 302#define acpi_driver_data(d) ((d)->driver_data)
303#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
304#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
304 305
305/* 306/*
306 * Events 307 * Events
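With the match callback gone and struct acpi_driver embedding a struct device_driver, a Linux ACPI driver built against these declarations would look roughly as below. This is a sketch only: the "example" identifiers and the "EXP0001" hardware ID are hypothetical, the ops member names are taken from the acpi_op_* typedefs above, and a kernel build with <linux/module.h> and <acpi/acpi_bus.h> is assumed.

#include <linux/module.h>
#include <acpi/acpi_bus.h>

static int example_add(struct acpi_device *device)
{
	return 0;
}

static int example_remove(struct acpi_device *device, int type)
{
	return 0;
}

static int example_suspend(struct acpi_device *device, pm_message_t state)
{
	return 0;	/* suspend now receives a pm_message_t, not an int */
}

static int example_resume(struct acpi_device *device)
{
	return 0;	/* resume no longer takes a state argument */
}

static struct acpi_driver example_driver = {
	.name  = "example",
	.class = "example",
	.ids   = "EXP0001",	/* hypothetical hardware ID */
	.owner = THIS_MODULE,
	.ops = {
		.add     = example_add,
		.remove  = example_remove,
		.suspend = example_suspend,
		.resume  = example_resume,
		/* .match is gone; .shutdown is available instead */
	},
};

Registration would normally go through acpi_bus_register_driver(&example_driver), the long-standing interface declared elsewhere in acpi_bus.h.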
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 6a5bdcefec64..4dc8a5043ef0 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -36,13 +36,14 @@
36 36
37/* _HID definitions */ 37/* _HID definitions */
38 38
39#define ACPI_POWER_HID "ACPI_PWR" 39#define ACPI_POWER_HID "power_resource"
40#define ACPI_PROCESSOR_HID "ACPI_CPU" 40#define ACPI_PROCESSOR_HID "ACPI0007"
41#define ACPI_SYSTEM_HID "ACPI_SYS" 41#define ACPI_SYSTEM_HID "acpi_system"
42#define ACPI_THERMAL_HID "ACPI_THM" 42#define ACPI_THERMAL_HID "thermal"
43#define ACPI_BUTTON_HID_POWERF "ACPI_FPB" 43#define ACPI_BUTTON_HID_POWERF "button_power"
44#define ACPI_BUTTON_HID_SLEEPF "ACPI_FSB" 44#define ACPI_BUTTON_HID_SLEEPF "button_sleep"
45 45#define ACPI_VIDEO_HID "video"
46#define ACPI_BAY_HID "bay"
46/* -------------------------------------------------------------------------- 47/* --------------------------------------------------------------------------
47 PCI 48 PCI
48 -------------------------------------------------------------------------- */ 49 -------------------------------------------------------------------------- */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 0cd63bce0ae4..781394b9efe0 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -8,7 +8,7 @@
8 *****************************************************************************/ 8 *****************************************************************************/
9 9
10/* 10/*
11 * Copyright (C) 2000 - 2006, R. Byron Moore 11 * Copyright (C) 2000 - 2007, R. Byron Moore
12 * All rights reserved. 12 * All rights reserved.
13 * 13 *
14 * Redistribution and use in source and binary forms, with or without 14 * Redistribution and use in source and binary forms, with or without
@@ -85,7 +85,7 @@ acpi_status acpi_os_terminate(void);
85/* 85/*
86 * ACPI Table interfaces 86 * ACPI Table interfaces
87 */ 87 */
88acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *address); 88acpi_physical_address acpi_os_get_root_pointer(void);
89 89
90acpi_status 90acpi_status
91acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 91acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
@@ -143,9 +143,7 @@ void acpi_os_release_mutex(acpi_mutex handle);
143 */ 143 */
144void *acpi_os_allocate(acpi_size size); 144void *acpi_os_allocate(acpi_size size);
145 145
146acpi_status 146void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_native_uint length);
147acpi_os_map_memory(acpi_physical_address physical_address,
148 acpi_size size, void __iomem ** logical_address);
149 147
150void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size); 148void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
151 149
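acpi_os_map_memory() now returns the mapping directly (NULL on failure) instead of filling an out-parameter and returning a status. A sketch of the new calling convention, assuming a kernel context where <linux/io.h> and the ACPI headers are available; the helper name is illustrative:

#include <linux/io.h>
#include <acpi/acpi.h>

/* Sketch only: map a physical range, read one byte, unmap it */
static acpi_status peek_physical_byte(acpi_physical_address phys,
				      acpi_native_uint length, u8 *out)
{
	void __iomem *virt;

	virt = acpi_os_map_memory(phys, length);	/* NULL on failure */
	if (!virt)
		return AE_NO_MEMORY;

	*out = readb(virt);
	acpi_os_unmap_memory(virt, length);
	return AE_OK;
}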
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 81458767a90e..e08f7df85a4f 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,10 @@
51/* 51/*
52 * Global interfaces 52 * Global interfaces
53 */ 53 */
54acpi_status
55acpi_initialize_tables(struct acpi_table_desc *initial_storage,
56 u32 initial_table_count, u8 allow_resize);
57
54acpi_status acpi_initialize_subsystem(void); 58acpi_status acpi_initialize_subsystem(void);
55 59
56acpi_status acpi_enable_subsystem(u32 flags); 60acpi_status acpi_enable_subsystem(u32 flags);
@@ -92,30 +96,28 @@ void acpi_free(void *address);
92/* 96/*
93 * ACPI table manipulation interfaces 97 * ACPI table manipulation interfaces
94 */ 98 */
95acpi_status 99acpi_status acpi_reallocate_root_table(void);
96acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address); 100
101acpi_status acpi_find_root_pointer(acpi_native_uint * rsdp_address);
97 102
98acpi_status acpi_load_tables(void); 103acpi_status acpi_load_tables(void);
99 104
100acpi_status acpi_load_table(struct acpi_table_header *table_ptr); 105acpi_status acpi_load_table(struct acpi_table_header *table_ptr);
101 106
102acpi_status acpi_unload_table_id(acpi_table_type table_type, acpi_owner_id id); 107acpi_status acpi_unload_table_id(acpi_owner_id id);
103 108
104#ifdef ACPI_FUTURE_USAGE
105acpi_status acpi_unload_table(acpi_table_type table_type);
106acpi_status 109acpi_status
107acpi_get_table_header(acpi_table_type table_type, 110acpi_get_table_header(acpi_string signature,
108 u32 instance, struct acpi_table_header *out_table_header); 111 acpi_native_uint instance,
109#endif /* ACPI_FUTURE_USAGE */ 112 struct acpi_table_header *out_table_header);
110 113
111acpi_status 114acpi_status
112acpi_get_table(acpi_table_type table_type, 115acpi_get_table(acpi_string signature,
113 u32 instance, struct acpi_buffer *ret_buffer); 116 acpi_native_uint instance, struct acpi_table_header **out_table);
114 117
115acpi_status 118acpi_status
116acpi_get_firmware_table(acpi_string signature, 119acpi_get_table_by_index(acpi_native_uint table_index,
117 u32 instance, 120 struct acpi_table_header **out_table);
118 u32 flags, struct acpi_table_header **table_pointer);
119 121
120/* 122/*
121 * Namespace and name interfaces 123 * Namespace and name interfaces
@@ -310,9 +312,9 @@ acpi_resource_to_address64(struct acpi_resource *resource,
310/* 312/*
311 * Hardware (ACPI device) interfaces 313 * Hardware (ACPI device) interfaces
312 */ 314 */
313acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags); 315acpi_status acpi_get_register(u32 register_id, u32 * return_value);
314 316
315acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags); 317acpi_status acpi_set_register(u32 register_id, u32 value);
316 318
317acpi_status 319acpi_status
318acpi_set_firmware_waking_vector(acpi_physical_address physical_address); 320acpi_set_firmware_waking_vector(acpi_physical_address physical_address);
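acpi_get_table() is now looked up by signature and hands back a pointer to the mapped header rather than copying the table into a caller-supplied buffer. A sketch of a caller, assuming ACPI_SIG_FADT from the actbl.h hunk further below and a kernel build environment; the function name is illustrative:

#include <linux/kernel.h>
#include <acpi/acpi.h>

static void show_fadt_oem(void)
{
	struct acpi_table_header *fadt;
	acpi_status status;

	/* Signature-based lookup; instance 1 is the first matching table */
	status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
	if (ACPI_FAILURE(status))
		return;

	printk(KERN_INFO "FADT OEM ID: %.6s\n", fadt->oem_id);
}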
diff --git a/include/acpi/acresrc.h b/include/acpi/acresrc.h
index 80a3b33571b4..9486ab266a5e 100644
--- a/include/acpi/acresrc.h
+++ b/include/acpi/acresrc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acstruct.h b/include/acpi/acstruct.h
index 5e8095f0f78f..aeb4498e5e06 100644
--- a/include/acpi/acstruct.h
+++ b/include/acpi/acstruct.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -139,7 +139,8 @@ struct acpi_init_walk_info {
139 u16 buffer_init; 139 u16 buffer_init;
140 u16 package_init; 140 u16 package_init;
141 u16 object_count; 141 u16 object_count;
142 struct acpi_table_desc *table_desc; 142 acpi_owner_id owner_id;
143 acpi_native_uint table_index;
143}; 144};
144 145
145struct acpi_get_devices_info { 146struct acpi_get_devices_info {
diff --git a/include/acpi/actables.h b/include/acpi/actables.h
index 4dbaf02fe526..2b9f46f9da4d 100644
--- a/include/acpi/actables.h
+++ b/include/acpi/actables.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,105 +44,75 @@
44#ifndef __ACTABLES_H__ 44#ifndef __ACTABLES_H__
45#define __ACTABLES_H__ 45#define __ACTABLES_H__
46 46
47/* Used in acpi_tb_map_acpi_table for size parameter if table header is to be used */ 47acpi_status acpi_allocate_root_table(u32 initial_table_count);
48
49#define SIZE_IN_HEADER 0
50 48
51/* 49/*
52 * tbconvrt - Table conversion routines 50 * tbfadt - FADT parse/convert/validate
53 */ 51 */
54acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info); 52void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags);
55
56acpi_status acpi_tb_convert_table_fadt(void);
57 53
58acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info); 54void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
59
60u32
61acpi_tb_get_table_count(struct rsdp_descriptor *RSDP,
62 struct acpi_table_header *RSDT);
63 55
64/* 56/*
65 * tbget - Table "get" routines 57 * tbfind - find ACPI table
66 */ 58 */
67acpi_status 59acpi_status
68acpi_tb_get_table(struct acpi_pointer *address, 60acpi_tb_find_table(char *signature,
69 struct acpi_table_desc *table_info); 61 char *oem_id,
70 62 char *oem_table_id, acpi_native_uint * table_index);
71acpi_status
72acpi_tb_get_table_header(struct acpi_pointer *address,
73 struct acpi_table_header *return_header);
74
75acpi_status
76acpi_tb_get_table_body(struct acpi_pointer *address,
77 struct acpi_table_header *header,
78 struct acpi_table_desc *table_info);
79
80acpi_status
81acpi_tb_get_table_ptr(acpi_table_type table_type,
82 u32 instance, struct acpi_table_header **table_ptr_loc);
83
84acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address);
85
86void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address);
87
88acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr);
89 63
90/* 64/*
91 * tbgetall - get multiple required tables 65 * tbinstal - Table removal and deletion
92 */ 66 */
93acpi_status acpi_tb_get_required_tables(void); 67acpi_status acpi_tb_resize_root_table_list(void);
94 68
95/* 69acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
96 * tbinstall - Table installation
97 */
98acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info);
99 70
100acpi_status 71acpi_status
101acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type); 72acpi_tb_add_table(struct acpi_table_desc *table_desc,
73 acpi_native_uint * table_index);
102 74
103acpi_status 75acpi_status
104acpi_tb_init_table_descriptor(acpi_table_type table_type, 76acpi_tb_store_table(acpi_physical_address address,
105 struct acpi_table_desc *table_info); 77 struct acpi_table_header *table,
78 u32 length, u8 flags, acpi_native_uint * table_index);
106 79
107/* 80void acpi_tb_delete_table(struct acpi_table_desc *table_desc);
108 * tbremove - Table removal and deletion
109 */
110void acpi_tb_delete_all_tables(void);
111 81
112void acpi_tb_delete_tables_by_type(acpi_table_type type); 82void acpi_tb_terminate(void);
113 83
114void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc); 84void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index);
115 85
116struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc 86acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index);
117 *table_desc); 87
88acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index);
118 89
119/*
120 * tbxfroot - RSDP, RSDT utilities
121 */
122acpi_status 90acpi_status
123acpi_tb_find_table(char *signature, 91acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id);
124 char *oem_id,
125 char *oem_table_id, struct acpi_table_header **table_ptr);
126 92
127acpi_status acpi_tb_get_table_rsdt(void); 93u8 acpi_tb_is_table_loaded(acpi_native_uint table_index);
128 94
129acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp); 95void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded);
130 96
131/* 97/*
132 * tbutils - common table utilities 98 * tbutils - table manager utilities
133 */ 99 */
134acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc); 100u8 acpi_tb_tables_loaded(void);
135 101
136acpi_status 102void
137acpi_tb_verify_table_checksum(struct acpi_table_header *table_header); 103acpi_tb_print_table_header(acpi_physical_address address,
104 struct acpi_table_header *header);
138 105
139u8 acpi_tb_sum_table(void *buffer, u32 length); 106u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length);
140 107
141u8 acpi_tb_generate_checksum(struct acpi_table_header *table); 108acpi_status
109acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
142 110
143void acpi_tb_set_checksum(struct acpi_table_header *table); 111void
112acpi_tb_install_table(acpi_physical_address address,
113 u8 flags, char *signature, acpi_native_uint table_index);
144 114
145acpi_status 115acpi_status
146acpi_tb_validate_table_header(struct acpi_table_header *table_header); 116acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags);
147 117
148#endif /* __ACTABLES_H__ */ 118#endif /* __ACTABLES_H__ */
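The checksum helpers declared above (acpi_tb_checksum(), acpi_tb_verify_checksum()) implement the standard ACPI rule that the 8-bit sum of every byte in a table, including its Checksum field, must be zero. A standalone restatement of that rule in plain C (illustrative, not the ACPICA implementation):

#include <stddef.h>
#include <stdint.h>

/* Returns the 8-bit sum of a buffer; a valid ACPI table yields 0 */
static uint8_t byte_sum(const uint8_t *buffer, size_t length)
{
	uint8_t sum = 0;

	while (length--)
		sum = (uint8_t)(sum + *buffer++);

	return sum;
}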
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index b125ceed9cb7..09469e7db6a5 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,15 +48,15 @@
48 * Values for description table header signatures. Useful because they make 48 * Values for description table header signatures. Useful because they make
49 * it more difficult to inadvertently type in the wrong signature. 49 * it more difficult to inadvertently type in the wrong signature.
50 */ 50 */
51#define DSDT_SIG "DSDT" /* Differentiated System Description Table */ 51#define ACPI_SIG_DSDT "DSDT" /* Differentiated System Description Table */
52#define FADT_SIG "FACP" /* Fixed ACPI Description Table */ 52#define ACPI_SIG_FADT "FACP" /* Fixed ACPI Description Table */
53#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */ 53#define ACPI_SIG_FACS "FACS" /* Firmware ACPI Control Structure */
54#define PSDT_SIG "PSDT" /* Persistent System Description Table */ 54#define ACPI_SIG_PSDT "PSDT" /* Persistent System Description Table */
55#define RSDP_SIG "RSD PTR " /* Root System Description Pointer */ 55#define ACPI_SIG_RSDP "RSD PTR " /* Root System Description Pointer */
56#define RSDT_SIG "RSDT" /* Root System Description Table */ 56#define ACPI_SIG_RSDT "RSDT" /* Root System Description Table */
57#define XSDT_SIG "XSDT" /* Extended System Description Table */ 57#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */
58#define SSDT_SIG "SSDT" /* Secondary System Description Table */ 58#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */
59#define RSDP_NAME "RSDP" 59#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */
60 60
61/* 61/*
62 * All tables and structures must be byte-packed to match the ACPI 62 * All tables and structures must be byte-packed to match the ACPI
@@ -83,27 +83,29 @@
83 * 83 *
84 ******************************************************************************/ 84 ******************************************************************************/
85 85
86#define ACPI_TABLE_HEADER_DEF \
87 char signature[4]; /* ASCII table signature */\
88 u32 length; /* Length of table in bytes, including this header */\
89 u8 revision; /* ACPI Specification minor version # */\
90 u8 checksum; /* To make sum of entire table == 0 */\
91 char oem_id[6]; /* ASCII OEM identification */\
92 char oem_table_id[8]; /* ASCII OEM table identification */\
93 u32 oem_revision; /* OEM revision number */\
94 char asl_compiler_id[4]; /* ASCII ASL compiler vendor ID */\
95 u32 asl_compiler_revision; /* ASL compiler version */
96
97struct acpi_table_header { 86struct acpi_table_header {
98ACPI_TABLE_HEADER_DEF}; 87 char signature[ACPI_NAME_SIZE]; /* ASCII table signature */
88 u32 length; /* Length of table in bytes, including this header */
89 u8 revision; /* ACPI Specification minor version # */
90 u8 checksum; /* To make sum of entire table == 0 */
91 char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
92 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
93 u32 oem_revision; /* OEM revision number */
94 char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */
95 u32 asl_compiler_revision; /* ASL compiler version */
96};
99 97
100/* 98/*
101 * GAS - Generic Address Structure (ACPI 2.0+) 99 * GAS - Generic Address Structure (ACPI 2.0+)
100 *
101 * Note: Since this structure is used in the ACPI tables, it is byte aligned.
102 * If misalignment is not supported, access to the Address field must be
103 * performed with care.
102 */ 104 */
103struct acpi_generic_address { 105struct acpi_generic_address {
104 u8 address_space_id; /* Address space where struct or register exists */ 106 u8 space_id; /* Address space where struct or register exists */
105 u8 register_bit_width; /* Size in bits of given register */ 107 u8 bit_width; /* Size in bits of given register */
106 u8 register_bit_offset; /* Bit offset within the register */ 108 u8 bit_offset; /* Bit offset within the register */
107 u8 access_width; /* Minimum Access size (ACPI 3.0) */ 109 u8 access_width; /* Minimum Access size (ACPI 3.0) */
108 u64 address; /* 64-bit address of struct or register */ 110 u64 address; /* 64-bit address of struct or register */
109}; 111};
@@ -114,10 +116,10 @@ struct acpi_generic_address {
114 * 116 *
115 ******************************************************************************/ 117 ******************************************************************************/
116 118
117struct rsdp_descriptor { 119struct acpi_table_rsdp {
118 char signature[8]; /* ACPI signature, contains "RSD PTR " */ 120 char signature[8]; /* ACPI signature, contains "RSD PTR " */
119 u8 checksum; /* ACPI 1.0 checksum */ 121 u8 checksum; /* ACPI 1.0 checksum */
120 char oem_id[6]; /* OEM identification */ 122 char oem_id[ACPI_OEM_ID_SIZE]; /* OEM identification */
121 u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */ 123 u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
122 u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */ 124 u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
123 u32 length; /* Table length in bytes, including header (ACPI 2.0+) */ 125 u32 length; /* Table length in bytes, including header (ACPI 2.0+) */
@@ -134,12 +136,14 @@ struct rsdp_descriptor {
134 * 136 *
135 ******************************************************************************/ 137 ******************************************************************************/
136 138
137struct rsdt_descriptor { 139struct acpi_table_rsdt {
138 ACPI_TABLE_HEADER_DEF u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */ 140 struct acpi_table_header header; /* Common ACPI table header */
141 u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
139}; 142};
140 143
141struct xsdt_descriptor { 144struct acpi_table_xsdt {
142 ACPI_TABLE_HEADER_DEF u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */ 145 struct acpi_table_header header; /* Common ACPI table header */
146 u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
143}; 147};
144 148
145/******************************************************************************* 149/*******************************************************************************
@@ -148,36 +152,27 @@ struct xsdt_descriptor {
148 * 152 *
149 ******************************************************************************/ 153 ******************************************************************************/
150 154
151struct facs_descriptor { 155struct acpi_table_facs {
152 char signature[4]; /* ASCII table signature */ 156 char signature[4]; /* ASCII table signature */
153 u32 length; /* Length of structure, in bytes */ 157 u32 length; /* Length of structure, in bytes */
154 u32 hardware_signature; /* Hardware configuration signature */ 158 u32 hardware_signature; /* Hardware configuration signature */
155 u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */ 159 u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */
156 u32 global_lock; /* Global Lock for shared hardware resources */ 160 u32 global_lock; /* Global Lock for shared hardware resources */
157 161 u32 flags;
158 /* Flags (32 bits) */
159
160 u8 S4bios_f:1; /* 00: S4BIOS support is present */
161 u8:7; /* 01-07: Reserved, must be zero */
162 u8 reserved1[3]; /* 08-31: Reserved, must be zero */
163
164 u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ 162 u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */
165 u8 version; /* Version of this table (ACPI 2.0+) */ 163 u8 version; /* Version of this table (ACPI 2.0+) */
166 u8 reserved[31]; /* Reserved, must be zero */ 164 u8 reserved[31]; /* Reserved, must be zero */
167}; 165};
168 166
167/* Flag macros */
168
169#define ACPI_FACS_S4_BIOS_PRESENT (1) /* 00: S4BIOS support is present */
170
171/* Global lock flags */
172
169#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */ 173#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */
170#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */ 174#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */
171 175
172/*
173 * Common FACS - This is a version-independent FACS structure used for internal use only
174 */
175struct acpi_common_facs {
176 u32 *global_lock;
177 u64 *firmware_waking_vector;
178 u8 vector_width;
179};
180
181/******************************************************************************* 176/*******************************************************************************
182 * 177 *
183 * FADT - Fixed ACPI Description Table (Signature "FACP") 178 * FADT - Fixed ACPI Description Table (Signature "FACP")
@@ -186,121 +181,98 @@ struct acpi_common_facs {
186 181
187/* Fields common to all versions of the FADT */ 182/* Fields common to all versions of the FADT */
188 183
189#define ACPI_FADT_COMMON \ 184struct acpi_table_fadt {
190 ACPI_TABLE_HEADER_DEF \ 185 struct acpi_table_header header; /* Common ACPI table header */
191 u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \ 186 u32 facs; /* 32-bit physical address of FACS */
192 u32 V1_dsdt; /* 32-bit physical address of DSDT */ \ 187 u32 dsdt; /* 32-bit physical address of DSDT */
193 u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \ 188 u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
194 u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \ 189 u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */
195 u16 sci_int; /* System vector of SCI interrupt */ \ 190 u16 sci_interrupt; /* System vector of SCI interrupt */
196 u32 smi_cmd; /* Port address of SMI command port */ \ 191 u32 smi_command; /* 32-bit Port address of SMI command port */
197 u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \ 192 u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */
198 u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \ 193 u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */
199 u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \ 194 u8 S4bios_request; /* Value to write to SMI CMD to enter S4BIOS state */
200 u8 pstate_cnt; /* Processor performance state control*/ \ 195 u8 pstate_control; /* Processor performance state control */
201 u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a Event Reg Blk */ \ 196 u32 pm1a_event_block; /* 32-bit Port address of Power Mgt 1a Event Reg Blk */
202 u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b Event Reg Blk */ \ 197 u32 pm1b_event_block; /* 32-bit Port address of Power Mgt 1b Event Reg Blk */
203 u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \ 198 u32 pm1a_control_block; /* 32-bit Port address of Power Mgt 1a Control Reg Blk */
204 u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \ 199 u32 pm1b_control_block; /* 32-bit Port address of Power Mgt 1b Control Reg Blk */
205 u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \ 200 u32 pm2_control_block; /* 32-bit Port address of Power Mgt 2 Control Reg Blk */
206 u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \ 201 u32 pm_timer_block; /* 32-bit Port address of Power Mgt Timer Ctrl Reg Blk */
207 u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \ 202 u32 gpe0_block; /* 32-bit Port address of General Purpose Event 0 Reg Blk */
208 u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \ 203 u32 gpe1_block; /* 32-bit Port address of General Purpose Event 1 Reg Blk */
209 u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */ \ 204 u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */
210 u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */ \ 205 u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */
211 u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \ 206 u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */
212 u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \ 207 u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */
213 u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \ 208 u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */
214 u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \ 209 u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */
215 u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \ 210 u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */
216 u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \ 211 u8 cst_control; /* Support for the _CST object and C States change notification */
217 u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \ 212 u16 C2latency; /* Worst case HW latency to enter/exit C2 state */
218 u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \ 213 u16 C3latency; /* Worst case HW latency to enter/exit C3 state */
219 u16 flush_size; /* Processor's memory cache line width, in bytes */ \ 214 u16 flush_size; /* Processor's memory cache line width, in bytes */
220 u16 flush_stride; /* Number of flush strides that need to be read */ \ 215 u16 flush_stride; /* Number of flush strides that need to be read */
221 u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \ 216 u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
222 u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \ 217 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */
223 u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \ 218 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
224 u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \ 219 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
225 u8 century; /* Index to century in RTC CMOS RAM */ \ 220 u8 century; /* Index to century in RTC CMOS RAM */
226 u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ \ 221 u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */
227 u8 reserved2; /* Reserved, must be zero */ 222 u8 reserved; /* Reserved, must be zero */
228 223 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
229/* 224 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
230 * ACPI 2.0+ FADT
231 */
232struct fadt_descriptor {
233 ACPI_FADT_COMMON
234 /* Flags (32 bits) */
235 u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
236 u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
237 u8 proc_c1:1; /* 02: All processors support C1 state */
238 u8 plvl2_up:1; /* 03: C2 state works on MP system */
239 u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
240 u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
241 u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
242 u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
243 u8 tmr_val_ext:1; /* 08: tmr_val is 32 bits 0=24-bits */
244 u8 dock_cap:1; /* 09: Docking supported */
245 u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
246 u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
247 u8 headless:1; /* 12: No local video capabilities or local input devices */
248 u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
249
250 u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
251 u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
252 u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
253 u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
254 u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
255 u8 force_apic_physical_destination_mode:1; /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
256 u8:4; /* 20-23: Reserved, must be zero */
257 u8 reserved3; /* 24-31: Reserved, must be zero */
258
259 struct acpi_generic_address reset_register; /* Reset register address in GAS format */
260 u8 reset_value; /* Value to write to the reset_register port to reset the system */ 225 u8 reset_value; /* Value to write to the reset_register port to reset the system */
261 u8 reserved4[3]; /* These three bytes must be zero */ 226 u8 reserved4[3]; /* Reserved, must be zero */
262 u64 xfirmware_ctrl; /* 64-bit physical address of FACS */ 227 u64 Xfacs; /* 64-bit physical address of FACS */
263 u64 Xdsdt; /* 64-bit physical address of DSDT */ 228 u64 Xdsdt; /* 64-bit physical address of DSDT */
264 struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */ 229 struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */
265 struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */ 230 struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */
266 struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */ 231 struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */
267 struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */ 232 struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */
268 struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */ 233 struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */
269 struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */ 234 struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
270 struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */ 235 struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */
271 struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */ 236 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
272}; 237};
273 238
274/* 239/* FADT flags */
275 * "Down-revved" ACPI 2.0 FADT descriptor 240
276 * Defined here to allow compiler to generate the length of the struct 241#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */
277 */ 242#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */
278struct fadt_descriptor_rev2_minus { 243#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */
279 ACPI_FADT_COMMON u32 flags; 244#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */
280 struct acpi_generic_address reset_register; /* Reset register address in GAS format */ 245#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */
281 u8 reset_value; /* Value to write to the reset_register port to reset the system. */ 246#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */
282 u8 reserved7[3]; /* Reserved, must be zero */ 247#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */
283}; 248#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup stat not possible from S4 */
249#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */
250#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */
251#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */
252#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */
253#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */
254#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */
255#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
256#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
257#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
258#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */
259#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */
260#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
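The old bitfield flag layout is gone; callers now test bits in the plain 32-bit flags word using the macros above. A hedged example, not taken from this patch (it assumes the reworked struct acpi_table_fadt exposes a u32 flags member, as in the ACPICA layout these macros target):

static int fadt_supports_reset_register(const struct acpi_table_fadt *fadt)
{
	/* Bit 10 advertises a usable FADT RESET_REG */
	return (fadt->flags & ACPI_FADT_RESET_REGISTER) != 0;
}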
284 261
285/* 262/*
286 * ACPI 1.0 FADT 263 * FADT Preferred Power Management Profiles
287 * Defined here to allow compiler to generate the length of the struct
288 */ 264 */
289struct fadt_descriptor_rev1 { 265enum acpi_prefered_pm_profiles {
290 ACPI_FADT_COMMON u32 flags; 266 PM_UNSPECIFIED = 0,
267 PM_DESKTOP = 1,
268 PM_MOBILE = 2,
269 PM_WORKSTATION = 3,
270 PM_ENTERPRISE_SERVER = 4,
271 PM_SOHO_SERVER = 5,
272 PM_APPLIANCE_PC = 6
291}; 273};
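The profile values move from bare #defines to an enum, so the FADT's preferred-profile byte can be decoded directly against them. Illustrative sketch only, not part of the patch (reading the byte from a field named preferred_profile is an assumption based on the reworked struct acpi_table_fadt):

static const char *pm_profile_name(u8 profile)
{
	switch (profile) {
	case PM_DESKTOP:           return "desktop";
	case PM_MOBILE:            return "mobile";
	case PM_WORKSTATION:       return "workstation";
	case PM_ENTERPRISE_SERVER: return "enterprise server";
	case PM_SOHO_SERVER:       return "SOHO server";
	case PM_APPLIANCE_PC:      return "appliance PC";
	case PM_UNSPECIFIED:
	default:                   return "unspecified";
	}
}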
292 274
293/* FADT: Prefered Power Management Profiles */ 275/* FADT Boot Arch Flags */
294
295#define PM_UNSPECIFIED 0
296#define PM_DESKTOP 1
297#define PM_MOBILE 2
298#define PM_WORKSTATION 3
299#define PM_ENTERPRISE_SERVER 4
300#define PM_SOHO_SERVER 5
301#define PM_APPLIANCE_PC 6
302
303/* FADT: Boot Arch Flags */
304 276
305#define BAF_LEGACY_DEVICES 0x0001 277#define BAF_LEGACY_DEVICES 0x0001
306#define BAF_8042_KEYBOARD_CONTROLLER 0x0002 278#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
@@ -312,59 +284,12 @@ struct fadt_descriptor_rev1 {
312 284
313#pragma pack() 285#pragma pack()
314 286
315/* 287#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
316 * This macro is temporary until the table bitfield flag definitions
317 * are removed and replaced by a Flags field.
318 */
319#define ACPI_FLAG_OFFSET(d,f,o) (u8) (ACPI_OFFSET (d,f) + \
320 sizeof(((d *)0)->f) + o)
321/*
322 * Get the remaining ACPI tables
323 */
324#include "actbl1.h"
325 288
326/* 289/*
327 * ACPI Table information. We save the table address, length, 290 * Get the remaining ACPI tables
328 * and type of memory allocation (mapped or allocated) for each
329 * table for 1) when we exit, and 2) if a new table is installed
330 */ 291 */
331#define ACPI_MEM_NOT_ALLOCATED 0
332#define ACPI_MEM_ALLOCATED 1
333#define ACPI_MEM_MAPPED 2
334
335/* Definitions for the Flags bitfield member of struct acpi_table_support */
336
337#define ACPI_TABLE_SINGLE 0x00
338#define ACPI_TABLE_MULTIPLE 0x01
339#define ACPI_TABLE_EXECUTABLE 0x02
340
341#define ACPI_TABLE_ROOT 0x00
342#define ACPI_TABLE_PRIMARY 0x10
343#define ACPI_TABLE_SECONDARY 0x20
344#define ACPI_TABLE_ALL 0x30
345#define ACPI_TABLE_TYPE_MASK 0x30
346
347/* Data about each known table type */
348
349struct acpi_table_support {
350 char *name;
351 char *signature;
352 void **global_ptr;
353 u8 sig_length;
354 u8 flags;
355};
356
357extern u8 acpi_fadt_is_v1; /* is set to 1 if FADT is revision 1,
358 * needed for certain workarounds */
359/* Macros used to generate offsets to specific table fields */
360
361#define ACPI_FACS_OFFSET(f) (u8) ACPI_OFFSET (struct facs_descriptor,f)
362#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct fadt_descriptor, f)
363#define ACPI_GAS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_generic_address,f)
364#define ACPI_HDR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_header,f)
365#define ACPI_RSDP_OFFSET(f) (u8) ACPI_OFFSET (struct rsdp_descriptor,f)
366 292
367#define ACPI_FADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct fadt_descriptor,f,o) 293#include <acpi/actbl1.h>
368#define ACPI_FACS_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct facs_descriptor,f,o)
369 294
370#endif /* __ACTBL_H__ */ 295#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 745a6445a4f9..4e5d3ca53a8e 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,7 @@
61#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ 61#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
62#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ 62#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
63#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ 63#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
64#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
64#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ 65#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
65#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ 66#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
66#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ 67#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
@@ -73,12 +74,6 @@
73#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ 74#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
74#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ 75#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
75 76
76/* Legacy names */
77
78#define APIC_SIG "APIC" /* Multiple APIC Description Table */
79#define BOOT_SIG "BOOT" /* Simple Boot Flag Table */
80#define SBST_SIG "SBST" /* Smart Battery Specification Table */
81
82/* 77/*
83 * All tables must be byte-packed to match the ACPI specification, since 78 * All tables must be byte-packed to match the ACPI specification, since
84 * the tables are provided by the system BIOS. 79 * the tables are provided by the system BIOS.
@@ -91,31 +86,43 @@
91 * portable, so do not use any other bitfield types. 86 * portable, so do not use any other bitfield types.
92 */ 87 */
93 88
89/* Common Sub-table header (used in MADT, SRAT, etc.) */
90
91struct acpi_subtable_header {
92 u8 type;
93 u8 length;
94};
95
94/******************************************************************************* 96/*******************************************************************************
95 * 97 *
96 * ASF - Alert Standard Format table (Signature "ASF!") 98 * ASF - Alert Standard Format table (Signature "ASF!")
97 * 99 *
100 * Conforms to the Alert Standard Format Specification V2.0, 23 April 2003
101 *
98 ******************************************************************************/ 102 ******************************************************************************/
99 103
100struct acpi_table_asf { 104struct acpi_table_asf {
101ACPI_TABLE_HEADER_DEF}; 105 struct acpi_table_header header; /* Common ACPI table header */
106};
102 107
103#define ACPI_ASF_HEADER_DEF \ 108/* ASF subtable header */
104 u8 type; \
105 u8 reserved; \
106 u16 length;
107 109
108struct acpi_asf_header { 110struct acpi_asf_header {
109ACPI_ASF_HEADER_DEF}; 111 u8 type;
112 u8 reserved;
113 u16 length;
114};
110 115
111/* Values for Type field */ 116/* Values for Type field above */
112 117
113#define ASF_INFO 0 118enum acpi_asf_type {
114#define ASF_ALERT 1 119 ACPI_ASF_TYPE_INFO = 0,
115#define ASF_CONTROL 2 120 ACPI_ASF_TYPE_ALERT = 1,
116#define ASF_BOOT 3 121 ACPI_ASF_TYPE_CONTROL = 2,
117#define ASF_ADDRESS 4 122 ACPI_ASF_TYPE_BOOT = 3,
118#define ASF_RESERVED 5 123 ACPI_ASF_TYPE_ADDRESS = 4,
124 ACPI_ASF_TYPE_RESERVED = 5
125};
119 126
120/* 127/*
121 * ASF subtables 128 * ASF subtables
@@ -124,7 +131,8 @@ ACPI_ASF_HEADER_DEF};
124/* 0: ASF Information */ 131/* 0: ASF Information */
125 132
126struct acpi_asf_info { 133struct acpi_asf_info {
127 ACPI_ASF_HEADER_DEF u8 min_reset_value; 134 struct acpi_asf_header header;
135 u8 min_reset_value;
128 u8 min_poll_interval; 136 u8 min_poll_interval;
129 u16 system_id; 137 u16 system_id;
130 u32 mfg_id; 138 u32 mfg_id;
@@ -135,26 +143,49 @@ struct acpi_asf_info {
135/* 1: ASF Alerts */ 143/* 1: ASF Alerts */
136 144
137struct acpi_asf_alert { 145struct acpi_asf_alert {
138 ACPI_ASF_HEADER_DEF u8 assert_mask; 146 struct acpi_asf_header header;
147 u8 assert_mask;
139 u8 deassert_mask; 148 u8 deassert_mask;
140 u8 alerts; 149 u8 alerts;
141 u8 data_length; 150 u8 data_length;
142 u8 array[1]; 151};
152
153struct acpi_asf_alert_data {
154 u8 address;
155 u8 command;
156 u8 mask;
157 u8 value;
158 u8 sensor_type;
159 u8 type;
160 u8 offset;
161 u8 source_type;
162 u8 severity;
163 u8 sensor_number;
164 u8 entity;
165 u8 instance;
143}; 166};
144 167
145/* 2: ASF Remote Control */ 168/* 2: ASF Remote Control */
146 169
147struct acpi_asf_remote { 170struct acpi_asf_remote {
148 ACPI_ASF_HEADER_DEF u8 controls; 171 struct acpi_asf_header header;
172 u8 controls;
149 u8 data_length; 173 u8 data_length;
150 u16 reserved2; 174 u16 reserved2;
151 u8 array[1]; 175};
176
177struct acpi_asf_control_data {
178 u8 function;
179 u8 address;
180 u8 command;
181 u8 value;
152}; 182};
153 183
154/* 3: ASF RMCP Boot Options */ 184/* 3: ASF RMCP Boot Options */
155 185
156struct acpi_asf_rmcp { 186struct acpi_asf_rmcp {
157 ACPI_ASF_HEADER_DEF u8 capabilities[7]; 187 struct acpi_asf_header header;
188 u8 capabilities[7];
158 u8 completion_code; 189 u8 completion_code;
159 u32 enterprise_id; 190 u32 enterprise_id;
160 u8 command; 191 u8 command;
@@ -166,9 +197,9 @@ struct acpi_asf_rmcp {
166/* 4: ASF Address */ 197/* 4: ASF Address */
167 198
168struct acpi_asf_address { 199struct acpi_asf_address {
169 ACPI_ASF_HEADER_DEF u8 eprom_address; 200 struct acpi_asf_header header;
201 u8 eprom_address;
170 u8 devices; 202 u8 devices;
171 u8 smbus_addresses[1];
172}; 203};
173 204
174/******************************************************************************* 205/*******************************************************************************
@@ -178,7 +209,8 @@ struct acpi_asf_address {
178 ******************************************************************************/ 209 ******************************************************************************/
179 210
180struct acpi_table_boot { 211struct acpi_table_boot {
181 ACPI_TABLE_HEADER_DEF u8 cmos_index; /* Index in CMOS RAM for the boot register */ 212 struct acpi_table_header header; /* Common ACPI table header */
213 u8 cmos_index; /* Index in CMOS RAM for the boot register */
182 u8 reserved[3]; 214 u8 reserved[3];
183}; 215};
184 216
@@ -189,7 +221,8 @@ struct acpi_table_boot {
189 ******************************************************************************/ 221 ******************************************************************************/
190 222
191struct acpi_table_cpep { 223struct acpi_table_cpep {
192 ACPI_TABLE_HEADER_DEF u64 reserved; 224 struct acpi_table_header header; /* Common ACPI table header */
225 u64 reserved;
193}; 226};
194 227
195/* Subtable */ 228/* Subtable */
@@ -197,9 +230,9 @@ struct acpi_table_cpep {
197struct acpi_cpep_polling { 230struct acpi_cpep_polling {
198 u8 type; 231 u8 type;
199 u8 length; 232 u8 length;
200 u8 processor_id; /* Processor ID */ 233 u8 id; /* Processor ID */
201 u8 processor_eid; /* Processor EID */ 234 u8 eid; /* Processor EID */
202 u32 polling_interval; /* Polling interval (msec) */ 235 u32 interval; /* Polling interval (msec) */
203}; 236};
204 237
205/******************************************************************************* 238/*******************************************************************************
@@ -209,196 +242,281 @@ struct acpi_cpep_polling {
209 ******************************************************************************/ 242 ******************************************************************************/
210 243
211struct acpi_table_dbgp { 244struct acpi_table_dbgp {
212 ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ 245 struct acpi_table_header header; /* Common ACPI table header */
246 u8 type; /* 0=full 16550, 1=subset of 16550 */
213 u8 reserved[3]; 247 u8 reserved[3];
214 struct acpi_generic_address debug_port; 248 struct acpi_generic_address debug_port;
215}; 249};
216 250
217/******************************************************************************* 251/*******************************************************************************
218 * 252 *
219 * ECDT - Embedded Controller Boot Resources Table 253 * DMAR - DMA Remapping table
220 * 254 *
221 ******************************************************************************/ 255 ******************************************************************************/
222 256
223struct ec_boot_resources { 257struct acpi_table_dmar {
224 ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */ 258 struct acpi_table_header header; /* Common ACPI table header */
225 struct acpi_generic_address ec_data; /* Address of EC data register */ 259 u8 width; /* Host Address Width */
226 u32 uid; /* Unique ID - must be same as the EC _UID method */ 260 u8 reserved[11];
227 u8 gpe_bit; /* The GPE for the EC */ 261};
228 u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */ 262
263/* DMAR subtable header */
264
265struct acpi_dmar_header {
266 u16 type;
267 u16 length;
268 u8 flags;
269 u8 reserved[3];
270};
271
272/* Values for subtable type in struct acpi_dmar_header */
273
274enum acpi_dmar_type {
275 ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
276 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
277 ACPI_DMAR_TYPE_RESERVED = 2 /* 2 and greater are reserved */
278};
279
280struct acpi_dmar_device_scope {
281 u8 entry_type;
282 u8 length;
283 u8 segment;
284 u8 bus;
285};
286
287/* Values for entry_type in struct acpi_dmar_device_scope */
288
289enum acpi_dmar_scope_type {
290 ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
291 ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
292 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
293 ACPI_DMAR_SCOPE_TYPE_RESERVED = 3 /* 3 and greater are reserved */
294};
295
296/*
297 * DMAR Sub-tables, correspond to Type in struct acpi_dmar_header
298 */
299
300/* 0: Hardware Unit Definition */
301
302struct acpi_dmar_hardware_unit {
303 struct acpi_dmar_header header;
304 u64 address; /* Register Base Address */
305};
306
307/* Flags */
308
309#define ACPI_DMAR_INCLUDE_ALL (1)
310
311/* 1: Reserved Memory Definition */
312
313struct acpi_dmar_reserved_memory {
314 struct acpi_dmar_header header;
315 u64 address; /* 4_k aligned base address */
316 u64 end_address; /* 4_k aligned limit address */
229}; 317};
230 318
319/* Flags */
320
321#define ACPI_DMAR_ALLOW_ALL (1)
322
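The DMAR layout above is a fixed header followed by variable-length remapping structures, each introduced by struct acpi_dmar_header. A walking sketch, illustrative only and not part of this patch (no validation beyond basic length sanity):

static void dmar_walk(struct acpi_table_dmar *dmar)
{
	u8 *p = (u8 *)dmar + sizeof(*dmar);
	u8 *end = (u8 *)dmar + dmar->header.length;

	while (p + sizeof(struct acpi_dmar_header) <= end) {
		struct acpi_dmar_header *h = (struct acpi_dmar_header *)p;

		if (h->length < sizeof(*h))
			break;	/* malformed entry, stop */

		if (h->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			struct acpi_dmar_hardware_unit *drhd = (void *)h;
			/* drhd->address is the remapping register base;
			 * h->flags & ACPI_DMAR_INCLUDE_ALL means this unit
			 * covers devices not claimed by other units */
			(void)drhd;
		}
		p += h->length;
	}
}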
231/******************************************************************************* 323/*******************************************************************************
232 * 324 *
233 * HPET - High Precision Event Timer table 325 * ECDT - Embedded Controller Boot Resources Table
234 * 326 *
235 ******************************************************************************/ 327 ******************************************************************************/
236 328
237struct acpi_hpet_table { 329struct acpi_table_ecdt {
238 ACPI_TABLE_HEADER_DEF u32 hardware_id; /* Hardware ID of event timer block */ 330 struct acpi_table_header header; /* Common ACPI table header */
239 struct acpi_generic_address base_address; /* Address of event timer block */ 331 struct acpi_generic_address control; /* Address of EC command/status register */
240 u8 hpet_number; /* HPET sequence number */ 332 struct acpi_generic_address data; /* Address of EC data register */
241 u16 clock_tick; /* Main counter min tick, periodic mode */ 333 u32 uid; /* Unique ID - must be same as the EC _UID method */
242 u8 attributes; 334 u8 gpe; /* The GPE for the EC */
335 u8 id[1]; /* Full namepath of the EC in the ACPI namespace */
243}; 336};
244 337
245#if 0 /* HPET flags to be converted to macros */
246struct { /* Flags (8 bits) */
247 u8 page_protect:1; /* 00: No page protection */
248 u8 page_protect4:1; /* 01: 4_kB page protected */
249 u8 page_protect64:1; /* 02: 64_kB page protected */
250 u8:5; /* 03-07: Reserved, must be zero */
251} flags;
252#endif
253
254/******************************************************************************* 338/*******************************************************************************
255 * 339 *
256 * MADT - Multiple APIC Description Table 340 * HPET - High Precision Event Timer table
257 * 341 *
258 ******************************************************************************/ 342 ******************************************************************************/
259 343
260struct multiple_apic_table { 344struct acpi_table_hpet {
261 ACPI_TABLE_HEADER_DEF u32 local_apic_address; /* Physical address of local APIC */ 345 struct acpi_table_header header; /* Common ACPI table header */
262 346 u32 id; /* Hardware ID of event timer block */
263 /* Flags (32 bits) */ 347 struct acpi_generic_address address; /* Address of event timer block */
264 348 u8 sequence; /* HPET sequence number */
265 u8 PCATcompat:1; /* 00: System also has dual 8259s */ 349 u16 minimum_tick; /* Main counter min tick, periodic mode */
266 u8:7; /* 01-07: Reserved, must be zero */ 350 u8 flags;
267 u8 reserved1[3]; /* 08-31: Reserved, must be zero */
268}; 351};
269 352
270/* Values for MADT PCATCompat */ 353/*! Flags */
271 354
272#define DUAL_PIC 0 355#define ACPI_HPET_PAGE_PROTECT (1) /* 00: No page protection */
273#define MULTIPLE_APIC 1 356#define ACPI_HPET_PAGE_PROTECT_4 (1<<1) /* 01: 4KB page protected */
357#define ACPI_HPET_PAGE_PROTECT_64 (1<<2) /* 02: 64KB page protected */
274 358
275/* Common MADT Sub-table header */ 359/*! [End] no source code translation !*/
276 360
277#define APIC_HEADER_DEF \ 361/*******************************************************************************
278 u8 type; \ 362 *
279 u8 length; 363 * MADT - Multiple APIC Description Table
280 364 *
281struct apic_header { 365 ******************************************************************************/
282APIC_HEADER_DEF};
283
284/* Values for Type in struct apic_header */
285 366
286#define APIC_PROCESSOR 0 367struct acpi_table_madt {
287#define APIC_IO 1 368 struct acpi_table_header header; /* Common ACPI table header */
288#define APIC_XRUPT_OVERRIDE 2 369 u32 address; /* Physical address of local APIC */
289#define APIC_NMI 3 370 u32 flags;
290#define APIC_LOCAL_NMI 4 371};
291#define APIC_ADDRESS_OVERRIDE 5
292#define APIC_IO_SAPIC 6
293#define APIC_LOCAL_SAPIC 7
294#define APIC_XRUPT_SOURCE 8
295#define APIC_RESERVED 9 /* 9 and greater are reserved */
296 372
297/* Flag definitions for MADT sub-tables */ 373/* Flags */
298 374
299#define ACPI_MADT_IFLAGS /* INTI flags (16 bits) */ \ 375#define ACPI_MADT_PCAT_COMPAT (1) /* 00: System also has dual 8259s */
300 u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
301 u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
302 u8 : 4; /* 04-07: Reserved, must be zero */\
303 u8 reserved1; /* 08-15: Reserved, must be zero */
304 376
305#define ACPI_MADT_LFLAGS /* Local Sapic flags (32 bits) */ \ 377/* Values for PCATCompat flag */
306 u8 processor_enabled: 1; /* 00: Processor is usable if set */\
307 u8 : 7; /* 01-07: Reserved, must be zero */\
308 u8 reserved2[3]; /* 08-31: Reserved, must be zero */
309 378
310/* Values for MPS INTI flags */ 379#define ACPI_MADT_DUAL_PIC 0
380#define ACPI_MADT_MULTIPLE_APIC 1
311 381
312#define POLARITY_CONFORMS 0 382/* Values for subtable type in struct acpi_subtable_header */
313#define POLARITY_ACTIVE_HIGH 1
314#define POLARITY_RESERVED 2
315#define POLARITY_ACTIVE_LOW 3
316 383
317#define TRIGGER_CONFORMS 0 384enum acpi_madt_type {
318#define TRIGGER_EDGE 1 385 ACPI_MADT_TYPE_LOCAL_APIC = 0,
319#define TRIGGER_RESERVED 2 386 ACPI_MADT_TYPE_IO_APIC = 1,
320#define TRIGGER_LEVEL 3 387 ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2,
388 ACPI_MADT_TYPE_NMI_SOURCE = 3,
389 ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4,
390 ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5,
391 ACPI_MADT_TYPE_IO_SAPIC = 6,
392 ACPI_MADT_TYPE_LOCAL_SAPIC = 7,
393 ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
394 ACPI_MADT_TYPE_RESERVED = 9 /* 9 and greater are reserved */
395};
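With APIC_HEADER_DEF replaced by the shared struct acpi_subtable_header, MADT consumers walk the subtables generically and switch on the enum above. A sketch, illustrative only and not part of this patch:

static void madt_walk(struct acpi_table_madt *madt)
{
	u8 *p = (u8 *)madt + sizeof(*madt);
	u8 *end = (u8 *)madt + madt->header.length;

	while (p + sizeof(struct acpi_subtable_header) <= end) {
		struct acpi_subtable_header *sub = (struct acpi_subtable_header *)p;

		if (sub->length < sizeof(*sub))
			break;	/* malformed subtable */

		if (sub->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			struct acpi_madt_local_apic *lapic = (void *)sub;
			/* lapic->id is the local APIC id; lapic->lapic_flags
			 * carries ACPI_MADT_ENABLED (defined further below) */
			(void)lapic;
		}
		p += sub->length;
	}
}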
321 396
322/* 397/*
323 * MADT Sub-tables, correspond to Type in struct apic_header 398 * MADT Sub-tables, correspond to Type in struct acpi_subtable_header
324 */ 399 */
325 400
326/* 0: processor APIC */ 401/* 0: Processor Local APIC */
327 402
328struct madt_processor_apic { 403struct acpi_madt_local_apic {
329 APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */ 404 struct acpi_subtable_header header;
330 u8 local_apic_id; /* Processor's local APIC id */ 405 u8 processor_id; /* ACPI processor id */
331 ACPI_MADT_LFLAGS}; 406 u8 id; /* Processor's local APIC id */
407 u32 lapic_flags;
408};
332 409
333/* 1: IO APIC */ 410/* 1: IO APIC */
334 411
335struct madt_io_apic { 412struct acpi_madt_io_apic {
336 APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */ 413 struct acpi_subtable_header header;
414 u8 id; /* I/O APIC ID */
337 u8 reserved; /* Reserved - must be zero */ 415 u8 reserved; /* Reserved - must be zero */
338 u32 address; /* APIC physical address */ 416 u32 address; /* APIC physical address */
339 u32 interrupt; /* Global system interrupt where INTI lines start */ 417 u32 global_irq_base; /* Global system interrupt where INTI lines start */
340}; 418};
341 419
342/* 2: Interrupt Override */ 420/* 2: Interrupt Override */
343 421
344struct madt_interrupt_override { 422struct acpi_madt_interrupt_override {
345 APIC_HEADER_DEF u8 bus; /* 0 - ISA */ 423 struct acpi_subtable_header header;
346 u8 source; /* Interrupt source (IRQ) */ 424 u8 bus; /* 0 - ISA */
347 u32 interrupt; /* Global system interrupt */ 425 u8 source_irq; /* Interrupt source (IRQ) */
348 ACPI_MADT_IFLAGS}; 426 u32 global_irq; /* Global system interrupt */
427 u16 inti_flags;
428};
349 429
350/* 3: NMI Sources */ 430/* 3: NMI Source */
351 431
352struct madt_nmi_source { 432struct acpi_madt_nmi_source {
353 APIC_HEADER_DEF ACPI_MADT_IFLAGS u32 interrupt; /* Global system interrupt */ 433 struct acpi_subtable_header header;
434 u16 inti_flags;
435 u32 global_irq; /* Global system interrupt */
354}; 436};
355 437
356/* 4: Local APIC NMI */ 438/* 4: Local APIC NMI */
357 439
358struct madt_local_apic_nmi { 440struct acpi_madt_local_apic_nmi {
359 APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */ 441 struct acpi_subtable_header header;
360 ACPI_MADT_IFLAGS u8 lint; /* LINTn to which NMI is connected */ 442 u8 processor_id; /* ACPI processor id */
443 u16 inti_flags;
444 u8 lint; /* LINTn to which NMI is connected */
361}; 445};
362 446
363/* 5: Address Override */ 447/* 5: Address Override */
364 448
365struct madt_address_override { 449struct acpi_madt_local_apic_override {
366 APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */ 450 struct acpi_subtable_header header;
451 u16 reserved; /* Reserved, must be zero */
367 u64 address; /* APIC physical address */ 452 u64 address; /* APIC physical address */
368}; 453};
369 454
370/* 6: I/O Sapic */ 455/* 6: I/O Sapic */
371 456
372struct madt_io_sapic { 457struct acpi_madt_io_sapic {
373 APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */ 458 struct acpi_subtable_header header;
459 u8 id; /* I/O SAPIC ID */
374 u8 reserved; /* Reserved, must be zero */ 460 u8 reserved; /* Reserved, must be zero */
375 u32 interrupt_base; /* Glocal interrupt for SAPIC start */ 461 u32 global_irq_base; /* Global interrupt for SAPIC start */
376 u64 address; /* SAPIC physical address */ 462 u64 address; /* SAPIC physical address */
377}; 463};
378 464
379/* 7: Local Sapic */ 465/* 7: Local Sapic */
380 466
381struct madt_local_sapic { 467struct acpi_madt_local_sapic {
382 APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */ 468 struct acpi_subtable_header header;
383 u8 local_sapic_id; /* SAPIC ID */ 469 u8 processor_id; /* ACPI processor id */
384 u8 local_sapic_eid; /* SAPIC EID */ 470 u8 id; /* SAPIC ID */
471 u8 eid; /* SAPIC EID */
385 u8 reserved[3]; /* Reserved, must be zero */ 472 u8 reserved[3]; /* Reserved, must be zero */
386 ACPI_MADT_LFLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */ 473 u32 lapic_flags;
387 char processor_uIDstring[1]; /* String UID - ACPI 3.0 */ 474 u32 uid; /* Numeric UID - ACPI 3.0 */
475 char uid_string[1]; /* String UID - ACPI 3.0 */
388}; 476};
389 477
390/* 8: Platform Interrupt Source */ 478/* 8: Platform Interrupt Source */
391 479
392struct madt_interrupt_source { 480struct acpi_madt_interrupt_source {
393 APIC_HEADER_DEF ACPI_MADT_IFLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */ 481 struct acpi_subtable_header header;
394 u8 processor_id; /* Processor ID */ 482 u16 inti_flags;
395 u8 processor_eid; /* Processor EID */ 483 u8 type; /* 1=PMI, 2=INIT, 3=corrected */
484 u8 id; /* Processor ID */
485 u8 eid; /* Processor EID */
396 u8 io_sapic_vector; /* Vector value for PMI interrupts */ 486 u8 io_sapic_vector; /* Vector value for PMI interrupts */
397 u32 interrupt; /* Global system interrupt */ 487 u32 global_irq; /* Global system interrupt */
398 u32 flags; /* Interrupt Source Flags */ 488 u32 flags; /* Interrupt Source Flags */
399}; 489};
400 490
401#ifdef DUPLICATE_DEFINITION_WITH_LINUX_ACPI_H 491/* Flags field above */
492
493#define ACPI_MADT_CPEI_OVERRIDE (1)
494
495/*
496 * Common flags fields for MADT subtables
497 */
498
499/* MADT Local APIC flags (lapic_flags) */
500
501#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
502
503/* MADT MPS INTI flags (inti_flags) */
504
505#define ACPI_MADT_POLARITY_MASK (3) /* 00-01: Polarity of APIC I/O input signals */
506#define ACPI_MADT_TRIGGER_MASK (3<<2) /* 02-03: Trigger mode of APIC input signals */
507
508/* Values for MPS INTI flags */
509
510#define ACPI_MADT_POLARITY_CONFORMS 0
511#define ACPI_MADT_POLARITY_ACTIVE_HIGH 1
512#define ACPI_MADT_POLARITY_RESERVED 2
513#define ACPI_MADT_POLARITY_ACTIVE_LOW 3
514
515#define ACPI_MADT_TRIGGER_CONFORMS (0)
516#define ACPI_MADT_TRIGGER_EDGE (1<<2)
517#define ACPI_MADT_TRIGGER_RESERVED (2<<2)
518#define ACPI_MADT_TRIGGER_LEVEL (3<<2)
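The old ACPI_MADT_IFLAGS bitfields become a plain u16 inti_flags plus the mask/value macros above, so decoding is a mask-and-compare. Illustrative helper, not part of this patch:

static int inti_is_active_low_level(u16 inti_flags)
{
	return (inti_flags & ACPI_MADT_POLARITY_MASK) ==
			ACPI_MADT_POLARITY_ACTIVE_LOW &&
	       (inti_flags & ACPI_MADT_TRIGGER_MASK) ==
			ACPI_MADT_TRIGGER_LEVEL;
}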
519
402/******************************************************************************* 520/*******************************************************************************
403 * 521 *
404 * MCFG - PCI Memory Mapped Configuration table and sub-table 522 * MCFG - PCI Memory Mapped Configuration table and sub-table
@@ -406,17 +524,19 @@ struct madt_interrupt_source {
406 ******************************************************************************/ 524 ******************************************************************************/
407 525
408struct acpi_table_mcfg { 526struct acpi_table_mcfg {
409 ACPI_TABLE_HEADER_DEF u8 reserved[8]; 527 struct acpi_table_header header; /* Common ACPI table header */
528 u8 reserved[8];
410}; 529};
411 530
531/* Subtable */
532
412struct acpi_mcfg_allocation { 533struct acpi_mcfg_allocation {
413 u64 base_address; /* Base address, processor-relative */ 534 u64 address; /* Base address, processor-relative */
414 u16 pci_segment; /* PCI segment group number */ 535 u16 pci_segment; /* PCI segment group number */
415 u8 start_bus_number; /* Starting PCI Bus number */ 536 u8 start_bus_number; /* Starting PCI Bus number */
416 u8 end_bus_number; /* Final PCI Bus number */ 537 u8 end_bus_number; /* Final PCI Bus number */
417 u32 reserved; 538 u32 reserved;
418}; 539};
419#endif
420 540
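Each struct acpi_mcfg_allocation describes one ECAM window; configuration space for a given bus/device/function sits at a fixed offset inside it (1 MiB per bus, 32 KiB per device, 4 KiB per function). A hedged sketch of that address arithmetic, not part of this patch:

static u64 mcfg_ecam_address(const struct acpi_mcfg_allocation *m,
			     u8 bus, u8 dev, u8 fn)
{
	/* Caller must ensure start_bus_number <= bus <= end_bus_number */
	return m->address +
	       (((u64)(bus - m->start_bus_number) << 20) |
		((u64)dev << 15) |
		((u64)fn << 12));
}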
421/******************************************************************************* 541/*******************************************************************************
422 * 542 *
@@ -424,8 +544,9 @@ struct acpi_mcfg_allocation {
424 * 544 *
425 ******************************************************************************/ 545 ******************************************************************************/
426 546
427struct smart_battery_table { 547struct acpi_table_sbst {
428 ACPI_TABLE_HEADER_DEF u32 warning_level; 548 struct acpi_table_header header; /* Common ACPI table header */
549 u32 warning_level;
429 u32 low_level; 550 u32 low_level;
430 u32 critical_level; 551 u32 critical_level;
431}; 552};
@@ -436,9 +557,10 @@ struct smart_battery_table {
436 * 557 *
437 ******************************************************************************/ 558 ******************************************************************************/
438 559
439struct system_locality_info { 560struct acpi_table_slit {
440 ACPI_TABLE_HEADER_DEF u64 locality_count; 561 struct acpi_table_header header; /* Common ACPI table header */
441 u8 entry[1][1]; 562 u64 locality_count;
563 u8 entry[1]; /* Real size = localities^2 */
442}; 564};
443 565
444/******************************************************************************* 566/*******************************************************************************
@@ -448,7 +570,8 @@ struct system_locality_info {
448 ******************************************************************************/ 570 ******************************************************************************/
449 571
450struct acpi_table_spcr { 572struct acpi_table_spcr {
451 ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */ 573 struct acpi_table_header header; /* Common ACPI table header */
574 u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
452 u8 reserved[3]; 575 u8 reserved[3];
453 struct acpi_generic_address serial_port; 576 struct acpi_generic_address serial_port;
454 u8 interrupt_type; 577 u8 interrupt_type;
@@ -459,7 +582,7 @@ struct acpi_table_spcr {
459 u8 stop_bits; 582 u8 stop_bits;
460 u8 flow_control; 583 u8 flow_control;
461 u8 terminal_type; 584 u8 terminal_type;
462 u8 reserved2; 585 u8 reserved1;
463 u16 pci_device_id; 586 u16 pci_device_id;
464 u16 pci_vendor_id; 587 u16 pci_vendor_id;
465 u8 pci_bus; 588 u8 pci_bus;
@@ -467,7 +590,7 @@ struct acpi_table_spcr {
467 u8 pci_function; 590 u8 pci_function;
468 u32 pci_flags; 591 u32 pci_flags;
469 u8 pci_segment; 592 u8 pci_segment;
470 u32 reserved3; 593 u32 reserved2;
471}; 594};
472 595
473/******************************************************************************* 596/*******************************************************************************
@@ -477,12 +600,13 @@ struct acpi_table_spcr {
477 ******************************************************************************/ 600 ******************************************************************************/
478 601
479struct acpi_table_spmi { 602struct acpi_table_spmi {
480 ACPI_TABLE_HEADER_DEF u8 reserved; 603 struct acpi_table_header header; /* Common ACPI table header */
604 u8 reserved;
481 u8 interface_type; 605 u8 interface_type;
482 u16 spec_revision; /* Version of IPMI */ 606 u16 spec_revision; /* Version of IPMI */
483 u8 interrupt_type; 607 u8 interrupt_type;
484 u8 gpe_number; /* GPE assigned */ 608 u8 gpe_number; /* GPE assigned */
485 u8 reserved2; 609 u8 reserved1;
486 u8 pci_device_flag; 610 u8 pci_device_flag;
487 u32 interrupt; 611 u32 interrupt;
488 struct acpi_generic_address ipmi_register; 612 struct acpi_generic_address ipmi_register;
@@ -498,58 +622,53 @@ struct acpi_table_spmi {
498 * 622 *
499 ******************************************************************************/ 623 ******************************************************************************/
500 624
501struct system_resource_affinity { 625struct acpi_table_srat {
502 ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */ 626 struct acpi_table_header header; /* Common ACPI table header */
503 u64 reserved2; /* Reserved, must be zero */ 627 u32 table_revision; /* Must be value '1' */
628 u64 reserved; /* Reserved, must be zero */
504}; 629};
505 630
506/* SRAT common sub-table header */ 631/* Values for subtable type in struct acpi_subtable_header */
507 632
508#define SRAT_SUBTABLE_HEADER \ 633enum acpi_srat_type {
509 u8 type; \ 634 ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
510 u8 length; 635 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
511 636 ACPI_SRAT_TYPE_RESERVED = 2
512/* Values for Type above */ 637};
513
514#define SRAT_CPU_AFFINITY 0
515#define SRAT_MEMORY_AFFINITY 1
516#define SRAT_RESERVED 2
517 638
518/* SRAT sub-tables */ 639/* SRAT sub-tables */
519 640
520struct static_resource_alloc { 641struct acpi_srat_cpu_affinity {
521 SRAT_SUBTABLE_HEADER u8 proximity_domain_lo; 642 struct acpi_subtable_header header;
643 u8 proximity_domain_lo;
522 u8 apic_id; 644 u8 apic_id;
523 645 u32 flags;
524 /* Flags (32 bits) */
525
526 u8 enabled:1; /* 00: Use affinity structure */
527 u8:7; /* 01-07: Reserved, must be zero */
528 u8 reserved3[3]; /* 08-31: Reserved, must be zero */
529
530 u8 local_sapic_eid; 646 u8 local_sapic_eid;
531 u8 proximity_domain_hi[3]; 647 u8 proximity_domain_hi[3];
532 u32 reserved4; /* Reserved, must be zero */ 648 u32 reserved; /* Reserved, must be zero */
533}; 649};
534 650
535struct memory_affinity { 651/* Flags */
536 SRAT_SUBTABLE_HEADER u32 proximity_domain;
537 u16 reserved3;
538 u64 base_address;
539 u64 address_length;
540 u32 reserved4;
541
542 /* Flags (32 bits) */
543 652
544 u8 enabled:1; /* 00: Use affinity structure */ 653#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
545 u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
546 u8 non_volatile:1; /* 02: Memory is non-volatile */
547 u8:5; /* 03-07: Reserved, must be zero */
548 u8 reserved5[3]; /* 08-31: Reserved, must be zero */
549 654
550 u64 reserved6; /* Reserved, must be zero */ 655struct acpi_srat_mem_affinity {
656 struct acpi_subtable_header header;
657 u32 proximity_domain;
658 u16 reserved; /* Reserved, must be zero */
659 u64 base_address;
660 u64 length;
661 u32 memory_type; /* See acpi_address_range_id */
662 u32 flags;
663 u64 reserved1; /* Reserved, must be zero */
551}; 664};
552 665
666/* Flags */
667
668#define ACPI_SRAT_MEM_ENABLED (1) /* 00: Use affinity structure */
669#define ACPI_SRAT_MEM_HOT_PLUGGABLE (1<<1) /* 01: Memory region is hot pluggable */
670#define ACPI_SRAT_MEM_NON_VOLATILE (1<<2) /* 02: Memory region is non-volatile */
671
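For the processor entries above, ACPI 3.0 widens the proximity domain to 32 bits by adding proximity_domain_hi[3] next to the original low byte; a consumer reassembles it as below (illustrative only, not part of this patch):

static u32 srat_cpu_proximity_domain(const struct acpi_srat_cpu_affinity *pa)
{
	return (u32)pa->proximity_domain_lo |
	       ((u32)pa->proximity_domain_hi[0] << 8) |
	       ((u32)pa->proximity_domain_hi[1] << 16) |
	       ((u32)pa->proximity_domain_hi[2] << 24);
}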
553/******************************************************************************* 672/*******************************************************************************
554 * 673 *
555 * TCPA - Trusted Computing Platform Alliance table 674 * TCPA - Trusted Computing Platform Alliance table
@@ -557,7 +676,8 @@ struct memory_affinity {
557 ******************************************************************************/ 676 ******************************************************************************/
558 677
559struct acpi_table_tcpa { 678struct acpi_table_tcpa {
560 ACPI_TABLE_HEADER_DEF u16 reserved; 679 struct acpi_table_header header; /* Common ACPI table header */
680 u16 reserved;
561 u32 max_log_length; /* Maximum length for the event log area */ 681 u32 max_log_length; /* Maximum length for the event log area */
562 u64 log_address; /* Address of the event log area */ 682 u64 log_address; /* Address of the event log area */
563}; 683};
@@ -569,7 +689,8 @@ struct acpi_table_tcpa {
569 ******************************************************************************/ 689 ******************************************************************************/
570 690
571struct acpi_table_wdrt { 691struct acpi_table_wdrt {
572 ACPI_TABLE_HEADER_DEF u32 header_length; /* Watchdog Header Length */ 692 struct acpi_table_header header; /* Common ACPI table header */
693 u32 header_length; /* Watchdog Header Length */
573 u8 pci_segment; /* PCI Segment number */ 694 u8 pci_segment; /* PCI Segment number */
574 u8 pci_bus; /* PCI Bus number */ 695 u8 pci_bus; /* PCI Bus number */
575 u8 pci_device; /* PCI Device number */ 696 u8 pci_device; /* PCI Device number */
@@ -582,58 +703,9 @@ struct acpi_table_wdrt {
582 u32 entries; /* Number of watchdog entries that follow */ 703 u32 entries; /* Number of watchdog entries that follow */
583}; 704};
584 705
585#if 0 /* Flags, will be converted to macros */ 706/* Flags */
586u8 enabled:1; /* 00: Timer enabled */ 707
587u8:6; /* 01-06: Reserved */ 708#define ACPI_WDRT_TIMER_ENABLED (1) /* 00: Timer enabled */
588u8 sleep_stop:1; /* 07: Timer stopped in sleep state */
589#endif
590
591/* Macros used to generate offsets to specific table fields */
592
593#define ACPI_ASF0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_info,f)
594#define ACPI_ASF1_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_alert,f)
595#define ACPI_ASF2_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_remote,f)
596#define ACPI_ASF3_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_rmcp,f)
597#define ACPI_ASF4_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_address,f)
598#define ACPI_BOOT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_boot,f)
599#define ACPI_CPEP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_cpep,f)
600#define ACPI_CPEP0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_cpep_polling,f)
601#define ACPI_DBGP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_dbgp,f)
602#define ACPI_ECDT_OFFSET(f) (u8) ACPI_OFFSET (struct ec_boot_resources,f)
603#define ACPI_HPET_OFFSET(f) (u8) ACPI_OFFSET (struct hpet_table,f)
604#define ACPI_MADT_OFFSET(f) (u8) ACPI_OFFSET (struct multiple_apic_table,f)
605#define ACPI_MADT0_OFFSET(f) (u8) ACPI_OFFSET (struct madt_processor_apic,f)
606#define ACPI_MADT1_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_apic,f)
607#define ACPI_MADT2_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_override,f)
608#define ACPI_MADT3_OFFSET(f) (u8) ACPI_OFFSET (struct madt_nmi_source,f)
609#define ACPI_MADT4_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_apic_nmi,f)
610#define ACPI_MADT5_OFFSET(f) (u8) ACPI_OFFSET (struct madt_address_override,f)
611#define ACPI_MADT6_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_sapic,f)
612#define ACPI_MADT7_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_sapic,f)
613#define ACPI_MADT8_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_source,f)
614#define ACPI_MADTH_OFFSET(f) (u8) ACPI_OFFSET (struct apic_header,f)
615#define ACPI_MCFG_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_mcfg,f)
616#define ACPI_MCFG0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_mcfg_allocation,f)
617#define ACPI_SBST_OFFSET(f) (u8) ACPI_OFFSET (struct smart_battery_table,f)
618#define ACPI_SLIT_OFFSET(f) (u8) ACPI_OFFSET (struct system_locality_info,f)
619#define ACPI_SPCR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spcr,f)
620#define ACPI_SPMI_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spmi,f)
621#define ACPI_SRAT_OFFSET(f) (u8) ACPI_OFFSET (struct system_resource_affinity,f)
622#define ACPI_SRAT0_OFFSET(f) (u8) ACPI_OFFSET (struct static_resource_alloc,f)
623#define ACPI_SRAT1_OFFSET(f) (u8) ACPI_OFFSET (struct memory_affinity,f)
624#define ACPI_TCPA_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_tcpa,f)
625#define ACPI_WDRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_wdrt,f)
626
627#define ACPI_HPET_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct hpet_table,f,o)
628#define ACPI_SRAT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct static_resource_alloc,f,o)
629#define ACPI_SRAT1_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct memory_affinity,f,o)
630#define ACPI_MADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct multiple_apic_table,f,o)
631#define ACPI_MADT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_processor_apic,f,o)
632#define ACPI_MADT2_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_override,f,o)
633#define ACPI_MADT3_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_nmi_source,f,o)
634#define ACPI_MADT4_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_apic_nmi,f,o)
635#define ACPI_MADT7_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_sapic,f,o)
636#define ACPI_MADT8_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_source,f,o)
637 709
638/* Reset to default packing */ 710/* Reset to default packing */
639 711
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
deleted file mode 100644
index 67efe6cad27b..000000000000
--- a/include/acpi/actbl2.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/******************************************************************************
2 *
3 * Name: actbl2.h - ACPI Specification Revision 2.0 Tables
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#ifndef __ACTBL2_H__
45#define __ACTBL2_H__
46
47/* Code moved to both actbl.h and actbl1.h */
48
49#endif /* __ACTBL2_H__ */
diff --git a/include/acpi/actbl71.h b/include/acpi/actbl71.h
deleted file mode 100644
index 10ac05bb36bc..000000000000
--- a/include/acpi/actbl71.h
+++ /dev/null
@@ -1,134 +0,0 @@
1/******************************************************************************
2 *
3 * Name: actbl71.h - IA-64 Extensions to the ACPI Spec Rev. 0.71
4 * This file includes tables specific to this
5 * specification revision.
6 *
7 *****************************************************************************/
8
9/*
10 * Copyright (C) 2000 - 2003, R. Byron Moore
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#ifndef __ACTBL71_H__
28#define __ACTBL71_H__
29
30/* 0.71 FADT address_space data item bitmasks defines */
31/* If the associated bit is zero then it is in memory space else in io space */
32
33#define SMI_CMD_ADDRESS_SPACE 0x01
34#define PM1_BLK_ADDRESS_SPACE 0x02
35#define PM2_CNT_BLK_ADDRESS_SPACE 0x04
36#define PM_TMR_BLK_ADDRESS_SPACE 0x08
37#define GPE0_BLK_ADDRESS_SPACE 0x10
38#define GPE1_BLK_ADDRESS_SPACE 0x20
39
40/* Only for clarity in declarations */
41
42typedef u64 IO_ADDRESS;
43
44#pragma pack(1)
45struct { /* Root System Descriptor Pointer */
46 NATIVE_CHAR signature[8]; /* contains "RSD PTR " */
47 u8 checksum; /* to make sum of struct == 0 */
48 NATIVE_CHAR oem_id[6]; /* OEM identification */
49 u8 reserved; /* Must be 0 for 1.0, 2 for 2.0 */
50 u64 rsdt_physical_address; /* 64-bit physical address of RSDT */
51};
52
53/*****************************************/
54/* IA64 Extensions to ACPI Spec Rev 0.71 */
55/* for the Root System Description Table */
56/*****************************************/
57struct {
58 struct acpi_table_header header; /* Table header */
59 u32 reserved_pad; /* IA64 alignment, must be 0 */
60 u64 table_offset_entry[1]; /* Array of pointers to other */
61 /* tables' headers */
62};
63
64/*******************************************/
65/* IA64 Extensions to ACPI Spec Rev 0.71 */
66/* for the Firmware ACPI Control Structure */
67/*******************************************/
68struct {
69 NATIVE_CHAR signature[4]; /* signature "FACS" */
70 u32 length; /* length of structure, in bytes */
71 u32 hardware_signature; /* hardware configuration signature */
72 u32 reserved4; /* must be 0 */
73 u64 firmware_waking_vector; /* ACPI OS waking vector */
74 u64 global_lock; /* Global Lock */
75 u32 S4bios_f:1; /* Indicates if S4BIOS support is present */
76 u32 reserved1:31; /* must be 0 */
77 u8 reserved3[28]; /* reserved - must be zero */
78};
79
80/******************************************/
81/* IA64 Extensions to ACPI Spec Rev 0.71 */
82/* for the Fixed ACPI Description Table */
83/******************************************/
84struct {
85 struct acpi_table_header header; /* table header */
86 u32 reserved_pad; /* IA64 alignment, must be 0 */
87 u64 firmware_ctrl; /* 64-bit Physical address of FACS */
88 u64 dsdt; /* 64-bit Physical address of DSDT */
89 u8 model; /* System Interrupt Model */
90 u8 address_space; /* Address Space Bitmask */
91 u16 sci_int; /* System vector of SCI interrupt */
92 u8 acpi_enable; /* value to write to smi_cmd to enable ACPI */
93 u8 acpi_disable; /* value to write to smi_cmd to disable ACPI */
94 u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
95 u8 reserved2; /* reserved - must be zero */
96 u64 smi_cmd; /* Port address of SMI command port */
97 u64 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
98 u64 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
99 u64 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
100 u64 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
101 u64 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
102 u64 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
103 u64 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
104 u64 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
105 u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
106 u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
107 u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
108 u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
109 u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
110 u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
111 u8 gpe1_base; /* offset in gpe model where gpe1 events start */
112 u8 reserved3; /* reserved */
113 u16 plvl2_lat; /* worst case HW latency to enter/exit C2 state */
114 u16 plvl3_lat; /* worst case HW latency to enter/exit C3 state */
115 u8 day_alrm; /* index to day-of-month alarm in RTC CMOS RAM */
116 u8 mon_alrm; /* index to month-of-year alarm in RTC CMOS RAM */
117 u8 century; /* index to century in RTC CMOS RAM */
118 u8 reserved4; /* reserved */
119 u32 flush_cash:1; /* PAL_FLUSH_CACHE is correctly supported */
120 u32 reserved5:1; /* reserved - must be zero */
121 u32 proc_c1:1; /* all processors support C1 state */
122 u32 plvl2_up:1; /* C2 state works on MP system */
123 u32 pwr_button:1; /* Power button is handled as a generic feature */
124 u32 sleep_button:1; /* Sleep button is handled as a generic feature, or not present */
125 u32 fixed_rTC:1; /* RTC wakeup stat not in fixed register space */
126 u32 rtcs4:1; /* RTC wakeup stat not possible from S4 */
127 u32 tmr_val_ext:1; /* tmr_val is 32 bits */
128 u32 dock_cap:1; /* Supports Docking */
129 u32 reserved6:22; /* reserved - must be zero */
130};
131
132#pragma pack()
133
134#endif /* __ACTBL71_H__ */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 64b603cfe92e..72a6e2c3a536 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,8 @@
48 48
49/* 49/*
50 * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header 50 * ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header
51 * and must be either 16, 32, or 64 51 * and must be either 32 or 64. 16-bit ACPICA is no longer supported, as of
52 * 12/2006.
52 */ 53 */
53#ifndef ACPI_MACHINE_WIDTH 54#ifndef ACPI_MACHINE_WIDTH
54#error ACPI_MACHINE_WIDTH not defined 55#error ACPI_MACHINE_WIDTH not defined
@@ -149,7 +150,6 @@ typedef int INT32;
149typedef u64 acpi_native_uint; 150typedef u64 acpi_native_uint;
150typedef s64 acpi_native_int; 151typedef s64 acpi_native_int;
151 152
152typedef u64 acpi_table_ptr;
153typedef u64 acpi_io_address; 153typedef u64 acpi_io_address;
154typedef u64 acpi_physical_address; 154typedef u64 acpi_physical_address;
155 155
@@ -189,48 +189,15 @@ typedef int INT32;
189typedef u32 acpi_native_uint; 189typedef u32 acpi_native_uint;
190typedef s32 acpi_native_int; 190typedef s32 acpi_native_int;
191 191
192typedef u64 acpi_table_ptr;
193typedef u32 acpi_io_address; 192typedef u32 acpi_io_address;
194typedef u64 acpi_physical_address; 193typedef u32 acpi_physical_address;
195 194
196#define ACPI_MAX_PTR ACPI_UINT32_MAX 195#define ACPI_MAX_PTR ACPI_UINT32_MAX
197#define ACPI_SIZE_MAX ACPI_UINT32_MAX 196#define ACPI_SIZE_MAX ACPI_UINT32_MAX
198 197
199/*******************************************************************************
200 *
201 * Types specific to 16-bit targets
202 *
203 ******************************************************************************/
204
205#elif ACPI_MACHINE_WIDTH == 16
206
207/*! [Begin] no source code translation (keep the typedefs as-is) */
208
209typedef unsigned long UINT32;
210typedef short INT16;
211typedef long INT32;
212
213/*! [End] no source code translation !*/
214
215typedef u16 acpi_native_uint;
216typedef s16 acpi_native_int;
217
218typedef u32 acpi_table_ptr;
219typedef u32 acpi_io_address;
220typedef char *acpi_physical_address;
221
222#define ACPI_MAX_PTR ACPI_UINT16_MAX
223#define ACPI_SIZE_MAX ACPI_UINT16_MAX
224
225#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */
226
227/* 64-bit integers cannot be supported */
228
229#define ACPI_NO_INTEGER64_SUPPORT
230
231#else 198#else
232 199
233/* ACPI_MACHINE_WIDTH must be either 64, 32, or 16 */ 200/* ACPI_MACHINE_WIDTH must be either 64 or 32 */
234 201
235#error unknown ACPI_MACHINE_WIDTH 202#error unknown ACPI_MACHINE_WIDTH
236#endif 203#endif
@@ -311,36 +278,6 @@ typedef acpi_native_uint acpi_size;
311 * 278 *
312 ******************************************************************************/ 279 ******************************************************************************/
313 280
314/*
315 * Pointer overlays to avoid lots of typecasting for
316 * code that accepts both physical and logical pointers.
317 */
318union acpi_pointers {
319 acpi_physical_address physical;
320 void *logical;
321 acpi_table_ptr value;
322};
323
324struct acpi_pointer {
325 u32 pointer_type;
326 union acpi_pointers pointer;
327};
328
329/* pointer_types for above */
330
331#define ACPI_PHYSICAL_POINTER 0x01
332#define ACPI_LOGICAL_POINTER 0x02
333
334/* Processor mode */
335
336#define ACPI_PHYSICAL_ADDRESSING 0x04
337#define ACPI_LOGICAL_ADDRESSING 0x08
338#define ACPI_MEMORY_MODE 0x0C
339
340#define ACPI_PHYSMODE_PHYSPTR ACPI_PHYSICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
341#define ACPI_LOGMODE_PHYSPTR ACPI_LOGICAL_ADDRESSING | ACPI_PHYSICAL_POINTER
342#define ACPI_LOGMODE_LOGPTR ACPI_LOGICAL_ADDRESSING | ACPI_LOGICAL_POINTER
343
344/* Logical defines and NULL */ 281/* Logical defines and NULL */
345 282
346#ifdef FALSE 283#ifdef FALSE
@@ -442,7 +379,8 @@ typedef u64 acpi_integer;
442/* 379/*
443 * Initialization state 380 * Initialization state
444 */ 381 */
445#define ACPI_INITIALIZED_OK 0x01 382#define ACPI_SUBSYSTEM_INITIALIZE 0x01
383#define ACPI_INITIALIZED_OK 0x02
446 384
447/* 385/*
448 * Power state values 386 * Power state values
@@ -491,21 +429,6 @@ typedef u64 acpi_integer;
491#define ACPI_NOTIFY_POWER_FAULT (u8) 7 429#define ACPI_NOTIFY_POWER_FAULT (u8) 7
492 430
493/* 431/*
494 * Table types. These values are passed to the table related APIs
495 */
496typedef u32 acpi_table_type;
497
498#define ACPI_TABLE_ID_RSDP (acpi_table_type) 0
499#define ACPI_TABLE_ID_DSDT (acpi_table_type) 1
500#define ACPI_TABLE_ID_FADT (acpi_table_type) 2
501#define ACPI_TABLE_ID_FACS (acpi_table_type) 3
502#define ACPI_TABLE_ID_PSDT (acpi_table_type) 4
503#define ACPI_TABLE_ID_SSDT (acpi_table_type) 5
504#define ACPI_TABLE_ID_XSDT (acpi_table_type) 6
505#define ACPI_TABLE_ID_MAX 6
506#define ACPI_NUM_TABLE_TYPES (ACPI_TABLE_ID_MAX+1)
507
508/*
509 * Types associated with ACPI names and objects. The first group of 432 * Types associated with ACPI names and objects. The first group of
510 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition 433 * values (up to ACPI_TYPE_EXTERNAL_MAX) correspond to the definition
511 * of the ACPI object_type() operator (See the ACPI Spec). Therefore, 434 * of the ACPI object_type() operator (See the ACPI Spec). Therefore,
@@ -637,7 +560,7 @@ typedef u32 acpi_event_status;
637 * | | | +--- Type of dispatch -- to method, handler, or none 560 * | | | +--- Type of dispatch -- to method, handler, or none
638 * | | +--- Enabled for runtime? 561 * | | +--- Enabled for runtime?
639 * | +--- Enabled for wake? 562 * | +--- Enabled for wake?
640 * +--- System state when GPE ocurred (running/waking) 563 * +--- Unused
641 */ 564 */
642#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 565#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01
643#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 566#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01
@@ -663,10 +586,6 @@ typedef u32 acpi_event_status;
663 586
664#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */ 587#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */
665 588
666#define ACPI_GPE_SYSTEM_MASK (u8) 0x80
667#define ACPI_GPE_SYSTEM_RUNNING (u8) 0x80
668#define ACPI_GPE_SYSTEM_WAKING (u8) 0x00
669
670/* 589/*
671 * Flags for GPE and Lock interfaces 590 * Flags for GPE and Lock interfaces
672 */ 591 */
@@ -816,13 +735,6 @@ struct acpi_buffer {
816#define ACPI_SYS_MODES_MASK 0x0003 735#define ACPI_SYS_MODES_MASK 0x0003
817 736
818/* 737/*
819 * ACPI Table Info. One per ACPI table _type_
820 */
821struct acpi_table_info {
822 u32 count;
823};
824
825/*
826 * System info returned by acpi_get_system_info() 738 * System info returned by acpi_get_system_info()
827 */ 739 */
828struct acpi_system_info { 740struct acpi_system_info {
@@ -833,8 +745,6 @@ struct acpi_system_info {
833 u32 reserved2; 745 u32 reserved2;
834 u32 debug_level; 746 u32 debug_level;
835 u32 debug_layer; 747 u32 debug_layer;
836 u32 num_table_types;
837 struct acpi_table_info table_info[ACPI_TABLE_ID_MAX + 1];
838}; 748};
839 749
840/* 750/*
diff --git a/include/acpi/acutils.h b/include/acpi/acutils.h
index ba039ea1a057..883ffe92148f 100644
--- a/include/acpi/acutils.h
+++ b/include/acpi/acutils.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -141,8 +141,6 @@ acpi_status acpi_ut_hardware_initialize(void);
141 141
142void acpi_ut_subsystem_shutdown(void); 142void acpi_ut_subsystem_shutdown(void);
143 143
144acpi_status acpi_ut_validate_fadt(void);
145
146/* 144/*
147 * utclib - Local implementations of C library functions 145 * utclib - Local implementations of C library functions
148 */ 146 */
@@ -453,6 +451,8 @@ acpi_ut_short_divide(acpi_integer in_dividend,
453/* 451/*
454 * utmisc 452 * utmisc
455 */ 453 */
454const char *acpi_ut_validate_exception(acpi_status status);
455
456u8 acpi_ut_is_aml_table(struct acpi_table_header *table); 456u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
457 457
458acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id); 458acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
@@ -470,7 +470,7 @@ void acpi_ut_print_string(char *string, u8 max_length);
470 470
471u8 acpi_ut_valid_acpi_name(u32 name); 471u8 acpi_ut_valid_acpi_name(u32 name);
472 472
473acpi_name acpi_ut_repair_name(acpi_name name); 473acpi_name acpi_ut_repair_name(char *name);
474 474
475u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position); 475u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position);
476 476
diff --git a/include/acpi/amlcode.h b/include/acpi/amlcode.h
index cf18426a87b1..da53a4ef287a 100644
--- a/include/acpi/amlcode.h
+++ b/include/acpi/amlcode.h
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -273,7 +273,7 @@
273#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator */ 273#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator */
274#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */ 274#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */
275#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */ 275#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */
276#define ARGI_REGION_OR_FIELD 0x15 /* Used by LOAD op only */ 276#define ARGI_REGION_OR_BUFFER 0x15 /* Used by LOAD op only */
277#define ARGI_DATAREFOBJ 0x16 277#define ARGI_DATAREFOBJ 0x16
278 278
279/* Note: types above can expand to 0x1F maximum */ 279/* Note: types above can expand to 0x1F maximum */
diff --git a/include/acpi/amlresrc.h b/include/acpi/amlresrc.h
index be03818af9d1..f7d541239da4 100644
--- a/include/acpi/amlresrc.h
+++ b/include/acpi/amlresrc.h
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 453a469fd397..dab2ec59a3b0 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index da80933963db..3bb50494a38a 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 7f1e92930b62..5f532d2ac180 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 7cfad93edf10..5e657eb8946c 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -39,7 +39,7 @@
39 * Calling conventions: 39 * Calling conventions:
40 * 40 *
41 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) 41 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
42 * ACPI_EXTERNAL_XFACE - External ACPI interfaces 42 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
43 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces 43 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
44 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces 44 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
45 */ 45 */
@@ -59,11 +59,11 @@
59int __acpi_acquire_global_lock(unsigned int *lock); 59int __acpi_acquire_global_lock(unsigned int *lock);
60int __acpi_release_global_lock(unsigned int *lock); 60int __acpi_release_global_lock(unsigned int *lock);
61 61
62#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ 62#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
63 ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) 63 ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
64 64
65#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ 65#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
66 ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) 66 ((Acq) = __acpi_release_global_lock(&facs->global_lock))
67 67
68/* 68/*
69 * Math helper asm macros 69 * Math helper asm macros
@@ -87,7 +87,7 @@ extern void check_acpi_pci(void);
87static inline void check_acpi_pci(void) { } 87static inline void check_acpi_pci(void) { }
88#endif 88#endif
89 89
90#ifdef CONFIG_ACPI 90#ifdef CONFIG_ACPI
91extern int acpi_lapic; 91extern int acpi_lapic;
92extern int acpi_ioapic; 92extern int acpi_ioapic;
93extern int acpi_noirq; 93extern int acpi_noirq;
@@ -95,9 +95,9 @@ extern int acpi_strict;
95extern int acpi_disabled; 95extern int acpi_disabled;
96extern int acpi_ht; 96extern int acpi_ht;
97extern int acpi_pci_disabled; 97extern int acpi_pci_disabled;
98static inline void disable_acpi(void) 98static inline void disable_acpi(void)
99{ 99{
100 acpi_disabled = 1; 100 acpi_disabled = 1;
101 acpi_ht = 0; 101 acpi_ht = 0;
102 acpi_pci_disabled = 1; 102 acpi_pci_disabled = 1;
103 acpi_noirq = 1; 103 acpi_noirq = 1;
@@ -114,9 +114,9 @@ extern int acpi_use_timer_override;
114#endif 114#endif
115 115
116static inline void acpi_noirq_set(void) { acpi_noirq = 1; } 116static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
117static inline void acpi_disable_pci(void) 117static inline void acpi_disable_pci(void)
118{ 118{
119 acpi_pci_disabled = 1; 119 acpi_pci_disabled = 1;
120 acpi_noirq_set(); 120 acpi_noirq_set();
121} 121}
122extern int acpi_irq_balance_set(char *str); 122extern int acpi_irq_balance_set(char *str);
@@ -144,8 +144,6 @@ extern void acpi_reserve_bootmem(void);
144 144
145#endif /*CONFIG_ACPI_SLEEP*/ 145#endif /*CONFIG_ACPI_SLEEP*/
146 146
147extern u8 x86_acpiid_to_apicid[];
148
149#define ARCH_HAS_POWER_INIT 1 147#define ARCH_HAS_POWER_INIT 1
150 148
151#endif /*__KERNEL__*/ 149#endif /*__KERNEL__*/
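
Note on the hunk above: the global-lock macros now take the FACS table pointer instead of a raw pointer to the lock word. A minimal caller sketch follows; only the macro names and the global_lock field come from the hunk, the facs variable and helper are hypothetical.

/* Hypothetical caller sketch, not part of the patch. */
struct acpi_table_facs *facs = get_facs_somehow();   /* assumed helper */
int acquired;

ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);   /* expands to __acpi_acquire_global_lock(&facs->global_lock) */
if (acquired) {
        /* ... touch firmware-shared state ... */
        ACPI_RELEASE_GLOBAL_LOCK(facs, acquired);
}
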
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 99f66be240be..24990e546da3 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -3,13 +3,13 @@
3 3
4#include <linux/acpi.h> 4#include <linux/acpi.h>
5 5
6static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, 6static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
7 struct mpc_config_translation *translation) 7 struct mpc_config_translation *translation)
8{ 8{
9 Dprintk("Bus #%d is %s\n", m->mpc_busid, name); 9 Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
10} 10}
11 11
12static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, 12static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
13 struct mpc_config_translation *translation) 13 struct mpc_config_translation *translation)
14{ 14{
15} 15}
@@ -22,7 +22,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
22 char *productid) 22 char *productid)
23{ 23{
24 if (mpc->mpc_oemptr) { 24 if (mpc->mpc_oemptr) {
25 struct mp_config_oemtable *oem_table = 25 struct mp_config_oemtable *oem_table =
26 (struct mp_config_oemtable *)mpc->mpc_oemptr; 26 (struct mp_config_oemtable *)mpc->mpc_oemptr;
27 if (!strncmp(oem, "UNISYS", 6)) 27 if (!strncmp(oem, "UNISYS", 6))
28 return parse_unisys_oem((char *)oem_table); 28 return parse_unisys_oem((char *)oem_table);
@@ -31,12 +31,13 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
31} 31}
32 32
33#ifdef CONFIG_ACPI 33#ifdef CONFIG_ACPI
34
34static inline int es7000_check_dsdt(void) 35static inline int es7000_check_dsdt(void)
35{ 36{
36 struct acpi_table_header *header = NULL; 37 struct acpi_table_header header;
37 if(!acpi_get_table_header_early(ACPI_DSDT, &header)) 38 memcpy(&header, 0, sizeof(struct acpi_table_header));
38 acpi_table_print(header, 0); 39 acpi_get_table_header(ACPI_SIG_DSDT, 0, &header);
39 if (!strncmp(header->oem_id, "UNISYS", 6)) 40 if (!strncmp(header.oem_id, "UNISYS", 6))
40 return 1; 41 return 1;
41 return 0; 42 return 0;
42} 43}
@@ -44,7 +45,7 @@ static inline int es7000_check_dsdt(void)
44/* Hook from generic ACPI tables.c */ 45/* Hook from generic ACPI tables.c */
45static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) 46static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
46{ 47{
47 unsigned long oem_addr; 48 unsigned long oem_addr;
48 if (!find_unisys_acpi_oem_table(&oem_addr)) { 49 if (!find_unisys_acpi_oem_table(&oem_addr)) {
49 if (es7000_check_dsdt()) 50 if (es7000_check_dsdt())
50 return parse_unisys_oem((char *)oem_addr); 51 return parse_unisys_oem((char *)oem_addr);
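
In the hunk above, es7000_check_dsdt() moves from the removed acpi_get_table_header_early()/acpi_table_print() pair to the new signature-based acpi_get_table_header(), which copies the header into caller storage. A hedged standalone sketch of the same check is below; the memcpy from address 0 in the hunk reads as if a memset was intended, so the sketch zeroes the struct explicitly. ACPI_SIG_DSDT and the header layout are taken from the hunk, everything else is illustrative.

/* Illustrative sketch only, mirrors the hunk with explicit zeroing. */
static int check_dsdt_oem(void)
{
        struct acpi_table_header header;

        memset(&header, 0, sizeof(header));               /* zero in case the lookup fails */
        acpi_get_table_header(ACPI_SIG_DSDT, 0, &header); /* copy the DSDT header by signature */
        return !strncmp(header.oem_id, "UNISYS", 6);      /* 1 if the DSDT is Unisys-branded */
}
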
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 09a5dd0e44a8..5d03792d4f65 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -82,11 +82,11 @@ ia64_acpi_release_global_lock (unsigned int *lock)
82 return old & 0x1; 82 return old & 0x1;
83} 83}
84 84
85#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ 85#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
86 ((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr)) 86 ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
87 87
88#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ 88#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
89 ((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr)) 89 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
90 90
91#define acpi_disabled 0 /* ACPI always enabled on IA64 */ 91#define acpi_disabled 0 /* ACPI always enabled on IA64 */
92#define acpi_noirq 0 /* ACPI always enabled on IA64 */ 92#define acpi_noirq 0 /* ACPI always enabled on IA64 */
@@ -119,8 +119,6 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
119extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; 119extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
120#endif 120#endif
121 121
122extern u16 ia64_acpiid_to_sapicid[];
123
124/* 122/*
125 * Refer Intel ACPI _PDC support document for bit definitions 123 * Refer Intel ACPI _PDC support document for bit definitions
126 */ 124 */
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index dad3a735df8b..4d97f60f1ef5 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
19 19
20#define free_dma(x) 20#define free_dma(x)
21 21
22void dma_mark_clean(void *addr, size_t size);
23
22#endif /* _ASM_IA64_DMA_H */ 24#endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/esi.h b/include/asm-ia64/esi.h
index 84aac0e0b583..40991c6ba647 100644
--- a/include/asm-ia64/esi.h
+++ b/include/asm-ia64/esi.h
@@ -19,7 +19,6 @@ enum esi_proc_type {
19 ESI_PROC_REENTRANT /* MP-safe and reentrant */ 19 ESI_PROC_REENTRANT /* MP-safe and reentrant */
20}; 20};
21 21
22extern int ia64_esi_init (void);
23extern struct ia64_sal_retval esi_call_phys (void *, u64 *); 22extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
24extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *, 23extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
25 enum esi_proc_type, 24 enum esi_proc_type,
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index c8df75901083..6dd476b652c6 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -51,12 +51,13 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
51 51
52#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ 52#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
53 53
54extern int register_active_ranges(u64 start, u64 end, void *arg);
55
54#ifdef CONFIG_VIRTUAL_MEM_MAP 56#ifdef CONFIG_VIRTUAL_MEM_MAP
55# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ 57# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
56 extern unsigned long vmalloc_end; 58 extern unsigned long vmalloc_end;
57 extern struct page *vmem_map; 59 extern struct page *vmem_map;
58 extern int find_largest_hole (u64 start, u64 end, void *arg); 60 extern int find_largest_hole (u64 start, u64 end, void *arg);
59 extern int register_active_ranges (u64 start, u64 end, void *arg);
60 extern int create_mem_map_page_table (u64 start, u64 end, void *arg); 61 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
61 extern int vmemmap_find_next_valid_pfn(int, int); 62 extern int vmemmap_find_next_valid_pfn(int, int);
62#else 63#else
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 393e04c42a2c..560c287b1233 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -137,7 +137,8 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
137static inline struct page *pte_alloc_one(struct mm_struct *mm, 137static inline struct page *pte_alloc_one(struct mm_struct *mm,
138 unsigned long addr) 138 unsigned long addr)
139{ 139{
140 return virt_to_page(pgtable_quicklist_alloc()); 140 void *pg = pgtable_quicklist_alloc();
141 return pg ? virt_to_page(pg) : NULL;
141} 142}
142 143
143static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 144static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
diff --git a/include/asm-ia64/sn/acpi.h b/include/asm-ia64/sn/acpi.h
index 2850a7ef5e71..9ce2801cbd57 100644
--- a/include/asm-ia64/sn/acpi.h
+++ b/include/asm-ia64/sn/acpi.h
@@ -11,6 +11,7 @@
11 11
12#include "acpi/acglobal.h" 12#include "acpi/acglobal.h"
13 13
14#define SN_ACPI_BASE_SUPPORT() (acpi_gbl_DSDT->oem_revision >= 0x20101) 14extern int sn_acpi_rev;
15#define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101)
15 16
16#endif /* _ASM_IA64_SN_ACPI_H */ 17#endif /* _ASM_IA64_SN_ACPI_H */
diff --git a/include/asm-ia64/sn/pcibr_provider.h b/include/asm-ia64/sn/pcibr_provider.h
index da3eade0cae2..17cb6cc3f21a 100644
--- a/include/asm-ia64/sn/pcibr_provider.h
+++ b/include/asm-ia64/sn/pcibr_provider.h
@@ -142,7 +142,7 @@ extern int pcibr_ate_alloc(struct pcibus_info *, int);
142extern void pcibr_ate_free(struct pcibus_info *, int); 142extern void pcibr_ate_free(struct pcibus_info *, int);
143extern void ate_write(struct pcibus_info *, int, int, u64); 143extern void ate_write(struct pcibus_info *, int, int, u64);
144extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, 144extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
145 void *resp); 145 void *resp, char **ssdt);
146extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, 146extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
147 int action, void *resp); 147 int action, void *resp);
148extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus); 148extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
diff --git a/include/asm-ia64/sn/pcidev.h b/include/asm-ia64/sn/pcidev.h
index 9fe89a93d880..1c2382cea807 100644
--- a/include/asm-ia64/sn/pcidev.h
+++ b/include/asm-ia64/sn/pcidev.h
@@ -70,10 +70,16 @@ extern void sn_irq_fixup(struct pci_dev *pci_dev,
70 struct sn_irq_info *sn_irq_info); 70 struct sn_irq_info *sn_irq_info);
71extern void sn_irq_unfixup(struct pci_dev *pci_dev); 71extern void sn_irq_unfixup(struct pci_dev *pci_dev);
72extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *); 72extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
73extern void sn_bus_fixup(struct pci_bus *);
74extern void sn_acpi_bus_fixup(struct pci_bus *);
75extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *);
73extern void sn_bus_store_sysdata(struct pci_dev *dev); 76extern void sn_bus_store_sysdata(struct pci_dev *dev);
74extern void sn_bus_free_sysdata(void); 77extern void sn_bus_free_sysdata(void);
75extern void sn_generate_path(struct pci_bus *pci_bus, char *address); 78extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
76extern void sn_pci_fixup_slot(struct pci_dev *dev); 79extern void sn_io_slot_fixup(struct pci_dev *);
80extern void sn_acpi_slot_fixup(struct pci_dev *);
81extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *,
82 struct sn_irq_info *);
77extern void sn_pci_unfixup_slot(struct pci_dev *dev); 83extern void sn_pci_unfixup_slot(struct pci_dev *dev);
78extern void sn_irq_lh_init(void); 84extern void sn_irq_lh_init(void);
79#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ 85#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h
new file mode 100644
index 000000000000..452c162dee4e
--- /dev/null
+++ b/include/asm-ia64/swiotlb.h
@@ -0,0 +1,9 @@
1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWIOTLB_H 1
3
4#include <asm/machvec.h>
5
6#define SWIOTLB_ARCH_NEED_LATE_INIT
7#define SWIOTLB_ARCH_NEED_ALLOC
8
9#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 9b505b25544f..91698599f918 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -84,6 +84,7 @@ struct thread_info {
84#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 84#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ 85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
87#define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */
87#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 88#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
88#define TIF_MEMDIE 17 89#define TIF_MEMDIE 17
89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 90#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
@@ -92,7 +93,8 @@ struct thread_info {
92 93
93#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 94#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
94#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 95#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
95#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 96#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
97#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
96#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 98#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
97#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 99#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
98#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
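
With TIF_SINGLESTEP at bit 5, the combined trace/audit mask in the hunk above picks up the new flag. Worked out numerically from the values shown (not part of the patch):

/* _TIF_SYSCALL_TRACE      = 1 << 3 = 0x08 */
/* _TIF_SYSCALL_AUDIT      = 1 << 4 = 0x10 */
/* _TIF_SINGLESTEP         = 1 << 5 = 0x20 */
/* _TIF_SYSCALL_TRACEAUDIT = 0x08 | 0x10 | 0x20 = 0x38, so the syscall path goes slow if any of the three is set */
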
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 53c5c0ee122c..a9e1fa4cac4d 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -291,11 +291,13 @@
291#define __NR_sync_file_range 1300 291#define __NR_sync_file_range 1300
292#define __NR_tee 1301 292#define __NR_tee 1301
293#define __NR_vmsplice 1302 293#define __NR_vmsplice 1302
294/* 1303 reserved for move_pages */
295#define __NR_getcpu 1304
294 296
295#ifdef __KERNEL__ 297#ifdef __KERNEL__
296 298
297 299
298#define NR_syscalls 279 /* length of syscall table */ 300#define NR_syscalls 281 /* length of syscall table */
299 301
300#define __ARCH_WANT_SYS_RT_SIGACTION 302#define __ARCH_WANT_SYS_RT_SIGACTION
301 303
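
The syscall-count bump above is consistent arithmetic: ia64 syscall numbers conventionally start at 1024 (an assumption here, not stated in the hunk), so the old table length was 1302 - 1024 + 1 = 279 with __NR_vmsplice = 1302, and with __NR_getcpu = 1304 it becomes 1304 - 1024 + 1 = 281; the reserved 1303 slot still occupies a table entry.
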
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index 8e321f53a382..c7c945baf1ee 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -243,6 +243,10 @@ extern struct boot_mem_map boot_mem_map;
243extern void add_memory_region(phys_t start, phys_t size, long type); 243extern void add_memory_region(phys_t start, phys_t size, long type);
244 244
245extern void prom_init(void); 245extern void prom_init(void);
246extern void prom_free_prom_memory(void);
247
248extern void free_init_pages(const char *what,
249 unsigned long begin, unsigned long end);
246 250
247/* 251/*
248 * Initial kernel command line, usually setup by prom_init() 252 * Initial kernel command line, usually setup by prom_init()
diff --git a/include/asm-mips/ddb5xxx/ddb5477.h b/include/asm-mips/ddb5xxx/ddb5477.h
index c5af4b73fdd7..6cf177caf6d5 100644
--- a/include/asm-mips/ddb5xxx/ddb5477.h
+++ b/include/asm-mips/ddb5xxx/ddb5477.h
@@ -17,6 +17,7 @@
17#ifndef __ASM_DDB5XXX_DDB5477_H 17#ifndef __ASM_DDB5XXX_DDB5477_H
18#define __ASM_DDB5XXX_DDB5477_H 18#define __ASM_DDB5XXX_DDB5477_H
19 19
20#include <irq.h>
20 21
21/* 22/*
22 * This contains macros that are specific to DDB5477 or renamed from 23 * This contains macros that are specific to DDB5477 or renamed from
@@ -251,14 +252,10 @@ extern void ll_vrc5477_irq_disable(int vrc5477_irq);
251 */ 252 */
252 253
253#define NUM_CPU_IRQ 8 254#define NUM_CPU_IRQ 8
254#define NUM_I8259_IRQ 16
255#define NUM_VRC5477_IRQ 32 255#define NUM_VRC5477_IRQ 32
256 256
257#define DDB_IRQ_BASE 0 257#define CPU_IRQ_BASE MIPS_CPU_IRQ_BASE
258 258#define VRC5477_IRQ_BASE (CPU_IRQ_BASE + NUM_CPU_IRQ)
259#define I8259_IRQ_BASE DDB_IRQ_BASE
260#define VRC5477_IRQ_BASE (I8259_IRQ_BASE + NUM_I8259_IRQ)
261#define CPU_IRQ_BASE (VRC5477_IRQ_BASE + NUM_VRC5477_IRQ)
262 259
263/* 260/*
264 * vrc5477 irq defs 261 * vrc5477 irq defs
@@ -300,22 +297,22 @@ extern void ll_vrc5477_irq_disable(int vrc5477_irq);
300/* 297/*
301 * i2859 irq assignment 298 * i2859 irq assignment
302 */ 299 */
303#define I8259_IRQ_RESERVED_0 (0 + I8259_IRQ_BASE) 300#define I8259_IRQ_RESERVED_0 (0 + I8259A_IRQ_BASE)
304#define I8259_IRQ_KEYBOARD (1 + I8259_IRQ_BASE) /* M1543 default */ 301#define I8259_IRQ_KEYBOARD (1 + I8259A_IRQ_BASE) /* M1543 default */
305#define I8259_IRQ_CASCADE (2 + I8259_IRQ_BASE) 302#define I8259_IRQ_CASCADE (2 + I8259A_IRQ_BASE)
306#define I8259_IRQ_UART_B (3 + I8259_IRQ_BASE) /* M1543 default, may conflict with RTC according to schematic diagram */ 303#define I8259_IRQ_UART_B (3 + I8259A_IRQ_BASE) /* M1543 default, may conflict with RTC according to schematic diagram */
307#define I8259_IRQ_UART_A (4 + I8259_IRQ_BASE) /* M1543 default */ 304#define I8259_IRQ_UART_A (4 + I8259A_IRQ_BASE) /* M1543 default */
308#define I8259_IRQ_PARALLEL (5 + I8259_IRQ_BASE) /* M1543 default */ 305#define I8259_IRQ_PARALLEL (5 + I8259A_IRQ_BASE) /* M1543 default */
309#define I8259_IRQ_RESERVED_6 (6 + I8259_IRQ_BASE) 306#define I8259_IRQ_RESERVED_6 (6 + I8259A_IRQ_BASE)
310#define I8259_IRQ_RESERVED_7 (7 + I8259_IRQ_BASE) 307#define I8259_IRQ_RESERVED_7 (7 + I8259A_IRQ_BASE)
311#define I8259_IRQ_RTC (8 + I8259_IRQ_BASE) /* who set this? */ 308#define I8259_IRQ_RTC (8 + I8259A_IRQ_BASE) /* who set this? */
312#define I8259_IRQ_USB (9 + I8259_IRQ_BASE) /* ddb_setup */ 309#define I8259_IRQ_USB (9 + I8259A_IRQ_BASE) /* ddb_setup */
313#define I8259_IRQ_PMU (10 + I8259_IRQ_BASE) /* ddb_setup */ 310#define I8259_IRQ_PMU (10 + I8259A_IRQ_BASE) /* ddb_setup */
314#define I8259_IRQ_RESERVED_11 (11 + I8259_IRQ_BASE) 311#define I8259_IRQ_RESERVED_11 (11 + I8259A_IRQ_BASE)
315#define I8259_IRQ_RESERVED_12 (12 + I8259_IRQ_BASE) /* m1543_irq_setup */ 312#define I8259_IRQ_RESERVED_12 (12 + I8259A_IRQ_BASE) /* m1543_irq_setup */
316#define I8259_IRQ_RESERVED_13 (13 + I8259_IRQ_BASE) 313#define I8259_IRQ_RESERVED_13 (13 + I8259A_IRQ_BASE)
317#define I8259_IRQ_HDC1 (14 + I8259_IRQ_BASE) /* default and ddb_setup */ 314#define I8259_IRQ_HDC1 (14 + I8259A_IRQ_BASE) /* default and ddb_setup */
318#define I8259_IRQ_HDC2 (15 + I8259_IRQ_BASE) /* default */ 315#define I8259_IRQ_HDC2 (15 + I8259A_IRQ_BASE) /* default */
319 316
320 317
321/* 318/*
diff --git a/include/asm-mips/dec/interrupts.h b/include/asm-mips/dec/interrupts.h
index 273e4d65bfe6..e10d341067c8 100644
--- a/include/asm-mips/dec/interrupts.h
+++ b/include/asm-mips/dec/interrupts.h
@@ -14,6 +14,7 @@
14#ifndef __ASM_DEC_INTERRUPTS_H 14#ifndef __ASM_DEC_INTERRUPTS_H
15#define __ASM_DEC_INTERRUPTS_H 15#define __ASM_DEC_INTERRUPTS_H
16 16
17#include <irq.h>
17#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
18 19
19 20
@@ -87,7 +88,7 @@
87#define DEC_CPU_INR_SW1 1 /* software #1 */ 88#define DEC_CPU_INR_SW1 1 /* software #1 */
88#define DEC_CPU_INR_SW0 0 /* software #0 */ 89#define DEC_CPU_INR_SW0 0 /* software #0 */
89 90
90#define DEC_CPU_IRQ_BASE 0 /* first IRQ assigned to CPU */ 91#define DEC_CPU_IRQ_BASE MIPS_CPU_IRQ_BASE /* first IRQ assigned to CPU */
91 92
92#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE) 93#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE)
93#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP)) 94#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP))
diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
index 23f789c80845..e06ef0776d48 100644
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -91,6 +91,7 @@
91#else 91#else
92#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000) 92#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
93#endif 93#endif
94#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
94 95
95/* 8237 DMA controllers */ 96/* 8237 DMA controllers */
96#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ 97#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
diff --git a/include/asm-mips/emma2rh/emma2rh.h b/include/asm-mips/emma2rh/emma2rh.h
index 4fb8df71caa9..6a1af0af51e3 100644
--- a/include/asm-mips/emma2rh/emma2rh.h
+++ b/include/asm-mips/emma2rh/emma2rh.h
@@ -24,6 +24,8 @@
24#ifndef __ASM_EMMA2RH_EMMA2RH_H 24#ifndef __ASM_EMMA2RH_EMMA2RH_H
25#define __ASM_EMMA2RH_EMMA2RH_H 25#define __ASM_EMMA2RH_EMMA2RH_H
26 26
27#include <irq.h>
28
27/* 29/*
28 * EMMA2RH registers 30 * EMMA2RH registers
29 */ 31 */
@@ -104,7 +106,8 @@
104#define NUM_EMMA2RH_IRQ 96 106#define NUM_EMMA2RH_IRQ 96
105 107
106#define CPU_EMMA2RH_CASCADE 2 108#define CPU_EMMA2RH_CASCADE 2
107#define EMMA2RH_IRQ_BASE 0 109#define CPU_IRQ_BASE MIPS_CPU_IRQ_BASE
110#define EMMA2RH_IRQ_BASE (CPU_IRQ_BASE + NUM_CPU_IRQ)
108 111
109/* 112/*
110 * emma2rh irq defs 113 * emma2rh irq defs
diff --git a/include/asm-mips/emma2rh/markeins.h b/include/asm-mips/emma2rh/markeins.h
index 8fa766795078..973b0628490d 100644
--- a/include/asm-mips/emma2rh/markeins.h
+++ b/include/asm-mips/emma2rh/markeins.h
@@ -33,7 +33,6 @@
33 33
34#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ) 34#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ)
35#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW) 35#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW)
36#define CPU_IRQ_BASE (EMMA2RH_GPIO_IRQ_BASE + NUM_EMMA2RH_IRQ_GPIO)
37 36
38#define EMMA2RH_SW_IRQ_INT0 (0+EMMA2RH_SW_IRQ_BASE) 37#define EMMA2RH_SW_IRQ_INT0 (0+EMMA2RH_SW_IRQ_BASE)
39#define EMMA2RH_SW_IRQ_INT1 (1+EMMA2RH_SW_IRQ_BASE) 38#define EMMA2RH_SW_IRQ_INT1 (1+EMMA2RH_SW_IRQ_BASE)
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h
index 4df8d8b118c0..e88a01607fea 100644
--- a/include/asm-mips/i8259.h
+++ b/include/asm-mips/i8259.h
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21#include <irq.h>
21 22
22/* i8259A PIC registers */ 23/* i8259A PIC registers */
23#define PIC_MASTER_CMD 0x20 24#define PIC_MASTER_CMD 0x20
@@ -42,8 +43,6 @@ extern void disable_8259A_irq(unsigned int irq);
42 43
43extern void init_i8259_irqs(void); 44extern void init_i8259_irqs(void);
44 45
45#define I8259A_IRQ_BASE 0
46
47/* 46/*
48 * Do the traditional i8259 interrupt polling thing. This is for the few 47 * Do the traditional i8259 interrupt polling thing. This is for the few
49 * cases where no better interrupt acknowledge method is available and we 48 * cases where no better interrupt acknowledge method is available and we
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index d77b657c09c7..67f081078904 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -115,7 +115,7 @@ static inline void set_io_port_base(unsigned long base)
115 */ 115 */
116static inline unsigned long virt_to_phys(volatile const void *address) 116static inline unsigned long virt_to_phys(volatile const void *address)
117{ 117{
118 return (unsigned long)address - PAGE_OFFSET; 118 return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
119} 119}
120 120
121/* 121/*
@@ -132,7 +132,7 @@ static inline unsigned long virt_to_phys(volatile const void *address)
132 */ 132 */
133static inline void * phys_to_virt(unsigned long address) 133static inline void * phys_to_virt(unsigned long address)
134{ 134{
135 return (void *)(address + PAGE_OFFSET); 135 return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
136} 136}
137 137
138/* 138/*
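
virt_to_phys()/phys_to_virt() now fold in PHYS_OFFSET so boards whose RAM does not start at physical address 0 translate correctly. A worked round trip with assumed values (PAGE_OFFSET = 0x80000000, PHYS_OFFSET = 0x10000000, both hypothetical):

/* virt_to_phys((void *)0x80004000) = 0x80004000 - 0x80000000 + 0x10000000 = 0x10004000 */
/* phys_to_virt(0x10004000)         = 0x10004000 + 0x80000000 - 0x10000000 = (void *)0x80004000 */
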
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 386da82e5774..91803ba30ff2 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -18,7 +18,7 @@
18#ifdef CONFIG_I8259 18#ifdef CONFIG_I8259
19static inline int irq_canonicalize(int irq) 19static inline int irq_canonicalize(int irq)
20{ 20{
21 return ((irq == 2) ? 9 : irq); 21 return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
22} 22}
23#else 23#else
24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
diff --git a/include/asm-mips/irq_cpu.h b/include/asm-mips/irq_cpu.h
index ed3d1e3d09ec..ef6a07cddb23 100644
--- a/include/asm-mips/irq_cpu.h
+++ b/include/asm-mips/irq_cpu.h
@@ -13,8 +13,8 @@
13#ifndef _ASM_IRQ_CPU_H 13#ifndef _ASM_IRQ_CPU_H
14#define _ASM_IRQ_CPU_H 14#define _ASM_IRQ_CPU_H
15 15
16extern void mips_cpu_irq_init(int irq_base); 16extern void mips_cpu_irq_init(void);
17extern void rm7k_cpu_irq_init(int irq_base); 17extern void rm7k_cpu_irq_init(void);
18extern void rm9k_cpu_irq_init(int irq_base); 18extern void rm9k_cpu_irq_init(void);
19 19
20#endif /* _ASM_IRQ_CPU_H */ 20#endif /* _ASM_IRQ_CPU_H */
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 582acd8adb81..58fca8a5a9a6 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -39,6 +39,7 @@
39#ifndef _LANGUAGE_ASSEMBLY 39#ifndef _LANGUAGE_ASSEMBLY
40 40
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/types.h>
42#include <asm/io.h> 43#include <asm/io.h>
43 44
44/* cpu pipeline flush */ 45/* cpu pipeline flush */
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 00b0fc68d5cb..24a8d51a55a3 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -12,6 +12,8 @@
12#ifndef __ASM_COBALT_H 12#ifndef __ASM_COBALT_H
13#define __ASM_COBALT_H 13#define __ASM_COBALT_H
14 14
15#include <irq.h>
16
15/* 17/*
16 * i8259 legacy interrupts used on Cobalt: 18 * i8259 legacy interrupts used on Cobalt:
17 * 19 *
@@ -25,7 +27,7 @@
25/* 27/*
26 * CPU IRQs are 16 ... 23 28 * CPU IRQs are 16 ... 23
27 */ 29 */
28#define COBALT_CPU_IRQ 16 30#define COBALT_CPU_IRQ MIPS_CPU_IRQ_BASE
29 31
30#define COBALT_GALILEO_IRQ (COBALT_CPU_IRQ + 2) 32#define COBALT_GALILEO_IRQ (COBALT_CPU_IRQ + 2)
31#define COBALT_SCC_IRQ (COBALT_CPU_IRQ + 3) /* pre-production has 85C30 */ 33#define COBALT_SCC_IRQ (COBALT_CPU_IRQ + 3) /* pre-production has 85C30 */
diff --git a/include/asm-mips/mach-emma2rh/irq.h b/include/asm-mips/mach-emma2rh/irq.h
index bce64244b800..5439eb856461 100644
--- a/include/asm-mips/mach-emma2rh/irq.h
+++ b/include/asm-mips/mach-emma2rh/irq.h
@@ -10,4 +10,6 @@
10 10
11#define NR_IRQS 256 11#define NR_IRQS 256
12 12
13#include_next <irq.h>
14
13#endif /* __ASM_MACH_EMMA2RH_IRQ_H */ 15#endif /* __ASM_MACH_EMMA2RH_IRQ_H */
diff --git a/include/asm-mips/mach-generic/irq.h b/include/asm-mips/mach-generic/irq.h
index 500e10ff24de..70d9a25132c5 100644
--- a/include/asm-mips/mach-generic/irq.h
+++ b/include/asm-mips/mach-generic/irq.h
@@ -8,6 +8,38 @@
8#ifndef __ASM_MACH_GENERIC_IRQ_H 8#ifndef __ASM_MACH_GENERIC_IRQ_H
9#define __ASM_MACH_GENERIC_IRQ_H 9#define __ASM_MACH_GENERIC_IRQ_H
10 10
11#ifndef NR_IRQS
11#define NR_IRQS 128 12#define NR_IRQS 128
13#endif
14
15#ifdef CONFIG_I8259
16#ifndef I8259A_IRQ_BASE
17#define I8259A_IRQ_BASE 0
18#endif
19#endif
20
21#ifdef CONFIG_IRQ_CPU
22
23#ifndef MIPS_CPU_IRQ_BASE
24#ifdef CONFIG_I8259
25#define MIPS_CPU_IRQ_BASE 16
26#else
27#define MIPS_CPU_IRQ_BASE 0
28#endif /* CONFIG_I8259 */
29#endif
30
31#ifdef CONFIG_IRQ_CPU_RM7K
32#ifndef RM7K_CPU_IRQ_BASE
33#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8)
34#endif
35#endif
36
37#ifdef CONFIG_IRQ_CPU_RM9K
38#ifndef RM9K_CPU_IRQ_BASE
39#define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12)
40#endif
41#endif
42
43#endif /* CONFIG_IRQ_CPU */
12 44
13#endif /* __ASM_MACH_GENERIC_IRQ_H */ 45#endif /* __ASM_MACH_GENERIC_IRQ_H */
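
The new defaults above stack the controller bases so a platform header only overrides what it needs. Working through the values shown: with CONFIG_I8259 set, I8259A_IRQ_BASE = 0 (ISA IRQs 0..15), MIPS_CPU_IRQ_BASE = 16 (CPU IRQs 16..23), RM7K_CPU_IRQ_BASE = 16 + 8 = 24 and RM9K_CPU_IRQ_BASE = 16 + 12 = 28 where those options apply; without CONFIG_I8259 the CPU block starts at 0. Platform headers such as the new mach-vr41xx/irq.h define their own bases first and then pull in these fallbacks via #include_next.
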
diff --git a/include/asm-mips/mach-mips/irq.h b/include/asm-mips/mach-mips/irq.h
index e994b0c01227..9b9da26683c2 100644
--- a/include/asm-mips/mach-mips/irq.h
+++ b/include/asm-mips/mach-mips/irq.h
@@ -4,4 +4,6 @@
4 4
5#define NR_IRQS 256 5#define NR_IRQS 256
6 6
7#include_next <irq.h>
8
7#endif /* __ASM_MACH_MIPS_IRQ_H */ 9#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/include/asm-mips/mach-vr41xx/irq.h b/include/asm-mips/mach-vr41xx/irq.h
new file mode 100644
index 000000000000..848812296052
--- /dev/null
+++ b/include/asm-mips/mach-vr41xx/irq.h
@@ -0,0 +1,11 @@
1#ifndef __ASM_MACH_VR41XX_IRQ_H
2#define __ASM_MACH_VR41XX_IRQ_H
3
4#include <asm/vr41xx/irq.h> /* for MIPS_CPU_IRQ_BASE */
5#ifdef CONFIG_NEC_CMBVR4133
6#include <asm/vr41xx/cmbvr4133.h> /* for I8259A_IRQ_BASE */
7#endif
8
9#include_next <irq.h>
10
11#endif /* __ASM_MACH_VR41XX_IRQ_H */
diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h
index b15e4ea0b091..76add42e486e 100644
--- a/include/asm-mips/mips-boards/atlasint.h
+++ b/include/asm-mips/mips-boards/atlasint.h
@@ -26,10 +26,12 @@
26#ifndef _MIPS_ATLASINT_H 26#ifndef _MIPS_ATLASINT_H
27#define _MIPS_ATLASINT_H 27#define _MIPS_ATLASINT_H
28 28
29#include <irq.h>
30
29/* 31/*
30 * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode) 32 * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode)
31 */ 33 */
32#define MIPSCPU_INT_BASE 0 34#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
33 35
34/* CPU interrupt offsets */ 36/* CPU interrupt offsets */
35#define MIPSCPU_INT_SW0 0 37#define MIPSCPU_INT_SW0 0
diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h
index da6cc2fbbc78..9180d6466113 100644
--- a/include/asm-mips/mips-boards/maltaint.h
+++ b/include/asm-mips/mips-boards/maltaint.h
@@ -25,6 +25,8 @@
25#ifndef _MIPS_MALTAINT_H 25#ifndef _MIPS_MALTAINT_H
26#define _MIPS_MALTAINT_H 26#define _MIPS_MALTAINT_H
27 27
28#include <irq.h>
29
28/* 30/*
29 * Interrupts 0..15 are used for Malta ISA compatible interrupts 31 * Interrupts 0..15 are used for Malta ISA compatible interrupts
30 */ 32 */
@@ -33,7 +35,7 @@
33/* 35/*
34 * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode) 36 * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode)
35 */ 37 */
36#define MIPSCPU_INT_BASE 16 38#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
37 39
38/* CPU interrupt offsets */ 40/* CPU interrupt offsets */
39#define MIPSCPU_INT_SW0 0 41#define MIPSCPU_INT_SW0 0
diff --git a/include/asm-mips/mips-boards/prom.h b/include/asm-mips/mips-boards/prom.h
index 4168c7fcd43e..7bf6f5f6ab9c 100644
--- a/include/asm-mips/mips-boards/prom.h
+++ b/include/asm-mips/mips-boards/prom.h
@@ -33,7 +33,6 @@ extern void prom_printf(char *fmt, ...);
33extern void prom_init_cmdline(void); 33extern void prom_init_cmdline(void);
34extern void prom_meminit(void); 34extern void prom_meminit(void);
35extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem); 35extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
36extern unsigned long prom_free_prom_memory (void);
37extern void mips_display_message(const char *str); 36extern void mips_display_message(const char *str);
38extern void mips_display_word(unsigned int num); 37extern void mips_display_word(unsigned int num);
39extern int get_ethernet_addr(char *ethernet_addr); 38extern int get_ethernet_addr(char *ethernet_addr);
diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h
index 365c2a3c64f5..4f6a3933699d 100644
--- a/include/asm-mips/mips-boards/seadint.h
+++ b/include/asm-mips/mips-boards/seadint.h
@@ -20,10 +20,12 @@
20#ifndef _MIPS_SEADINT_H 20#ifndef _MIPS_SEADINT_H
21#define _MIPS_SEADINT_H 21#define _MIPS_SEADINT_H
22 22
23#include <irq.h>
24
23/* 25/*
24 * Interrupts 0..7 are used for SEAD CPU interrupts 26 * Interrupts 0..7 are used for SEAD CPU interrupts
25 */ 27 */
26#define MIPSCPU_INT_BASE 0 28#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
27 29
28#define MIPSCPU_INT_UART0 2 30#define MIPSCPU_INT_UART0 2
29#define MIPSCPU_INT_UART1 3 31#define MIPSCPU_INT_UART1 3
diff --git a/include/asm-mips/mips-boards/simint.h b/include/asm-mips/mips-boards/simint.h
index 4952e0b3bf11..54f2fe621d69 100644
--- a/include/asm-mips/mips-boards/simint.h
+++ b/include/asm-mips/mips-boards/simint.h
@@ -17,10 +17,11 @@
17#ifndef _MIPS_SIMINT_H 17#ifndef _MIPS_SIMINT_H
18#define _MIPS_SIMINT_H 18#define _MIPS_SIMINT_H
19 19
20#include <irq.h>
20 21
21#define SIM_INT_BASE 0 22#define SIM_INT_BASE 0
22#define MIPSCPU_INT_MB0 2 23#define MIPSCPU_INT_MB0 2
23#define MIPSCPU_INT_BASE 16 24#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
24#define MIPS_CPU_TIMER_IRQ 7 25#define MIPS_CPU_TIMER_IRQ 7
25 26
26 27
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index 3e9468f424f4..294bca12cd3f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,8 +165,6 @@
165 165
166#ifndef __ASSEMBLY__ 166#ifndef __ASSEMBLY__
167 167
168extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
169
170static inline unsigned int dvpe(void) 168static inline unsigned int dvpe(void)
171{ 169{
172 int res = 0; 170 int res = 0;
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 2f9e1a9ec51f..d3fbd83ff545 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -34,6 +34,20 @@
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37/*
38 * This gives the physical RAM offset.
39 */
40#ifndef PHYS_OFFSET
41#define PHYS_OFFSET 0UL
42#endif
43
44/*
45 * It's normally defined only for FLATMEM config but it's
46 * used in our early mem init code for all memory models.
47 * So always define it.
48 */
49#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
50
37#include <linux/pfn.h> 51#include <linux/pfn.h>
38#include <asm/io.h> 52#include <asm/io.h>
39 53
@@ -132,20 +146,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
132/* to align the pointer to the (next) page boundary */ 146/* to align the pointer to the (next) page boundary */
133#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) 147#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
134 148
149/*
150 * __pa()/__va() should be used only during mem init.
151 */
135#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 152#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
136#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0) 153#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
137#else 154#else
138#define __pa_page_offset(x) PAGE_OFFSET 155#define __pa_page_offset(x) PAGE_OFFSET
139#endif 156#endif
140#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x)) 157#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x) + PHYS_OFFSET)
141#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) 158#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
142#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) 159#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
143 160
144#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 161#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
145 162
146#ifdef CONFIG_FLATMEM 163#ifdef CONFIG_FLATMEM
147 164
148#define pfn_valid(pfn) ((pfn) < max_mapnr) 165#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
149 166
150#elif defined(CONFIG_SPARSEMEM) 167#elif defined(CONFIG_SPARSEMEM)
151 168
diff --git a/include/asm-mips/rtlx.h b/include/asm-mips/rtlx.h
index 76cd51c6be39..59162f74a798 100644
--- a/include/asm-mips/rtlx.h
+++ b/include/asm-mips/rtlx.h
@@ -6,9 +6,10 @@
6#ifndef __ASM_RTLX_H 6#ifndef __ASM_RTLX_H
7#define __ASM_RTLX_H_ 7#define __ASM_RTLX_H_
8 8
9#include <irq.h>
10
9#define LX_NODE_BASE 10 11#define LX_NODE_BASE 10
10 12
11#define MIPSCPU_INT_BASE 16
12#define MIPS_CPU_RTLX_IRQ 0 13#define MIPS_CPU_RTLX_IRQ 0
13 14
14#define RTLX_VERSION 2 15#define RTLX_VERSION 2
diff --git a/include/asm-mips/sections.h b/include/asm-mips/sections.h
index f7016278b266..b7e37262c246 100644
--- a/include/asm-mips/sections.h
+++ b/include/asm-mips/sections.h
@@ -3,6 +3,4 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _fdata;
7
8#endif /* _ASM_SECTIONS_H */ 6#endif /* _ASM_SECTIONS_H */
diff --git a/include/asm-mips/sgi/ip22.h b/include/asm-mips/sgi/ip22.h
index bbfc05c3cab9..6592f3bd1999 100644
--- a/include/asm-mips/sgi/ip22.h
+++ b/include/asm-mips/sgi/ip22.h
@@ -21,15 +21,16 @@
21 * HAL2 driver). This will prevent many complications, trust me ;-) 21 * HAL2 driver). This will prevent many complications, trust me ;-)
22 */ 22 */
23 23
24#include <irq.h>
24#include <asm/sgi/ioc.h> 25#include <asm/sgi/ioc.h>
25 26
26#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */ 27#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */
27#define SGINT_CPU 16 /* MIPS CPU define 8 interrupt sources */ 28#define SGINT_CPU MIPS_CPU_IRQ_BASE /* MIPS CPU define 8 interrupt sources */
28#define SGINT_LOCAL0 24 /* 8 local0 irq levels */ 29#define SGINT_LOCAL0 (SGINT_CPU+8) /* 8 local0 irq levels */
29#define SGINT_LOCAL1 32 /* 8 local1 irq levels */ 30#define SGINT_LOCAL1 (SGINT_CPU+16) /* 8 local1 irq levels */
30#define SGINT_LOCAL2 40 /* 8 local2 vectored irq levels */ 31#define SGINT_LOCAL2 (SGINT_CPU+24) /* 8 local2 vectored irq levels */
31#define SGINT_LOCAL3 48 /* 8 local3 vectored irq levels */ 32#define SGINT_LOCAL3 (SGINT_CPU+32) /* 8 local3 vectored irq levels */
32#define SGINT_END 56 /* End of 'spaces' */ 33#define SGINT_END (SGINT_CPU+40) /* End of 'spaces' */
33 34
34/* 35/*
35 * Individual interrupt definitions for the Indy and Indigo2 36 * Individual interrupt definitions for the Indy and Indigo2
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index f22c3e2f993a..55f3419f6546 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -44,9 +44,6 @@ struct smtc_ipi_q {
44 int depth; 44 int depth;
45}; 45};
46 46
47extern struct smtc_ipi_q IPIQ[NR_CPUS];
48extern struct smtc_ipi_q freeIPIq;
49
50static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) 47static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
51{ 48{
52 long flags; 49 long flags;
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 1cdd4eeb2f73..c12ebc53ef31 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -488,7 +488,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
488}) 488})
489 489
490/* 490/*
491 * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. 491 * __copy_from_user: - Copy a block of data from user space, with less checking.
492 * @to: Destination address, in kernel space.
492 * @from: Source address, in user space. 493 * @from: Source address, in user space.
493 * @n: Number of bytes to copy. 494 * @n: Number of bytes to copy.
494 * 495 *
diff --git a/include/asm-mips/vr41xx/cmbvr4133.h b/include/asm-mips/vr41xx/cmbvr4133.h
index 9490ade58b46..42300037d593 100644
--- a/include/asm-mips/vr41xx/cmbvr4133.h
+++ b/include/asm-mips/vr41xx/cmbvr4133.h
@@ -35,8 +35,8 @@
35#define CMBVR41XX_INTD_IRQ GIU_IRQ(CMBVR41XX_INTD_PIN) 35#define CMBVR41XX_INTD_IRQ GIU_IRQ(CMBVR41XX_INTD_PIN)
36#define CMBVR41XX_INTE_IRQ GIU_IRQ(CMBVR41XX_INTE_PIN) 36#define CMBVR41XX_INTE_IRQ GIU_IRQ(CMBVR41XX_INTE_PIN)
37 37
38#define I8259_IRQ_BASE 72 38#define I8259A_IRQ_BASE 72
39#define I8259_IRQ(x) (I8259_IRQ_BASE + (x)) 39#define I8259_IRQ(x) (I8259A_IRQ_BASE + (x))
40#define TIMER_IRQ I8259_IRQ(0) 40#define TIMER_IRQ I8259_IRQ(0)
41#define KEYBOARD_IRQ I8259_IRQ(1) 41#define KEYBOARD_IRQ I8259_IRQ(1)
42#define I8259_SLAVE_IRQ I8259_IRQ(2) 42#define I8259_SLAVE_IRQ I8259_IRQ(2)
@@ -52,6 +52,5 @@
52#define AUX_IRQ I8259_IRQ(12) 52#define AUX_IRQ I8259_IRQ(12)
53#define IDE_PRIMARY_IRQ I8259_IRQ(14) 53#define IDE_PRIMARY_IRQ I8259_IRQ(14)
54#define IDE_SECONDARY_IRQ I8259_IRQ(15) 54#define IDE_SECONDARY_IRQ I8259_IRQ(15)
55#define I8259_IRQ_LAST IDE_SECONDARY_IRQ
56 55
57#endif /* __NEC_CMBVR4133_H */ 56#endif /* __NEC_CMBVR4133_H */
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h
index 356a0b183539..296f4f1a20e1 100644
--- a/include/asm-s390/compat.h
+++ b/include/asm-s390/compat.h
@@ -6,6 +6,34 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8 8
9#define PSW32_MASK_PER 0x40000000UL
10#define PSW32_MASK_DAT 0x04000000UL
11#define PSW32_MASK_IO 0x02000000UL
12#define PSW32_MASK_EXT 0x01000000UL
13#define PSW32_MASK_KEY 0x00F00000UL
14#define PSW32_MASK_MCHECK 0x00040000UL
15#define PSW32_MASK_WAIT 0x00020000UL
16#define PSW32_MASK_PSTATE 0x00010000UL
17#define PSW32_MASK_ASC 0x0000C000UL
18#define PSW32_MASK_CC 0x00003000UL
19#define PSW32_MASK_PM 0x00000f00UL
20
21#define PSW32_ADDR_AMODE31 0x80000000UL
22#define PSW32_ADDR_INSN 0x7FFFFFFFUL
23
24#define PSW32_BASE_BITS 0x00080000UL
25
26#define PSW32_ASC_PRIMARY 0x00000000UL
27#define PSW32_ASC_ACCREG 0x00004000UL
28#define PSW32_ASC_SECONDARY 0x00008000UL
29#define PSW32_ASC_HOME 0x0000C000UL
30
31#define PSW32_MASK_MERGE(CURRENT,NEW) \
32 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
33 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
34
35extern long psw32_user_bits;
36
9#define COMPAT_USER_HZ 100 37#define COMPAT_USER_HZ 100
10 38
11typedef u32 compat_size_t; 39typedef u32 compat_size_t;
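
PSW32_MASK_MERGE above keeps everything from the current 31-bit PSW mask except the condition code and program mask, which come from the new value. A sketch with arbitrary made-up values:

/* CURRENT = 0x070DF000 (CC = 11, PM = 0000), NEW = 0x00002F00 (CC = 10, PM = 1111) */
/* PSW32_MASK_MERGE(CURRENT, NEW)                                                   */
/*   = (0x070DF000 & ~(PSW32_MASK_CC | PSW32_MASK_PM)) | (0x00002F00 & 0x00003F00)  */
/*   = 0x070DC000 | 0x00002F00 = 0x070DEF00                                         */
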
diff --git a/include/asm-s390/etr.h b/include/asm-s390/etr.h
new file mode 100644
index 000000000000..b498f19bb9a7
--- /dev/null
+++ b/include/asm-s390/etr.h
@@ -0,0 +1,219 @@
1/*
2 * include/asm-s390/etr.h
3 *
4 * Copyright IBM Corp. 2006
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */
7#ifndef __S390_ETR_H
8#define __S390_ETR_H
9
10/* ETR attachment control register */
11struct etr_eacr {
12 unsigned int e0 : 1; /* port 0 stepping control */
13 unsigned int e1 : 1; /* port 1 stepping control */
14 unsigned int _pad0 : 5; /* must be 00100 */
15 unsigned int dp : 1; /* data port control */
16 unsigned int p0 : 1; /* port 0 change recognition control */
17 unsigned int p1 : 1; /* port 1 change recognition control */
18 unsigned int _pad1 : 3; /* must be 000 */
19 unsigned int ea : 1; /* ETR alert control */
20 unsigned int es : 1; /* ETR sync check control */
21 unsigned int sl : 1; /* switch to local control */
22} __attribute__ ((packed));
23
24/* Port state returned by steai */
25enum etr_psc {
26 etr_psc_operational = 0,
27 etr_psc_semi_operational = 1,
28 etr_psc_protocol_error = 4,
29 etr_psc_no_symbols = 8,
30 etr_psc_no_signal = 12,
31 etr_psc_pps_mode = 13
32};
33
34/* Logical port state returned by stetr */
35enum etr_lpsc {
36 etr_lpsc_operational_step = 0,
37 etr_lpsc_operational_alt = 1,
38 etr_lpsc_semi_operational = 2,
39 etr_lpsc_protocol_error = 4,
40 etr_lpsc_no_symbol_sync = 8,
41 etr_lpsc_no_signal = 12,
42 etr_lpsc_pps_mode = 13
43};
44
45/* ETR status words */
46struct etr_esw {
47 struct etr_eacr eacr; /* attachment control register */
48 unsigned int y : 1; /* stepping mode */
49 unsigned int _pad0 : 5; /* must be 00000 */
50 unsigned int p : 1; /* stepping port number */
51 unsigned int q : 1; /* data port number */
52 unsigned int psc0 : 4; /* port 0 state code */
53 unsigned int psc1 : 4; /* port 1 state code */
54} __attribute__ ((packed));
55
56/* Second level data register status word */
57struct etr_slsw {
58 unsigned int vv1 : 1; /* copy of validity bit data frame 1 */
59 unsigned int vv2 : 1; /* copy of validity bit data frame 2 */
60 unsigned int vv3 : 1; /* copy of validity bit data frame 3 */
61 unsigned int vv4 : 1; /* copy of validity bit data frame 4 */
62 unsigned int _pad0 : 19; /* must by all zeroes */
63 unsigned int n : 1; /* EAF port number */
64 unsigned int v1 : 1; /* validity bit ETR data frame 1 */
65 unsigned int v2 : 1; /* validity bit ETR data frame 2 */
66 unsigned int v3 : 1; /* validity bit ETR data frame 3 */
67 unsigned int v4 : 1; /* validity bit ETR data frame 4 */
68 unsigned int _pad1 : 4; /* must be 0000 */
69} __attribute__ ((packed));
70
71/* ETR data frames */
72struct etr_edf1 {
73 unsigned int u : 1; /* untuned bit */
74 unsigned int _pad0 : 1; /* must be 0 */
75 unsigned int r : 1; /* service request bit */
76 unsigned int _pad1 : 4; /* must be 0000 */
77 unsigned int a : 1; /* time adjustment bit */
78 unsigned int net_id : 8; /* ETR network id */
79 unsigned int etr_id : 8; /* id of ETR which sends data frames */
80 unsigned int etr_pn : 8; /* port number of ETR output port */
81} __attribute__ ((packed));
82
83struct etr_edf2 {
84 unsigned int etv : 32; /* Upper 32 bits of TOD. */
85} __attribute__ ((packed));
86
87struct etr_edf3 {
88 unsigned int rc : 8; /* failure reason code */
89 unsigned int _pad0 : 3; /* must be 000 */
90 unsigned int c : 1; /* ETR coupled bit */
91 unsigned int tc : 4; /* ETR type code */
92 unsigned int blto : 8; /* biased local time offset */
93 /* (blto - 128) * 15 = minutes */
94 unsigned int buo : 8; /* biased utc offset */
95 /* (buo - 128) = leap seconds */
96} __attribute__ ((packed));
97
98struct etr_edf4 {
99 unsigned int ed : 8; /* ETS device dependent data */
100 unsigned int _pad0 : 1; /* must be 0 */
101 unsigned int buc : 5; /* biased ut1 correction */
102 /* (buc - 16) * 0.1 seconds */
103 unsigned int em : 6; /* ETS error magnitude */
104 unsigned int dc : 6; /* ETS drift code */
105 unsigned int sc : 6; /* ETS steering code */
106} __attribute__ ((packed));
107
108/*
109 * ETR attachment information block, two formats
110 * format 1 has 4 reserved words with a size of 64 bytes
111 * format 2 has 16 reserved words with a size of 96 bytes
112 */
113struct etr_aib {
114 struct etr_esw esw;
115 struct etr_slsw slsw;
116 unsigned long long tsp;
117 struct etr_edf1 edf1;
118 struct etr_edf2 edf2;
119 struct etr_edf3 edf3;
120 struct etr_edf4 edf4;
121 unsigned int reserved[16];
122} __attribute__ ((packed,aligned(8)));
123
124/* ETR interruption parameter */
125struct etr_interruption_parameter {
126 unsigned int _pad0 : 8;
127 unsigned int pc0 : 1; /* port 0 state change */
128 unsigned int pc1 : 1; /* port 1 state change */
129 unsigned int _pad1 : 3;
130 unsigned int eai : 1; /* ETR alert indication */
131 unsigned int _pad2 : 18;
132} __attribute__ ((packed));
133
134/* Query TOD offset result */
135struct etr_ptff_qto {
136 unsigned long long physical_clock;
137 unsigned long long tod_offset;
138 unsigned long long logical_tod_offset;
139 unsigned long long tod_epoch_difference;
140} __attribute__ ((packed));
141
142/* Inline assembly helper functions */
143static inline int etr_setr(struct etr_eacr *ctrl)
144{
145 int rc = -ENOSYS;
146
147 asm volatile(
148 " .insn s,0xb2160000,0(%2)\n"
149 "0: la %0,0\n"
150 "1:\n"
151 EX_TABLE(0b,1b)
152 : "+d" (rc) : "m" (*ctrl), "a" (ctrl));
153 return rc;
154}
155
156/* Stores a format 1 aib with 64 bytes */
157static inline int etr_stetr(struct etr_aib *aib)
158{
159 int rc = -ENOSYS;
160
161 asm volatile(
162 " .insn s,0xb2170000,0(%2)\n"
163 "0: la %0,0\n"
164 "1:\n"
165 EX_TABLE(0b,1b)
166 : "+d" (rc) : "m" (*aib), "a" (aib));
167 return rc;
168}
169
170/* Stores a format 2 aib with 96 bytes for specified port */
171static inline int etr_steai(struct etr_aib *aib, unsigned int func)
172{
173 register unsigned int reg0 asm("0") = func;
174 int rc = -ENOSYS;
175
176 asm volatile(
177 " .insn s,0xb2b30000,0(%2)\n"
178 "0: la %0,0\n"
179 "1:\n"
180 EX_TABLE(0b,1b)
181 : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0));
182 return rc;
183}
184
185/* Function codes for the steai instruction. */
186#define ETR_STEAI_STEPPING_PORT 0x10
187#define ETR_STEAI_ALTERNATE_PORT 0x11
188#define ETR_STEAI_PORT_0 0x12
189#define ETR_STEAI_PORT_1 0x13
190
191static inline int etr_ptff(void *ptff_block, unsigned int func)
192{
193 register unsigned int reg0 asm("0") = func;
194 register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
195 int rc = -ENOSYS;
196
197 asm volatile(
198 " .word 0x0104\n"
199 " ipm %0\n"
200 " srl %0,28\n"
201 : "=d" (rc), "=m" (ptff_block)
202 : "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc");
203 return rc;
204}
205
206/* Function codes for the ptff instruction. */
207#define ETR_PTFF_QAF 0x00 /* query available functions */
208#define ETR_PTFF_QTO 0x01 /* query tod offset */
209#define ETR_PTFF_QSI 0x02 /* query steering information */
210#define ETR_PTFF_ATO 0x40 /* adjust tod offset */
211#define ETR_PTFF_STO 0x41 /* set tod offset */
212#define ETR_PTFF_SFS 0x42 /* set fine steering rate */
213#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
214
215/* Functions needed by the machine check handler */
216extern void etr_switch_to_local(void);
217extern void etr_sync_check(void);
218
219#endif /* __S390_ETR_H */
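
The new header wraps the ETR instructions in small inline helpers that return -ENOSYS when the exception table catches the fault (i.e. no ETR facility present). A minimal usage sketch, assuming surrounding timekeeping code; only the types, helpers and function codes come from the file above, the caller is hypothetical.

/* Hypothetical caller: query the attachment information block for port 0. */
struct etr_aib aib;

if (etr_steai(&aib, ETR_STEAI_PORT_0) == 0 &&
    aib.esw.psc0 == etr_psc_operational) {
        /* port 0 delivers usable stepping data; aib.edf2.etv holds the upper TOD bits */
}
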
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index c2f6a8782d31..31beb18cb3d1 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
32 32
33#define HARDIRQ_BITS 8 33#define HARDIRQ_BITS 8
34 34
35extern void account_ticks(void); 35extern void account_ticks(u64 time);
36 36
37#endif /* __ASM_HARDIRQ_H */ 37#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index efb7de9c1c6b..a4c2d550dad4 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -28,11 +28,7 @@ static inline unsigned long virt_to_phys(volatile void * address)
28{ 28{
29 unsigned long real_address; 29 unsigned long real_address;
30 asm volatile( 30 asm volatile(
31#ifndef __s390x__
32 " lra %0,0(%1)\n" 31 " lra %0,0(%1)\n"
33#else /* __s390x__ */
34 " lrag %0,0(%1)\n"
35#endif /* __s390x__ */
36 " jz 0f\n" 32 " jz 0f\n"
37 " la %0,0\n" 33 " la %0,0\n"
38 "0:" 34 "0:"
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
index 40cc68025e01..1b50f89819a4 100644
--- a/include/asm-s390/kdebug.h
+++ b/include/asm-s390/kdebug.h
@@ -26,7 +26,6 @@ extern int register_page_fault_notifier(struct notifier_block *);
26extern int unregister_page_fault_notifier(struct notifier_block *); 26extern int unregister_page_fault_notifier(struct notifier_block *);
27extern struct atomic_notifier_head s390die_chain; 27extern struct atomic_notifier_head s390die_chain;
28 28
29
30enum die_val { 29enum die_val {
31 DIE_OOPS = 1, 30 DIE_OOPS = 1,
32 DIE_BPT, 31 DIE_BPT,
@@ -56,4 +55,6 @@ static inline int notify_die(enum die_val val, const char *str,
56 return atomic_notifier_call_chain(&s390die_chain, val, &args); 55 return atomic_notifier_call_chain(&s390die_chain, val, &args);
57} 56}
58 57
58extern void die(const char *, struct pt_regs *, long);
59
59#endif 60#endif
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 74f7389bd3ee..4a31d0a7ee83 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -220,7 +220,8 @@ struct _lowcore
220 __u32 kernel_asce; /* 0xc4c */ 220 __u32 kernel_asce; /* 0xc4c */
221 __u32 user_asce; /* 0xc50 */ 221 __u32 user_asce; /* 0xc50 */
222 __u32 panic_stack; /* 0xc54 */ 222 __u32 panic_stack; /* 0xc54 */
223 __u8 pad10[0xc60-0xc58]; /* 0xc58 */ 223 __u32 user_exec_asce; /* 0xc58 */
224 __u8 pad10[0xc60-0xc5c]; /* 0xc5c */
224 /* entry.S sensitive area start */ 225 /* entry.S sensitive area start */
225 struct cpuinfo_S390 cpu_data; /* 0xc60 */ 226 struct cpuinfo_S390 cpu_data; /* 0xc60 */
226 __u32 ipl_device; /* 0xc7c */ 227 __u32 ipl_device; /* 0xc7c */
@@ -310,7 +311,8 @@ struct _lowcore
310 __u64 kernel_asce; /* 0xd58 */ 311 __u64 kernel_asce; /* 0xd58 */
311 __u64 user_asce; /* 0xd60 */ 312 __u64 user_asce; /* 0xd60 */
312 __u64 panic_stack; /* 0xd68 */ 313 __u64 panic_stack; /* 0xd68 */
313 __u8 pad10[0xd80-0xd70]; /* 0xd70 */ 314 __u64 user_exec_asce; /* 0xd70 */
315 __u8 pad10[0xd80-0xd78]; /* 0xd78 */
314 /* entry.S sensitive area start */ 316 /* entry.S sensitive area start */
315 struct cpuinfo_S390 cpu_data; /* 0xd80 */ 317 struct cpuinfo_S390 cpu_data; /* 0xd80 */
316 __u32 ipl_device; /* 0xdb8 */ 318 __u32 ipl_device; /* 0xdb8 */
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index bcf24a873874..1d21da220d49 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -9,6 +9,7 @@
9#ifndef __S390_MMU_CONTEXT_H 9#ifndef __S390_MMU_CONTEXT_H
10#define __S390_MMU_CONTEXT_H 10#define __S390_MMU_CONTEXT_H
11 11
12#include <asm/pgalloc.h>
12/* 13/*
13 * get a new mmu context.. S390 don't know about contexts. 14 * get a new mmu context.. S390 don't know about contexts.
14 */ 15 */
@@ -16,29 +17,44 @@
16 17
17#define destroy_context(mm) do { } while (0) 18#define destroy_context(mm) do { } while (0)
18 19
20#ifndef __s390x__
21#define LCTL_OPCODE "lctl"
22#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
23#else
24#define LCTL_OPCODE "lctlg"
25#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
26#endif
27
19static inline void enter_lazy_tlb(struct mm_struct *mm, 28static inline void enter_lazy_tlb(struct mm_struct *mm,
20 struct task_struct *tsk) 29 struct task_struct *tsk)
21{ 30{
22} 31}
23 32
24static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 33static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
25 struct task_struct *tsk) 34 struct task_struct *tsk)
26{ 35{
27 if (prev != next) { 36 pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
28#ifndef __s390x__ 37
29 S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) | 38 if (prev != next) {
30 (_SEGMENT_TABLE|USER_STD_MASK); 39 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
31 /* Load home space page table origin. */ 40 PGTABLE_BITS;
32 asm volatile("lctl 13,13,%0" 41 if (shadow_pgd) {
33 : : "m" (S390_lowcore.user_asce) ); 42 /* Load primary/secondary space page table origin. */
34#else /* __s390x__ */ 43 S390_lowcore.user_exec_asce =
35 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) | 44 (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
36 (_REGION_TABLE|USER_STD_MASK); 45 asm volatile(LCTL_OPCODE" 1,1,%0\n"
37 /* Load home space page table origin. */ 46 LCTL_OPCODE" 7,7,%1"
38 asm volatile("lctlg 13,13,%0" 47 : : "m" (S390_lowcore.user_exec_asce),
39 : : "m" (S390_lowcore.user_asce) ); 48 "m" (S390_lowcore.user_asce) );
40#endif /* __s390x__ */ 49 } else if (switch_amode) {
41 } 50 /* Load primary space page table origin. */
51 asm volatile(LCTL_OPCODE" 1,1,%0"
52 : : "m" (S390_lowcore.user_asce) );
53 } else
54 /* Load home space page table origin. */
55 asm volatile(LCTL_OPCODE" 13,13,%0"
56 : : "m" (S390_lowcore.user_asce) );
57 }
42 cpu_set(smp_processor_id(), next->cpu_vm_mask); 58 cpu_set(smp_processor_id(), next->cpu_vm_mask);
43} 59}
44 60
@@ -51,4 +67,4 @@ static inline void activate_mm(struct mm_struct *prev,
51 set_fs(current->thread.mm_segment); 67 set_fs(current->thread.mm_segment);
52} 68}
53 69
54#endif 70#endif /* __S390_MMU_CONTEXT_H */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 0707a7e2fc16..56c8a6c80e2e 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -47,6 +47,17 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
47 47
48 if (!pgd) 48 if (!pgd)
49 return NULL; 49 return NULL;
50 if (s390_noexec) {
51 pgd_t *shadow_pgd = (pgd_t *)
52 __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
53 struct page *page = virt_to_page(pgd);
54
55 if (!shadow_pgd) {
56 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
57 return NULL;
58 }
59 page->lru.next = (void *) shadow_pgd;
60 }
50 for (i = 0; i < PTRS_PER_PGD; i++) 61 for (i = 0; i < PTRS_PER_PGD; i++)
51#ifndef __s390x__ 62#ifndef __s390x__
52 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); 63 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
@@ -58,6 +69,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
58 69
59static inline void pgd_free(pgd_t *pgd) 70static inline void pgd_free(pgd_t *pgd)
60{ 71{
72 pgd_t *shadow_pgd = get_shadow_pgd(pgd);
73
74 if (shadow_pgd)
75 free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
61 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); 76 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
62} 77}
63 78
@@ -71,6 +86,7 @@ static inline void pgd_free(pgd_t *pgd)
71#define pmd_free(x) do { } while (0) 86#define pmd_free(x) do { } while (0)
72#define __pmd_free_tlb(tlb,x) do { } while (0) 87#define __pmd_free_tlb(tlb,x) do { } while (0)
73#define pgd_populate(mm, pmd, pte) BUG() 88#define pgd_populate(mm, pmd, pte) BUG()
89#define pgd_populate_kernel(mm, pmd, pte) BUG()
74#else /* __s390x__ */ 90#else /* __s390x__ */
75static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 91static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
76{ 92{
@@ -79,6 +95,17 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
79 95
80 if (!pmd) 96 if (!pmd)
81 return NULL; 97 return NULL;
98 if (s390_noexec) {
99 pmd_t *shadow_pmd = (pmd_t *)
100 __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
101 struct page *page = virt_to_page(pmd);
102
103 if (!shadow_pmd) {
104 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
105 return NULL;
106 }
107 page->lru.next = (void *) shadow_pmd;
108 }
82 for (i=0; i < PTRS_PER_PMD; i++) 109 for (i=0; i < PTRS_PER_PMD; i++)
83 pmd_clear(pmd + i); 110 pmd_clear(pmd + i);
84 return pmd; 111 return pmd;
@@ -86,6 +113,10 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
86 113
87static inline void pmd_free (pmd_t *pmd) 114static inline void pmd_free (pmd_t *pmd)
88{ 115{
116 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
117
118 if (shadow_pmd)
119 free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
89 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); 120 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
90} 121}
91 122
@@ -95,11 +126,22 @@ static inline void pmd_free (pmd_t *pmd)
95 pmd_free(pmd); \ 126 pmd_free(pmd); \
96 } while (0) 127 } while (0)
97 128
98static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 129static inline void
130pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
99{ 131{
100 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); 132 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
101} 133}
102 134
135static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
136{
137 pgd_t *shadow_pgd = get_shadow_pgd(pgd);
138 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
139
140 if (shadow_pgd && shadow_pmd)
141 pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
142 pgd_populate_kernel(mm, pgd, pmd);
143}
144
103#endif /* __s390x__ */ 145#endif /* __s390x__ */
104 146
105static inline void 147static inline void
@@ -119,7 +161,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
119static inline void 161static inline void
120pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) 162pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
121{ 163{
122 pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page)); 164 pte_t *pte = (pte_t *)page_to_phys(page);
165 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
166 pte_t *shadow_pte = get_shadow_pte(pte);
167
168 pmd_populate_kernel(mm, pmd, pte);
169 if (shadow_pmd && shadow_pte)
170 pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
123} 171}
124 172
125/* 173/*
@@ -133,6 +181,17 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
133 181
134 if (!pte) 182 if (!pte)
135 return NULL; 183 return NULL;
184 if (s390_noexec) {
185 pte_t *shadow_pte = (pte_t *)
186 __get_free_page(GFP_KERNEL|__GFP_REPEAT);
187 struct page *page = virt_to_page(pte);
188
189 if (!shadow_pte) {
190 free_page((unsigned long) pte);
191 return NULL;
192 }
193 page->lru.next = (void *) shadow_pte;
194 }
136 for (i=0; i < PTRS_PER_PTE; i++) { 195 for (i=0; i < PTRS_PER_PTE; i++) {
137 pte_clear(mm, vmaddr, pte + i); 196 pte_clear(mm, vmaddr, pte + i);
138 vmaddr += PAGE_SIZE; 197 vmaddr += PAGE_SIZE;
@@ -151,14 +210,30 @@ pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
151 210
152static inline void pte_free_kernel(pte_t *pte) 211static inline void pte_free_kernel(pte_t *pte)
153{ 212{
154 free_page((unsigned long) pte); 213 pte_t *shadow_pte = get_shadow_pte(pte);
214
215 if (shadow_pte)
216 free_page((unsigned long) shadow_pte);
217 free_page((unsigned long) pte);
155} 218}
156 219
157static inline void pte_free(struct page *pte) 220static inline void pte_free(struct page *pte)
158{ 221{
159 __free_page(pte); 222 struct page *shadow_page = get_shadow_page(pte);
223
224 if (shadow_page)
225 __free_page(shadow_page);
226 __free_page(pte);
160} 227}
161 228
162#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte) 229#define __pte_free_tlb(tlb, pte) \
230({ \
231 struct mmu_gather *__tlb = (tlb); \
232 struct page *__pte = (pte); \
233 struct page *shadow_page = get_shadow_page(__pte); \
234 if (shadow_page) \
235 tlb_remove_page(__tlb, shadow_page); \
236 tlb_remove_page(__tlb, __pte); \
237})
163 238
164#endif /* _S390_PGALLOC_H */ 239#endif /* _S390_PGALLOC_H */
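
The allocation paths added above pair each page-table page with an optional shadow table by stashing the shadow's address in the primary page's page->lru.next, and the free paths release both. A stand-alone model of that pairing in plain user-space C (an illustration only: struct demo_page and the allocators are stand-ins; in the kernel the descriptor comes from virt_to_page() rather than being embedded):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for the one field of struct page that the pairing uses. */
	struct demo_page { void *lru_next; };

	struct table {
		struct demo_page page;		/* descriptor of the primary table */
		unsigned long entries[4];	/* the table itself, size irrelevant here */
	};

	/* Mirror of the pgd_alloc() pattern: allocate primary and shadow together,
	 * record the shadow in the primary's descriptor, fail as a unit. */
	static struct table *table_alloc(int noexec)
	{
		struct table *t = calloc(1, sizeof(*t));
		if (!t)
			return NULL;
		if (noexec) {
			struct table *shadow = calloc(1, sizeof(*shadow));
			if (!shadow) {
				free(t);
				return NULL;
			}
			t->page.lru_next = shadow;	/* like page->lru.next = shadow_pgd */
		}
		return t;
	}

	/* Mirror of pgd_free(): release the shadow first if one was recorded. */
	static void table_free(struct table *t)
	{
		if (!t)
			return;
		free(t->page.lru_next);
		free(t);
	}

	int main(void)
	{
		struct table *t = table_alloc(1);
		printf("shadow recorded: %s\n", t && t->page.lru_next ? "yes" : "no");
		table_free(t);
		return 0;
	}
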
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index ae61aca5d483..13c16546eff5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -40,6 +40,7 @@ struct mm_struct;
40 40
41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
42extern void paging_init(void); 42extern void paging_init(void);
43extern void vmem_map_init(void);
43 44
44/* 45/*
45 * The S390 doesn't have any external MMU info: the kernel page 46 * The S390 doesn't have any external MMU info: the kernel page
@@ -223,6 +224,8 @@ extern unsigned long vmalloc_end;
223#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ 224#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
224#define _PAGE_TYPE_RO 0x200 225#define _PAGE_TYPE_RO 0x200
225#define _PAGE_TYPE_RW 0x000 226#define _PAGE_TYPE_RW 0x000
227#define _PAGE_TYPE_EX_RO 0x202
228#define _PAGE_TYPE_EX_RW 0x002
226 229
227/* 230/*
228 * PTE type bits are rather complicated. handle_pte_fault uses pte_present, 231 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
@@ -243,11 +246,13 @@ extern unsigned long vmalloc_end;
243 * _PAGE_TYPE_FILE 11?1 -> 11?1 246 * _PAGE_TYPE_FILE 11?1 -> 11?1
244 * _PAGE_TYPE_RO 0100 -> 1100 247 * _PAGE_TYPE_RO 0100 -> 1100
245 * _PAGE_TYPE_RW 0000 -> 1000 248 * _PAGE_TYPE_RW 0000 -> 1000
249 * _PAGE_TYPE_EX_RO 0110 -> 1110
250 * _PAGE_TYPE_EX_RW 0010 -> 1010
246 * 251 *
247 * pte_none is true for bits combinations 1000, 1100 252 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
248 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 253 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
249 * pte_file is true for bits combinations 1101, 1111 254 * pte_file is true for bits combinations 1101, 1111
250 * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid. 255 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
251 */ 256 */
252 257
253#ifndef __s390x__ 258#ifndef __s390x__
@@ -312,33 +317,100 @@ extern unsigned long vmalloc_end;
312#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 317#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
313#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 318#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
314#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 319#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
320#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
321#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
315 322
316#define PAGE_KERNEL PAGE_RW 323#define PAGE_KERNEL PAGE_RW
317#define PAGE_COPY PAGE_RO 324#define PAGE_COPY PAGE_RO
318 325
319/* 326/*
320 * The S390 can't do page protection for execute, and considers that the 327 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
321 * same are read. Also, write permissions imply read permissions. This is 328 * Write permission always implies read permission. In theory with a
322 * the closest we can get.. 329 * primary/secondary page table execute only can be implemented but
330 * it would cost an additional bit in the pte to distinguish all the
331 * different pte types. To avoid that execute permission currently
332 * implies read permission as well.
323 */ 333 */
324 /*xwr*/ 334 /*xwr*/
325#define __P000 PAGE_NONE 335#define __P000 PAGE_NONE
326#define __P001 PAGE_RO 336#define __P001 PAGE_RO
327#define __P010 PAGE_RO 337#define __P010 PAGE_RO
328#define __P011 PAGE_RO 338#define __P011 PAGE_RO
329#define __P100 PAGE_RO 339#define __P100 PAGE_EX_RO
330#define __P101 PAGE_RO 340#define __P101 PAGE_EX_RO
331#define __P110 PAGE_RO 341#define __P110 PAGE_EX_RO
332#define __P111 PAGE_RO 342#define __P111 PAGE_EX_RO
333 343
334#define __S000 PAGE_NONE 344#define __S000 PAGE_NONE
335#define __S001 PAGE_RO 345#define __S001 PAGE_RO
336#define __S010 PAGE_RW 346#define __S010 PAGE_RW
337#define __S011 PAGE_RW 347#define __S011 PAGE_RW
338#define __S100 PAGE_RO 348#define __S100 PAGE_EX_RO
339#define __S101 PAGE_RO 349#define __S101 PAGE_EX_RO
340#define __S110 PAGE_RW 350#define __S110 PAGE_EX_RW
341#define __S111 PAGE_RW 351#define __S111 PAGE_EX_RW
352
353#ifndef __s390x__
354# define PMD_SHADOW_SHIFT 1
355# define PGD_SHADOW_SHIFT 1
356#else /* __s390x__ */
357# define PMD_SHADOW_SHIFT 2
358# define PGD_SHADOW_SHIFT 2
359#endif /* __s390x__ */
360
361static inline struct page *get_shadow_page(struct page *page)
362{
363 if (s390_noexec && !list_empty(&page->lru))
364 return virt_to_page(page->lru.next);
365 return NULL;
366}
367
368static inline pte_t *get_shadow_pte(pte_t *ptep)
369{
370 unsigned long pteptr = (unsigned long) (ptep);
371
372 if (s390_noexec) {
373 unsigned long offset = pteptr & (PAGE_SIZE - 1);
374 void *addr = (void *) (pteptr ^ offset);
375 struct page *page = virt_to_page(addr);
376 if (!list_empty(&page->lru))
377 return (pte_t *) ((unsigned long) page->lru.next |
378 offset);
379 }
380 return NULL;
381}
382
383static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
384{
385 unsigned long pmdptr = (unsigned long) (pmdp);
386
387 if (s390_noexec) {
388 unsigned long offset = pmdptr &
389 ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
390 void *addr = (void *) (pmdptr ^ offset);
391 struct page *page = virt_to_page(addr);
392 if (!list_empty(&page->lru))
393 return (pmd_t *) ((unsigned long) page->lru.next |
394 offset);
395 }
396 return NULL;
397}
398
399static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
400{
401 unsigned long pgdptr = (unsigned long) (pgdp);
402
403 if (s390_noexec) {
404 unsigned long offset = pgdptr &
405 ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
406 void *addr = (void *) (pgdptr ^ offset);
407 struct page *page = virt_to_page(addr);
408 if (!list_empty(&page->lru))
409 return (pgd_t *) ((unsigned long) page->lru.next |
410 offset);
411 }
412 return NULL;
413}
342 414
343/* 415/*
344 * Certain architectures need to do special things when PTEs 416 * Certain architectures need to do special things when PTEs
@@ -347,7 +419,16 @@ extern unsigned long vmalloc_end;
347 */ 419 */
348static inline void set_pte(pte_t *pteptr, pte_t pteval) 420static inline void set_pte(pte_t *pteptr, pte_t pteval)
349{ 421{
422 pte_t *shadow_pte = get_shadow_pte(pteptr);
423
350 *pteptr = pteval; 424 *pteptr = pteval;
425 if (shadow_pte) {
426 if (!(pte_val(pteval) & _PAGE_INVALID) &&
427 (pte_val(pteval) & _PAGE_SWX))
428 pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
429 else
430 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
431 }
351} 432}
352#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 433#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
353 434
@@ -465,7 +546,7 @@ static inline int pte_read(pte_t pte)
465 546
466static inline void pgd_clear(pgd_t * pgdp) { } 547static inline void pgd_clear(pgd_t * pgdp) { }
467 548
468static inline void pmd_clear(pmd_t * pmdp) 549static inline void pmd_clear_kernel(pmd_t * pmdp)
469{ 550{
470 pmd_val(pmdp[0]) = _PAGE_TABLE_INV; 551 pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
471 pmd_val(pmdp[1]) = _PAGE_TABLE_INV; 552 pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
@@ -473,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp)
473 pmd_val(pmdp[3]) = _PAGE_TABLE_INV; 554 pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
474} 555}
475 556
557static inline void pmd_clear(pmd_t * pmdp)
558{
559 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
560
561 pmd_clear_kernel(pmdp);
562 if (shadow_pmd)
563 pmd_clear_kernel(shadow_pmd);
564}
565
476#else /* __s390x__ */ 566#else /* __s390x__ */
477 567
478static inline void pgd_clear(pgd_t * pgdp) 568static inline void pgd_clear_kernel(pgd_t * pgdp)
479{ 569{
480 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; 570 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
481} 571}
482 572
483static inline void pmd_clear(pmd_t * pmdp) 573static inline void pgd_clear(pgd_t * pgdp)
574{
575 pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
576
577 pgd_clear_kernel(pgdp);
578 if (shadow_pgd)
579 pgd_clear_kernel(shadow_pgd);
580}
581
582static inline void pmd_clear_kernel(pmd_t * pmdp)
484{ 583{
485 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 584 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
486 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 585 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
487} 586}
488 587
588static inline void pmd_clear(pmd_t * pmdp)
589{
590 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
591
592 pmd_clear_kernel(pmdp);
593 if (shadow_pmd)
594 pmd_clear_kernel(shadow_pmd);
595}
596
489#endif /* __s390x__ */ 597#endif /* __s390x__ */
490 598
491static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 599static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
492{ 600{
601 pte_t *shadow_pte = get_shadow_pte(ptep);
602
493 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 603 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
604 if (shadow_pte)
605 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
494} 606}
495 607
496/* 608/*
@@ -608,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma,
608 unsigned long address, pte_t *ptep) 720 unsigned long address, pte_t *ptep)
609{ 721{
610 pte_t pte = *ptep; 722 pte_t pte = *ptep;
723 pte_t *shadow_pte = get_shadow_pte(ptep);
611 724
612 __ptep_ipte(address, ptep); 725 __ptep_ipte(address, ptep);
726 if (shadow_pte)
727 __ptep_ipte(address, shadow_pte);
613 return pte; 728 return pte;
614} 729}
615 730
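
get_shadow_pte/pmd/pgd above recover the shadow entry by masking the entry address down to the start of its table, following the page->lru.next link set up in pgalloc.h, and re-applying the in-table offset. The arithmetic in isolation, as a runnable user-space model (the addresses are hypothetical; the real lookup goes through virt_to_page()):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096UL

	int main(void)
	{
		uint64_t pte_addr    = 0x100010a8UL;	/* hypothetical pte location       */
		uint64_t shadow_base = 0x20003000UL;	/* hypothetical paired shadow page */

		/* Same steps as get_shadow_pte(): split off the in-page offset ... */
		uint64_t offset = pte_addr & (DEMO_PAGE_SIZE - 1);
		uint64_t table  = pte_addr ^ offset;	/* page-aligned table origin */
		/* ... and rebase it onto the shadow table found via page->lru.next. */
		uint64_t shadow_pte = shadow_base | offset;

		printf("table %#" PRIx64 " offset %#" PRIx64 " -> shadow pte %#" PRIx64 "\n",
		       table, offset, shadow_pte);
		return 0;
	}
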
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index cbbedc63ba25..4c1b73940351 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -50,6 +50,7 @@ struct cpuinfo_S390
50 unsigned long pgtable_cache_sz; 50 unsigned long pgtable_cache_sz;
51}; 51};
52 52
53extern void s390_adjust_jiffies(void);
53extern void print_cpu_info(struct cpuinfo_S390 *); 54extern void print_cpu_info(struct cpuinfo_S390 *);
54 55
55/* Lazy FPU handling on uni-processor */ 56/* Lazy FPU handling on uni-processor */
@@ -144,7 +145,8 @@ struct stack_frame {
144#ifndef __s390x__ 145#ifndef __s390x__
145 146
146#define start_thread(regs, new_psw, new_stackp) do { \ 147#define start_thread(regs, new_psw, new_stackp) do { \
147 regs->psw.mask = PSW_USER_BITS; \ 148 set_fs(USER_DS); \
149 regs->psw.mask = psw_user_bits; \
148 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 150 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
149 regs->gprs[15] = new_stackp ; \ 151 regs->gprs[15] = new_stackp ; \
150} while (0) 152} while (0)
@@ -152,13 +154,15 @@ struct stack_frame {
152#else /* __s390x__ */ 154#else /* __s390x__ */
153 155
154#define start_thread(regs, new_psw, new_stackp) do { \ 156#define start_thread(regs, new_psw, new_stackp) do { \
155 regs->psw.mask = PSW_USER_BITS; \ 157 set_fs(USER_DS); \
158 regs->psw.mask = psw_user_bits; \
156 regs->psw.addr = new_psw; \ 159 regs->psw.addr = new_psw; \
157 regs->gprs[15] = new_stackp; \ 160 regs->gprs[15] = new_stackp; \
158} while (0) 161} while (0)
159 162
160#define start_thread31(regs, new_psw, new_stackp) do { \ 163#define start_thread31(regs, new_psw, new_stackp) do { \
161 regs->psw.mask = PSW_USER32_BITS; \ 164 set_fs(USER_DS); \
165 regs->psw.mask = psw_user32_bits; \
162 regs->psw.addr = new_psw; \ 166 regs->psw.addr = new_psw; \
163 regs->gprs[15] = new_stackp; \ 167 regs->gprs[15] = new_stackp; \
164} while (0) 168} while (0)
@@ -201,9 +205,8 @@ unsigned long get_wchan(struct task_struct *p);
201static inline void cpu_relax(void) 205static inline void cpu_relax(void)
202{ 206{
203 if (MACHINE_HAS_DIAG44) 207 if (MACHINE_HAS_DIAG44)
204 asm volatile("diag 0,0,68" : : : "memory"); 208 asm volatile("diag 0,0,68");
205 else 209 barrier();
206 barrier();
207} 210}
208 211
209/* 212/*
@@ -328,6 +331,18 @@ static inline void disabled_wait(unsigned long code)
328} 331}
329 332
330/* 333/*
334 * Basic Machine Check/Program Check Handler.
335 */
336
337extern void s390_base_mcck_handler(void);
338extern void s390_base_pgm_handler(void);
339extern void s390_base_ext_handler(void);
340
341extern void (*s390_base_mcck_handler_fn)(void);
342extern void (*s390_base_pgm_handler_fn)(void);
343extern void (*s390_base_ext_handler_fn)(void);
344
345/*
331 * CPU idle notifier chain. 346 * CPU idle notifier chain.
332 */ 347 */
333#define CPU_IDLE 0 348#define CPU_IDLE 0
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 7b768c5c68a8..fa6ca87080e8 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -266,17 +266,12 @@ typedef struct
266#define PSW_ASC_SECONDARY 0x0000800000000000UL 266#define PSW_ASC_SECONDARY 0x0000800000000000UL
267#define PSW_ASC_HOME 0x0000C00000000000UL 267#define PSW_ASC_HOME 0x0000C00000000000UL
268 268
269#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ 269extern long psw_user32_bits;
270 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
271 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
272 270
273#endif /* __s390x__ */ 271#endif /* __s390x__ */
274 272
275#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ 273extern long psw_kernel_bits;
276 PSW_MASK_MCHECK | PSW_DEFAULT_KEY) 274extern long psw_user_bits;
277#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
278 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
279 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
280 275
281/* This macro merges a NEW PSW mask specified by the user into 276/* This macro merges a NEW PSW mask specified by the user into
282 the currently active PSW mask CURRENT, modifying only those 277 the currently active PSW mask CURRENT, modifying only those
diff --git a/include/asm-s390/reset.h b/include/asm-s390/reset.h
index 532e65a2aafc..f584f4a52581 100644
--- a/include/asm-s390/reset.h
+++ b/include/asm-s390/reset.h
@@ -18,7 +18,4 @@ struct reset_call {
18extern void register_reset_call(struct reset_call *reset); 18extern void register_reset_call(struct reset_call *reset);
19extern void unregister_reset_call(struct reset_call *reset); 19extern void unregister_reset_call(struct reset_call *reset);
20extern void s390_reset_system(void); 20extern void s390_reset_system(void);
21extern void (*s390_reset_mcck_handler)(void);
22extern void (*s390_reset_pgm_handler)(void);
23
24#endif /* _ASM_S390_RESET_H */ 21#endif /* _ASM_S390_RESET_H */
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
new file mode 100644
index 000000000000..468b97018405
--- /dev/null
+++ b/include/asm-s390/sclp.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-s390/sclp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_SCLP_H
9#define _ASM_S390_SCLP_H
10
11#include <linux/types.h>
12
13struct sccb_header {
14 u16 length;
15 u8 function_code;
16 u8 control_mask[3];
17 u16 response_code;
18} __attribute__((packed));
19
20#define LOADPARM_LEN 8
21
22struct sclp_readinfo_sccb {
23 struct sccb_header header; /* 0-7 */
24 u16 rnmax; /* 8-9 */
25 u8 rnsize; /* 10 */
26 u8 _reserved0[24 - 11]; /* 11-23 */
27 u8 loadparm[LOADPARM_LEN]; /* 24-31 */
28 u8 _reserved1[91 - 32]; /* 32-90 */
29 u8 flags; /* 91 */
30 u8 _reserved2[100 - 92]; /* 92-99 */
31 u32 rnsize2; /* 100-103 */
32 u64 rnmax2; /* 104-111 */
33 u8 _reserved3[4096 - 112]; /* 112-4095 */
34} __attribute__((packed, aligned(4096)));
35
36extern struct sclp_readinfo_sccb s390_readinfo_sccb;
37extern void sclp_readinfo_early(void);
38
39#endif /* _ASM_S390_SCLP_H */
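
A sketch of how a filled-in read-info SCCB is commonly interpreted on s390. This interpretation is an assumption on the editor's part and not shown in this patch: rnsize/rnmax are taken as the storage-increment size in MB and the increment count, with the wide rnsize2/rnmax2 fields used when the narrow ones are zero.

	/* Assumed semantics: installed memory = increment count * increment size.
	 * Field meanings are inferred, not defined by this header. */
	static unsigned long long sccb_memory_bytes(struct sclp_readinfo_sccb *sccb)
	{
		unsigned long long rnsize, rnmax;

		rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;	/* MB per increment */
		rnmax  = sccb->rnmax  ? sccb->rnmax  : sccb->rnmax2;	/* increment count  */
		return (rnmax * rnsize) << 20;
	}
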
diff --git a/include/asm-s390/sections.h b/include/asm-s390/sections.h
index 3a0b8ffeab7a..1c5a2c4ccdad 100644
--- a/include/asm-s390/sections.h
+++ b/include/asm-s390/sections.h
@@ -3,4 +3,6 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[];
7
6#endif 8#endif
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 9574fe80a046..3388bb52597c 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -42,6 +42,18 @@ struct mem_chunk {
42 42
43extern struct mem_chunk memory_chunk[]; 43extern struct mem_chunk memory_chunk[];
44 44
45#ifdef CONFIG_S390_SWITCH_AMODE
46extern unsigned int switch_amode;
47#else
48#define switch_amode (0)
49#endif
50
51#ifdef CONFIG_S390_EXEC_PROTECT
52extern unsigned int s390_noexec;
53#else
54#define s390_noexec (0)
55#endif
56
45/* 57/*
46 * Machine features detected in head.S 58 * Machine features detected in head.S
47 */ 59 */
@@ -74,6 +86,9 @@ extern unsigned int console_mode;
74extern unsigned int console_devno; 86extern unsigned int console_devno;
75extern unsigned int console_irq; 87extern unsigned int console_irq;
76 88
89extern char vmhalt_cmd[];
90extern char vmpoff_cmd[];
91
77#define CONSOLE_IS_UNDEFINED (console_mode == 0) 92#define CONSOLE_IS_UNDEFINED (console_mode == 0)
78#define CONSOLE_IS_SCLP (console_mode == 1) 93#define CONSOLE_IS_SCLP (console_mode == 1)
79#define CONSOLE_IS_3215 (console_mode == 2) 94#define CONSOLE_IS_3215 (console_mode == 2)
@@ -141,13 +156,19 @@ struct ipl_parameter_block {
141extern u32 ipl_flags; 156extern u32 ipl_flags;
142extern u16 ipl_devno; 157extern u16 ipl_devno;
143 158
144void do_reipl(void); 159extern void do_reipl(void);
160extern void ipl_save_parameters(void);
145 161
146enum { 162enum {
147 IPL_DEVNO_VALID = 1, 163 IPL_DEVNO_VALID = 1,
148 IPL_PARMBLOCK_VALID = 2, 164 IPL_PARMBLOCK_VALID = 2,
165 IPL_NSS_VALID = 4,
149}; 166};
150 167
168#define NSS_NAME_SIZE 8
169
170extern char kernel_nss_name[];
171
151#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ 172#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
152 IPL_PARMBLOCK_ORIGIN) 173 IPL_PARMBLOCK_ORIGIN)
153#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) 174#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
diff --git a/arch/s390/math-emu/sfp-util.h b/include/asm-s390/sfp-util.h
index 5b6ca4570ea4..8cabcd23d976 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/include/asm-s390/sfp-util.h
@@ -52,12 +52,12 @@
52}) 52})
53 53
54#define udiv_qrnnd(q, r, n1, n0, d) \ 54#define udiv_qrnnd(q, r, n1, n0, d) \
55 do { unsigned long __r; \ 55 do { unsigned int __r; \
56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ 56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
57 (r) = __r; \ 57 (r) = __r; \
58 } while (0) 58 } while (0)
59extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long, 59extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
60 unsigned long , unsigned long); 60 unsigned int , unsigned int);
61 61
62#define UDIV_NEEDS_NORMALIZATION 0 62#define UDIV_NEEDS_NORMALIZATION 0
63 63
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 7097c96ed026..b957e4cda464 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -31,6 +31,10 @@ typedef struct
31 __u16 cpu; 31 __u16 cpu;
32} sigp_info; 32} sigp_info;
33 33
34extern void machine_restart_smp(char *);
35extern void machine_halt_smp(void);
36extern void machine_power_off_smp(void);
37
34extern void smp_setup_cpu_possible_map(void); 38extern void smp_setup_cpu_possible_map(void);
35extern int smp_call_function_on(void (*func) (void *info), void *info, 39extern int smp_call_function_on(void (*func) (void *info), void *info,
36 int nonatomic, int wait, int cpu); 40 int nonatomic, int wait, int cpu);
@@ -106,7 +110,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
106static inline void smp_send_stop(void) 110static inline void smp_send_stop(void)
107{ 111{
108 /* Disable all interrupts/machine checks */ 112 /* Disable all interrupts/machine checks */
109 __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); 113 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
110} 114}
111 115
112#define smp_cpu_not_running(cpu) 1 116#define smp_cpu_not_running(cpu) 1
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bd0b05ae87d2..bbe137c3ed69 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -373,8 +373,8 @@ __set_psw_mask(unsigned long mask)
373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
374} 374}
375 375
376#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) 376#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
377#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) 377#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
378 378
379#ifdef CONFIG_SMP 379#ifdef CONFIG_SMP
380 380
diff --git a/include/asm-s390/tape390.h b/include/asm-s390/tape390.h
index f1d66ba0deef..884fba48f1ff 100644
--- a/include/asm-s390/tape390.h
+++ b/include/asm-s390/tape390.h
@@ -1,11 +1,11 @@
1/************************************************************************* 1/*************************************************************************
2 * 2 *
3 * tape390.h 3 * tape390.h
4 * enables user programs to display messages on the tape device 4 * enables user programs to display messages and control encryption
5 * on s390 tape devices
5 * 6 *
6 * S390 and zSeries version 7 * Copyright IBM Corp. 2001,2006
7 * Copyright (C) 2001 IBM Corporation 8 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
8 * Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
9 * 9 *
10 *************************************************************************/ 10 *************************************************************************/
11 11
@@ -36,4 +36,68 @@ typedef struct display_struct {
36 char message2[8]; 36 char message2[8];
37} display_struct; 37} display_struct;
38 38
39/*
40 * Tape encryption support
41 */
42
43struct tape390_crypt_info {
44 char capability;
45 char status;
46 char medium_status;
47} __attribute__ ((packed));
48
49
50/* Macros for "capable" field */
51#define TAPE390_CRYPT_SUPPORTED_MASK 0x01
52#define TAPE390_CRYPT_SUPPORTED(x) \
53 ((x.capability & TAPE390_CRYPT_SUPPORTED_MASK))
54
55/* Macros for "status" field */
56#define TAPE390_CRYPT_ON_MASK 0x01
57#define TAPE390_CRYPT_ON(x) (((x.status) & TAPE390_CRYPT_ON_MASK))
58
59/* Macros for "medium status" field */
60#define TAPE390_MEDIUM_LOADED_MASK 0x01
61#define TAPE390_MEDIUM_ENCRYPTED_MASK 0x02
62#define TAPE390_MEDIUM_ENCRYPTED(x) \
63 (((x.medium_status) & TAPE390_MEDIUM_ENCRYPTED_MASK))
64#define TAPE390_MEDIUM_LOADED(x) \
65 (((x.medium_status) & TAPE390_MEDIUM_LOADED_MASK))
66
67/*
68 * The TAPE390_CRYPT_SET ioctl is used to switch on/off encryption.
69 * The "encryption_capable" and "tape_status" fields are ignored for this ioctl!
70 */
71#define TAPE390_CRYPT_SET _IOW('d', 2, struct tape390_crypt_info)
72
73/*
74 * The TAPE390_CRYPT_QUERY ioctl is used to query the encryption state.
75 */
76#define TAPE390_CRYPT_QUERY _IOR('d', 3, struct tape390_crypt_info)
77
78/* Values for "kekl1/2_type" and "kekl1/2_type_on_tape" fields */
79#define TAPE390_KEKL_TYPE_NONE 0
80#define TAPE390_KEKL_TYPE_LABEL 1
81#define TAPE390_KEKL_TYPE_HASH 2
82
83struct tape390_kekl {
84 unsigned char type;
85 unsigned char type_on_tape;
86 char label[65];
87} __attribute__ ((packed));
88
89struct tape390_kekl_pair {
90 struct tape390_kekl kekl[2];
91} __attribute__ ((packed));
92
93/*
94 * The TAPE390_KEKL_SET ioctl is used to set Key Encrypting Key labels.
95 */
96#define TAPE390_KEKL_SET _IOW('d', 4, struct tape390_kekl_pair)
97
98/*
99 * The TAPE390_KEKL_QUERY ioctl is used to query Key Encrypting Key labels.
100 */
101#define TAPE390_KEKL_QUERY _IOR('d', 5, struct tape390_kekl_pair)
102
39#endif 103#endif
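
A user-space sketch of the query ioctl added above. The device node name is hypothetical, and the include path assumes installed s390 kernel headers:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/tape390.h>	/* assumes exported s390 headers */

	int main(void)
	{
		struct tape390_crypt_info info;
		int fd = open("/dev/ntibm0", O_RDONLY);	/* hypothetical tape device node */

		if (fd < 0 || ioctl(fd, TAPE390_CRYPT_QUERY, &info) < 0) {
			perror("TAPE390_CRYPT_QUERY");
			return 1;
		}
		printf("crypt supported=%d on=%d medium encrypted=%d\n",
		       TAPE390_CRYPT_SUPPORTED(info), TAPE390_CRYPT_ON(info),
		       TAPE390_MEDIUM_ENCRYPTED(info));
		close(fd);
		return 0;
	}
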
diff --git a/include/asm-s390/timer.h b/include/asm-s390/timer.h
index 30e5cbe570f2..adb34860a543 100644
--- a/include/asm-s390/timer.h
+++ b/include/asm-s390/timer.h
@@ -45,6 +45,9 @@ extern void add_virt_timer_periodic(void *new);
45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); 45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
46extern int del_virt_timer(struct vtimer_list *timer); 46extern int del_virt_timer(struct vtimer_list *timer);
47 47
48extern void init_cpu_vtimer(void);
49extern void vtime_init(void);
50
48#endif /* __KERNEL__ */ 51#endif /* __KERNEL__ */
49 52
50#endif /* _ASM_S390_TIMER_H */ 53#endif /* _ASM_S390_TIMER_H */
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 4df4a41029a3..98229db24314 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -11,6 +11,41 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14/* Inline functions for clock register access. */
15static inline int set_clock(__u64 time)
16{
17 int cc;
18
19 asm volatile(
20 " sck 0(%2)\n"
21 " ipm %0\n"
22 " srl %0,28\n"
23 : "=d" (cc) : "m" (time), "a" (&time) : "cc");
24 return cc;
25}
26
27static inline int store_clock(__u64 *time)
28{
29 int cc;
30
31 asm volatile(
32 " stck 0(%2)\n"
33 " ipm %0\n"
34 " srl %0,28\n"
35 : "=d" (cc), "=m" (*time) : "a" (time) : "cc");
36 return cc;
37}
38
39static inline void set_clock_comparator(__u64 time)
40{
41 asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time));
42}
43
44static inline void store_clock_comparator(__u64 *time)
45{
46 asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time));
47}
48
14#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 49#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
15 50
16typedef unsigned long long cycles_t; 51typedef unsigned long long cycles_t;
@@ -27,9 +62,24 @@ static inline unsigned long long get_clock (void)
27 return clk; 62 return clk;
28} 63}
29 64
65static inline void get_clock_extended(void *dest)
66{
67 typedef struct { unsigned long long clk[2]; } __clock_t;
68
69#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
70 asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
71#else /* __GNUC__ */
72 asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
73 : "a" ((__clock_t *)dest) : "cc");
74#endif /* __GNUC__ */
75}
76
30static inline cycles_t get_cycles(void) 77static inline cycles_t get_cycles(void)
31{ 78{
32 return (cycles_t) get_clock() >> 2; 79 return (cycles_t) get_clock() >> 2;
33} 80}
34 81
82int get_sync_clock(unsigned long long *clock);
83void init_cpu_timer(void);
84
35#endif 85#endif
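
The store_clock() helper added above can be used to time a section of kernel code. A minimal sketch, assuming kernel context; the wrapper name is illustrative, and the shift relies on the s390 TOD format where bit 51 corresponds to one microsecond:

	static unsigned long long example_time_section(void (*fn)(void))
	{
		__u64 start, end;

		if (store_clock(&start))	/* non-zero cc: clock not usable */
			return 0;
		fn();
		store_clock(&end);
		return (end - start) >> 12;	/* TOD delta -> microseconds */
	}
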
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index fa4dc916a9bf..66793f55c8b2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/pgalloc.h>
6 7
7/* 8/*
8 * TLB flushing: 9 * TLB flushing:
@@ -102,6 +103,14 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 103 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 104 return;
104 if (MACHINE_HAS_IDTE) { 105 if (MACHINE_HAS_IDTE) {
106 pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
107
108 if (shadow_pgd) {
109 asm volatile(
110 " .insn rrf,0xb98e0000,0,%0,%1,0"
111 : : "a" (2048),
112 "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
113 }
105 asm volatile( 114 asm volatile(
106 " .insn rrf,0xb98e0000,0,%0,%1,0" 115 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); 116 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 73ac4e82217b..0235970278f0 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -90,6 +90,8 @@ struct uaccess_ops {
90extern struct uaccess_ops uaccess; 90extern struct uaccess_ops uaccess;
91extern struct uaccess_ops uaccess_std; 91extern struct uaccess_ops uaccess_std;
92extern struct uaccess_ops uaccess_mvcos; 92extern struct uaccess_ops uaccess_mvcos;
93extern struct uaccess_ops uaccess_mvcos_switch;
94extern struct uaccess_ops uaccess_pt;
93 95
94static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 96static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
95{ 97{
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index 6b6fc6f8be7e..a29f05087a31 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -37,7 +37,7 @@
37 * Calling conventions: 37 * Calling conventions:
38 * 38 *
39 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) 39 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
40 * ACPI_EXTERNAL_XFACE - External ACPI interfaces 40 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
41 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces 41 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
42 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces 42 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
43 */ 43 */
@@ -57,11 +57,11 @@
57int __acpi_acquire_global_lock(unsigned int *lock); 57int __acpi_acquire_global_lock(unsigned int *lock);
58int __acpi_release_global_lock(unsigned int *lock); 58int __acpi_release_global_lock(unsigned int *lock);
59 59
60#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ 60#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
61 ((Acq) = __acpi_acquire_global_lock((unsigned int *) GLptr)) 61 ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
62 62
63#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ 63#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
64 ((Acq) = __acpi_release_global_lock((unsigned int *) GLptr)) 64 ((Acq) = __acpi_release_global_lock(&facs->global_lock))
65 65
66/* 66/*
67 * Math helper asm macros 67 * Math helper asm macros
@@ -87,10 +87,10 @@ extern int acpi_strict;
87extern int acpi_disabled; 87extern int acpi_disabled;
88extern int acpi_pci_disabled; 88extern int acpi_pci_disabled;
89extern int acpi_ht; 89extern int acpi_ht;
90static inline void disable_acpi(void) 90static inline void disable_acpi(void)
91{ 91{
92 acpi_disabled = 1; 92 acpi_disabled = 1;
93 acpi_ht = 0; 93 acpi_ht = 0;
94 acpi_pci_disabled = 1; 94 acpi_pci_disabled = 1;
95 acpi_noirq = 1; 95 acpi_noirq = 1;
96} 96}
@@ -100,9 +100,9 @@ static inline void disable_acpi(void)
100 100
101extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); 101extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
102static inline void acpi_noirq_set(void) { acpi_noirq = 1; } 102static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
103static inline void acpi_disable_pci(void) 103static inline void acpi_disable_pci(void)
104{ 104{
105 acpi_pci_disabled = 1; 105 acpi_pci_disabled = 1;
106 acpi_noirq_set(); 106 acpi_noirq_set();
107} 107}
108extern int acpi_irq_balance_set(char *str); 108extern int acpi_irq_balance_set(char *str);
@@ -136,8 +136,6 @@ extern void acpi_reserve_bootmem(void);
136extern int acpi_disabled; 136extern int acpi_disabled;
137extern int acpi_pci_disabled; 137extern int acpi_pci_disabled;
138 138
139extern u8 x86_acpiid_to_apicid[];
140
141#define ARCH_HAS_POWER_INIT 1 139#define ARCH_HAS_POWER_INIT 1
142 140
143extern int acpi_skip_timer_override; 141extern int acpi_skip_timer_override;
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ba94ab3d2673..ab913ffcad56 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -1,6 +1,5 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWTIOLB_H 1 2#define _ASM_SWIOTLB_H 1
3
4 3
5#include <asm/dma-mapping.h> 4#include <asm/dma-mapping.h>
6 5
@@ -45,6 +44,7 @@ extern void swiotlb_init(void);
45extern int swiotlb_force; 44extern int swiotlb_force;
46 45
47#ifdef CONFIG_SWIOTLB 46#ifdef CONFIG_SWIOTLB
47#define SWIOTLB_ARCH_NEED_ALLOC
48extern int swiotlb; 48extern int swiotlb;
49#else 49#else
50#define swiotlb 0 50#define swiotlb 0
@@ -52,4 +52,6 @@ extern int swiotlb;
52 52
53extern void pci_swiotlb_init(void); 53extern void pci_swiotlb_init(void);
54 54
55#endif /* _ASM_SWTIOLB_H */ 55static inline void dma_mark_clean(void *addr, size_t size) {}
56
57#endif /* _ASM_SWIOTLB_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 91f1f2363870..815f1fb4ce21 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -53,166 +53,6 @@ enum acpi_irq_model_id {
53 53
54extern enum acpi_irq_model_id acpi_irq_model; 54extern enum acpi_irq_model_id acpi_irq_model;
55 55
56
57/* Root System Description Pointer (RSDP) */
58
59struct acpi_table_rsdp {
60 char signature[8];
61 u8 checksum;
62 char oem_id[6];
63 u8 revision;
64 u32 rsdt_address;
65} __attribute__ ((packed));
66
67struct acpi20_table_rsdp {
68 char signature[8];
69 u8 checksum;
70 char oem_id[6];
71 u8 revision;
72 u32 rsdt_address;
73 u32 length;
74 u64 xsdt_address;
75 u8 ext_checksum;
76 u8 reserved[3];
77} __attribute__ ((packed));
78
79typedef struct {
80 u8 type;
81 u8 length;
82} __attribute__ ((packed)) acpi_table_entry_header;
83
84/* Root System Description Table (RSDT) */
85
86struct acpi_table_rsdt {
87 struct acpi_table_header header;
88 u32 entry[8];
89} __attribute__ ((packed));
90
91/* Extended System Description Table (XSDT) */
92
93struct acpi_table_xsdt {
94 struct acpi_table_header header;
95 u64 entry[1];
96} __attribute__ ((packed));
97
98/* Fixed ACPI Description Table (FADT) */
99
100struct acpi_table_fadt {
101 struct acpi_table_header header;
102 u32 facs_addr;
103 u32 dsdt_addr;
104 /* ... */
105} __attribute__ ((packed));
106
107/* Multiple APIC Description Table (MADT) */
108
109struct acpi_table_madt {
110 struct acpi_table_header header;
111 u32 lapic_address;
112 struct {
113 u32 pcat_compat:1;
114 u32 reserved:31;
115 } flags;
116} __attribute__ ((packed));
117
118enum acpi_madt_entry_id {
119 ACPI_MADT_LAPIC = 0,
120 ACPI_MADT_IOAPIC,
121 ACPI_MADT_INT_SRC_OVR,
122 ACPI_MADT_NMI_SRC,
123 ACPI_MADT_LAPIC_NMI,
124 ACPI_MADT_LAPIC_ADDR_OVR,
125 ACPI_MADT_IOSAPIC,
126 ACPI_MADT_LSAPIC,
127 ACPI_MADT_PLAT_INT_SRC,
128 ACPI_MADT_ENTRY_COUNT
129};
130
131typedef struct {
132 u16 polarity:2;
133 u16 trigger:2;
134 u16 reserved:12;
135} __attribute__ ((packed)) acpi_interrupt_flags;
136
137struct acpi_table_lapic {
138 acpi_table_entry_header header;
139 u8 acpi_id;
140 u8 id;
141 struct {
142 u32 enabled:1;
143 u32 reserved:31;
144 } flags;
145} __attribute__ ((packed));
146
147struct acpi_table_ioapic {
148 acpi_table_entry_header header;
149 u8 id;
150 u8 reserved;
151 u32 address;
152 u32 global_irq_base;
153} __attribute__ ((packed));
154
155struct acpi_table_int_src_ovr {
156 acpi_table_entry_header header;
157 u8 bus;
158 u8 bus_irq;
159 u32 global_irq;
160 acpi_interrupt_flags flags;
161} __attribute__ ((packed));
162
163struct acpi_table_nmi_src {
164 acpi_table_entry_header header;
165 acpi_interrupt_flags flags;
166 u32 global_irq;
167} __attribute__ ((packed));
168
169struct acpi_table_lapic_nmi {
170 acpi_table_entry_header header;
171 u8 acpi_id;
172 acpi_interrupt_flags flags;
173 u8 lint;
174} __attribute__ ((packed));
175
176struct acpi_table_lapic_addr_ovr {
177 acpi_table_entry_header header;
178 u8 reserved[2];
179 u64 address;
180} __attribute__ ((packed));
181
182struct acpi_table_iosapic {
183 acpi_table_entry_header header;
184 u8 id;
185 u8 reserved;
186 u32 global_irq_base;
187 u64 address;
188} __attribute__ ((packed));
189
190struct acpi_table_lsapic {
191 acpi_table_entry_header header;
192 u8 acpi_id;
193 u8 id;
194 u8 eid;
195 u8 reserved[3];
196 struct {
197 u32 enabled:1;
198 u32 reserved:31;
199 } flags;
200} __attribute__ ((packed));
201
202struct acpi_table_plat_int_src {
203 acpi_table_entry_header header;
204 acpi_interrupt_flags flags;
205 u8 type; /* See acpi_interrupt_type */
206 u8 id;
207 u8 eid;
208 u8 iosapic_vector;
209 u32 global_irq;
210 struct {
211 u32 cpei_override_flag:1;
212 u32 reserved:31;
213 } plint_flags;
214} __attribute__ ((packed));
215
216enum acpi_interrupt_id { 56enum acpi_interrupt_id {
217 ACPI_INTERRUPT_PMI = 1, 57 ACPI_INTERRUPT_PMI = 1,
218 ACPI_INTERRUPT_INIT, 58 ACPI_INTERRUPT_INIT,
@@ -222,89 +62,6 @@ enum acpi_interrupt_id {
222 62
223#define ACPI_SPACE_MEM 0 63#define ACPI_SPACE_MEM 0
224 64
225struct acpi_gen_regaddr {
226 u8 space_id;
227 u8 bit_width;
228 u8 bit_offset;
229 u8 resv;
230 u32 addrl;
231 u32 addrh;
232} __attribute__ ((packed));
233
234struct acpi_table_hpet {
235 struct acpi_table_header header;
236 u32 id;
237 struct acpi_gen_regaddr addr;
238 u8 number;
239 u16 min_tick;
240 u8 page_protect;
241} __attribute__ ((packed));
242
243/*
244 * Simple Boot Flags
245 * http://www.microsoft.com/whdc/hwdev/resources/specs/simp_bios.mspx
246 */
247struct acpi_table_sbf
248{
249 u8 sbf_signature[4];
250 u32 sbf_len;
251 u8 sbf_revision;
252 u8 sbf_csum;
253 u8 sbf_oemid[6];
254 u8 sbf_oemtable[8];
255 u8 sbf_revdata[4];
256 u8 sbf_creator[4];
257 u8 sbf_crearev[4];
258 u8 sbf_cmos;
259 u8 sbf_spare[3];
260} __attribute__ ((packed));
261
262/*
263 * System Resource Affinity Table (SRAT)
264 * http://www.microsoft.com/whdc/hwdev/platform/proc/SRAT.mspx
265 */
266
267struct acpi_table_srat {
268 struct acpi_table_header header;
269 u32 table_revision;
270 u64 reserved;
271} __attribute__ ((packed));
272
273enum acpi_srat_entry_id {
274 ACPI_SRAT_PROCESSOR_AFFINITY = 0,
275 ACPI_SRAT_MEMORY_AFFINITY,
276 ACPI_SRAT_ENTRY_COUNT
277};
278
279struct acpi_table_processor_affinity {
280 acpi_table_entry_header header;
281 u8 proximity_domain;
282 u8 apic_id;
283 struct {
284 u32 enabled:1;
285 u32 reserved:31;
286 } flags;
287 u8 lsapic_eid;
288 u8 reserved[7];
289} __attribute__ ((packed));
290
291struct acpi_table_memory_affinity {
292 acpi_table_entry_header header;
293 u8 proximity_domain;
294 u8 reserved1[5];
295 u32 base_addr_lo;
296 u32 base_addr_hi;
297 u32 length_lo;
298 u32 length_hi;
299 u32 memory_type; /* See acpi_address_range_id */
300 struct {
301 u32 enabled:1;
302 u32 hot_pluggable:1;
303 u32 reserved:30;
304 } flags;
305 u64 reserved2;
306} __attribute__ ((packed));
307
308enum acpi_address_range_id { 65enum acpi_address_range_id {
309 ACPI_ADDRESS_RANGE_MEMORY = 1, 66 ACPI_ADDRESS_RANGE_MEMORY = 1,
310 ACPI_ADDRESS_RANGE_RESERVED = 2, 67 ACPI_ADDRESS_RANGE_RESERVED = 2,
@@ -313,84 +70,12 @@ enum acpi_address_range_id {
313 ACPI_ADDRESS_RANGE_COUNT 70 ACPI_ADDRESS_RANGE_COUNT
314}; 71};
315 72
316/*
317 * System Locality Information Table (SLIT)
318 * see http://devresource.hp.com/devresource/docs/techpapers/ia64/slit.pdf
319 */
320
321struct acpi_table_slit {
322 struct acpi_table_header header;
323 u64 localities;
324 u8 entry[1]; /* real size = localities^2 */
325} __attribute__ ((packed));
326
327/* Smart Battery Description Table (SBST) */
328
329struct acpi_table_sbst {
330 struct acpi_table_header header;
331 u32 warning; /* Warn user */
332 u32 low; /* Critical sleep */
333 u32 critical; /* Critical shutdown */
334} __attribute__ ((packed));
335
336/* Embedded Controller Boot Resources Table (ECDT) */
337
338struct acpi_table_ecdt {
339 struct acpi_table_header header;
340 struct acpi_generic_address ec_control;
341 struct acpi_generic_address ec_data;
342 u32 uid;
343 u8 gpe_bit;
344 char ec_id[0];
345} __attribute__ ((packed));
346
347/* PCI MMCONFIG */
348
349/* Defined in PCI Firmware Specification 3.0 */
350struct acpi_table_mcfg_config {
351 u32 base_address;
352 u32 base_reserved;
353 u16 pci_segment_group_number;
354 u8 start_bus_number;
355 u8 end_bus_number;
356 u8 reserved[4];
357} __attribute__ ((packed));
358struct acpi_table_mcfg {
359 struct acpi_table_header header;
360 u8 reserved[8];
361 struct acpi_table_mcfg_config config[0];
362} __attribute__ ((packed));
363 73
364/* Table Handlers */ 74/* Table Handlers */
365 75
366enum acpi_table_id { 76typedef int (*acpi_table_handler) (struct acpi_table_header *table);
367 ACPI_TABLE_UNKNOWN = 0,
368 ACPI_APIC,
369 ACPI_BOOT,
370 ACPI_DBGP,
371 ACPI_DSDT,
372 ACPI_ECDT,
373 ACPI_ETDT,
374 ACPI_FADT,
375 ACPI_FACS,
376 ACPI_OEMX,
377 ACPI_PSDT,
378 ACPI_SBST,
379 ACPI_SLIT,
380 ACPI_SPCR,
381 ACPI_SRAT,
382 ACPI_SSDT,
383 ACPI_SPMI,
384 ACPI_HPET,
385 ACPI_MCFG,
386 ACPI_TABLE_COUNT
387};
388
389typedef int (*acpi_table_handler) (unsigned long phys_addr, unsigned long size);
390
391extern acpi_table_handler acpi_table_ops[ACPI_TABLE_COUNT];
392 77
393typedef int (*acpi_madt_entry_handler) (acpi_table_entry_header *header, const unsigned long end); 78typedef int (*acpi_madt_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
394 79
395char * __acpi_map_table (unsigned long phys_addr, unsigned long size); 80char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
396unsigned long acpi_find_rsdp (void); 81unsigned long acpi_find_rsdp (void);
@@ -399,14 +84,12 @@ int acpi_boot_table_init (void);
399int acpi_numa_init (void); 84int acpi_numa_init (void);
400 85
401int acpi_table_init (void); 86int acpi_table_init (void);
402int acpi_table_parse (enum acpi_table_id id, acpi_table_handler handler); 87int acpi_table_parse (char *id, acpi_table_handler handler);
403int acpi_get_table_header_early (enum acpi_table_id id, struct acpi_table_header **header); 88int acpi_table_parse_madt (enum acpi_madt_type id, acpi_madt_entry_handler handler, unsigned int max_entries);
404int acpi_table_parse_madt (enum acpi_madt_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries); 89int acpi_table_parse_srat (enum acpi_srat_type id, acpi_madt_entry_handler handler, unsigned int max_entries);
405int acpi_table_parse_srat (enum acpi_srat_entry_id id, acpi_madt_entry_handler handler, unsigned int max_entries); 90int acpi_parse_mcfg (struct acpi_table_header *header);
406int acpi_parse_mcfg (unsigned long phys_addr, unsigned long size); 91void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
407void acpi_table_print (struct acpi_table_header *header, unsigned long phys_addr); 92void acpi_table_print_srat_entry (struct acpi_subtable_header *srat);
408void acpi_table_print_madt_entry (acpi_table_entry_header *madt);
409void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
410 93
411/* the following four functions are architecture-dependent */ 94/* the following four functions are architecture-dependent */
412#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT 95#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
@@ -417,8 +100,8 @@ void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
417#define acpi_numa_arch_fixup() do {} while (0) 100#define acpi_numa_arch_fixup() do {} while (0)
418#else 101#else
419void acpi_numa_slit_init (struct acpi_table_slit *slit); 102void acpi_numa_slit_init (struct acpi_table_slit *slit);
420void acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa); 103void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
421void acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma); 104void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
422void acpi_numa_arch_fixup(void); 105void acpi_numa_arch_fixup(void);
423#endif 106#endif
424 107
@@ -433,7 +116,7 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
433 116
434extern int acpi_mp_config; 117extern int acpi_mp_config;
435 118
436extern struct acpi_table_mcfg_config *pci_mmcfg_config; 119extern struct acpi_mcfg_allocation *pci_mmcfg_config;
437extern int pci_mmcfg_config_num; 120extern int pci_mmcfg_config_num;
438 121
439extern int sbf_port; 122extern int sbf_port;
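
With the change above, acpi_table_parse() takes a 4-character signature string and the handler receives an already mapped struct acpi_table_header instead of a physical address and size. A sketch of the new-style usage, assuming kernel context; the "HPET" signature and the function names are illustrative only:

	static int __init parse_example_table(struct acpi_table_header *table)
	{
		printk(KERN_INFO "%.4s revision %u, length %u\n",
		       table->signature, table->revision, table->length);
		return 0;
	}

	static int __init example_init(void)
	{
		/* Any 4-character ACPI table id can be passed as the string. */
		return acpi_table_parse("HPET", parse_example_table);
	}
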
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8e4dbb51fc70..50d568ec178a 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -1,10 +1,8 @@
1#ifndef __HID_DEBUG_H
2#define __HID_DEBUG_H
3
1/* 4/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $ 5 * Copyright (c) 2007 Jiri Kosina
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 *
7 * Some debug stuff for the HID parser.
8 */ 6 */
9 7
10/* 8/*
@@ -22,737 +20,26 @@
22 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * 22 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 23 */
29 24
30#include <linux/input.h> 25#ifdef CONFIG_HID_DEBUG
31
32struct hid_usage_entry {
33 unsigned page;
34 unsigned usage;
35 char *description;
36};
37
38static const struct hid_usage_entry hid_usage_table[] = {
39 { 0, 0, "Undefined" },
40 { 1, 0, "GenericDesktop" },
41 {0, 0x01, "Pointer"},
42 {0, 0x02, "Mouse"},
43 {0, 0x04, "Joystick"},
44 {0, 0x05, "GamePad"},
45 {0, 0x06, "Keyboard"},
46 {0, 0x07, "Keypad"},
47 {0, 0x08, "MultiAxis"},
48 {0, 0x30, "X"},
49 {0, 0x31, "Y"},
50 {0, 0x32, "Z"},
51 {0, 0x33, "Rx"},
52 {0, 0x34, "Ry"},
53 {0, 0x35, "Rz"},
54 {0, 0x36, "Slider"},
55 {0, 0x37, "Dial"},
56 {0, 0x38, "Wheel"},
57 {0, 0x39, "HatSwitch"},
58 {0, 0x3a, "CountedBuffer"},
59 {0, 0x3b, "ByteCount"},
60 {0, 0x3c, "MotionWakeup"},
61 {0, 0x3d, "Start"},
62 {0, 0x3e, "Select"},
63 {0, 0x40, "Vx"},
64 {0, 0x41, "Vy"},
65 {0, 0x42, "Vz"},
66 {0, 0x43, "Vbrx"},
67 {0, 0x44, "Vbry"},
68 {0, 0x45, "Vbrz"},
69 {0, 0x46, "Vno"},
70 {0, 0x80, "SystemControl"},
71 {0, 0x81, "SystemPowerDown"},
72 {0, 0x82, "SystemSleep"},
73 {0, 0x83, "SystemWakeUp"},
74 {0, 0x84, "SystemContextMenu"},
75 {0, 0x85, "SystemMainMenu"},
76 {0, 0x86, "SystemAppMenu"},
77 {0, 0x87, "SystemMenuHelp"},
78 {0, 0x88, "SystemMenuExit"},
79 {0, 0x89, "SystemMenuSelect"},
80 {0, 0x8a, "SystemMenuRight"},
81 {0, 0x8b, "SystemMenuLeft"},
82 {0, 0x8c, "SystemMenuUp"},
83 {0, 0x8d, "SystemMenuDown"},
84 {0, 0x90, "D-PadUp"},
85 {0, 0x91, "D-PadDown"},
86 {0, 0x92, "D-PadRight"},
87 {0, 0x93, "D-PadLeft"},
88 { 2, 0, "Simulation" },
89 {0, 0xb0, "Aileron"},
90 {0, 0xb1, "AileronTrim"},
91 {0, 0xb2, "Anti-Torque"},
92 {0, 0xb3, "Autopilot"},
93 {0, 0xb4, "Chaff"},
94 {0, 0xb5, "Collective"},
95 {0, 0xb6, "DiveBrake"},
96 {0, 0xb7, "ElectronicCountermeasures"},
97 {0, 0xb8, "Elevator"},
98 {0, 0xb9, "ElevatorTrim"},
99 {0, 0xba, "Rudder"},
100 {0, 0xbb, "Throttle"},
101 {0, 0xbc, "FlightCommunications"},
102 {0, 0xbd, "FlareRelease"},
103 {0, 0xbe, "LandingGear"},
104 {0, 0xbf, "ToeBrake"},
105 { 7, 0, "Keyboard" },
106 { 8, 0, "LED" },
107 {0, 0x01, "NumLock"},
108 {0, 0x02, "CapsLock"},
109 {0, 0x03, "ScrollLock"},
110 {0, 0x04, "Compose"},
111 {0, 0x05, "Kana"},
112 {0, 0x4b, "GenericIndicator"},
113 { 9, 0, "Button" },
114 { 10, 0, "Ordinal" },
115 { 12, 0, "Consumer" },
116 {0, 0x238, "HorizontalWheel"},
117 { 13, 0, "Digitizers" },
118 {0, 0x01, "Digitizer"},
119 {0, 0x02, "Pen"},
120 {0, 0x03, "LightPen"},
121 {0, 0x04, "TouchScreen"},
122 {0, 0x05, "TouchPad"},
123 {0, 0x20, "Stylus"},
124 {0, 0x21, "Puck"},
125 {0, 0x22, "Finger"},
126 {0, 0x30, "TipPressure"},
127 {0, 0x31, "BarrelPressure"},
128 {0, 0x32, "InRange"},
129 {0, 0x33, "Touch"},
130 {0, 0x34, "UnTouch"},
131 {0, 0x35, "Tap"},
132 {0, 0x39, "TabletFunctionKey"},
133 {0, 0x3a, "ProgramChangeKey"},
134 {0, 0x3c, "Invert"},
135 {0, 0x42, "TipSwitch"},
136 {0, 0x43, "SecondaryTipSwitch"},
137 {0, 0x44, "BarrelSwitch"},
138 {0, 0x45, "Eraser"},
139 {0, 0x46, "TabletPick"},
140 { 15, 0, "PhysicalInterfaceDevice" },
141 {0, 0x00, "Undefined"},
142 {0, 0x01, "Physical_Interface_Device"},
143 {0, 0x20, "Normal"},
144 {0, 0x21, "Set_Effect_Report"},
145 {0, 0x22, "Effect_Block_Index"},
146 {0, 0x23, "Parameter_Block_Offset"},
147 {0, 0x24, "ROM_Flag"},
148 {0, 0x25, "Effect_Type"},
149 {0, 0x26, "ET_Constant_Force"},
150 {0, 0x27, "ET_Ramp"},
151 {0, 0x28, "ET_Custom_Force_Data"},
152 {0, 0x30, "ET_Square"},
153 {0, 0x31, "ET_Sine"},
154 {0, 0x32, "ET_Triangle"},
155 {0, 0x33, "ET_Sawtooth_Up"},
156 {0, 0x34, "ET_Sawtooth_Down"},
157 {0, 0x40, "ET_Spring"},
158 {0, 0x41, "ET_Damper"},
159 {0, 0x42, "ET_Inertia"},
160 {0, 0x43, "ET_Friction"},
161 {0, 0x50, "Duration"},
162 {0, 0x51, "Sample_Period"},
163 {0, 0x52, "Gain"},
164 {0, 0x53, "Trigger_Button"},
165 {0, 0x54, "Trigger_Repeat_Interval"},
166 {0, 0x55, "Axes_Enable"},
167 {0, 0x56, "Direction_Enable"},
168 {0, 0x57, "Direction"},
169 {0, 0x58, "Type_Specific_Block_Offset"},
170 {0, 0x59, "Block_Type"},
171 {0, 0x5A, "Set_Envelope_Report"},
172 {0, 0x5B, "Attack_Level"},
173 {0, 0x5C, "Attack_Time"},
174 {0, 0x5D, "Fade_Level"},
175 {0, 0x5E, "Fade_Time"},
176 {0, 0x5F, "Set_Condition_Report"},
177 {0, 0x60, "CP_Offset"},
178 {0, 0x61, "Positive_Coefficient"},
179 {0, 0x62, "Negative_Coefficient"},
180 {0, 0x63, "Positive_Saturation"},
181 {0, 0x64, "Negative_Saturation"},
182 {0, 0x65, "Dead_Band"},
183 {0, 0x66, "Download_Force_Sample"},
184 {0, 0x67, "Isoch_Custom_Force_Enable"},
185 {0, 0x68, "Custom_Force_Data_Report"},
186 {0, 0x69, "Custom_Force_Data"},
187 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
188 {0, 0x6B, "Set_Custom_Force_Report"},
189 {0, 0x6C, "Custom_Force_Data_Offset"},
190 {0, 0x6D, "Sample_Count"},
191 {0, 0x6E, "Set_Periodic_Report"},
192 {0, 0x6F, "Offset"},
193 {0, 0x70, "Magnitude"},
194 {0, 0x71, "Phase"},
195 {0, 0x72, "Period"},
196 {0, 0x73, "Set_Constant_Force_Report"},
197 {0, 0x74, "Set_Ramp_Force_Report"},
198 {0, 0x75, "Ramp_Start"},
199 {0, 0x76, "Ramp_End"},
200 {0, 0x77, "Effect_Operation_Report"},
201 {0, 0x78, "Effect_Operation"},
202 {0, 0x79, "Op_Effect_Start"},
203 {0, 0x7A, "Op_Effect_Start_Solo"},
204 {0, 0x7B, "Op_Effect_Stop"},
205 {0, 0x7C, "Loop_Count"},
206 {0, 0x7D, "Device_Gain_Report"},
207 {0, 0x7E, "Device_Gain"},
208 {0, 0x7F, "PID_Pool_Report"},
209 {0, 0x80, "RAM_Pool_Size"},
210 {0, 0x81, "ROM_Pool_Size"},
211 {0, 0x82, "ROM_Effect_Block_Count"},
212 {0, 0x83, "Simultaneous_Effects_Max"},
213 {0, 0x84, "Pool_Alignment"},
214 {0, 0x85, "PID_Pool_Move_Report"},
215 {0, 0x86, "Move_Source"},
216 {0, 0x87, "Move_Destination"},
217 {0, 0x88, "Move_Length"},
218 {0, 0x89, "PID_Block_Load_Report"},
219 {0, 0x8B, "Block_Load_Status"},
220 {0, 0x8C, "Block_Load_Success"},
221 {0, 0x8D, "Block_Load_Full"},
222 {0, 0x8E, "Block_Load_Error"},
223 {0, 0x8F, "Block_Handle"},
224 {0, 0x90, "PID_Block_Free_Report"},
225 {0, 0x91, "Type_Specific_Block_Handle"},
226 {0, 0x92, "PID_State_Report"},
227 {0, 0x94, "Effect_Playing"},
228 {0, 0x95, "PID_Device_Control_Report"},
229 {0, 0x96, "PID_Device_Control"},
230 {0, 0x97, "DC_Enable_Actuators"},
231 {0, 0x98, "DC_Disable_Actuators"},
232 {0, 0x99, "DC_Stop_All_Effects"},
233 {0, 0x9A, "DC_Device_Reset"},
234 {0, 0x9B, "DC_Device_Pause"},
235 {0, 0x9C, "DC_Device_Continue"},
236 {0, 0x9F, "Device_Paused"},
237 {0, 0xA0, "Actuators_Enabled"},
238 {0, 0xA4, "Safety_Switch"},
239 {0, 0xA5, "Actuator_Override_Switch"},
240 {0, 0xA6, "Actuator_Power"},
241 {0, 0xA7, "Start_Delay"},
242 {0, 0xA8, "Parameter_Block_Size"},
243 {0, 0xA9, "Device_Managed_Pool"},
244 {0, 0xAA, "Shared_Parameter_Blocks"},
245 {0, 0xAB, "Create_New_Effect_Report"},
246 {0, 0xAC, "RAM_Pool_Available"},
247 { 0x84, 0, "Power Device" },
248 { 0x84, 0x02, "PresentStatus" },
249 { 0x84, 0x03, "ChangeStatus" },
250 { 0x84, 0x04, "UPS" },
251 { 0x84, 0x05, "PowerSupply" },
252 { 0x84, 0x10, "BatterySystem" },
253 { 0x84, 0x11, "BatterySystemID" },
254 { 0x84, 0x12, "Battery" },
255 { 0x84, 0x13, "BatteryID" },
256 { 0x84, 0x14, "Charger" },
257 { 0x84, 0x15, "ChargerID" },
258 { 0x84, 0x16, "PowerConverter" },
259 { 0x84, 0x17, "PowerConverterID" },
260 { 0x84, 0x18, "OutletSystem" },
261 { 0x84, 0x19, "OutletSystemID" },
262 { 0x84, 0x1a, "Input" },
263 { 0x84, 0x1b, "InputID" },
264 { 0x84, 0x1c, "Output" },
265 { 0x84, 0x1d, "OutputID" },
266 { 0x84, 0x1e, "Flow" },
267 { 0x84, 0x1f, "FlowID" },
268 { 0x84, 0x20, "Outlet" },
269 { 0x84, 0x21, "OutletID" },
270 { 0x84, 0x22, "Gang" },
271 { 0x84, 0x24, "PowerSummary" },
272 { 0x84, 0x25, "PowerSummaryID" },
273 { 0x84, 0x30, "Voltage" },
274 { 0x84, 0x31, "Current" },
275 { 0x84, 0x32, "Frequency" },
276 { 0x84, 0x33, "ApparentPower" },
277 { 0x84, 0x35, "PercentLoad" },
278 { 0x84, 0x40, "ConfigVoltage" },
279 { 0x84, 0x41, "ConfigCurrent" },
280 { 0x84, 0x43, "ConfigApparentPower" },
281 { 0x84, 0x53, "LowVoltageTransfer" },
282 { 0x84, 0x54, "HighVoltageTransfer" },
283 { 0x84, 0x56, "DelayBeforeStartup" },
284 { 0x84, 0x57, "DelayBeforeShutdown" },
285 { 0x84, 0x58, "Test" },
286 { 0x84, 0x5a, "AudibleAlarmControl" },
287 { 0x84, 0x60, "Present" },
288 { 0x84, 0x61, "Good" },
289 { 0x84, 0x62, "InternalFailure" },
290 { 0x84, 0x65, "Overload" },
291 { 0x84, 0x66, "OverCharged" },
292 { 0x84, 0x67, "OverTemperature" },
293 { 0x84, 0x68, "ShutdownRequested" },
294 { 0x84, 0x69, "ShutdownImminent" },
295 { 0x84, 0x6b, "SwitchOn/Off" },
296 { 0x84, 0x6c, "Switchable" },
297 { 0x84, 0x6d, "Used" },
298 { 0x84, 0x6e, "Boost" },
299 { 0x84, 0x73, "CommunicationLost" },
300 { 0x84, 0xfd, "iManufacturer" },
301 { 0x84, 0xfe, "iProduct" },
302 { 0x84, 0xff, "iSerialNumber" },
303 { 0x85, 0, "Battery System" },
304 { 0x85, 0x01, "SMBBatteryMode" },
305 { 0x85, 0x02, "SMBBatteryStatus" },
306 { 0x85, 0x03, "SMBAlarmWarning" },
307 { 0x85, 0x04, "SMBChargerMode" },
308 { 0x85, 0x05, "SMBChargerStatus" },
309 { 0x85, 0x06, "SMBChargerSpecInfo" },
310 { 0x85, 0x07, "SMBSelectorState" },
311 { 0x85, 0x08, "SMBSelectorPresets" },
312 { 0x85, 0x09, "SMBSelectorInfo" },
313 { 0x85, 0x29, "RemainingCapacityLimit" },
314 { 0x85, 0x2c, "CapacityMode" },
315 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
316 { 0x85, 0x44, "Charging" },
317 { 0x85, 0x45, "Discharging" },
318 { 0x85, 0x4b, "NeedReplacement" },
319 { 0x85, 0x66, "RemainingCapacity" },
320 { 0x85, 0x68, "RunTimeToEmpty" },
321 { 0x85, 0x6a, "AverageTimeToFull" },
322 { 0x85, 0x83, "DesignCapacity" },
323 { 0x85, 0x85, "ManufacturerDate" },
324 { 0x85, 0x89, "iDeviceChemistry" },
325 { 0x85, 0x8b, "Rechargable" },
326 { 0x85, 0x8f, "iOEMInformation" },
327 { 0x85, 0x8d, "CapacityGranularity1" },
328 { 0x85, 0xd0, "ACPresent" },
329 /* pages 0xff00 to 0xffff are vendor-specific */
330 { 0xffff, 0, "Vendor-specific-FF" },
331 { 0, 0, NULL }
332};
333
334static void resolv_usage_page(unsigned page) {
335 const struct hid_usage_entry *p;
336
337 for (p = hid_usage_table; p->description; p++)
338 if (p->page == page) {
339 printk("%s", p->description);
340 return;
341 }
342 printk("%04x", page);
343}
344
345static void resolv_usage(unsigned usage) {
346 const struct hid_usage_entry *p;
347
348 resolv_usage_page(usage >> 16);
349 printk(".");
350 for (p = hid_usage_table; p->description; p++)
351 if (p->page == (usage >> 16)) {
352 for(++p; p->description && p->usage != 0; p++)
353 if (p->usage == (usage & 0xffff)) {
354 printk("%s", p->description);
355 return;
356 }
357 break;
358 }
359 printk("%04x", usage & 0xffff);
360}
361
362__inline__ static void tab(int n) {
363 while (n--) printk(" ");
364}
365
366static void hid_dump_field(struct hid_field *field, int n) {
367 int j;
368
369 if (field->physical) {
370 tab(n);
371 printk("Physical(");
372 resolv_usage(field->physical); printk(")\n");
373 }
374 if (field->logical) {
375 tab(n);
376 printk("Logical(");
377 resolv_usage(field->logical); printk(")\n");
378 }
379 tab(n); printk("Usage(%d)\n", field->maxusage);
380 for (j = 0; j < field->maxusage; j++) {
381 tab(n+2);resolv_usage(field->usage[j].hid); printk("\n");
382 }
383 if (field->logical_minimum != field->logical_maximum) {
384 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
385 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
386 }
387 if (field->physical_minimum != field->physical_maximum) {
388 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
389 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
390 }
391 if (field->unit_exponent) {
392 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
393 }
394 if (field->unit) {
395 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
396 char *units[5][8] = {
397 { "None", "None", "None", "None", "None", "None", "None", "None" },
398 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
399 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
400 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
401 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
402 };
403
404 int i;
405 int sys;
406 __u32 data = field->unit;
407
408 /* First nibble tells us which system we're in. */
409 sys = data & 0xf;
410 data >>= 4;
411
412 if(sys > 4) {
413 tab(n); printk("Unit(Invalid)\n");
414 }
415 else {
416 int earlier_unit = 0;
417
418 tab(n); printk("Unit(%s : ", systems[sys]);
419
420 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
421 char nibble = data & 0xf;
422 data >>= 4;
423 if (nibble != 0) {
424 if(earlier_unit++ > 0)
425 printk("*");
426 printk("%s", units[sys][i]);
427 if(nibble != 1) {
428 /* This is a _signed_ nibble(!) */
429
430 int val = nibble & 0x7;
431 if(nibble & 0x08)
432 val = -((0x7 & ~val) +1);
433 printk("^%d", val);
434 }
435 }
436 }
437 printk(")\n");
438 }
439 }
440 tab(n); printk("Report Size(%u)\n", field->report_size);
441 tab(n); printk("Report Count(%u)\n", field->report_count);
442 tab(n); printk("Report Offset(%u)\n", field->report_offset);
443
444 tab(n); printk("Flags( ");
445 j = field->flags;
446 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
447 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
448 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
449 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
450 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
451 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
452 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
453 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
454 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
455 printk(")\n");
456}
457
458static void __attribute__((unused)) hid_dump_device(struct hid_device *device) {
459 struct hid_report_enum *report_enum;
460 struct hid_report *report;
461 struct list_head *list;
462 unsigned i,k;
463 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
464
465 for (i = 0; i < HID_REPORT_TYPES; i++) {
466 report_enum = device->report_enum + i;
467 list = report_enum->report_list.next;
468 while (list != &report_enum->report_list) {
469 report = (struct hid_report *) list;
470 tab(2);
471 printk("%s", table[i]);
472 if (report->id)
473 printk("(%d)", report->id);
474 printk("[%s]", table[report->type]);
475 printk("\n");
476 for (k = 0; k < report->maxfield; k++) {
477 tab(4);
478 printk("Field(%d)\n", k);
479 hid_dump_field(report->field[k], 6);
480 }
481 list = list->next;
482 }
483 }
484}
485
486static void __attribute__((unused)) hid_dump_input(struct hid_usage *usage, __s32 value) {
487 printk("hid-debug: input ");
488 resolv_usage(usage->hid);
489 printk(" = %d\n", value);
490}
491
492
493static char *events[EV_MAX + 1] = {
494 [EV_SYN] = "Sync", [EV_KEY] = "Key",
495 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
496 [EV_MSC] = "Misc", [EV_LED] = "LED",
497 [EV_SND] = "Sound", [EV_REP] = "Repeat",
498 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
499 [EV_FF_STATUS] = "ForceFeedbackStatus",
500};
501
502static char *syncs[2] = {
503 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
504};
505static char *keys[KEY_MAX + 1] = {
506 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
507 [KEY_1] = "1", [KEY_2] = "2",
508 [KEY_3] = "3", [KEY_4] = "4",
509 [KEY_5] = "5", [KEY_6] = "6",
510 [KEY_7] = "7", [KEY_8] = "8",
511 [KEY_9] = "9", [KEY_0] = "0",
512 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
513 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
514 [KEY_Q] = "Q", [KEY_W] = "W",
515 [KEY_E] = "E", [KEY_R] = "R",
516 [KEY_T] = "T", [KEY_Y] = "Y",
517 [KEY_U] = "U", [KEY_I] = "I",
518 [KEY_O] = "O", [KEY_P] = "P",
519 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
520 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
521 [KEY_A] = "A", [KEY_S] = "S",
522 [KEY_D] = "D", [KEY_F] = "F",
523 [KEY_G] = "G", [KEY_H] = "H",
524 [KEY_J] = "J", [KEY_K] = "K",
525 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
526 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
527 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
528 [KEY_Z] = "Z", [KEY_X] = "X",
529 [KEY_C] = "C", [KEY_V] = "V",
530 [KEY_B] = "B", [KEY_N] = "N",
531 [KEY_M] = "M", [KEY_COMMA] = "Comma",
532 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
533 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
534 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
535 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
536 [KEY_F2] = "F2", [KEY_F3] = "F3",
537 [KEY_F4] = "F4", [KEY_F5] = "F5",
538 [KEY_F6] = "F6", [KEY_F7] = "F7",
539 [KEY_F8] = "F8", [KEY_F9] = "F9",
540 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
541 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
542 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
543 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
544 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
545 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
546 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
547 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
548 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
549 [KEY_F11] = "F11", [KEY_F12] = "F12",
550 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
551 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
552 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
553 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
554 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
555 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
556 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
557 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
558 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
559 [KEY_END] = "End", [KEY_DOWN] = "Down",
560 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
561 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
562 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
563 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
564 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
565 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
566 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
567 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
568 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
569 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
570 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
571 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
572 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
573 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
574 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
575 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
576 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
577 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
578 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
579 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
580 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
581 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
582 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
583 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
584 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
585 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
586 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
587 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
588 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
589 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
590 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
591 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
592 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
593 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
594 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
595 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
596 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
597 [KEY_F14] = "F14", [KEY_F15] = "F15",
598 [KEY_F16] = "F16", [KEY_F17] = "F17",
599 [KEY_F18] = "F18", [KEY_F19] = "F19",
600 [KEY_F20] = "F20", [KEY_F21] = "F21",
601 [KEY_F22] = "F22", [KEY_F23] = "F23",
602 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
603 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
604 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
605 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
606 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
607 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
608 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
609 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
610 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
611 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
612 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
613 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
614 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
615 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
616 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
617 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
618 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
619 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
620 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
621 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
622 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
623 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
624 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
625 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
626 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
627 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
628 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
629 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
630 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
631 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
632 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
633 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
634 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
635 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
636 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
637 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
638 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
639 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
640 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
641 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
642 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
643 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
644 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
645 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
646 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
647 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
648 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
649 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
650 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
651 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
652 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
653 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
654 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
655 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
656 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
657 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
658 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
659 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
660 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
661 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
662 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
663 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
664 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
665 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
666 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
667 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
668 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
669 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
670 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
671 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
672 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
673 [KEY_LAST] = "Last", [KEY_AB] = "AB",
674 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
675 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
676 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
677 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
678 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
679 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
680 [KEY_DEL_LINE] = "DeleteLine",
681 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
682 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
683 [KEY_DOCUMENTS] = "Documents",
684 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
685 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
686 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
687 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
688 [KEY_FN_S] = "Fn+S",
689 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
690 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
691 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
692 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
693 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
694 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
695 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
696 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
697 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
698 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
699};
700
701static char *relatives[REL_MAX + 1] = {
702 [REL_X] = "X", [REL_Y] = "Y",
703 [REL_Z] = "Z", [REL_RX] = "Rx",
704 [REL_RY] = "Ry", [REL_RZ] = "Rz",
705 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
706 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
707};
708
709static char *absolutes[ABS_MAX + 1] = {
710 [ABS_X] = "X", [ABS_Y] = "Y",
711 [ABS_Z] = "Z", [ABS_RX] = "Rx",
712 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
713 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
714 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
715 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
716 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
717 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
718 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
719 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
720 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
721 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
722 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
723};
724 26
725static char *misc[MSC_MAX + 1] = { 27void hid_dump_input(struct hid_usage *, __s32);
726 [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled", 28void hid_dump_device(struct hid_device *);
727 [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData" 29void hid_dump_field(struct hid_field *, int);
728}; 30void hid_resolv_usage(unsigned);
31void hid_resolv_event(__u8, __u16);
729 32
730static char *leds[LED_MAX + 1] = { 33#else
731 [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock",
732 [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
733 [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
734 [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
735 [LED_MISC] = "Misc",
736};
737 34
738static char *repeats[REP_MAX + 1] = { 35#define hid_dump_input(a,b) do { } while (0)
739 [REP_DELAY] = "Delay", [REP_PERIOD] = "Period" 36#define hid_dump_device(c) do { } while (0)
740}; 37#define hid_dump_field(a,b) do { } while (0)
38#define hid_resolv_usage(a) do { } while (0)
39#define hid_resolv_event(a,b) do { } while (0)
741 40
742static char *sounds[SND_MAX + 1] = { 41#endif /* CONFIG_HID_DEBUG */
743 [SND_CLICK] = "Click", [SND_BELL] = "Bell",
744 [SND_TONE] = "Tone"
745};
746 42
747static char **names[EV_MAX + 1] = {
748 [EV_SYN] = syncs, [EV_KEY] = keys,
749 [EV_REL] = relatives, [EV_ABS] = absolutes,
750 [EV_MSC] = misc, [EV_LED] = leds,
751 [EV_SND] = sounds, [EV_REP] = repeats,
752};
753 43
754static void __attribute__((unused)) resolv_event(__u8 type, __u16 code) { 44#endif
755 45
756 printk("%s.%s", events[type] ? events[type] : "?",
757 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
758}
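
The removed hid_dump_field() above decodes the 32-bit HID unit word nibble by nibble: the lowest nibble selects the unit system, and each following nibble is a signed 4-bit exponent for that system's base unit in that position. A stand-alone sketch of the same decoding, fed a made-up sample value, shows the arithmetic:

/* Stand-alone sketch of the unit decoding done by the removed
 * hid_dump_field(): nibble 0 = unit system, nibbles 1..7 = signed
 * 4-bit exponents for the base units of that system. */
#include <stdio.h>
#include <stdint.h>

static const char *systems[5] = { "None", "SI Linear", "SI Rotation",
				  "English Linear", "English Rotation" };
static const char *units[5][8] = {
	{ "None", "None", "None", "None", "None", "None", "None", "None" },
	{ "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
	{ "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
	{ "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
	{ "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
};

static void dump_unit(uint32_t data)
{
	int sys = data & 0xf;		/* first nibble: unit system */
	int i, printed = 0;

	data >>= 4;
	if (sys > 4) {
		printf("Unit(Invalid)\n");
		return;
	}
	printf("Unit(%s : ", systems[sys]);
	for (i = 1; i < 8; i++, data >>= 4) {
		int nibble = data & 0xf;

		if (!nibble)
			continue;
		if (printed++)
			printf("*");
		printf("%s", units[sys][i]);
		if (nibble != 1) {	/* exponent is a signed 4-bit value */
			int val = nibble & 0x7;

			if (nibble & 0x8)
				val = -((0x7 & ~val) + 1);
			printf("^%d", val);
		}
	}
	printf(")\n");
}

int main(void)
{
	/* made-up sample: SI linear, cm^2 * g * s^-2 (an energy-style unit) */
	dump_unit(0xE121);
	return 0;
}
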
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 342b4e639acb..93173fe45634 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -264,6 +264,8 @@ struct hid_item {
264#define HID_QUIRK_INVERT_HWHEEL 0x00004000 264#define HID_QUIRK_INVERT_HWHEEL 0x00004000
265#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000 265#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000
266#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000 266#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000
267#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00020000
268#define HID_QUIRK_IGNORE_MOUSE 0x00040000
267 269
268/* 270/*
269 * This is the global environment of the parser. This information is 271 * This is the global environment of the parser. This information is
@@ -430,8 +432,8 @@ struct hid_device { /* device report descriptor */
430 432
431 /* device-specific function pointers */ 433 /* device-specific function pointers */
432 int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int); 434 int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int);
433 int (*hidinput_open) (struct input_dev *); 435 int (*hid_open) (struct hid_device *);
434 void (*hidinput_close) (struct input_dev *); 436 void (*hid_close) (struct hid_device *);
435 437
436 /* hiddev event handler */ 438 /* hiddev event handler */
437 void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, 439 void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field,
@@ -471,16 +473,6 @@ struct hid_descriptor {
471 struct hid_class_descriptor desc[1]; 473 struct hid_class_descriptor desc[1];
472} __attribute__ ((packed)); 474} __attribute__ ((packed));
473 475
474#ifdef DEBUG
475#include "hid-debug.h"
476#else
477#define hid_dump_input(a,b) do { } while (0)
478#define hid_dump_device(c) do { } while (0)
479#define hid_dump_field(a,b) do { } while (0)
480#define resolv_usage(a) do { } while (0)
481#define resolv_event(a,b) do { } while (0)
482#endif
483
484/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 476/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
485/* We ignore a few input applications that are not widely used */ 477/* We ignore a few input applications that are not widely used */
486#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) 478#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001))
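
Together with the new hid-debug.h above, the hunk just removed drops the old #ifdef DEBUG coupling: the debug helpers are now selected by CONFIG_HID_DEBUG, resolving either to real declarations or to empty do { } while (0) macros, so call sites stay unconditional. A generic sketch of that compile-time pattern (the mydrv_* names are made up):

#include <stdio.h>

#ifdef MYDRV_DEBUG
static void mydrv_dump_value(int v) { printf("value=%d\n", v); }
#else
#define mydrv_dump_value(v) do { } while (0)	/* compiles away, still statement-safe */
#endif

int main(void)
{
	if (1)
		mydrv_dump_value(42);	/* legal with or without MYDRV_DEBUG */
	return 0;
}
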
@@ -503,6 +495,7 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size);
503int hid_ff_init(struct hid_device *hid); 495int hid_ff_init(struct hid_device *hid);
504 496
505int hid_lgff_init(struct hid_device *hid); 497int hid_lgff_init(struct hid_device *hid);
498int hid_plff_init(struct hid_device *hid);
506int hid_tmff_init(struct hid_device *hid); 499int hid_tmff_init(struct hid_device *hid);
507int hid_zpff_init(struct hid_device *hid); 500int hid_zpff_init(struct hid_device *hid);
508#ifdef CONFIG_HID_PID 501#ifdef CONFIG_HID_PID
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d0e6a5497614..e45712acfac5 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -71,6 +71,7 @@ struct mmc_card {
71#define MMC_STATE_SDCARD (1<<3) /* is an SD card */ 71#define MMC_STATE_SDCARD (1<<3) /* is an SD card */
72#define MMC_STATE_READONLY (1<<4) /* card is read-only */ 72#define MMC_STATE_READONLY (1<<4) /* card is read-only */
73#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */ 73#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */
74#define MMC_STATE_BLOCKADDR (1<<6) /* card uses block-addressing */
74 u32 raw_cid[4]; /* raw card CID */ 75 u32 raw_cid[4]; /* raw card CID */
75 u32 raw_csd[4]; /* raw card CSD */ 76 u32 raw_csd[4]; /* raw card CSD */
76 u32 raw_scr[2]; /* raw card SCR */ 77 u32 raw_scr[2]; /* raw card SCR */
@@ -87,6 +88,7 @@ struct mmc_card {
87#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD) 88#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD)
88#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) 89#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
89#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 90#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
91#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
90 92
91#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 93#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
92#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD) 94#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD)
@@ -94,6 +96,7 @@ struct mmc_card {
94#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD) 96#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD)
95#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 97#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
96#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 98#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
99#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
97 100
98#define mmc_card_name(c) ((c)->cid.prod_name) 101#define mmc_card_name(c) ((c)->cid.prod_name)
99#define mmc_card_id(c) ((c)->dev.bus_id) 102#define mmc_card_id(c) ((c)->dev.bus_id)
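
The card.h hunks extend the existing bit-flag pattern with MMC_STATE_BLOCKADDR: each state is one bit in card->state, mmc_card_*() tests it and mmc_card_set_*() ORs it in. Block addressing typically means read/write arguments are block numbers rather than byte offsets. A stand-alone sketch of the pattern with a stand-in struct:

/* Stand-alone sketch of the state bit-flag pattern extended above;
 * the struct and the detection step are stand-ins, not kernel code. */
#include <stdio.h>

#define MMC_STATE_HIGHSPEED	(1 << 5)
#define MMC_STATE_BLOCKADDR	(1 << 6)	/* card uses block addressing */

struct mmc_card_stub { unsigned int state; };

#define mmc_card_blockaddr(c)		((c)->state & MMC_STATE_BLOCKADDR)
#define mmc_card_set_blockaddr(c)	((c)->state |= MMC_STATE_BLOCKADDR)

int main(void)
{
	struct mmc_card_stub card = { .state = MMC_STATE_HIGHSPEED };

	mmc_card_set_blockaddr(&card);	/* e.g. after a high-capacity card is detected */
	if (mmc_card_blockaddr(&card))
		printf("addresses are block numbers, not byte offsets\n");
	return 0;
}
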
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c15ae1986b98..913e5752569f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -92,8 +92,10 @@ struct mmc_host {
92 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 92 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
93 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */ 93 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
94 unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */ 94 unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */
95 unsigned short max_sectors; /* see blk_queue_max_sectors */
96 unsigned short unused; 95 unsigned short unused;
96 unsigned int max_req_size; /* maximum number of bytes in one req */
97 unsigned int max_blk_size; /* maximum size of one mmc block */
98 unsigned int max_blk_count; /* maximum number of blocks in one req */
97 99
98 /* private data */ 100 /* private data */
99 struct mmc_ios ios; /* current io bus settings */ 101 struct mmc_ios ios; /* current io bus settings */
@@ -106,8 +108,9 @@ struct mmc_host {
106 struct list_head cards; /* devices attached to this host */ 108 struct list_head cards; /* devices attached to this host */
107 109
108 wait_queue_head_t wq; 110 wait_queue_head_t wq;
109 spinlock_t lock; /* card_busy lock */ 111 spinlock_t lock; /* claimed lock */
110 struct mmc_card *card_busy; /* the MMC card claiming host */ 112 unsigned int claimed:1; /* host exclusively claimed */
113
111 struct mmc_card *card_selected; /* the selected MMC card */ 114 struct mmc_card *card_selected; /* the selected MMC card */
112 115
113 struct delayed_work detect; 116 struct delayed_work detect;
@@ -126,6 +129,7 @@ static inline void *mmc_priv(struct mmc_host *host)
126} 129}
127 130
128#define mmc_dev(x) ((x)->parent) 131#define mmc_dev(x) ((x)->parent)
132#define mmc_classdev(x) (&(x)->class_dev)
129#define mmc_hostname(x) ((x)->class_dev.bus_id) 133#define mmc_hostname(x) ((x)->class_dev.bus_id)
130 134
131extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 135extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index bcf24909d677..cdc54be804f1 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -43,6 +43,7 @@ struct mmc_command {
43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) 43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
44#define MMC_RSP_R3 (MMC_RSP_PRESENT) 44#define MMC_RSP_R3 (MMC_RSP_PRESENT)
45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) 45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
46#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
46 47
47#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) 48#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
48 49
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 2dce60c43f4b..c90b6768329d 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -79,9 +79,12 @@
79#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ 79#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
80 80
81/* SD commands type argument response */ 81/* SD commands type argument response */
82 /* class 8 */ 82 /* class 0 */
83/* This is basically the same command as for MMC with some quirks. */ 83/* This is basically the same command as for MMC with some quirks. */
84#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ 84#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
85#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
86
87 /* class 10 */
85#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ 88#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
86 89
87 /* Application commands */ 90 /* Application commands */
@@ -115,6 +118,14 @@
115 */ 118 */
116 119
117/* 120/*
121 * SD_SEND_IF_COND argument format:
122 *
123 * [31:12] Reserved (0)
124 * [11:8] Host Voltage Supply Flags
125 * [7:0] Check Pattern (0xAA)
126 */
127
128/*
118 MMC status in R1 129 MMC status in R1
119 Type 130 Type
120 e : error bit 131 e : error bit
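
The protocol.h hunk adds SD_SEND_IF_COND (CMD8) together with its argument layout: bits 11:8 carry the host voltage supply flags and bits 7:0 a 0xAA check pattern, which the card echoes back in the new R7 response. A small sketch of building and checking that argument; the 0x1 encoding for the 2.7-3.6 V range is an assumption, not something stated in this patch:

/* Sketch of the SD_SEND_IF_COND argument layout documented above. */
#include <stdio.h>
#include <stdint.h>

#define SD_IF_COND_CHECK_PATTERN	0xAA
#define SD_IF_COND_VHS_27_36V		0x1	/* assumed encoding for 2.7-3.6 V */

static uint32_t sd_send_if_cond_arg(uint8_t vhs)
{
	return ((uint32_t)(vhs & 0xF) << 8) | SD_IF_COND_CHECK_PATTERN;
}

static int sd_if_cond_response_ok(uint32_t r7, uint8_t vhs)
{
	/* the card must echo the check pattern and accept the voltage range */
	return (r7 & 0xFF) == SD_IF_COND_CHECK_PATTERN &&
	       ((r7 >> 8) & 0xF) == (vhs & 0xF);
}

int main(void)
{
	uint32_t arg = sd_send_if_cond_arg(SD_IF_COND_VHS_27_36V);

	printf("CMD8 arg = 0x%08x\n", arg);	/* 0x000001aa */
	printf("echo ok  = %d\n",
	       sd_if_cond_response_ok(0x000001AA, SD_IF_COND_VHS_27_36V));
	return 0;
}
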
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 7098961cc869..5b4f1e3caf5d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -735,9 +735,11 @@
735#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 735#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
736#define PCI_DEVICE_ID_TI_4450 0x8011 736#define PCI_DEVICE_ID_TI_4450 0x8011
737#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 737#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
738#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
738#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 739#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
739#define PCI_DEVICE_ID_TI_X515 0x8036 740#define PCI_DEVICE_ID_TI_X515 0x8036
740#define PCI_DEVICE_ID_TI_XX12 0x8039 741#define PCI_DEVICE_ID_TI_XX12 0x8039
742#define PCI_DEVICE_ID_TI_XX12_FM 0x803b
741#define PCI_DEVICE_ID_TI_1130 0xac12 743#define PCI_DEVICE_ID_TI_1130 0xac12
742#define PCI_DEVICE_ID_TI_1031 0xac13 744#define PCI_DEVICE_ID_TI_1031 0xac13
743#define PCI_DEVICE_ID_TI_1131 0xac15 745#define PCI_DEVICE_ID_TI_1131 0xac15
@@ -765,6 +767,7 @@
765#define PCI_DEVICE_ID_TI_1510 0xac56 767#define PCI_DEVICE_ID_TI_1510 0xac56
766#define PCI_DEVICE_ID_TI_X620 0xac8d 768#define PCI_DEVICE_ID_TI_X620 0xac8d
767#define PCI_DEVICE_ID_TI_X420 0xac8e 769#define PCI_DEVICE_ID_TI_X420 0xac8e
770#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
768 771
769#define PCI_VENDOR_ID_SONY 0x104d 772#define PCI_VENDOR_ID_SONY 0x104d
770 773
@@ -1971,6 +1974,7 @@
1971#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 1974#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
1972 1975
1973#define PCI_VENDOR_ID_ENE 0x1524 1976#define PCI_VENDOR_ID_ENE 0x1524
1977#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
1974#define PCI_DEVICE_ID_ENE_1211 0x1211 1978#define PCI_DEVICE_ID_ENE_1211 0x1211
1975#define PCI_DEVICE_ID_ENE_1225 0x1225 1979#define PCI_DEVICE_ID_ENE_1225 0x1225
1976#define PCI_DEVICE_ID_ENE_1410 0x1410 1980#define PCI_DEVICE_ID_ENE_1410 0x1410
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
index dfb8052eee5e..3deb0a6c1370 100644
--- a/include/linux/tifm.h
+++ b/include/linux/tifm.h
@@ -17,7 +17,7 @@
17#include <linux/wait.h> 17#include <linux/wait.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/scatterlist.h> 20#include <linux/kthread.h>
21 21
22/* Host registers (relative to pci base address): */ 22/* Host registers (relative to pci base address): */
23enum { 23enum {
@@ -62,11 +62,10 @@ enum {
62 62
63 63
64#define TIFM_IRQ_ENABLE 0x80000000 64#define TIFM_IRQ_ENABLE 0x80000000
65#define TIFM_IRQ_SOCKMASK 0x00000001 65#define TIFM_IRQ_SOCKMASK(x) (x)
66#define TIFM_IRQ_CARDMASK 0x00000100 66#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
67#define TIFM_IRQ_FIFOMASK 0x00010000 67#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
68#define TIFM_IRQ_SETALL 0xffffffff 68#define TIFM_IRQ_SETALL 0xffffffff
69#define TIFM_IRQ_SETALLSOCK 0x0000000f
70 69
71#define TIFM_CTRL_LED 0x00000040 70#define TIFM_CTRL_LED 0x00000040
72#define TIFM_CTRL_FAST_CLK 0x00000100 71#define TIFM_CTRL_FAST_CLK 0x00000100
@@ -89,10 +88,9 @@ struct tifm_dev {
89 char __iomem *addr; 88 char __iomem *addr;
90 spinlock_t lock; 89 spinlock_t lock;
91 tifm_media_id media_id; 90 tifm_media_id media_id;
92 char wq_name[KOBJ_NAME_LEN]; 91 unsigned int socket_id;
93 struct workqueue_struct *wq;
94 92
95 unsigned int (*signal_irq)(struct tifm_dev *sock, 93 void (*signal_irq)(struct tifm_dev *sock,
96 unsigned int sock_irq_status); 94 unsigned int sock_irq_status);
97 95
98 struct tifm_driver *drv; 96 struct tifm_driver *drv;
@@ -103,24 +101,23 @@ struct tifm_driver {
103 tifm_media_id *id_table; 101 tifm_media_id *id_table;
104 int (*probe)(struct tifm_dev *dev); 102 int (*probe)(struct tifm_dev *dev);
105 void (*remove)(struct tifm_dev *dev); 103 void (*remove)(struct tifm_dev *dev);
104 int (*suspend)(struct tifm_dev *dev,
105 pm_message_t state);
106 int (*resume)(struct tifm_dev *dev);
106 107
107 struct device_driver driver; 108 struct device_driver driver;
108}; 109};
109 110
110struct tifm_adapter { 111struct tifm_adapter {
111 char __iomem *addr; 112 char __iomem *addr;
112 unsigned int irq_status;
113 unsigned int insert_mask;
114 unsigned int remove_mask;
115 spinlock_t lock; 113 spinlock_t lock;
114 unsigned int irq_status;
115 unsigned int socket_change_set;
116 wait_queue_head_t change_set_notify;
116 unsigned int id; 117 unsigned int id;
117 unsigned int max_sockets; 118 unsigned int num_sockets;
118 char wq_name[KOBJ_NAME_LEN];
119 unsigned int inhibit_new_cards;
120 struct workqueue_struct *wq;
121 struct work_struct media_inserter;
122 struct work_struct media_remover;
123 struct tifm_dev **sockets; 119 struct tifm_dev **sockets;
120 struct task_struct *media_switcher;
124 struct class_device cdev; 121 struct class_device cdev;
125 struct device *dev; 122 struct device *dev;
126 123
@@ -130,9 +127,9 @@ struct tifm_adapter {
130struct tifm_adapter *tifm_alloc_adapter(void); 127struct tifm_adapter *tifm_alloc_adapter(void);
131void tifm_free_device(struct device *dev); 128void tifm_free_device(struct device *dev);
132void tifm_free_adapter(struct tifm_adapter *fm); 129void tifm_free_adapter(struct tifm_adapter *fm);
133int tifm_add_adapter(struct tifm_adapter *fm); 130int tifm_add_adapter(struct tifm_adapter *fm, int (*mediathreadfn)(void *data));
134void tifm_remove_adapter(struct tifm_adapter *fm); 131void tifm_remove_adapter(struct tifm_adapter *fm);
135struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id); 132struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm);
136int tifm_register_driver(struct tifm_driver *drv); 133int tifm_register_driver(struct tifm_driver *drv);
137void tifm_unregister_driver(struct tifm_driver *drv); 134void tifm_unregister_driver(struct tifm_driver *drv);
138void tifm_eject(struct tifm_dev *sock); 135void tifm_eject(struct tifm_dev *sock);
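
The tifm.h hunk replaces the fixed single-socket interrupt masks with parameterized macros, so a whole per-socket bit set can be shifted into the card or FIFO field of the interrupt register at once. A quick stand-alone check of what the new macros expand to:

/* Stand-alone check of the parameterized TIFM interrupt-mask macros
 * introduced above (definitions copied from the hunk). */
#include <stdio.h>

#define TIFM_IRQ_ENABLE		0x80000000
#define TIFM_IRQ_SOCKMASK(x)	(x)
#define TIFM_IRQ_CARDMASK(x)	((x) << 8)
#define TIFM_IRQ_FIFOMASK(x)	((x) << 16)

int main(void)
{
	unsigned int sockets = 0x5;	/* hypothetical set: sockets 0 and 2 */

	/* the old fixed masks could only name socket 0:
	 * 0x00000001 / 0x00000100 / 0x00010000 */
	printf("sock 0x%08x card 0x%08x fifo 0x%08x\n",
	       TIFM_IRQ_SOCKMASK(sockets),
	       TIFM_IRQ_CARDMASK(sockets),
	       TIFM_IRQ_FIFOMASK(sockets));
	/* prints: sock 0x00000005 card 0x00000500 fifo 0x00050000 */
	return 0;
}
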
diff --git a/include/linux/video_output.h b/include/linux/video_output.h
new file mode 100644
index 000000000000..e63e0c03ee0d
--- /dev/null
+++ b/include/linux/video_output.h
@@ -0,0 +1,42 @@
1/*
2 *
3 * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
4 *
5 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or (at
10 * your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 *
21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 */
23#ifndef _LINUX_VIDEO_OUTPUT_H
24#define _LINUX_VIDEO_OUTPUT_H
25#include <linux/device.h>
26struct output_device;
27struct output_properties {
28 int (*set_state)(struct output_device *);
29 int (*get_status)(struct output_device *);
30};
31struct output_device {
32 int request_state;
33 struct output_properties *props;
34 struct class_device class_dev;
35};
36#define to_output_device(obj) container_of(obj, struct output_device, class_dev)
37struct output_device *video_output_register(const char *name,
38 struct device *dev,
39 void *devdata,
40 struct output_properties *op);
41void video_output_unregister(struct output_device *dev);
42#endif
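
The new header above is the whole class interface: a driver fills in output_properties with set_state()/get_status() callbacks and hands them to video_output_register(). A minimal, hedged sketch of a caller follows; the demo_* names are placeholders and error handling is omitted because the registration function's failure convention is not visible in this header.

#include <linux/video_output.h>

static int demo_state;			/* placeholder for real hardware state */

static int demo_set_state(struct output_device *od)
{
	demo_state = od->request_state;	/* a real driver would program the hardware */
	return 0;
}

static int demo_get_status(struct output_device *od)
{
	return demo_state;
}

static struct output_properties demo_props = {
	.set_state	= demo_set_state,
	.get_status	= demo_get_status,
};

static struct output_device *demo_od;

/* called from a driver's probe path with its struct device */
static void demo_register(struct device *dev)
{
	demo_od = video_output_register("demo_output", dev, NULL, &demo_props);
}

static void demo_unregister(void)
{
	video_output_unregister(demo_od);
}
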
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
index 44537aa32e62..d66b15ea82c4 100644
--- a/include/rdma/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -98,7 +98,7 @@ struct ib_user_mad_hdr {
98 */ 98 */
99struct ib_user_mad { 99struct ib_user_mad {
100 struct ib_user_mad_hdr hdr; 100 struct ib_user_mad_hdr hdr;
101 __u8 data[0]; 101 __u64 data[0];
102}; 102};
103 103
104/** 104/**
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0bfa3328d686..765589f4d166 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -45,6 +45,7 @@
45#include <linux/device.h> 45#include <linux/device.h>
46#include <linux/mm.h> 46#include <linux/mm.h>
47#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
48#include <linux/kref.h>
48 49
49#include <asm/atomic.h> 50#include <asm/atomic.h>
50#include <asm/scatterlist.h> 51#include <asm/scatterlist.h>
@@ -419,8 +420,8 @@ struct ib_wc {
419 enum ib_wc_opcode opcode; 420 enum ib_wc_opcode opcode;
420 u32 vendor_err; 421 u32 vendor_err;
421 u32 byte_len; 422 u32 byte_len;
423 struct ib_qp *qp;
422 __be32 imm_data; 424 __be32 imm_data;
423 u32 qp_num;
424 u32 src_qp; 425 u32 src_qp;
425 int wc_flags; 426 int wc_flags;
426 u16 pkey_index; 427 u16 pkey_index;
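
The ib_verbs.h hunk replaces the bare qp_num in struct ib_wc with a pointer to the QP itself, so completion consumers reach the queue pair (and, through it, still its number) without a lookup. A hedged sketch of a poll loop under the new layout; ib_poll_cq() and the qp_num member of struct ib_qp are assumed from the existing verbs API rather than shown in this hunk.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Sketch only: drains a CQ and reports which QP each completion hit. */
static void demo_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		/* previously wc.qp_num; the QP object is now available directly */
		pr_debug("completion on QP %u, status %d\n",
			 wc.qp ? wc.qp->qp_num : 0, wc.status);
	}
}
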
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eefd..50a438010182 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic DMA mapping support. 2 * Dynamic DMA mapping support.
3 * 3 *
4 * This implementation is for IA-64 and EM64T platforms that do not support 4 * This implementation is a fallback for platforms that do not support
5 * I/O TLBs (aka DMA address translation hardware). 5 * I/O TLBs (aka DMA address translation hardware).
6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> 6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> 7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/dma.h> 29#include <asm/dma.h>
30#include <asm/scatterlist.h> 30#include <asm/scatterlist.h>
31#include <asm/swiotlb.h>
31 32
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/bootmem.h> 34#include <linux/bootmem.h>
@@ -35,8 +36,10 @@
35#define OFFSET(val,align) ((unsigned long) \ 36#define OFFSET(val,align) ((unsigned long) \
36 ( (val) & ( (align) - 1))) 37 ( (val) & ( (align) - 1)))
37 38
39#ifndef SG_ENT_VIRT_ADDRESS
38#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 40#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
39#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) 41#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
42#endif
40 43
41/* 44/*
42 * Maximum allowable number of contiguous slabs to map, 45 * Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
101 * We need to save away the original address corresponding to a mapped entry 104 * We need to save away the original address corresponding to a mapped entry
102 * for the sync operations. 105 * for the sync operations.
103 */ 106 */
104static unsigned char **io_tlb_orig_addr; 107#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
108typedef char *io_tlb_addr_t;
109#define swiotlb_orig_addr_null(buffer) (!(buffer))
110#define ptr_to_io_tlb_addr(ptr) (ptr)
111#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
112#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
113#endif
114static io_tlb_addr_t *io_tlb_orig_addr;
105 115
106/* 116/*
107 * Protect the above data structures in the map and unmap calls 117 * Protect the above data structures in the map and unmap calls
108 */ 118 */
109static DEFINE_SPINLOCK(io_tlb_lock); 119static DEFINE_SPINLOCK(io_tlb_lock);
110 120
121#ifdef SWIOTLB_EXTRA_VARIABLES
122SWIOTLB_EXTRA_VARIABLES;
123#endif
124
125#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
111static int __init 126static int __init
112setup_io_tlb_npages(char *str) 127setup_io_tlb_npages(char *str)
113{ 128{
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
122 swiotlb_force = 1; 137 swiotlb_force = 1;
123 return 1; 138 return 1;
124} 139}
140#endif
125__setup("swiotlb=", setup_io_tlb_npages); 141__setup("swiotlb=", setup_io_tlb_npages);
126/* make io_tlb_overflow tunable too? */ 142/* make io_tlb_overflow tunable too? */
127 143
144#ifndef swiotlb_adjust_size
145#define swiotlb_adjust_size(size) ((void)0)
146#endif
147
148#ifndef swiotlb_adjust_seg
149#define swiotlb_adjust_seg(start, size) ((void)0)
150#endif
151
152#ifndef swiotlb_print_info
153#define swiotlb_print_info(bytes) \
154 printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
155 "0x%lx\n", bytes >> 20, \
156 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
157#endif
158
128/* 159/*
129 * Statically reserve bounce buffer space and initialize bounce buffer data 160 * Statically reserve bounce buffer space and initialize bounce buffer data
130 * structures for the software IO TLB used to implement the DMA API. 161 * structures for the software IO TLB used to implement the DMA API.
131 */ 162 */
132void 163void __init
133swiotlb_init_with_default_size (size_t default_size) 164swiotlb_init_with_default_size(size_t default_size)
134{ 165{
135 unsigned long i; 166 unsigned long i, bytes;
136 167
137 if (!io_tlb_nslabs) { 168 if (!io_tlb_nslabs) {
138 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 169 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
139 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); 170 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
140 } 171 }
172 swiotlb_adjust_size(io_tlb_nslabs);
173 swiotlb_adjust_size(io_tlb_overflow);
174
175 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
141 176
142 /* 177 /*
143 * Get IO TLB memory from the low pages 178 * Get IO TLB memory from the low pages
144 */ 179 */
145 io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 180 io_tlb_start = alloc_bootmem_low_pages(bytes);
146 if (!io_tlb_start) 181 if (!io_tlb_start)
147 panic("Cannot allocate SWIOTLB buffer"); 182 panic("Cannot allocate SWIOTLB buffer");
148 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 183 io_tlb_end = io_tlb_start + bytes;
149 184
150 /* 185 /*
151 * Allocate and initialize the free list array. This array is used 186 * Allocate and initialize the free list array. This array is used
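
The hunk above computes the bounce-pool geometry once: io_tlb_nslabs is derived from the requested size and rounded up to a whole IO_TLB_SEGSIZE segment, and bytes = io_tlb_nslabs << IO_TLB_SHIFT then replaces the repeated shifts. A quick stand-alone run of that arithmetic, assuming the usual values IO_TLB_SHIFT = 11 (2 KiB slabs) and IO_TLB_SEGSIZE = 128, which are defined elsewhere in the kernel rather than in this patch:

/* Stand-alone check of the sizing math above. */
#include <stdio.h>

#define IO_TLB_SHIFT	11			/* assumed: 2 KiB per slab */
#define IO_TLB_SEGSIZE	128			/* assumed: slabs per segment */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long default_size = 64UL << 20;	/* the 64 MB default */
	unsigned long nslabs = ALIGN_UP(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	/* 64 MB / 2 KiB = 32768 slabs, already a multiple of the segment size */
	printf("nslabs = %lu, pool = %lu MB\n", nslabs, bytes >> 20);
	return 0;
}
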
@@ -153,34 +188,45 @@ swiotlb_init_with_default_size (size_t default_size)
153 * between io_tlb_start and io_tlb_end. 188 * between io_tlb_start and io_tlb_end.
154 */ 189 */
155 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); 190 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
156 for (i = 0; i < io_tlb_nslabs; i++) 191 for (i = 0; i < io_tlb_nslabs; i++) {
192 if ( !(i % IO_TLB_SEGSIZE) )
193 swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
194 IO_TLB_SEGSIZE << IO_TLB_SHIFT);
157 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 195 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
196 }
158 io_tlb_index = 0; 197 io_tlb_index = 0;
159 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); 198 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
160 199
161 /* 200 /*
162 * Get the overflow emergency buffer 201 * Get the overflow emergency buffer
163 */ 202 */
164 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 203 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
165 printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", 204 if (!io_tlb_overflow_buffer)
166 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); 205 panic("Cannot allocate SWIOTLB overflow buffer!\n");
206 swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
207
208 swiotlb_print_info(bytes);
167} 209}
210#ifndef __swiotlb_init_with_default_size
211#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
212#endif
168 213
169void 214void __init
170swiotlb_init (void) 215swiotlb_init(void)
171{ 216{
172 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 217 __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
173} 218}
174 219
220#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
175/* 221/*
176 * Systems with larger DMA zones (those that don't support ISA) can 222 * Systems with larger DMA zones (those that don't support ISA) can
177 * initialize the swiotlb later using the slab allocator if needed. 223 * initialize the swiotlb later using the slab allocator if needed.
178 * This should be just like above, but with some error catching. 224 * This should be just like above, but with some error catching.
179 */ 225 */
180int 226int
181swiotlb_late_init_with_default_size (size_t default_size) 227swiotlb_late_init_with_default_size(size_t default_size)
182{ 228{
183 unsigned long i, req_nslabs = io_tlb_nslabs; 229 unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
184 unsigned int order; 230 unsigned int order;
185 231
186 if (!io_tlb_nslabs) { 232 if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
191 /* 237 /*
192 * Get IO TLB memory from the low pages 238 * Get IO TLB memory from the low pages
193 */ 239 */
194 order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 240 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
195 io_tlb_nslabs = SLABS_PER_PAGE << order; 241 io_tlb_nslabs = SLABS_PER_PAGE << order;
242 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
196 243
197 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 244 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
198 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 245 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
205 if (!io_tlb_start) 252 if (!io_tlb_start)
206 goto cleanup1; 253 goto cleanup1;
207 254
208 if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { 255 if (order != get_order(bytes)) {
209 printk(KERN_WARNING "Warning: only able to allocate %ld MB " 256 printk(KERN_WARNING "Warning: only able to allocate %ld MB "
210 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 257 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
211 io_tlb_nslabs = SLABS_PER_PAGE << order; 258 io_tlb_nslabs = SLABS_PER_PAGE << order;
259 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
212 } 260 }
213 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 261 io_tlb_end = io_tlb_start + bytes;
214 memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 262 memset(io_tlb_start, 0, bytes);
215 263
216 /* 264 /*
217 * Allocate and initialize the free list array. This array is used 265 * Allocate and initialize the free list array. This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
227 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 275 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
228 io_tlb_index = 0; 276 io_tlb_index = 0;
229 277
230 io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 278 io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
231 get_order(io_tlb_nslabs * sizeof(char *))); 279 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
232 if (!io_tlb_orig_addr) 280 if (!io_tlb_orig_addr)
233 goto cleanup3; 281 goto cleanup3;
234 282
235 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 283 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
236 284
237 /* 285 /*
238 * Get the overflow emergency buffer 286 * Get the overflow emergency buffer
@@ -242,29 +290,29 @@ swiotlb_late_init_with_default_size (size_t default_size)
242 if (!io_tlb_overflow_buffer) 290 if (!io_tlb_overflow_buffer)
243 goto cleanup4; 291 goto cleanup4;
244 292
245 printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " 293 swiotlb_print_info(bytes);
246 "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
247 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
248 294
249 return 0; 295 return 0;
250 296
251cleanup4: 297cleanup4:
252 free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * 298 free_pages((unsigned long)io_tlb_orig_addr,
253 sizeof(char *))); 299 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
254 io_tlb_orig_addr = NULL; 300 io_tlb_orig_addr = NULL;
255cleanup3: 301cleanup3:
256 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * 302 free_pages((unsigned long)io_tlb_list,
257 sizeof(int))); 303 get_order(io_tlb_nslabs * sizeof(int)));
258 io_tlb_list = NULL; 304 io_tlb_list = NULL;
259 io_tlb_end = NULL;
260cleanup2: 305cleanup2:
306 io_tlb_end = NULL;
261 free_pages((unsigned long)io_tlb_start, order); 307 free_pages((unsigned long)io_tlb_start, order);
262 io_tlb_start = NULL; 308 io_tlb_start = NULL;
263cleanup1: 309cleanup1:
264 io_tlb_nslabs = req_nslabs; 310 io_tlb_nslabs = req_nslabs;
265 return -ENOMEM; 311 return -ENOMEM;
266} 312}
313#endif
267 314
315#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
268static inline int 316static inline int
269address_needs_mapping(struct device *hwdev, dma_addr_t addr) 317address_needs_mapping(struct device *hwdev, dma_addr_t addr)
270{ 318{
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
275 return (addr & ~mask) != 0; 323 return (addr & ~mask) != 0;
276} 324}
277 325
326static inline int range_needs_mapping(const void *ptr, size_t size)
327{
328 return swiotlb_force;
329}
330
331static inline int order_needs_mapping(unsigned int order)
332{
333 return 0;
334}
335#endif
336
337static void
338__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
339{
340#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
341 if (dir == DMA_TO_DEVICE)
342 memcpy(dma_addr, buffer, size);
343 else
344 memcpy(buffer, dma_addr, size);
345#else
346 __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
347#endif
348}
349
278/* 350/*
279 * Allocates bounce buffer and returns its kernel virtual address. 351 * Allocates bounce buffer and returns its kernel virtual address.
280 */ 352 */
281static void * 353static void *
282map_single(struct device *hwdev, char *buffer, size_t size, int dir) 354map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
283{ 355{
284 unsigned long flags; 356 unsigned long flags;
285 char *dma_addr; 357 char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
352 */ 424 */
353 io_tlb_orig_addr[index] = buffer; 425 io_tlb_orig_addr[index] = buffer;
354 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 426 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
355 memcpy(dma_addr, buffer, size); 427 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
356 428
357 return dma_addr; 429 return dma_addr;
358} 430}
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
366 unsigned long flags; 438 unsigned long flags;
367 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 439 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
368 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 440 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
369 char *buffer = io_tlb_orig_addr[index]; 441 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
370 442
371 /* 443 /*
372 * First, sync the memory before unmapping the entry 444 * First, sync the memory before unmapping the entry
373 */ 445 */
374 if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 446 if (!swiotlb_orig_addr_null(buffer)
447 && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
375 /* 448 /*
376 * bounce... copy the data back into the original buffer * and 449 * bounce... copy the data back into the original buffer * and
377 * delete the bounce buffer. 450 * delete the bounce buffer.
378 */ 451 */
379 memcpy(buffer, dma_addr, size); 452 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
380 453
381 /* 454 /*
382 * Return the buffer to the free list by setting the corresponding 455 * Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
409 int dir, int target) 482 int dir, int target)
410{ 483{
411 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 484 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
412 char *buffer = io_tlb_orig_addr[index]; 485 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
413 486
414 switch (target) { 487 switch (target) {
415 case SYNC_FOR_CPU: 488 case SYNC_FOR_CPU:
416 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 489 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
417 memcpy(buffer, dma_addr, size); 490 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
418 else 491 else
419 BUG_ON(dir != DMA_TO_DEVICE); 492 BUG_ON(dir != DMA_TO_DEVICE);
420 break; 493 break;
421 case SYNC_FOR_DEVICE: 494 case SYNC_FOR_DEVICE:
422 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 495 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
423 memcpy(dma_addr, buffer, size); 496 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
424 else 497 else
425 BUG_ON(dir != DMA_FROM_DEVICE); 498 BUG_ON(dir != DMA_FROM_DEVICE);
426 break; 499 break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
429 } 502 }
430} 503}
431 504
505#ifdef SWIOTLB_ARCH_NEED_ALLOC
506
432void * 507void *
433swiotlb_alloc_coherent(struct device *hwdev, size_t size, 508swiotlb_alloc_coherent(struct device *hwdev, size_t size,
434 dma_addr_t *dma_handle, gfp_t flags) 509 dma_addr_t *dma_handle, gfp_t flags)
435{ 510{
436 unsigned long dev_addr; 511 dma_addr_t dev_addr;
437 void *ret; 512 void *ret;
438 int order = get_order(size); 513 int order = get_order(size);
439 514
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
444 */ 519 */
445 flags |= GFP_DMA; 520 flags |= GFP_DMA;
446 521
447 ret = (void *)__get_free_pages(flags, order); 522 if (!order_needs_mapping(order))
448 if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { 523 ret = (void *)__get_free_pages(flags, order);
524 else
525 ret = NULL;
526 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
449 /* 527 /*
450 * The allocated memory isn't reachable by the device. 528 * The allocated memory isn't reachable by the device.
451 * Fall back on swiotlb_map_single(). 529 * Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
465 if (swiotlb_dma_mapping_error(handle)) 543 if (swiotlb_dma_mapping_error(handle))
466 return NULL; 544 return NULL;
467 545
468 ret = phys_to_virt(handle); 546 ret = bus_to_virt(handle);
469 } 547 }
470 548
471 memset(ret, 0, size); 549 memset(ret, 0, size);
472 dev_addr = virt_to_phys(ret); 550 dev_addr = virt_to_bus(ret);
473 551
474 /* Confirm address can be DMA'd by device */ 552 /* Confirm address can be DMA'd by device */
475 if (address_needs_mapping(hwdev, dev_addr)) { 553 if (address_needs_mapping(hwdev, dev_addr)) {
476 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", 554 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
477 (unsigned long long)*hwdev->dma_mask, dev_addr); 555 (unsigned long long)*hwdev->dma_mask,
556 (unsigned long long)dev_addr);
478 panic("swiotlb_alloc_coherent: allocated memory is out of " 557 panic("swiotlb_alloc_coherent: allocated memory is out of "
479 "range for device"); 558 "range for device");
480 } 559 }
481 *dma_handle = dev_addr; 560 *dma_handle = dev_addr;
482 return ret; 561 return ret;
483} 562}
563EXPORT_SYMBOL(swiotlb_alloc_coherent);
484 564
485void 565void
486swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 566swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
493 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 573 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
494 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); 574 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
495} 575}
576EXPORT_SYMBOL(swiotlb_free_coherent);
577
578#endif
496 579
497static void 580static void
498swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 581swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
504 * When the mapping is small enough return a static buffer to limit 587 * When the mapping is small enough return a static buffer to limit
505 * the damage, or panic when the transfer is too big. 588 * the damage, or panic when the transfer is too big.
506 */ 589 */
507 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " 590 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
508 "device %s\n", size, dev ? dev->bus_id : "?"); 591 "device %s\n", size, dev ? dev->bus_id : "?");
509 592
510 if (size > io_tlb_overflow && do_panic) { 593 if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
525dma_addr_t 608dma_addr_t
526swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) 609swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
527{ 610{
528 unsigned long dev_addr = virt_to_phys(ptr); 611 dma_addr_t dev_addr = virt_to_bus(ptr);
529 void *map; 612 void *map;
530 613
531 BUG_ON(dir == DMA_NONE); 614 BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
534 * we can safely return the device addr and not worry about bounce 617 * we can safely return the device addr and not worry about bounce
535 * buffering it. 618 * buffering it.
536 */ 619 */
537 if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) 620 if (!range_needs_mapping(ptr, size)
621 && !address_needs_mapping(hwdev, dev_addr))
538 return dev_addr; 622 return dev_addr;
539 623
540 /* 624 /*
541 * Oh well, have to allocate and map a bounce buffer. 625 * Oh well, have to allocate and map a bounce buffer.
542 */ 626 */
543 map = map_single(hwdev, ptr, size, dir); 627 map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
544 if (!map) { 628 if (!map) {
545 swiotlb_full(hwdev, size, dir, 1); 629 swiotlb_full(hwdev, size, dir, 1);
546 map = io_tlb_overflow_buffer; 630 map = io_tlb_overflow_buffer;
547 } 631 }
548 632
549 dev_addr = virt_to_phys(map); 633 dev_addr = virt_to_bus(map);
550 634
551 /* 635 /*
552 * Ensure that the address returned is DMA'ble 636 * Ensure that the address returned is DMA'ble
@@ -558,25 +642,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
558} 642}
559 643
560/* 644/*
561 * Since DMA is i-cache coherent, any (complete) pages that were written via
562 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
563 * flush them when they get mapped into an executable vm-area.
564 */
565static void
566mark_clean(void *addr, size_t size)
567{
568 unsigned long pg_addr, end;
569
570 pg_addr = PAGE_ALIGN((unsigned long) addr);
571 end = (unsigned long) addr + size;
572 while (pg_addr + PAGE_SIZE <= end) {
573 struct page *page = virt_to_page(pg_addr);
574 set_bit(PG_arch_1, &page->flags);
575 pg_addr += PAGE_SIZE;
576 }
577}
578
579/*
580 * Unmap a single streaming mode DMA translation. The dma_addr and size must 645 * Unmap a single streaming mode DMA translation. The dma_addr and size must
581 * match what was provided for in a previous swiotlb_map_single call. All 646 * match what was provided for in a previous swiotlb_map_single call. All
582 * other usages are undefined. 647 * other usages are undefined.
@@ -588,13 +653,13 @@ void
588swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, 653swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
589 int dir) 654 int dir)
590{ 655{
591 char *dma_addr = phys_to_virt(dev_addr); 656 char *dma_addr = bus_to_virt(dev_addr);
592 657
593 BUG_ON(dir == DMA_NONE); 658 BUG_ON(dir == DMA_NONE);
594 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 659 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
595 unmap_single(hwdev, dma_addr, size, dir); 660 unmap_single(hwdev, dma_addr, size, dir);
596 else if (dir == DMA_FROM_DEVICE) 661 else if (dir == DMA_FROM_DEVICE)
597 mark_clean(dma_addr, size); 662 dma_mark_clean(dma_addr, size);
598} 663}
599 664
600/* 665/*
@@ -611,13 +676,13 @@ static inline void
611swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 676swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
612 size_t size, int dir, int target) 677 size_t size, int dir, int target)
613{ 678{
614 char *dma_addr = phys_to_virt(dev_addr); 679 char *dma_addr = bus_to_virt(dev_addr);
615 680
616 BUG_ON(dir == DMA_NONE); 681 BUG_ON(dir == DMA_NONE);
617 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 682 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
618 sync_single(hwdev, dma_addr, size, dir, target); 683 sync_single(hwdev, dma_addr, size, dir, target);
619 else if (dir == DMA_FROM_DEVICE) 684 else if (dir == DMA_FROM_DEVICE)
620 mark_clean(dma_addr, size); 685 dma_mark_clean(dma_addr, size);
621} 686}
622 687
623void 688void
@@ -642,13 +707,13 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
642 unsigned long offset, size_t size, 707 unsigned long offset, size_t size,
643 int dir, int target) 708 int dir, int target)
644{ 709{
645 char *dma_addr = phys_to_virt(dev_addr) + offset; 710 char *dma_addr = bus_to_virt(dev_addr) + offset;
646 711
647 BUG_ON(dir == DMA_NONE); 712 BUG_ON(dir == DMA_NONE);
648 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 713 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
649 sync_single(hwdev, dma_addr, size, dir, target); 714 sync_single(hwdev, dma_addr, size, dir, target);
650 else if (dir == DMA_FROM_DEVICE) 715 else if (dir == DMA_FROM_DEVICE)
651 mark_clean(dma_addr, size); 716 dma_mark_clean(dma_addr, size);
652} 717}
653 718
654void 719void
@@ -687,18 +752,16 @@ int
687swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, 752swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
688 int dir) 753 int dir)
689{ 754{
690 void *addr; 755 dma_addr_t dev_addr;
691 unsigned long dev_addr;
692 int i; 756 int i;
693 757
694 BUG_ON(dir == DMA_NONE); 758 BUG_ON(dir == DMA_NONE);
695 759
696 for (i = 0; i < nelems; i++, sg++) { 760 for (i = 0; i < nelems; i++, sg++) {
697 addr = SG_ENT_VIRT_ADDRESS(sg); 761 dev_addr = SG_ENT_PHYS_ADDRESS(sg);
698 dev_addr = virt_to_phys(addr); 762 if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
699 if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { 763 || address_needs_mapping(hwdev, dev_addr)) {
700 void *map = map_single(hwdev, addr, sg->length, dir); 764 void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
701 sg->dma_address = virt_to_bus(map);
702 if (!map) { 765 if (!map) {
703 /* Don't panic here, we expect map_sg users 766 /* Don't panic here, we expect map_sg users
704 to do proper error handling. */ 767 to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
707 sg[0].dma_length = 0; 770 sg[0].dma_length = 0;
708 return 0; 771 return 0;
709 } 772 }
773 sg->dma_address = virt_to_bus(map);
710 } else 774 } else
711 sg->dma_address = dev_addr; 775 sg->dma_address = dev_addr;
712 sg->dma_length = sg->length; 776 sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
728 792
729 for (i = 0; i < nelems; i++, sg++) 793 for (i = 0; i < nelems; i++, sg++)
730 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 794 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
731 unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir); 795 unmap_single(hwdev, bus_to_virt(sg->dma_address),
796 sg->dma_length, dir);
732 else if (dir == DMA_FROM_DEVICE) 797 else if (dir == DMA_FROM_DEVICE)
733 mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 798 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
734} 799}
735 800
736/* 801/*
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
750 815
751 for (i = 0; i < nelems; i++, sg++) 816 for (i = 0; i < nelems; i++, sg++)
752 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 817 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
753 sync_single(hwdev, (void *) sg->dma_address, 818 sync_single(hwdev, bus_to_virt(sg->dma_address),
754 sg->dma_length, dir, target); 819 sg->dma_length, dir, target);
820 else if (dir == DMA_FROM_DEVICE)
821 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
755} 822}
756 823
757void 824void
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
768 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 835 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
769} 836}
770 837
838#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
839
840dma_addr_t
841swiotlb_map_page(struct device *hwdev, struct page *page,
842 unsigned long offset, size_t size,
843 enum dma_data_direction direction)
844{
845 dma_addr_t dev_addr;
846 char *map;
847
848 dev_addr = page_to_bus(page) + offset;
849 if (address_needs_mapping(hwdev, dev_addr)) {
850 map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
851 if (!map) {
852 swiotlb_full(hwdev, size, direction, 1);
853 map = io_tlb_overflow_buffer;
854 }
855 dev_addr = virt_to_bus(map);
856 }
857
858 return dev_addr;
859}
860
861void
862swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
863 size_t size, enum dma_data_direction direction)
864{
865 char *dma_addr = bus_to_virt(dev_addr);
866
867 BUG_ON(direction == DMA_NONE);
868 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
869 unmap_single(hwdev, dma_addr, size, direction);
870 else if (direction == DMA_FROM_DEVICE)
871 dma_mark_clean(dma_addr, size);
872}
873
874#endif
875
771int 876int
772swiotlb_dma_mapping_error(dma_addr_t dma_addr) 877swiotlb_dma_mapping_error(dma_addr_t dma_addr)
773{ 878{
774 return (dma_addr == virt_to_phys(io_tlb_overflow_buffer)); 879 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
775} 880}
776 881
777/* 882/*
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
780 * during bus mastering, then you would pass 0x00ffffff as the mask to 885 * during bus mastering, then you would pass 0x00ffffff as the mask to
781 * this function. 886 * this function.
782 */ 887 */
888#ifndef __swiotlb_dma_supported
889#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
890#endif
783int 891int
784swiotlb_dma_supported (struct device *hwdev, u64 mask) 892swiotlb_dma_supported(struct device *hwdev, u64 mask)
785{ 893{
786 return (virt_to_phys (io_tlb_end) - 1) <= mask; 894 return __swiotlb_dma_supported(hwdev, mask);
787} 895}
788 896
789EXPORT_SYMBOL(swiotlb_init); 897EXPORT_SYMBOL(swiotlb_init);
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
798EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); 906EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
799EXPORT_SYMBOL(swiotlb_sync_sg_for_device); 907EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
800EXPORT_SYMBOL(swiotlb_dma_mapping_error); 908EXPORT_SYMBOL(swiotlb_dma_mapping_error);
801EXPORT_SYMBOL(swiotlb_alloc_coherent);
802EXPORT_SYMBOL(swiotlb_free_coherent);
803EXPORT_SYMBOL(swiotlb_dma_supported); 909EXPORT_SYMBOL(swiotlb_dma_supported);
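For readers following the lib/swiotlb.c hunks above, here is a minimal userspace sketch of the copy-direction rule that the new __sync_single() helper applies on the default path (no SWIOTLB_ARCH_HAS_SYNC_SINGLE override): data is copied into the bounce slot before the device reads it, and copied back out after the device has written it. The enum values and buffers below are illustrative stand-ins, not the kernel's types.

```c
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the kernel's DMA direction values. */
enum dma_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };

/*
 * Default bounce-buffer sync, mirroring the patch's __sync_single()
 * when no architecture override is defined: TO_DEVICE copies the
 * caller's buffer into the bounce slot, FROM_DEVICE copies the
 * device-written bounce slot back to the caller's buffer.
 */
static void sync_single(char *orig, char *bounce, size_t size,
                        enum dma_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		memcpy(bounce, orig, size);
	else
		memcpy(orig, bounce, size);
}

int main(void)
{
	char orig[16] = "driver payload";
	char bounce[16] = { 0 };

	sync_single(orig, bounce, sizeof(orig), DMA_TO_DEVICE);   /* map / sync_for_device */
	bounce[0] = 'D';                                           /* pretend the device wrote here */
	sync_single(orig, bounce, sizeof(orig), DMA_FROM_DEVICE); /* unmap / sync_for_cpu */

	printf("%s\n", orig); /* prints "Driver payload" */
	return 0;
}
```

These two copies are the step that map_single(), unmap_single() and sync_single() in the hunks above now route through, so an architecture that supplies its own __swiotlb_arch_sync_single() replaces only this copy, not the slot bookkeeping around it.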
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index f01f8c072852..d65c40331e66 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -1,7 +1,7 @@
1#### 1####
2# kbuild: Generic definitions 2# kbuild: Generic definitions
3 3
4# Convinient variables 4# Convenient constants
5comma := , 5comma := ,
6squote := ' 6squote := '
7empty := 7empty :=
@@ -56,76 +56,89 @@ endef
56# gcc support functions 56# gcc support functions
57# See documentation in Documentation/kbuild/makefiles.txt 57# See documentation in Documentation/kbuild/makefiles.txt
58 58
59# output directory for tests below 59# checker-shell
60TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) 60# Usage: option = $(call checker-shell,$(CC)...-o $$OUT,option-ok,otherwise)
61# Exit code chooses option. $$OUT is safe location for needless output.
62define checker-shell
63 $(shell set -e; \
64 DIR=$(KBUILD_EXTMOD); \
65 cd $${DIR:-$(objtree)}; \
66 OUT=$$PWD/.$$$$.null; \
67 \
68 ln -s /dev/null $$OUT; \
69 if $(1) >/dev/null 2>&1; \
70 then echo "$(2)"; \
71 else echo "$(3)"; \
72 fi; \
73 rm -f $$OUT)
74endef
61 75
62# as-option 76# as-option
63# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,) 77# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
64 78as-option = $(call checker-shell,\
65as-option = $(shell if $(CC) $(CFLAGS) $(1) -Wa,-Z -c -o /dev/null \ 79 $(CC) $(CFLAGS) $(1) -c -xassembler /dev/null -o $$OUT,$(1),$(2))
66 -xassembler /dev/null > /dev/null 2>&1; then echo "$(1)"; \
67 else echo "$(2)"; fi ;)
68 80
69# as-instr 81# as-instr
70# Usage: cflags-y += $(call as-instr, instr, option1, option2) 82# Usage: cflags-y += $(call as-instr,instr,option1,option2)
71 83as-instr = $(call checker-shell,\
72as-instr = $(shell if echo -e "$(1)" | \ 84 printf "$(1)" | $(CC) $(AFLAGS) -c -xassembler -o $$OUT -,$(2),$(3))
73 $(CC) $(AFLAGS) -c -xassembler - \
74 -o $(TMPOUT)astest$$$$.out > /dev/null 2>&1; \
75 then rm $(TMPOUT)astest$$$$.out; echo "$(2)"; \
76 else echo "$(3)"; fi)
77 85
78# cc-option 86# cc-option
79# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586) 87# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
80 88cc-option = $(call checker-shell,\
81cc-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ 89 $(CC) $(CFLAGS) $(if $(3),$(3),$(1)) -S -xc /dev/null -o $$OUT,$(1),$(2))
82 > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
83 90
84# cc-option-yn 91# cc-option-yn
85# Usage: flag := $(call cc-option-yn, -march=winchip-c6) 92# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
86cc-option-yn = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ 93cc-option-yn = $(call cc-option,"y","n",$(1))
87 > /dev/null 2>&1; then echo "y"; else echo "n"; fi;)
88 94
89# cc-option-align 95# cc-option-align
90# Prefix align with either -falign or -malign 96# Prefix align with either -falign or -malign
91cc-option-align = $(subst -functions=0,,\ 97cc-option-align = $(subst -functions=0,,\
92 $(call cc-option,-falign-functions=0,-malign-functions=0)) 98 $(call cc-option,-falign-functions=0,-malign-functions=0))
93 99
94# cc-version 100# cc-version
95# Usage gcc-ver := $(call cc-version, $(CC)) 101# Usage gcc-ver := $(call cc-version,$(CC))
96cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) 102cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
97 103
98# cc-ifversion 104# cc-ifversion
99# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 105# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
100cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \ 106cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
101 echo $(3); fi;)
102 107
103# ld-option 108# ld-option
104# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both) 109# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both)
105ld-option = $(shell if $(CC) $(1) -nostdlib -xc /dev/null \ 110ld-option = $(call checker-shell,\
106 -o $(TMPOUT)ldtest$$$$.out > /dev/null 2>&1; \ 111 $(CC) $(1) -nostdlib -xc /dev/null -o $$OUT,$(1),$(2))
107 then rm $(TMPOUT)ldtest$$$$.out; echo "$(1)"; \ 112
108 else echo "$(2)"; fi) 113######
109 114
110###
111# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj= 115# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=
112# Usage: 116# Usage:
113# $(Q)$(MAKE) $(build)=dir 117# $(Q)$(MAKE) $(build)=dir
114build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj 118build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
115 119
116# Prefix -I with $(srctree) if it is not an absolute path 120# Prefix -I with $(srctree) if it is not an absolute path,
117addtree = $(if $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1) 121# add original to the end
122addtree = $(if \
123 $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1)
124
118# Find all -I options and call addtree 125# Find all -I options and call addtree
119flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) 126flags = $(foreach o,$($(1)),\
127 $(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
128
129# echo command.
130# Short version is used, if $(quiet) equals `quiet_', otherwise full one.
131echo-cmd = $(if $($(quiet)cmd_$(1)),\
132 echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
120 133
121# If quiet is set, only print short version of command 134# printing commands
122cmd = @$(echo-cmd) $(cmd_$(1)) 135cmd = @$(echo-cmd) $(cmd_$(1))
123 136
124# Add $(obj)/ for paths that is not absolute 137# Add $(obj)/ for paths that are not absolute
125objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o))) 138objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
126 139
127### 140###
128# if_changed - execute command if any prerequisite is newer than 141# if_changed - execute command if any prerequisite is newer than
129# target, or command line has changed 142# target, or command line has changed
130# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies 143# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies
131# including used config symbols 144# including used config symbols
@@ -133,16 +146,12 @@ objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
133# See Documentation/kbuild/makefiles.txt for more info 146# See Documentation/kbuild/makefiles.txt for more info
134 147
135ifneq ($(KBUILD_NOCMDDEP),1) 148ifneq ($(KBUILD_NOCMDDEP),1)
136# Check if both arguments has same arguments. Result in empty string if equal 149# Check if both arguments has same arguments. Result is empty string, if equal.
137# User may override this check using make KBUILD_NOCMDDEP=1 150# User may override this check using make KBUILD_NOCMDDEP=1
138arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \ 151arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
139 $(filter-out $(cmd_$@), $(cmd_$(1))) ) 152 $(filter-out $(cmd_$@), $(cmd_$(1))) )
140endif 153endif
141 154
142# echo command. Short version is $(quiet) equals quiet, otherwise full command
143echo-cmd = $(if $($(quiet)cmd_$(1)), \
144 echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
145
146# >'< substitution is for echo to work, 155# >'< substitution is for echo to work,
147# >$< substitution to preserve $ when reloading .cmd file 156# >$< substitution to preserve $ when reloading .cmd file
148# note: when using inline perl scripts [perl -e '...$$t=1;...'] 157# note: when using inline perl scripts [perl -e '...$$t=1;...']
@@ -153,15 +162,15 @@ make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
153# PHONY targets skipped in both cases. 162# PHONY targets skipped in both cases.
154any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^) 163any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
155 164
156# Execute command if command has changed or prerequisitei(s) are updated 165# Execute command if command has changed or prerequisite(s) are updated.
157# 166#
158if_changed = $(if $(strip $(any-prereq) $(arg-check)), \ 167if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
159 @set -e; \ 168 @set -e; \
160 $(echo-cmd) $(cmd_$(1)); \ 169 $(echo-cmd) $(cmd_$(1)); \
161 echo 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd) 170 echo 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
162 171
163# execute the command and also postprocess generated .d dependencies 172# Execute the command and also postprocess generated .d dependencies file.
164# file 173#
165if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \ 174if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
166 @set -e; \ 175 @set -e; \
167 $(echo-cmd) $(cmd_$(1)); \ 176 $(echo-cmd) $(cmd_$(1)); \
@@ -169,9 +178,10 @@ if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
169 rm -f $(depfile); \ 178 rm -f $(depfile); \
170 mv -f $(dot-target).tmp $(dot-target).cmd) 179 mv -f $(dot-target).tmp $(dot-target).cmd)
171 180
181# Will check if $(cmd_foo) changed, or any of the prerequisites changed,
182# and if so will execute $(rule_foo).
172# Usage: $(call if_changed_rule,foo) 183# Usage: $(call if_changed_rule,foo)
173# will check if $(cmd_foo) changed, or any of the prequisites changed, 184#
174# and if so will execute $(rule_foo)
175if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \ 185if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \
176 @set -e; \ 186 @set -e; \
177 $(rule_$(1))) 187 $(rule_$(1)))
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 4c723fd18648..43f75d6e4d96 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -1,6 +1,6 @@
1#!/bin/bash 1#!/bin/bash
2# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org> 2# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org>
3# Copyright (c) 2006 Sam Ravnborg <sam@ravnborg.org> 3# Copyright (C) 2006 Sam Ravnborg <sam@ravnborg.org>
4# 4#
5# Released under the terms of the GNU GPL 5# Released under the terms of the GNU GPL
6# 6#
@@ -17,15 +17,15 @@ cat << EOF
17Usage: 17Usage:
18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ... 18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
19 -o <file> Create gzipped initramfs file named <file> using 19 -o <file> Create gzipped initramfs file named <file> using
20 gen_init_cpio and gzip 20 gen_init_cpio and gzip
21 -u <uid> User ID to map to user ID 0 (root). 21 -u <uid> User ID to map to user ID 0 (root).
22 <uid> is only meaningful if <cpio_source> 22 <uid> is only meaningful if <cpio_source>
23 is a directory. 23 is a directory.
24 -g <gid> Group ID to map to group ID 0 (root). 24 -g <gid> Group ID to map to group ID 0 (root).
25 <gid> is only meaningful if <cpio_source> 25 <gid> is only meaningful if <cpio_source>
26 is a directory. 26 is a directory.
27 <cpio_source> File list or directory for cpio archive. 27 <cpio_source> File list or directory for cpio archive.
28 If <cpio_source> is a .cpio file it will be used 28 If <cpio_source> is a .cpio file it will be used
29 as direct input to initramfs. 29 as direct input to initramfs.
30 -d Output the default cpio list. 30 -d Output the default cpio list.
31 31
@@ -36,6 +36,12 @@ to reset the root/group mapping.
36EOF 36EOF
37} 37}
38 38
39# awk style field access
40# $1 - field number; rest is argument string
41field() {
42 shift $1 ; echo $1
43}
44
39list_default_initramfs() { 45list_default_initramfs() {
40 # echo usr/kinit/kinit 46 # echo usr/kinit/kinit
41 : 47 :
@@ -119,22 +125,17 @@ parse() {
119 str="${ftype} ${name} ${location} ${str}" 125 str="${ftype} ${name} ${location} ${str}"
120 ;; 126 ;;
121 "nod") 127 "nod")
122 local dev_type= 128 local dev=`LC_ALL=C ls -l "${location}"`
123 local maj=$(LC_ALL=C ls -l "${location}" | \ 129 local maj=`field 5 ${dev}`
124 gawk '{sub(/,/, "", $5); print $5}') 130 local min=`field 6 ${dev}`
125 local min=$(LC_ALL=C ls -l "${location}" | \ 131 maj=${maj%,}
126 gawk '{print $6}') 132
127 133 [ -b "${location}" ] && dev="b" || dev="c"
128 if [ -b "${location}" ]; then 134
129 dev_type="b" 135 str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
130 else
131 dev_type="c"
132 fi
133 str="${ftype} ${name} ${str} ${dev_type} ${maj} ${min}"
134 ;; 136 ;;
135 "slink") 137 "slink")
136 local target=$(LC_ALL=C ls -l "${location}" | \ 138 local target=`field 11 $(LC_ALL=C ls -l "${location}")`
137 gawk '{print $11}')
138 str="${ftype} ${name} ${target} ${str}" 139 str="${ftype} ${name} ${target} ${str}"
139 ;; 140 ;;
140 *) 141 *)
diff --git a/scripts/makelst b/scripts/makelst
index 34bd72391238..4fc80f2b7e19 100755
--- a/scripts/makelst
+++ b/scripts/makelst
@@ -1,31 +1,31 @@
1#!/bin/bash 1#!/bin/sh
2# A script to dump mixed source code & assembly 2# A script to dump mixed source code & assembly
3# with correct relocations from System.map 3# with correct relocations from System.map
4# Requires the following lines in Rules.make. 4# Requires the following lines in makefile:
5# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
6# William Stearns <wstearns@pobox.com>
7#%.lst: %.c 5#%.lst: %.c
8# $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$@) -g -c -o $*.o $< 6# $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$@) -g -c -o $*.o $<
9# $(TOPDIR)/scripts/makelst $*.o $(TOPDIR)/System.map $(OBJDUMP) 7# $(srctree)/scripts/makelst $*.o $(objtree)/System.map $(OBJDUMP)
10# 8#
11# Copyright (C) 2000 IBM Corporation 9# Copyright (C) 2000 IBM Corporation
12# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 10# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
11# William Stearns <wstearns@pobox.com>
13# 12#
14 13
15t1=`$3 --syms $1 | grep .text | grep " F " | head -n 1` 14# awk style field access
15field() {
16 shift $1 ; echo $1
17}
18
19t1=`$3 --syms $1 | grep .text | grep -m1 " F "`
16if [ -n "$t1" ]; then 20if [ -n "$t1" ]; then
17 t2=`echo $t1 | gawk '{ print $6 }'` 21 t2=`field 6 $t1`
18 if [ ! -r $2 ]; then 22 if [ ! -r $2 ]; then
19 echo "No System.map" >&2 23 echo "No System.map" >&2
20 t7=0
21 else 24 else
22 t3=`grep $t2 $2` 25 t3=`grep $t2 $2`
23 t4=`echo $t3 | gawk '{ print $1 }'` 26 t4=`field 1 $t3`
24 t5=`echo $t1 | gawk '{ print $1 }'` 27 t5=`field 1 $t1`
25 t6=`echo $t4 - $t5 | tr a-f A-F` 28 t6=`printf "%lu" $((0x$t4 - 0x$t5))`
26 t7=`( echo ibase=16 ; echo $t6 ) | bc`
27 fi 29 fi
28else
29 t7=0
30fi 30fi
31$3 -r --source --adjust-vma=$t7 $1 31$3 -r --source --adjust-vma=${t6:-0} $1
diff --git a/security/keys/key.c b/security/keys/key.c
index ac9326c5f1da..700400d801dc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -188,6 +188,7 @@ static inline void key_alloc_serial(struct key *key)
188 188
189 spin_lock(&key_serial_lock); 189 spin_lock(&key_serial_lock);
190 190
191attempt_insertion:
191 parent = NULL; 192 parent = NULL;
192 p = &key_serial_tree.rb_node; 193 p = &key_serial_tree.rb_node;
193 194
@@ -202,39 +203,33 @@ static inline void key_alloc_serial(struct key *key)
202 else 203 else
203 goto serial_exists; 204 goto serial_exists;
204 } 205 }
205 goto insert_here; 206
207 /* we've found a suitable hole - arrange for this key to occupy it */
208 rb_link_node(&key->serial_node, parent, p);
209 rb_insert_color(&key->serial_node, &key_serial_tree);
210
211 spin_unlock(&key_serial_lock);
212 return;
206 213
207 /* we found a key with the proposed serial number - walk the tree from 214 /* we found a key with the proposed serial number - walk the tree from
208 * that point looking for the next unused serial number */ 215 * that point looking for the next unused serial number */
209serial_exists: 216serial_exists:
210 for (;;) { 217 for (;;) {
211 key->serial++; 218 key->serial++;
212 if (key->serial < 2) 219 if (key->serial < 3) {
213 key->serial = 2; 220 key->serial = 3;
214 221 goto attempt_insertion;
215 if (!rb_parent(parent)) 222 }
216 p = &key_serial_tree.rb_node;
217 else if (rb_parent(parent)->rb_left == parent)
218 p = &(rb_parent(parent)->rb_left);
219 else
220 p = &(rb_parent(parent)->rb_right);
221 223
222 parent = rb_next(parent); 224 parent = rb_next(parent);
223 if (!parent) 225 if (!parent)
224 break; 226 goto attempt_insertion;
225 227
226 xkey = rb_entry(parent, struct key, serial_node); 228 xkey = rb_entry(parent, struct key, serial_node);
227 if (key->serial < xkey->serial) 229 if (key->serial < xkey->serial)
228 goto insert_here; 230 goto attempt_insertion;
229 } 231 }
230 232
231 /* we've found a suitable hole - arrange for this key to occupy it */
232insert_here:
233 rb_link_node(&key->serial_node, parent, p);
234 rb_insert_color(&key->serial_node, &key_serial_tree);
235
236 spin_unlock(&key_serial_lock);
237
238} /* end key_alloc_serial() */ 233} /* end key_alloc_serial() */
239 234
240/*****************************************************************************/ 235/*****************************************************************************/
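The security/keys/key.c hunk above restructures the collision walk in key_alloc_serial(): when the candidate serial is already in the tree, it advances past the occupied entries, keeps the serial at or above 3, and jumps back to re-attempt the insertion. Below is a small userspace sketch of that search using a plain sorted array in place of the kernel's red-black tree; used[], nused and next_free_serial() are hypothetical names for illustration only, not kernel API.

```c
#include <stdio.h>

/*
 * Simplified stand-in for the collision walk in key_alloc_serial():
 * given a sorted list of serials already in use, return the first
 * unused serial >= candidate, never handing out values below 3
 * (the patch raises the old lower bound of 2 to 3).
 */
static unsigned int next_free_serial(const unsigned int *used, int nused,
				     unsigned int candidate)
{
	int i = 0;

	if (candidate < 3)
		candidate = 3;

	/* skip used serials smaller than the candidate */
	while (i < nused && used[i] < candidate)
		i++;

	/* walk past consecutive occupied serials, re-attempting as we go */
	while (i < nused && used[i] == candidate) {
		candidate++;
		if (candidate < 3)	/* counter wrapped; stay out of the reserved range */
			candidate = 3;
		i++;
	}
	return candidate;
}

int main(void)
{
	unsigned int used[] = { 3, 4, 5, 9 };

	printf("%u\n", next_free_serial(used, 4, 2)); /* 6: 2 is reserved, 3-5 taken */
	printf("%u\n", next_free_serial(used, 4, 7)); /* 7: free immediately */
	return 0;
}
```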